Diffstat (limited to 'drivers/target/target_core_iblock.c')
-rw-r--r--	drivers/target/target_core_iblock.c	779
1 file changed, 489 insertions(+), 290 deletions(-)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8572eae62da..7e6b857c6b3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -4,10 +4,7 @@
  * This file contains the Storage Engine <-> Linux BlockIO transport
  * specific functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2013 Datera, Inc.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -40,15 +37,23 @@
 #include <linux/module.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
+#include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 
 #include "target_core_iblock.h"
 
-static struct se_subsystem_api iblock_template;
+#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
+#define IBLOCK_BIO_POOL_SIZE	128
 
-static void iblock_bio_done(struct bio *, int);
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct iblock_dev, dev);
+}
+
+
+static struct se_subsystem_api iblock_template;
 
 /*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -56,117 +61,70 @@ static void iblock_bio_done(struct bio *, int);
  */
 static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
 {
-	struct iblock_hba *ib_host;
-
-	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
-	if (!ib_host) {
-		pr_err("Unable to allocate memory for"
-				" struct iblock_hba\n");
-		return -ENOMEM;
-	}
-
-	ib_host->iblock_host_id = host_id;
-
-	hba->hba_ptr = ib_host;
-
 	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
 		" Generic Target Core Stack %s\n", hba->hba_id,
 		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
-
-	pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
-		hba->hba_id, ib_host->iblock_host_id);
-
 	return 0;
 }
 
 static void iblock_detach_hba(struct se_hba *hba)
 {
-	struct iblock_hba *ib_host = hba->hba_ptr;
-
-	pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
-		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
-
-	kfree(ib_host);
-	hba->hba_ptr = NULL;
 }
 
-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct iblock_dev *ib_dev = NULL;
-	struct iblock_hba *ib_host = hba->hba_ptr;
 
 	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
 	if (!ib_dev) {
 		pr_err("Unable to allocate struct iblock_dev\n");
 		return NULL;
 	}
-	ib_dev->ibd_host = ib_host;
 
 	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
 
-	return ib_dev;
+	return &ib_dev->dev;
 }
 
-static struct se_device *iblock_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int iblock_configure_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct block_device *bd = NULL;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	struct request_queue *q;
-	struct queue_limits *limits;
-	u32 dev_flags = 0;
-	int ret = -EINVAL;
+	struct block_device *bd = NULL;
+	struct blk_integrity *bi;
+	fmode_t mode;
+	int ret = -ENOMEM;
 
-	if (!ib_dev) {
-		pr_err("Unable to locate struct iblock_dev parameter\n");
-		return ERR_PTR(ret);
+	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
+		return -EINVAL;
 	}
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
-	/*
-	 * These settings need to be made tunable..
-	 */
-	ib_dev->ibd_bio_set = bioset_create(32, 0);
+
+	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
 	if (!ib_dev->ibd_bio_set) {
-		pr_err("IBLOCK: Unable to create bioset()\n");
-		return ERR_PTR(-ENOMEM);
+		pr_err("IBLOCK: Unable to create bioset\n");
+		goto out;
 	}
-	pr_debug("IBLOCK: Created bio_set()\n");
-	/*
-	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
-	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
-	 */
+
 	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
 			ib_dev->ibd_udev_path);
 
-	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
-				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+	mode = FMODE_READ|FMODE_EXCL;
+	if (!ib_dev->ibd_readonly)
+		mode |= FMODE_WRITE;
+
+	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
 	if (IS_ERR(bd)) {
 		ret = PTR_ERR(bd);
-		goto failed;
+		goto out_free_bioset;
 	}
-	/*
-	 * Setup the local scope queue_limits from struct request_queue->limits
-	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-	 */
-	q = bdev_get_queue(bd);
-	limits = &dev_limits.limits;
-	limits->logical_block_size = bdev_logical_block_size(bd);
-	limits->max_hw_sectors = queue_max_hw_sectors(q);
-	limits->max_sectors = queue_max_sectors(q);
-	dev_limits.hw_queue_depth = q->nr_requests;
-	dev_limits.queue_depth = q->nr_requests;
-
 	ib_dev->ibd_bd = bd;
 
-	dev = transport_add_device_to_core_hba(hba,
-			&iblock_template, se_dev, dev_flags, ib_dev,
-			&dev_limits, "IBLOCK", IBLOCK_VERSION);
-	if (!dev)
-		goto failed;
+	q = bdev_get_queue(bd);
+
+	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
 	/*
 	 * Check if the underlying struct block_device request_queue supports
@@ -174,64 +132,81 @@ static struct se_device *iblock_create_virtdevice(
 	 * in ATA and we need to set TPE=1
 	 */
 	if (blk_queue_discard(q)) {
-		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+		dev->dev_attrib.max_unmap_lba_count =
 				q->limits.max_discard_sectors;
+
 		/*
 		 * Currently hardcoded to 1 in Linux/SCSI code..
 		 */
-		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+		dev->dev_attrib.max_unmap_block_desc_count = 1;
+		dev->dev_attrib.unmap_granularity =
 				q->limits.discard_granularity >> 9;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+		dev->dev_attrib.unmap_granularity_alignment =
 				q->limits.discard_alignment;
 
 		pr_debug("IBLOCK: BLOCK Discard support available,"
 				" disabled by default\n");
 	}
+	/*
+	 * Enable write same emulation for IBLOCK and use 0xFFFF as
+	 * the smaller WRITE_SAME(10) only has a two-byte block count.
+	 */
+	dev->dev_attrib.max_write_same_len = 0xFFFF;
 
 	if (blk_queue_nonrot(q))
-		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+		dev->dev_attrib.is_nonrot = 1;
+
+	bi = bdev_get_integrity(bd);
+	if (bi) {
+		struct bio_set *bs = ib_dev->ibd_bio_set;
+
+		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
+		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+			pr_err("IBLOCK export of blk_integrity: %s not"
+			       " supported\n", bi->name);
+			ret = -ENOSYS;
+			goto out_blkdev_put;
+		}
 
-	return dev;
+		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+		}
 
-failed:
-	if (ib_dev->ibd_bio_set) {
-		bioset_free(ib_dev->ibd_bio_set);
-		ib_dev->ibd_bio_set = NULL;
+		if (dev->dev_attrib.pi_prot_type) {
+			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+				pr_err("Unable to allocate bioset for PI\n");
+				ret = -ENOMEM;
+				goto out_blkdev_put;
+			}
+			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+				 bs->bio_integrity_pool);
+		}
+		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
 	}
-	ib_dev->ibd_bd = NULL;
-	return ERR_PTR(ret);
+
+	return 0;
+
+out_blkdev_put:
+	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+out_free_bioset:
+	bioset_free(ib_dev->ibd_bio_set);
+	ib_dev->ibd_bio_set = NULL;
+out:
+	return ret;
 }
 
-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 
 	if (ib_dev->ibd_bd != NULL)
 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
 	if (ib_dev->ibd_bio_set != NULL)
 		bioset_free(ib_dev->ibd_bio_set);
-	kfree(ib_dev);
-}
 
-static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
-{
-	return container_of(task, struct iblock_req, ib_task);
-}
-
-static struct se_task *
-iblock_alloc_task(unsigned char *cdb)
-{
-	struct iblock_req *ib_req;
-
-	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
-	if (!ib_req) {
-		pr_err("Unable to allocate memory for struct iblock_req\n");
-		return NULL;
-	}
-
-	atomic_set(&ib_req->ib_bio_cnt, 0);
-	return &ib_req->ib_task;
+	kfree(ib_dev);
 }
 
 static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -243,12 +218,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 					bdev_logical_block_size(bd)) - 1);
 	u32 block_size = bdev_logical_block_size(bd);
 
-	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+	if (block_size == dev->dev_attrib.block_size)
 		return blocks_long;
 
 	switch (block_size) {
 	case 4096:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 2048:
 			blocks_long <<= 1;
 			break;
@@ -262,7 +237,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 2048:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 1;
 			break;
@@ -277,7 +252,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 1024:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 2;
 			break;
@@ -292,7 +267,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 512:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 3;
 			break;
@@ -313,6 +288,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 	return blocks_long;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr = cmd->priv;
+	u8 status;
+
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	if (atomic_read(&ibr->ib_bio_err_cnt))
+		status = SAM_STAT_CHECK_CONDITION;
+	else
+		status = SAM_STAT_GOOD;
+
+	target_complete_cmd(cmd, status);
+	kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_req *ibr = cmd->priv;
+
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
+	 */
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+		err = -EIO;
+
+	if (err != 0) {
+		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic();
+	}
+
+	bio_put(bio);
+
+	iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	struct bio *bio;
+
+	/*
+	 * Only allocate as many vector entries as the bio code allows us to,
+	 * we'll loop later on until we have handled the whole request.
+	 */
+	if (sg_num > BIO_MAX_PAGES)
+		sg_num = BIO_MAX_PAGES;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
+		return NULL;
+	}
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = cmd;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_iter.bi_sector = lba;
+
+	return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+	struct blk_plug plug;
+	struct bio *bio;
+
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+}
+
 static void iblock_end_io_flush(struct bio *bio, int err)
 {
 	struct se_cmd *cmd = bio->bi_private;
@@ -320,8 +376,13 @@ static void iblock_end_io_flush(struct bio *bio, int err)
 	if (err)
 		pr_err("IBLOCK: cache flush failed: %d\n", err);
 
-	if (cmd)
-		transport_complete_sync_cache(cmd, err == 0);
+	if (cmd) {
+		if (err)
+			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+		else
+			target_complete_cmd(cmd, SAM_STAT_GOOD);
+	}
+
 	bio_put(bio);
 }
 
@@ -329,10 +390,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
  * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
  * always flush the whole cache.
  */
-static void iblock_emulate_sync_cache(struct se_task *task)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	struct bio *bio;
 
@@ -341,7 +402,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
 	 * for this SYNCHRONIZE_CACHE op.
 	 */
 	if (immed)
-		transport_complete_sync_cache(cmd, 1);
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 
 	bio = bio_alloc(GFP_KERNEL, 0);
 	bio->bi_end_io = iblock_end_io_flush;
@@ -349,40 +410,131 @@ static void iblock_emulate_sync_cache(struct se_task *task)
 	if (!immed)
 		bio->bi_private = cmd;
 	submit_bio(WRITE_FLUSH, bio);
+	return 0;
+}
+
+static sense_reason_t
+iblock_do_unmap(struct se_cmd *cmd, void *priv,
+		sector_t lba, sector_t nolb)
+{
+	struct block_device *bdev = priv;
+	int ret;
+
+	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+	if (ret < 0) {
+		pr_err("blkdev_issue_discard() failed: %d\n", ret);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	return 0;
 }
 
-static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
 {
-	struct iblock_dev *ibd = dev->dev_ptr;
-	struct block_device *bd = ibd->ibd_bd;
-	int barrier = 0;
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
 
-	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+	return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
 }
 
-static void iblock_free_task(struct se_task *task)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
 {
-	kfree(IBLOCK_REQ(task));
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+	sector_t lba = cmd->t_task_lba;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	int ret;
+
+	ret = iblock_do_unmap(cmd, bdev, lba, nolb);
+	if (ret)
+		return ret;
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr;
+	struct scatterlist *sg;
+	struct bio *bio;
+	struct bio_list list;
+	sector_t block_lba = cmd->t_task_lba;
+	sector_t sectors = sbc_get_write_same_sectors(cmd);
+
+	sg = &cmd->t_data_sg[0];
+
+	if (cmd->t_data_nents > 1 ||
+	    sg->length != cmd->se_dev->dev_attrib.block_size) {
+		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+			" block_size: %u\n", cmd->t_data_nents, sg->length,
+			cmd->se_dev->dev_attrib.block_size);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	bio = iblock_get_bio(cmd, block_lba, 1);
+	if (!bio)
+		goto fail_free_ibr;
+
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
+
+	atomic_set(&ibr->pending, 1);
+
+	while (sectors) {
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+
+			bio = iblock_get_bio(cmd, block_lba, 1);
+			if (!bio)
+				goto fail_put_bios;
+
+			atomic_inc(&ibr->pending);
+			bio_list_add(&list, bio);
+		}
+
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sectors -= 1;
+	}
+
+	iblock_submit_bios(&list, WRITE);
+	return 0;
+
+fail_put_bios:
+	while ((bio = bio_list_pop(&list)))
+		bio_put(bio);
+fail_free_ibr:
+	kfree(ibr);
+fail:
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
 enum {
-	Opt_udev_path, Opt_force, Opt_err
+	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
 };
 
 static match_table_t tokens = {
 	{Opt_udev_path, "udev_path=%s"},
+	{Opt_readonly, "readonly=%d"},
 	{Opt_force, "force=%d"},
 	{Opt_err, NULL}
 };
 
-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
-					       struct se_subsystem_dev *se_dev,
-					       const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, token;
+	unsigned long tmp_readonly;
 
 	opts = kstrdup(page, GFP_KERNEL);
 	if (!opts)
@@ -403,17 +555,30 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
 				ret = -EEXIST;
 				goto out;
 			}
+			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
+				SE_UDEV_PATH_LEN) == 0) {
+				ret = -EINVAL;
+				break;
+			}
+			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
+					ib_dev->ibd_udev_path);
+			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+			break;
+		case Opt_readonly:
 			arg_p = match_strdup(&args[0]);
 			if (!arg_p) {
 				ret = -ENOMEM;
 				break;
 			}
-			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
-					"%s", arg_p);
+			ret = kstrtoul(arg_p, 0, &tmp_readonly);
 			kfree(arg_p);
-			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
-					ib_dev->ibd_udev_path);
-			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+			if (ret < 0) {
+				pr_err("kstrtoul() failed for"
+						" readonly=\n");
+				goto out;
+			}
+			ib_dev->ibd_readonly = tmp_readonly;
+			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
 			break;
 		case Opt_force:
 			break;
@@ -427,44 +592,26 @@ out:
 	return (!ret) ? count : ret;
 }
 
-static ssize_t iblock_check_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
-	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-		pr_err("Missing udev_path= parameters for IBLOCK\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-	struct block_device *bd = ibd->ibd_bd;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
 	char buf[BDEVNAME_SIZE];
 	ssize_t bl = 0;
 
 	if (bd)
 		bl += sprintf(b + bl, "iBlock device: %s",
 				bdevname(bd, buf));
-	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
-		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
-				ibd->ibd_udev_path);
-	} else
-		bl += sprintf(b + bl, "\n");
+	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
+		bl += sprintf(b + bl, "  UDEV PATH: %s",
+				ib_dev->ibd_udev_path);
+	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
 
 	bl += sprintf(b + bl, "        ");
 	if (bd) {
 		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
 			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
-			"" : (bd->bd_holder == ibd) ?
+			"" : (bd->bd_holder == ib_dev) ?
 			"CLAIMED: IBLOCK" : "CLAIMED: OS");
 	} else {
 		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -473,107 +620,128 @@ static ssize_t iblock_show_configfs_dev_params(
 	return bl;
 }
 
-static void iblock_bio_destructor(struct bio *bio)
+static int
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
 {
-	struct se_task *task = bio->bi_private;
-	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+	struct se_device *dev = cmd->se_dev;
+	struct blk_integrity *bi;
+	struct bio_integrity_payload *bip;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct scatterlist *sg;
+	int i, rc;
 
-	bio_free(bio, ib_dev->ibd_bio_set);
-}
+	bi = bdev_get_integrity(ib_dev->ibd_bd);
+	if (!bi) {
+		pr_err("Unable to locate bio_integrity\n");
+		return -ENODEV;
+	}
 
-static struct bio *
-iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
-{
-	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
-	struct iblock_req *ib_req = IBLOCK_REQ(task);
-	struct bio *bio;
+	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+	if (!bip) {
+		pr_err("Unable to allocate bio_integrity_payload\n");
+		return -ENOMEM;
+	}
 
-	/*
-	 * Only allocate as many vector entries as the bio code allows us to,
-	 * we'll loop later on until we have handled the whole request.
-	 */
-	if (sg_num > BIO_MAX_PAGES)
-		sg_num = BIO_MAX_PAGES;
+	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
+			 dev->prot_length;
+	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
-	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
-	if (!bio) {
-		pr_err("Unable to allocate memory for bio\n");
-		return NULL;
-	}
+	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+		 (unsigned long long)bip->bip_iter.bi_sector);
 
-	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
-		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
-	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
 
-	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_private = task;
-	bio->bi_destructor = iblock_bio_destructor;
-	bio->bi_end_io = &iblock_bio_done;
-	bio->bi_sector = lba;
-	atomic_inc(&ib_req->ib_bio_cnt);
+		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
+					    sg->offset);
+		if (rc != sg->length) {
+			pr_err("bio_integrity_add_page() failed; %d\n", rc);
+			return -ENOMEM;
+		}
 
-	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
-	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
-			atomic_read(&ib_req->ib_bio_cnt));
-	return bio;
+		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
+			 sg_page(sg), sg->length, sg->offset);
+	}
+
+	return 0;
 }
 
-static int iblock_do_task(struct se_task *task)
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+		  enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	struct bio *bio;
+	struct iblock_req *ibr;
+	struct bio *bio, *bio_start;
 	struct bio_list list;
 	struct scatterlist *sg;
-	u32 i, sg_num = task->task_sg_nents;
+	u32 sg_num = sgl_nents;
 	sector_t block_lba;
-	struct blk_plug plug;
-	int rw;
+	unsigned bio_cnt;
+	int rw = 0;
+	int i;
 
-	if (task->task_data_direction == DMA_TO_DEVICE) {
+	if (data_direction == DMA_TO_DEVICE) {
+		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 		/*
-		 * Force data to disk if we pretend to not have a volatile
-		 * write cache, or the initiator set the Force Unit Access bit.
+		 * Force writethrough using WRITE_FUA if a volatile write cache
+		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
-		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		     (cmd->se_cmd_flags & SCF_FUA)))
-			rw = WRITE_FUA;
-		else
+		if (q->flush_flags & REQ_FUA) {
+			if (cmd->se_cmd_flags & SCF_FUA)
+				rw = WRITE_FUA;
+			else if (!(q->flush_flags & REQ_FLUSH))
+				rw = WRITE_FUA;
+			else
+				rw = WRITE;
+		} else {
 			rw = WRITE;
+		}
 	} else {
 		rw = READ;
 	}
 
 	/*
-	 * Do starting conversion up from non 512-byte blocksize with
-	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+	 * Convert the blocksize advertised to the initiator to the 512 byte
+	 * units unconditionally used by the Linux block layer.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
-		block_lba = (task->task_lba << 3);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
-		block_lba = (task->task_lba << 2);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
-		block_lba = (task->task_lba << 1);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
-		block_lba = task->task_lba;
+	if (dev->dev_attrib.block_size == 4096)
+		block_lba = (cmd->t_task_lba << 3);
+	else if (dev->dev_attrib.block_size == 2048)
+		block_lba = (cmd->t_task_lba << 2);
+	else if (dev->dev_attrib.block_size == 1024)
+		block_lba = (cmd->t_task_lba << 1);
+	else if (dev->dev_attrib.block_size == 512)
+		block_lba = cmd->t_task_lba;
 	else {
 		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -ENOSYS;
+				" %u\n", dev->dev_attrib.block_size);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 
-	bio = iblock_get_bio(task, block_lba, sg_num);
-	if (!bio) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -ENOMEM;
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	if (!sgl_nents) {
+		atomic_set(&ibr->pending, 1);
+		iblock_complete_cmd(cmd);
+		return 0;
 	}
 
+	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+	if (!bio)
+		goto fail_free_ibr;
+
+	bio_start = bio;
 	bio_list_init(&list);
 	bio_list_add(&list, bio);
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	atomic_set(&ibr->pending, 2);
+	bio_cnt = 1;
+
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		/*
 		 * XXX: if the length the device accepts is shorter than the
 		 *	length of the S/G list entry this will cause and
@@ -581,10 +749,18 @@ static int iblock_do_task(struct se_task *task)
 		 */
 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 				!= sg->length) {
-			bio = iblock_get_bio(task, block_lba, sg_num);
+			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
+				iblock_submit_bios(&list, rw);
+				bio_cnt = 0;
+			}
+
+			bio = iblock_get_bio(cmd, block_lba, sg_num);
 			if (!bio)
-				goto fail;
+				goto fail_put_bios;
+
+			atomic_inc(&ibr->pending);
 			bio_list_add(&list, bio);
+			bio_cnt++;
 		}
 
 		/* Always in 512 byte units for Linux/Block */
@@ -592,94 +768,117 @@ static int iblock_do_task(struct se_task *task)
 		sg_num--;
 	}
 
-	blk_start_plug(&plug);
-	while ((bio = bio_list_pop(&list)))
-		submit_bio(rw, bio);
-	blk_finish_plug(&plug);
+	if (cmd->prot_type) {
+		int rc = iblock_alloc_bip(cmd, bio_start);
+		if (rc)
+			goto fail_put_bios;
+	}
 
+	iblock_submit_bios(&list, rw);
+	iblock_complete_cmd(cmd);
 	return 0;
 
-fail:
+fail_put_bios:
 	while ((bio = bio_list_pop(&list)))
 		bio_put(bio);
-	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-	return -ENOMEM;
+fail_free_ibr:
+	kfree(ibr);
+fail:
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
-static u32 iblock_get_device_rev(struct se_device *dev)
+static sector_t iblock_get_blocks(struct se_device *dev)
 {
-	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
-static u32 iblock_get_device_type(struct se_device *dev)
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
 {
-	return TYPE_DISK;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	int ret;
+
+	ret = bdev_alignment_offset(bd);
+	if (ret == -1)
+		return 0;
+
+	/* convert offset-bytes to offset-lbas */
+	return ret / bdev_logical_block_size(bd);
 }
 
-static sector_t iblock_get_blocks(struct se_device *dev)
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
 {
-	struct iblock_dev *ibd = dev->dev_ptr;
-	struct block_device *bd = ibd->ibd_bd;
-	struct request_queue *q = bdev_get_queue(bd);
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
 
-	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+	return ilog2(logs_per_phys);
 }
 
-static void iblock_bio_done(struct bio *bio, int err)
+static unsigned int iblock_get_io_min(struct se_device *dev)
 {
-	struct se_task *task = bio->bi_private;
-	struct iblock_req *ibr = IBLOCK_REQ(task);
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
 
-	/*
-	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
-	 */
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
-		err = -EIO;
+	return bdev_io_min(bd);
+}
 
-	if (err != 0) {
-		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
-			" err: %d\n", bio, err);
-		/*
-		 * Bump the ib_bio_err_cnt and release bio.
-		 */
-		atomic_inc(&ibr->ib_bio_err_cnt);
-		smp_mb__after_atomic_inc();
-	}
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
 
-	bio_put(bio);
+	return bdev_io_opt(bd);
+}
 
-	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
-		return;
+static struct sbc_ops iblock_sbc_ops = {
+	.execute_rw		= iblock_execute_rw,
+	.execute_sync_cache	= iblock_execute_sync_cache,
+	.execute_write_same	= iblock_execute_write_same,
+	.execute_write_same_unmap = iblock_execute_write_same_unmap,
+	.execute_unmap		= iblock_execute_unmap,
+};
+
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
+{
+	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+}
 
-	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
-		 task, bio, task->task_lba,
-		 (unsigned long long)bio->bi_sector, err);
+static bool iblock_get_write_cache(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
 
-	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
+	return q->flush_flags & REQ_FLUSH;
 }
 
 static struct se_subsystem_api iblock_template = {
 	.name			= "iblock",
+	.inquiry_prod		= "IBLOCK",
+	.inquiry_rev		= IBLOCK_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
-	.write_cache_emulated	= 1,
-	.fua_write_emulated	= 1,
 	.attach_hba		= iblock_attach_hba,
 	.detach_hba		= iblock_detach_hba,
-	.allocate_virtdevice	= iblock_allocate_virtdevice,
-	.create_virtdevice	= iblock_create_virtdevice,
+	.alloc_device		= iblock_alloc_device,
+	.configure_device	= iblock_configure_device,
 	.free_device		= iblock_free_device,
-	.alloc_task		= iblock_alloc_task,
-	.do_task		= iblock_do_task,
-	.do_discard		= iblock_do_discard,
-	.do_sync_cache		= iblock_emulate_sync_cache,
-	.free_task		= iblock_free_task,
-	.check_configfs_dev_params = iblock_check_configfs_dev_params,
+	.parse_cdb		= iblock_parse_cdb,
 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
-	.get_device_rev		= iblock_get_device_rev,
-	.get_device_type	= iblock_get_device_type,
+	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= iblock_get_blocks,
+	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+	.get_lbppbe		= iblock_get_lbppbe,
+	.get_io_min		= iblock_get_io_min,
+	.get_io_opt		= iblock_get_io_opt,
+	.get_write_cache	= iblock_get_write_cache,
 };
 
 static int __init iblock_module_init(void)
@@ -687,7 +886,7 @@ static int __init iblock_module_init(void)
 	return transport_subsystem_register(&iblock_template);
 }
 
-static void iblock_module_exit(void)
+static void __exit iblock_module_exit(void)
 {
 	transport_subsystem_release(&iblock_template);
 }
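The core of this patch is the new completion accounting on struct iblock_req: iblock_execute_rw() seeds ibr->pending at 2 (one reference for the first bio, one held by the submitter while it keeps building and batching bios), bumps it once per additional bio, and iblock_complete_cmd() only calls target_complete_cmd() when the last reference is dropped. Below is a minimal userspace sketch of that refcount handoff, using C11 atomics and hypothetical names (fake_req, bio_done, complete_cmd) rather than the kernel APIs:

/*
 * Userspace sketch of the ibr->pending pattern: the submitter holds one
 * reference while it issues bios, each bio holds one, and whichever side
 * drops the count to zero completes the command exactly once.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_req {
	atomic_int pending;	/* mirrors ibr->pending */
	atomic_int err_cnt;	/* mirrors ibr->ib_bio_err_cnt */
};

static void complete_cmd(struct fake_req *req)
{
	/* atomic_fetch_sub() returns the prior value; only the last holder proceeds */
	if (atomic_fetch_sub(&req->pending, 1) != 1)
		return;
	printf("command done, status %s\n",
	       atomic_load(&req->err_cnt) ? "CHECK CONDITION" : "GOOD");
}

/* analogue of the per-bio iblock_bio_done() end_io callback */
static void bio_done(struct fake_req *req, int err)
{
	if (err)
		atomic_fetch_add(&req->err_cnt, 1);
	complete_cmd(req);
}

int main(void)
{
	/* pending == 2: one for the first bio, one for the submitter */
	struct fake_req req = { .pending = 2, .err_cnt = 0 };

	bio_done(&req, 0);	/* first bio completes: pending 2 -> 1 */
	complete_cmd(&req);	/* submitter drops its reference: 1 -> 0, completes */
	return 0;
}

The submitter's own reference is what makes the batched submission in the patch safe: even if every bio completes before iblock_execute_rw() finishes the for_each_sg() loop, the count cannot reach zero until the final iblock_complete_cmd() call, so the command is never completed (and ibr never freed) while bios are still being allocated and queued.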
