Diffstat (limited to 'drivers/target/target_core_sbc.c')
-rw-r--r--	drivers/target/target_core_sbc.c	508
1 file changed, 461 insertions, 47 deletions
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6c17295e8d7..bd78d9235ac 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ratelimit.h>
+#include <linux/crc-t10dif.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_tcq.h>
@@ -33,7 +34,7 @@
 
 #include "target_core_internal.h"
 #include "target_core_ua.h"
-
+#include "target_core_alua.h"
 
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -80,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
 		transport_kunmap_data_sg(cmd);
 	}
 
-	target_complete_cmd(cmd, GOOD);
+	target_complete_cmd_with_length(cmd, GOOD, 8);
 	return 0;
 }
 
@@ -88,6 +89,7 @@ static sense_reason_t
 sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 	unsigned char *rbuf;
 	unsigned char buf[32];
 	unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -106,11 +108,28 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
 	buf[11] = dev->dev_attrib.block_size & 0xff;
 	/*
+	 * Set P_TYPE and PROT_EN bits for DIF support
+	 */
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+	}
+
+	if (dev->transport->get_lbppbe)
+		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+	if (dev->transport->get_alignment_offset_lbas) {
+		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+		buf[14] = (lalba >> 8) & 0x3f;
+		buf[15] = lalba & 0xff;
+	}
+
+	/*
 	 * Set Thin Provisioning Enable bit following sbc3r22 in section
 	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
 	 */
 	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
-		buf[14] = 0x80;
+		buf[14] |= 0x80;
 
 	rbuf = transport_kmap_data_sg(cmd);
 	if (rbuf) {
@@ -118,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 		transport_kunmap_data_sg(cmd);
 	}
 
-	target_complete_cmd(cmd, GOOD);
+	target_complete_cmd_with_length(cmd, GOOD, 32);
 	return 0;
 }
 
@@ -157,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
 	return cmd->se_dev->dev_attrib.block_size * sectors;
 }
 
-static int sbc_check_valid_sectors(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	unsigned long long end_lba;
-	u32 sectors;
-
-	sectors = cmd->data_length / dev->dev_attrib.block_size;
-	end_lba = dev->transport->get_blocks(dev) + 1;
-
-	if (cmd->t_task_lba + sectors > end_lba) {
-		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
-			cmd->t_task_lba, sectors, end_lba);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static inline u32 transport_get_sectors_6(unsigned char *cdb)
 {
 	/*
@@ -263,6 +264,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
 		return TCM_INVALID_CDB_FIELD;
 	}
+	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+	if (flags[0] & 0x10) {
+		pr_warn("WRITE SAME with ANCHOR not supported\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
 	/*
 	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
 	 * translated into block discard requests within backend code.
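For reference, the byte 12 encoding written by the READ CAPACITY (16) hunk above follows SBC-3: PROT_EN is bit 0, P_TYPE occupies bits 3:1, and P_TYPE holds the DIF type minus one, so Type 1/2/3 protection reports 0x01/0x03/0x05. A minimal standalone sketch of that encoding (the helper name rc16_prot_byte is hypothetical, not part of this patch):

#include <stdio.h>

/* SBC-3 READ CAPACITY (16) byte 12: bits 3:1 are P_TYPE, bit 0 is PROT_EN. */
static unsigned char rc16_prot_byte(unsigned int pi_prot_type)
{
	if (!pi_prot_type)
		return 0;				/* PROT_EN = 0 */
	return ((pi_prot_type - 1) << 1) | 0x1;		/* P_TYPE | PROT_EN */
}

int main(void)
{
	unsigned int type;

	for (type = 1; type <= 3; type++)
		printf("DIF Type %u -> byte 12 = 0x%02x\n",
		       type, rc16_prot_byte(type));
	return 0;
}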
@@ -349,7 +355,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 
-	cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+	/*
+	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+	 * within target_complete_ok_work() if the command was successfully
+	 * sent to the backend driver.
+	 */
+	spin_lock_irq(&cmd->t_state_lock);
+	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+	spin_unlock_irq(&cmd->t_state_lock);
+
 	/*
 	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
 	 * before the original READ I/O submission.
@@ -363,7 +378,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct scatterlist *write_sg = NULL, *sg;
-	unsigned char *buf, *addr;
+	unsigned char *buf = NULL, *addr;
 	struct sg_mapping_iter m;
 	unsigned int offset = 0, len;
 	unsigned int nlbas = cmd->t_task_nolb;
@@ -378,6 +393,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
 	 */
 	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
 		return TCM_NO_SENSE;
+	/*
+	 * Immediately exit + release dev->caw_sem if command has already
+	 * been failed with a non-zero SCSI status.
+	 */
+	if (cmd->scsi_status) {
+		pr_err("compare_and_write_callback: non zero scsi_status:"
+			" 0x%02x\n", cmd->scsi_status);
+		goto out;
+	}
 
 	buf = kzalloc(cmd->data_length, GFP_KERNEL);
 	if (!buf) {
@@ -386,13 +410,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
 		goto out;
 	}
 
-	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
 			   GFP_KERNEL);
 	if (!write_sg) {
 		pr_err("Unable to allocate compare_and_write sg\n");
 		ret = TCM_OUT_OF_RESOURCES;
 		goto out;
 	}
+	sg_init_table(write_sg, cmd->t_data_nents);
 	/*
 	 * Setup verify and write data payloads from total NumberLBAs.
 	 */
@@ -508,6 +533,12 @@ sbc_compare_and_write(struct se_cmd *cmd)
 		cmd->transport_complete_callback = NULL;
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
+	/*
+	 * Reset cmd->data_length to individual block_size in order to not
+	 * confuse backend drivers that depend on this value matching the
+	 * size of the I/O being submitted.
+	 */
+	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
 
 	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
 			      DMA_FROM_DEVICE);
@@ -524,6 +555,116 @@ sbc_compare_and_write(struct se_cmd *cmd)
 	return TCM_NO_SENSE;
 }
 
+static int
+sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+		       bool is_write, struct se_cmd *cmd)
+{
+	if (is_write) {
+		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
+					 TARGET_PROT_DOUT_INSERT;
+		switch (protect) {
+		case 0x0:
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	} else {
+		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
+					 TARGET_PROT_DIN_STRIP;
+		switch (protect) {
+		case 0x0:
+		case 0x1:
+		case 0x5:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x2:
+			if (prot_type == TARGET_DIF_TYPE1_PROT)
+				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+			break;
+		case 0x3:
+			cmd->prot_checks = 0;
+			break;
+		case 0x4:
+			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+			break;
+		default:
+			pr_err("Unsupported protect field %d\n", protect);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static bool
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+	       u32 sectors, bool is_write)
+{
+	u8 protect = cdb[1] >> 5;
+
+	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
+		return true;
+
+	switch (dev->dev_attrib.pi_prot_type) {
+	case TARGET_DIF_TYPE3_PROT:
+		cmd->reftag_seed = 0xffffffff;
+		break;
+	case TARGET_DIF_TYPE2_PROT:
+		if (protect)
+			return false;
+
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE1_PROT:
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE0_PROT:
+	default:
+		return true;
+	}
+
+	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
+				   is_write, cmd))
+		return false;
+
+	cmd->prot_type = dev->dev_attrib.pi_prot_type;
+	cmd->prot_length = dev->prot_length * sectors;
+
+	/**
+	 * In case protection information exists over the wire
+	 * we modify command data length to describe pure data.
+	 * The actual transfer length is data length + protection
+	 * length
+	 **/
+	if (protect)
+		cmd->data_length = sectors * dev->dev_attrib.block_size;
+
+	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
+		 "prot_op=%d prot_checks=%d\n",
+		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
+		 cmd->prot_op, cmd->prot_checks);
+
+	return true;
+}
+
 sense_reason_t
 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 {
@@ -544,6 +685,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 	case READ_10:
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -551,6 +696,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 	case READ_12:
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -558,6 +707,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 	case READ_16:
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -573,6 +726,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 	case WRITE_VERIFY:
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		if (cdb[1] & 0x8)
 			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -582,6 +739,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 	case WRITE_12:
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		if (cdb[1] & 0x8)
 			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -591,6 +752,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 	case WRITE_16:
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		if (cdb[1] & 0x8)
 			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -692,6 +857,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		case SAI_READ_CAPACITY_16:
 			cmd->execute_cmd = sbc_emulate_readcapacity_16;
 			break;
+		case SAI_REPORT_REFERRALS:
+			cmd->execute_cmd = target_emulate_report_referrals;
+			break;
 		default:
 			pr_err("Unsupported SA: 0x%02x\n",
 				cmd->t_task_cdb[1] & 0x1f);
@@ -702,15 +870,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		break;
 	case SYNCHRONIZE_CACHE:
 	case SYNCHRONIZE_CACHE_16:
-		if (!ops->execute_sync_cache) {
-			size = 0;
-			cmd->execute_cmd = sbc_emulate_noop;
-			break;
-		}
-
-		/*
-		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
-		 */
 		if (cdb[0] == SYNCHRONIZE_CACHE) {
 			sectors = transport_get_sectors_10(cdb);
 			cmd->t_task_lba = transport_lba_32(cdb);
@@ -718,18 +877,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			sectors = transport_get_sectors_16(cdb);
 			cmd->t_task_lba = transport_lba_64(cdb);
 		}
-
-		size = sbc_get_size(cmd, sectors);
-
-		/*
-		 * Check to ensure that LBA + Range does not exceed past end of
-		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
-		 */
-		if (cmd->t_task_lba || sectors) {
-			if (sbc_check_valid_sectors(cmd) < 0)
-				return TCM_ADDRESS_OUT_OF_RANGE;
+		if (ops->execute_sync_cache) {
+			cmd->execute_cmd = ops->execute_sync_cache;
+			goto check_lba;
 		}
-		cmd->execute_cmd = ops->execute_sync_cache;
+		size = 0;
+		cmd->execute_cmd = sbc_emulate_noop;
 		break;
 	case UNMAP:
 		if (!ops->execute_unmap)
@@ -772,8 +925,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		break;
 	case VERIFY:
 		size = 0;
+		sectors = transport_get_sectors_10(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
 		cmd->execute_cmd = sbc_emulate_noop;
-		break;
+		goto check_lba;
 	case REZERO_UNIT:
 	case SEEK_6:
 	case SEEK_10:
@@ -813,7 +968,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 				dev->dev_attrib.hw_max_sectors);
 			return TCM_INVALID_CDB_FIELD;
 		}
-
+check_lba:
 		end_lba = dev->transport->get_blocks(dev) + 1;
 		if (cmd->t_task_lba + sectors > end_lba) {
 			pr_err("cmd exceeds last lba %llu "
@@ -920,3 +1075,262 @@ err:
 	return ret;
 }
 EXPORT_SYMBOL(sbc_execute_unmap);
+
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+	sector_t sector = cmd->t_task_lba;
+	void *daddr, *paddr;
+	int i, j, offset = 0;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= psg->length) {
+				kunmap_atomic(paddr);
+				psg = sg_next(psg);
+				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
+						dev->dev_attrib.block_size));
+			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+			sdt->app_tag = 0;
+
+			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			sector++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+}
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+		  const void *p, sector_t sector, unsigned int ei_lba)
+{
+	int block_size = dev->dev_attrib.block_size;
+	__be16 csum;
+
+	csum = cpu_to_be16(crc_t10dif(p, block_size));
+
+	if (sdt->guard_tag != csum) {
+		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+			" csum 0x%04x\n", (unsigned long long)sector,
+			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+	}
+
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
+		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
+		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
+			be32_to_cpu(sdt->ref_tag), ei_lba);
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	return 0;
+}
+
+static void
+sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+		  struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *psg;
+	void *paddr, *addr;
+	unsigned int i, len, left;
+	unsigned int offset = sg_off;
+
+	left = sectors * dev->prot_length;
+
+	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+		unsigned int psg_len, copied = 0;
+
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+		psg_len = min(left, psg->length);
+		while (psg_len) {
+			len = min(psg_len, sg->length - offset);
+			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
+
+			if (read)
+				memcpy(paddr + copied, addr, len);
+			else
+				memcpy(addr, paddr + copied, len);
+
+			left -= len;
+			offset += len;
+			copied += len;
+			psg_len -= len;
+
+			if (offset >= sg->length) {
+				sg = sg_next(sg);
+				offset = 0;
+			}
+			kunmap_atomic(addr);
+		}
+		kunmap_atomic(paddr);
+	}
+}
+
+sense_reason_t
+sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+	sector_t sector = start;
+	void *daddr, *paddr;
+	int i, j, offset = 0;
+	sense_reason_t rc;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= psg->length) {
+				kunmap_atomic(paddr);
+				psg = sg_next(psg);
+				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+
+			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+					       ei_lba);
+			if (rc) {
+				kunmap_atomic(paddr);
+				kunmap_atomic(daddr);
+				cmd->bad_sector = sector;
+				return rc;
+			}
+
+			sector++;
+			ei_lba++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
+
+	return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_write);
+
+static sense_reason_t
+__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg, *psg = sg;
+	sector_t sector = start;
+	void *daddr, *paddr;
+	int i, j, offset = sg_off;
+	sense_reason_t rc;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + sg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= psg->length) {
+				kunmap_atomic(paddr);
+				psg = sg_next(psg);
+				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+
+			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			if (sdt->app_tag == cpu_to_be16(0xffff)) {
+				sector++;
+				offset += sizeof(struct se_dif_v1_tuple);
+				continue;
+			}
+
+			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+					       ei_lba);
+			if (rc) {
+				kunmap_atomic(paddr);
+				kunmap_atomic(daddr);
+				cmd->bad_sector = sector;
+				return rc;
+			}
+
+			sector++;
+			ei_lba++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+
+	return 0;
+}
+
+sense_reason_t
+sbc_dif_read_strip(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	u32 sectors = cmd->prot_length / dev->prot_length;
+
+	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+				     cmd->t_prot_sg, 0);
+}
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	sense_reason_t rc;
+
+	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
+	if (rc)
+		return rc;
+
+	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+	return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_read);
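The sbc_dif_generate() and sbc_dif_v1_verify() helpers added above operate on the 8-byte T10 DIF version 1 tuple appended to each logical block: a big-endian CRC-16 guard tag computed over the block data, an application tag (0xffff on the read path means the block is skipped), and a reference tag carrying the low 32 bits of the LBA for Type 1 protection. The following userspace sketch shows the tuple layout and a WRITE-INSERT-style generation pass over one contiguous buffer; the bitwise crc_t10dif() here is a stand-in for the kernel's <linux/crc-t10dif.h> implementation (CRC-16, polynomial 0x8bb7, zero seed), and dif_generate() is a hypothetical simplification of the scatterlist walk in sbc_dif_generate():

#include <arpa/inet.h>	/* htons()/htonl()/ntohs()/ntohl() for big-endian tags */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dif_v1_tuple {		/* 8 bytes of PI per logical block */
	uint16_t guard_tag;	/* big-endian CRC-16/T10-DIF of block data */
	uint16_t app_tag;	/* 0xffff on read means "do not check" */
	uint32_t ref_tag;	/* Type 1: low 32 bits of the block's LBA */
};

/* Bitwise CRC-16/T10-DIF: polynomial 0x8bb7, zero seed, no reflection. */
static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	while (len--) {
		int bit;

		crc ^= (uint16_t)*buf++ << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
					     : crc << 1;
	}
	return crc;
}

/* One tuple per block, mirroring the Type 1 WRITE_INSERT path. */
static void dif_generate(const uint8_t *data, size_t nblocks,
			 size_t block_size, uint64_t lba,
			 struct dif_v1_tuple *prot)
{
	size_t i;

	for (i = 0; i < nblocks; i++, lba++) {
		prot[i].guard_tag = htons(crc_t10dif(data + i * block_size,
						     block_size));
		prot[i].app_tag = 0;
		prot[i].ref_tag = htonl((uint32_t)lba);	/* DIF Type 1 */
	}
}

int main(void)
{
	uint8_t block[512];
	struct dif_v1_tuple t;

	memset(block, 0xa5, sizeof(block));
	dif_generate(block, 1, sizeof(block), 100, &t);
	printf("guard 0x%04x app 0x%04x ref %u\n",
	       ntohs(t.guard_tag), ntohs(t.app_tag), ntohl(t.ref_tag));
	return 0;
}

For DIF Type 2, the verify side instead compares the reference tag against the expected initial LBA passed in as ei_lba, and for Type 3 the reference tag is not checked at all, matching the three branches of sbc_dif_v1_verify() above.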
