Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r--	drivers/target/target_core_transport.c	| 498
1 file changed, 237 insertions(+), 261 deletions(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 84747cc1aac..7fa62fc93e0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -28,7 +28,6 @@
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
-#include <linux/blkdev.h>
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/in.h>
@@ -63,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
 struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -129,14 +130,36 @@ int init_se_kmem_caches(void)
 				"mem_t failed\n");
 		goto out_free_tg_pt_gp_cache;
 	}
+	t10_alua_lba_map_cache = kmem_cache_create(
+			"t10_alua_lba_map_cache",
+			sizeof(struct t10_alua_lba_map),
+			__alignof__(struct t10_alua_lba_map), 0, NULL);
+	if (!t10_alua_lba_map_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_"
+				"cache failed\n");
+		goto out_free_tg_pt_gp_mem_cache;
+	}
+	t10_alua_lba_map_mem_cache = kmem_cache_create(
+			"t10_alua_lba_map_mem_cache",
+			sizeof(struct t10_alua_lba_map_member),
+			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
+	if (!t10_alua_lba_map_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+				"cache failed\n");
+		goto out_free_lba_map_cache;
+	}
 
 	target_completion_wq = alloc_workqueue("target_completion",
 					       WQ_MEM_RECLAIM, 0);
 	if (!target_completion_wq)
-		goto out_free_tg_pt_gp_mem_cache;
+		goto out_free_lba_map_mem_cache;
 
 	return 0;
 
+out_free_lba_map_mem_cache:
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+	kmem_cache_destroy(t10_alua_lba_map_cache);
 out_free_tg_pt_gp_mem_cache:
 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
 out_free_tg_pt_gp_cache:
@@ -165,6 +188,8 @@ void release_se_kmem_caches(void)
 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_lba_map_cache);
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 }
 
 /* This code ensures unique mib indexes are handed out.
 */
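The two new ALUA LBA map caches are threaded into init_se_kmem_caches()'s existing unwind convention: every successful kmem_cache_create() gains a matching destroy label, and each failure jumps to the label that frees everything allocated so far, in reverse order. A minimal standalone sketch of that pattern, using hypothetical foo/bar names rather than anything from this driver:

```c
#include <linux/slab.h>

struct foo { int a; };			/* hypothetical payload types */
struct bar { long b; };

static struct kmem_cache *foo_cache;
static struct kmem_cache *bar_cache;

static int example_init_caches(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      __alignof__(struct foo), 0, NULL);
	if (!foo_cache)
		return -ENOMEM;

	bar_cache = kmem_cache_create("bar_cache", sizeof(struct bar),
				      __alignof__(struct bar), 0, NULL);
	if (!bar_cache)
		goto out_free_foo_cache;	/* unwind in reverse order */

	return 0;

out_free_foo_cache:
	kmem_cache_destroy(foo_cache);
	return -ENOMEM;
}
```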
@@ -210,7 +235,7 @@ void transport_subsystem_check_init(void)
 
 	sub_api_initialized = 1;
 }
 
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 
@@ -226,6 +251,7 @@ struct se_session *transport_init_session(void)
 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
 	kref_init(&se_sess->sess_kref);
+	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
 }
@@ -236,17 +262,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 {
 	int rc;
 
-	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL);
+	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
+					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 	if (!se_sess->sess_cmd_map) {
-		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
-		return -ENOMEM;
+		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
+		if (!se_sess->sess_cmd_map) {
+			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+			return -ENOMEM;
+		}
 	}
 
 	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
 	if (rc < 0) {
 		pr_err("Unable to init se_sess->sess_tag_pool,"
 			" tag_num: %u\n", tag_num);
-		kfree(se_sess->sess_cmd_map);
+		if (is_vmalloc_addr(se_sess->sess_cmd_map))
+			vfree(se_sess->sess_cmd_map);
+		else
+			kfree(se_sess->sess_cmd_map);
 		se_sess->sess_cmd_map = NULL;
 		return -ENOMEM;
 	}
@@ -256,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 EXPORT_SYMBOL(transport_alloc_session_tags);
 
 struct se_session *transport_init_session_tags(unsigned int tag_num,
-					       unsigned int tag_size)
+					       unsigned int tag_size,
+					       enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 	int rc;
 
-	se_sess = transport_init_session();
+	se_sess = transport_init_session(sup_prot_ops);
 	if (IS_ERR(se_sess))
 		return se_sess;
 
@@ -412,7 +446,10 @@ void transport_free_session(struct se_session *se_sess)
 {
 	if (se_sess->sess_cmd_map) {
 		percpu_ida_destroy(&se_sess->sess_tag_pool);
-		kfree(se_sess->sess_cmd_map);
+		if (is_vmalloc_addr(se_sess->sess_cmd_map))
+			vfree(se_sess->sess_cmd_map);
+		else
+			kfree(se_sess->sess_cmd_map);
 	}
 	kmem_cache_free(se_sess_cache, se_sess);
 }
@@ -463,11 +500,11 @@ void transport_deregister_session(struct se_session *se_sess)
 	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
 		se_tpg->se_tpg_tfo->get_fabric_name());
 	/*
-	 * If last kref is dropping now for an explict NodeACL, awake sleeping
+	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
 	 * removal context.
 	 */
-	if (se_nacl && comp_nacl == true)
+	if (se_nacl && comp_nacl)
 		target_put_nacl(se_nacl);
 
 	transport_free_session(se_sess);
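transport_alloc_session_tags() and transport_free_session() above now implement a two-level allocation: try kzalloc() quietly (__GFP_NOWARN suppresses the allocation-failure warning for large tag_num * tag_size products), fall back to vzalloc(), and on the free side dispatch on is_vmalloc_addr(). A sketch of the pattern in isolation; on later kernels kvzalloc()/kvfree() package the same idea:

```c
#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *tag_map_zalloc(size_t size)
{
	/* Prefer physically contiguous memory, but fail quietly... */
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	/* ...and fall back to virtually contiguous memory. */
	if (!p)
		p = vzalloc(size);
	return p;
}

static void tag_map_free(const void *p)
{
	/* Return the memory to whichever allocator provided it. */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
```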
@@ -505,23 +542,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 	if (write_pending)
 		cmd->t_state = TRANSPORT_WRITE_PENDING;
 
-	/*
-	 * Determine if IOCTL context caller in requesting the stopping of this
-	 * command for LUN shutdown purposes.
-	 */
-	if (cmd->transport_state & CMD_T_LUN_STOP) {
-		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
-			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
-		cmd->transport_state &= ~CMD_T_ACTIVE;
-		if (remove_from_lists)
-			target_remove_from_state_list(cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-		complete(&cmd->transport_lun_stop_comp);
-		return 1;
-	}
-
 	if (remove_from_lists) {
 		target_remove_from_state_list(cmd);
@@ -542,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-		complete(&cmd->t_transport_stop_comp);
+		complete_all(&cmd->t_transport_stop_comp);
 		return 1;
 	}
@@ -575,19 +595,25 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
-	unsigned long flags;
 
 	if (!lun)
 		return;
 
-	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
-	if (!list_empty(&cmd->se_lun_node))
-		list_del_init(&cmd->se_lun_node);
-	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+	if (cmpxchg(&cmd->lun_ref_active, true, false))
+		percpu_ref_put(&lun->lun_ref);
 }
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
+	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+		transport_lun_remove_cmd(cmd);
+	/*
+	 * Allow the fabric driver to unmap any resources before
+	 * releasing the descriptor via TFO->release_cmd()
+	 */
+	if (remove)
+		cmd->se_tfo->aborted_task(cmd);
+
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
 	if (remove)
@@ -654,19 +680,16 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 		return;
 	}
 
-	if (!success)
-		cmd->transport_state |= CMD_T_FAILED;
-
 	/*
-	 * Check for case where an explict ABORT_TASK has been received
+	 * Check for case where an explicit ABORT_TASK has been received
 	 * and transport_wait_for_tasks() will be waiting for completion..
 	 */
 	if (cmd->transport_state & CMD_T_ABORTED &&
 	    cmd->transport_state & CMD_T_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&cmd->t_transport_stop_comp);
+		complete_all(&cmd->t_transport_stop_comp);
 		return;
-	} else if (cmd->transport_state & CMD_T_FAILED) {
+	} else if (!success) {
 		INIT_WORK(&cmd->work, target_complete_failure_work);
 	} else {
 		INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -680,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			cmd->residual_count += cmd->data_length - length;
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = cmd->data_length - length;
+		}
+
+		cmd->data_length = length;
+	}
+
+	target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
+
 static void target_add_to_state_list(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -713,7 +753,7 @@ void target_qf_do_work(struct work_struct *work)
 	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
 		list_del(&cmd->se_qf_node);
 		atomic_dec(&dev->dev_qf_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 
 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1082,17 +1122,15 @@ void transport_init_se_cmd(
 	int task_attr,
 	unsigned char *sense_buffer)
 {
-	INIT_LIST_HEAD(&cmd->se_lun_node);
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
 	INIT_LIST_HEAD(&cmd->se_cmd_list);
 	INIT_LIST_HEAD(&cmd->state_list);
-	init_completion(&cmd->transport_lun_fe_stop_comp);
-	init_completion(&cmd->transport_lun_stop_comp);
 	init_completion(&cmd->t_transport_stop_comp);
 	init_completion(&cmd->cmd_wait_comp);
 	init_completion(&cmd->task_stop_comp);
 	spin_lock_init(&cmd->t_state_lock);
+	kref_init(&cmd->cmd_kref);
 	cmd->transport_state = CMD_T_DEV_ACTIVE;
 
 	cmd->se_tfo = tfo;
@@ -1128,7 +1166,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 	 * Dormant to Active status.
 	 */
 	cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
 			cmd->se_ordered_id, cmd->sam_task_attr,
 			dev->transport->name);
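The new target_complete_cmd_with_length() lets an emulation path complete a command that produced fewer bytes than the initiator's allocation length: the delta is recorded as an underflow residual and data_length is truncated before normal completion. A hedged usage sketch with a hypothetical handler (the real callers live in the target's SPC/SBC emulation code):

```c
/* Hypothetical emulated handler that builds a 36-byte payload into a
 * response buffer that may be larger (cmd->data_length bytes). */
static sense_reason_t example_emulate_inquiry(struct se_cmd *cmd)
{
	unsigned char buf[36] = { };
	void *dst;

	/* ... fill buf[] with the emulated response payload ... */

	dst = transport_kmap_data_sg(cmd);
	if (!dst)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	memcpy(dst, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	/* If 36 < data_length, this sets SCF_UNDERFLOW_BIT and
	 * residual_count, then completes with GOOD status. */
	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, sizeof(buf));
	return 0;
}
```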
All other   * setup exceptions will be returned as a SCSI CHECK_CONDITION response, @@ -1311,7 +1351,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess  		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,  		u32 data_length, int task_attr, int data_dir, int flags,  		struct scatterlist *sgl, u32 sgl_count, -		struct scatterlist *sgl_bidi, u32 sgl_bidi_count) +		struct scatterlist *sgl_bidi, u32 sgl_bidi_count, +		struct scatterlist *sgl_prot, u32 sgl_prot_count)  {  	struct se_portal_group *se_tpg;  	sense_reason_t rc; @@ -1359,6 +1400,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess  		transport_generic_request_failure(se_cmd, rc);  		return 0;  	} + +	/* +	 * Save pointers for SGLs containing protection information, +	 * if present. +	 */ +	if (sgl_prot_count) { +		se_cmd->t_prot_sg = sgl_prot; +		se_cmd->t_prot_nents = sgl_prot_count; +	} +  	/*  	 * When a non zero sgl_count has been passed perform SGL passthrough  	 * mapping for pre-allocated fabric memory instead of having target @@ -1395,6 +1446,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess  			return 0;  		}  	} +  	/*  	 * Check if we need to delay processing because of ALUA  	 * Active/NonOptimized primary access state.. @@ -1434,7 +1486,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,  {  	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,  			unpacked_lun, data_length, task_attr, data_dir, -			flags, NULL, 0, NULL, 0); +			flags, NULL, 0, NULL, 0, NULL, 0);  }  EXPORT_SYMBOL(target_submit_cmd); @@ -1580,6 +1632,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,  	case TCM_CHECK_CONDITION_ABORT_CMD:  	case TCM_CHECK_CONDITION_UNIT_ATTENTION:  	case TCM_CHECK_CONDITION_NOT_READY: +	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: +	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: +	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:  		break;  	case TCM_OUT_OF_RESOURCES:  		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1668,7 +1723,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)  		return false;  	case MSG_ORDERED_TAG:  		atomic_inc(&dev->dev_ordered_sync); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "  			 " se_ordered_id: %u\n", @@ -1686,7 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)  		 * For SIMPLE and UNTAGGED Task Attribute commands  		 */  		atomic_inc(&dev->simple_cmds); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		break;  	} @@ -1709,42 +1764,36 @@ void target_execute_cmd(struct se_cmd *cmd)  	/*  	 * If the received CDB has aleady been aborted stop processing it here.  	 */ -	if (transport_check_aborted_status(cmd, 1)) { -		complete(&cmd->transport_lun_stop_comp); +	if (transport_check_aborted_status(cmd, 1))  		return; -	} - -	/* -	 * Determine if IOCTL context caller in requesting the stopping of this -	 * command for LUN shutdown purposes. -	 */ -	spin_lock_irq(&cmd->t_state_lock); -	if (cmd->transport_state & CMD_T_LUN_STOP) { -		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", -			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); -		cmd->transport_state &= ~CMD_T_ACTIVE; -		spin_unlock_irq(&cmd->t_state_lock); -		complete(&cmd->transport_lun_stop_comp); -		return; -	}  	/*  	 * Determine if frontend context caller is requesting the stopping of  	 * this command for frontend exceptions.  	 
 	 */
+	spin_lock_irq(&cmd->t_state_lock);
 	if (cmd->transport_state & CMD_T_STOP) {
 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
 			__func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
 		spin_unlock_irq(&cmd->t_state_lock);
-		complete(&cmd->t_transport_stop_comp);
+		complete_all(&cmd->t_transport_stop_comp);
 		return;
 	}
 
 	cmd->t_state = TRANSPORT_PROCESSING;
 	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
+	/*
+	 * Perform WRITE_INSERT of PI using software emulation when backend
+	 * device has PI enabled, if the transport has not already generated
+	 * PI using hardware WRITE_INSERT offload.
+	 */
+	if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+			sbc_dif_generate(cmd);
+	}
 
 	if (target_handle_task_attr(cmd)) {
 		spin_lock_irq(&cmd->t_state_lock);
@@ -1797,7 +1846,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 
 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
 		atomic_dec(&dev->simple_cmds);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1809,7 +1858,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
 		atomic_dec(&dev->dev_ordered_sync);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1868,12 +1917,27 @@ static void transport_handle_queue_full(
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc(&dev->dev_qf_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
 
 	schedule_work(&cmd->se_dev->qf_work_queue);
 }
 
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+	sense_reason_t rc;
+
+	if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+		rc = sbc_dif_read_strip(cmd);
+		if (rc) {
+			cmd->pi_err = rc;
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void target_complete_ok_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
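Both PI hunks apply the same rule in opposite directions: the core emulates DIF in software (sbc_dif_generate() on the WRITE_INSERT side, sbc_dif_read_strip() via target_check_read_strip() on the READ_STRIP side) only when the session's sup_prot_ops says the fabric cannot offload that operation in hardware. Roughly, as a predicate; this is a sketch, not an in-tree helper, and it assumes enum target_prot_op values are single bits as in target_core_base.h:

```c
/* Sketch: should the target core emulate this protection operation in
 * software rather than rely on fabric hardware offload? */
static bool target_needs_pi_emulation(struct se_cmd *cmd,
				      enum target_prot_op op)
{
	if (cmd->prot_op != op)
		return false;	/* command does not use this PI op */

	/* Emulate only if the fabric did not advertise the offload
	 * when the session was created. */
	return !(cmd->se_sess->sup_prot_ops & op);
}

/* e.g. in the submission path:
 *	if (target_needs_pi_emulation(cmd, TARGET_PROT_DOUT_INSERT))
 *		sbc_dif_generate(cmd);
 */
```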
@@ -1938,6 +2002,22 @@ static void target_complete_ok_work(struct work_struct *work)
 					cmd->data_length;
 		}
 		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Perform READ_STRIP of PI using software emulation when
+		 * backend had PI enabled, if the transport will not be
+		 * performing hardware READ_STRIP offload.
+		 */
+		if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+		    target_check_read_strip(cmd)) {
+			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2030,6 +2110,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 	cmd->t_bidi_data_sg = NULL;
 	cmd->t_bidi_data_nents = 0;
+
+	transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+	cmd->t_prot_sg = NULL;
+	cmd->t_prot_nents = 0;
 }
 
 /**
@@ -2193,6 +2277,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		}
 
+		if (cmd->prot_op != TARGET_PROT_NORMAL) {
+			ret = target_alloc_sgl(&cmd->t_prot_sg,
+					       &cmd->t_prot_nents,
+					       cmd->prot_length, true);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+
 		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
 				       cmd->data_length, zero_flag);
 		if (ret < 0)
@@ -2283,13 +2375,12 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
 	unsigned long flags;
 	int ret = 0;
 
-	kref_init(&se_cmd->cmd_kref);
 	/*
 	 * Add a second kref if the fabric caller is expecting to handle
 	 * fabric acknowledgement that requires two target_put_sess_cmd()
 	 * invocations before se_cmd descriptor release.
 	 */
-	if (ack_kref == true) {
+	if (ack_kref) {
 		kref_get(&se_cmd->cmd_kref);
 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
 	}
@@ -2333,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
+	if (!se_sess) {
+		se_cmd->se_tfo->release_cmd(se_cmd);
+		return 1;
+	}
 	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
 			&se_sess->sess_cmd_lock);
 }
@@ -2394,164 +2489,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
-/*	transport_lun_wait_for_tasks():
- *
- *	Called from ConfigFS context to stop the passed struct se_cmd to allow
- *	an struct se_lun to be successfully shutdown.
- */
-static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	/*
-	 * If the frontend has already requested this struct se_cmd to
-	 * be stopped, we can safely ignore this struct se_cmd.
-	 */
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->transport_state & CMD_T_STOP) {
-		cmd->transport_state &= ~CMD_T_LUN_STOP;
-
-		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
-			 cmd->se_tfo->get_task_tag(cmd));
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		transport_cmd_check_stop(cmd, false, false);
-		return -EPERM;
-	}
-	cmd->transport_state |= CMD_T_LUN_FE_STOP;
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	// XXX: audit task_flags checks.
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if ((cmd->transport_state & CMD_T_BUSY) &&
-	    (cmd->transport_state & CMD_T_SENT)) {
-		if (!target_stop_cmd(cmd, &flags))
-			ret++;
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
-			" %d\n", cmd, ret);
-	if (!ret) {
-		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
-				cmd->se_tfo->get_task_tag(cmd));
-		wait_for_completion(&cmd->transport_lun_stop_comp);
-		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
-				cmd->se_tfo->get_task_tag(cmd));
-	}
-
-	return 0;
-}
-
-static void __transport_clear_lun_from_sessions(struct se_lun *lun)
-{
-	struct se_cmd *cmd = NULL;
-	unsigned long lun_flags, cmd_flags;
-	/*
-	 * Do exception processing and return CHECK_CONDITION status to the
-	 * Initiator Port.
-	 */
-	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-	while (!list_empty(&lun->lun_cmd_list)) {
-		cmd = list_first_entry(&lun->lun_cmd_list,
-		       struct se_cmd, se_lun_node);
-		list_del_init(&cmd->se_lun_node);
-
-		spin_lock(&cmd->t_state_lock);
-		pr_debug("SE_LUN[%d] - Setting cmd->transport"
-			"_lun_stop for  ITT: 0x%08x\n",
-			cmd->se_lun->unpacked_lun,
-			cmd->se_tfo->get_task_tag(cmd));
-		cmd->transport_state |= CMD_T_LUN_STOP;
-		spin_unlock(&cmd->t_state_lock);
-
-		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-
-		if (!cmd->se_lun) {
-			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
-				cmd->se_tfo->get_task_tag(cmd),
-				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-			BUG();
-		}
-		/*
-		 * If the Storage engine still owns the iscsi_cmd_t, determine
-		 * and/or stop its context.
-		 */
-		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
-			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
-			cmd->se_tfo->get_task_tag(cmd));
-
-		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
-			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-			continue;
-		}
-
-		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
-			"_wait_for_tasks(): SUCCESS\n",
-			cmd->se_lun->unpacked_lun,
-			cmd->se_tfo->get_task_tag(cmd));
-
-		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-			goto check_cond;
-		}
-		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
-		target_remove_from_state_list(cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-
-		/*
-		 * The Storage engine stopped this struct se_cmd before it was
-		 * send to the fabric frontend for delivery back to the
-		 * Initiator Node.  Return this SCSI CDB back with an
-		 * CHECK_CONDITION status.
-		 */
-check_cond:
-		transport_send_check_condition_and_sense(cmd,
-				TCM_NON_EXISTENT_LUN, 0);
-		/*
-		 *  If the fabric frontend is waiting for this iscsi_cmd_t to
-		 * be released, notify the waiting thread now that LU has
-		 * finished accessing it.
-		 */
-		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
-			pr_debug("SE_LUN[%d] - Detected FE stop for"
-				" struct se_cmd: %p ITT: 0x%08x\n",
-				lun->unpacked_lun,
-				cmd, cmd->se_tfo->get_task_tag(cmd));
-
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-					cmd_flags);
-			transport_cmd_check_stop(cmd, false, false);
-			complete(&cmd->transport_lun_fe_stop_comp);
-			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-			continue;
-		}
-		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
-			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
-
-		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-	}
-	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-}
-
-static int transport_clear_lun_thread(void *p)
+static int transport_clear_lun_ref_thread(void *p)
 {
 	struct se_lun *lun = p;
 
-	__transport_clear_lun_from_sessions(lun);
+	percpu_ref_kill(&lun->lun_ref);
+
+	wait_for_completion(&lun->lun_ref_comp);
 	complete(&lun->lun_shutdown_comp);
 
 	return 0;
 }
 
-int transport_clear_lun_from_sessions(struct se_lun *lun)
+int transport_clear_lun_ref(struct se_lun *lun)
 {
 	struct task_struct *kt;
 
-	kt = kthread_run(transport_clear_lun_thread, lun,
+	kt = kthread_run(transport_clear_lun_ref_thread, lun,
 			"tcm_cl_%u", lun->unpacked_lun);
 	if (IS_ERR(kt)) {
 		pr_err("Unable to start clear_lun thread\n");
@@ -2585,43 +2539,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return false;
 	}
-	/*
-	 * If we are already stopped due to an external event (ie: LUN shutdown)
-	 * sleep until the connection can have the passed struct se_cmd back.
-	 * The cmd->transport_lun_stopped_sem will be upped by
-	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
-	 * has completed its operation on the struct se_cmd.
-	 */
-	if (cmd->transport_state & CMD_T_LUN_STOP) {
-		pr_debug("wait_for_tasks: Stopping"
-			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
-			"_stop_comp); for ITT: 0x%08x\n",
-			cmd->se_tfo->get_task_tag(cmd));
-		/*
-		 * There is a special case for WRITES where a FE exception +
-		 * LUN shutdown means ConfigFS context is still sleeping on
-		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
-		 * We go ahead and up transport_lun_stop_comp just to be sure
-		 * here.
-		 */
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&cmd->transport_lun_stop_comp);
-		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-
-		target_remove_from_state_list(cmd);
-		/*
-		 * At this point, the frontend who was the originator of this
-		 * struct se_cmd, now owns the structure and can be released through
-		 * normal means below.
-		 */
-		pr_debug("wait_for_tasks: Stopped"
-			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
-			"stop_comp); for ITT: 0x%08x\n",
-			cmd->se_tfo->get_task_tag(cmd));
-
-		cmd->transport_state &= ~CMD_T_LUN_STOP;
-	}
 
 	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
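The rewritten shutdown path above replaces the per-LUN command-list walk with the generic percpu_ref lifecycle: percpu_ref_kill() forbids new references and drops the initial one, the ref's release callback fires once the last per-command percpu_ref_put() lands (see transport_lun_remove_cmd() earlier in this diff), and the shutdown thread simply waits on a completion. A standalone sketch of that lifecycle with hypothetical names (in the target code the release callback that completes lun_ref_comp lives outside this file):

```c
#include <linux/completion.h>
#include <linux/kernel.h>		/* container_of() */
#include <linux/percpu-refcount.h>

struct example_lun {			/* hypothetical container */
	struct percpu_ref ref;
	struct completion ref_comp;
};

static void example_lun_ref_release(struct percpu_ref *ref)
{
	struct example_lun *lun = container_of(ref, struct example_lun, ref);

	/* Last reference dropped: wake the shutdown side. */
	complete(&lun->ref_comp);
}

static int example_lun_init(struct example_lun *lun)
{
	init_completion(&lun->ref_comp);
	/* Two-argument form as of the v3.14-era API. */
	return percpu_ref_init(&lun->ref, example_lun_ref_release);
}

static void example_lun_shutdown(struct example_lun *lun)
{
	/* Forbid new gets and drop the initial reference... */
	percpu_ref_kill(&lun->ref);
	/* ...then wait for outstanding per-command puts to drain. */
	wait_for_completion(&lun->ref_comp);
}
```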
@@ -2663,6 +2580,19 @@ static int transport_get_sense_codes(
 	return 0;
 }
 
+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
+{
+	/* Place failed LBA in sense data information descriptor 0. */
+	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+	buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+	buffer[SPC_VALIDITY_OFFSET] = 0x80;
+
+	/* Descriptor Information: failing sector */
+	put_unaligned_be64(bad_sector, &buffer[12]);
+}
+
 int
 transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		sense_reason_t reason, int from_transport)
@@ -2856,6 +2786,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
 		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
 		break;
+	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK GUARD CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
 	default:
 		/* CURRENT ERROR */
@@ -2893,13 +2856,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	if (!(cmd->transport_state & CMD_T_ABORTED))
 		return 0;
 
-	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+	/*
+	 * If cmd has been aborted but either no status is to be sent or it has
+	 * already been sent, just return
+	 */
+	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
 		return 1;
 
 	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 	trace_target_cmd_complete(cmd);
 	cmd->se_tfo->queue_status(cmd);
 
@@ -2912,7 +2880,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2927,7 +2895,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			cmd->transport_state |= CMD_T_ABORTED;
-			smp_mb__after_atomic_inc();
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+			smp_mb__after_atomic();
+			return;
 		}
 	}
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -2985,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)
 int transport_generic_handle_tmr(
 	struct se_cmd *cmd)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->transport_state |= CMD_T_ACTIVE;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
 	INIT_WORK(&cmd->work, target_tmr_work);
 	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
 	return 0;
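One API consequence of this diff for fabric modules: transport_cmd_finish_abort() now invokes a new TFO->aborted_task() callback before release_cmd(), giving the fabric a chance to unmap DMA or drop transfer resources for a command killed mid-flight. A hedged skeleton of what a fabric driver might register; the example_fabric names here are hypothetical, not taken from this diff:

```c
/* Hypothetical per-fabric command wrapper embedding the se_cmd. */
struct example_fabric_cmd {
	bool dma_pending;	/* hypothetical fabric-private state */
	struct se_cmd se_cmd;
};

static void example_fabric_aborted_task(struct se_cmd *se_cmd)
{
	struct example_fabric_cmd *cmd =
		container_of(se_cmd, struct example_fabric_cmd, se_cmd);

	/* Undo anything set up for the in-flight transfer here, e.g.
	 * cancel a pending DMA and release the transfer buffer; the
	 * core calls this from the abort path before release_cmd(). */
	cmd->dma_pending = false;
}

static struct target_core_fabric_ops example_fabric_ops = {
	/* ... the other mandatory fabric callbacks ... */
	.aborted_task	= example_fabric_aborted_task,
};
```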
