Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r--  drivers/target/target_core_transport.c | 240
1 files changed, 211 insertions, 29 deletions
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 91953da0f62..7fa62fc93e0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -128,14 +130,36 @@ int init_se_kmem_caches(void)
"mem_t failed\n");
goto out_free_tg_pt_gp_cache;
}
+ t10_alua_lba_map_cache = kmem_cache_create(
+ "t10_alua_lba_map_cache",
+ sizeof(struct t10_alua_lba_map),
+ __alignof__(struct t10_alua_lba_map), 0, NULL);
+ if (!t10_alua_lba_map_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_"
+ "cache failed\n");
+ goto out_free_tg_pt_gp_mem_cache;
+ }
+ t10_alua_lba_map_mem_cache = kmem_cache_create(
+ "t10_alua_lba_map_mem_cache",
+ sizeof(struct t10_alua_lba_map_member),
+ __alignof__(struct t10_alua_lba_map_member), 0, NULL);
+ if (!t10_alua_lba_map_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+ "cache failed\n");
+ goto out_free_lba_map_cache;
+ }
target_completion_wq = alloc_workqueue("target_completion",
WQ_MEM_RECLAIM, 0);
if (!target_completion_wq)
- goto out_free_tg_pt_gp_mem_cache;
+ goto out_free_lba_map_mem_cache;
return 0;
+out_free_lba_map_mem_cache:
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+ kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_mem_cache:
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
@@ -164,6 +188,8 @@ void release_se_kmem_caches(void)
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kmem_cache_destroy(t10_alua_lba_map_cache);
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
/* This code ensures unique mib indexes are handed out. */
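Note: the two caches added above back the ALUA referrals LBA map introduced elsewhere in this series. A minimal sketch of allocating an entry from them — assuming the struct t10_alua_lba_map definition (with its lba_map_mem_list member) from target_core_base.h; error handling and list linkage elided:

	#include <linux/slab.h>
	#include <linux/list.h>

	static struct t10_alua_lba_map *lba_map_alloc_sketch(void)
	{
		struct t10_alua_lba_map *map;

		/* Zeroed allocation from the dedicated cache */
		map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
		if (!map)
			return NULL;
		INIT_LIST_HEAD(&map->lba_map_mem_list);
		return map;	/* released with kmem_cache_free() */
	}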
@@ -209,7 +235,7 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -225,6 +251,7 @@ struct se_session *transport_init_session(void)
INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
kref_init(&se_sess->sess_kref);
+ se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
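Note: every fabric driver that creates sessions must now declare which protection operations its transport can offload. A hypothetical call site for a fabric with no hardware DIF support:

	/* Hypothetical fabric login path: no DIF offload in hardware, so
	 * pass TARGET_PROT_NORMAL and let the core emulate PI in software. */
	struct se_session *sess = transport_init_session(TARGET_PROT_NORMAL);

	if (IS_ERR(sess))
		return PTR_ERR(sess);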
@@ -262,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,
EXPORT_SYMBOL(transport_alloc_session_tags);
struct se_session *transport_init_session_tags(unsigned int tag_num,
- unsigned int tag_size)
+ unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int rc;
- se_sess = transport_init_session();
+ se_sess = transport_init_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
@@ -476,7 +504,7 @@ void transport_deregister_session(struct se_session *se_sess)
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context.
*/
- if (se_nacl && comp_nacl == true)
+ if (se_nacl && comp_nacl)
target_put_nacl(se_nacl);
transport_free_session(se_sess);
@@ -534,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return 1;
}
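Note: the switch from complete() to complete_all() here (and in the matching hunks below) accounts for more than one context blocking on t_transport_stop_comp, e.g. an ABORT_TASK racing session shutdown; complete() would release only a single waiter. Illustrative sketch:

	/* Context A (TMR abort path):      Context B (wait_for_tasks):
	 *   wait_for_completion(             wait_for_completion(
	 *     &cmd->t_transport_stop_comp);     &cmd->t_transport_stop_comp);
	 *
	 * complete() wakes exactly one of A/B; complete_all() marks the
	 * completion done, releasing both and any later arrival.
	 */
	complete_all(&cmd->t_transport_stop_comp);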
@@ -568,14 +596,24 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
- if (!lun || !cmd->lun_ref_active)
+ if (!lun)
return;
- percpu_ref_put(&lun->lun_ref);
+ if (cmpxchg(&cmd->lun_ref_active, true, false))
+ percpu_ref_put(&lun->lun_ref);
}
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_lun_remove_cmd(cmd);
+ /*
+ * Allow the fabric driver to unmap any resources before
+ * releasing the descriptor via TFO->release_cmd()
+ */
+ if (remove)
+ cmd->se_tfo->aborted_task(cmd);
+
if (transport_cmd_check_stop_to_fabric(cmd))
return;
if (remove)
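Note: the new TFO->aborted_task() hook lets the fabric unmap hardware resources before release_cmd() frees the descriptor. A hypothetical implementation — the my_fabric_* names are illustrative, not from the tree:

	static void my_fabric_aborted_task(struct se_cmd *se_cmd)
	{
		struct my_fabric_cmd *fcmd =
			container_of(se_cmd, struct my_fabric_cmd, se_cmd);

		/* Tear down DMA mappings still posted for the aborted
		 * descriptor so TFO->release_cmd() can free it safely. */
		my_fabric_unmap_data(fcmd);
	}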
@@ -642,9 +680,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
return;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
-
/*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
@@ -652,9 +687,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
if (cmd->transport_state & CMD_T_ABORTED &&
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
+ } else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -668,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
}
EXPORT_SYMBOL(target_complete_cmd);
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ cmd->residual_count += cmd->data_length - length;
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = cmd->data_length - length;
+ }
+
+ cmd->data_length = length;
+ }
+
+ target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
+
static void target_add_to_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
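Note: target_complete_cmd_with_length() above centralizes the residual bookkeeping that CDB emulation handlers previously open-coded. A sketch of a caller completing with a short payload (payload_len is hypothetical):

	/* An emulated INQUIRY produced payload_len bytes against an
	 * allocation length of cmd->data_length: the helper records
	 * residual_count = cmd->data_length - payload_len, sets
	 * SCF_UNDERFLOW_BIT, trims data_length, then completes normally. */
	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, payload_len);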
@@ -701,7 +753,7 @@ void target_qf_do_work(struct work_struct *work)
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
atomic_dec(&dev->dev_qf_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1078,6 +1130,7 @@ void transport_init_se_cmd(
init_completion(&cmd->cmd_wait_comp);
init_completion(&cmd->task_stop_comp);
spin_lock_init(&cmd->t_state_lock);
+ kref_init(&cmd->cmd_kref);
cmd->transport_state = CMD_T_DEV_ACTIVE;
cmd->se_tfo = tfo;
@@ -1113,7 +1166,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Dormant to Active status.
*/
cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
dev->transport->name);
@@ -1284,6 +1337,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
* @sgl_count: scatterlist count for unidirectional mapping
* @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
* @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
*
* Returns non zero to signal active I/O shutdown failure. All other
* setup exceptions will be returned as a SCSI CHECK_CONDITION response,
@@ -1296,7 +1351,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags,
struct scatterlist *sgl, u32 sgl_count,
- struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
struct se_portal_group *se_tpg;
sense_reason_t rc;
@@ -1344,6 +1400,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
transport_generic_request_failure(se_cmd, rc);
return 0;
}
+
+ /*
+ * Save pointers for SGLs containing protection information,
+ * if present.
+ */
+ if (sgl_prot_count) {
+ se_cmd->t_prot_sg = sgl_prot;
+ se_cmd->t_prot_nents = sgl_prot_count;
+ }
+
/*
* When a non zero sgl_count has been passed perform SGL passthrough
* mapping for pre-allocated fabric memory instead of having target
@@ -1380,6 +1446,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
return 0;
}
}
+
/*
* Check if we need to delay processing because of ALUA
* Active/NonOptimized primary access state..
@@ -1419,7 +1486,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
{
return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
unpacked_lun, data_length, task_attr, data_dir,
- flags, NULL, 0, NULL, 0);
+ flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
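Note: with the widened signature, a fabric that pre-maps protection information passes its PI scatterlist alongside the data SGLs; all other callers pass NULL, 0 as target_submit_cmd() now does. A hypothetical WRITE submission with PI — sgl, prot_sgl and their counts come from the fabric's own mapping:

	rc = target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense_buf,
			unpacked_lun, data_len, MSG_SIMPLE_TAG,
			DMA_TO_DEVICE, TARGET_SCF_ACK_KREF,
			sgl, sgl_count, NULL, 0,
			prot_sgl, prot_sgl_count);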
@@ -1565,6 +1632,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1653,7 +1723,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
return false;
case MSG_ORDERED_TAG:
atomic_inc(&dev->dev_ordered_sync);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
" se_ordered_id: %u\n",
@@ -1671,7 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
* For SIMPLE and UNTAGGED Task Attribute commands
*/
atomic_inc(&dev->simple_cmds);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
break;
}
@@ -1708,13 +1778,22 @@ void target_execute_cmd(struct se_cmd *cmd)
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return;
}
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
+ /*
+ * Perform WRITE_INSERT of PI using software emulation when backend
+ * device has PI enabled, if the transport has not already generated
+ * PI using hardware WRITE_INSERT offload.
+ */
+ if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+ sbc_dif_generate(cmd);
+ }
if (target_handle_task_attr(cmd)) {
spin_lock_irq(&cmd->t_state_lock);
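Note: the sbc_dif_generate() fallback above runs only when the session's sup_prot_ops says the transport cannot generate PI itself. A sketch of a session advertising hardware offload, so the software path is skipped (capability mask illustrative):

	/* Hypothetical: transport hardware inserts PI on writes and strips
	 * it on reads, so the core's software emulation is bypassed. */
	se_sess = transport_init_session(TARGET_PROT_DOUT_INSERT |
					 TARGET_PROT_DIN_STRIP);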
@@ -1767,7 +1846,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1779,7 +1858,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
atomic_dec(&dev->dev_ordered_sync);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1838,12 +1917,27 @@ static void transport_handle_queue_full(
spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc(&dev->dev_qf_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
schedule_work(&cmd->se_dev->qf_work_queue);
}
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+ sense_reason_t rc;
+
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+ rc = sbc_dif_read_strip(cmd);
+ if (rc) {
+ cmd->pi_err = rc;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void target_complete_ok_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1908,6 +2002,22 @@ static void target_complete_ok_work(struct work_struct *work)
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * Perform READ_STRIP of PI using software emulation when
+ * backend had PI enabled, if the transport will not be
+ * performing hardware READ_STRIP offload.
+ */
+ if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+ target_check_read_strip(cmd)) {
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->pi_err, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2000,6 +2110,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
+
+ transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ cmd->t_prot_sg = NULL;
+ cmd->t_prot_nents = 0;
}
/**
@@ -2163,6 +2277,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+ if (cmd->prot_op != TARGET_PROT_NORMAL) {
+ ret = target_alloc_sgl(&cmd->t_prot_sg,
+ &cmd->t_prot_nents,
+ cmd->prot_length, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
cmd->data_length, zero_flag);
if (ret < 0)
@@ -2253,13 +2375,12 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
unsigned long flags;
int ret = 0;
- kref_init(&se_cmd->cmd_kref);
/*
* Add a second kref if the fabric caller is expecting to handle
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref == true) {
+ if (ack_kref) {
kref_get(&se_cmd->cmd_kref);
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
@@ -2303,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref)
*/
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
+ if (!se_sess) {
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return 1;
+ }
return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
&se_sess->sess_cmd_lock);
}
@@ -2455,6 +2580,19 @@ static int transport_get_sense_codes(
return 0;
}
+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
+{
+ /* Place failed LBA in sense data information descriptor 0. */
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+ buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+ buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+ buffer[SPC_VALIDITY_OFFSET] = 0x80;
+
+ /* Descriptor Information: failing sector */
+ put_unaligned_be64(bad_sector, &buffer[12]);
+}
+
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
sense_reason_t reason, int from_transport)
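Note: for a concrete picture of what transport_err_sector_info() above emits — with bad_sector == 0x12345678, and assuming SPC_DESC_TYPE_OFFSET, SPC_ADDITIONAL_DESC_LEN_OFFSET and SPC_VALIDITY_OFFSET resolve to bytes 8, 9 and 10, the sense buffer carries:

	/*  buffer[7]      = 0x0c    additional sense length
	 *  buffer[8]      = 0x00    descriptor type: Information
	 *  buffer[9]      = 0x0a    additional descriptor length
	 *  buffer[10]     = 0x80    VALID bit
	 *  buffer[12..19] = 00 00 00 00 12 34 56 78  failing LBA, big-endian
	 */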
@@ -2648,6 +2786,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
break;
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK GUARD CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
default:
/* CURRENT ERROR */
@@ -2685,13 +2856,17 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
if (!(cmd->transport_state & CMD_T_ABORTED))
return 0;
- if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ /*
+ * If cmd has been aborted but either no status is to be sent or it has
+ * already been sent, just return
+ */
+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
return 1;
pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
@@ -2705,7 +2880,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
@@ -2720,7 +2895,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
cmd->transport_state |= CMD_T_ABORTED;
- smp_mb__after_atomic_inc();
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ smp_mb__after_atomic();
return;
}
}
@@ -2779,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->transport_state |= CMD_T_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
INIT_WORK(&cmd->work, target_tmr_work);
queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
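Note: marking the TMR CMD_T_ACTIVE before it is queued matters because transport_wait_for_tasks() bails out early on commands never marked active; without this, a concurrent shutdown could return before the TMR work item had run. The check the TMR now passes looks roughly like:

	/* From transport_wait_for_tasks(): a command (or now, a TMR)
	 * that never went active is not waited on. */
	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}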