Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/Kconfig  3
-rw-r--r--  drivers/target/Makefile  7
-rw-r--r--  drivers/target/iscsi/Makefile  3
-rw-r--r--  drivers/target/iscsi/iscsi_target.c  2960
-rw-r--r--  drivers/target/iscsi/iscsi_target.h  20
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c  132
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.h  1
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c  539
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h  134
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.c  39
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.c  24
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.h  2
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c  157
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c  94
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.h  4
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.c  55
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.h  2
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c  814
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h  9
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c  588
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.h  11
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.c  25
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.h  3
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c  236
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h  27
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.c  202
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.h  2
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c  47
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c  98
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c  180
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h  6
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.c  180
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.h  6
-rw-r--r--  drivers/target/iscsi/iscsi_target_transport.c  55
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c  390
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h  19
-rw-r--r--  drivers/target/loopback/tcm_loop.c  622
-rw-r--r--  drivers/target/loopback/tcm_loop.h  11
-rw-r--r--  drivers/target/sbp/Kconfig  11
-rw-r--r--  drivers/target/sbp/Makefile  1
-rw-r--r--  drivers/target/sbp/sbp_target.c  2614
-rw-r--r--  drivers/target/sbp/sbp_target.h  251
-rw-r--r--  drivers/target/target_core_alua.c  1240
-rw-r--r--  drivers/target/target_core_alua.h  57
-rw-r--r--  drivers/target/target_core_cdb.c  1279
-rw-r--r--  drivers/target/target_core_configfs.c  1167
-rw-r--r--  drivers/target/target_core_device.c  1237
-rw-r--r--  drivers/target/target_core_fabric_configfs.c  139
-rw-r--r--  drivers/target/target_core_fabric_lib.c  11
-rw-r--r--  drivers/target/target_core_file.c  876
-rw-r--r--  drivers/target/target_core_file.h  22
-rw-r--r--  drivers/target/target_core_hba.c  9
-rw-r--r--  drivers/target/target_core_iblock.c  779
-rw-r--r--  drivers/target/target_core_iblock.h  10
-rw-r--r--  drivers/target/target_core_internal.h  60
-rw-r--r--  drivers/target/target_core_pr.c  1672
-rw-r--r--  drivers/target/target_core_pr.h  17
-rw-r--r--  drivers/target/target_core_pscsi.c  628
-rw-r--r--  drivers/target/target_core_pscsi.h  5
-rw-r--r--  drivers/target/target_core_rd.c  550
-rw-r--r--  drivers/target/target_core_rd.h  26
-rw-r--r--  drivers/target/target_core_sbc.c  1336
-rw-r--r--  drivers/target/target_core_spc.c  1445
-rw-r--r--  drivers/target/target_core_stat.c  377
-rw-r--r--  drivers/target/target_core_tmr.c  229
-rw-r--r--  drivers/target/target_core_tpg.c  243
-rw-r--r--  drivers/target/target_core_transport.c  4095
-rw-r--r--  drivers/target/target_core_ua.c  39
-rw-r--r--  drivers/target/target_core_ua.h  4
-rw-r--r--  drivers/target/target_core_xcopy.c  1081
-rw-r--r--  drivers/target/target_core_xcopy.h  62
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h  26
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c  238
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c  138
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c  57
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c  87
76 files changed, 17606 insertions, 12219 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index b28794b7212..dc2d84ac5a0 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -3,6 +3,7 @@ menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
depends on SCSI && BLOCK
select CONFIGFS_FS
+ select CRC_T10DIF
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +14,7 @@ if TARGET_CORE
config TCM_IBLOCK
tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+ select BLK_DEV_INTEGRITY
help
Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
access to Linux/Block devices using BIO
@@ -32,5 +34,6 @@ config TCM_PSCSI
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
source "drivers/target/iscsi/Kconfig"
+source "drivers/target/sbp/Kconfig"
endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 62e54053bcd..85b012d2f89 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -9,10 +9,12 @@ target_core_mod-y := target_core_configfs.o \
target_core_tmr.o \
target_core_tpg.o \
target_core_transport.o \
- target_core_cdb.o \
+ target_core_sbc.o \
+ target_core_spc.o \
target_core_ua.o \
target_core_rd.o \
- target_core_stat.o
+ target_core_stat.o \
+ target_core_xcopy.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
@@ -25,3 +27,4 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
obj-$(CONFIG_TCM_FC) += tcm_fc/
obj-$(CONFIG_ISCSI_TARGET) += iscsi/
+obj-$(CONFIG_SBP_TARGET) += sbp/
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 5b9a2cf7f0a..13a92403fe3 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -15,6 +15,7 @@ iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_util.o \
iscsi_target.o \
iscsi_target_configfs.o \
- iscsi_target_stat.o
+ iscsi_target_stat.o \
+ iscsi_target_transport.o
obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index bf0e8e75a27..1f4c794f5fc 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains main functions related to the iSCSI Target Core Driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -23,11 +21,14 @@
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/module.h>
+#include <linux/idr.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -46,10 +47,12 @@
#include "iscsi_target_device.h"
#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_transport.h>
+
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
static struct idr tiqn_idr;
struct idr sess_idr;
@@ -58,15 +61,13 @@ spinlock_t sess_idr_lock;
struct iscsit_global *iscsit_global;
-struct kmem_cache *lio_cmd_cache;
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;
static int iscsit_handle_immediate_data(struct iscsi_cmd *,
- unsigned char *buf, u32);
-static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+ struct iscsi_scsi_req *, u32);
struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
@@ -141,23 +142,24 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
spin_lock_init(&tiqn->login_stats.lock);
spin_lock_init(&tiqn->logout_stats.lock);
- if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
- pr_err("idr_pre_get() for tiqn_idr failed\n");
- kfree(tiqn);
- return ERR_PTR(-ENOMEM);
- }
tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+ idr_preload(GFP_KERNEL);
spin_lock(&tiqn_lock);
- ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+
+ ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
if (ret < 0) {
- pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+ pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
spin_unlock(&tiqn_lock);
+ idr_preload_end();
kfree(tiqn);
return ERR_PTR(ret);
}
+ tiqn->tiqn_index = ret;
list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+
spin_unlock(&tiqn_lock);
+ idr_preload_end();
pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
@@ -215,11 +217,6 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
spin_unlock_bh(&np->np_thread_lock);
return -1;
}
- if (np->np_login_tpg) {
- pr_err("np->np_login_tpg() is not NULL!\n");
- spin_unlock_bh(&np->np_thread_lock);
- return -1;
- }
spin_unlock_bh(&np->np_thread_lock);
/*
* Determine if the portal group is accepting storage traffic.
@@ -234,26 +231,38 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
/*
* Here we serialize access across the TIQN+TPG Tuple.
*/
- ret = mutex_lock_interruptible(&tpg->np_login_lock);
+ ret = down_interruptible(&tpg->np_login_sem);
if ((ret != 0) || signal_pending(current))
return -1;
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_tpg = tpg;
- spin_unlock_bh(&np->np_thread_lock);
+ spin_lock_bh(&tpg->tpg_state_lock);
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ spin_unlock_bh(&tpg->tpg_state_lock);
+ up(&tpg->np_login_sem);
+ return -1;
+ }
+ spin_unlock_bh(&tpg->tpg_state_lock);
return 0;
}
-int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+void iscsit_login_kref_put(struct kref *kref)
+{
+ struct iscsi_tpg_np *tpg_np = container_of(kref,
+ struct iscsi_tpg_np, tpg_np_kref);
+
+ complete(&tpg_np->tpg_np_comp);
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
+ struct iscsi_tpg_np *tpg_np)
{
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_tpg = NULL;
- spin_unlock_bh(&np->np_thread_lock);
+ up(&tpg->np_login_sem);
- mutex_unlock(&tpg->np_login_lock);
+ if (tpg_np)
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
if (tiqn)
iscsit_put_tiqn_for_login(tiqn);
@@ -261,60 +270,73 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
return 0;
}
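
iscsit_login_kref_put() above pairs a kref with a completion so a shutdown path can wait until the last login reference drops. A generic sketch of that teardown pattern, with hypothetical example_* names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>

struct example_obj {
	struct kref kref;
	struct completion done;
};

static void example_obj_init(struct example_obj *obj)
{
	kref_init(&obj->kref);		/* refcount starts at 1: the owner's reference */
	init_completion(&obj->done);
}

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	complete(&obj->done);		/* last reference gone: wake the waiter */
}

static void example_shutdown(struct example_obj *obj)
{
	kref_put(&obj->kref, example_release);	/* drop the owner's reference */
	wait_for_completion(&obj->done);	/* block until all other users are done */
}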
-static struct iscsi_np *iscsit_get_np(
+bool iscsit_check_np_match(
struct __kernel_sockaddr_storage *sockaddr,
+ struct iscsi_np *np,
int network_transport)
{
struct sockaddr_in *sock_in, *sock_in_e;
struct sockaddr_in6 *sock_in6, *sock_in6_e;
- struct iscsi_np *np;
- int ip_match = 0;
+ bool ip_match = false;
u16 port;
- spin_lock_bh(&np_lock);
- list_for_each_entry(np, &g_np_list, np_list) {
- spin_lock(&np->np_thread_lock);
- if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
- spin_unlock(&np->np_thread_lock);
- continue;
- }
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+ if (!memcmp(&sock_in6->sin6_addr.in6_u,
+ &sock_in6_e->sin6_addr.in6_u,
+ sizeof(struct in6_addr)))
+ ip_match = true;
+
+ port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
- if (sockaddr->ss_family == AF_INET6) {
- sock_in6 = (struct sockaddr_in6 *)sockaddr;
- sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+ if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
+ ip_match = true;
- if (!memcmp(&sock_in6->sin6_addr.in6_u,
- &sock_in6_e->sin6_addr.in6_u,
- sizeof(struct in6_addr)))
- ip_match = 1;
+ port = ntohs(sock_in->sin_port);
+ }
- port = ntohs(sock_in6->sin6_port);
- } else {
- sock_in = (struct sockaddr_in *)sockaddr;
- sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+ if (ip_match && (np->np_port == port) &&
+ (np->np_network_transport == network_transport))
+ return true;
- if (sock_in->sin_addr.s_addr ==
- sock_in_e->sin_addr.s_addr)
- ip_match = 1;
+ return false;
+}
- port = ntohs(sock_in->sin_port);
+/*
+ * Called with mutex np_lock held
+ */
+static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct iscsi_np *np;
+ bool match;
+
+ list_for_each_entry(np, &g_np_list, np_list) {
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ continue;
}
- if ((ip_match == 1) && (np->np_port == port) &&
- (np->np_network_transport == network_transport)) {
+ match = iscsit_check_np_match(sockaddr, np, network_transport);
+ if (match) {
/*
* Increment the np_exports reference count now to
* prevent iscsit_del_np() below from being called
* while iscsi_tpg_add_network_portal() is called.
*/
np->np_exports++;
- spin_unlock(&np->np_thread_lock);
- spin_unlock_bh(&np_lock);
+ spin_unlock_bh(&np->np_thread_lock);
return np;
}
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
}
- spin_unlock_bh(&np_lock);
return NULL;
}
@@ -328,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
struct sockaddr_in6 *sock_in6;
struct iscsi_np *np;
int ret;
+
+ mutex_lock(&np_lock);
+
/*
* Locate the existing struct iscsi_np if already active..
*/
np = iscsit_get_np(sockaddr, network_transport);
- if (np)
+ if (np) {
+ mutex_unlock(&np_lock);
return np;
+ }
np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
if (!np) {
pr_err("Unable to allocate memory for struct iscsi_np\n");
+ mutex_unlock(&np_lock);
return ERR_PTR(-ENOMEM);
}
@@ -360,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
@@ -368,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
pr_err("Unable to create kthread: iscsi_np\n");
ret = PTR_ERR(np->np_thread);
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
/*
@@ -378,14 +408,13 @@ struct iscsi_np *iscsit_add_np(
* point because iscsi_np has not been added to g_np_list yet.
*/
np->np_exports = 1;
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
- spin_lock_bh(&np_lock);
list_add_tail(&np->np_list, &g_np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
- np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
- "TCP" : "SCTP");
+ np->np_ip, np->np_port, np->np_transport->name);
return np;
}
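
Converting np_lock from a spinlock to a mutex lets iscsit_add_np() hold it across the lookup, the GFP_KERNEL allocation, and the socket/kthread setup, closing the race between concurrent portal additions. The general find-or-create-under-a-mutex shape, sketched with hypothetical names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_obj {
	int key;
	int refs;
	struct list_head node;
};

static DEFINE_MUTEX(example_lock);
static LIST_HEAD(example_list);

static struct example_obj *example_get_or_add(int key)
{
	struct example_obj *obj;

	mutex_lock(&example_lock);
	list_for_each_entry(obj, &example_list, node) {
		if (obj->key == key) {
			obj->refs++;			/* existing entry: take a reference */
			mutex_unlock(&example_lock);
			return obj;
		}
	}
	/* Sleeping allocation is fine here because we hold a mutex, not a spinlock. */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj) {
		obj->key = key;
		obj->refs = 1;
		list_add_tail(&obj->node, &example_list);
	}
	mutex_unlock(&example_lock);
	return obj;
}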
@@ -393,20 +422,10 @@ struct iscsi_np *iscsit_add_np(
int iscsit_reset_np_thread(
struct iscsi_np *np,
struct iscsi_tpg_np *tpg_np,
- struct iscsi_portal_group *tpg)
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
spin_lock_bh(&np->np_thread_lock);
- if (tpg && tpg_np) {
- /*
- * The reset operation need only be performed when the
- * passed struct iscsi_portal_group has a login in progress
- * to one of the network portals.
- */
- if (tpg_np->tpg_np->np_login_tpg != tpg) {
- spin_unlock_bh(&np->np_thread_lock);
- return 0;
- }
- }
if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
return 0;
@@ -421,32 +440,27 @@ int iscsit_reset_np_thread(
}
spin_unlock_bh(&np->np_thread_lock);
- return 0;
-}
-
-int iscsit_del_np_comm(struct iscsi_np *np)
-{
- if (!np->np_socket)
- return 0;
+ if (tpg_np && shutdown) {
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
- /*
- * Some network transports allocate their own struct sock->file,
- * see if we need to free any additional allocated resources.
- */
- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
- kfree(np->np_socket->file);
- np->np_socket->file = NULL;
+ wait_for_completion(&tpg_np->tpg_np_comp);
}
- sock_release(np->np_socket);
return 0;
}
+static void iscsit_free_np(struct iscsi_np *np)
+{
+ if (np->np_socket)
+ sock_release(np->np_socket);
+}
+
int iscsit_del_np(struct iscsi_np *np)
{
spin_lock_bh(&np->np_thread_lock);
np->np_exports--;
if (np->np_exports) {
+ np->enabled = true;
spin_unlock_bh(&np->np_thread_lock);
return 0;
}
@@ -460,21 +474,67 @@ int iscsit_del_np(struct iscsi_np *np)
*/
send_sig(SIGINT, np->np_thread, 1);
kthread_stop(np->np_thread);
+ np->np_thread = NULL;
}
- iscsit_del_np_comm(np);
- spin_lock_bh(&np_lock);
+ np->np_transport->iscsit_free_np(np);
+
+ mutex_lock(&np_lock);
list_del(&np->np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
- np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
- "TCP" : "SCTP");
+ np->np_ip, np->np_port, np->np_transport->name);
+ iscsit_put_transport(np->np_transport);
kfree(np);
return 0;
}
+static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+
+static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+ bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ __iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+ return TARGET_PROT_NORMAL;
+}
+
+static struct iscsit_transport iscsi_target_transport = {
+ .name = "iSCSI/TCP",
+ .transport_type = ISCSI_TCP,
+ .owner = NULL,
+ .iscsit_setup_np = iscsit_setup_np,
+ .iscsit_accept_np = iscsit_accept_np,
+ .iscsit_free_np = iscsit_free_np,
+ .iscsit_get_login_rx = iscsit_get_login_rx,
+ .iscsit_put_login_tx = iscsit_put_login_tx,
+ .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
+ .iscsit_immediate_queue = iscsit_immediate_queue,
+ .iscsit_response_queue = iscsit_response_queue,
+ .iscsit_queue_data_in = iscsit_queue_rsp,
+ .iscsit_queue_status = iscsit_queue_rsp,
+ .iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
+};
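
The iscsi_target_transport table above turns the traditional TCP path into one provider behind the new iscsit_transport abstraction. A hedged sketch of how another transport module might register itself against the same API (the example_* names are illustrative, and a real driver would define its own transport type rather than reuse ISCSI_TCP):

#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>

static struct iscsit_transport example_transport = {
	.name		= "example",
	.transport_type	= ISCSI_TCP,	/* hypothetical: reusing the TCP type for brevity */
	.owner		= THIS_MODULE,
	/* A functional transport must also fill in the iscsit_* callbacks. */
};

static int __init example_transport_init(void)
{
	return iscsit_register_transport(&example_transport);
}

static void __exit example_transport_exit(void)
{
	iscsit_unregister_transport(&example_transport);
}

module_init(example_transport_init);
module_exit(example_transport_exit);
MODULE_LICENSE("GPL");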
+
static int __init iscsi_target_init_module(void)
{
int ret = 0;
@@ -506,22 +566,13 @@ static int __init iscsi_target_init_module(void)
goto ts_out1;
}
- lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
- sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
- 0, NULL);
- if (!lio_cmd_cache) {
- pr_err("Unable to kmem_cache_create() for"
- " lio_cmd_cache\n");
- goto ts_out2;
- }
-
lio_qr_cache = kmem_cache_create("lio_qr_cache",
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
pr_err("nable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto cmd_out;
+ goto ts_out2;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -551,6 +602,8 @@ static int __init iscsi_target_init_module(void)
goto ooo_out;
}
+ iscsit_register_transport(&iscsi_target_transport);
+
if (iscsit_load_discovery_tpg() < 0)
goto r2t_out;
@@ -563,8 +616,6 @@ dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
-cmd_out:
- kmem_cache_destroy(lio_cmd_cache);
ts_out2:
iscsi_deallocate_thread_sets();
ts_out1:
@@ -581,7 +632,7 @@ static void __exit iscsi_target_cleanup_module(void)
iscsi_deallocate_thread_sets();
iscsi_thread_set_free();
iscsit_release_discovery_tpg();
- kmem_cache_destroy(lio_cmd_cache);
+ iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_qr_cache);
kmem_cache_destroy(lio_dr_cache);
kmem_cache_destroy(lio_ooo_cache);
@@ -592,58 +643,44 @@ static void __exit iscsi_target_cleanup_module(void)
kfree(iscsit_global);
}
-int iscsit_add_reject(
+static int iscsit_add_reject(
+ struct iscsi_conn *conn,
u8 reason,
- int fail_conn,
- unsigned char *buf,
- struct iscsi_conn *conn)
+ unsigned char *buf)
{
struct iscsi_cmd *cmd;
- struct iscsi_reject *hdr;
- int ret;
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
return -1;
cmd->iscsi_opcode = ISCSI_OP_REJECT;
- if (fail_conn)
- cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
- hdr = (struct iscsi_reject *) cmd->pdu;
- hdr->reason = reason;
+ cmd->reject_reason = reason;
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
- iscsit_release_cmd(cmd);
+ iscsit_free_cmd(cmd, false);
return -1;
}
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
cmd->i_state = ISTATE_SEND_REJECT;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
- ret = wait_for_completion_interruptible(&cmd->reject_comp);
- if (ret != 0)
- return -1;
-
- return (!fail_conn) ? 0 : -1;
+ return -1;
}
-int iscsit_add_reject_from_cmd(
+static int iscsit_add_reject_from_cmd(
+ struct iscsi_cmd *cmd,
u8 reason,
- int fail_conn,
- int add_to_conn,
- unsigned char *buf,
- struct iscsi_cmd *cmd)
+ bool add_to_conn,
+ unsigned char *buf)
{
struct iscsi_conn *conn;
- struct iscsi_reject *hdr;
- int ret;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -653,40 +690,48 @@ int iscsit_add_reject_from_cmd(
conn = cmd->conn;
cmd->iscsi_opcode = ISCSI_OP_REJECT;
- if (fail_conn)
- cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
- hdr = (struct iscsi_reject *) cmd->pdu;
- hdr->reason = reason;
+ cmd->reject_reason = reason;
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
- iscsit_release_cmd(cmd);
+ iscsit_free_cmd(cmd, false);
return -1;
}
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
}
cmd->i_state = ISTATE_SEND_REJECT;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ /*
+ * Perform the kref_put now if se_cmd has already been setup by
+ * iscsit_setup_scsi_cmd()
+ */
+ if (cmd->se_cmd.se_tfo != NULL) {
+ pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ }
+ return -1;
+}
- ret = wait_for_completion_interruptible(&cmd->reject_comp);
- if (ret != 0)
- return -1;
+static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
+ unsigned char *buf)
+{
+ return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
+}
- return (!fail_conn) ? 0 : -1;
+int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
+{
+ return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
/*
* Map some portion of the allocated scatterlist to an iovec, suitable for
- * kernel sockets to copy data in/out. This handles both pages and slab-allocated
- * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
- * either case (see iscsit_alloc_buffs)
+ * kernel sockets to copy data in/out.
*/
static int iscsit_map_iovec(
struct iscsi_cmd *cmd,
@@ -699,10 +744,9 @@ static int iscsit_map_iovec(
unsigned int page_off;
/*
- * We have a private mapping of the allocated pages in t_mem_sg.
- * At this point, we also know each contains a page.
+ * We know each entry in t_data_sg contains a page.
*/
- sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
+ sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
page_off = (data_offset % PAGE_SIZE);
cmd->first_data_sg = sg;
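
iscsit_map_iovec() walks the command's scatterlist from the requested offset and exposes each page as a kvec entry for the kernel socket calls. A sketch of the per-entry mapping (hypothetical helper; assumes kmap()-based page access, as this era's code uses):

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/uio.h>

static u32 example_sg_to_kvec(struct scatterlist *sg, struct kvec *iov,
			      u32 page_off, u32 want)
{
	u32 cur_len = min_t(u32, want, sg->length - page_off);

	iov->iov_base = kmap(sg_page(sg)) + sg->offset + page_off;	/* needs a later kunmap() */
	iov->iov_len = cur_len;

	return cur_len;		/* bytes covered by this kvec entry */
}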
@@ -738,30 +782,37 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
{
- struct iscsi_cmd *cmd;
+ LIST_HEAD(ack_list);
+ struct iscsi_cmd *cmd, *cmd_p;
conn->exp_statsn = exp_statsn;
+ if (conn->sess->sess_ops->RDMAExtensions)
+ return;
+
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
spin_lock(&cmd->istate_lock);
if ((cmd->i_state == ISTATE_SENT_STATUS) &&
- (cmd->stat_sn < exp_statsn)) {
+ iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
cmd->i_state = ISTATE_REMOVE;
spin_unlock(&cmd->istate_lock);
- iscsit_add_cmd_to_immediate_queue(cmd, conn,
- cmd->i_state);
+ list_move_tail(&cmd->i_conn_node, &ack_list);
continue;
}
spin_unlock(&cmd->istate_lock);
}
spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+ list_del_init(&cmd->i_conn_node);
+ iscsit_free_cmd(cmd, false);
+ }
}
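
The change from a plain `<` to iscsi_sna_lt() matters because StatSN uses 32-bit serial number arithmetic (RFC 1982) and may wrap. A stand-alone sketch of the wrap-safe "less than" comparison (hypothetical names; the constant is 2^31):

#include <linux/types.h>

#define EXAMPLE_SNA32_CHECK 2147483648UL	/* 2^31 */

static bool example_sna_lt(u32 n1, u32 n2)
{
	/* n1 < n2 in serial-number arithmetic, tolerating 32-bit wraparound */
	return n1 != n2 &&
	       ((n1 < n2 && n2 - n1 < EXAMPLE_SNA32_CHECK) ||
		(n1 > n2 && n1 - n2 > EXAMPLE_SNA32_CHECK));
}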
static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
{
- u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
- cmd->se_cmd.t_data_nents;
+ u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
iov_count += ISCSI_IOV_DATA_BUFFER;
@@ -775,90 +826,18 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
return 0;
}
-static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
+int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf)
{
- struct scatterlist *sgl;
- u32 length = cmd->se_cmd.data_length;
- int nents = DIV_ROUND_UP(length, PAGE_SIZE);
- int i = 0, ret;
- /*
- * If no SCSI payload is present, allocate the default iovecs used for
- * iSCSI PDU Header
- */
- if (!length)
- return iscsit_allocate_iovecs(cmd);
-
- sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
- if (!sgl)
- return -ENOMEM;
-
- sg_init_table(sgl, nents);
-
- while (length) {
- int buf_size = min_t(int, length, PAGE_SIZE);
- struct page *page;
-
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
- goto page_alloc_failed;
-
- sg_set_page(&sgl[i], page, buf_size, 0);
-
- length -= buf_size;
- i++;
- }
-
- cmd->t_mem_sg = sgl;
- cmd->t_mem_sg_nents = nents;
-
- /* BIDI ops not supported */
-
- /* Tell the core about our preallocated memory */
- transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
- /*
- * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
- * so that cmd->se_cmd.t_tasks_se_num has been set.
- */
- ret = iscsit_allocate_iovecs(cmd);
- if (ret < 0)
- goto page_alloc_failed;
-
- return 0;
-
-page_alloc_failed:
- while (i >= 0) {
- __free_page(sg_page(&sgl[i]));
- i--;
- }
- kfree(cmd->t_mem_sg);
- cmd->t_mem_sg = NULL;
- return -ENOMEM;
-}
-
-static int iscsit_handle_scsi_cmd(
- struct iscsi_conn *conn,
- unsigned char *buf)
-{
- int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
- int dump_immediate_data = 0, send_check_condition = 0, payload_length;
- struct iscsi_cmd *cmd = NULL;
+ int data_direction, payload_length;
struct iscsi_scsi_req *hdr;
+ int iscsi_task_attr;
+ int sam_task_attr;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->cmd_pdus++;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->num_cmds++;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->cmd_pdus);
hdr = (struct iscsi_scsi_req *) buf;
payload_length = ntoh24(hdr->dlength);
- hdr->itt = be32_to_cpu(hdr->itt);
- hdr->data_length = be32_to_cpu(hdr->data_length);
- hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
- hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
/* FIXME; Add checks for AdditionalHeaderSegment */
@@ -866,108 +845,116 @@ static int iscsit_handle_scsi_cmd(
!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
" not set. Bad iSCSI Initiator.\n");
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
/*
- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
- * that adds support for RESERVE/RELEASE. There is a bug
- * add with this new functionality that sets R/W bits when
- * neither CDB carries any READ or WRITE datapayloads.
+ * From RFC-3720 Section 10.3.1:
+ *
+ * "Either or both of R and W MAY be 1 when either the
+ * Expected Data Transfer Length and/or Bidirectional Read
+ * Expected Data Transfer Length are 0"
+ *
+ * For this case, go ahead and clear the unnecessary bits
+ * to avoid any confusion with ->data_direction.
*/
- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- goto done;
- }
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
" set when Expected Data Transfer Length is 0 for"
- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
- buf, conn);
+ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
}
-done:
if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
" MUST be set if Expected Data Transfer Length is not 0."
" Bad iSCSI Initiator\n");
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
pr_err("Bidirectional operations not supported!\n");
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
pr_err("Illegally set Immediate Bit in iSCSI Initiator"
" Scsi Command PDU.\n");
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (payload_length && !conn->sess->sess_ops->ImmediateData) {
pr_err("ImmediateData=No but DataSegmentLength=%u,"
" protocol error.\n", payload_length);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}
- if ((hdr->data_length == payload_length) &&
+ if ((be32_to_cpu(hdr->data_length) == payload_length) &&
(!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
pr_err("Expected Data Transfer Length and Length of"
" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
" bit is not set protocol error\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}
- if (payload_length > hdr->data_length) {
+ if (payload_length > be32_to_cpu(hdr->data_length)) {
pr_err("DataSegmentLength: %u is greater than"
" EDTL: %u, protocol error.\n", payload_length,
hdr->data_length);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}
- if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("DataSegmentLength: %u is greater than"
- " MaxRecvDataSegmentLength: %u, protocol error.\n",
- payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ " MaxXmitDataSegmentLength: %u, protocol error.\n",
+ payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
pr_err("DataSegmentLength: %u is greater than"
" FirstBurstLength: %u, protocol error.\n",
payload_length, conn->sess->sess_ops->FirstBurstLength);
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
(hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
DMA_NONE;
- cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
- (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
- if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
- buf, conn);
-
- pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
- " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
- hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+ cmd->data_direction = data_direction;
+ iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
+ /*
+ * Figure out the SAM Task Attribute for the incoming SCSI CDB
+ */
+ if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+ (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+ sam_task_attr = MSG_SIMPLE_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+ sam_task_attr = MSG_ORDERED_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+ sam_task_attr = MSG_HEAD_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+ sam_task_attr = MSG_ACA_TAG;
+ else {
+ pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+ " MSG_SIMPLE_TAG\n", iscsi_task_attr);
+ sam_task_attr = MSG_SIMPLE_TAG;
+ }
cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
cmd->i_state = ISTATE_NEW_CMD;
@@ -987,81 +974,85 @@ done:
spin_unlock_bh(&conn->sess->ttt_lock);
} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
cmd->targ_xfer_tag = 0xFFFFFFFF;
- cmd->cmd_sn = hdr->cmdsn;
- cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->first_burst_len = payload_length;
- if (cmd->data_direction == DMA_FROM_DEVICE) {
+ if (!conn->sess->sess_ops->RDMAExtensions &&
+ cmd->data_direction == DMA_FROM_DEVICE) {
struct iscsi_datain_req *dr;
dr = iscsit_allocate_datain_req();
if (!dr)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
iscsit_attach_datain_req(cmd, dr);
}
/*
- * The CDB is going to an se_device_t.
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
- ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
- get_unaligned_le64(&hdr->lun));
- if (ret < 0) {
- if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
- pr_debug("Responding to non-acl'ed,"
- " non-existent or non-exported iSCSI LUN:"
- " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+ transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+ cmd->data_direction, sam_task_attr,
+ cmd->sense_buffer + 2);
+
+ pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+ " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+ hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+ conn->cid);
+
+ target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+
+ cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ scsilun_to_int(&hdr->lun));
+ if (cmd->sense_reason)
+ goto attach_cmd;
+
+ cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
+ if (cmd->sense_reason) {
+ if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
- send_check_condition = 1;
+
goto attach_cmd;
}
- /*
- * The Initiator Node has access to the LUN (the addressing method
- * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
- * allocate 1->N transport tasks (depending on sector count and
- * maximum request size the physical HBA(s) can handle.
- */
- transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
- if (transport_ret == -ENOMEM) {
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
- } else if (transport_ret == -EINVAL) {
- /*
- * Unsupported SAM Opcode. CHECK_CONDITION will be sent
- * in iscsit_execute_cmd() during the CmdSN OOO Execution
- * Mechinism.
- */
- send_check_condition = 1;
- } else {
- cmd->data_length = cmd->se_cmd.data_length;
- if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
attach_cmd:
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
/*
* Check if we need to delay processing because of ALUA
* Active/NonOptimized primary access state..
*/
core_alua_check_nonop_delay(&cmd->se_cmd);
- /*
- * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
- * also call iscsit_allocate_iovecs()
- */
- ret = iscsit_alloc_buffs(cmd);
- if (ret < 0)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 0, buf, cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
+
+void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
+{
+ iscsit_set_dataout_sequence_values(cmd);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
+
+int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_scsi_req *hdr)
+{
+ int cmdsn_ret = 0;
/*
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if
* the Immediate Bit is not set, and no Immediate
@@ -1073,57 +1064,68 @@ attach_cmd:
* be acknowledged. (See below)
*/
if (!cmd->immediate_data) {
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+ else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ }
}
- iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+ iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
/*
* If no Immediate Data is attached, it's OK to return now.
*/
if (!cmd->immediate_data) {
- if (send_check_condition)
+ if (!cmd->sense_reason && cmd->unsolicited_data)
+ iscsit_set_unsoliticed_dataout(cmd);
+ if (!cmd->sense_reason)
return 0;
- if (cmd->unsolicited_data) {
- iscsit_set_dataout_sequence_values(cmd);
-
- spin_lock_bh(&cmd->dataout_timeout_lock);
- iscsit_start_dataout_timer(cmd, cmd->conn);
- spin_unlock_bh(&cmd->dataout_timeout_lock);
- }
-
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
return 0;
}
/*
- * Early CHECK_CONDITIONs never make it to the transport processing
- * thread. They are processed in CmdSN order by
- * iscsit_check_received_cmdsn() below.
+ * Early CHECK_CONDITIONs with ImmediateData never make it to command
+ * execution. These exceptions are processed in CmdSN order using
+ * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
*/
- if (send_check_condition) {
- immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
- dump_immediate_data = 1;
- goto after_immediate_data;
+ if (cmd->sense_reason) {
+ if (cmd->reject_reason)
+ return 0;
+
+ return 1;
}
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
*/
- ret = transport_generic_new_cmd(&cmd->se_cmd);
- if (ret < 0) {
- immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
- dump_immediate_data = 1;
+ cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
+ if (cmd->sense_reason)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_process_scsi_cmd);
+
+static int
+iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+ bool dump_payload)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+ */
+ if (dump_payload)
goto after_immediate_data;
- }
- immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
+ immed_ret = iscsit_handle_immediate_data(cmd, hdr,
+ cmd->first_burst_len);
after_immediate_data:
if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
/*
@@ -1131,26 +1133,20 @@ after_immediate_data:
* DataCRC, check against ExpCmdSN/MaxCmdSN if
* Immediate Bit is not set.
*/
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- /*
- * Special case for Unsupported SAM WRITE Opcodes
- * and ImmediateData=Yes.
- */
- if (dump_immediate_data) {
- if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
- return -1;
- } else if (cmd->unsolicited_data) {
- iscsit_set_dataout_sequence_values(cmd);
+ cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
- spin_lock_bh(&cmd->dataout_timeout_lock);
- iscsit_start_dataout_timer(cmd, cmd->conn);
- spin_unlock_bh(&cmd->dataout_timeout_lock);
- }
+ if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ int rc;
- if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ rc = iscsit_dump_data_payload(cmd->conn,
+ cmd->first_burst_len, 1);
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ return rc;
+ } else if (cmd->unsolicited_data)
+ iscsit_set_unsoliticed_dataout(cmd);
} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
/*
@@ -1165,13 +1161,46 @@ after_immediate_data:
* CmdSN and issue a retry to plug the sequence.
*/
cmd->i_state = ISTATE_REMOVE;
- iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
return -1;
return 0;
}
+static int
+iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
+ int rc, immed_data;
+ bool dump_payload = false;
+
+ rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
+ if (rc < 0)
+ return 0;
+ /*
+ * Allocate the iovecs needed for struct socket operations for
+ * traditional iSCSI block I/O.
+ */
+ if (iscsit_allocate_iovecs(cmd) < 0) {
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
+ immed_data = cmd->immediate_data;
+
+ rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return rc;
+ else if (rc > 0)
+ dump_payload = true;
+
+ if (!immed_data)
+ return 0;
+
+ return iscsit_get_immediate_data(cmd, hdr, dump_payload);
+}
+
static u32 iscsit_do_crypto_hash_sg(
struct hash_desc *hash,
struct iscsi_cmd *cmd,
@@ -1214,7 +1243,7 @@ static u32 iscsit_do_crypto_hash_sg(
static void iscsit_do_crypto_hash_buf(
struct hash_desc *hash,
- unsigned char *buf,
+ const void *buf,
u32 payload_length,
u32 padding,
u8 *pad_bytes,
@@ -1234,48 +1263,30 @@ static void iscsit_do_crypto_hash_buf(
crypto_hash_final(hash, data_crc);
}
-static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+int
+iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
+ struct iscsi_cmd **out_cmd)
{
- int iov_ret, ooo_cmdsn = 0, ret;
- u8 data_crc_failed = 0;
- u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
- u32 rx_size = 0, payload_length;
+ struct iscsi_data *hdr = (struct iscsi_data *)buf;
struct iscsi_cmd *cmd = NULL;
struct se_cmd *se_cmd;
- struct iscsi_data *hdr;
- struct kvec *iov;
- unsigned long flags;
-
- hdr = (struct iscsi_data *) buf;
- payload_length = ntoh24(hdr->dlength);
- hdr->itt = be32_to_cpu(hdr->itt);
- hdr->ttt = be32_to_cpu(hdr->ttt);
- hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
- hdr->datasn = be32_to_cpu(hdr->datasn);
- hdr->offset = be32_to_cpu(hdr->offset);
+ u32 payload_length = ntoh24(hdr->dlength);
+ int rc;
if (!payload_length) {
- pr_err("DataOUT payload is ZERO, protocol error.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ pr_warn("DataOUT payload is ZERO, ignoring.\n");
+ return 0;
}
/* iSCSI write */
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rx_data_octets += payload_length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_add(payload_length, &conn->sess->rx_data_octets);
- if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("DataSegmentLength: %u is greater than"
- " MaxRecvDataSegmentLength: %u\n", payload_length,
- conn->conn_ops->MaxRecvDataSegmentLength);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ " MaxXmitDataSegmentLength: %u\n", payload_length,
+ conn->conn_ops->MaxXmitDataSegmentLength);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
}
cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
@@ -1285,7 +1296,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
- hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
+ hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
payload_length, conn->cid);
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
@@ -1298,18 +1309,16 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
if (cmd->data_direction != DMA_TO_DEVICE) {
pr_err("Command ITT: 0x%08x received DataOUT for a"
" NON-WRITE command.\n", cmd->init_task_tag);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
}
se_cmd = &cmd->se_cmd;
iscsit_mod_dataout_timer(cmd);
- if ((hdr->offset + payload_length) > cmd->data_length) {
+ if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
pr_err("DataOut Offset: %u, Length %u greater than"
" iSCSI Command EDTL %u, protocol error.\n",
- hdr->offset, payload_length, cmd->data_length);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 0, buf, cmd);
+ hdr->offset, payload_length, cmd->se_cmd.data_length);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
if (cmd->unsolicited_data) {
@@ -1329,15 +1338,9 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
*/
/* Something's amiss if we're not in WRITE_PENDING state... */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+ if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
dump_unsolicited_data = 1;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
if (dump_unsolicited_data) {
/*
@@ -1378,16 +1381,31 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
* Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
* within-command recovery checks before receiving the payload.
*/
- ret = iscsit_check_pre_dataout(cmd, buf);
- if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
+ rc = iscsit_check_pre_dataout(cmd, buf);
+ if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
return 0;
- else if (ret == DATAOUT_CANNOT_RECOVER)
+ else if (rc == DATAOUT_CANNOT_RECOVER)
return -1;
+ *out_cmd = cmd;
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_check_dataout_hdr);
+
+static int
+iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_data *hdr)
+{
+ struct kvec *iov;
+ u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
+ u32 payload_length = ntoh24(hdr->dlength);
+ int iov_ret, data_crc_failed = 0;
+
rx_size += payload_length;
iov = &cmd->iov_data[0];
- iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
+ iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
+ payload_length);
if (iov_ret < 0)
return -1;
@@ -1418,7 +1436,8 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
u32 data_crc;
data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
- hdr->offset, payload_length, padding,
+ be32_to_cpu(hdr->offset),
+ payload_length, padding,
cmd->pad_bytes);
if (checksum != data_crc) {
@@ -1434,17 +1453,27 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
payload_length);
}
}
+
+ return data_crc_failed;
+}
+
+int
+iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
+ bool data_crc_failed)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ int rc, ooo_cmdsn;
/*
* Increment post receive data and CRC values or perform
* within-command recovery.
*/
- ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
- if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
+ rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
+ if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
return 0;
- else if (ret == DATAOUT_SEND_R2T) {
+ else if (rc == DATAOUT_SEND_R2T) {
iscsit_set_dataout_sequence_values(cmd);
- iscsit_build_r2ts_for_cmd(cmd, conn, 0);
- } else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
+ conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
+ } else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
/*
* Handle extra special case for out of order
* Unsolicited Data Out.
@@ -1456,53 +1485,81 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_unlock_bh(&cmd->istate_lock);
iscsit_stop_dataout_timer(cmd);
- return (!ooo_cmdsn) ? transport_generic_handle_data(
- &cmd->se_cmd) : 0;
+ if (ooo_cmdsn)
+ return 0;
+ target_execute_cmd(&cmd->se_cmd);
+ return 0;
} else /* DATAOUT_CANNOT_RECOVER */
return -1;
return 0;
}
+EXPORT_SYMBOL(iscsit_check_dataout_payload);
-static int iscsit_handle_nop_out(
- struct iscsi_conn *conn,
- unsigned char *buf)
+static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
{
- unsigned char *ping_data = NULL;
- int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
- u32 checksum, data_crc, padding = 0, payload_length;
- u64 lun;
struct iscsi_cmd *cmd = NULL;
- struct kvec *iov = NULL;
- struct iscsi_nopout *hdr;
+ struct iscsi_data *hdr = (struct iscsi_data *)buf;
+ int rc;
+ bool data_crc_failed = false;
- hdr = (struct iscsi_nopout *) buf;
- payload_length = ntoh24(hdr->dlength);
- lun = get_unaligned_le64(&hdr->lun);
- hdr->itt = be32_to_cpu(hdr->itt);
- hdr->ttt = be32_to_cpu(hdr->ttt);
- hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
- hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
+ if (rc < 0)
+ return 0;
+ else if (!cmd)
+ return 0;
+
+ rc = iscsit_get_dataout(conn, cmd, hdr);
+ if (rc < 0)
+ return rc;
+ else if (rc > 0)
+ data_crc_failed = true;
+
+ return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
+}
- if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_nopout *hdr)
+{
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+ pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ }
+
+ if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
" not set, protocol error.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
}
- if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
- " greater than MaxRecvDataSegmentLength: %u, protocol"
+ " greater than MaxXmitDataSegmentLength: %u, protocol"
" error.\n", payload_length,
- conn->conn_ops->MaxRecvDataSegmentLength);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ conn->conn_ops->MaxXmitDataSegmentLength);
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
}
- pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
+ pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
- (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
+ hdr->itt == RESERVED_ITT ? "Response" : "Request",
hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
payload_length);
/*
@@ -1512,26 +1569,99 @@ static int iscsit_handle_nop_out(
* Either way, make sure we allocate an struct iscsi_cmd, as both
* can contain ping data.
*/
- if (hdr->ttt == 0xFFFFFFFF) {
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
- if (!cmd)
- return iscsit_add_reject(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
-
+ if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
cmd->i_state = ISTATE_SEND_NOPIN;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1 : 0);
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
cmd->targ_xfer_tag = 0xFFFFFFFF;
- cmd->cmd_sn = hdr->cmdsn;
- cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
}
- if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
- rx_size = payload_length;
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_setup_nop_out);
+
+int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_nopout *hdr)
+{
+ struct iscsi_cmd *cmd_p = NULL;
+ int cmdsn_ret = 0;
+ /*
+ * Initiator is expecting a NopIN ping reply..
+ */
+ if (hdr->itt != RESERVED_ITT) {
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ iscsit_add_cmd_to_response_queue(cmd, conn,
+ cmd->i_state);
+ return 0;
+ }
+
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ return 0;
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+
+ return 0;
+ }
+ /*
+ * This was a response to an unsolicited NOPIN ping.
+ */
+ if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+ cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
+ if (!cmd_p)
+ return -EINVAL;
+
+ iscsit_stop_nopin_response_timer(conn);
+
+ cmd_p->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
+
+ iscsit_start_nopin_timer(conn);
+ return 0;
+ }
+ /*
+ * Otherwise, initiator is not expecting a NOPIN in response.
+ * Just ignore for now.
+ */
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_process_nop_out);
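
iscsit_process_nop_out() above distinguishes the three NOP-OUT cases from RFC 3720 section 10.18: an initiator ping that expects a NOP-IN reply, a reply to one of the target's own unsolicited NOP-IN pings, and a bare ExpStatSN confirmation. A condensed sketch of that dispatch (hypothetical helper, return values illustrative only):

#include <linux/types.h>
#include <scsi/iscsi_proto.h>

static int example_nopout_disposition(struct iscsi_nopout *hdr)
{
	if (hdr->itt != RESERVED_ITT)
		return 1;	/* ping request: queue a NOP-IN reply */
	if (hdr->ttt != cpu_to_be32(0xFFFFFFFF))
		return 2;	/* response to our unsolicited NOP-IN */
	return 0;		/* ExpStatSN update only: nothing to send */
}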
+
+static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ unsigned char *ping_data = NULL;
+ struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
+ struct kvec *iov = NULL;
+ u32 payload_length = ntoh24(hdr->dlength);
+ int ret;
+
+ ret = iscsit_setup_nop_out(conn, cmd, hdr);
+ if (ret < 0)
+ return 0;
+ /*
+ * Handle NOP-OUT payload for traditional iSCSI sockets
+ */
+ if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ u32 checksum, data_crc, padding = 0;
+ int niov = 0, rx_got, rx_size = payload_length;
+
ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
if (!ping_data) {
pr_err("Unable to allocate memory for"
@@ -1610,97 +1740,27 @@ static int iscsit_handle_nop_out(
pr_debug("Ping Data: \"%s\"\n", ping_data);
}
- if (hdr->itt != 0xFFFFFFFF) {
- if (!cmd) {
- pr_err("Checking CmdSN for NOPOUT,"
- " but cmd is NULL!\n");
- return -1;
- }
- /*
- * Initiator is expecting a NopIN ping reply,
- */
- spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
- spin_unlock_bh(&conn->cmd_lock);
-
- iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
-
- if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
- iscsit_add_cmd_to_response_queue(cmd, conn,
- cmd->i_state);
- return 0;
- }
-
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
- ret = 0;
- goto ping_out;
- }
- if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
-
- return 0;
- }
-
- if (hdr->ttt != 0xFFFFFFFF) {
- /*
- * This was a response to a unsolicited NOPIN ping.
- */
- cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
- if (!cmd)
- return -1;
-
- iscsit_stop_nopin_response_timer(conn);
-
- cmd->i_state = ISTATE_REMOVE;
- iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
- iscsit_start_nopin_timer(conn);
- } else {
- /*
- * Initiator is not expecting a NOPIN is response.
- * Just ignore for now.
- *
- * iSCSI v19-91 10.18
- * "A NOP-OUT may also be used to confirm a changed
- * ExpStatSN if another PDU will not be available
- * for a long time."
- */
- ret = 0;
- goto out;
- }
-
- return 0;
+ return iscsit_process_nop_out(conn, cmd, hdr);
out:
if (cmd)
- iscsit_release_cmd(cmd);
-ping_out:
+ iscsit_free_cmd(cmd, false);
+
kfree(ping_data);
return ret;
}
-static int iscsit_handle_task_mgt_cmd(
- struct iscsi_conn *conn,
- unsigned char *buf)
+int
+iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf)
{
- struct iscsi_cmd *cmd;
struct se_tmr_req *se_tmr;
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
- u32 payload_length;
- int out_of_order_cmdsn = 0;
- int ret;
+ int out_of_order_cmdsn = 0, ret;
+ bool sess_ref = false;
u8 function;
hdr = (struct iscsi_tm *) buf;
- payload_length = ntoh24(hdr->dlength);
- hdr->itt = be32_to_cpu(hdr->itt);
- hdr->rtt = be32_to_cpu(hdr->rtt);
- hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
- hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
- hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
- hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
function = hdr->flags;
@@ -1711,9 +1771,9 @@ static int iscsit_handle_task_mgt_cmd(
if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
- (hdr->rtt != ISCSI_RESERVED_TAG))) {
+ hdr->rtt != RESERVED_ITT)) {
pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
- hdr->rtt = ISCSI_RESERVED_TAG;
+ hdr->rtt = RESERVED_ITT;
}
if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
@@ -1721,35 +1781,95 @@ static int iscsit_handle_task_mgt_cmd(
pr_err("Task Management Request TASK_REASSIGN not"
" issued as immediate command, bad iSCSI Initiator"
"implementation\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
- (hdr->refcmdsn != ISCSI_RESERVED_TAG))
- hdr->refcmdsn = ISCSI_RESERVED_TAG;
+ be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
+ hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
- cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
- if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ cmd->data_direction = DMA_NONE;
+
+ cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+ if (!cmd->tmr_req) {
+ pr_err("Unable to allocate memory for"
+ " Task Management command!\n");
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ buf);
+ }
+
+ /*
+ * TASK_REASSIGN for ERL=2 / connection stays inside of
+ * LIO-Target $FABRIC_MOD
+ */
+ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+
+ u8 tcm_function;
+ int ret;
+
+ transport_init_se_cmd(&cmd->se_cmd,
+ &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
+
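+		/* Hold a session command reference so the TMR can complete safely against session shutdown */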
+ target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+ sess_ref = true;
+
+ switch (function) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ tcm_function = TMR_ABORT_TASK;
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ tcm_function = TMR_ABORT_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ tcm_function = TMR_CLEAR_ACA;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ tcm_function = TMR_CLEAR_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ tcm_function = TMR_LUN_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ tcm_function = TMR_TARGET_WARM_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ tcm_function = TMR_TARGET_COLD_RESET;
+ break;
+ default:
+ pr_err("Unknown iSCSI TMR Function:"
+ " 0x%02x\n", function);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
+
+ ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
+ tcm_function, GFP_KERNEL);
+ if (ret < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+
+ cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
+ }
cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
cmd->init_task_tag = hdr->itt;
cmd->targ_xfer_tag = 0xFFFFFFFF;
- cmd->cmd_sn = hdr->cmdsn;
- cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
se_tmr = cmd->se_cmd.se_tmr_req;
tmr_req = cmd->tmr_req;
/*
* Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
- ret = iscsit_get_lun_for_tmr(cmd,
- get_unaligned_le64(&hdr->lun));
+ ret = transport_lookup_tmr_lun(&cmd->se_cmd,
+ scsilun_to_int(&hdr->lun));
if (ret < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
goto attach;
}
@@ -1758,10 +1878,8 @@ static int iscsit_handle_task_mgt_cmd(
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
- if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ if (se_tmr->response)
goto attach;
- }
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
case ISCSI_TM_FUNC_CLEAR_ACA:
@@ -1770,14 +1888,12 @@ static int iscsit_handle_task_mgt_cmd(
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
@@ -1788,18 +1904,16 @@ static int iscsit_handle_task_mgt_cmd(
* Perform sanity checks on the ExpDataSN only if the
* TASK_REASSIGN was successful.
*/
- if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+ if (se_tmr->response)
break;
if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
- buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
break;
default:
pr_err("Unknown TMR function: 0x%02x, protocol"
" error.\n", function);
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
goto attach;
}
@@ -1809,21 +1923,19 @@ static int iscsit_handle_task_mgt_cmd(
se_tmr->call_transport = 1;
attach:
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
- int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
out_of_order_cmdsn = 1;
else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
return 0;
else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return -1;
}
- iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+ iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
return 0;
@@ -1841,54 +1953,142 @@ attach:
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
+ if (sess_ref) {
+ pr_debug("Handle TMR, using sess_ref=true check\n");
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ }
+
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
+EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
/* #warning FIXME: Support Text Command parameters besides SendTargets */
-static int iscsit_handle_text_cmd(
- struct iscsi_conn *conn,
- unsigned char *buf)
+int
+iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_text *hdr)
{
- char *text_ptr, *text_in;
- int cmdsn_ret, niov = 0, rx_got, rx_size;
- u32 checksum = 0, data_crc = 0, payload_length;
- u32 padding = 0, pad_bytes = 0, text_length = 0;
- struct iscsi_cmd *cmd;
- struct kvec iov[3];
- struct iscsi_text *hdr;
-
- hdr = (struct iscsi_text *) buf;
- payload_length = ntoh24(hdr->dlength);
- hdr->itt = be32_to_cpu(hdr->itt);
- hdr->ttt = be32_to_cpu(hdr->ttt);
- hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
- hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ u32 payload_length = ntoh24(hdr->dlength);
- if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("Unable to accept text parameter length: %u"
- "greater than MaxRecvDataSegmentLength %u.\n",
- payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ "greater than MaxXmitDataSegmentLength %u.\n",
+ payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ }
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+ (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+ pr_err("Multi sequence text commands currently not supported\n");
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+ (unsigned char *)hdr);
}
pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
hdr->exp_statsn, payload_length);
- rx_size = text_length = payload_length;
- if (text_length) {
- text_in = kzalloc(text_length, GFP_KERNEL);
+ cmd->iscsi_opcode = ISCSI_OP_TEXT;
+ cmd->i_state = ISTATE_SEND_TEXTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
+ cmd->data_direction = DMA_NONE;
+
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_setup_text_cmd);
+
+int
+iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_text *hdr)
+{
+ unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
+ int cmdsn_ret;
+
+ if (!text_in) {
+ pr_err("Unable to locate text_in buffer for sendtargets"
+ " discovery\n");
+ goto reject;
+ }
+ if (strncmp("SendTargets", text_in, 11) != 0) {
+ pr_err("Received Text Data that is not"
+ " SendTargets, cannot continue.\n");
+ goto reject;
+ }
+ text_ptr = strchr(text_in, '=');
+ if (!text_ptr) {
+ pr_err("No \"=\" separator found in Text Data,"
+ " cannot continue.\n");
+ goto reject;
+ }
+ if (!strncmp("=All", text_ptr, 4)) {
+ cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+ } else if (!strncmp("=iqn.", text_ptr, 5) ||
+ !strncmp("=eui.", text_ptr, 5)) {
+ cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+ } else {
+ pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
+ goto reject;
+ }
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+
+ if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+
+ return 0;
+ }
+
+ return iscsit_execute_cmd(cmd, 0);
+
+reject:
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+}
+EXPORT_SYMBOL(iscsit_process_text_cmd);
+
+static int
+iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_text *hdr = (struct iscsi_text *)buf;
+ char *text_in = NULL;
+ u32 payload_length = ntoh24(hdr->dlength);
+ int rx_size, rc;
+
+ rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return 0;
+
+ rx_size = payload_length;
+ if (payload_length) {
+ u32 checksum = 0, data_crc = 0;
+ u32 padding = 0, pad_bytes = 0;
+ int niov = 0, rx_got;
+ struct kvec iov[3];
+
+ text_in = kzalloc(payload_length, GFP_KERNEL);
if (!text_in) {
pr_err("Unable to allocate memory for"
" incoming text parameters\n");
- return -1;
+ goto reject;
}
+ cmd->text_in_ptr = text_in;
memset(iov, 0, 3 * sizeof(struct kvec));
iov[niov].iov_base = text_in;
- iov[niov++].iov_len = text_length;
+ iov[niov++].iov_len = payload_length;
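+		/* (-len) & 3 computes the 0-3 pad bytes needed to align the segment to a 4-byte boundary */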
padding = ((-payload_length) & 3);
if (padding != 0) {
@@ -1905,14 +2105,12 @@ static int iscsit_handle_text_cmd(
}
rx_got = rx_data(conn, &iov[0], niov, rx_size);
- if (rx_got != rx_size) {
- kfree(text_in);
- return -1;
- }
+ if (rx_got != rx_size)
+ goto reject;
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
- text_in, text_length,
+ text_in, payload_length,
padding, (u8 *)&pad_bytes,
(u8 *)&data_crc);
@@ -1924,8 +2122,7 @@ static int iscsit_handle_text_cmd(
pr_err("Unable to recover from"
" Text Data digest failure while in"
" ERL=0.\n");
- kfree(text_in);
- return -1;
+ goto reject;
} else {
/*
* Silently drop this PDU and let the
@@ -1940,68 +2137,22 @@ static int iscsit_handle_text_cmd(
} else {
pr_debug("Got CRC32C DataDigest"
" 0x%08x for %u bytes of text data.\n",
- checksum, text_length);
+ checksum, payload_length);
}
}
- text_in[text_length - 1] = '\0';
+ text_in[payload_length - 1] = '\0';
pr_debug("Successfully read %d bytes of text"
- " data.\n", text_length);
-
- if (strncmp("SendTargets", text_in, 11) != 0) {
- pr_err("Received Text Data that is not"
- " SendTargets, cannot continue.\n");
- kfree(text_in);
- return -1;
- }
- text_ptr = strchr(text_in, '=');
- if (!text_ptr) {
- pr_err("No \"=\" separator found in Text Data,"
- " cannot continue.\n");
- kfree(text_in);
- return -1;
- }
- if (strncmp("=All", text_ptr, 4) != 0) {
- pr_err("Unable to locate All value for"
- " SendTargets key, cannot continue.\n");
- kfree(text_in);
- return -1;
- }
-/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
- kfree(text_in);
+ " data.\n", payload_length);
}
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
- if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ return iscsit_process_text_cmd(conn, cmd, hdr);
- cmd->iscsi_opcode = ISCSI_OP_TEXT;
- cmd->i_state = ISTATE_SEND_TEXTRSP;
- cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
- conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
- cmd->targ_xfer_tag = 0xFFFFFFFF;
- cmd->cmd_sn = hdr->cmdsn;
- cmd->exp_stat_sn = hdr->exp_statsn;
- cmd->data_direction = DMA_NONE;
-
- spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
- spin_unlock_bh(&conn->cmd_lock);
-
- iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
-
- if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
-
- return 0;
- }
-
- return iscsit_execute_cmd(cmd, 0);
+reject:
+ kfree(cmd->text_in_ptr);
+ cmd->text_in_ptr = NULL;
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
+EXPORT_SYMBOL(iscsit_handle_text_cmd);
int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
@@ -2110,22 +2261,17 @@ int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn
return 0;
}
-static int iscsit_handle_logout_cmd(
- struct iscsi_conn *conn,
- unsigned char *buf)
+int
+iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf)
{
int cmdsn_ret, logout_remove = 0;
u8 reason_code = 0;
- struct iscsi_cmd *cmd;
struct iscsi_logout *hdr;
struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
hdr = (struct iscsi_logout *) buf;
reason_code = (hdr->flags & 0x7f);
- hdr->itt = be32_to_cpu(hdr->itt);
- hdr->cid = be16_to_cpu(hdr->cid);
- hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
- hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
if (tiqn) {
spin_lock(&tiqn->logout_stats.lock);
@@ -2144,22 +2290,18 @@ static int iscsit_handle_logout_cmd(
if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
pr_err("Received logout request on connection that"
" is not in logged in state, ignoring request.\n");
+ iscsit_free_cmd(cmd, false);
return 0;
}
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
- if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
- buf, conn);
-
cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
cmd->i_state = ISTATE_SEND_LOGOUTRSP;
cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
cmd->targ_xfer_tag = 0xFFFFFFFF;
- cmd->cmd_sn = hdr->cmdsn;
- cmd->exp_stat_sn = hdr->exp_statsn;
- cmd->logout_cid = hdr->cid;
+ cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
+ cmd->logout_cid = be16_to_cpu(hdr->cid);
cmd->logout_reason = reason_code;
cmd->data_direction = DMA_NONE;
@@ -2169,56 +2311,45 @@ static int iscsit_handle_logout_cmd(
*/
if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
- (hdr->cid == conn->cid)))
+ be16_to_cpu(hdr->cid) == conn->cid))
logout_remove = 1;
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
- iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+ iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
/*
* Immediate commands are executed, well, immediately.
* Non-Immediate Logout Commands are executed in CmdSN order.
*/
- if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ if (cmd->immediate_cmd) {
int ret = iscsit_execute_cmd(cmd, 0);
if (ret < 0)
return ret;
} else {
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
logout_remove = 0;
- } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
- }
+ else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
}
return logout_remove;
}
+EXPORT_SYMBOL(iscsit_handle_logout_cmd);
static int iscsit_handle_snack(
struct iscsi_conn *conn,
unsigned char *buf)
{
- u32 unpacked_lun;
- u64 lun;
struct iscsi_snack *hdr;
hdr = (struct iscsi_snack *) buf;
hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
- lun = get_unaligned_le64(&hdr->lun);
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
- hdr->itt = be32_to_cpu(hdr->itt);
- hdr->ttt = be32_to_cpu(hdr->ttt);
- hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
- hdr->begrun = be32_to_cpu(hdr->begrun);
- hdr->runlength = be32_to_cpu(hdr->runlength);
pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
@@ -2228,8 +2359,8 @@ static int iscsit_handle_snack(
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Initiator sent SNACK request while in"
" ErrorRecoveryLevel=0.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
}
/*
* SNACK_DATA and SNACK_R2T are both 0, so check which function to
@@ -2238,23 +2369,28 @@ static int iscsit_handle_snack(
switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
case 0:
return iscsit_handle_recovery_datain_or_r2t(conn, buf,
- hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
+ hdr->itt,
+ be32_to_cpu(hdr->ttt),
+ be32_to_cpu(hdr->begrun),
+ be32_to_cpu(hdr->runlength));
case ISCSI_FLAG_SNACK_TYPE_STATUS:
- return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
- hdr->begrun, hdr->runlength);
+ return iscsit_handle_status_snack(conn, hdr->itt,
+ be32_to_cpu(hdr->ttt),
+ be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
- return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
- hdr->runlength);
+ return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
+ be32_to_cpu(hdr->begrun),
+ be32_to_cpu(hdr->runlength));
case ISCSI_FLAG_SNACK_TYPE_RDATA:
/* FIXME: Support R-Data SNACK */
pr_err("R-Data SNACK Not Supported.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
default:
pr_err("Unknown SNACK type 0x%02x, protocol"
" error.\n", hdr->flags & 0x0f);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
}
return 0;
@@ -2272,7 +2408,7 @@ static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
static int iscsit_handle_immediate_data(
struct iscsi_cmd *cmd,
- unsigned char *buf,
+ struct iscsi_scsi_req *hdr,
u32 length)
{
int iov_ret, rx_got = 0, rx_size = 0;
@@ -2326,14 +2462,14 @@ static int iscsit_handle_immediate_data(
pr_err("Unable to recover from"
" Immediate Data digest failure while"
" in ERL=0.\n");
- iscsit_add_reject_from_cmd(
+ iscsit_reject_cmd(cmd,
ISCSI_REASON_DATA_DIGEST_ERROR,
- 1, 0, buf, cmd);
+ (unsigned char *)hdr);
return IMMEDIATE_DATA_CANNOT_RECOVER;
} else {
- iscsit_add_reject_from_cmd(
+ iscsit_reject_cmd(cmd,
ISCSI_REASON_DATA_DIGEST_ERROR,
- 0, 0, buf, cmd);
+ (unsigned char *)hdr);
return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
}
} else {
@@ -2345,7 +2481,7 @@ static int iscsit_handle_immediate_data(
cmd->write_data_done += length;
- if (cmd->write_data_done == cmd->data_length) {
+ if (cmd->write_data_done == cmd->se_cmd.data_length) {
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -2364,6 +2500,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd;
struct iscsi_conn *conn_p;
+ bool found = false;
/*
	 * Only send an Asynchronous Message on connections whose network
@@ -2372,14 +2509,15 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
iscsit_inc_conn_usage_count(conn_p);
+ found = true;
break;
}
}
- if (!conn_p)
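+	/* list_for_each_entry() never leaves conn_p NULL, hence the explicit found flag */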
+ if (!found)
return;
- cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
@@ -2390,7 +2528,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
cmd->i_state = ISTATE_SEND_ASYNCMSG;
spin_lock_bh(&conn_p->cmd_lock);
- list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
spin_unlock_bh(&conn_p->cmd_lock);
iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
@@ -2409,7 +2547,7 @@ static int iscsit_send_conn_drop_async_message(
hdr = (struct iscsi_async *) cmd->pdu;
hdr->opcode = ISCSI_OP_ASYNC_EVENT;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
- cmd->init_task_tag = 0xFFFFFFFF;
+ cmd->init_task_tag = RESERVED_ITT;
cmd->targ_xfer_tag = 0xFFFFFFFF;
put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
cmd->stat_sn = conn->stat_sn++;
@@ -2424,9 +2562,8 @@ static int iscsit_send_conn_drop_async_message(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->tx_size += ISCSI_CRC_LEN;
pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2443,17 +2580,70 @@ static int iscsit_send_conn_drop_async_message(
return 0;
}
-static int iscsit_send_data_in(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn,
- int *eodr)
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
{
- int iov_ret = 0, set_statsn = 0;
- u32 iov_count = 0, tx_size = 0;
+ if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+ wait_for_completion_interruptible_timeout(
+ &conn->tx_half_close_comp,
+ ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+ }
+}
+
+static void
+iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+ struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
+ bool set_statsn)
+{
+ hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
+ hdr->flags = datain->flags;
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+ } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+ }
+ }
+ hton24(hdr->dlength, datain->length);
+ if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+ int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+ (struct scsi_lun *)&hdr->lun);
+ else
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+
+ hdr->itt = cmd->init_task_tag;
+
+ if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ else
+ hdr->ttt = cpu_to_be32(0xFFFFFFFF);
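+	/* StatSN is valid only when this Data-In also carries status; otherwise it stays reserved */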
+ if (set_statsn)
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ else
+ hdr->statsn = cpu_to_be32(0xFFFFFFFF);
+
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->datasn = cpu_to_be32(datain->data_sn);
+ hdr->offset = cpu_to_be32(datain->offset);
+
+ pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
+ " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+ cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
+ ntohl(hdr->offset), datain->length, conn->cid);
+}
+
+static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
struct iscsi_datain datain;
struct iscsi_datain_req *dr;
- struct iscsi_data_rsp *hdr;
struct kvec *iov;
+ u32 iov_count = 0, tx_size = 0;
+ int eodr = 0, ret, iov_ret;
+ bool set_statsn = false;
memset(&datain, 0, sizeof(struct iscsi_datain));
dr = iscsit_get_datain_values(cmd, &datain);
@@ -2462,26 +2652,18 @@ static int iscsit_send_data_in(
cmd->init_task_tag);
return -1;
}
-
/*
* Be paranoid and double check the logic for now.
*/
- if ((datain.offset + datain.length) > cmd->data_length) {
+ if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x, datain.offset: %u and"
" datain.length: %u exceeds cmd->data_length: %u\n",
cmd->init_task_tag, datain.offset, datain.length,
- cmd->data_length);
+ cmd->se_cmd.data_length);
return -1;
}
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->tx_data_octets += datain.length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_add(datain.length, &conn->sess->tx_data_octets);
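+	/* Session byte counters are atomic_long_t now, so no session_stats_lock is needed */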
/*
* Special case for successfully execution w/ both DATAIN
* and Sense Data.
@@ -2494,42 +2676,13 @@ static int iscsit_send_data_in(
(dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
iscsit_increment_maxcmdsn(cmd, conn->sess);
cmd->stat_sn = conn->stat_sn++;
- set_statsn = 1;
+ set_statsn = true;
} else if (dr->dr_complete ==
- DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
- set_statsn = 1;
- }
-
- hdr = (struct iscsi_data_rsp *) cmd->pdu;
- memset(hdr, 0, ISCSI_HDR_LEN);
- hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
- hdr->flags = datain.flags;
- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
- hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
- } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
- hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
- }
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
+ set_statsn = true;
}
- hton24(hdr->dlength, datain.length);
- if (hdr->flags & ISCSI_FLAG_DATA_ACK)
- int_to_scsilun(cmd->se_cmd.orig_fe_lun,
- (struct scsi_lun *)&hdr->lun);
- else
- put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
- hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
- cpu_to_be32(cmd->targ_xfer_tag) :
- 0xFFFFFFFF;
- hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
- 0xFFFFFFFF;
- hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
- hdr->datasn = cpu_to_be32(datain.data_sn);
- hdr->offset = cpu_to_be32(datain.offset);
+ iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
iov = &cmd->iov_data[0];
iov[iov_count].iov_base = cmd->pdu;
@@ -2539,9 +2692,8 @@ static int iscsit_send_data_in(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -2550,7 +2702,8 @@ static int iscsit_send_data_in(
" for DataIN PDU 0x%08x\n", *header_digest);
}
- iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+ datain.offset, datain.length);
if (iov_ret < 0)
return -1;
@@ -2581,30 +2734,35 @@ static int iscsit_send_data_in(
cmd->iov_data_count = iov_count;
cmd->tx_size = tx_size;
- pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
- " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
- cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
- ntohl(hdr->offset), datain.length, conn->cid);
+ /* sendpage is preferred but can't insert markers */
+ if (!conn->conn_ops->IFMarker)
+ ret = iscsit_fe_sendpage_sg(cmd, conn);
+ else
+ ret = iscsit_send_tx_data(cmd, conn, 0);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
if (dr->dr_complete) {
- *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+ eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2 : 1;
iscsit_free_datain_req(cmd, dr);
}
- return 0;
+ return eodr;
}
-static int iscsit_send_logout_response(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+int
+iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+ struct iscsi_logout_rsp *hdr)
{
- int niov = 0, tx_size;
struct iscsi_conn *logout_conn = NULL;
struct iscsi_conn_recovery *cr = NULL;
struct iscsi_session *sess = conn->sess;
- struct kvec *iov;
- struct iscsi_logout_rsp *hdr;
/*
* The actual shutting down of Sessions and/or Connections
* for CLOSESESSION and CLOSECONNECTION Logout Requests
@@ -2645,7 +2803,7 @@ static int iscsit_send_logout_response(
*/
logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
cmd->logout_cid);
- if ((logout_conn)) {
+ if (logout_conn) {
iscsit_connection_reinstatement_rcfr(logout_conn);
iscsit_dec_conn_usage_count(logout_conn);
}
@@ -2673,13 +2831,10 @@ static int iscsit_send_logout_response(
return -1;
}
- tx_size = ISCSI_HDR_LEN;
- hdr = (struct iscsi_logout_rsp *)cmd->pdu;
- memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_LOGOUT_RSP;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
hdr->response = cmd->logout_response;
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->itt = cmd->init_task_tag;
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
@@ -2687,6 +2842,27 @@ static int iscsit_send_logout_response(
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
+ " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response,
+ cmd->logout_cid, conn->cid);
+
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_build_logout_rsp);
+
+static int
+iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct kvec *iov;
+ int niov = 0, tx_size, rc;
+
+ rc = iscsit_build_logout_rsp(cmd, conn,
+ (struct iscsi_logout_rsp *)&cmd->pdu[0]);
+ if (rc < 0)
+ return rc;
+
+ tx_size = ISCSI_HDR_LEN;
iov = &cmd->iov_misc[0];
iov[niov].iov_base = cmd->pdu;
iov[niov++].iov_len = ISCSI_HDR_LEN;
@@ -2694,9 +2870,8 @@ static int iscsit_send_logout_response(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -2706,14 +2881,37 @@ static int iscsit_send_logout_response(
cmd->iov_misc_count = niov;
cmd->tx_size = tx_size;
- pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
- " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
- cmd->init_task_tag, cmd->stat_sn, hdr->response,
- cmd->logout_cid, conn->cid);
-
return 0;
}
+void
+iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+ struct iscsi_nopin *hdr, bool nopout_response)
+{
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, cmd->buf_ptr_size);
+ if (nopout_response)
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+ hdr->itt = cmd->init_task_tag;
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
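+	/* Only a solicited reply to an initiator NOP-OUT consumes a new StatSN */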
+ cmd->stat_sn = (nopout_response) ? conn->stat_sn++ :
+ conn->stat_sn;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ if (nopout_response)
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
+ " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
+ "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+ cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
+}
+EXPORT_SYMBOL(iscsit_build_nopin_rsp);
+
/*
* Unsolicited NOPIN, either requesting a response or not.
*/
@@ -2722,26 +2920,16 @@ static int iscsit_send_unsolicited_nopin(
struct iscsi_conn *conn,
int want_response)
{
- int tx_size = ISCSI_HDR_LEN;
- struct iscsi_nopin *hdr;
+ struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
+ int tx_size = ISCSI_HDR_LEN, ret;
- hdr = (struct iscsi_nopin *) cmd->pdu;
- memset(hdr, 0, ISCSI_HDR_LEN);
- hdr->opcode = ISCSI_OP_NOOP_IN;
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
- hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
- cmd->stat_sn = conn->stat_sn;
- hdr->statsn = cpu_to_be32(cmd->stat_sn);
- hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ iscsit_build_nopin_rsp(cmd, conn, hdr, false);
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
tx_size += ISCSI_CRC_LEN;
pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2756,34 +2944,31 @@ static int iscsit_send_unsolicited_nopin(
pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
+ ret = iscsit_send_tx_data(cmd, conn, 1);
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->i_state = want_response ?
+ ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+
return 0;
}
-static int iscsit_send_nopin_response(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+static int
+iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
- int niov = 0, tx_size;
- u32 padding = 0;
+ struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
struct kvec *iov;
- struct iscsi_nopin *hdr;
-
- tx_size = ISCSI_HDR_LEN;
- hdr = (struct iscsi_nopin *) cmd->pdu;
- memset(hdr, 0, ISCSI_HDR_LEN);
- hdr->opcode = ISCSI_OP_NOOP_IN;
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- hton24(hdr->dlength, cmd->buf_ptr_size);
- put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
- hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
- cmd->stat_sn = conn->stat_sn++;
- hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ u32 padding = 0;
+ int niov = 0, tx_size;
- iscsit_increment_maxcmdsn(cmd, conn->sess);
- hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ iscsit_build_nopin_rsp(cmd, conn, hdr, true);
+ tx_size = ISCSI_HDR_LEN;
iov = &cmd->iov_misc[0];
iov[niov].iov_base = cmd->pdu;
iov[niov++].iov_len = ISCSI_HDR_LEN;
@@ -2791,9 +2976,8 @@ static int iscsit_send_nopin_response(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -2839,20 +3023,17 @@ static int iscsit_send_nopin_response(
cmd->iov_misc_count = niov;
cmd->tx_size = tx_size;
- pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
- " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
- cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
-
return 0;
}
-int iscsit_send_r2t(
+static int iscsit_send_r2t(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
int tx_size = 0;
struct iscsi_r2t *r2t;
struct iscsi_r2t_rsp *hdr;
+ int ret;
r2t = iscsit_get_r2t_from_list(cmd);
if (!r2t)
@@ -2864,7 +3045,7 @@ int iscsit_send_r2t(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
int_to_scsilun(cmd->se_cmd.orig_fe_lun,
(struct scsi_lun *)&hdr->lun);
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->itt = cmd->init_task_tag;
spin_lock_bh(&conn->sess->ttt_lock);
r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
if (r2t->targ_xfer_tag == 0xFFFFFFFF)
@@ -2885,9 +3066,8 @@ int iscsit_send_r2t(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -2908,19 +3088,27 @@ int iscsit_send_r2t(
r2t->sent_r2t = 1;
spin_unlock_bh(&cmd->r2t_lock);
+ ret = iscsit_send_tx_data(cmd, conn, 1);
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+
return 0;
}
/*
- * type 0: Normal Operation.
- * type 1: Called from Storage Transport.
- * type 2: Called from iscsi_task_reassign_complete_write() for
- * connection recovery.
+ * @recovery: true when called from iscsi_task_reassign_complete_write() for
+ * connection recovery.
*/
int iscsit_build_r2ts_for_cmd(
- struct iscsi_cmd *cmd,
struct iscsi_conn *conn,
- int type)
+ struct iscsi_cmd *cmd,
+ bool recovery)
{
int first_r2t = 1;
u32 offset = 0, xfer_len = 0;
@@ -2931,32 +3119,37 @@ int iscsit_build_r2ts_for_cmd(
return 0;
}
- if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
- if (cmd->r2t_offset < cmd->write_data_done)
- cmd->r2t_offset = cmd->write_data_done;
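+		/* Never ask the initiator to resend data it has already written */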
+ if (conn->sess->sess_ops->DataSequenceInOrder &&
+ !recovery)
+ cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
if (conn->sess->sess_ops->DataSequenceInOrder) {
offset = cmd->r2t_offset;
- if (first_r2t && (type == 2)) {
- xfer_len = ((offset +
- (conn->sess->sess_ops->MaxBurstLength -
- cmd->next_burst_len) >
- cmd->data_length) ?
- (cmd->data_length - offset) :
- (conn->sess->sess_ops->MaxBurstLength -
- cmd->next_burst_len));
+ if (first_r2t && recovery) {
+ int new_data_end = offset +
+ conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len;
+
+ if (new_data_end > cmd->se_cmd.data_length)
+ xfer_len = cmd->se_cmd.data_length - offset;
+ else
+ xfer_len =
+ conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len;
} else {
- xfer_len = ((offset +
- conn->sess->sess_ops->MaxBurstLength) >
- cmd->data_length) ?
- (cmd->data_length - offset) :
- conn->sess->sess_ops->MaxBurstLength;
+ int new_data_end = offset +
+ conn->sess->sess_ops->MaxBurstLength;
+
+ if (new_data_end > cmd->se_cmd.data_length)
+ xfer_len = cmd->se_cmd.data_length - offset;
+ else
+ xfer_len = conn->sess->sess_ops->MaxBurstLength;
}
cmd->r2t_offset += xfer_len;
- if (cmd->r2t_offset == cmd->data_length)
+ if (cmd->r2t_offset == cmd->se_cmd.data_length)
cmd->cmd_flags |= ICF_SENT_LAST_R2T;
} else {
struct iscsi_seq *seq;
@@ -2989,24 +3182,14 @@ int iscsit_build_r2ts_for_cmd(
return 0;
}
-static int iscsit_send_status(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+ bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
{
- u8 iov_count = 0, recovery;
- u32 padding = 0, tx_size = 0;
- struct iscsi_scsi_rsp *hdr;
- struct kvec *iov;
-
- recovery = (cmd->i_state != ISTATE_SEND_STATUS);
- if (!recovery)
+ if (inc_stat_sn)
cmd->stat_sn = conn->stat_sn++;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rsp_pdus++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->rsp_pdus);
- hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -3019,13 +3202,30 @@ static int iscsit_send_status(
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->itt = cmd->init_task_tag;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
+ " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
		cmd->init_task_tag, cmd->stat_sn, cmd->iscsi_response,
+ cmd->se_cmd.scsi_status, conn->cid);
+}
+EXPORT_SYMBOL(iscsit_build_rsp_pdu);
+
+static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
+ struct kvec *iov;
+ u32 padding = 0, tx_size = 0;
+ int iov_count = 0;
+ bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
+
+ iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
+
iov = &cmd->iov_misc[0];
iov[iov_count].iov_base = cmd->pdu;
iov[iov_count++].iov_len = ISCSI_HDR_LEN;
@@ -3037,15 +3237,18 @@ static int iscsit_send_status(
if (cmd->se_cmd.sense_buffer &&
((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
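+		/* Per RFC 3720, sense data in the data segment is preceded by a two-byte SenseLength field */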
+ put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
+		cmd->se_cmd.scsi_sense_length += sizeof(__be16);
+
padding = -(cmd->se_cmd.scsi_sense_length) & 3;
- hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
- iov[iov_count].iov_base = cmd->se_cmd.sense_buffer;
+ hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
+ iov[iov_count].iov_base = cmd->sense_buffer;
iov[iov_count++].iov_len =
(cmd->se_cmd.scsi_sense_length + padding);
tx_size += cmd->se_cmd.scsi_sense_length;
if (padding) {
- memset(cmd->se_cmd.sense_buffer +
+ memset(cmd->sense_buffer +
cmd->se_cmd.scsi_sense_length, 0, padding);
tx_size += padding;
pr_debug("Adding %u bytes of padding to"
@@ -3054,7 +3257,7 @@ static int iscsit_send_status(
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- cmd->se_cmd.sense_buffer,
+ cmd->sense_buffer,
(cmd->se_cmd.scsi_sense_length + padding),
0, NULL, (u8 *)&cmd->data_crc);
@@ -3076,9 +3279,8 @@ static int iscsit_send_status(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -3089,11 +3291,6 @@ static int iscsit_send_status(
cmd->iov_misc_count = iov_count;
cmd->tx_size = tx_size;
- pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
- " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
- (!recovery) ? "" : "Recovery ", cmd->init_task_tag,
- cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);
-
return 0;
}
@@ -3108,28 +3305,22 @@ static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
return ISCSI_TMF_RSP_NO_LUN;
case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
return ISCSI_TMF_RSP_NOT_SUPPORTED;
- case TMR_FUNCTION_AUTHORIZATION_FAILED:
- return ISCSI_TMF_RSP_AUTH_FAILED;
case TMR_FUNCTION_REJECTED:
default:
return ISCSI_TMF_RSP_REJECTED;
}
}
-static int iscsit_send_task_mgt_rsp(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+void
+iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+ struct iscsi_tm_rsp *hdr)
{
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
- struct iscsi_tm_rsp *hdr;
- u32 tx_size = 0;
- hdr = (struct iscsi_tm_rsp *) cmd->pdu;
- memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->itt = cmd->init_task_tag;
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
@@ -3137,6 +3328,20 @@ static int iscsit_send_task_mgt_rsp(
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ pr_debug("Built Task Management Response ITT: 0x%08x,"
+ " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
+}
+EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
+
+static int
+iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
+ u32 tx_size = 0;
+
+ iscsit_build_task_mgt_rsp(cmd, conn, hdr);
+
cmd->iov_misc[0].iov_base = cmd->pdu;
cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
tx_size += ISCSI_HDR_LEN;
@@ -3144,9 +3349,8 @@ static int iscsit_send_task_mgt_rsp(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -3157,10 +3361,6 @@ static int iscsit_send_task_mgt_rsp(
cmd->iov_misc_count = 1;
cmd->tx_size = tx_size;
- pr_debug("Built Task Management Response ITT: 0x%08x,"
- " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
- cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
-
return 0;
}
@@ -3181,14 +3381,18 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
struct sockaddr_in * sock_in =
(struct sockaddr_in *)&np->np_sockaddr;
- if (sock_in->sin_addr.s_addr == INADDR_ANY)
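+		/* s_addr is in network byte order, so compare against htonl(INADDR_ANY) */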
+ if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY))
ret = true;
}
return ret;
}
-static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+#define SENDTARGETS_BUF_LIMIT 32768U
+
+static int
+iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+ enum iscsit_transport_type network_transport)
{
char *payload = NULL;
struct iscsi_conn *conn = cmd->conn;
@@ -3196,12 +3400,12 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_tiqn *tiqn;
struct iscsi_tpg_np *tpg_np;
int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
- unsigned char buf[256];
+ int target_name_printed;
+ unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
+ unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
- buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
- 32768 : conn->conn_ops->MaxRecvDataSegmentLength;
-
- memset(buf, 0, 256);
+ buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+ SENDTARGETS_BUF_LIMIT);
payload = kzalloc(buffer_len, GFP_KERNEL);
if (!payload) {
@@ -3209,23 +3413,48 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
" response.\n");
return -ENOMEM;
}
+ /*
+ * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+ * explicit case..
+ */
+ if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+ text_ptr = strchr(text_in, '=');
+ if (!text_ptr) {
+ pr_err("Unable to locate '=' string in text_in:"
+ " %s\n", text_in);
+ kfree(payload);
+ return -EINVAL;
+ }
+ /*
+ * Skip over '=' character..
+ */
+ text_ptr += 1;
+ }
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
- len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
- len += 1;
-
- if ((len + payload_len) > buffer_len) {
- spin_unlock(&tiqn->tiqn_tpg_lock);
- end_of_buf = 1;
- goto eob;
+ if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+ strcmp(tiqn->tiqn, text_ptr)) {
+ continue;
}
- memcpy(payload + payload_len, buf, len);
- payload_len += len;
+
+ target_name_printed = 0;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+ /* If demo_mode_discovery=0 and generate_node_acls=0
+			 * (demo mode disabled) do not return
+ * TargetName+TargetAddress unless a NodeACL exists.
+ */
+
+ if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+ (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+ (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
+ cmd->conn->sess->sess_ops->InitiatorName))) {
+ continue;
+ }
+
spin_lock(&tpg->tpg_state_lock);
if ((tpg->tpg_state == TPG_STATE_FREE) ||
(tpg->tpg_state == TPG_STATE_INACTIVE)) {
@@ -3240,14 +3469,29 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_np *np = tpg_np->tpg_np;
bool inaddr_any = iscsit_check_inaddr_any(np);
+ if (np->np_network_transport != network_transport)
+ continue;
+
+ if (!target_name_printed) {
+ len = sprintf(buf, "TargetName=%s",
+ tiqn->tiqn);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy(payload + payload_len, buf, len);
+ payload_len += len;
+ target_name_printed = 1;
+ }
+
len = sprintf(buf, "TargetAddress="
- "%s%s%s:%hu,%hu",
- (np->np_sockaddr.ss_family == AF_INET6) ?
- "[" : "", (inaddr_any == false) ?
- np->np_ip : conn->local_ip,
- (np->np_sockaddr.ss_family == AF_INET6) ?
- "]" : "", (inaddr_any == false) ?
- np->np_port : conn->local_port,
+ "%s:%hu,%hu",
+ inaddr_any ? conn->local_ip : np->np_ip,
+ inaddr_any ? conn->local_port : np->np_port,
tpg->tpgt);
len += 1;
@@ -3266,6 +3510,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
eob:
if (end_of_buf)
break;
+
+ if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+ break;
}
spin_unlock(&tiqn_lock);
@@ -3274,6 +3521,38 @@ eob:
return payload_len;
}
+int
+iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+ struct iscsi_text_rsp *hdr,
+ enum iscsit_transport_type network_transport)
+{
+ int text_length, padding;
+
+ text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+ if (text_length < 0)
+ return text_length;
+
+ hdr->opcode = ISCSI_OP_TEXT_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ padding = ((-text_length) & 3);
+ hton24(hdr->dlength, text_length);
+ hdr->itt = cmd->init_task_tag;
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
+ " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
+ text_length, conn->cid);
+
+ return text_length + padding;
+}
+EXPORT_SYMBOL(iscsit_build_text_rsp);
+
/*
* FIXME: Add support for F_BIT and C_BIT when the length is longer than
* MaxRecvDataSegmentLength.
@@ -3282,51 +3561,29 @@ static int iscsit_send_text_rsp(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
- struct iscsi_text_rsp *hdr;
+ struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
struct kvec *iov;
- u32 padding = 0, tx_size = 0;
- int text_length, iov_count = 0;
-
- text_length = iscsit_build_sendtargets_response(cmd);
- if (text_length < 0)
- return text_length;
-
- padding = ((-text_length) & 3);
- if (padding != 0) {
- memset(cmd->buf_ptr + text_length, 0, padding);
- pr_debug("Attaching %u additional bytes for"
- " padding.\n", padding);
- }
-
- hdr = (struct iscsi_text_rsp *) cmd->pdu;
- memset(hdr, 0, ISCSI_HDR_LEN);
- hdr->opcode = ISCSI_OP_TEXT_RSP;
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- hton24(hdr->dlength, text_length);
- hdr->itt = cpu_to_be32(cmd->init_task_tag);
- hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
- cmd->stat_sn = conn->stat_sn++;
- hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ u32 tx_size = 0;
+ int text_length, iov_count = 0, rc;
- iscsit_increment_maxcmdsn(cmd, conn->sess);
- hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
+ if (rc < 0)
+ return rc;
+ text_length = rc;
iov = &cmd->iov_misc[0];
-
iov[iov_count].iov_base = cmd->pdu;
iov[iov_count++].iov_len = ISCSI_HDR_LEN;
iov[iov_count].iov_base = cmd->buf_ptr;
- iov[iov_count++].iov_len = text_length + padding;
+ iov[iov_count++].iov_len = text_length;
- tx_size += (ISCSI_HDR_LEN + text_length + padding);
+ tx_size += (ISCSI_HDR_LEN + text_length);
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -3336,7 +3593,7 @@ static int iscsit_send_text_rsp(
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- cmd->buf_ptr, (text_length + padding),
+ cmd->buf_ptr, text_length,
0, NULL, (u8 *)&cmd->data_crc);
iov[iov_count].iov_base = &cmd->data_crc;
@@ -3344,38 +3601,44 @@ static int iscsit_send_text_rsp(
tx_size += ISCSI_CRC_LEN;
pr_debug("Attaching DataDigest for %u bytes of text"
- " data, CRC 0x%08x\n", (text_length + padding),
+ " data, CRC 0x%08x\n", text_length,
cmd->data_crc);
}
cmd->iov_misc_count = iov_count;
cmd->tx_size = tx_size;
- pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
- " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
- text_length, conn->cid);
return 0;
}
-static int iscsit_send_reject(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+void
+iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+ struct iscsi_reject *hdr)
{
- u32 iov_count = 0, tx_size = 0;
- struct iscsi_reject *hdr;
- struct kvec *iov;
-
- hdr = (struct iscsi_reject *) cmd->pdu;
hdr->opcode = ISCSI_OP_REJECT;
+ hdr->reason = cmd->reject_reason;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
hton24(hdr->dlength, ISCSI_HDR_LEN);
+ hdr->ffffffff = cpu_to_be32(0xffffffff);
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
- hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
- iov = &cmd->iov_misc[0];
+}
+EXPORT_SYMBOL(iscsit_build_reject);
+
+static int iscsit_send_reject(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
+ struct kvec *iov;
+ u32 iov_count = 0, tx_size;
+
+ iscsit_build_reject(cmd, conn, hdr);
+ iov = &cmd->iov_misc[0];
iov[iov_count].iov_base = cmd->pdu;
iov[iov_count++].iov_len = ISCSI_HDR_LEN;
iov[iov_count].iov_base = cmd->buf_ptr;
@@ -3386,9 +3649,8 @@ static int iscsit_send_reject(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)hdr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)header_digest);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -3397,9 +3659,8 @@ static int iscsit_send_reject(
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&cmd->data_crc);
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+ ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
iov[iov_count].iov_base = &cmd->data_crc;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
@@ -3417,18 +3678,6 @@ static int iscsit_send_reject(
return 0;
}
-static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
-{
- if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
- (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
- wait_for_completion_interruptible_timeout(
- &conn->tx_half_close_comp,
- ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
- }
-}
-
-#ifdef CONFIG_SMP
-
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
struct iscsi_thread_set *ts = conn->thread_set;
@@ -3442,10 +3691,6 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
* execute upon.
*/
ord = ts->thread_id % cpumask_weight(cpu_online_mask);
-#if 0
- pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
- " thread_id: %d\n", ord, ts->thread_id);
-#endif
for_each_online_cpu(cpu) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
@@ -3485,35 +3730,215 @@ static inline void iscsit_thread_check_cpumask(
*/
memset(buf, 0, 128);
cpumask_scnprintf(buf, 128, conn->conn_cpumask);
-#if 0
- pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
- " %s for %s\n", buf, p->comm);
-#endif
set_cpus_allowed_ptr(p, conn->conn_cpumask);
}
-#else
+static int
+iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+{
+ int ret;
-void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+ switch (state) {
+ case ISTATE_SEND_R2T:
+ ret = iscsit_send_r2t(cmd, conn);
+ if (ret < 0)
+ goto err;
+ break;
+ case ISTATE_REMOVE:
+ spin_lock_bh(&conn->cmd_lock);
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_free_cmd(cmd, false);
+ break;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ iscsit_mod_nopin_response_timer(conn);
+ ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
+ if (ret < 0)
+ goto err;
+ break;
+ case ISTATE_SEND_NOPIN_NO_RESPONSE:
+ ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
+ if (ret < 0)
+ goto err;
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag, state,
+ conn->cid);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -1;
+}
+
+static int
+iscsit_handle_immediate_queue(struct iscsi_conn *conn)
{
- return;
+ struct iscsit_transport *t = conn->conn_transport;
+ struct iscsi_queue_req *qr;
+ struct iscsi_cmd *cmd;
+ u8 state;
+ int ret;
+
+ while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
+ atomic_set(&conn->check_immediate_queue, 0);
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ ret = t->iscsit_immediate_queue(conn, cmd, state);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
-#define iscsit_thread_check_cpumask(X, Y, Z) ({})
-#endif /* CONFIG_SMP */
+static int
+iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+{
+ int ret;
-int iscsi_target_tx_thread(void *arg)
+check_rsp_state:
+ switch (state) {
+ case ISTATE_SEND_DATAIN:
+ ret = iscsit_send_datain(cmd, conn);
+ if (ret < 0)
+ goto err;
+ else if (!ret)
+ /* more drs: additional Data-IN PDUs remain to send */
+ goto check_rsp_state;
+ else if (ret == 1) {
+ /* all done */
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->i_state = ISTATE_SENT_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (atomic_read(&conn->check_immediate_queue))
+ return 1;
+
+ return 0;
+ } else if (ret == 2) {
+ /* Still must send status,
+ SCF_TRANSPORT_TASK_SENSE was set */
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->i_state = ISTATE_SEND_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ state = ISTATE_SEND_STATUS;
+ goto check_rsp_state;
+ }
+
+ break;
+ case ISTATE_SEND_STATUS:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ ret = iscsit_send_response(cmd, conn);
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ ret = iscsit_send_logout(cmd, conn);
+ break;
+ case ISTATE_SEND_ASYNCMSG:
+ ret = iscsit_send_conn_drop_async_message(
+ cmd, conn);
+ break;
+ case ISTATE_SEND_NOPIN:
+ ret = iscsit_send_nopin(cmd, conn);
+ break;
+ case ISTATE_SEND_REJECT:
+ ret = iscsit_send_reject(cmd, conn);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ ret = iscsit_send_task_mgt_rsp(cmd, conn);
+ if (ret != 0)
+ break;
+ ret = iscsit_tmr_post_handler(cmd, conn);
+ if (ret != 0)
+ iscsit_fall_back_to_erl0(conn->sess);
+ break;
+ case ISTATE_SEND_TEXTRSP:
+ ret = iscsit_send_text_rsp(cmd, conn);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ state, conn->cid);
+ goto err;
+ }
+ if (ret < 0)
+ goto err;
+
+ if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ iscsit_unmap_iovec(cmd);
+ goto err;
+ }
+ iscsit_unmap_iovec(cmd);
+
+ switch (state) {
+ case ISTATE_SEND_LOGOUTRSP:
+ if (!iscsit_logout_post_handler(cmd, conn))
+ goto restart;
+ /* fall through */
+ case ISTATE_SEND_STATUS:
+ case ISTATE_SEND_ASYNCMSG:
+ case ISTATE_SEND_NOPIN:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ case ISTATE_SEND_TEXTRSP:
+ case ISTATE_SEND_TASKMGTRSP:
+ case ISTATE_SEND_REJECT:
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->i_state = ISTATE_SENT_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ cmd->i_state, conn->cid);
+ goto err;
+ }
+
+ if (atomic_read(&conn->check_immediate_queue))
+ return 1;
+
+ return 0;
+
+err:
+ return -1;
+restart:
+ return -EAGAIN;
+}
+
+static int iscsit_handle_response_queue(struct iscsi_conn *conn)
{
+ struct iscsit_transport *t = conn->conn_transport;
+ struct iscsi_queue_req *qr;
+ struct iscsi_cmd *cmd;
u8 state;
- int eodr = 0;
+ int ret;
+
+ while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ ret = t->iscsit_response_queue(conn, cmd, state);
+ if (ret == 1 || ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iscsi_target_tx_thread(void *arg)
+{
int ret = 0;
- int sent_status = 0;
- int use_misc = 0;
- int map_sg = 0;
- struct iscsi_cmd *cmd = NULL;
struct iscsi_conn *conn;
- struct iscsi_queue_req *qr = NULL;
- struct se_cmd *se_cmd;
struct iscsi_thread_set *ts = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
@@ -3526,7 +3951,7 @@ restart:
if (!conn)
goto out;
- eodr = map_sg = ret = sent_status = use_misc = 0;
+ ret = 0;
while (!kthread_should_stop()) {
/*
@@ -3535,266 +3960,112 @@ restart:
*/
iscsit_thread_check_cpumask(conn, current, 1);
- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+ wait_event_interruptible(conn->queues_wq,
+ !iscsit_conn_all_queues_empty(conn) ||
+ ts->status == ISCSI_THREAD_SET_RESET);
if ((ts->status == ISCSI_THREAD_SET_RESET) ||
signal_pending(current))
goto transport_err;
get_immediate:
- qr = iscsit_get_cmd_from_immediate_queue(conn);
- if (qr) {
- atomic_set(&conn->check_immediate_queue, 0);
- cmd = qr->cmd;
- state = qr->state;
- kmem_cache_free(lio_qr_cache, qr);
-
- spin_lock_bh(&cmd->istate_lock);
- switch (state) {
- case ISTATE_SEND_R2T:
- spin_unlock_bh(&cmd->istate_lock);
- ret = iscsit_send_r2t(cmd, conn);
- break;
- case ISTATE_REMOVE:
- spin_unlock_bh(&cmd->istate_lock);
-
- if (cmd->data_direction == DMA_TO_DEVICE)
- iscsit_stop_dataout_timer(cmd);
-
- spin_lock_bh(&conn->cmd_lock);
- list_del(&cmd->i_list);
- spin_unlock_bh(&conn->cmd_lock);
-
- iscsit_free_cmd(cmd);
- goto get_immediate;
- case ISTATE_SEND_NOPIN_WANT_RESPONSE:
- spin_unlock_bh(&cmd->istate_lock);
- iscsit_mod_nopin_response_timer(conn);
- ret = iscsit_send_unsolicited_nopin(cmd,
- conn, 1);
- break;
- case ISTATE_SEND_NOPIN_NO_RESPONSE:
- spin_unlock_bh(&cmd->istate_lock);
- ret = iscsit_send_unsolicited_nopin(cmd,
- conn, 0);
- break;
- default:
- pr_err("Unknown Opcode: 0x%02x ITT:"
- " 0x%08x, i_state: %d on CID: %hu\n",
- cmd->iscsi_opcode, cmd->init_task_tag, state,
- conn->cid);
- spin_unlock_bh(&cmd->istate_lock);
- goto transport_err;
- }
- if (ret < 0) {
- conn->tx_immediate_queue = 0;
- goto transport_err;
- }
-
- if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
- conn->tx_immediate_queue = 0;
- iscsit_tx_thread_wait_for_tcp(conn);
- goto transport_err;
- }
+ ret = iscsit_handle_immediate_queue(conn);
+ if (ret < 0)
+ goto transport_err;
- spin_lock_bh(&cmd->istate_lock);
- switch (state) {
- case ISTATE_SEND_R2T:
- spin_unlock_bh(&cmd->istate_lock);
- spin_lock_bh(&cmd->dataout_timeout_lock);
- iscsit_start_dataout_timer(cmd, conn);
- spin_unlock_bh(&cmd->dataout_timeout_lock);
- break;
- case ISTATE_SEND_NOPIN_WANT_RESPONSE:
- cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
- spin_unlock_bh(&cmd->istate_lock);
- break;
- case ISTATE_SEND_NOPIN_NO_RESPONSE:
- cmd->i_state = ISTATE_SENT_STATUS;
- spin_unlock_bh(&cmd->istate_lock);
- break;
- default:
- pr_err("Unknown Opcode: 0x%02x ITT:"
- " 0x%08x, i_state: %d on CID: %hu\n",
- cmd->iscsi_opcode, cmd->init_task_tag,
- state, conn->cid);
- spin_unlock_bh(&cmd->istate_lock);
- goto transport_err;
- }
+ ret = iscsit_handle_response_queue(conn);
+ if (ret == 1)
goto get_immediate;
- } else
- conn->tx_immediate_queue = 0;
-
-get_response:
- qr = iscsit_get_cmd_from_response_queue(conn);
- if (qr) {
- cmd = qr->cmd;
- state = qr->state;
- kmem_cache_free(lio_qr_cache, qr);
+ else if (ret == -EAGAIN)
+ goto restart;
+ else if (ret < 0)
+ goto transport_err;
+ }
- spin_lock_bh(&cmd->istate_lock);
-check_rsp_state:
- switch (state) {
- case ISTATE_SEND_DATAIN:
- spin_unlock_bh(&cmd->istate_lock);
- ret = iscsit_send_data_in(cmd, conn,
- &eodr);
- map_sg = 1;
- break;
- case ISTATE_SEND_STATUS:
- case ISTATE_SEND_STATUS_RECOVERY:
- spin_unlock_bh(&cmd->istate_lock);
- use_misc = 1;
- ret = iscsit_send_status(cmd, conn);
- break;
- case ISTATE_SEND_LOGOUTRSP:
- spin_unlock_bh(&cmd->istate_lock);
- use_misc = 1;
- ret = iscsit_send_logout_response(cmd, conn);
- break;
- case ISTATE_SEND_ASYNCMSG:
- spin_unlock_bh(&cmd->istate_lock);
- use_misc = 1;
- ret = iscsit_send_conn_drop_async_message(
- cmd, conn);
- break;
- case ISTATE_SEND_NOPIN:
- spin_unlock_bh(&cmd->istate_lock);
- use_misc = 1;
- ret = iscsit_send_nopin_response(cmd, conn);
- break;
- case ISTATE_SEND_REJECT:
- spin_unlock_bh(&cmd->istate_lock);
- use_misc = 1;
- ret = iscsit_send_reject(cmd, conn);
- break;
- case ISTATE_SEND_TASKMGTRSP:
- spin_unlock_bh(&cmd->istate_lock);
- use_misc = 1;
- ret = iscsit_send_task_mgt_rsp(cmd, conn);
- if (ret != 0)
- break;
- ret = iscsit_tmr_post_handler(cmd, conn);
- if (ret != 0)
- iscsit_fall_back_to_erl0(conn->sess);
- break;
- case ISTATE_SEND_TEXTRSP:
- spin_unlock_bh(&cmd->istate_lock);
- use_misc = 1;
- ret = iscsit_send_text_rsp(cmd, conn);
- break;
- default:
- pr_err("Unknown Opcode: 0x%02x ITT:"
- " 0x%08x, i_state: %d on CID: %hu\n",
- cmd->iscsi_opcode, cmd->init_task_tag,
- state, conn->cid);
- spin_unlock_bh(&cmd->istate_lock);
- goto transport_err;
- }
- if (ret < 0) {
- conn->tx_response_queue = 0;
- goto transport_err;
- }
+transport_err:
+ iscsit_take_action_for_connection_exit(conn);
+ goto restart;
+out:
+ return 0;
+}
- se_cmd = &cmd->se_cmd;
+static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
+{
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
+ struct iscsi_cmd *cmd;
+ int ret = 0;
- if (map_sg && !conn->conn_ops->IFMarker) {
- if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
- conn->tx_response_queue = 0;
- iscsit_tx_thread_wait_for_tcp(conn);
- iscsit_unmap_iovec(cmd);
- goto transport_err;
- }
- } else {
- if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
- conn->tx_response_queue = 0;
- iscsit_tx_thread_wait_for_tcp(conn);
- iscsit_unmap_iovec(cmd);
- goto transport_err;
- }
- }
- map_sg = 0;
- iscsit_unmap_iovec(cmd);
+ switch (hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_SCSI_CMD:
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd)
+ goto reject;
- spin_lock_bh(&cmd->istate_lock);
- switch (state) {
- case ISTATE_SEND_DATAIN:
- if (!eodr)
- goto check_rsp_state;
-
- if (eodr == 1) {
- cmd->i_state = ISTATE_SENT_LAST_DATAIN;
- sent_status = 1;
- eodr = use_misc = 0;
- } else if (eodr == 2) {
- cmd->i_state = state =
- ISTATE_SEND_STATUS;
- sent_status = 0;
- eodr = use_misc = 0;
- goto check_rsp_state;
- }
- break;
- case ISTATE_SEND_STATUS:
- use_misc = 0;
- sent_status = 1;
- break;
- case ISTATE_SEND_ASYNCMSG:
- case ISTATE_SEND_NOPIN:
- case ISTATE_SEND_STATUS_RECOVERY:
- case ISTATE_SEND_TEXTRSP:
- use_misc = 0;
- sent_status = 1;
- break;
- case ISTATE_SEND_REJECT:
- use_misc = 0;
- if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
- cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
- spin_unlock_bh(&cmd->istate_lock);
- complete(&cmd->reject_comp);
- goto transport_err;
- }
- complete(&cmd->reject_comp);
- break;
- case ISTATE_SEND_TASKMGTRSP:
- use_misc = 0;
- sent_status = 1;
- break;
- case ISTATE_SEND_LOGOUTRSP:
- spin_unlock_bh(&cmd->istate_lock);
- if (!iscsit_logout_post_handler(cmd, conn))
- goto restart;
- spin_lock_bh(&cmd->istate_lock);
- use_misc = 0;
- sent_status = 1;
- break;
- default:
- pr_err("Unknown Opcode: 0x%02x ITT:"
- " 0x%08x, i_state: %d on CID: %hu\n",
- cmd->iscsi_opcode, cmd->init_task_tag,
- cmd->i_state, conn->cid);
- spin_unlock_bh(&cmd->istate_lock);
- goto transport_err;
- }
+ ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ ret = iscsit_handle_data_out(conn, buf);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ cmd = NULL;
+ if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd)
+ goto reject;
+ }
+ ret = iscsit_handle_nop_out(conn, cmd, buf);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd)
+ goto reject;
- if (sent_status) {
- cmd->i_state = ISTATE_SENT_STATUS;
- sent_status = 0;
- }
- spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
+ break;
+ case ISCSI_OP_TEXT:
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd)
+ goto reject;
- if (atomic_read(&conn->check_immediate_queue))
- goto get_immediate;
+ ret = iscsit_handle_text_cmd(conn, cmd, buf);
+ break;
+ case ISCSI_OP_LOGOUT:
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd)
+ goto reject;
- goto get_response;
- } else
- conn->tx_response_queue = 0;
+ ret = iscsit_handle_logout_cmd(conn, cmd, buf);
+ if (ret > 0)
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ break;
+ case ISCSI_OP_SNACK:
+ ret = iscsit_handle_snack(conn, buf);
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Cannot recover from unknown"
+ " opcode while ERL=0, closing iSCSI connection.\n");
+ return -1;
+ }
+ if (!conn->conn_ops->OFMarker) {
+ pr_err("Unable to recover from unknown"
+ " opcode while OFMarker=No, closing iSCSI"
+ " connection.\n");
+ return -1;
+ }
+ if (iscsit_recover_from_unknown_opcode(conn) < 0) {
+ pr_err("Unable to recover from unknown"
+ " opcode, closing iSCSI connection.\n");
+ return -1;
+ }
+ break;
}
-transport_err:
- iscsit_take_action_for_connection_exit(conn);
- goto restart;
-out:
- return 0;
+ return ret;
+reject:
+ return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
int iscsi_target_rx_thread(void *arg)
@@ -3816,6 +4087,18 @@ restart:
if (!conn)
goto out;
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ struct completion comp;
+ int rc;
+
+ init_completion(&comp);
+ rc = wait_for_completion_interruptible(&comp);
+ if (rc < 0)
+ goto transport_err;
+
+ goto out;
+ }
+
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3835,11 +4118,6 @@ restart:
goto transport_err;
}
- /*
- * Set conn->bad_hdr for use with REJECT PDUs.
- */
- memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
-
if (conn->conn_ops->HeaderDigest) {
iov.iov_base = &digest;
iov.iov_len = ISCSI_CRC_LEN;
@@ -3863,9 +4141,7 @@ restart:
* hit default in the switch below.
*/
memset(buffer, 0xff, ISCSI_HDR_LEN);
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->conn_digest_errors++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->conn_digest_errors);
} else {
pr_debug("Got HeaderDigest CRC32C"
" 0x%08x\n", checksum);
@@ -3882,67 +4158,14 @@ restart:
(!(opcode & ISCSI_OP_LOGOUT)))) {
pr_err("Received illegal iSCSI Opcode: 0x%02x"
" while in Discovery Session, rejecting.\n", opcode);
- iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buffer, conn);
+ iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buffer);
goto transport_err;
}
- switch (opcode) {
- case ISCSI_OP_SCSI_CMD:
- if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
- goto transport_err;
- break;
- case ISCSI_OP_SCSI_DATA_OUT:
- if (iscsit_handle_data_out(conn, buffer) < 0)
- goto transport_err;
- break;
- case ISCSI_OP_NOOP_OUT:
- if (iscsit_handle_nop_out(conn, buffer) < 0)
- goto transport_err;
- break;
- case ISCSI_OP_SCSI_TMFUNC:
- if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
- goto transport_err;
- break;
- case ISCSI_OP_TEXT:
- if (iscsit_handle_text_cmd(conn, buffer) < 0)
- goto transport_err;
- break;
- case ISCSI_OP_LOGOUT:
- ret = iscsit_handle_logout_cmd(conn, buffer);
- if (ret > 0) {
- wait_for_completion_timeout(&conn->conn_logout_comp,
- SECONDS_FOR_LOGOUT_COMP * HZ);
- goto transport_err;
- } else if (ret < 0)
- goto transport_err;
- break;
- case ISCSI_OP_SNACK:
- if (iscsit_handle_snack(conn, buffer) < 0)
- goto transport_err;
- break;
- default:
- pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
- opcode);
- if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
- pr_err("Cannot recover from unknown"
- " opcode while ERL=0, closing iSCSI connection"
- ".\n");
- goto transport_err;
- }
- if (!conn->conn_ops->OFMarker) {
- pr_err("Unable to recover from unknown"
- " opcode while OFMarker=No, closing iSCSI"
- " connection.\n");
- goto transport_err;
- }
- if (iscsit_recover_from_unknown_opcode(conn) < 0) {
- pr_err("Unable to recover from unknown"
- " opcode, closing iSCSI connection.\n");
- goto transport_err;
- }
- break;
- }
+ ret = iscsi_target_rx_opcode(conn, buffer);
+ if (ret < 0)
+ goto transport_err;
}
transport_err:
@@ -3964,14 +4187,14 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
* has been reset -> returned sleeping pre-handler state.
*/
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
- list_del(&cmd->i_list);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
}
@@ -3984,7 +4207,7 @@ static void iscsit_stop_timers_for_cmds(
struct iscsi_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
}
@@ -4011,7 +4234,9 @@ int iscsit_close_connection(
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
- iscsit_free_queue_reqs_for_conn(conn);
+
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
/*
* During Connection recovery drop unacknowledged out of order
@@ -4029,6 +4254,7 @@ int iscsit_close_connection(
iscsit_clear_ooo_cmdsns_for_conn(conn);
iscsit_release_commands_from_conn(conn);
}
+ iscsit_free_queue_reqs_for_conn(conn);
/*
* Handle decrementing session or connection usage count if
@@ -4106,13 +4332,14 @@ int iscsit_close_connection(
kfree(conn->conn_ops);
conn->conn_ops = NULL;
- if (conn->sock) {
- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
- kfree(conn->sock->file);
- conn->sock->file = NULL;
- }
+ if (conn->sock)
sock_release(conn->sock);
- }
+
+ if (conn->conn_transport->iscsit_free_conn)
+ conn->conn_transport->iscsit_free_conn(conn);
+
+ iscsit_put_transport(conn->conn_transport);
+
conn->thread_set = NULL;
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
@@ -4170,7 +4397,7 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
- iscsit_close_session(sess);
+ target_put_session(sess->se_sess);
return 0;
} else if (atomic_read(&sess->session_logout)) {
@@ -4204,7 +4431,7 @@ int iscsit_close_connection(
int iscsit_close_session(struct iscsi_session *sess)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (atomic_read(&sess->nconn)) {
@@ -4291,7 +4518,7 @@ static void iscsit_logout_post_handler_closesession(
iscsit_dec_conn_usage_count(conn);
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
+ target_put_session(sess->se_sess);
}
static void iscsit_logout_post_handler_samecid(
@@ -4344,7 +4571,7 @@ static void iscsit_logout_post_handler_diffcid(
/*
* Return of 0 causes the TX thread to restart.
*/
-static int iscsit_logout_post_handler(
+int iscsit_logout_post_handler(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
@@ -4402,6 +4629,7 @@ static int iscsit_logout_post_handler(
}
return ret;
}
+EXPORT_SYMBOL(iscsit_logout_post_handler);
void iscsit_fail_session(struct iscsi_session *sess)
{
@@ -4457,7 +4685,7 @@ int iscsit_free_session(struct iscsi_session *sess)
} else
spin_unlock_bh(&sess->conn_lock);
- iscsit_close_session(sess);
+ target_put_session(sess->se_sess);
return 0;
}
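
The TX-path refactor above replaces the monolithic switch in iscsi_target_tx_thread() with two per-state handlers, iscsit_immediate_queue() and iscsit_response_queue(), reached through function pointers on conn->conn_transport so that iSCSI/TCP and iSER can plug in different senders. A minimal sketch of that dispatch shape, assuming only the two callbacks visible in this patch (all *_sketch names are hypothetical stand-ins for the kernel types, not the real struct iscsit_transport):

/* Sketch only: mirrors the t->iscsit_immediate_queue /
 * t->iscsit_response_queue calls added above. */
struct conn_sketch;
struct cmd_sketch;

struct transport_ops_sketch {
	int (*iscsit_immediate_queue)(struct conn_sketch *, struct cmd_sketch *, int);
	int (*iscsit_response_queue)(struct conn_sketch *, struct cmd_sketch *, int);
};

static int drain_queues_sketch(struct conn_sketch *conn,
			       struct transport_ops_sketch *t,
			       struct cmd_sketch *cmd, int state)
{
	int ret;

	/* Immediate queue first (R2Ts, NopINs), then responses; a
	 * response-queue return of 1 re-checks the immediate queue,
	 * exactly as in iscsi_target_tx_thread() above. */
	ret = t->iscsit_immediate_queue(conn, cmd, state);
	if (ret < 0)
		return ret;
	return t->iscsit_response_queue(conn, cmd, state);
}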
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 5db2ddeed5e..e936d56fb52 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -7,19 +7,23 @@ extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
-extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern void iscsit_login_kref_put(struct kref *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
+ struct iscsi_tpg_np *);
+extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+ struct iscsi_np *, int);
extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
char *, int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
- struct iscsi_portal_group *);
+ struct iscsi_portal_group *, bool);
extern int iscsit_del_np(struct iscsi_np *);
-extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
+extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
-extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery);
extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
@@ -35,8 +39,12 @@ extern struct target_fabric_configfs *lio_target_fabric_configfs;
extern struct kmem_cache *lio_dr_cache;
extern struct kmem_cache *lio_ooo_cache;
-extern struct kmem_cache *lio_cmd_cache;
extern struct kmem_cache *lio_qr_cache;
extern struct kmem_cache *lio_r2t_cache;
+extern struct idr sess_idr;
+extern struct mutex auth_id_lock;
+extern spinlock_t sess_idr_lock;
+
+
#endif /*** ISCSI_TARGET_H ***/
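
Several helpers are now exported precisely so an alternate transport module (e.g. ib_isert) can build standard PDUs itself. A hedged sketch of such reuse, assuming kernel context and only the iscsit_build_reject() signature visible in this patch (the surrounding function is hypothetical):

/* Hypothetical transport code: fill a Reject PDU via the newly
 * exported builder, then hand it to a transport-specific sender. */
static int example_xmit_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct iscsi_reject *hdr = (struct iscsi_reject *)cmd->pdu;

	iscsit_build_reject(cmd, conn, hdr);
	/* ... queue cmd->pdu plus the offending header for transmit ... */
	return 0;
}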
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index db0cf7c8add..ab4915c0d93 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file houses the main functions for the iSCSI CHAP support
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -49,32 +47,6 @@ static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
}
}
-static void chap_set_random(char *data, int length)
-{
- long r;
- unsigned n;
-
- while (length > 0) {
- get_random_bytes(&r, sizeof(long));
- r = r ^ (r >> 8);
- r = r ^ (r >> 4);
- n = r & 0x7;
-
- get_random_bytes(&r, sizeof(long));
- r = r ^ (r >> 8);
- r = r ^ (r >> 5);
- n = (n << 3) | (r & 0x7);
-
- get_random_bytes(&r, sizeof(long));
- r = r ^ (r >> 8);
- r = r ^ (r >> 5);
- n = (n << 2) | (r & 0x3);
-
- *data++ = n;
- length--;
- }
-}
-
static void chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -86,7 +58,7 @@ static void chap_gen_challenge(
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
- chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
@@ -99,6 +71,40 @@ static void chap_gen_challenge(
challenge_asciihex);
}
+static int chap_check_algorithm(const char *a_str)
+{
+ char *tmp, *orig, *token;
+
+ tmp = kstrdup(a_str, GFP_KERNEL);
+ if (!tmp) {
+ pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
+ return CHAP_DIGEST_UNKNOWN;
+ }
+ orig = tmp;
+
+ token = strsep(&tmp, "=");
+ if (!token)
+ goto out;
+
+ if (strcmp(token, "CHAP_A")) {
+ pr_err("Unable to locate CHAP_A key\n");
+ goto out;
+ }
+ while (token) {
+ token = strsep(&tmp, ",");
+ if (!token)
+ goto out;
+
+ if (!strncmp(token, "5", 1)) {
+ pr_debug("Selected MD5 Algorithm\n");
+ kfree(orig);
+ return CHAP_DIGEST_MD5;
+ }
+ }
+out:
+ kfree(orig);
+ return CHAP_DIGEST_UNKNOWN;
+}
static struct iscsi_chap *chap_server_open(
struct iscsi_conn *conn,
@@ -107,6 +113,7 @@ static struct iscsi_chap *chap_server_open(
char *aic_str,
unsigned int *aic_len)
{
+ int ret;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -121,25 +128,28 @@ static struct iscsi_chap *chap_server_open(
return NULL;
chap = conn->auth_protocol;
- /*
- * We only support MD5 MDA presently.
- */
- if (strncmp(a_str, "CHAP_A=5", 8)) {
- pr_err("CHAP_A is not MD5.\n");
+ ret = chap_check_algorithm(a_str);
+ switch (ret) {
+ case CHAP_DIGEST_MD5:
+ pr_debug("[server] Got CHAP_A=5\n");
+ /*
+ * Send back CHAP_A set to MD5.
+ */
+ *aic_len = sprintf(aic_str, "CHAP_A=5");
+ *aic_len += 1;
+ chap->digest_type = CHAP_DIGEST_MD5;
+ pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ break;
+ case CHAP_DIGEST_UNKNOWN:
+ default:
+ pr_err("Unsupported CHAP_A value\n");
return NULL;
}
- pr_debug("[server] Got CHAP_A=5\n");
- /*
- * Send back CHAP_A set to MD5.
- */
- *aic_len = sprintf(aic_str, "CHAP_A=5");
- *aic_len += 1;
- chap->digest_type = CHAP_DIGEST_MD5;
- pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+
/*
* Set Identifier.
*/
- chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+ chap->id = conn->tpg->tpg_chap_id++;
*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
*aic_len += 1;
pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
@@ -164,8 +174,8 @@ static int chap_server_compute_md5(
char *nr_out_ptr,
unsigned int *nr_out_len)
{
- char *endptr;
unsigned long id;
+ unsigned char id_as_uchar;
unsigned char digest[MD5_SIGNATURE_SIZE];
unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
unsigned char identifier[10], *challenge = NULL;
@@ -173,6 +183,7 @@ static int chap_server_compute_md5(
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+ size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
@@ -211,7 +222,9 @@ static int chap_server_compute_md5(
goto out;
}
- if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+ /* Include the terminating NULL in the compare */
+ compare_len = strlen(auth->userid) + 1;
+ if (strncmp(chap_n, auth->userid, compare_len) != 0) {
pr_err("CHAP_N values do not match!\n");
goto out;
}
@@ -306,9 +319,14 @@ static int chap_server_compute_md5(
}
if (type == HEX)
- id = simple_strtoul(&identifier[2], &endptr, 0);
+ ret = kstrtoul(&identifier[2], 0, &id);
else
- id = simple_strtoul(identifier, &endptr, 0);
+ ret = kstrtoul(identifier, 0, &id);
+
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
+ goto out;
+ }
if (id > 255) {
pr_err("chap identifier: %lu greater than 255\n", id);
goto out;
@@ -337,6 +355,20 @@ static int chap_server_compute_md5(
pr_err("Unable to convert incoming challenge\n");
goto out;
}
+ if (challenge_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+ /*
+ * During mutual authentication, the CHAP_C generated by the
+ * initiator must not match the original CHAP_C generated by
+ * the target.
+ */
+ if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+ pr_err("initiator CHAP_C matches target CHAP_C, failing"
+ " login attempt\n");
+ goto out;
+ }
/*
* Generate CHAP_N and CHAP_R for mutual authentication.
*/
@@ -355,7 +387,9 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, &id, 1);
+ /* To handle both endiannesses */
+ id_as_uchar = id;
+ sg_init_one(&sg, &id_as_uchar, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index 2f463c09626..d22f7b96a06 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,6 +1,7 @@
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
+#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
#define CHAP_DIGEST_SHA 6
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 6b35b37988e..ae03f3e5de1 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -2,9 +2,7 @@
* This file contains the configfs implementation for iSCSI Target mode
* from the LIO-Target Project.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -20,6 +18,7 @@
****************************************************************************/
#include <linux/configfs.h>
+#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/inet.h>
#include <target/target_core_base.h>
@@ -27,6 +26,7 @@
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
+#include <target/iscsi/iscsi_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -47,28 +47,6 @@ struct lio_target_configfs_attribute {
ssize_t (*store)(void *, const char *, size_t);
};
-struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
- struct config_item *item,
- struct iscsi_tiqn **tiqn_out)
-{
- struct se_portal_group *se_tpg = container_of(to_config_group(item),
- struct se_portal_group, tpg_group);
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- int ret;
-
- if (!tpg) {
- pr_err("Unable to locate struct iscsi_portal_group "
- "pointer\n");
- return NULL;
- }
- ret = iscsit_get_tpg(tpg);
- if (ret < 0)
- return NULL;
-
- *tiqn_out = tpg->tpg_tiqn;
- return tpg;
-}
-
/* Start items for lio_target_portal_cit */
static ssize_t lio_target_np_show_sctp(
@@ -99,11 +77,12 @@ static ssize_t lio_target_np_store_sctp(
struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
struct iscsi_tpg_np, se_tpg_np);
struct iscsi_tpg_np *tpg_np_sctp = NULL;
- char *endptr;
u32 op;
int ret;
- op = simple_strtoul(page, &endptr, 0);
+ ret = kstrtou32(page, 0, &op);
+ if (ret)
+ return ret;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
@@ -146,8 +125,88 @@ out:
TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
+static ssize_t lio_target_np_show_iser(
+ struct se_tpg_np *se_tpg_np,
+ char *page)
+{
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_iser;
+ ssize_t rb;
+
+ tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
+ if (tpg_np_iser)
+ rb = sprintf(page, "1\n");
+ else
+ rb = sprintf(page, "0\n");
+
+ return rb;
+}
+
+static ssize_t lio_target_np_store_iser(
+ struct se_tpg_np *se_tpg_np,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_np *np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_iser = NULL;
+ char *endptr;
+ u32 op;
+ int rc = 0;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %u\n", op);
+ return -EINVAL;
+ }
+ np = tpg_np->tpg_np;
+ if (!np) {
+ pr_err("Unable to locate struct iscsi_np from"
+ " struct iscsi_tpg_np\n");
+ return -EINVAL;
+ }
+
+ tpg = tpg_np->tpg;
+ if (iscsit_get_tpg(tpg) < 0)
+ return -EINVAL;
+
+ if (op) {
+ rc = request_module("ib_isert");
+ if (rc != 0) {
+ pr_warn("Unable to request_module for ib_isert\n");
+ rc = 0;
+ }
+
+ tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+ np->np_ip, tpg_np, ISCSI_INFINIBAND);
+ if (IS_ERR(tpg_np_iser)) {
+ rc = PTR_ERR(tpg_np_iser);
+ goto out;
+ }
+ } else {
+ tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
+ if (tpg_np_iser) {
+ rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+ if (rc < 0)
+ goto out;
+ }
+ }
+
+ iscsit_put_tpg(tpg);
+ return count;
+out:
+ iscsit_put_tpg(tpg);
+ return rc;
+}
+
+TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *lio_target_portal_attrs[] = {
&lio_target_np_sctp.attr,
+ &lio_target_np_iser.attr,
NULL,
};
@@ -157,7 +216,7 @@ static struct configfs_attribute *lio_target_portal_attrs[] = {
#define MAX_PORTAL_LEN 256
-struct se_tpg_np *lio_target_call_addnptotpg(
+static struct se_tpg_np *lio_target_call_addnptotpg(
struct se_portal_group *se_tpg,
struct config_group *group,
const char *name)
@@ -204,9 +263,9 @@ struct se_tpg_np *lio_target_call_addnptotpg(
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
- ret = strict_strtoul(port_str, 0, &port);
+ ret = kstrtoul(port_str, 0, &port);
if (ret < 0) {
- pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ pr_err("kstrtoul() failed for port_str: %d\n", ret);
return ERR_PTR(ret);
}
sock_in6 = (struct sockaddr_in6 *)&sockaddr;
@@ -229,9 +288,9 @@ struct se_tpg_np *lio_target_call_addnptotpg(
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
- ret = strict_strtoul(port_str, 0, &port);
+ ret = kstrtoul(port_str, 0, &port);
if (ret < 0) {
- pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ pr_err("kstrtoul() failed for port_str: %d\n", ret);
return ERR_PTR(ret);
}
sock_in = (struct sockaddr_in *)&sockaddr;
@@ -257,7 +316,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
* iSER/SCTP (TODO, software emulation with osc-iwarp)
* iSER/IB (TODO, hardware available)
*
- * can be enabled with atributes under
+ * can be enabled with attributes under
* sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
*
*/
@@ -313,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
se_node_acl); \
\
- return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
+ return sprintf(page, "%u\n", nacl->node_attrib.name); \
} \
\
static ssize_t iscsi_nacl_attrib_store_##name( \
@@ -323,11 +382,12 @@ static ssize_t iscsi_nacl_attrib_store_##name( \
{ \
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
se_node_acl); \
- char *endptr; \
u32 val; \
int ret; \
\
- val = simple_strtoul(page, &endptr, 0); \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
ret = iscsit_na_##name(nacl, val); \
if (ret < 0) \
return ret; \
@@ -414,8 +474,9 @@ static ssize_t __iscsi_##prefix##_store_##name( \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
- \
- snprintf(auth->name, PAGE_SIZE, "%s", page); \
+ if (count >= sizeof(auth->name)) \
+ return -EINVAL; \
+ snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
else \
@@ -730,11 +791,12 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
- char *endptr;
u32 cmdsn_depth = 0;
int ret;
- cmdsn_depth = simple_strtoul(page, &endptr, 0);
+ ret = kstrtou32(page, 0, &cmdsn_depth);
+ if (ret)
+ return ret;
if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
pr_err("Passed cmdsn_depth: %u exceeds"
" TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
@@ -776,9 +838,33 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
+static ssize_t lio_target_nacl_show_tag(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
+}
+
+static ssize_t lio_target_nacl_store_tag(
+ struct se_node_acl *se_nacl,
+ const char *page,
+ size_t count)
+{
+ int ret;
+
+ ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *lio_target_initiator_attrs[] = {
&lio_target_nacl_info.attr,
&lio_target_nacl_cmdsn_depth.attr,
+ &lio_target_nacl_tag.attr,
NULL,
};
@@ -812,10 +898,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
if (!se_nacl_new)
return ERR_PTR(-ENOMEM);
- acl = container_of(se_nacl_new, struct iscsi_node_acl,
- se_node_acl);
-
- cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
/*
* se_nacl_new may be released by core_tpg_add_initiator_node_acl()
 * when converting a NodeACL from demo mode -> explicit
@@ -825,9 +908,10 @@ static struct se_node_acl *lio_target_make_nodeacl(
if (IS_ERR(se_nacl))
return se_nacl;
- stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+ acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+ stats_cg = &se_nacl->acl_fabric_stat_group;
- stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
@@ -837,9 +921,9 @@ static struct se_node_acl *lio_target_make_nodeacl(
return ERR_PTR(-ENOMEM);
}
- stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+ stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
stats_cg->default_groups[1] = NULL;
- config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+ config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
return se_nacl;
@@ -884,7 +968,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
- rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
+ rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
@@ -896,14 +980,15 @@ static ssize_t iscsi_tpg_attrib_store_##name( \
{ \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
- char *endptr; \
u32 val; \
int ret; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
- val = simple_strtoul(page, &endptr, 0); \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ goto out; \
ret = iscsit_ta_##name(tpg, val); \
if (ret < 0) \
goto out; \
@@ -957,6 +1042,21 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
*/
DEF_TPG_ATTRIB(prod_mode_write_protect);
TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_discovery,
+ */
+DEF_TPG_ATTRIB(demo_mode_discovery);
+TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_erl
+ */
+DEF_TPG_ATTRIB(default_erl);
+TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_t10_pi
+ */
+DEF_TPG_ATTRIB(t10_pi);
+TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
@@ -967,11 +1067,139 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_cache_dynamic_acls.attr,
&iscsi_tpg_attrib_demo_mode_write_protect.attr,
&iscsi_tpg_attrib_prod_mode_write_protect.attr,
+ &iscsi_tpg_attrib_demo_mode_discovery.attr,
+ &iscsi_tpg_attrib_default_erl.attr,
+ &iscsi_tpg_attrib_t10_pi.attr,
NULL,
};
/* End items for lio_target_tpg_attrib_cit */
+/* Start items for lio_target_tpg_auth_cit */
+
+#define __DEF_TPG_AUTH_STR(prefix, name, flags) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
+} \
+ \
+static ssize_t __iscsi_##prefix##_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ snprintf(auth->name, sizeof(auth->name), "%s", page); \
+ if (!(strncmp("NULL", auth->name, 4))) \
+ auth->naf_flags &= ~flags; \
+ else \
+ auth->naf_flags |= flags; \
+ \
+ if ((auth->naf_flags & NAF_USERID_IN_SET) && \
+ (auth->naf_flags & NAF_PASSWORD_IN_SET)) \
+ auth->authenticate_target = 1; \
+ else \
+ auth->authenticate_target = 0; \
+ \
+ return count; \
+}
+
+#define __DEF_TPG_AUTH_INT(prefix, name) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
+}
+
+#define DEF_TPG_AUTH_STR(name, flags) \
+ __DEF_TPG_AUTH_STR(tpg_auth, name, flags) \
+static ssize_t iscsi_tpg_auth_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ return __iscsi_tpg_auth_show_##name(se_tpg, page); \
+} \
+ \
+static ssize_t iscsi_tpg_auth_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ return __iscsi_tpg_auth_store_##name(se_tpg, page, count); \
+}
+
+#define DEF_TPG_AUTH_INT(name) \
+ __DEF_TPG_AUTH_INT(tpg_auth, name) \
+static ssize_t iscsi_tpg_auth_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ return __iscsi_tpg_auth_show_##name(se_tpg, page); \
+}
+
+#define TPG_AUTH_ATTR(_name, _mode) TF_TPG_AUTH_ATTR(iscsi, _name, _mode);
+#define TPG_AUTH_ATTR_RO(_name) TF_TPG_AUTH_ATTR_RO(iscsi, _name);
+
+/*
+ * One-way authentication userid
+ */
+DEF_TPG_AUTH_STR(userid, NAF_USERID_SET);
+TPG_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET);
+TPG_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_TPG_AUTH_INT(authenticate_target);
+TPG_AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+TPG_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+TPG_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_auth_attrs[] = {
+ &iscsi_tpg_auth_userid.attr,
+ &iscsi_tpg_auth_password.attr,
+ &iscsi_tpg_auth_authenticate_target.attr,
+ &iscsi_tpg_auth_userid_mutual.attr,
+ &iscsi_tpg_auth_password_mutual.attr,
+ NULL,
+};
+
+/* End items for lio_target_tpg_auth_cit */
+
/* Start items for lio_target_tpg_param_cit */
#define DEF_TPG_PARAM(name) \
@@ -1006,13 +1234,14 @@ static ssize_t iscsi_tpg_param_store_##name( \
struct iscsi_portal_group *tpg = container_of(se_tpg, \
struct iscsi_portal_group, tpg_se_tpg); \
char *buf; \
- int ret; \
+ int ret, len; \
\
buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
if (!buf) \
return -ENOMEM; \
- snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
- buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
+ len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
+ if (isspace(buf[len-1])) \
+ buf[len-1] = '\0'; /* Kill newline */ \
\
if (iscsit_get_tpg(tpg) < 0) { \
kfree(buf); \
@@ -1058,6 +1287,9 @@ TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
DEF_TPG_PARAM(MaxRecvDataSegmentLength);
TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
+DEF_TPG_PARAM(MaxXmitDataSegmentLength);
+TPG_PARAM_ATTR(MaxXmitDataSegmentLength, S_IRUGO | S_IWUSR);
+
DEF_TPG_PARAM(MaxBurstLength);
TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
@@ -1103,6 +1335,7 @@ static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
&iscsi_tpg_param_InitialR2T.attr,
&iscsi_tpg_param_ImmediateData.attr,
&iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
+ &iscsi_tpg_param_MaxXmitDataSegmentLength.attr,
&iscsi_tpg_param_MaxBurstLength.attr,
&iscsi_tpg_param_FirstBurstLength.attr,
&iscsi_tpg_param_DefaultTime2Wait.attr,
@@ -1145,11 +1378,12 @@ static ssize_t lio_target_tpg_store_enable(
{
struct iscsi_portal_group *tpg = container_of(se_tpg,
struct iscsi_portal_group, tpg_se_tpg);
- char *endptr;
u32 op;
- int ret = 0;
+ int ret;
- op = simple_strtoul(page, &endptr, 0);
+ ret = kstrtou32(page, 0, &op);
+ if (ret)
+ return ret;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
@@ -1190,22 +1424,22 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = {
/* Start items for lio_target_tiqn_cit */
-struct se_portal_group *lio_target_tiqn_addtpg(
+static struct se_portal_group *lio_target_tiqn_addtpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
- char *tpgt_str, *end_ptr;
- int ret = 0;
- unsigned short int tpgt;
+ char *tpgt_str;
+ int ret;
+ u16 tpgt;
tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
/*
* Only tpgt_# directory groups can be created below
* target/iscsi/iqn.superturodiskarry/
- */
+ */
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
pr_err("Unable to locate \"tpgt_#\" directory"
@@ -1213,7 +1447,9 @@ struct se_portal_group *lio_target_tiqn_addtpg(
return NULL;
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
- tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+ ret = kstrtou16(tpgt_str, 0, &tpgt);
+ if (ret)
+ return NULL;
tpg = iscsit_alloc_portal_group(tiqn, tpgt);
if (!tpg)
@@ -1240,7 +1476,7 @@ out:
return NULL;
}
-void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
+static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
@@ -1262,7 +1498,7 @@ static ssize_t lio_target_wwn_show_attr_lio_version(
struct target_fabric_configfs *tf,
char *page)
{
- return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
+ return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n");
}
TF_WWN_ATTR_RO(lio_target, lio_version);
@@ -1272,7 +1508,7 @@ static struct configfs_attribute *lio_target_wwn_attrs[] = {
NULL,
};
-struct se_wwn *lio_target_call_coreaddtiqn(
+static struct se_wwn *lio_target_call_coreaddtiqn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
@@ -1288,7 +1524,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
*/
stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
- stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+ stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
@@ -1297,21 +1533,21 @@ struct se_wwn *lio_target_call_coreaddtiqn(
return ERR_PTR(-ENOMEM);
}
- stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
- stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
- stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
- stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
- stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+ stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
+ stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
+ stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
+ stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
+ stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
stats_cg->default_groups[5] = NULL;
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
"iscsi_login_stats", &iscsi_stat_login_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
"iscsi_logout_stats", &iscsi_stat_logout_cit);
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
@@ -1320,7 +1556,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
return &tiqn->tiqn_wwn;
}
-void lio_target_call_coredeltiqn(
+static void lio_target_call_coredeltiqn(
struct se_wwn *wwn)
{
struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
@@ -1421,10 +1657,12 @@ static ssize_t iscsi_disc_store_enforce_discovery_auth(
{
struct iscsi_param *param;
struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
- char *endptr;
u32 op;
+ int err;
- op = simple_strtoul(page, &endptr, 0);
+ err = kstrtou32(page, 0, &op);
+ if (err)
+ return -EINVAL;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for enforce_discovery_auth:"
" %u\n", op);
@@ -1495,7 +1733,8 @@ static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
- return cmd->init_task_tag;
+ /* only used for printks or comparison with ->ref_task_tag */
+ return (__force u32)cmd->init_task_tag;
}
static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
@@ -1505,28 +1744,6 @@ static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
return cmd->i_state;
}
-static int iscsi_is_state_remove(struct se_cmd *se_cmd)
-{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-
- return (cmd->i_state == ISTATE_REMOVE);
-}
-
-static int lio_sess_logged_in(struct se_session *se_sess)
-{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
- int ret;
- /*
- * Called with spin_lock_bh(&tpg_lock); and
- * spin_lock(&se_tpg->session_lock); held.
- */
- spin_lock(&sess->conn_lock);
- ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
- spin_unlock(&sess->conn_lock);
-
- return ret;
-}
-
static u32 lio_sess_get_index(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
@@ -1553,16 +1770,18 @@ static int lio_queue_data_in(struct se_cmd *se_cmd)
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_DATAIN;
- iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
+
return 0;
}
static int lio_write_pending(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn *conn = cmd->conn;
if (!cmd->immediate_data && !cmd->unsolicited_data)
- return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
+ return conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
return 0;
}
@@ -1584,40 +1803,29 @@ static int lio_queue_status(struct se_cmd *se_cmd)
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_STATUS;
- iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+
+ if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+ }
+ cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
+
return 0;
}
-static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
{
- unsigned char *buffer = se_cmd->sense_buffer;
- /*
- * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
- * 16-bit SenseLength.
- */
- buffer[0] = ((sense_length >> 8) & 0xff);
- buffer[1] = (sense_length & 0xff);
- /*
- * Return two byte offset into allocated sense_buffer.
- */
- return 2;
-}
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-static u16 lio_get_fabric_sense_len(void)
-{
- /*
- * Return two byte offset into allocated sense_buffer.
- */
- return 2;
+ cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
-static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
+static void lio_aborted_task(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
- cmd->i_state = ISTATE_SEND_TASKMGTRSP;
- iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
- return 0;
+ cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
}
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
@@ -1638,21 +1846,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ return tpg->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+ return tpg->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+ return tpg->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
@@ -1660,7 +1868,7 @@ static int lio_tpg_check_demo_mode_write_protect(
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+ return tpg->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
@@ -1668,7 +1876,7 @@ static int lio_tpg_check_prod_mode_write_protect(
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+ return tpg->tpg_attrib.prod_mode_write_protect;
}
static void lio_tpg_release_fabric_acl(
@@ -1700,8 +1908,8 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
- iscsit_inc_session_usage_count(sess);
iscsit_stop_time2retain_timer(sess);
+ iscsit_stop_session(sess, 1, 1);
return 1;
}
@@ -1717,28 +1925,9 @@ static void lio_tpg_close_session(struct se_session *se_sess)
* If the iSCSI Session for the iSCSI Initiator Node exists,
* forcefully shutdown the iSCSI NEXUS.
*/
- iscsit_stop_session(sess, 1, 1);
- iscsit_dec_session_usage_count(sess);
iscsit_close_session(sess);
}
-static void lio_tpg_stop_session(
- struct se_session *se_sess,
- int sess_sleep,
- int conn_sleep)
-{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
-
- iscsit_stop_session(sess, sess_sleep, conn_sleep);
-}
-
-static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
-{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
-
- iscsit_fall_back_to_erl0(sess);
-}
-
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1750,15 +1939,24 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
se_node_acl);
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
- ISCSI_NODE_ATTRIB(acl)->nacl = acl;
- iscsit_set_default_node_attribues(acl);
+ acl->node_attrib.nacl = acl;
+ iscsit_set_default_node_attribues(acl, tpg);
+}
+
+static int lio_check_stop_free(struct se_cmd *se_cmd)
+{
+ return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
static void lio_release_cmd(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
iscsit_release_cmd(cmd);
}
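The new lio_check_stop_free() simply forwards to target_put_sess_cmd(), whose return value tells the core whether the final reference to the descriptor was just dropped. A stub sketch of that contract — the kernel uses an embedded kref, this is only the shape of the API:

#include <stdbool.h>

struct cmd_ref {
	int count;		/* stand-in for the embedded kref */
};

/* returns true when this call released the last reference */
static bool put_sess_cmd(struct cmd_ref *ref)
{
	if (--ref->count == 0)
		return true;	/* caller must not touch the command again */
	return false;
}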
@@ -1799,12 +1997,10 @@ int iscsi_target_register_configfs(void)
fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
+	fabric->tf_ops.check_stop_free = &lio_check_stop_free;
fabric->tf_ops.release_cmd = &lio_release_cmd;
fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
fabric->tf_ops.close_session = &lio_tpg_close_session;
- fabric->tf_ops.stop_session = &lio_tpg_stop_session;
- fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
- fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
fabric->tf_ops.sess_get_index = &lio_sess_get_index;
fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
fabric->tf_ops.write_pending = &lio_write_pending;
@@ -1816,9 +2012,7 @@ int iscsi_target_register_configfs(void)
fabric->tf_ops.queue_data_in = &lio_queue_data_in;
fabric->tf_ops.queue_status = &lio_queue_status;
fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
- fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
- fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
- fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
+ fabric->tf_ops.aborted_task = &lio_aborted_task;
/*
* Setup function pointers for generic logic in target_core_fabric_configfs.c
*/
@@ -1836,16 +2030,17 @@ int iscsi_target_register_configfs(void)
* Setup default attribute lists for various fabric->tf_cit_tmpl
* struct config_item_type's
*/
- TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+ fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
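The last hunk is mechanical: the TF_CIT_TMPL() accessor is being retired in favour of direct member access. Assuming the usual definition of that macro, the two spellings are identical:

#define TF_CIT_TMPL(tf)	(&(tf)->tf_cit_tmpl)

/* TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
 * is exactly
 * fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs  = lio_target_wwn_attrs;  */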
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 0ec3b77a0c2..302eb3b7871 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -9,7 +9,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#define ISCSIT_VERSION "v4.1.0-rc1"
+#define ISCSIT_VERSION "v4.1.0"
#define ISCSI_MAX_DATASN_MISSING_COUNT 16
#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
@@ -17,6 +17,9 @@
#define SECONDS_FOR_ASYNC_TEXT 10
#define SECONDS_FOR_LOGOUT_COMP 15
#define WHITE_SPACE " \t\v\f\n\r"
+#define ISCSIT_MIN_TAGS 16
+#define ISCSIT_EXTRA_TAGS 8
+#define ISCSIT_TCP_BACKLOG 256
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -25,18 +28,15 @@
#define NA_DATAOUT_TIMEOUT_RETRIES 5
#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
-#define NA_NOPIN_TIMEOUT 5
+#define NA_NOPIN_TIMEOUT 15
#define NA_NOPIN_TIMEOUT_MAX 60
#define NA_NOPIN_TIMEOUT_MIN 3
-#define NA_NOPIN_RESPONSE_TIMEOUT 5
+#define NA_NOPIN_RESPONSE_TIMEOUT 30
#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
#define NA_RANDOM_R2T_OFFSETS 0
-#define NA_DEFAULT_ERL 0
-#define NA_DEFAULT_ERL_MAX 2
-#define NA_DEFAULT_ERL_MIN 0
/* struct iscsi_tpg_attrib sanity values */
#define TA_AUTHENTICATION 1
@@ -47,7 +47,7 @@
#define TA_NETIF_TIMEOUT_MAX 15
#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
-#define TA_DEFAULT_CMDSN_DEPTH 16
+#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
#define TA_CACHE_DYNAMIC_ACLS 0
@@ -55,12 +55,15 @@
#define TA_DEMO_MODE_WRITE_PROTECT 1
/* Disabled by default in production mode w/ explicit ACLs */
#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_DEMO_MODE_DISCOVERY 1
+#define TA_DEFAULT_ERL 0
#define TA_CACHE_CORE_NPS 0
-
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI 0
#define ISCSI_IOV_DATA_BUFFER 5
-enum tpg_np_network_transport_table {
+enum iscsit_transport_type {
ISCSI_TCP = 0,
ISCSI_SCTP_TCP = 1,
ISCSI_SCTP_UDP = 2,
@@ -132,7 +135,8 @@ enum cmd_flags_table {
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
- ICF_REJECT_FAIL_CONN = 0x00000100,
+ IFC_SENDTARGETS_ALL = 0x00000100,
+ IFC_SENDTARGETS_SINGLE = 0x00000200,
};
/* struct iscsi_cmd->i_state */
@@ -188,6 +192,7 @@ enum recover_cmdsn_ret_table {
CMDSN_NORMAL_OPERATION = 0,
CMDSN_LOWER_THAN_EXP = 1,
CMDSN_HIGHER_THAN_EXP = 2,
+ CMDSN_MAXCMDSN_OVERRUN = 3,
};
/* Used for iscsi_handle_immediate_data() return values */
@@ -224,7 +229,6 @@ enum iscsi_timer_flags_table {
/* Used for struct iscsi_np->np_flags */
enum np_flags_table {
NPF_IP_NETWORK = 0x00,
- NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
};
/* Used for struct iscsi_np->np_thread_state */
@@ -240,10 +244,16 @@ struct iscsi_conn_ops {
u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
u8 DataDigest; /* [0,1] == [None,CRC32C] */
u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
+ u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
u8 OFMarker; /* [0,1] == [No,Yes] */
u8 IFMarker; /* [0,1] == [No,Yes] */
u32 OFMarkInt; /* [1..65535] */
u32 IFMarkInt; /* [1..65535] */
+ /*
+ * iSER specific connection parameters
+ */
+ u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
+ u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */
};
struct iscsi_sess_ops {
@@ -265,6 +275,10 @@ struct iscsi_sess_ops {
u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
u8 ErrorRecoveryLevel; /* [0..2] */
u8 SessionType; /* [0,1] == [Normal,Discovery]*/
+ /*
+ * iSER specific session parameters
+ */
+ u8 RDMAExtensions; /* [0,1] == [No,Yes] */
};
struct iscsi_queue_req {
@@ -284,6 +298,7 @@ struct iscsi_data_count {
};
struct iscsi_param_list {
+ bool iser;
struct list_head param_list;
struct list_head extra_response_list;
};
@@ -296,12 +311,11 @@ struct iscsi_datain_req {
u32 runlength;
u32 data_length;
u32 data_offset;
- u32 data_offset_end;
u32 data_sn;
u32 next_burst_len;
u32 read_data_done;
u32 seq_send_order;
- struct list_head dr_list;
+ struct list_head cmd_datain_node;
} ____cacheline_aligned;
struct iscsi_ooo_cmdsn {
@@ -357,12 +371,14 @@ struct iscsi_cmd {
u8 maxcmdsn_inc;
/* Immediate Unsolicited Dataout */
u8 unsolicited_data;
+ /* Reject reason code */
+ u8 reject_reason;
/* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
u16 logout_cid;
/* Command flags */
enum cmd_flags_table cmd_flags;
/* Initiator Task Tag assigned from Initiator */
- u32 init_task_tag;
+ itt_t init_task_tag;
/* Target Transfer Tag assigned from Target */
u32 targ_xfer_tag;
/* CmdSN assigned from Initiator */
@@ -381,8 +397,6 @@ struct iscsi_cmd {
u32 buf_ptr_size;
/* Used to store DataDigest */
u32 data_crc;
- /* Total size in bytes associated with command */
- u32 data_length;
/* Counter for MaxOutstandingR2T */
u32 outstanding_r2ts;
/* Next R2T Offset when DataSequenceInOrder=Yes */
@@ -420,6 +434,8 @@ struct iscsi_cmd {
u32 tx_size;
/* Buffer used for various purposes */
void *buf_ptr;
+ /* Used by SendTargets=[iqn.,eui.] discovery */
+ void *text_in_ptr;
/* See include/linux/dma-mapping.h */
enum dma_data_direction data_direction;
/* iSCSI PDU Header + CRC */
@@ -439,7 +455,6 @@ struct iscsi_cmd {
struct list_head datain_list;
/* R2T List */
struct list_head cmd_r2t_list;
- struct completion reject_comp;
/* Timer for DataOUT */
struct timer_list dataout_timer;
/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
@@ -464,34 +479,32 @@ struct iscsi_cmd {
/* Session the command is part of, used for connection recovery */
struct iscsi_session *sess;
/* list_head for connection list */
- struct list_head i_list;
+ struct list_head i_conn_node;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd se_cmd;
/* Sense buffer that will be mapped into outgoing status */
#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
- struct scatterlist *t_mem_sg;
- u32 t_mem_sg_nents;
-
u32 padding;
u8 pad_bytes[4];
struct scatterlist *first_data_sg;
u32 first_data_sg_off;
u32 kmapped_nents;
-
+ sense_reason_t sense_reason;
} ____cacheline_aligned;
struct iscsi_tmr_req {
bool task_reassign:1;
- u32 ref_cmd_sn;
u32 exp_data_sn;
+ struct iscsi_cmd *ref_cmd;
struct iscsi_conn_recovery *conn_recovery;
struct se_tmr_req *se_tmr_req;
};
struct iscsi_conn {
+ wait_queue_head_t queues_wq;
/* Authentication Successful for this connection */
u8 auth_complete;
/* State connection is currently in */
@@ -500,8 +513,6 @@ struct iscsi_conn {
u8 network_transport;
enum iscsi_timer_flags_table nopin_timer_flags;
enum iscsi_timer_flags_table nopin_response_timer_flags;
- u8 tx_immediate_queue;
- u8 tx_response_queue;
/* Used to know what thread encountered a transport failure */
u8 which_thread;
/* connection id assigned by the Initiator */
@@ -510,11 +521,11 @@ struct iscsi_conn {
u16 login_port;
u16 local_port;
int net_size;
+ int login_family;
u32 auth_id;
-#define CONNFLAG_SCTP_STRUCT_FILE 0x01
u32 conn_flags;
/* Used for iscsi_tx_login_rsp() */
- u32 login_itt;
+ itt_t login_itt;
u32 exp_statsn;
/* Per connection status sequence number */
u32 stat_sn;
@@ -524,8 +535,6 @@ struct iscsi_conn {
u32 of_marker;
/* Used for calculating OFMarker offset to next PDU */
u32 of_marker_offset;
- /* Complete Bad PDU for sending reject */
- unsigned char bad_hdr[ISCSI_HDR_LEN];
#define IPV6_ADDRESS_SPACE 48
unsigned char login_ip[IPV6_ADDRESS_SPACE];
unsigned char local_ip[IPV6_ADDRESS_SPACE];
@@ -548,9 +557,19 @@ struct iscsi_conn {
struct completion rx_half_close_comp;
/* socket used by this connection */
struct socket *sock;
+ void (*orig_data_ready)(struct sock *);
+ void (*orig_state_change)(struct sock *);
+#define LOGIN_FLAGS_READ_ACTIVE 1
+#define LOGIN_FLAGS_CLOSED 2
+#define LOGIN_FLAGS_READY 4
+ unsigned long login_flags;
+ struct delayed_work login_work;
+ struct delayed_work login_cleanup_work;
+ struct iscsi_login *login;
struct timer_list nopin_timer;
struct timer_list nopin_response_timer;
struct timer_list transport_timer;
+ struct task_struct *login_kworker;
/* Spinlock used for add/deleting cmd's from conn_cmd_list */
spinlock_t cmd_lock;
spinlock_t conn_usage_lock;
@@ -570,11 +589,15 @@ struct iscsi_conn {
struct list_head immed_queue_list;
struct list_head response_queue_list;
struct iscsi_conn_ops *conn_ops;
+ struct iscsi_login *conn_login;
+ struct iscsit_transport *conn_transport;
struct iscsi_param_list *param_list;
/* Used for per connection auth state machine */
void *auth_protocol;
+ void *context;
struct iscsi_login_thread_s *login_thread;
struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
struct iscsi_session *sess;
/* Pointer to thread_set in use for this conn's threads */
@@ -587,6 +610,7 @@ struct iscsi_conn_recovery {
u16 cid;
u32 cmd_count;
u32 maxrecvdatasegmentlength;
+ u32 maxxmitdatasegmentlength;
int ready_for_reallegiance;
struct list_head conn_recovery_cmd_list;
spinlock_t conn_recovery_cmd_lock;
@@ -606,7 +630,7 @@ struct iscsi_session {
/* state session is currently in */
u32 session_state;
/* session wide counter: initiator assigned task tag */
- u32 init_task_tag;
+ itt_t init_task_tag;
/* session wide counter: target assigned task tag */
u32 targ_xfer_tag;
u32 cmdsn_window;
@@ -627,14 +651,13 @@ struct iscsi_session {
/* Used for session reference counting */
int session_usage_count;
int session_waiting_on_uc;
- u32 cmd_pdus;
- u32 rsp_pdus;
- u64 tx_data_octets;
- u64 rx_data_octets;
- u32 conn_digest_errors;
- u32 conn_timeout_errors;
+ atomic_long_t cmd_pdus;
+ atomic_long_t rsp_pdus;
+ atomic_long_t tx_data_octets;
+ atomic_long_t rx_data_octets;
+ atomic_long_t conn_digest_errors;
+ atomic_long_t conn_timeout_errors;
u64 creation_time;
- spinlock_t session_stats_lock;
/* Number of active connections */
atomic_t nconn;
atomic_t session_continuation;
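Converting the per-session statistics to atomic_long_t is what allows the removal of session_stats_lock just above: each counter update becomes a single lockless read-modify-write instead of a locked u32/u64 update. A userspace analogue of the new pattern, with C11 atomics standing in for the kernel's atomic_long_t:

#include <stdatomic.h>

static atomic_long cmd_pdus;		/* was: u32 cmd_pdus + spinlock */

static void account_cmd_pdu(void)
{
	/* matches atomic_long_inc(&sess->cmd_pdus); no lock required */
	atomic_fetch_add_explicit(&cmd_pdus, 1, memory_order_relaxed);
}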
@@ -670,17 +693,22 @@ struct iscsi_login {
u8 first_request;
u8 version_min;
u8 version_max;
+ u8 login_complete;
+ u8 login_failed;
+ bool zero_tsih;
char isid[6];
u32 cmd_sn;
- u32 init_task_tag;
+ itt_t init_task_tag;
u32 initial_exp_statsn;
u32 rsp_length;
u16 cid;
u16 tsih;
- char *req;
- char *rsp;
+ char req[ISCSI_HDR_LEN];
+ char rsp[ISCSI_HDR_LEN];
char *req_buf;
char *rsp_buf;
+ struct iscsi_conn *conn;
+ struct iscsi_np *np;
} ____cacheline_aligned;
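Turning req/rsp from pointers into ISCSI_HDR_LEN arrays trades two small allocations for storage embedded in struct iscsi_login itself, tying the buffers' lifetime to the login — convenient now that login processing moves into delayed work. The difference in miniature (48 is the iSCSI basic header segment size; the struct names are illustrative):

struct login_before {
	char *req;		/* separately kmalloc'd and kfree'd */
};

struct login_after {
	char req[48];		/* ISCSI_HDR_LEN; freed with the parent */
};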
struct iscsi_node_attrib {
@@ -727,11 +755,6 @@ struct iscsi_node_acl {
struct se_node_acl se_node_acl;
};
-#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
-
-#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
-#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
-
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
@@ -741,6 +764,9 @@ struct iscsi_tpg_attrib {
u32 default_cmdsn_depth;
u32 demo_mode_write_protect;
u32 prod_mode_write_protect;
+ u32 demo_mode_discovery;
+ u32 default_erl;
+ u8 t10_pi;
struct iscsi_portal_group *tpg;
};
@@ -749,6 +775,7 @@ struct iscsi_np {
int np_ip_proto;
int np_sock_type;
enum np_thread_state_table np_thread_state;
+ bool enabled;
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
@@ -760,8 +787,10 @@ struct iscsi_np {
struct __kernel_sockaddr_storage np_sockaddr;
struct task_struct *np_thread;
struct timer_list np_login_timer;
- struct iscsi_portal_group *np_login_tpg;
+ void *np_context;
+ struct iscsit_transport *np_transport;
struct list_head np_list;
+ struct iscsi_tpg_np *tpg_np;
} ____cacheline_aligned;
struct iscsi_tpg_np {
@@ -773,6 +802,8 @@ struct iscsi_tpg_np {
struct list_head tpg_np_parent_list;
struct se_tpg_np se_tpg_np;
spinlock_t tpg_np_parent_lock;
+ struct completion tpg_np_comp;
+ struct kref tpg_np_kref;
};
struct iscsi_portal_group {
@@ -794,8 +825,9 @@ struct iscsi_portal_group {
spinlock_t tpg_state_lock;
struct se_portal_group tpg_se_tpg;
struct mutex tpg_access_lock;
- struct mutex np_login_lock;
+ struct semaphore np_login_sem;
struct iscsi_tpg_attrib tpg_attrib;
+ struct iscsi_node_auth tpg_demo_auth;
/* Pointer to default list of iSCSI parameters for TPG */
struct iscsi_param_list *param_list;
struct iscsi_tiqn *tpg_tiqn;
@@ -803,12 +835,6 @@ struct iscsi_portal_group {
struct list_head tpg_list;
} ____cacheline_aligned;
-#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
-#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
-#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
-#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
-#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
-
struct iscsi_wwn_stat_grps {
struct config_group iscsi_stat_group;
struct config_group iscsi_instance_group;
@@ -839,8 +865,6 @@ struct iscsi_tiqn {
struct iscsi_logout_stats logout_stats;
} ____cacheline_aligned;
-#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
-
struct iscsit_global {
/* In core shutdown */
u32 in_shutdown;
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 8c049512951..e93d5a7a3f8 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Target DataIN value generation functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -37,7 +35,7 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
" struct iscsi_datain_req\n");
return NULL;
}
- INIT_LIST_HEAD(&dr->dr_list);
+ INIT_LIST_HEAD(&dr->cmd_datain_node);
return dr;
}
@@ -45,14 +43,14 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
- list_add_tail(&dr->dr_list, &cmd->datain_list);
+ list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
spin_unlock(&cmd->datain_lock);
}
void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
- list_del(&dr->dr_list);
+ list_del(&dr->cmd_datain_node);
spin_unlock(&cmd->datain_lock);
kmem_cache_free(lio_dr_cache, dr);
@@ -63,8 +61,8 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
struct iscsi_datain_req *dr, *dr_tmp;
spin_lock(&cmd->datain_lock);
- list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
- list_del(&dr->dr_list);
+ list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) {
+ list_del(&dr->cmd_datain_node);
kmem_cache_free(lio_dr_cache, dr);
}
spin_unlock(&cmd->datain_lock);
@@ -72,17 +70,14 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
{
- struct iscsi_datain_req *dr;
-
if (list_empty(&cmd->datain_list)) {
pr_err("cmd->datain_list is empty for ITT:"
" 0x%08x\n", cmd->init_task_tag);
return NULL;
}
- list_for_each_entry(dr, &cmd->datain_list, dr_list)
- break;
- return dr;
+ return list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
+ cmd_datain_node);
}
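The open-coded "loop once and break" above fetched the list head's first element the long way; list_first_entry() is the idiomatic spelling. Assuming the standard definition, it is just container_of() applied to head->next:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* simplified; the kernel builds this from container_of() */
#define list_first_entry(head, type, member) \
	((type *)((char *)((head)->next) - offsetof(type, member)))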
/*
@@ -113,7 +108,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
- read_data_left = (cmd->data_length - read_data_done);
+ read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@@ -212,7 +207,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
- read_data_left = (cmd->data_length - read_data_done);
+ read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@@ -231,8 +226,8 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
offset = (seq->offset + seq->next_burst_len);
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
- cmd->data_length) {
- datain->length = (cmd->data_length - offset);
+ cmd->se_cmd.data_length) {
+ datain->length = (cmd->se_cmd.data_length - offset);
datain->offset = offset;
datain->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -264,7 +259,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
}
}
- if ((read_data_done + datain->length) == cmd->data_length)
+ if ((read_data_done + datain->length) == cmd->se_cmd.data_length)
datain->flags |= ISCSI_FLAG_DATA_STATUS;
datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
@@ -333,7 +328,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
- read_data_left = (cmd->data_length - read_data_done);
+ read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@@ -344,7 +339,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
if (!pdu)
return dr;
- if ((read_data_done + pdu->length) == cmd->data_length) {
+ if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) {
pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
@@ -433,7 +428,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
- read_data_left = (cmd->data_length - read_data_done);
+ read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@@ -463,7 +458,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
} else
seq->next_burst_len += pdu->length;
- if ((read_data_done + pdu->length) == cmd->data_length)
+ if ((read_data_done + pdu->length) == cmd->se_cmd.data_length)
pdu->flags |= ISCSI_FLAG_DATA_STATUS;
pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index f63ea35bc4a..7087c736daa 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -2,9 +2,7 @@
* This file contains the iSCSI Virtual Device and Disk Transport
* agnostic related functions.
*
- © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -28,25 +26,6 @@
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
-int iscsit_get_lun_for_tmr(
- struct iscsi_cmd *cmd,
- u64 lun)
-{
- u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
- return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
-}
-
-int iscsit_get_lun_for_cmd(
- struct iscsi_cmd *cmd,
- unsigned char *cdb,
- u64 lun)
-{
- u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
- return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
-}
-
void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
{
struct se_node_acl *se_nacl;
@@ -84,3 +63,4 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
mutex_unlock(&sess->cmdsn_mutex);
}
+EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
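The two wrappers removed above added nothing beyond unpacking the 8-byte LUN field from the PDU before calling the generic transport_lookup_*_lun() helpers, so their callers now do that directly. Roughly what scsilun_to_int() computes — a standalone rendering; the kernel version operates on struct scsi_lun:

#include <stdint.h>

static uint64_t scsilun_to_int_sketch(const uint8_t lun[8])
{
	uint64_t res = 0;
	int i;

	/* each 2-byte level of the REPORT LUNS format is byte-swapped in */
	for (i = 0; i < 8; i += 2)
		res |= ((uint64_t)lun[i] << ((i + 1) * 8)) |
		       ((uint64_t)lun[i + 1] << (i * 8));
	return res;
}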
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
index bef1cada15f..a0e2df9e809 100644
--- a/drivers/target/iscsi/iscsi_target_device.h
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -1,8 +1,6 @@
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H
-extern int iscsit_get_lun_for_tmr(struct iscsi_cmd *, u64);
-extern int iscsit_get_lun_for_cmd(struct iscsi_cmd *, unsigned char *, u64);
extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 478451167b6..0d1e6ee3e99 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -2,9 +2,7 @@
* This file contains error recovery level zero functions used by
* the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -48,9 +46,9 @@ void iscsit_set_dataout_sequence_values(
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
cmd->seq_end_offset = (cmd->write_data_done +
- (cmd->data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
+ ((cmd->se_cmd.data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
return;
}
@@ -59,15 +57,15 @@ void iscsit_set_dataout_sequence_values(
if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
cmd->seq_start_offset = cmd->write_data_done;
- cmd->seq_end_offset = (cmd->data_length >
+ cmd->seq_end_offset = (cmd->se_cmd.data_length >
conn->sess->sess_ops->MaxBurstLength) ?
(cmd->write_data_done +
- conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
+ conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length;
} else {
cmd->seq_start_offset = cmd->seq_end_offset;
cmd->seq_end_offset = ((cmd->seq_end_offset +
conn->sess->sess_ops->MaxBurstLength) >=
- cmd->data_length) ? cmd->data_length :
+ cmd->se_cmd.data_length) ? cmd->se_cmd.data_length :
(cmd->seq_end_offset +
conn->sess->sess_ops->MaxBurstLength);
}
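The rewrite above only substitutes se_cmd.data_length for the removed iscsi_cmd copy; the window arithmetic itself is unchanged. Stripped of the struct plumbing: an unsolicited sequence may cover at most FirstBurstLength bytes and a solicited one at most MaxBurstLength, both clamped to the total expected transfer length. For example:

#include <stdint.h>

static uint32_t dataout_seq_end(uint32_t done, uint32_t burst_limit,
				uint32_t data_length)
{
	/* e.g. done=0, burst_limit=65536, data_length=4096 -> 4096 */
	uint32_t end = done + burst_limit;

	return end > data_length ? data_length : end;
}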
@@ -95,14 +93,15 @@ static int iscsit_dataout_within_command_recovery_check(
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
- (cmd->write_data_done != hdr->offset))
+ cmd->write_data_done != be32_to_cpu(hdr->offset))
goto dump;
cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
} else {
struct iscsi_seq *seq;
- seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
+ payload_length);
if (!seq)
return DATAOUT_CANNOT_RECOVER;
/*
@@ -111,15 +110,15 @@ static int iscsit_dataout_within_command_recovery_check(
cmd->seq_ptr = seq;
if (conn->sess->sess_ops->DataPDUInOrder) {
- if ((seq->status ==
- DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
- ((seq->offset != hdr->offset) ||
- (seq->data_sn != hdr->datasn)))
+ if (seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
+ (seq->offset != be32_to_cpu(hdr->offset) ||
+ seq->data_sn != be32_to_cpu(hdr->datasn)))
goto dump;
} else {
- if ((seq->status ==
- DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
- (seq->data_sn != hdr->datasn))
+ if (seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY &&
+ seq->data_sn != be32_to_cpu(hdr->datasn))
goto dump;
}
@@ -148,12 +147,12 @@ static int iscsit_dataout_check_unsolicited_sequence(
u32 payload_length = ntoh24(hdr->dlength);
- if ((hdr->offset < cmd->seq_start_offset) ||
- ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
+ ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
pr_err("Command ITT: 0x%08x with Offset: %u,"
" Length: %u outside of Unsolicited Sequence %u:%u while"
" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
- hdr->offset, payload_length, cmd->seq_start_offset,
+ be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
cmd->seq_end_offset);
return DATAOUT_CANNOT_RECOVER;
}
@@ -182,13 +181,13 @@ static int iscsit_dataout_check_unsolicited_sequence(
if (!conn->sess->sess_ops->DataPDUInOrder)
goto out;
- if ((first_burst_len != cmd->data_length) &&
+ if ((first_burst_len != cmd->se_cmd.data_length) &&
(first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
pr_err("Unsolicited non-immediate data"
" received %u does not equal FirstBurstLength: %u, and"
" does not equal ExpXferLen %u.\n", first_burst_len,
conn->sess->sess_ops->FirstBurstLength,
- cmd->data_length);
+ cmd->se_cmd.data_length);
transport_send_check_condition_and_sense(&cmd->se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return DATAOUT_CANNOT_RECOVER;
@@ -201,10 +200,10 @@ static int iscsit_dataout_check_unsolicited_sequence(
conn->sess->sess_ops->FirstBurstLength);
return DATAOUT_CANNOT_RECOVER;
}
- if (first_burst_len == cmd->data_length) {
+ if (first_burst_len == cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x reached"
" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
- " error.\n", cmd->init_task_tag, cmd->data_length);
+ " error.\n", cmd->init_task_tag, cmd->se_cmd.data_length);
return DATAOUT_CANNOT_RECOVER;
}
}
@@ -236,12 +235,12 @@ static int iscsit_dataout_check_sequence(
* fulfilling a Recovery R2T, it's best to just dump the
* payload here, instead of erroring out.
*/
- if ((hdr->offset < cmd->seq_start_offset) ||
- ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
+ ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
pr_err("Command ITT: 0x%08x with Offset: %u,"
" Length: %u outside of Sequence %u:%u while"
" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
- hdr->offset, payload_length, cmd->seq_start_offset,
+ be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
cmd->seq_end_offset);
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
@@ -251,7 +250,8 @@ static int iscsit_dataout_check_sequence(
next_burst_len = (cmd->next_burst_len + payload_length);
} else {
- seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
+ payload_length);
if (!seq)
return DATAOUT_CANNOT_RECOVER;
/*
@@ -294,7 +294,7 @@ static int iscsit_dataout_check_sequence(
if ((next_burst_len <
conn->sess->sess_ops->MaxBurstLength) &&
((cmd->write_data_done + payload_length) <
- cmd->data_length)) {
+ cmd->se_cmd.data_length)) {
pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
" before end of DataOUT sequence, protocol"
" error.\n", cmd->init_task_tag);
@@ -319,7 +319,7 @@ static int iscsit_dataout_check_sequence(
return DATAOUT_CANNOT_RECOVER;
}
if ((cmd->write_data_done + payload_length) ==
- cmd->data_length) {
+ cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x reached"
" last DataOUT PDU in sequence but ISCSI_FLAG_"
"CMD_FINAL is not set, protocol error.\n",
@@ -366,16 +366,16 @@ static int iscsit_dataout_check_datasn(
data_sn = seq->data_sn;
}
- if (hdr->datasn > data_sn) {
+ if (be32_to_cpu(hdr->datasn) > data_sn) {
pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
" higher than expected 0x%08x.\n", cmd->init_task_tag,
- hdr->datasn, data_sn);
+ be32_to_cpu(hdr->datasn), data_sn);
recovery = 1;
goto recover;
- } else if (hdr->datasn < data_sn) {
+ } else if (be32_to_cpu(hdr->datasn) < data_sn) {
pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
" lower than expected 0x%08x, discarding payload.\n",
- cmd->init_task_tag, hdr->datasn, data_sn);
+ cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
dump = 1;
goto dump;
}
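This file's recurring change is the explicit be32_to_cpu() on hdr->offset and hdr->datasn: iSCSI header fields are big-endian on the wire, and typing them __be32 lets sparse catch any missing conversion. Open-coded, the conversion is simply:

#include <stdint.h>

static uint32_t be32_sketch(const uint8_t p[4])
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}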
@@ -408,33 +408,34 @@ static int iscsit_dataout_pre_datapduinorder_yes(
/*
* For DataSequenceInOrder=Yes: If the offset is greater than the global
* DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protocol error has
- * occured and fail the connection.
+ * occurred and fail the connection.
*
* For DataSequenceInOrder=No: If the offset is greater than the per
* sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol
- * error has occured and fail the connection.
+ * error has occurred and fail the connection.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
- if (hdr->offset != cmd->write_data_done) {
+ if (be32_to_cpu(hdr->offset) != cmd->write_data_done) {
pr_err("Command ITT: 0x%08x, received offset"
" %u different than expected %u.\n", cmd->init_task_tag,
- hdr->offset, cmd->write_data_done);
+ be32_to_cpu(hdr->offset), cmd->write_data_done);
recovery = 1;
goto recover;
}
} else {
struct iscsi_seq *seq = cmd->seq_ptr;
- if (hdr->offset > seq->offset) {
+ if (be32_to_cpu(hdr->offset) > seq->offset) {
pr_err("Command ITT: 0x%08x, received offset"
" %u greater than expected %u.\n", cmd->init_task_tag,
- hdr->offset, seq->offset);
+ be32_to_cpu(hdr->offset), seq->offset);
recovery = 1;
goto recover;
- } else if (hdr->offset < seq->offset) {
+ } else if (be32_to_cpu(hdr->offset) < seq->offset) {
pr_err("Command ITT: 0x%08x, received offset"
" %u less than expected %u, discarding payload.\n",
- cmd->init_task_tag, hdr->offset, seq->offset);
+ cmd->init_task_tag, be32_to_cpu(hdr->offset),
+ seq->offset);
dump = 1;
goto dump;
}
@@ -453,7 +454,7 @@ dump:
return DATAOUT_CANNOT_RECOVER;
return (recovery) ? iscsit_recover_dataout_sequence(cmd,
- hdr->offset, payload_length) :
+ be32_to_cpu(hdr->offset), payload_length) :
(dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
}
@@ -465,7 +466,8 @@ static int iscsit_dataout_pre_datapduinorder_no(
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
- pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
+ pdu = iscsit_get_pdu_holder(cmd, be32_to_cpu(hdr->offset),
+ payload_length);
if (!pdu)
return DATAOUT_CANNOT_RECOVER;
@@ -479,7 +481,7 @@ static int iscsit_dataout_pre_datapduinorder_no(
case ISCSI_PDU_RECEIVED_OK:
pr_err("Command ITT: 0x%08x received already gotten"
" Offset: %u, Length: %u\n", cmd->init_task_tag,
- hdr->offset, payload_length);
+ be32_to_cpu(hdr->offset), payload_length);
return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
default:
return DATAOUT_CANNOT_RECOVER;
@@ -553,7 +555,7 @@ static int iscsit_dataout_post_crc_passed(
if (cmd->unsolicited_data) {
if ((cmd->first_burst_len + payload_length) ==
conn->sess->sess_ops->FirstBurstLength) {
- if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ if (iscsit_dataout_update_r2t(cmd, be32_to_cpu(hdr->offset),
payload_length) < 0)
return DATAOUT_CANNOT_RECOVER;
send_r2t = 1;
@@ -561,7 +563,8 @@ static int iscsit_dataout_post_crc_passed(
if (!conn->sess->sess_ops->DataPDUInOrder) {
ret = iscsit_dataout_update_datapduinorder_no(cmd,
- hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ be32_to_cpu(hdr->datasn),
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
if (ret == DATAOUT_CANNOT_RECOVER)
return ret;
}
@@ -586,7 +589,8 @@ static int iscsit_dataout_post_crc_passed(
if (conn->sess->sess_ops->DataSequenceInOrder) {
if ((cmd->next_burst_len + payload_length) ==
conn->sess->sess_ops->MaxBurstLength) {
- if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ if (iscsit_dataout_update_r2t(cmd,
+ be32_to_cpu(hdr->offset),
payload_length) < 0)
return DATAOUT_CANNOT_RECOVER;
send_r2t = 1;
@@ -594,7 +598,7 @@ static int iscsit_dataout_post_crc_passed(
if (!conn->sess->sess_ops->DataPDUInOrder) {
ret = iscsit_dataout_update_datapduinorder_no(
- cmd, hdr->datasn,
+ cmd, be32_to_cpu(hdr->datasn),
(hdr->flags & ISCSI_FLAG_CMD_FINAL));
if (ret == DATAOUT_CANNOT_RECOVER)
return ret;
@@ -610,7 +614,8 @@ static int iscsit_dataout_post_crc_passed(
if ((seq->next_burst_len + payload_length) ==
seq->xfer_len) {
- if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ if (iscsit_dataout_update_r2t(cmd,
+ be32_to_cpu(hdr->offset),
payload_length) < 0)
return DATAOUT_CANNOT_RECOVER;
send_r2t = 1;
@@ -618,7 +623,7 @@ static int iscsit_dataout_post_crc_passed(
if (!conn->sess->sess_ops->DataPDUInOrder) {
ret = iscsit_dataout_update_datapduinorder_no(
- cmd, hdr->datasn,
+ cmd, be32_to_cpu(hdr->datasn),
(hdr->flags & ISCSI_FLAG_CMD_FINAL));
if (ret == DATAOUT_CANNOT_RECOVER)
return ret;
@@ -640,9 +645,12 @@ static int iscsit_dataout_post_crc_passed(
cmd->write_data_done += payload_length;
- return (cmd->write_data_done == cmd->data_length) ?
- DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
- DATAOUT_SEND_R2T : DATAOUT_NORMAL;
+ if (cmd->write_data_done == cmd->se_cmd.data_length)
+ return DATAOUT_SEND_TO_TRANSPORT;
+ else if (send_r2t)
+ return DATAOUT_SEND_R2T;
+ else
+ return DATAOUT_NORMAL;
}
static int iscsit_dataout_post_crc_failed(
@@ -675,14 +683,15 @@ static int iscsit_dataout_post_crc_failed(
}
recover:
- return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
+ return iscsit_recover_dataout_sequence(cmd, be32_to_cpu(hdr->offset),
+ payload_length);
}
/*
* Called from iscsit_handle_data_out() before DataOUT Payload is received
* and CRC computed.
*/
-extern int iscsit_check_pre_dataout(
+int iscsit_check_pre_dataout(
struct iscsi_cmd *cmd,
unsigned char *buf)
{
@@ -735,13 +744,12 @@ int iscsit_check_post_dataout(
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from DataOUT CRC"
" failure while ERL=0, closing session.\n");
- iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
- 1, 0, buf, cmd);
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ buf);
return DATAOUT_CANNOT_RECOVER;
}
- iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
- 0, 0, buf, cmd);
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
return iscsit_dataout_post_crc_failed(cmd, buf);
}
}
@@ -749,7 +757,7 @@ int iscsit_check_post_dataout(
static void iscsit_handle_time2retain_timeout(unsigned long data)
{
struct iscsi_session *sess = (struct iscsi_session *) data;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
@@ -777,25 +785,25 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
- sess->conn_timeout_errors++;
+ atomic_long_inc(&sess->conn_timeout_errors);
spin_unlock(&tiqn->sess_err_stats.lock);
}
}
spin_unlock_bh(&se_tpg->session_lock);
- iscsit_close_session(sess);
+ target_put_session(sess->se_sess);
}
-extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+void iscsit_start_time2retain_handler(struct iscsi_session *sess)
{
int tpg_active;
/*
- * Only start Time2Retain timer when the assoicated TPG is still in
+ * Only start Time2Retain timer when the associated TPG is still in
* an ACTIVE (eg: not disabled or shutdown) state.
*/
- spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
- tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
- spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+ spin_lock(&sess->tpg->tpg_state_lock);
+ tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&sess->tpg->tpg_state_lock);
if (!tpg_active)
return;
@@ -819,9 +827,9 @@ extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
/*
* Called with spin_lock_bh(&struct se_portal_group->session_lock) held
*/
-extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
@@ -831,11 +839,11 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
return 0;
sess->time2retain_timer_flags |= ISCSI_TF_STOP;
- spin_unlock_bh(&se_tpg->session_lock);
+ spin_unlock(&se_tpg->session_lock);
del_timer_sync(&sess->time2retain_timer);
- spin_lock_bh(&se_tpg->session_lock);
+ spin_lock(&se_tpg->session_lock);
sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
pr_debug("Stopped Time2Retain Timer for SID: %u\n",
sess->sid);
@@ -898,6 +906,7 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
wait_for_completion(&conn->conn_wait_comp);
complete(&conn->conn_post_wait_comp);
}
+EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
{
@@ -923,7 +932,7 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
}
}
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 006f605edb0..cda4d80cfae 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains error recovery level one used by the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -22,6 +20,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
@@ -53,6 +52,9 @@ int iscsit_dump_data_payload(
u32 length, padding, offset = 0, size;
struct kvec iov;
+ if (conn->sess->sess_ops->RDMAExtensions)
+ return 0;
+
length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
buf = kzalloc(length, GFP_ATOMIC);
@@ -158,9 +160,8 @@ static int iscsit_handle_r2t_snack(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (runlength) {
@@ -169,8 +170,8 @@ static int iscsit_handle_r2t_snack(
" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
" current R2TSN: 0x%08x, protocol error.\n",
cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
last_r2tsn = (begrun + runlength);
} else
@@ -279,11 +280,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
* seq->first_datasn and seq->last_datasn have not been set.
*/
if (!seq->sent) {
-#if 0
pr_err("Ignoring non-sent sequence 0x%08x ->"
" 0x%08x\n\n", seq->first_datasn,
seq->last_datasn);
-#endif
continue;
}
@@ -294,11 +293,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
*/
if ((seq->first_datasn < begrun) &&
(seq->last_datasn < begrun)) {
-#if 0
pr_err("Pre BegRun sequence 0x%08x ->"
" 0x%08x\n", seq->first_datasn,
seq->last_datasn);
-#endif
+
read_data_done += cmd->seq_list[i].xfer_len;
seq->next_burst_len = seq->pdu_send_order = 0;
continue;
@@ -309,11 +307,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
*/
if ((seq->first_datasn <= begrun) &&
(seq->last_datasn >= begrun)) {
-#if 0
pr_err("Found sequence begrun: 0x%08x in"
" 0x%08x -> 0x%08x\n", begrun,
seq->first_datasn, seq->last_datasn);
-#endif
+
seq_send_order = seq->seq_send_order;
data_sn = seq->first_datasn;
seq->next_burst_len = seq->pdu_send_order = 0;
@@ -369,10 +366,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
*/
if ((seq->first_datasn > begrun) ||
(seq->last_datasn > begrun)) {
-#if 0
pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
seq->first_datasn, seq->last_datasn);
-#endif
+
seq->next_burst_len = seq->pdu_send_order = 0;
continue;
}
@@ -434,8 +430,7 @@ static int iscsit_handle_recovery_datain(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
/*
@@ -446,14 +441,14 @@ static int iscsit_handle_recovery_datain(
pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
": 0x%08x greater than maximum DataSN: 0x%08x.\n",
begrun, runlength, (cmd->data_sn - 1));
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID,
+ buf);
}
dr = iscsit_allocate_datain_req();
if (!dr)
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ buf);
dr->data_sn = dr->begrun = begrun;
dr->runlength = runlength;
@@ -471,7 +466,7 @@ static int iscsit_handle_recovery_datain(
int iscsit_handle_recovery_datain_or_r2t(
struct iscsi_conn *conn,
unsigned char *buf,
- u32 init_task_tag,
+ itt_t init_task_tag,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
@@ -503,7 +498,7 @@ int iscsit_handle_recovery_datain_or_r2t(
/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
int iscsit_handle_status_snack(
struct iscsi_conn *conn,
- u32 init_task_tag,
+ itt_t init_task_tag,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
@@ -512,7 +507,9 @@ int iscsit_handle_status_snack(
u32 last_statsn;
int found_cmd;
- if (conn->exp_statsn > begrun) {
+ if (!begrun) {
+ begrun = conn->exp_statsn;
+ } else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
@@ -526,7 +523,7 @@ int iscsit_handle_status_snack(
found_cmd = 0;
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->stat_sn == begrun) {
found_cmd = 1;
break;
@@ -824,7 +821,7 @@ static int iscsit_attach_ooo_cmdsn(
/*
* CmdSN is greater than the tail of the list.
*/
- if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+ if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
list_add_tail(&ooo_cmdsn->ooo_list,
&sess->sess_ooo_cmdsn_list);
else {
@@ -834,11 +831,12 @@ static int iscsit_attach_ooo_cmdsn(
*/
list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
ooo_list) {
- if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+ if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
continue;
+ /* Insert before this entry */
list_add(&ooo_cmdsn->ooo_list,
- &ooo_tmp->ooo_list);
+ ooo_tmp->ooo_list.prev);
break;
}
}
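Replacing a plain '<' with iscsi_sna_lt() in the ordered-insert walk matters once CmdSN wraps: 0x00000001 must sort after 0xffffffff. The usual serial-number-arithmetic sketch — the kernel's macro is written differently but to the same effect:

#include <stdbool.h>
#include <stdint.h>

static bool sna_lt(uint32_t a, uint32_t b)
{
	/* true when a precedes b on the 32-bit serial-number circle */
	return a != b && (int32_t)(a - b) < 0;
}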
@@ -924,6 +922,7 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct iscsi_conn *conn = cmd->conn;
int lr = 0;
spin_lock_bh(&cmd->istate_lock);
@@ -934,11 +933,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
case ISCSI_OP_SCSI_CMD:
/*
* Go ahead and send the CHECK_CONDITION status for
- * any SCSI CDB exceptions that may have occurred, also
- * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+ * any SCSI CDB exceptions that may have occurred.
*/
- if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
+ if (cmd->sense_reason) {
+ if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
@@ -961,7 +959,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* exception
*/
return transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
+ cmd->sense_reason, 0);
}
/*
* Special case for delayed CmdSN with Immediate
@@ -970,8 +968,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if (cmd->immediate_data) {
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
spin_unlock_bh(&cmd->istate_lock);
- return transport_generic_handle_data(
- &cmd->se_cmd);
+ target_execute_cmd(&cmd->se_cmd);
+ return 0;
}
spin_unlock_bh(&cmd->istate_lock);
@@ -987,7 +985,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
return 0;
iscsit_set_dataout_sequence_values(cmd);
- iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
+ conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
}
return 0;
}
@@ -1005,10 +1003,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if (transport_check_aborted_status(se_cmd, 1) != 0)
return 0;
- iscsit_set_dataout_sequence_values(cmd);
- spin_lock_bh(&cmd->dataout_timeout_lock);
- iscsit_start_dataout_timer(cmd, cmd->conn);
- spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_set_unsoliticed_dataout(cmd);
}
return transport_handle_cdb_direct(&cmd->se_cmd);
@@ -1018,7 +1013,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
break;
case ISCSI_OP_SCSI_TMFUNC:
- if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ if (cmd->se_cmd.se_tmr_req->response) {
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
cmd->i_state);
@@ -1093,7 +1088,7 @@ int iscsit_handle_ooo_cmdsn(
ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
if (!ooo_cmdsn)
- return CMDSN_ERROR_CANNOT_RECOVER;
+ return -ENOMEM;
ooo_cmdsn->cmd = cmd;
ooo_cmdsn->batch_count = (batch) ?
@@ -1104,10 +1099,10 @@ int iscsit_handle_ooo_cmdsn(
if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
- return CMDSN_ERROR_CANNOT_RECOVER;
+ return -ENOMEM;
}
- return CMDSN_HIGHER_THAN_EXP;
+ return 0;
}
static int iscsit_set_dataout_timeout_values(
@@ -1121,8 +1116,8 @@ static int iscsit_set_dataout_timeout_values(
if (cmd->unsolicited_data) {
*offset = 0;
*length = (conn->sess->sess_ops->FirstBurstLength >
- cmd->data_length) ?
- cmd->data_length :
+ cmd->se_cmd.data_length) ?
+ cmd->se_cmd.data_length :
conn->sess->sess_ops->FirstBurstLength;
return 0;
}
@@ -1193,8 +1188,8 @@ static void iscsit_handle_dataout_timeout(unsigned long data)
if (conn->sess->sess_ops->DataPDUInOrder) {
pdu_offset = cmd->write_data_done;
if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
- cmd->next_burst_len)) > cmd->data_length)
- pdu_length = (cmd->data_length -
+ cmd->next_burst_len)) > cmd->se_cmd.data_length)
+ pdu_length = (cmd->se_cmd.data_length -
cmd->write_data_done);
else
pdu_length = (conn->sess->sess_ops->MaxBurstLength -
@@ -1296,3 +1291,4 @@ void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
cmd->init_task_tag);
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
+EXPORT_SYMBOL(iscsit_stop_dataout_timer);
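The EXPORT_SYMBOL() additions scattered through this series (iscsit_increment_maxcmdsn, iscsit_cause_connection_reinstatement, iscsit_stop_dataout_timer) share one purpose: the new iscsit_transport drivers such as iSER are built as separate modules and must be able to link against these core helpers. Schematically:

/* core module: definition plus export */
void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd);
EXPORT_SYMBOL(iscsit_stop_dataout_timer);

/* transport module: the extern declaration (via the shared header)
 * resolves against the exported symbol at module load time */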
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
index 85e67e29de6..2a3ebf118a3 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.h
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -7,8 +7,8 @@ extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
struct iscsi_cmd *, struct iscsi_datain_req *);
extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
- u32, u32, u32, u32);
-extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32,
+ itt_t, u32, u32, u32);
+extern int iscsit_handle_status_snack(struct iscsi_conn *, itt_t, u32,
u32, u32);
extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 1af1f21af21..4ca8fd2a70d 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -2,9 +2,7 @@
* This file contains error recovery level two functions used by
* the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -36,7 +34,7 @@
*/
void iscsit_create_conn_recovery_datain_values(
struct iscsi_cmd *cmd,
- u32 exp_data_sn)
+ __be32 exp_data_sn)
{
u32 data_sn = 0;
struct iscsi_conn *conn = cmd->conn;
@@ -44,7 +42,7 @@ void iscsit_create_conn_recovery_datain_values(
cmd->next_burst_len = 0;
cmd->read_data_done = 0;
- while (exp_data_sn > data_sn) {
+ while (be32_to_cpu(exp_data_sn) > data_sn) {
if ((cmd->next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
@@ -138,12 +136,12 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
- &cr->conn_recovery_cmd_list, i_list) {
+ &cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_list);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -160,12 +158,12 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
- &cr->conn_recovery_cmd_list, i_list) {
+ &cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_list);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -193,15 +191,13 @@ int iscsit_remove_active_connection_recovery_entry(
return 0;
}
-int iscsit_remove_inactive_connection_recovery_entry(
+static void iscsit_remove_inactive_connection_recovery_entry(
struct iscsi_conn_recovery *cr,
struct iscsi_session *sess)
{
spin_lock(&sess->cr_i_lock);
list_del(&cr->cr_list);
spin_unlock(&sess->cr_i_lock);
-
- return 0;
}
/*
@@ -220,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
}
cr = cmd->cr;
- list_del(&cmd->i_list);
+ list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
@@ -234,7 +230,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
- &cr->conn_recovery_cmd_list, i_list) {
+ &cr->conn_recovery_cmd_list, i_conn_node) {
if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
(cmd->deferred_i_state != ISTATE_REMOVE)) ||
@@ -250,7 +246,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -297,14 +293,14 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
mutex_unlock(&sess->cmdsn_mutex);
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
- list_del(&cmd->i_list);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
@@ -339,14 +335,14 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
- * list_del(&cmd->i_list); to release the command to the
+ * list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
* sending the TMR response.
*/
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
(cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
@@ -355,9 +351,9 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
- list_del(&cmd->i_list);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -374,10 +370,10 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
* made generic here.
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
- (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
- list_del(&cmd->i_list);
+ iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- iscsit_free_cmd(cmd);
+ iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -397,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd->sess = conn->sess;
- list_del(&cmd->i_list);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
@@ -407,7 +403,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
* Add the struct iscsi_cmd to the connection recovery cmd list
*/
spin_lock(&cr->conn_recovery_cmd_lock);
- list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock_bh(&conn->cmd_lock);
@@ -421,6 +417,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cr->cid = conn->cid;
cr->cmd_count = cmd_count;
cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
+ cr->maxxmitdatasegmentlength = conn->conn_ops->MaxXmitDataSegmentLength;
cr->sess = conn->sess;
iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
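
Every hunk in this file repeats one teardown shape: walk with list_for_each_entry_safe(), detach via list_del_init(), then drop the spinlock across iscsit_free_cmd() before retaking it. list_del_init() rather than list_del() matters because it leaves the removed node self-referencing, so later list_empty()-style checks on the command remain well defined. A generic sketch of the pattern, using illustrative names (demo_cmd, demo_drain) rather than the driver's:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_cmd {
	struct list_head node;
};

static void demo_drain(struct list_head *head, spinlock_t *lock)
{
	struct demo_cmd *cmd, *tmp;

	spin_lock(lock);
	list_for_each_entry_safe(cmd, tmp, head, node) {
		list_del_init(&cmd->node);
		/*
		 * The free path may sleep, so the lock is dropped here;
		 * this is only safe when teardown is single-threaded,
		 * which keeps the cached 'tmp' pointer valid.
		 */
		spin_unlock(lock);
		kfree(cmd);
		spin_lock(lock);
	}
	spin_unlock(lock);
}
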
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 22f8d24780a..63f2501f3fe 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -1,7 +1,7 @@
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H
-extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32);
+extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsi_session *, u16);
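
Matching the erl2.c change above, the header now documents that iscsit_create_conn_recovery_datain_values() takes the DataIn SNACK's BegRun exactly as it arrived on the wire (__be32) and performs the single be32_to_cpu() conversion itself, keeping the endian boundary in one place. A hedged sketch of a caller under that convention; demo_handle_datain_snack() is illustrative, assuming the SNACK header layout from <scsi/iscsi_proto.h>:

#include <scsi/iscsi_proto.h>
#include "iscsi_target_core.h"
#include "iscsi_target_erl2.h"

static void demo_handle_datain_snack(struct iscsi_cmd *cmd,
				     struct iscsi_snack *hdr)
{
	/*
	 * hdr->begrun is __be32; it flows through untouched and is
	 * byte-swapped once, inside the callee's while loop.
	 */
	iscsit_create_conn_recovery_datain_values(cmd, hdr->begrun);
}
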
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 38cb7ce8469..5e71ac60941 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the login functions used by the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -21,6 +19,7 @@
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/crypto.h>
+#include <linux/idr.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -38,12 +37,41 @@
#include "iscsi_target.h"
#include "iscsi_target_parameters.h"
-extern struct idr sess_idr;
-extern struct mutex auth_id_lock;
-extern spinlock_t sess_idr_lock;
+#include <target/iscsi/iscsi_transport.h>
-static int iscsi_login_init_conn(struct iscsi_conn *conn)
+static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
{
+ struct iscsi_login *login;
+
+ login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+ if (!login) {
+ pr_err("Unable to allocate memory for struct iscsi_login.\n");
+ return NULL;
+ }
+ conn->login = login;
+ login->conn = conn;
+ login->first_request = 1;
+
+ login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->req_buf) {
+ pr_err("Unable to allocate memory for response buffer.\n");
+ goto out_login;
+ }
+
+ login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->rsp_buf) {
+ pr_err("Unable to allocate memory for request buffer.\n");
+ goto out_req_buf;
+ }
+
+ conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+ if (!conn->conn_ops) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_conn_ops.\n");
+ goto out_rsp_buf;
+ }
+
+ init_waitqueue_head(&conn->queues_wq);
INIT_LIST_HEAD(&conn->conn_list);
INIT_LIST_HEAD(&conn->conn_cmd_list);
INIT_LIST_HEAD(&conn->immed_queue_list);
@@ -64,10 +92,21 @@ static int iscsi_login_init_conn(struct iscsi_conn *conn)
if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
pr_err("Unable to allocate conn->conn_cpumask\n");
- return -ENOMEM;
+ goto out_conn_ops;
}
+ conn->conn_login = login;
- return 0;
+ return login;
+
+out_conn_ops:
+ kfree(conn->conn_ops);
+out_rsp_buf:
+ kfree(login->rsp_buf);
+out_req_buf:
+ kfree(login->req_buf);
+out_login:
+ kfree(login);
+ return NULL;
}
/*
@@ -129,13 +168,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
initiatorname_param = iscsi_find_param_from_key(
INITIATORNAME, conn->param_list);
- if (!initiatorname_param)
- return -1;
-
sessiontype_param = iscsi_find_param_from_key(
SESSIONTYPE, conn->param_list);
- if (!sessiontype_param)
+ if (!initiatorname_param || !sessiontype_param) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
return -1;
+ }
sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
@@ -180,23 +219,25 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- return iscsit_close_session(sess);
+ target_put_session(sess->se_sess);
+ return 0;
}
spin_unlock_bh(&sess->conn_lock);
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- return iscsit_close_session(sess);
+ target_put_session(sess->se_sess);
+ return 0;
}
static void iscsi_login_set_conn_values(
struct iscsi_session *sess,
struct iscsi_conn *conn,
- u16 cid)
+ __be16 cid)
{
conn->sess = sess;
- conn->cid = cid;
+ conn->cid = be16_to_cpu(cid);
/*
* Generate a random Status sequence number (statsn) for the new
* iSCSI connection.
@@ -208,6 +249,28 @@ static void iscsi_login_set_conn_values(
mutex_unlock(&auth_id_lock);
}
+static __printf(2, 3) int iscsi_change_param_sprintf(
+ struct iscsi_conn *conn,
+ const char *fmt, ...)
+{
+ va_list args;
+ unsigned char buf[64];
+
+ memset(buf, 0, sizeof buf);
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof buf, fmt, args);
+ va_end(args);
+
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* This is the leading connection of a new session,
* or session reinstatement.
@@ -218,6 +281,8 @@ static int iscsi_login_zero_tsih_s1(
{
struct iscsi_session *sess = NULL;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+ enum target_prot_op sup_pro_ops;
+ int ret;
sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
if (!sess) {
@@ -230,7 +295,7 @@ static int iscsi_login_zero_tsih_s1(
iscsi_login_set_conn_values(sess, conn, pdu->cid);
sess->init_task_tag = pdu->itt;
memcpy(&sess->isid, pdu->isid, 6);
- sess->exp_cmd_sn = pdu->cmdsn;
+ sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
INIT_LIST_HEAD(&sess->sess_conn_list);
INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
INIT_LIST_HEAD(&sess->cr_active_list);
@@ -246,24 +311,28 @@ static int iscsi_login_zero_tsih_s1(
spin_lock_init(&sess->session_usage_lock);
spin_lock_init(&sess->ttt_lock);
- if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
- pr_err("idr_pre_get() for sess_idr failed\n");
+ idr_preload(GFP_KERNEL);
+ spin_lock_bh(&sess_idr_lock);
+ ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ sess->session_index = ret;
+ spin_unlock_bh(&sess_idr_lock);
+ idr_preload_end();
+
+ if (ret < 0) {
+ pr_err("idr_alloc() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
kfree(sess);
return -ENOMEM;
}
- spin_lock(&sess_idr_lock);
- idr_get_new(&sess_idr, NULL, &sess->session_index);
- spin_unlock(&sess_idr_lock);
sess->creation_time = get_jiffies_64();
- spin_lock_init(&sess->session_stats_lock);
/*
* The FFP CmdSN window values will be allocated from the TPG's
* Initiator Node's ACL once the login has been successfully completed.
*/
- sess->max_cmd_sn = pdu->cmdsn;
+ sess->max_cmd_sn = be32_to_cpu(pdu->cmdsn);
sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
if (!sess->sess_ops) {
@@ -274,8 +343,9 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
+ sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
- sess->se_sess = transport_init_session();
+ sess->se_sess = transport_init_session(sup_pro_ops);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -291,7 +361,7 @@ static int iscsi_login_zero_tsih_s2(
{
struct iscsi_node_attrib *na;
struct iscsi_session *sess = conn->sess;
- unsigned char buf[32];
+ bool iser = false;
sess->tpg = conn->tpg;
@@ -299,21 +369,24 @@ static int iscsi_login_zero_tsih_s2(
* Assign a new TPG Session Handle. Note this is protected with
* struct iscsi_portal_group->np_login_sem from iscsit_access_np().
*/
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
if (!sess->tsih)
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
/*
* Create the default params from user defined values..
*/
if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+ conn->tpg->param_list, 1) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
- iscsi_set_keys_to_negotiate(0, conn->param_list);
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+ iser = true;
+
+ iscsi_set_keys_to_negotiate(conn->param_list, iser);
if (sess->sess_ops->SessionType)
return iscsi_set_keys_irrelevant_for_discovery(
@@ -328,29 +401,80 @@ static int iscsi_login_zero_tsih_s2(
*
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
- memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
return -1;
- }
/*
* Workaround for Initiators that have broken connection recovery logic.
*
* "We would really like to get rid of this." Linux-iSCSI.org team
*/
- memset(buf, 0, 32);
- sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
return -1;
- }
if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
return -1;
+ /*
+ * Set RDMAExtensions=Yes by default for iSER enabled network portals
+ */
+ if (iser) {
+ struct iscsi_param *param;
+ unsigned long mrdsl, off;
+ int rc;
+
+ if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
+ return -1;
+
+ /*
+ * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
+ * Immediate Data + Unsolicited Data-OUT if necessary..
+ */
+ param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
+ conn->param_list);
+ if (!param) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ rc = kstrtoul(param->value, 0, &mrdsl);
+ if (rc < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ off = mrdsl % PAGE_SIZE;
+ if (!off)
+ goto check_prot;
+
+ if (mrdsl < PAGE_SIZE)
+ mrdsl = PAGE_SIZE;
+ else
+ mrdsl -= off;
+
+ pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
+ " to PAGE_SIZE\n", mrdsl);
+
+ if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
+ return -1;
+ /*
+ * ISER currently requires that ImmediateData + Unsolicited
+ * Data be disabled when protection / signature MRs are enabled.
+ */
+check_prot:
+ if (sess->se_sess->sup_prot_ops &
+ (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+ TARGET_PROT_DOUT_INSERT)) {
+
+ if (iscsi_change_param_sprintf(conn, "ImmediateData=No"))
+ return -1;
+
+ if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes"))
+ return -1;
+
+ pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+ " T10-PI enabled ISER session\n");
+ }
+ }
return 0;
}
@@ -430,6 +554,7 @@ static int iscsi_login_non_zero_tsih_s2(
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+ bool iser = false;
spin_lock_bh(&se_tpg->session_lock);
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
@@ -441,7 +566,7 @@ static int iscsi_login_non_zero_tsih_s2(
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
- (sess_p->tsih == pdu->tsih)) {
+ (sess_p->tsih == be16_to_cpu(pdu->tsih))) {
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
sess = sess_p;
@@ -473,13 +598,16 @@ static int iscsi_login_non_zero_tsih_s2(
iscsi_login_set_conn_values(sess, conn, pdu->cid);
if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+ conn->tpg->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
- iscsi_set_keys_to_negotiate(0, conn->param_list);
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+ iser = true;
+
+ iscsi_set_keys_to_negotiate(conn->param_list, iser);
/*
* Need to send TargetPortalGroupTag back in first login response
* on any iSCSI connection where the Initiator provides TargetName.
@@ -487,13 +615,8 @@ static int iscsi_login_non_zero_tsih_s2(
*
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
- memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
return -1;
- }
return iscsi_login_disable_FIM_keys(conn->param_list, conn);
}
@@ -515,7 +638,7 @@ int iscsi_login_post_auth_non_zero_tsih(
* initiator and release the new connection.
*/
conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
- if ((conn_ptr)) {
+ if (conn_ptr) {
pr_err("Connection exists with CID %hu for %s,"
" performing connection reinstatement.\n",
conn_ptr->cid, sess->sess_ops->InitiatorName);
@@ -536,7 +659,7 @@ int iscsi_login_post_auth_non_zero_tsih(
if (sess->sess_ops->ErrorRecoveryLevel == 2) {
cr = iscsit_get_inactive_connection_recovery_entry(
sess, cid);
- if ((cr)) {
+ if (cr) {
pr_debug("Performing implicit logout"
" for connection recovery on CID: %hu\n",
conn->cid);
@@ -568,12 +691,17 @@ int iscsi_login_post_auth_non_zero_tsih(
static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
+ /*
+ * FIXME: Unsolicited NopIN support for ISER
+ */
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+ return;
if (!sess->sess_ops->SessionType)
iscsit_start_nopin_timer(conn);
}
-static int iscsi_post_login_handler(
+int iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsi_conn *conn,
u8 zero_tsih)
@@ -581,7 +709,7 @@ static int iscsi_post_login_handler(
int stop_timer = 0;
struct iscsi_session *sess = conn->sess;
struct se_session *se_sess = sess->se_sess;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct iscsi_thread_set *ts;
@@ -626,6 +754,7 @@ static int iscsi_post_login_handler(
spin_unlock_bh(&sess->conn_lock);
iscsi_post_login_start_timers(conn);
+
iscsi_activate_thread_set(conn, ts);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
@@ -755,12 +884,12 @@ static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
spin_unlock_bh(&np->np_thread_lock);
}
-int iscsi_target_setup_login_socket(
+int iscsit_setup_np(
struct iscsi_np *np,
struct __kernel_sockaddr_storage *sockaddr)
{
- struct socket *sock;
- int backlog = 5, ret, opt = 0, len;
+ struct socket *sock = NULL;
+ int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
switch (np->np_network_transport) {
case ISCSI_TCP:
@@ -775,15 +904,15 @@ int iscsi_target_setup_login_socket(
np->np_ip_proto = IPPROTO_SCTP;
np->np_sock_type = SOCK_SEQPACKET;
break;
- case ISCSI_IWARP_TCP:
- case ISCSI_IWARP_SCTP:
- case ISCSI_INFINIBAND:
default:
pr_err("Unsupported network_transport: %d\n",
np->np_network_transport);
return -EINVAL;
}
+ np->np_ip_proto = IPPROTO_TCP;
+ np->np_sock_type = SOCK_STREAM;
+
ret = sock_create(sockaddr->ss_family, np->np_sock_type,
np->np_ip_proto, &sock);
if (ret < 0) {
@@ -792,22 +921,6 @@ int iscsi_target_setup_login_socket(
}
np->np_socket = sock;
/*
- * The SCTP stack needs struct socket->file.
- */
- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
- (np->np_network_transport == ISCSI_SCTP_UDP)) {
- if (!sock->file) {
- sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
- if (!sock->file) {
- pr_err("Unable to allocate struct"
- " file for SCTP\n");
- ret = -ENOMEM;
- goto fail;
- }
- np->np_flags |= NPF_SCTP_STRUCT_FILE;
- }
- }
- /*
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
*/
@@ -863,147 +976,337 @@ int iscsi_target_setup_login_socket(
}
return 0;
-
fail:
np->np_socket = NULL;
- if (sock) {
- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
- kfree(sock->file);
- sock->file = NULL;
+ if (sock)
+ sock_release(sock);
+ return ret;
+}
+
+int iscsi_target_setup_login_socket(
+ struct iscsi_np *np,
+ struct __kernel_sockaddr_storage *sockaddr)
+{
+ struct iscsit_transport *t;
+ int rc;
+
+ t = iscsit_get_transport(np->np_network_transport);
+ if (!t)
+ return -EINVAL;
+
+ rc = t->iscsit_setup_np(np, sockaddr);
+ if (rc < 0) {
+ iscsit_put_transport(t);
+ return rc;
+ }
+
+ np->np_transport = t;
+ np->enabled = true;
+ return 0;
+}
+
+int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+ struct socket *new_sock, *sock = np->np_socket;
+ struct sockaddr_in sock_in;
+ struct sockaddr_in6 sock_in6;
+ int rc, err;
+
+ rc = kernel_accept(sock, &new_sock, 0);
+ if (rc < 0)
+ return rc;
+
+ conn->sock = new_sock;
+ conn->login_family = np->np_sockaddr.ss_family;
+
+ if (np->np_sockaddr.ss_family == AF_INET6) {
+ memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
+
+ rc = conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in6, &err, 1);
+ if (!rc) {
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
+ snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]",
+ &sock_in6.sin6_addr.in6_u);
+ else
+ snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
+ &sock_in6.sin6_addr.s6_addr32[3]);
+ conn->login_port = ntohs(sock_in6.sin6_port);
}
- sock_release(sock);
+ rc = conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in6, &err, 0);
+ if (!rc) {
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
+ snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]",
+ &sock_in6.sin6_addr.in6_u);
+ else
+ snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
+ &sock_in6.sin6_addr.s6_addr32[3]);
+ conn->local_port = ntohs(sock_in6.sin6_port);
+ }
+ } else {
+ memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+ rc = conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in, &err, 1);
+ if (!rc) {
+ sprintf(conn->login_ip, "%pI4",
+ &sock_in.sin_addr.s_addr);
+ conn->login_port = ntohs(sock_in.sin_port);
+ }
+
+ rc = conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in, &err, 0);
+ if (!rc) {
+ sprintf(conn->local_ip, "%pI4",
+ &sock_in.sin_addr.s_addr);
+ conn->local_port = ntohs(sock_in.sin_port);
+ }
}
- return ret;
+
+ return 0;
+}
+
+int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ struct iscsi_login_req *login_req;
+ u32 padding = 0, payload_length;
+
+ if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
+ return -1;
+
+ login_req = (struct iscsi_login_req *)login->req;
+ payload_length = ntoh24(login_req->dlength);
+ padding = ((-payload_length) & 3);
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, login_req->cid, payload_length);
+ /*
+ * Setup the initial iscsi_login values from the leading
+ * login request PDU.
+ */
+ if (login->first_request) {
+ login_req = (struct iscsi_login_req *)login->req;
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = be32_to_cpu(login_req->cmdsn);
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+ login->cid = be16_to_cpu(login_req->cid);
+ login->tsih = be16_to_cpu(login_req->tsih);
+ }
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+ if (iscsi_login_rx_data(conn, login->req_buf,
+ payload_length + padding) < 0)
+ return -1;
+
+ return 0;
+}
+
+int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ u32 length)
+{
+ if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
+{
+ int rc;
+
+ if (!t->owner) {
+ conn->conn_transport = t;
+ return 0;
+ }
+
+ rc = try_module_get(t->owner);
+ if (!rc) {
+ pr_err("try_module_get() failed for %s\n", t->name);
+ return -EINVAL;
+ }
+
+ conn->conn_transport = t;
+ return 0;
+}
+
+void iscsi_target_login_sess_out(struct iscsi_conn *conn,
+ struct iscsi_np *np, bool zero_tsih, bool new_sess)
+{
+ if (!new_sess)
+ goto old_sess_out;
+
+ pr_err("iSCSI Login negotiation failed.\n");
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ if (!zero_tsih || !conn->sess)
+ goto old_sess_out;
+ if (conn->sess->se_sess)
+ transport_free_session(conn->sess->se_sess);
+ if (conn->sess->session_index != 0) {
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, conn->sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+ }
+ kfree(conn->sess->sess_ops);
+ kfree(conn->sess);
+
+old_sess_out:
+ iscsi_stop_login_thread_timer(np);
+ /*
+ * If login negotiation fails check if the Time2Retain timer
+ * needs to be restarted.
+ */
+ if (!zero_tsih && conn->sess) {
+ spin_lock_bh(&conn->sess->conn_lock);
+ if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+ struct se_portal_group *se_tpg =
+ &conn->tpg->tpg_se_tpg;
+
+ atomic_set(&conn->sess->session_continuation, 0);
+ spin_unlock_bh(&conn->sess->conn_lock);
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_start_time2retain_handler(conn->sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ } else
+ spin_unlock_bh(&conn->sess->conn_lock);
+ iscsit_dec_session_usage_count(conn->sess);
+ }
+
+ if (!IS_ERR(conn->conn_rx_hash.tfm))
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (!IS_ERR(conn->conn_tx_hash.tfm))
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+
+ if (conn->param_list) {
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+ iscsi_target_nego_release(conn);
+
+ if (conn->sock) {
+ sock_release(conn->sock);
+ conn->sock = NULL;
+ }
+
+ if (conn->conn_transport->iscsit_free_conn)
+ conn->conn_transport->iscsit_free_conn(conn);
+
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
}
static int __iscsi_target_login_thread(struct iscsi_np *np)
{
- u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
- int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
+ u8 *buffer, zero_tsih = 0;
+ int ret = 0, rc;
struct iscsi_conn *conn = NULL;
struct iscsi_login *login;
struct iscsi_portal_group *tpg = NULL;
- struct socket *new_sock, *sock;
- struct kvec iov;
struct iscsi_login_req *pdu;
- struct sockaddr_in sock_in;
- struct sockaddr_in6 sock_in6;
+ struct iscsi_tpg_np *tpg_np;
+ bool new_sess = false;
flush_signals(current);
- set_sctp_conn_flag = 0;
- sock = np->np_socket;
- ip_proto = np->np_ip_proto;
- sock_type = np->np_sock_type;
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
complete(&np->np_restart_comp);
+ } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+ spin_unlock_bh(&np->np_thread_lock);
+ goto exit;
} else {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
}
spin_unlock_bh(&np->np_thread_lock);
- if (kernel_accept(sock, &new_sock, 0) < 0) {
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for"
+ " new connection\n");
+ /* Get another socket */
+ return 1;
+ }
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+
+ if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
+ kfree(conn);
+ return 1;
+ }
+
+ rc = np->np_transport->iscsit_accept_np(np, conn);
+ if (rc == -ENOSYS) {
+ complete(&np->np_restart_comp);
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+ conn = NULL;
+ goto exit;
+ } else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+ conn = NULL;
+ if (ret == -ENODEV)
+ goto out;
/* Get another socket */
return 1;
}
spin_unlock_bh(&np->np_thread_lock);
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+ conn = NULL;
goto out;
}
/*
- * The SCTP stack needs struct socket->file.
+ * Perform the remaining iSCSI connection initialization items..
*/
- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
- (np->np_network_transport == ISCSI_SCTP_UDP)) {
- if (!new_sock->file) {
- new_sock->file = kzalloc(
- sizeof(struct file), GFP_KERNEL);
- if (!new_sock->file) {
- pr_err("Unable to allocate struct"
- " file for SCTP\n");
- sock_release(new_sock);
- /* Get another socket */
- return 1;
- }
- set_sctp_conn_flag = 1;
- }
+ login = iscsi_login_init_conn(conn);
+ if (!login) {
+ goto new_sess_out;
}
iscsi_start_login_thread_timer(np);
- conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
- if (!conn) {
- pr_err("Could not allocate memory for"
- " new connection\n");
- if (set_sctp_conn_flag) {
- kfree(new_sock->file);
- new_sock->file = NULL;
- }
- sock_release(new_sock);
- /* Get another socket */
- return 1;
- }
-
- pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
- conn->conn_state = TARG_CONN_STATE_FREE;
- conn->sock = new_sock;
-
- if (set_sctp_conn_flag)
- conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
-
pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
conn->conn_state = TARG_CONN_STATE_XPT_UP;
-
- /*
- * Allocate conn->conn_ops early as a failure calling
- * iscsit_tx_login_rsp() below will call tx_data().
- */
- conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
- if (!conn->conn_ops) {
- pr_err("Unable to allocate memory for"
- " struct iscsi_conn_ops.\n");
- goto new_sess_out;
- }
/*
- * Perform the remaining iSCSI connection initialization items..
+ * This will process the first login request + payload..
*/
- if (iscsi_login_init_conn(conn) < 0)
- goto new_sess_out;
-
- memset(buffer, 0, ISCSI_HDR_LEN);
- memset(&iov, 0, sizeof(struct kvec));
- iov.iov_base = buffer;
- iov.iov_len = ISCSI_HDR_LEN;
-
- if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
- pr_err("rx_data() returned an error.\n");
- goto new_sess_out;
- }
-
- iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
- if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
- pr_err("First opcode is not login request,"
- " failing login request.\n");
+ rc = np->np_transport->iscsit_get_login_rx(conn, login);
+ if (rc == 1)
+ return 1;
+ else if (rc < 0)
goto new_sess_out;
- }
- pdu = (struct iscsi_login_req *) buffer;
- pdu->cid = be16_to_cpu(pdu->cid);
- pdu->tsih = be16_to_cpu(pdu->tsih);
- pdu->itt = be32_to_cpu(pdu->itt);
- pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
- pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
+ buffer = &login->req[0];
+ pdu = (struct iscsi_login_req *)buffer;
/*
	 * Used by iscsit_tx_login_rsp() for Login Response PDUs
* when Status-Class != 0.
*/
- conn->login_itt = pdu->itt;
+ conn->login_itt = pdu->itt;
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
@@ -1016,61 +1319,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
spin_unlock_bh(&np->np_thread_lock);
- if (np->np_sockaddr.ss_family == AF_INET6) {
- memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
-
- if (conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in6, &err, 1) < 0) {
- pr_err("sock_ops->getname() failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_TARGET_ERROR);
- goto new_sess_out;
- }
- snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
- &sock_in6.sin6_addr.in6_u);
- conn->login_port = ntohs(sock_in6.sin6_port);
-
- if (conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in6, &err, 0) < 0) {
- pr_err("sock_ops->getname() failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_TARGET_ERROR);
- goto new_sess_out;
- }
- snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
- &sock_in6.sin6_addr.in6_u);
- conn->local_port = ntohs(sock_in6.sin6_port);
-
- } else {
- memset(&sock_in, 0, sizeof(struct sockaddr_in));
-
- if (conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in, &err, 1) < 0) {
- pr_err("sock_ops->getname() failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_TARGET_ERROR);
- goto new_sess_out;
- }
- sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
- conn->login_port = ntohs(sock_in.sin_port);
-
- if (conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in, &err, 0) < 0) {
- pr_err("sock_ops->getname() failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_TARGET_ERROR);
- goto new_sess_out;
- }
- sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
- conn->local_port = ntohs(sock_in.sin_port);
- }
-
conn->network_transport = np->np_network_transport;
pr_debug("Received iSCSI login request from %s on %s Network"
- " Portal %s:%hu\n", conn->login_ip,
- (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
- conn->local_ip, conn->local_port);
+ " Portal %s:%hu\n", conn->login_ip, np->np_transport->name,
+ conn->local_ip, conn->local_port);
pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
@@ -1080,7 +1333,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto new_sess_out;
zero_tsih = (pdu->tsih == 0x0000);
- if ((zero_tsih)) {
+ if (zero_tsih) {
/*
* This is the leading connection of a new session.
* We wait until after authentication to check for
@@ -1099,16 +1352,21 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
goto new_sess_out;
}
-
/*
- * This will process the first login request, and call
- * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
+ * SessionType: Discovery
+ *
+ * Locates Default Portal
+ *
+ * SessionType: Normal
+ *
+ * Locates Target Portal from NP -> Target IQN
*/
- login = iscsi_target_init_negotiation(np, conn, buffer);
- if (!login) {
+ rc = iscsi_target_locate_portal(np, conn, login);
+ if (rc < 0) {
tpg = conn->tpg;
goto new_sess_out;
}
+ login->zero_tsih = zero_tsih;
tpg = conn->tpg;
if (!tpg) {
@@ -1117,18 +1375,15 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
if (zero_tsih) {
- if (iscsi_login_zero_tsih_s2(conn) < 0) {
- iscsi_target_nego_release(login, conn);
+ if (iscsi_login_zero_tsih_s2(conn) < 0)
goto new_sess_out;
- }
} else {
- if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
- iscsi_target_nego_release(login, conn);
+ if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0)
goto old_sess_out;
- }
}
- if (iscsi_target_start_negotiation(login, conn) < 0)
+ ret = iscsi_target_start_negotiation(login, conn);
+ if (ret < 0)
goto new_sess_out;
if (!conn->sess) {
@@ -1141,98 +1396,43 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (signal_pending(current))
goto new_sess_out;
- ret = iscsi_post_login_handler(np, conn, zero_tsih);
+ if (ret == 1) {
+ tpg_np = conn->tpg_np;
- if (ret < 0)
- goto new_sess_out;
+ ret = iscsi_post_login_handler(np, conn, zero_tsih);
+ if (ret < 0)
+ goto new_sess_out;
+
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
- iscsit_deaccess_np(np, tpg);
tpg = NULL;
+ tpg_np = NULL;
/* Get another socket */
return 1;
new_sess_out:
- pr_err("iSCSI Login negotiation failed.\n");
- iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
- ISCSI_LOGIN_STATUS_INIT_ERR);
- if (!zero_tsih || !conn->sess)
- goto old_sess_out;
- if (conn->sess->se_sess)
- transport_free_session(conn->sess->se_sess);
- if (conn->sess->session_index != 0) {
- spin_lock_bh(&sess_idr_lock);
- idr_remove(&sess_idr, conn->sess->session_index);
- spin_unlock_bh(&sess_idr_lock);
- }
- if (conn->sess->sess_ops)
- kfree(conn->sess->sess_ops);
- if (conn->sess)
- kfree(conn->sess);
+ new_sess = true;
old_sess_out:
- iscsi_stop_login_thread_timer(np);
- /*
- * If login negotiation fails check if the Time2Retain timer
- * needs to be restarted.
- */
- if (!zero_tsih && conn->sess) {
- spin_lock_bh(&conn->sess->conn_lock);
- if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
- struct se_portal_group *se_tpg =
- &ISCSI_TPG_C(conn)->tpg_se_tpg;
-
- atomic_set(&conn->sess->session_continuation, 0);
- spin_unlock_bh(&conn->sess->conn_lock);
- spin_lock_bh(&se_tpg->session_lock);
- iscsit_start_time2retain_handler(conn->sess);
- spin_unlock_bh(&se_tpg->session_lock);
- } else
- spin_unlock_bh(&conn->sess->conn_lock);
- iscsit_dec_session_usage_count(conn->sess);
- }
-
- if (!IS_ERR(conn->conn_rx_hash.tfm))
- crypto_free_hash(conn->conn_rx_hash.tfm);
- if (!IS_ERR(conn->conn_tx_hash.tfm))
- crypto_free_hash(conn->conn_tx_hash.tfm);
-
- if (conn->conn_cpumask)
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
-
- if (conn->param_list) {
- iscsi_release_param_list(conn->param_list);
- conn->param_list = NULL;
- }
- if (conn->sock) {
- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
- kfree(conn->sock->file);
- conn->sock->file = NULL;
- }
- sock_release(conn->sock);
- }
- kfree(conn);
+ tpg_np = conn->tpg_np;
+ iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
+ new_sess = false;
if (tpg) {
- iscsit_deaccess_np(np, tpg);
+ iscsit_deaccess_np(np, tpg, tpg_np);
tpg = NULL;
+ tpg_np = NULL;
}
out:
- stop = kthread_should_stop();
- if (!stop && signal_pending(current)) {
- spin_lock_bh(&np->np_thread_lock);
- stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
- spin_unlock_bh(&np->np_thread_lock);
- }
- /* Wait for another socket.. */
- if (!stop)
- return 1;
+ return 1;
+exit:
iscsi_stop_login_thread_timer(np);
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_EXIT;
spin_unlock_bh(&np->np_thread_lock);
+
return 0;
}
@@ -1243,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
allow_signal(SIGINT);
- while (!kthread_should_stop()) {
+ while (1) {
ret = __iscsi_target_login_thread(np);
/*
* We break and exit here unless another sock_accept() call
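
The net effect of the login.c rework: every socket-specific step (iscsit_setup_np(), iscsit_accept_np(), iscsit_get_login_rx(), iscsit_put_login_tx()) is now reached through struct iscsit_transport function pointers, so an RDMA transport such as iSER can substitute its own implementations without touching the login state machine. A sketch of how the built-in TCP transport plausibly fills those ops, using only callbacks visible in this patch; the real initializer lives elsewhere in the series, so treat the field layout as an assumption:

#include <target/iscsi/iscsi_transport.h>

static struct iscsit_transport demo_iscsi_tcp_transport = {
	.name			= "iSCSI/TCP",
	.transport_type		= ISCSI_TCP,
	.owner			= NULL,	/* built in: no module reference */
	.iscsit_setup_np	= iscsit_setup_np,
	.iscsit_accept_np	= iscsit_accept_np,
	.iscsit_get_login_rx	= iscsit_get_login_rx,
	.iscsit_put_login_tx	= iscsit_put_login_tx,
};

Note how iscsit_conn_set_transport() above only calls try_module_get() when .owner is set: the built-in transport leaves it NULL, while a loadable transport module would point it at THIS_MODULE.
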
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 091dcae2532..29d098324b7 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -4,8 +4,17 @@
extern int iscsi_login_setup_crypto(struct iscsi_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsit_setup_np(struct iscsi_np *,
+ struct __kernel_sockaddr_storage *);
extern int iscsi_target_setup_login_socket(struct iscsi_np *,
struct __kernel_sockaddr_storage *);
+extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
+ bool, bool);
extern int iscsi_target_login_thread(void *);
extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
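
One smaller cleanup from the login.c hunks is worth calling out here: iscsi_change_param_sprintf() folds the repeated memset()/sprintf()/iscsi_change_param_value()/error-response sequence into a single call, and its __printf(2, 3) attribute makes gcc type-check the format arguments at every call site. A generic sketch of the same helper pattern with hypothetical names (demo_ctx, demo_apply_key_value); the in-tree helper keeps a 64-byte stack buffer and relies on callers passing short keys, so the truncation check below is purely defensive:

#include <linux/errno.h>
#include <linux/kernel.h>

struct demo_ctx;
int demo_apply_key_value(struct demo_ctx *ctx, const char *kv);

static __printf(2, 3) int demo_set_key(struct demo_ctx *ctx,
				       const char *fmt, ...)
{
	char buf[64];
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* vsnprintf() reports the would-be length; catch truncation. */
	if (len < 0 || len >= (int)sizeof(buf))
		return -EOVERFLOW;

	return demo_apply_key_value(ctx, buf);
}
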
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index e89fa745725..62a095f36bf 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -22,6 +20,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -44,7 +43,7 @@ void convert_null_to_semi(char *buf, int len)
buf[i] = ';';
}
-int strlen_semi(char *buf)
+static int strlen_semi(char *buf)
{
int i = 0;
@@ -89,8 +88,8 @@ int extract_param(
if (len < 0)
return -1;
- if (len > max_length) {
- pr_err("Length of input: %d exeeds max_length:"
+ if (len >= max_length) {
+ pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
return -1;
}
@@ -111,6 +110,7 @@ static u32 iscsi_handle_authentication(
struct iscsi_session *sess = conn->sess;
struct iscsi_node_auth *auth;
struct iscsi_node_acl *iscsi_nacl;
+ struct iscsi_portal_group *iscsi_tpg;
struct se_node_acl *se_nacl;
if (!sess->sess_ops->SessionType) {
@@ -131,7 +131,17 @@ static u32 iscsi_handle_authentication(
return -1;
}
- auth = ISCSI_NODE_AUTH(iscsi_nacl);
+ if (se_nacl->dynamic_node_acl) {
+ iscsi_tpg = container_of(se_nacl->se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+
+ auth = &iscsi_tpg->tpg_demo_auth;
+ } else {
+ iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+ se_node_acl);
+
+ auth = &iscsi_nacl->node_auth;
+ }
} else {
/*
* For SessionType=Discovery
@@ -169,17 +179,15 @@ static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
kfree(conn->auth_protocol);
}
-static int iscsi_target_check_login_request(
+int iscsi_target_check_login_request(
struct iscsi_conn *conn,
struct iscsi_login *login)
{
- int req_csg, req_nsg, rsp_csg, rsp_nsg;
+ int req_csg, req_nsg;
u32 payload_length;
struct iscsi_login_req *login_req;
- struct iscsi_login_rsp *login_rsp;
login_req = (struct iscsi_login_req *) login->req;
- login_rsp = (struct iscsi_login_rsp *) login->rsp;
payload_length = ntoh24(login_req->dlength);
switch (login_req->opcode & ISCSI_OPCODE_MASK) {
@@ -202,10 +210,8 @@ static int iscsi_target_check_login_request(
return -1;
}
- req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
- rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
- req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
- rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+ req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
+ req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags);
if (req_csg != login->current_stage) {
pr_err("Initiator unexpectedly changed login stage"
@@ -343,112 +349,310 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
hton24(login_rsp->dlength, login->rsp_length);
memcpy(login_rsp->isid, login->isid, 6);
login_rsp->tsih = cpu_to_be16(login->tsih);
- login_rsp->itt = cpu_to_be32(login->init_task_tag);
+ login_rsp->itt = login->init_task_tag;
login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
" ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
- " %u\n", login_rsp->flags, ntohl(login_rsp->itt),
+ " %u\n", login_rsp->flags, (__force u32)login_rsp->itt,
ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
ntohl(login_rsp->statsn), login->rsp_length);
padding = ((-login->rsp_length) & 3);
- if (iscsi_login_tx_data(
- conn,
- login->rsp,
- login->rsp_buf,
- login->rsp_length + padding) < 0)
+ if (conn->conn_transport->iscsit_put_login_tx(conn, login,
+ login->rsp_length + padding) < 0)
return -1;
login->rsp_length = 0;
- login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
- login_rsp->itt = be32_to_cpu(login_rsp->itt);
- login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
mutex_lock(&sess->cmdsn_mutex);
- login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn);
- login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn);
+ login_rsp->exp_cmdsn = cpu_to_be32(sess->exp_cmd_sn);
+ login_rsp->max_cmdsn = cpu_to_be32(sess->max_cmd_sn);
mutex_unlock(&sess->cmdsn_mutex);
return 0;
}
-static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+static void iscsi_target_sk_data_ready(struct sock *sk)
{
- u32 padding = 0, payload_length;
- struct iscsi_login_req *login_req;
+ struct iscsi_conn *conn = sk->sk_user_data;
+ bool rc;
- if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
- return -1;
+ pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
- login_req = (struct iscsi_login_req *) login->req;
- payload_length = ntoh24(login_req->dlength);
- login_req->tsih = be16_to_cpu(login_req->tsih);
- login_req->itt = be32_to_cpu(login_req->itt);
- login_req->cid = be16_to_cpu(login_req->cid);
- login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
- login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);
-
- pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
- " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
- login_req->flags, login_req->itt, login_req->cmdsn,
- login_req->exp_statsn, login_req->cid, payload_length);
-
- if (iscsi_target_check_login_request(conn, login) < 0)
- return -1;
+ write_lock_bh(&sk->sk_callback_lock);
+ if (!sk->sk_user_data) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn);
+ return;
+ }
+ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn);
+ return;
+ }
+ if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
+ return;
+ }
- padding = ((-payload_length) & 3);
- memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+ rc = schedule_delayed_work(&conn->login_work, 0);
+ if (!rc) {
+ pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
+ " got false\n");
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+}
- if (iscsi_login_rx_data(
- conn,
- login->req_buf,
- payload_length + padding) < 0)
- return -1;
+static void iscsi_target_sk_state_change(struct sock *);
- return 0;
+static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn)
+{
+ struct sock *sk;
+
+ if (!conn->sock)
+ return;
+
+ sk = conn->sock->sk;
+ pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = conn;
+ conn->orig_data_ready = sk->sk_data_ready;
+ conn->orig_state_change = sk->sk_state_change;
+ sk->sk_data_ready = iscsi_target_sk_data_ready;
+ sk->sk_state_change = iscsi_target_sk_state_change;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ;
+ sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
}
-static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
{
- if (iscsi_target_do_tx_login_io(conn, login) < 0)
- return -1;
+ struct sock *sk;
- if (iscsi_target_do_rx_login_io(conn, login) < 0)
- return -1;
+ if (!conn->sock)
+ return;
- return 0;
+ sk = conn->sock->sk;
+ pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (!sk->sk_user_data) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = conn->orig_data_ready;
+ sk->sk_state_change = conn->orig_state_change;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
}
-static int iscsi_target_get_initial_payload(
- struct iscsi_conn *conn,
- struct iscsi_login *login)
+static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
+
+static bool iscsi_target_sk_state_check(struct sock *sk)
{
- u32 padding = 0, payload_length;
- struct iscsi_login_req *login_req;
+ if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
+ pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+ "returning FALSE\n");
+ return false;
+ }
+ return true;
+}
- login_req = (struct iscsi_login_req *) login->req;
- payload_length = ntoh24(login_req->dlength);
+static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ struct iscsi_np *np = login->np;
+ bool zero_tsih = login->zero_tsih;
- pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
- " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
- login_req->flags, login_req->itt, login_req->cmdsn,
- login_req->exp_statsn, payload_length);
+ iscsi_remove_failed_auth_entry(conn);
+ iscsi_target_nego_release(conn);
+ iscsi_target_login_sess_out(conn, np, zero_tsih, true);
+}
- if (iscsi_target_check_login_request(conn, login) < 0)
- return -1;
+static void iscsi_target_login_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *)data;
- padding = ((-payload_length) & 3);
+ pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
- if (iscsi_login_rx_data(
- conn,
- login->req_buf,
- payload_length + padding) < 0)
- return -1;
+ if (conn->login_kworker) {
+ pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
+ conn->login_kworker->comm, conn->login_kworker->pid);
+ send_sig(SIGINT, conn->login_kworker, 1);
+ }
+}
- return 0;
+static void iscsi_target_do_login_rx(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work,
+ struct iscsi_conn, login_work.work);
+ struct iscsi_login *login = conn->login;
+ struct iscsi_np *np = login->np;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_tpg_np *tpg_np = conn->tpg_np;
+ struct timer_list login_timer;
+ int rc, zero_tsih = login->zero_tsih;
+ bool state;
+
+ pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
+ conn, current->comm, current->pid);
+
+ spin_lock(&tpg->tpg_state_lock);
+ state = (tpg->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&tpg->tpg_state_lock);
+
+ if (!state) {
+ pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ return;
+ }
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = iscsi_target_sk_state_check(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
+
+ if (!state) {
+ pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ return;
+ }
+ }
+
+ conn->login_kworker = current;
+ allow_signal(SIGINT);
+
+ init_timer(&login_timer);
+ login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+ login_timer.data = (unsigned long)conn;
+ login_timer.function = iscsi_target_login_timeout;
+ add_timer(&login_timer);
+ pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid);
+
+ rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
+ del_timer_sync(&login_timer);
+ flush_signals(current);
+ conn->login_kworker = NULL;
+
+ if (rc < 0) {
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ return;
+ }
+
+ pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
+ conn, current->comm, current->pid);
+
+ rc = iscsi_target_do_login(conn, login);
+ if (rc < 0) {
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ } else if (!rc) {
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ } else if (rc == 1) {
+ iscsi_target_nego_release(conn);
+ iscsi_post_login_handler(np, conn, zero_tsih);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
+}
+
+static void iscsi_target_do_cleanup(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work,
+ struct iscsi_conn, login_cleanup_work.work);
+ struct sock *sk = conn->sock->sk;
+ struct iscsi_login *login = conn->login;
+ struct iscsi_np *np = login->np;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_tpg_np *tpg_np = conn->tpg_np;
+
+ pr_debug("Entering iscsi_target_do_cleanup\n");
+
+ cancel_delayed_work_sync(&conn->login_work);
+ conn->orig_state_change(sk);
+
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+
+ pr_debug("iscsi_target_do_cleanup done()\n");
+}
+
+static void iscsi_target_sk_state_change(struct sock *sk)
+{
+ struct iscsi_conn *conn;
+ void (*orig_state_change)(struct sock *);
+ bool state;
+
+ pr_debug("Entering iscsi_target_sk_state_change\n");
+
+ write_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = conn->orig_state_change;
+
+ if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n",
+ conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+ if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
+ " conn: %p\n", conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+ if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
+ conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+
+ state = iscsi_target_sk_state_check(sk);
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+
+ if (!state) {
+ pr_debug("iscsi_target_sk_state_change got failed state\n");
+ schedule_delayed_work(&conn->login_cleanup_work, 0);
+ return;
+ }
+ orig_state_change(sk);
}
/*
@@ -554,7 +758,7 @@ static int iscsi_target_handle_csg_zero(
SENDER_INITIATOR|SENDER_RECEIVER,
login->req_buf,
payload_length,
- conn->param_list);
+ conn);
if (ret < 0)
return -1;
@@ -569,6 +773,12 @@ static int iscsi_target_handle_csg_zero(
}
goto do_auth;
+ } else if (!payload_length) {
+ pr_err("Initiator sent zero length security payload,"
+ " login failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
}
if (login->first_request)
@@ -585,7 +795,7 @@ static int iscsi_target_handle_csg_zero(
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ if (conn->tpg->tpg_attrib.authentication &&
!strncmp(param->value, NONE, 4)) {
pr_err("Initiator sent AuthMethod=None but"
" Target is enforcing iSCSI Authentication,"
@@ -595,7 +805,7 @@ static int iscsi_target_handle_csg_zero(
return -1;
}
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ if (conn->tpg->tpg_attrib.authentication &&
!login->auth_complete)
return 0;
@@ -631,9 +841,12 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
SENDER_INITIATOR|SENDER_RECEIVER,
login->req_buf,
payload_length,
- conn->param_list);
- if (ret < 0)
+ conn);
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
+ }
if (login->first_request)
if (iscsi_target_check_first_request(conn, login) < 0)
@@ -648,11 +861,14 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
login->rsp_buf,
&login->rsp_length,
conn->param_list);
- if (ret < 0)
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
+ }
if (!login->auth_complete &&
- ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+ conn->tpg->tpg_attrib.authentication) {
pr_err("Initiator is requesting CSG: 1, has not been"
" successfully authenticated, and the Target is"
" enforcing iSCSI Authentication, login failed.\n");
@@ -687,9 +903,9 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
return -1;
}
- switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
+ switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) {
case 0:
- login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
+ login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK;
if (iscsi_target_handle_csg_zero(conn, login) < 0)
return -1;
break;
@@ -699,27 +915,44 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
login->tsih = conn->sess->tsih;
+ login->login_complete = 1;
+ iscsi_target_restore_sock_callbacks(conn);
if (iscsi_target_do_tx_login_io(conn,
login) < 0)
return -1;
- return 0;
+ return 1;
}
break;
default:
pr_err("Illegal CSG: %d received from"
" Initiator, protocol error.\n",
- (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
- >> 2);
+ ISCSI_LOGIN_CURRENT_STAGE(login_req->flags));
break;
}
- if (iscsi_target_do_login_io(conn, login) < 0)
+ if (iscsi_target_do_tx_login_io(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
}
+ break;
+ }
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+ bool state;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = iscsi_target_sk_state_check(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
+
+ if (!state) {
+ pr_debug("iscsi_target_do_login() failed state for"
+ " conn: %p\n", conn);
+ return -1;
+ }
}
return 0;
@@ -743,7 +976,7 @@ static void iscsi_initiatorname_tolower(
/*
* Processes the first Login Request..
*/
-static int iscsi_target_locate_portal(
+int iscsi_target_locate_portal(
struct iscsi_np *np,
struct iscsi_conn *conn,
struct iscsi_login *login)
@@ -752,31 +985,21 @@ static int iscsi_target_locate_portal(
char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
struct iscsi_session *sess = conn->sess;
struct iscsi_tiqn *tiqn;
+ struct iscsi_tpg_np *tpg_np = NULL;
struct iscsi_login_req *login_req;
- struct iscsi_targ_login_rsp *login_rsp;
- u32 payload_length;
- int sessiontype = 0, ret = 0;
+ struct se_node_acl *se_nacl;
+ u32 payload_length, queue_depth = 0;
+ int sessiontype = 0, ret = 0, tag_num, tag_size;
+
+ INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
+ INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup);
+ iscsi_target_set_sock_callbacks(conn);
+
+ login->np = np;
login_req = (struct iscsi_login_req *) login->req;
- login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
payload_length = ntoh24(login_req->dlength);
- login->first_request = 1;
- login->leading_connection = (!login_req->tsih) ? 1 : 0;
- login->current_stage =
- (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
- login->version_min = login_req->min_version;
- login->version_max = login_req->max_version;
- memcpy(login->isid, login_req->isid, 6);
- login->cmd_sn = login_req->cmdsn;
- login->init_task_tag = login_req->itt;
- login->initial_exp_statsn = login_req->exp_statsn;
- login->cid = login_req->cid;
- login->tsih = login_req->tsih;
-
- if (iscsi_target_get_initial_payload(conn, login) < 0)
- return -1;
-
tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate memory for tmpbuf.\n");
@@ -807,7 +1030,6 @@ static int iscsi_target_locate_portal(
start += strlen(key) + strlen(value) + 2;
}
-
/*
* See 5.3. Login Phase.
*/
@@ -867,7 +1089,7 @@ static int iscsi_target_locate_portal(
goto out;
}
ret = 0;
- goto out;
+ goto alloc_tags;
}
get_target:
@@ -898,7 +1120,7 @@ get_target:
/*
* Locate Target Portal Group from Storage Node.
*/
- conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
+ conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np);
if (!conn->tpg) {
pr_err("Unable to locate Target Portal Group"
" on %s\n", tiqn->tiqn);
@@ -908,12 +1130,16 @@ get_target:
ret = -1;
goto out;
}
+ conn->tpg_np = tpg_np;
pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
/*
* Setup crc32c modules from libcrypto
*/
if (iscsi_login_setup_crypto(conn) < 0) {
pr_err("iscsi_login_setup_crypto() failed\n");
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+ iscsit_put_tiqn_for_login(tiqn);
+ conn->tpg = NULL;
ret = -1;
goto out;
}
@@ -922,11 +1148,12 @@ get_target:
* process login attempt.
*/
if (iscsit_access_np(np, conn->tpg) < 0) {
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
iscsit_put_tiqn_for_login(tiqn);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
- ret = -1;
conn->tpg = NULL;
+ ret = -1;
goto out;
}
@@ -959,108 +1186,69 @@ get_target:
ret = -1;
goto out;
}
-
- ret = 0;
-out:
- kfree(tmpbuf);
- return ret;
-}
-
-struct iscsi_login *iscsi_target_init_negotiation(
- struct iscsi_np *np,
- struct iscsi_conn *conn,
- char *login_pdu)
-{
- struct iscsi_login *login;
-
- login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
- if (!login) {
- pr_err("Unable to allocate memory for struct iscsi_login.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return NULL;
- }
-
- login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
- if (!login->req) {
- pr_err("Unable to allocate memory for Login Request.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- goto out;
- }
-
- login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
- if (!login->req_buf) {
- pr_err("Unable to allocate memory for response buffer.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- goto out;
- }
+ se_nacl = sess->se_sess->se_node_acl;
+ queue_depth = se_nacl->queue_depth;
/*
- * SessionType: Discovery
- *
- * Locates Default Portal
- *
- * SessionType: Normal
+ * Setup pre-allocated tags based upon allowed per NodeACL CmdSN
+ * depth for non immediate commands, plus extra tags for immediate
+ * commands.
*
- * Locates Target Portal from NP -> Target IQN
+ * Also enforce a ISCSIT_MIN_TAGS to prevent unnecessary contention
+ * in per-cpu-ida tag allocation logic + small queue_depth.
*/
- if (iscsi_target_locate_portal(np, conn, login) < 0) {
- pr_err("iSCSI Login negotiation failed.\n");
- goto out;
- }
+alloc_tags:
+ tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
+ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
+ tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
- return login;
+ ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ }
out:
- kfree(login->req);
- kfree(login->req_buf);
- kfree(login);
-
- return NULL;
+ kfree(tmpbuf);
+ return ret;
}
int iscsi_target_start_negotiation(
struct iscsi_login *login,
struct iscsi_conn *conn)
{
- int ret = -1;
+ int ret;
- login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
- if (!login->rsp) {
- pr_err("Unable to allocate memory for"
- " Login Response.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- ret = -1;
- goto out;
- }
+ ret = iscsi_target_do_login(conn, login);
+ if (!ret) {
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
- login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
- if (!login->rsp_buf) {
- pr_err("Unable to allocate memory for"
- " request buffer.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- ret = -1;
- goto out;
+ write_lock_bh(&sk->sk_callback_lock);
+ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ } else if (ret < 0) {
+ cancel_delayed_work_sync(&conn->login_work);
+ cancel_delayed_work_sync(&conn->login_cleanup_work);
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_remove_failed_auth_entry(conn);
}
-
- ret = iscsi_target_do_login(conn, login);
-out:
if (ret != 0)
- iscsi_remove_failed_auth_entry(conn);
+ iscsi_target_nego_release(conn);
- iscsi_target_nego_release(login, conn);
return ret;
}
-void iscsi_target_nego_release(
- struct iscsi_login *login,
- struct iscsi_conn *conn)
+void iscsi_target_nego_release(struct iscsi_conn *conn)
{
- kfree(login->req);
- kfree(login->rsp);
+ struct iscsi_login *login = conn->conn_login;
+
+ if (!login)
+ return;
+
kfree(login->req_buf);
kfree(login->rsp_buf);
kfree(login);
+
+ conn->conn_login = NULL;
}
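
The new alloc_tags path in iscsi_target_locate_portal() sizes the per-session tag pool from the NodeACL queue depth before calling transport_alloc_session_tags(). A userspace sketch of just that arithmetic; the constant values are assumptions standing in for the real ISCSIT_MIN_TAGS/ISCSIT_EXTRA_TAGS definitions in iscsi_target_core.h:

#include <stdio.h>

#define ISCSIT_MIN_TAGS   16    /* assumed value; see iscsi_target_core.h */
#define ISCSIT_EXTRA_TAGS  8    /* assumed value; see iscsi_target_core.h */

static unsigned int iscsit_session_tag_num(unsigned int queue_depth)
{
        unsigned int tag_num;

        /* Floor at ISCSIT_MIN_TAGS so a tiny queue depth does not
         * cause contention in the per-cpu-ida allocator, then double
         * for non-immediate commands plus headroom for immediate ones. */
        tag_num = queue_depth > ISCSIT_MIN_TAGS ? queue_depth : ISCSIT_MIN_TAGS;
        return (tag_num * 2) + ISCSIT_EXTRA_TAGS;
}

int main(void)
{
        printf("queue_depth=1:   %u tags\n", iscsit_session_tag_num(1));
        printf("queue_depth=128: %u tags\n", iscsit_session_tag_num(128));
        return 0;
}
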
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index 92e133a5158..f021cbd330e 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -7,11 +7,14 @@
extern void convert_null_to_semi(char *, int);
extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
-extern struct iscsi_login *iscsi_target_init_negotiation(
- struct iscsi_np *, struct iscsi_conn *, char *);
+extern int iscsi_target_check_login_request(struct iscsi_conn *,
+ struct iscsi_login *);
+extern int iscsi_target_get_initial_payload(struct iscsi_conn *,
+ struct iscsi_login *);
+extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *,
+ struct iscsi_login *);
extern int iscsi_target_start_negotiation(
struct iscsi_login *, struct iscsi_conn *);
-extern void iscsi_target_nego_release(
- struct iscsi_login *, struct iscsi_conn *);
+extern void iscsi_target_nego_release(struct iscsi_conn *);
#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index b3c699c4fe8..16454a922e2 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the main functions related to Initiator Node Attributes.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -35,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname(
}
void iscsit_set_default_node_attribues(
- struct iscsi_node_acl *acl)
+ struct iscsi_node_acl *acl,
+ struct iscsi_portal_group *tpg)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
@@ -46,10 +45,10 @@ void iscsit_set_default_node_attribues(
a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
- a->default_erl = NA_DEFAULT_ERL;
+ a->default_erl = tpg->tpg_attrib.default_erl;
}
-extern int iscsit_na_dataout_timeout(
+int iscsit_na_dataout_timeout(
struct iscsi_node_acl *acl,
u32 dataout_timeout)
{
@@ -74,7 +73,7 @@ extern int iscsit_na_dataout_timeout(
return 0;
}
-extern int iscsit_na_dataout_timeout_retries(
+int iscsit_na_dataout_timeout_retries(
struct iscsi_node_acl *acl,
u32 dataout_timeout_retries)
{
@@ -100,7 +99,7 @@ extern int iscsit_na_dataout_timeout_retries(
return 0;
}
-extern int iscsit_na_nopin_timeout(
+int iscsit_na_nopin_timeout(
struct iscsi_node_acl *acl,
u32 nopin_timeout)
{
@@ -155,7 +154,7 @@ extern int iscsit_na_nopin_timeout(
return 0;
}
-extern int iscsit_na_nopin_response_timeout(
+int iscsit_na_nopin_response_timeout(
struct iscsi_node_acl *acl,
u32 nopin_response_timeout)
{
@@ -181,7 +180,7 @@ extern int iscsit_na_nopin_response_timeout(
return 0;
}
-extern int iscsit_na_random_datain_pdu_offsets(
+int iscsit_na_random_datain_pdu_offsets(
struct iscsi_node_acl *acl,
u32 random_datain_pdu_offsets)
{
@@ -201,7 +200,7 @@ extern int iscsit_na_random_datain_pdu_offsets(
return 0;
}
-extern int iscsit_na_random_datain_seq_offsets(
+int iscsit_na_random_datain_seq_offsets(
struct iscsi_node_acl *acl,
u32 random_datain_seq_offsets)
{
@@ -221,7 +220,7 @@ extern int iscsit_na_random_datain_seq_offsets(
return 0;
}
-extern int iscsit_na_random_r2t_offsets(
+int iscsit_na_random_r2t_offsets(
struct iscsi_node_acl *acl,
u32 random_r2t_offsets)
{
@@ -241,7 +240,7 @@ extern int iscsit_na_random_r2t_offsets(
return 0;
}
-extern int iscsit_na_default_erl(
+int iscsit_na_default_erl(
struct iscsi_node_acl *acl,
u32 default_erl)
{
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index c970b326ef2..0c69a46a62e 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,7 +1,8 @@
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
-extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
+ struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 5b773160200..02f9de26f38 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -59,7 +57,7 @@ int iscsi_login_tx_data(
char *text_buf,
int text_length)
{
- int length, tx_sent;
+ int length, tx_sent, iov_cnt = 1;
struct kvec iov[2];
length = (ISCSI_HDR_LEN + text_length);
@@ -67,8 +65,12 @@ int iscsi_login_tx_data(
memset(&iov[0], 0, 2 * sizeof(struct kvec));
iov[0].iov_len = ISCSI_HDR_LEN;
iov[0].iov_base = pdu_buf;
- iov[1].iov_len = text_length;
- iov[1].iov_base = text_buf;
+
+ if (text_buf && text_length) {
+ iov[1].iov_len = text_length;
+ iov[1].iov_base = text_buf;
+ iov_cnt++;
+ }
/*
* Initial Marker-less Interval.
@@ -77,7 +79,7 @@ int iscsi_login_tx_data(
*/
conn->if_marker += length;
- tx_sent = tx_data(conn, &iov[0], 2, length);
+ tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
if (tx_sent != length) {
pr_err("tx_data returned %d, expecting %d.\n",
tx_sent, length);
@@ -154,22 +156,18 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
}
INIT_LIST_HEAD(&param->p_list);
- param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+ param->name = kstrdup(name, GFP_KERNEL);
if (!param->name) {
pr_err("Unable to allocate memory for parameter name.\n");
goto out;
}
- param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for parameter value.\n");
goto out;
}
- memcpy(param->name, name, strlen(name));
- param->name[strlen(name)] = '\0';
- memcpy(param->value, value, strlen(value));
- param->value[strlen(value)] = '\0';
param->phase = phase;
param->scope = scope;
param->sender = sender;
@@ -334,6 +332,13 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
if (!param)
goto out;
+ param = iscsi_set_default_param(pl, MAXXMITDATASEGMENTLENGTH,
+ INITIAL_MAXXMITDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
+
param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
INITIAL_MAXRECVDATASEGMENTLENGTH,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
@@ -426,6 +431,28 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
TYPERANGE_MARKINT, USE_INITIAL_ONLY);
if (!param)
goto out;
+ /*
+ * Extra parameters for ISER from RFC-5046
+ */
+ param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH,
+ INITIAL_INITIATORRECVDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH,
+ INITIAL_TARGETRECVDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
*param_list_ptr = pl;
return 0;
@@ -435,19 +462,23 @@ out:
}
int iscsi_set_keys_to_negotiate(
- int sessiontype,
- struct iscsi_param_list *param_list)
+ struct iscsi_param_list *param_list,
+ bool iser)
{
struct iscsi_param *param;
+ param_list->iser = iser;
+
list_for_each_entry(param, &param_list->param_list, p_list) {
param->state = 0;
if (!strcmp(param->name, AUTHMETHOD)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, HEADERDIGEST)) {
- SET_PSTATE_NEGOTIATE(param);
+ if (!iser)
+ SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DATADIGEST)) {
- SET_PSTATE_NEGOTIATE(param);
+ if (!iser)
+ SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXCONNECTIONS)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, TARGETNAME)) {
@@ -466,7 +497,10 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, IMMEDIATEDATA)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
- SET_PSTATE_NEGOTIATE(param);
+ if (!iser)
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
+ continue;
} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
@@ -493,6 +527,15 @@ int iscsi_set_keys_to_negotiate(
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, OFMARKINT)) {
SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
+ if (iser)
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
+ if (iser)
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
+ if (iser)
+ SET_PSTATE_NEGOTIATE(param);
}
}
@@ -535,6 +578,12 @@ int iscsi_set_keys_irrelevant_for_discovery(
param->state &= ~PSTATE_NEGOTIATE;
else if (!strcmp(param->name, OFMARKINT))
param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, RDMAEXTENSIONS))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
}
return 0;
@@ -626,11 +675,8 @@ void iscsi_release_param_list(struct iscsi_param_list *param_list)
list_del(&param->p_list);
kfree(param->name);
- param->name = NULL;
kfree(param->value);
- param->value = NULL;
kfree(param);
- param = NULL;
}
iscsi_release_extra_responses(param_list);
@@ -662,7 +708,7 @@ int iscsi_extract_key_value(char *textbuf, char **key, char **value)
{
*value = strchr(textbuf, '=');
if (!*value) {
- pr_err("Unable to locate \"=\" seperator for key,"
+ pr_err("Unable to locate \"=\" separator for key,"
" ignoring request.\n");
return -1;
}
@@ -678,15 +724,12 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
{
kfree(param->value);
- param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for value.\n");
- return -1;
+ return -ENOMEM;
}
- memcpy(param->value, value, strlen(value));
- param->value[strlen(value)] = '\0';
-
pr_debug("iSCSI Parameter updated to %s=%s\n",
param->name, param->value);
return 0;
@@ -713,9 +756,9 @@ static int iscsi_add_notunderstood_response(
}
INIT_LIST_HEAD(&extra_response->er_list);
- strncpy(extra_response->key, key, strlen(key) + 1);
- strncpy(extra_response->value, NOTUNDERSTOOD,
- strlen(NOTUNDERSTOOD) + 1);
+ strlcpy(extra_response->key, key, sizeof(extra_response->key));
+ strlcpy(extra_response->value, NOTUNDERSTOOD,
+ sizeof(extra_response->value));
list_add_tail(&extra_response->er_list,
&param_list->extra_response_list);
@@ -803,14 +846,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
value = simple_strtoul(value_ptr, &tmpptr, 0);
-/* #warning FIXME: Fix this */
-#if 0
- if (strspn(endptr, WHITE_SPACE) != strlen(endptr)) {
- pr_err("Illegal value \"%s\" for \"%s\".\n",
- value, param->name);
- return -1;
- }
-#endif
if (IS_TYPERANGE_0_TO_2(param)) {
if ((value < 0) || (value > 2)) {
pr_err("Illegal value for \"%s\", must be"
@@ -874,8 +909,8 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
{
char *left_val_ptr = NULL, *right_val_ptr = NULL;
- char *tilde_ptr = NULL, *tmp_ptr = NULL;
- u32 left_val, right_val, local_left_val, local_right_val;
+ char *tilde_ptr = NULL;
+ u32 left_val, right_val, local_left_val;
if (strcmp(param->name, IFMARKINT) &&
strcmp(param->name, OFMARKINT)) {
@@ -903,8 +938,8 @@ static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *va
if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
return -1;
- left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
- right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ left_val = simple_strtoul(left_val_ptr, NULL, 0);
+ right_val = simple_strtoul(right_val_ptr, NULL, 0);
*tilde_ptr = '~';
if (right_val < left_val) {
@@ -928,8 +963,7 @@ static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *va
left_val_ptr = param->value;
right_val_ptr = param->value + strlen(left_val_ptr) + 1;
- local_left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
- local_right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ local_left_val = simple_strtoul(left_val_ptr, NULL, 0);
*tilde_ptr = '~';
if (param->set_param) {
@@ -1046,13 +1080,6 @@ static char *iscsi_check_valuelist_for_support(
tmp2 = strchr(acceptor_values, ',');
if (tmp2)
*tmp2 = '\0';
- if (!acceptor_values || !proposer_values) {
- if (tmp1)
- *tmp1 = ',';
- if (tmp2)
- *tmp2 = ',';
- return NULL;
- }
if (!strcmp(acceptor_values, proposer_values)) {
if (tmp2)
*tmp2 = ',';
@@ -1062,8 +1089,6 @@ static char *iscsi_check_valuelist_for_support(
*tmp2++ = ',';
acceptor_values = tmp2;
- if (!acceptor_values)
- break;
} while (acceptor_values);
if (tmp1)
*tmp1++ = ',';
@@ -1074,7 +1099,8 @@ out:
return proposer_values;
}
-static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
+static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
+ struct iscsi_conn *conn)
{
u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
char *negoitated_value = NULL;
@@ -1113,11 +1139,11 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
SET_PSTATE_REPLY_OPTIONAL(param);
}
} else if (IS_TYPE_NUMBER(param)) {
- char *tmpptr, buf[10];
+ char *tmpptr, buf[11];
u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
- memset(buf, 0, 10);
+ memset(buf, 0, sizeof(buf));
if (!strcmp(param->name, MAXCONNECTIONS) ||
!strcmp(param->name, MAXBURSTLENGTH) ||
@@ -1149,8 +1175,35 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
return -1;
}
- if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ struct iscsi_param *param_mxdsl;
+ unsigned long long tmp;
+ int rc;
+
+ rc = kstrtoull(param->value, 0, &tmp);
+ if (rc < 0)
+ return -1;
+
+ conn->conn_ops->MaxRecvDataSegmentLength = tmp;
+ pr_debug("Saving op->MaxRecvDataSegmentLength from"
+ " original initiator received value: %u\n",
+ conn->conn_ops->MaxRecvDataSegmentLength);
+
+ param_mxdsl = iscsi_find_param_from_key(
+ MAXXMITDATASEGMENTLENGTH,
+ conn->param_list);
+ if (!param_mxdsl)
+ return -1;
+
+ rc = iscsi_update_param_value(param,
+ param_mxdsl->value);
+ if (rc < 0)
+ return -1;
+
+ pr_debug("Updated %s to target MXDSL value: %s\n",
+ param->name, param->value);
+ }
+
} else if (IS_TYPE_NUMBER_RANGE(param)) {
negoitated_value = iscsi_get_value_from_number_range(
param, value);
@@ -1189,7 +1242,7 @@ static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
if (IS_TYPE_NUMBER_RANGE(param)) {
u32 left_val = 0, right_val = 0, recieved_value = 0;
char *left_val_ptr = NULL, *right_val_ptr = NULL;
- char *tilde_ptr = NULL, *tmp_ptr = NULL;
+ char *tilde_ptr = NULL;
if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
if (iscsi_update_param_value(param, value) < 0)
@@ -1213,9 +1266,9 @@ static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
left_val_ptr = param->value;
right_val_ptr = param->value + strlen(left_val_ptr) + 1;
- left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
- right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
- recieved_value = simple_strtoul(value, &tmp_ptr, 0);
+ left_val = simple_strtoul(left_val_ptr, NULL, 0);
+ right_val = simple_strtoul(right_val_ptr, NULL, 0);
+ recieved_value = simple_strtoul(value, NULL, 0);
*tilde_ptr = '~';
@@ -1287,7 +1340,7 @@ static int iscsi_check_value(struct iscsi_param *param, char *value)
comma_ptr = strchr(value, ',');
if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
- pr_err("Detected value seperator \",\", but"
+ pr_err("Detected value separator \",\", but"
" key \"%s\" does not allow a value list,"
" protocol error.\n", param->name);
return -1;
@@ -1413,6 +1466,7 @@ static struct iscsi_param *iscsi_check_key(
break;
case PHASE_OPERATIONAL:
pr_debug("Operational phase.\n");
+ break;
default:
pr_debug("Unknown phase.\n");
}
@@ -1493,8 +1547,8 @@ static int iscsi_enforce_integrity_rules(
FirstBurstLength = simple_strtoul(param->value,
&tmpptr, 0);
if (FirstBurstLength > MaxBurstLength) {
- char tmpbuf[10];
- memset(tmpbuf, 0, 10);
+ char tmpbuf[11];
+ memset(tmpbuf, 0, sizeof(tmpbuf));
sprintf(tmpbuf, "%u", MaxBurstLength);
if (iscsi_update_param_value(param, tmpbuf))
return -1;
@@ -1544,13 +1598,14 @@ int iscsi_decode_text_input(
u8 sender,
char *textbuf,
u32 length,
- struct iscsi_param_list *param_list)
+ struct iscsi_conn *conn)
{
+ struct iscsi_param_list *param_list = conn->param_list;
char *tmpbuf, *start = NULL, *end = NULL;
tmpbuf = kzalloc(length + 1, GFP_KERNEL);
if (!tmpbuf) {
- pr_err("Unable to allocate memory for tmpbuf.\n");
+ pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
return -1;
}
@@ -1572,8 +1627,6 @@ int iscsi_decode_text_input(
if (phase & PHASE_SECURITY) {
if (iscsi_check_for_auth_key(key) > 0) {
- char *tmpptr = key + strlen(key);
- *tmpptr = '=';
kfree(tmpbuf);
return 1;
}
@@ -1603,7 +1656,7 @@ int iscsi_decode_text_input(
}
SET_PSTATE_RESPONSE_GOT(param);
} else {
- if (iscsi_check_acceptor_state(param, value) < 0) {
+ if (iscsi_check_acceptor_state(param, value, conn) < 0) {
kfree(tmpbuf);
return -1;
}
@@ -1738,6 +1791,18 @@ void iscsi_set_connection_parameters(
pr_debug("---------------------------------------------------"
"---------------\n");
list_for_each_entry(param, &param_list->param_list, p_list) {
+ /*
+ * Special case to set MAXXMITDATASEGMENTLENGTH from the
+ * target requested MaxRecvDataSegmentLength, even though
+ * this key is not sent over the wire.
+ */
+ if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
+ ops->MaxXmitDataSegmentLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxXmitDataSegmentLength: %s\n",
+ param->value);
+ }
+
if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
continue;
if (!strcmp(param->name, AUTHMETHOD)) {
@@ -1752,10 +1817,13 @@ void iscsi_set_connection_parameters(
pr_debug("DataDigest: %s\n",
param->value);
} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
- ops->MaxRecvDataSegmentLength =
- simple_strtoul(param->value, &tmpptr, 0);
- pr_debug("MaxRecvDataSegmentLength: %s\n",
- param->value);
+ /*
+ * At this point iscsi_check_acceptor_state() will have
+ * set ops->MaxRecvDataSegmentLength from the original
+ * initiator provided value.
+ */
+ pr_debug("MaxRecvDataSegmentLength: %u\n",
+ ops->MaxRecvDataSegmentLength);
} else if (!strcmp(param->name, OFMARKER)) {
ops->OFMarker = !strcmp(param->value, YES);
pr_debug("OFMarker: %s\n",
@@ -1774,6 +1842,22 @@ void iscsi_set_connection_parameters(
simple_strtoul(param->value, &tmpptr, 0);
pr_debug("IFMarkInt: %s\n",
param->value);
+ } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
+ ops->InitiatorRecvDataSegmentLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("InitiatorRecvDataSegmentLength: %s\n",
+ param->value);
+ ops->MaxRecvDataSegmentLength =
+ ops->InitiatorRecvDataSegmentLength;
+ pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n");
+ } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
+ ops->TargetRecvDataSegmentLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("TargetRecvDataSegmentLength: %s\n",
+ param->value);
+ ops->MaxXmitDataSegmentLength =
+ ops->TargetRecvDataSegmentLength;
+ pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n");
}
}
pr_debug("----------------------------------------------------"
@@ -1886,6 +1970,10 @@ void iscsi_set_session_parameters(
ops->SessionType = !strcmp(param->value, DISCOVERY);
pr_debug("SessionType: %s\n",
param->value);
+ } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
+ ops->RDMAExtensions = !strcmp(param->value, YES);
+ pr_debug("RDMAExtensions: %s\n",
+ param->value);
}
}
pr_debug("----------------------------------------------------"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 6a37fd6f128..a47046a752a 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,8 +1,10 @@
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
+#include <scsi/iscsi_proto.h>
+
struct iscsi_extra_response {
- char key[64];
+ char key[KEY_MAXLEN];
char value[32];
struct list_head er_list;
} ____cacheline_aligned;
@@ -27,7 +29,7 @@ extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
extern void iscsi_print_params(struct iscsi_param_list *);
extern int iscsi_create_default_params(struct iscsi_param_list **);
-extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
+extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
extern int iscsi_copy_param_list(struct iscsi_param_list **,
struct iscsi_param_list *, int);
@@ -36,7 +38,7 @@ extern void iscsi_release_param_list(struct iscsi_param_list *);
extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
extern int iscsi_extract_key_value(char *, char **, char **);
extern int iscsi_update_param_value(struct iscsi_param *, char *);
-extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
struct iscsi_param_list *);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
@@ -70,6 +72,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define INITIALR2T "InitialR2T"
#define IMMEDIATEDATA "ImmediateData"
#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
+#define MAXXMITDATASEGMENTLENGTH "MaxXmitDataSegmentLength"
#define MAXBURSTLENGTH "MaxBurstLength"
#define FIRSTBURSTLENGTH "FirstBurstLength"
#define DEFAULTTIME2WAIT "DefaultTime2Wait"
@@ -88,6 +91,13 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
+ * Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
+ */
+#define RDMAEXTENSIONS "RDMAExtensions"
+#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
+#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
+
+/*
* For AuthMethod.
*/
#define KRB5 "KRB5"
@@ -113,6 +123,10 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define INITIAL_INITIALR2T YES
#define INITIAL_IMMEDIATEDATA YES
#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
+/*
+ * Match outgoing MXDSL default to incoming Open-iSCSI default
+ */
+#define INITIAL_MAXXMITDATASEGMENTLENGTH "262144"
#define INITIAL_MAXBURSTLENGTH "262144"
#define INITIAL_FIRSTBURSTLENGTH "65536"
#define INITIAL_DEFAULTTIME2WAIT "2"
@@ -128,6 +142,13 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define INITIAL_OFMARKINT "2048~65535"
/*
+ * Initial values for iSER parameters following RFC-5046 Section 6
+ */
+#define INITIAL_RDMAEXTENSIONS NO
+#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
+#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
+
+/*
* For [Header,Data]Digests.
*/
#define CRC32C "CRC32C"
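
Putting the new keys together: the initiator-sent MaxRecvDataSegmentLength is captured into conn_ops in iscsi_check_acceptor_state(), while the reply carries the value of the internal MaxXmitDataSegmentLength key, which never itself appears on the wire. A rough sketch of that exchange under those assumptions, with illustrative names:

#include <stdio.h>

struct conn_ops { unsigned int max_recv_dsl; };

/* Hypothetical condensation of the MaxRecvDataSegmentLength handling
 * added to iscsi_check_acceptor_state() above. */
static const char *negotiate_mrdsl(struct conn_ops *ops,
                                   unsigned int initiator_mrdsl,
                                   const char *target_mxdsl_value)
{
        /* Save the initiator's declared receive limit for later use. */
        ops->max_recv_dsl = initiator_mrdsl;

        /* Answer with the target's MaxXmitDataSegmentLength value, a
         * key that is never sent over the wire itself. */
        return target_mxdsl_value;
}

int main(void)
{
        struct conn_ops ops;
        const char *reply = negotiate_mrdsl(&ops, 65536, "262144");

        printf("conn_ops->MaxRecvDataSegmentLength = %u\n", ops.max_recv_dsl);
        printf("wire reply: MaxRecvDataSegmentLength=%s\n", reply);
        return 0;
}
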
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index fc694082bfc..ca41b583f2f 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -2,9 +2,7 @@
* This file contains main functions related to iSCSI DataSequenceInOrder=No
* and DataPDUInOrder=No.
*
- © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -24,11 +22,13 @@
#include "iscsi_target_core.h"
#include "iscsi_target_util.h"
+#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
#define OFFLOAD_BUF_SIZE 32768
-void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+#ifdef DEBUG
+static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
int i;
struct iscsi_seq *seq;
@@ -46,7 +46,7 @@ void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
}
}
-void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
{
int i;
struct iscsi_pdu *pdu;
@@ -61,6 +61,10 @@ void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
pdu->length, pdu->pdu_send_order, pdu->seq_no);
}
}
+#else
+static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
+static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
+#endif
static void iscsit_ordered_seq_lists(
struct iscsi_cmd *cmd,
@@ -135,11 +139,11 @@ redo:
seq_count++;
continue;
}
- array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory"
" for random array.\n");
- return -1;
+ return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
@@ -155,11 +159,11 @@ redo:
}
if (seq_count) {
- array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for"
" random array.\n");
- return -1;
+ return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
@@ -187,10 +191,10 @@ static int iscsit_randomize_seq_lists(
if (!seq_count)
return 0;
- array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for random array.\n");
- return -1;
+ return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
@@ -213,19 +217,24 @@ static void iscsit_determine_counts_for_list(
int check_immediate = 0;
u32 burstlength = 0, offset = 0;
u32 unsolicited_data_length = 0;
+ u32 mdsl;
struct iscsi_conn *conn = cmd->conn;
+ if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
+ mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
+ else
+ mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
+
if ((bl->type == PDULIST_IMMEDIATE) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
check_immediate = 1;
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
- unsolicited_data_length = (cmd->data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+ unsolicited_data_length = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
- while (offset < cmd->data_length) {
+ while (offset < cmd->se_cmd.data_length) {
*pdu_count += 1;
if (check_immediate) {
@@ -238,14 +247,13 @@ static void iscsit_determine_counts_for_list(
continue;
}
if (unsolicited_data_length > 0) {
- if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
- >= cmd->data_length) {
+ if ((offset + mdsl) >= cmd->se_cmd.data_length) {
unsolicited_data_length -=
- (cmd->data_length - offset);
- offset += (cmd->data_length - offset);
+ (cmd->se_cmd.data_length - offset);
+ offset += (cmd->se_cmd.data_length - offset);
continue;
}
- if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+ if ((offset + mdsl)
>= conn->sess->sess_ops->FirstBurstLength) {
unsolicited_data_length -=
(conn->sess->sess_ops->FirstBurstLength -
@@ -257,17 +265,15 @@ static void iscsit_determine_counts_for_list(
continue;
}
- offset += conn->conn_ops->MaxRecvDataSegmentLength;
- unsolicited_data_length -=
- conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += mdsl;
+ unsolicited_data_length -= mdsl;
continue;
}
- if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
- cmd->data_length) {
- offset += (cmd->data_length - offset);
+ if ((offset + mdsl) >= cmd->se_cmd.data_length) {
+ offset += (cmd->se_cmd.data_length - offset);
continue;
}
- if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ if ((burstlength + mdsl) >=
conn->sess->sess_ops->MaxBurstLength) {
offset += (conn->sess->sess_ops->MaxBurstLength -
burstlength);
@@ -276,27 +282,32 @@ static void iscsit_determine_counts_for_list(
continue;
}
- burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
- offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ burstlength += mdsl;
+ offset += mdsl;
}
}
/*
- * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
- * and DataPDUInOrder=No.
+ * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
+ * or DataPDUInOrder=No.
*/
-static int iscsit_build_pdu_and_seq_list(
+static int iscsit_do_build_pdu_and_seq_lists(
struct iscsi_cmd *cmd,
struct iscsi_build_list *bl)
{
int check_immediate = 0, datapduinorder, datasequenceinorder;
- u32 burstlength = 0, offset = 0, i = 0;
+ u32 burstlength = 0, offset = 0, i = 0, mdsl;
u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = cmd->pdu_list;
struct iscsi_seq *seq = cmd->seq_list;
+ if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
+ mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
+ else
+ mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
+
datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
@@ -306,11 +317,10 @@ static int iscsit_build_pdu_and_seq_list(
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
- unsolicited_data_length = (cmd->data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+ unsolicited_data_length = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
- while (offset < cmd->data_length) {
+ while (offset < cmd->se_cmd.data_length) {
pdu_count++;
if (!datapduinorder) {
pdu[i].offset = offset;
@@ -344,27 +354,24 @@ static int iscsit_build_pdu_and_seq_list(
continue;
}
if (unsolicited_data_length > 0) {
- if ((offset +
- conn->conn_ops->MaxRecvDataSegmentLength) >=
- cmd->data_length) {
+ if ((offset + mdsl) >= cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i].length =
- (cmd->data_length - offset);
+ (cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_UNSOLICITED;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
- (cmd->data_length - offset));
+ (cmd->se_cmd.data_length - offset));
}
unsolicited_data_length -=
- (cmd->data_length - offset);
- offset += (cmd->data_length - offset);
+ (cmd->se_cmd.data_length - offset);
+ offset += (cmd->se_cmd.data_length - offset);
continue;
}
- if ((offset +
- conn->conn_ops->MaxRecvDataSegmentLength) >=
+ if ((offset + mdsl) >=
conn->sess->sess_ops->FirstBurstLength) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
@@ -392,31 +399,28 @@ static int iscsit_build_pdu_and_seq_list(
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
- pdu[i++].length =
- conn->conn_ops->MaxRecvDataSegmentLength;
+ pdu[i++].length = mdsl;
}
- burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
- offset += conn->conn_ops->MaxRecvDataSegmentLength;
- unsolicited_data_length -=
- conn->conn_ops->MaxRecvDataSegmentLength;
+ burstlength += mdsl;
+ offset += mdsl;
+ unsolicited_data_length -= mdsl;
continue;
}
- if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
- cmd->data_length) {
+ if ((offset + mdsl) >= cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
- pdu[i].length = (cmd->data_length - offset);
+ pdu[i].length = (cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_NORMAL;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
- (cmd->data_length - offset));
+ (cmd->se_cmd.data_length - offset));
}
- offset += (cmd->data_length - offset);
+ offset += (cmd->se_cmd.data_length - offset);
continue;
}
- if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ if ((burstlength + mdsl) >=
conn->sess->sess_ops->MaxBurstLength) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
@@ -441,11 +445,10 @@ static int iscsit_build_pdu_and_seq_list(
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
- pdu[i++].length =
- conn->conn_ops->MaxRecvDataSegmentLength;
+ pdu[i++].length = mdsl;
}
- burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
- offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ burstlength += mdsl;
+ offset += mdsl;
}
if (!datasequenceinorder) {
@@ -464,9 +467,8 @@ static int iscsit_build_pdu_and_seq_list(
} else
iscsit_ordered_seq_lists(cmd, bl->type);
}
-#if 0
+
iscsit_dump_seq_list(cmd);
-#endif
}
if (!datapduinorder) {
if (bl->data_direction & ISCSI_PDU_WRITE) {
@@ -484,50 +486,86 @@ static int iscsit_build_pdu_and_seq_list(
} else
iscsit_ordered_pdu_lists(cmd, bl->type);
}
-#if 0
+
iscsit_dump_pdu_list(cmd);
-#endif
}
return 0;
}
-/*
- * Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
- */
-int iscsit_do_build_list(
+int iscsit_build_pdu_and_seq_lists(
struct iscsi_cmd *cmd,
- struct iscsi_build_list *bl)
+ u32 immediate_data_length)
{
+ struct iscsi_build_list bl;
u32 pdu_count = 0, seq_count = 1;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
struct iscsi_seq *seq = NULL;
- iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na;
+
+ /*
+ * Do nothing if no OOO shenanigans
+ */
+ if (sess->sess_ops->DataSequenceInOrder &&
+ sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ if (cmd->data_direction == DMA_NONE)
+ return 0;
+
+ na = iscsit_tpg_get_node_attrib(sess);
+ memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ bl.data_direction = ISCSI_PDU_READ;
+ bl.type = PDULIST_NORMAL;
+ if (na->random_datain_pdu_offsets)
+ bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+ if (na->random_datain_seq_offsets)
+ bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+ } else {
+ bl.data_direction = ISCSI_PDU_WRITE;
+ bl.immediate_data_length = immediate_data_length;
+ if (na->random_r2t_offsets)
+ bl.randomize |= RANDOM_R2T_OFFSETS;
+
+ if (!cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_NORMAL;
+ else if (cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE;
+ else if (!cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_UNSOLICITED;
+ else if (cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+ }
+
+ iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
if (!conn->sess->sess_ops->DataSequenceInOrder) {
- seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
+ seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
if (!seq) {
pr_err("Unable to allocate struct iscsi_seq list\n");
- return -1;
+ return -ENOMEM;
}
cmd->seq_list = seq;
cmd->seq_count = seq_count;
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
- pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
+ pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
if (!pdu) {
pr_err("Unable to allocate struct iscsi_pdu list.\n");
kfree(seq);
- return -1;
+ return -ENOMEM;
}
cmd->pdu_list = pdu;
cmd->pdu_count = pdu_count;
}
- return iscsit_build_pdu_and_seq_list(cmd, bl);
+ return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
}
struct iscsi_pdu *iscsit_get_pdu_holder(
@@ -572,13 +610,12 @@ redo:
pdu = &cmd->pdu_list[cmd->pdu_start];
for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
-#if 0
pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
"_send_order: %d, pdu[i].offset: %d,"
" pdu[i].length: %d\n", pdu[i].seq_no,
pdu[i].pdu_send_order, pdu[i].offset,
pdu[i].length);
-#endif
+
if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
cmd->pdu_send_order++;
return &pdu[i];
@@ -601,11 +638,11 @@ redo:
pr_err("struct iscsi_seq is NULL!\n");
return NULL;
}
-#if 0
+
pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
seq->seq_no);
-#endif
+
pdu = &cmd->pdu_list[seq->pdu_start];
if (seq->pdu_send_order == seq->pdu_count) {
@@ -645,12 +682,11 @@ struct iscsi_seq *iscsit_get_seq_holder(
}
for (i = 0; i < cmd->seq_count; i++) {
-#if 0
pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
"xfer_len: %d, seq_list[i].seq_no %u\n",
cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
cmd->seq_list[i].seq_no);
-#endif
+
if ((cmd->seq_list[i].orig_offset +
cmd->seq_list[i].xfer_len) >=
(offset + length))
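
The changes through this file replace every hard-coded conn_ops->MaxRecvDataSegmentLength with an mdsl selected by direction: MaxXmitDataSegmentLength when data flows to the device (WRITE Data-OUT) and MaxRecvDataSegmentLength otherwise. The counting pass then carves the transfer into mdsl-sized PDUs grouped into MaxBurstLength sequences; a simplified, runnable model of that arithmetic (immediate and unsolicited phases omitted):

#include <stdio.h>

static void count_pdus_and_seqs(unsigned int data_length,
                                unsigned int mdsl,
                                unsigned int max_burst_length,
                                unsigned int *pdu_count,
                                unsigned int *seq_count)
{
        unsigned int offset = 0, burstlength = 0;

        *pdu_count = 0;
        *seq_count = 1;

        while (offset < data_length) {
                (*pdu_count)++;

                if (offset + mdsl >= data_length)
                        break;                  /* final, possibly short PDU */

                if (burstlength + mdsl >= max_burst_length) {
                        /* Burst full: close this sequence, open the next. */
                        offset += max_burst_length - burstlength;
                        burstlength = 0;
                        (*seq_count)++;
                        continue;
                }
                burstlength += mdsl;
                offset += mdsl;
        }
}

int main(void)
{
        unsigned int pdus, seqs;

        count_pdus_and_seqs(1 << 20, 8192, 262144, &pdus, &seqs);
        printf("1 MiB @ mdsl=8k, burst=256k -> %u PDUs in %u sequences\n",
               pdus, seqs);
        return 0;
}
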
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index 0d52a10e306..d5b153751a8 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -78,7 +78,7 @@ struct iscsi_seq {
u32 xfer_len;
} ____cacheline_aligned;
-extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
+extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 421d6947dc6..10339551030 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -2,9 +2,7 @@
* Modern ConfigFS group context specific iSCSI statistics based on original
* iscsi_target_mib.c code
*
- * Copyright (c) 2011 Rising Tide Systems
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * Copyright (c) 2011-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -177,7 +175,7 @@ ISCSI_STAT_INSTANCE_ATTR_RO(description);
static ssize_t iscsi_stat_instance_show_attr_vendor(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
- return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
+ return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
}
ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
@@ -410,14 +408,16 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[8];
+ int ret;
spin_lock(&lstat->lock);
- snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
- "ipv6" : "ipv4");
+ if (lstat->last_intr_fail_ip_family == AF_INET6)
+ ret = snprintf(page, PAGE_SIZE, "ipv6\n");
+ else
+ ret = snprintf(page, PAGE_SIZE, "ipv4\n");
spin_unlock(&lstat->lock);
- return snprintf(page, PAGE_SIZE, "%s\n", buf);
+ return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
@@ -427,16 +427,13 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[32];
+ int ret;
spin_lock(&lstat->lock);
- if (lstat->last_intr_fail_ip_family == AF_INET6)
- snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
- else
- snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+ ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr);
spin_unlock(&lstat->lock);
- return snprintf(page, PAGE_SIZE, "%s\n", buf);
+ return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
@@ -795,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->cmd_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -818,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rsp_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -841,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->tx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->tx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -865,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->rx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -889,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_digest_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_digest_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -913,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_timeout_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_timeout_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
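
The session statistics above switch from plain integer counters printed as %u/%llu to atomic_long_t read with atomic_long_read(), so the increment side (not shown in these hunks) can run lock-free and the 64-bit octet counters no longer need a locked read to avoid tearing on 32-bit systems. A C11 userspace analog of the pattern, with stdatomic standing in for the kernel's atomic_long API:

#include <stdatomic.h>
#include <stdio.h>

struct sess_stats {
        atomic_long cmd_pdus;   /* kernel-side: atomic_long_t */
};

/* Hot path: lock-free increment (atomic_long_inc() in the kernel). */
static void account_cmd_pdu(struct sess_stats *s)
{
        atomic_fetch_add_explicit(&s->cmd_pdus, 1, memory_order_relaxed);
}

/* Show path: a single atomic load replaces a read that previously
 * relied on the surrounding lock for a consistent value. */
static long read_cmd_pdus(struct sess_stats *s)
{
        return atomic_load_explicit(&s->cmd_pdus, memory_order_relaxed);
}

int main(void)
{
        struct sess_stats s = { 0 };

        for (int i = 0; i < 1000; i++)
                account_cmd_pdu(&s);
        printf("cmd_pdus: %ld\n", read_cmd_pdus(&s));
        return 0;
}
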
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index e01da9d2b37..78404b1cc0b 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Target specific Task Management functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -19,9 +17,11 @@
******************************************************************************/
#include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
@@ -49,21 +49,20 @@ u8 iscsit_tmr_abort_task(
if (!ref_cmd) {
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
- return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
- (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
+ return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
+ iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
- if (ref_cmd->cmd_sn != hdr->refcmdsn) {
+ if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
pr_err("RefCmdSN 0x%08x does not equal"
" task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
hdr->refcmdsn, ref_cmd->cmd_sn);
return ISCSI_TMF_RSP_REJECTED;
}
- se_tmr->ref_task_tag = hdr->rtt;
- se_tmr->ref_cmd = &ref_cmd->se_cmd;
- tmr_req->ref_cmd_sn = hdr->refcmdsn;
- tmr_req->exp_data_sn = hdr->exp_datasn;
+ se_tmr->ref_task_tag = (__force u32)hdr->rtt;
+ tmr_req->ref_cmd = ref_cmd;
+ tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
return ISCSI_TMF_RSP_COMPLETE;
}
@@ -78,10 +77,7 @@ int iscsit_tmr_task_warm_reset(
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
-#if 0
- struct iscsi_init_task_mgt_cmnd *hdr =
- (struct iscsi_init_task_mgt_cmnd *) buf;
-#endif
+
if (!na->tmr_warm_reset) {
pr_err("TMR Opcode TARGET_WARM_RESET authorization"
" failed for Initiator Node: %s\n",
@@ -124,7 +120,7 @@ u8 iscsit_tmr_task_reassign(
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
- int ret;
+ int ret, ref_lun;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -148,7 +144,7 @@ u8 iscsit_tmr_task_reassign(
}
/*
* Temporary check to prevent connection recovery for
- * connections with a differing MaxRecvDataSegmentLength.
+ * connections with a differing Max*DataSegmentLength.
*/
if (cr->maxrecvdatasegmentlength !=
conn->conn_ops->MaxRecvDataSegmentLength) {
@@ -157,12 +153,25 @@ u8 iscsit_tmr_task_reassign(
" TMR TASK_REASSIGN.\n");
return ISCSI_TMF_RSP_REJECTED;
}
+ if (cr->maxxmitdatasegmentlength !=
+ conn->conn_ops->MaxXmitDataSegmentLength) {
+ pr_err("Unable to perform connection recovery for"
+ " differing MaxXmitDataSegmentLength, rejecting"
+ " TMR TASK_REASSIGN.\n");
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
+ ref_lun = scsilun_to_int(&hdr->lun);
+ if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
+ pr_err("Unable to perform connection recovery for"
+ " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+ ref_lun, ref_cmd->se_cmd.orig_fe_lun);
+ return ISCSI_TMF_RSP_REJECTED;
+ }
- se_tmr->ref_task_tag = hdr->rtt;
- se_tmr->ref_cmd = &ref_cmd->se_cmd;
- se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
- tmr_req->ref_cmd_sn = hdr->refcmdsn;
- tmr_req->exp_data_sn = hdr->exp_datasn;
+ se_tmr->ref_task_tag = (__force u32)hdr->rtt;
+ tmr_req->ref_cmd = ref_cmd;
+ tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
tmr_req->conn_recovery = cr;
tmr_req->task_reassign = 1;
/*
@@ -194,9 +203,7 @@ static int iscsit_task_reassign_complete_nop_out(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -216,7 +223,7 @@ static int iscsit_task_reassign_complete_nop_out(
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
cmd->i_state = ISTATE_SEND_NOPIN;
@@ -254,7 +261,8 @@ static int iscsit_task_reassign_complete_write(
pr_debug("WRITE ITT: 0x%08x: t_state: %d"
" never sent to transport\n",
cmd->init_task_tag, cmd->se_cmd.t_state);
- return transport_generic_handle_data(se_cmd);
+ target_execute_cmd(se_cmd);
+ return 0;
}
cmd->i_state = ISTATE_SEND_STATUS;
@@ -272,9 +280,9 @@ static int iscsit_task_reassign_complete_write(
offset = cmd->next_burst_len = cmd->write_data_done;
if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
- cmd->data_length) {
+ cmd->se_cmd.data_length) {
no_build_r2ts = 1;
- length = (cmd->data_length - offset);
+ length = (cmd->se_cmd.data_length - offset);
} else
length = (conn->sess->sess_ops->FirstBurstLength - offset);
@@ -292,7 +300,7 @@ static int iscsit_task_reassign_complete_write(
/*
* iscsit_build_r2ts_for_cmd() can handle the rest from here.
*/
- return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
+ return conn->conn_transport->iscsit_get_dataout(conn, cmd, true);
}
static int iscsit_task_reassign_complete_read(
@@ -363,9 +371,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -385,10 +391,10 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
- if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
@@ -414,17 +420,14 @@ static int iscsit_task_reassign_complete(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd;
struct iscsi_cmd *cmd;
int ret = 0;
- if (!se_tmr->ref_cmd) {
+ if (!tmr_req->ref_cmd) {
pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
return -1;
}
- se_cmd = se_tmr->ref_cmd;
- cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ cmd = tmr_req->ref_cmd;
cmd->conn = conn;
@@ -456,7 +459,7 @@ static int iscsit_task_reassign_complete(
* Right now the only one that it's really needed for is
* connection recovery related TASK_REASSIGN.
*/
-extern int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
@@ -467,11 +470,12 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *con
return 0;
}
+EXPORT_SYMBOL(iscsit_tmr_post_handler);
/*
* Nothing to do here, but leave it for good measure. :-)
*/
-int iscsit_task_reassign_prepare_read(
+static int iscsit_task_reassign_prepare_read(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
@@ -546,13 +550,11 @@ static void iscsit_task_reassign_prepare_unsolicited_dataout(
}
}
-int iscsit_task_reassign_prepare_write(
+static int iscsit_task_reassign_prepare_write(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_pdu *pdu = NULL;
struct iscsi_r2t *r2t = NULL, *r2t_tmp;
int first_incomplete_r2t = 1, i = 0;
@@ -785,14 +787,12 @@ int iscsit_check_task_reassign_expdatasn(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
return 0;
- if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+ if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
return 0;
if (ref_cmd->data_direction == DMA_NONE)
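
The new ref_lun check in iscsit_tmr_task_reassign() compares the TMR PDU's 8-byte LUN field, converted via scsilun_to_int(), against the referenced command's orig_fe_lun. For reference, below is a minimal userspace sketch of that conversion, assuming the standard SAM 8-byte LUN layout of big-endian 16-bit addressing levels; it models the kernel helper rather than reproducing it:

    #include <stdint.h>
    #include <stdio.h>

    struct scsi_lun_sketch {
        uint8_t lun[8];
    };

    /* Each pair of bytes is one big-endian addressing level; fold all
     * four levels into a single integer, as scsilun_to_int() does. */
    static uint64_t sketch_scsilun_to_int(const struct scsi_lun_sketch *slun)
    {
        uint64_t res = 0;
        int i;

        for (i = 0; i < 8; i += 2)
            res |= ((uint64_t)slun->lun[i] << ((i + 1) * 8)) |
                   ((uint64_t)slun->lun[i + 1] << (i * 8));
        return res;
    }

    int main(void)
    {
        struct scsi_lun_sketch slun = { .lun = { 0x00, 0x05 } };

        /* Single-level peripheral addressing: prints "lun = 5". */
        printf("lun = %llu\n",
               (unsigned long long)sketch_scsilun_to_int(&slun));
        return 0;
    }
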
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 879d8d0fa3f..c3cb5c15efd 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains iSCSI Target Portal Group related functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -31,6 +29,8 @@
#include "iscsi_target.h"
#include "iscsi_target_parameters.h"
+#include <target/iscsi/iscsi_transport.h>
+
struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
{
struct iscsi_portal_group *tpg;
@@ -47,7 +47,7 @@ struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u1
INIT_LIST_HEAD(&tpg->tpg_gnp_list);
INIT_LIST_HEAD(&tpg->tpg_list);
mutex_init(&tpg->tpg_access_lock);
- mutex_init(&tpg->np_login_lock);
+ sema_init(&tpg->np_login_sem, 1);
spin_lock_init(&tpg->tpg_state_lock);
spin_lock_init(&tpg->tpg_np_lock);
@@ -127,7 +127,8 @@ void iscsit_release_discovery_tpg(void)
struct iscsi_portal_group *iscsit_get_tpg_from_np(
struct iscsi_tiqn *tiqn,
- struct iscsi_np *np)
+ struct iscsi_np *np,
+ struct iscsi_tpg_np **tpg_np_out)
{
struct iscsi_portal_group *tpg = NULL;
struct iscsi_tpg_np *tpg_np;
@@ -136,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
- if (tpg->tpg_state == TPG_STATE_FREE) {
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
@@ -145,6 +146,8 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
if (tpg_np->tpg_np == np) {
+ *tpg_np_out = tpg_np;
+ kref_get(&tpg_np->tpg_np_kref);
spin_unlock(&tpg->tpg_np_lock);
spin_unlock(&tiqn->tiqn_tpg_lock);
return tpg;
@@ -173,18 +176,22 @@ void iscsit_put_tpg(struct iscsi_portal_group *tpg)
static void iscsit_clear_tpg_np_login_thread(
struct iscsi_tpg_np *tpg_np,
- struct iscsi_portal_group *tpg)
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
if (!tpg_np->tpg_np) {
pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
return;
}
- iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
+ if (shutdown)
+ tpg_np->tpg_np->enabled = false;
+ iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
-void iscsit_clear_tpg_np_login_threads(
- struct iscsi_portal_group *tpg)
+static void iscsit_clear_tpg_np_login_threads(
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
struct iscsi_tpg_np *tpg_np;
@@ -195,7 +202,7 @@ void iscsit_clear_tpg_np_login_threads(
continue;
}
spin_unlock(&tpg->tpg_np_lock);
- iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown);
spin_lock(&tpg->tpg_np_lock);
}
spin_unlock(&tpg->tpg_np_lock);
@@ -218,6 +225,9 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+ a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
+ a->default_erl = TA_DEFAULT_ERL;
+ a->t10_pi = TA_DEFAULT_T10_PI;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -232,7 +242,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto err_out;
- ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+ tpg->tpg_attrib.tpg = tpg;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
@@ -303,6 +313,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
{
struct iscsi_param *param;
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+ int ret;
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_ACTIVE) {
@@ -319,19 +330,19 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param) {
spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
+ return -EINVAL;
}
- if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
- if (!strcmp(param->value, NONE))
- if (iscsi_update_param_value(param, CHAP) < 0) {
- spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
- }
- if (iscsit_ta_authentication(tpg, 1) < 0) {
- spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
+ if (tpg->tpg_attrib.authentication) {
+ if (!strcmp(param->value, NONE)) {
+ ret = iscsi_update_param_value(param, CHAP);
+ if (ret)
+ goto err;
}
+
+ ret = iscsit_ta_authentication(tpg, 1);
+ if (ret < 0)
+ goto err;
}
tpg->tpg_state = TPG_STATE_ACTIVE;
@@ -344,6 +355,10 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
+
+err:
+ spin_unlock(&tpg->tpg_state_lock);
+ return ret;
}
int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
@@ -361,7 +376,7 @@ int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
- iscsit_clear_tpg_np_login_threads(tpg);
+ iscsit_clear_tpg_np_login_threads(tpg, false);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
spin_lock(&tpg->tpg_state_lock);
@@ -417,6 +432,35 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
return NULL;
}
+static bool iscsit_tpg_check_network_portal(
+ struct iscsi_tiqn *tiqn,
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ struct iscsi_np *np;
+ bool match = false;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ np = tpg_np->tpg_np;
+
+ match = iscsit_check_np_match(sockaddr, np,
+ network_transport);
+ if (match)
+ break;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return match;
+}
+
struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_portal_group *tpg,
struct __kernel_sockaddr_storage *sockaddr,
@@ -427,6 +471,16 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_np *np;
struct iscsi_tpg_np *tpg_np;
+ if (!tpg_np_parent) {
+ if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
+ network_transport)) {
+ pr_err("Network Portal: %s already exists on a"
+ " different TPG on %s\n", ip_str,
+ tpg->tpg_tiqn->tiqn);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
if (!tpg_np) {
pr_err("Unable to allocate memory for"
@@ -444,7 +498,10 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
spin_lock_init(&tpg_np->tpg_np_parent_lock);
+ init_completion(&tpg_np->tpg_np_comp);
+ kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
+ np->tpg_np = tpg_np;
tpg_np->tpg = tpg;
spin_lock(&tpg->tpg_np_lock);
@@ -464,7 +521,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
- (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+ np->np_transport->name);
return tpg_np;
}
@@ -474,11 +531,11 @@ static int iscsit_tpg_release_np(
struct iscsi_portal_group *tpg,
struct iscsi_np *np)
{
- iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
- (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+ np->np_transport->name);
tpg_np->tpg_np = NULL;
tpg_np->tpg = NULL;
@@ -558,7 +615,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
if ((authentication != 1) && (authentication != 0)) {
pr_err("Illegal value for authentication parameter:"
" %u, ignoring request.\n", authentication);
- return -1;
+ return -EINVAL;
}
memset(buf1, 0, sizeof(buf1));
@@ -593,7 +650,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
} else {
snprintf(buf1, sizeof(buf1), "%s", param->value);
none = strstr(buf1, NONE);
- if ((none))
+ if (none)
goto out;
strncat(buf1, ",", strlen(","));
strncat(buf1, NONE, strlen(NONE));
@@ -672,6 +729,12 @@ int iscsit_ta_generate_node_acls(
pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+ if (flag == 1 && a->cache_dynamic_acls == 0) {
+ pr_debug("Explicitly setting cache_dynamic_acls=1 when "
+ "generate_node_acls=1\n");
+ a->cache_dynamic_acls = 1;
+ }
+
return 0;
}
@@ -711,6 +774,12 @@ int iscsit_ta_cache_dynamic_acls(
return -EINVAL;
}
+ if (a->generate_node_acls == 1 && flag == 0) {
+ pr_debug("Skipping cache_dynamic_acls=0 when"
+ " generate_node_acls=1\n");
+ return 0;
+ }
+
a->cache_dynamic_acls = flag;
pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
" ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
@@ -755,3 +824,58 @@ int iscsit_ta_prod_mode_write_protect(
return 0;
}
+
+int iscsit_ta_demo_mode_discovery(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->demo_mode_discovery = flag;
+ pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
+ " %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
+ "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_default_erl(
+ struct iscsi_portal_group *tpg,
+ u32 default_erl)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
+ pr_err("Illegal value for default_erl: %u\n", default_erl);
+ return -EINVAL;
+ }
+
+ a->default_erl = default_erl;
+ pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
+
+ return 0;
+}
+
+int iscsit_ta_t10_pi(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->t10_pi = flag;
+ pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
+ " %s\n", tpg->tpgt, (a->t10_pi) ?
+ "ON" : "OFF");
+
+ return 0;
+}
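
iscsit_tpg_add_network_portal() now refuses to create a portal that already exists on a different TPG under the same TIQN, by walking every TPG's tpg_gnp_list and asking iscsit_check_np_match() for each entry. A simplified userspace sketch of that nested scan, with hypothetical flat arrays standing in for the kernel's locked lists:

    #include <stdbool.h>
    #include <string.h>

    /* Hypothetical stand-ins for iscsi_portal_group / iscsi_np. */
    struct portal { char addr[64]; int transport; };
    struct tpg    { struct portal *portals; int nr_portals; };

    /* Mirrors the intent of iscsit_tpg_check_network_portal(): report a
     * match once any TPG already exposes the address/transport pair. */
    static bool tiqn_has_portal(const struct tpg *tpgs, int nr_tpgs,
                                const char *addr, int transport)
    {
        int i, j;

        for (i = 0; i < nr_tpgs; i++)
            for (j = 0; j < tpgs[i].nr_portals; j++)
                if (tpgs[i].portals[j].transport == transport &&
                    !strcmp(tpgs[i].portals[j].addr, addr))
                    return true;
        return false;
    }
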
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index dda48c141a8..e7265337bc4 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -5,10 +5,9 @@ extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *,
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
- struct iscsi_np *);
+ struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
-extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
@@ -37,5 +36,8 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 0baac5bcebd..601e9cc61e9 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Login Thread and Thread Queue functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -40,7 +38,7 @@ static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
spin_unlock(&active_ts_lock);
}
-extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
{
spin_lock(&inactive_ts_lock);
list_add_tail(&ts->ts_list, &inactive_ts_list);
@@ -66,8 +64,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
return NULL;
}
- list_for_each_entry(ts, &inactive_ts_list, ts_list)
- break;
+ ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
list_del(&ts->ts_list);
iscsit_global->inactive_ts--;
@@ -76,7 +73,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
return ts;
}
-extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
+int iscsi_allocate_thread_sets(u32 thread_pair_count)
{
int allocated_thread_pair_count = 0, i, thread_id;
struct iscsi_thread_set *ts = NULL;
@@ -106,12 +103,11 @@ extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
ts->status = ISCSI_THREAD_SET_FREE;
INIT_LIST_HEAD(&ts->ts_list);
spin_lock_init(&ts->ts_state_lock);
- init_completion(&ts->rx_post_start_comp);
- init_completion(&ts->tx_post_start_comp);
init_completion(&ts->rx_restart_comp);
init_completion(&ts->tx_restart_comp);
init_completion(&ts->rx_start_comp);
init_completion(&ts->tx_start_comp);
+ sema_init(&ts->ts_activate_sem, 0);
ts->create_threads = 1;
ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
@@ -140,35 +136,44 @@ extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
return allocated_thread_pair_count;
}
-extern void iscsi_deallocate_thread_sets(void)
+static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts)
{
- u32 released_count = 0;
- struct iscsi_thread_set *ts = NULL;
-
- while ((ts = iscsi_get_ts_from_inactive_list())) {
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ if (ts->rx_thread) {
+ complete(&ts->rx_start_comp);
+ spin_unlock_bh(&ts->ts_state_lock);
+ kthread_stop(ts->rx_thread);
spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
+ }
+ if (ts->tx_thread) {
+ complete(&ts->tx_start_comp);
spin_unlock_bh(&ts->ts_state_lock);
+ kthread_stop(ts->tx_thread);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
- if (ts->rx_thread) {
- send_sig(SIGINT, ts->rx_thread, 1);
- kthread_stop(ts->rx_thread);
- }
- if (ts->tx_thread) {
- send_sig(SIGINT, ts->tx_thread, 1);
- kthread_stop(ts->tx_thread);
- }
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
+ kfree(ts);
+}
+
+void iscsi_deallocate_thread_sets(void)
+{
+ struct iscsi_thread_set *ts = NULL;
+ u32 released_count = 0;
+ while ((ts = iscsi_get_ts_from_inactive_list())) {
+
+ iscsi_deallocate_thread_one(ts);
released_count++;
- kfree(ts);
}
if (released_count)
@@ -188,34 +193,13 @@ static void iscsi_deallocate_extra_thread_sets(void)
if (!ts)
break;
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (ts->rx_thread) {
- send_sig(SIGINT, ts->rx_thread, 1);
- kthread_stop(ts->rx_thread);
- }
- if (ts->tx_thread) {
- send_sig(SIGINT, ts->tx_thread, 1);
- kthread_stop(ts->tx_thread);
- }
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
-
+ iscsi_deallocate_thread_one(ts);
released_count++;
- kfree(ts);
}
- if (released_count) {
+ if (released_count)
pr_debug("Stopped %d thread set(s) (%d total threads)."
"\n", released_count, released_count * 2);
- }
}
void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
@@ -225,37 +209,23 @@ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set
spin_lock_bh(&ts->ts_state_lock);
conn->thread_set = ts;
ts->conn = conn;
+ ts->status = ISCSI_THREAD_SET_ACTIVE;
spin_unlock_bh(&ts->ts_state_lock);
- /*
- * Start up the RX thread and wait on rx_post_start_comp. The RX
- * Thread will then do the same for the TX Thread in
- * iscsi_rx_thread_pre_handler().
- */
+
complete(&ts->rx_start_comp);
- wait_for_completion(&ts->rx_post_start_comp);
+ complete(&ts->tx_start_comp);
+
+ down(&ts->ts_activate_sem);
}
struct iscsi_thread_set *iscsi_get_thread_set(void)
{
- int allocate_ts = 0;
- struct completion comp;
- struct iscsi_thread_set *ts = NULL;
- /*
- * If no inactive thread set is available on the first call to
- * iscsi_get_ts_from_inactive_list(), sleep for a second and
- * try again. If still none are available after two attempts,
- * allocate a set ourselves.
- */
+ struct iscsi_thread_set *ts;
+
get_set:
ts = iscsi_get_ts_from_inactive_list();
if (!ts) {
- if (allocate_ts == 2)
- iscsi_allocate_thread_sets(1);
-
- init_completion(&comp);
- wait_for_completion_timeout(&comp, 1 * HZ);
-
- allocate_ts++;
+ iscsi_allocate_thread_sets(1);
goto get_set;
}
@@ -264,6 +234,7 @@ get_set:
ts->thread_count = 2;
init_completion(&ts->rx_restart_comp);
init_completion(&ts->tx_restart_comp);
+ sema_init(&ts->ts_activate_sem, 0);
return ts;
}
@@ -401,7 +372,8 @@ static void iscsi_check_to_add_additional_sets(void)
static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
{
spin_lock_bh(&ts->ts_state_lock);
- if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
+ if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() ||
+ signal_pending(current)) {
spin_unlock_bh(&ts->ts_state_lock);
return -1;
}
@@ -420,7 +392,8 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
goto sleep;
}
- flush_signals(current);
+ if (ts->status != ISCSI_THREAD_SET_DIE)
+ flush_signals(current);
if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
@@ -447,18 +420,19 @@ sleep:
if (iscsi_signal_thread_pre_handler(ts) < 0)
return NULL;
+ iscsi_check_to_add_additional_sets();
+
+ spin_lock_bh(&ts->ts_state_lock);
if (!ts->conn) {
pr_err("struct iscsi_thread_set->conn is NULL for"
- " thread_id: %d, going back to sleep\n", ts->thread_id);
- goto sleep;
+ " RX thread_id: %s/%d\n", current->comm, current->pid);
+ spin_unlock_bh(&ts->ts_state_lock);
+ return NULL;
}
- iscsi_check_to_add_additional_sets();
- /*
- * The RX Thread starts up the TX Thread and sleeps.
- */
ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
- complete(&ts->tx_start_comp);
- wait_for_completion(&ts->tx_post_start_comp);
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ up(&ts->ts_activate_sem);
return ts->conn;
}
@@ -473,7 +447,8 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
goto sleep;
}
- flush_signals(current);
+ if (ts->status != ISCSI_THREAD_SET_DIE)
+ flush_signals(current);
if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
@@ -499,27 +474,20 @@ sleep:
if (iscsi_signal_thread_pre_handler(ts) < 0)
return NULL;
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for "
- " thread_id: %d, going back to sleep\n",
- ts->thread_id);
- goto sleep;
- }
-
iscsi_check_to_add_additional_sets();
- /*
- * From the TX thread, up the tx_post_start_comp that the RX Thread is
- * sleeping on in iscsi_rx_thread_pre_handler(), then up the
- * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
- */
- ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
- complete(&ts->tx_post_start_comp);
- complete(&ts->rx_post_start_comp);
spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_ACTIVE;
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for"
+ " TX thread_id: %s/%d\n", current->comm, current->pid);
+ spin_unlock_bh(&ts->ts_state_lock);
+ return NULL;
+ }
+ ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
+ up(&ts->ts_activate_sem);
+
return ts->conn;
}
@@ -536,12 +504,6 @@ int iscsi_thread_set_init(void)
return -ENOMEM;
}
- spin_lock_init(&active_ts_lock);
- spin_lock_init(&inactive_ts_lock);
- spin_lock_init(&ts_bitmap_lock);
- INIT_LIST_HEAD(&active_ts_list);
- INIT_LIST_HEAD(&inactive_ts_list);
-
return 0;
}
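
The thread-queue rework above replaces the old rx/tx post-start completion chain with a single ts_activate_sem: the activator signals both start completions and then sleeps on the semaphore, which each side ups once it has latched its state. A minimal pthreads sketch of the same handshake follows; it is slightly simplified in that the activator here waits once per worker, whereas the kernel waits once and re-initializes the semaphore when a thread set is reused:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t rx_start, tx_start, activate_sem;

    static void *rx_thread(void *arg)
    {
        (void)arg;
        sem_wait(&rx_start);       /* like waiting on rx_start_comp */
        /* ... latch conn pointer, set ISCSI_CLEAR_RX_THREAD ... */
        sem_post(&activate_sem);   /* like up(&ts->ts_activate_sem) */
        return NULL;
    }

    static void *tx_thread(void *arg)
    {
        (void)arg;
        sem_wait(&tx_start);
        sem_post(&activate_sem);
        return NULL;
    }

    int main(void)
    {
        pthread_t rx, tx;

        sem_init(&rx_start, 0, 0);
        sem_init(&tx_start, 0, 0);
        sem_init(&activate_sem, 0, 0);
        pthread_create(&rx, NULL, rx_thread, NULL);
        pthread_create(&tx, NULL, tx_thread, NULL);

        /* iscsi_activate_thread_set(): wake both sides, then wait. */
        sem_post(&rx_start);
        sem_post(&tx_start);
        sem_wait(&activate_sem);
        sem_wait(&activate_sem);

        pthread_join(rx, NULL);
        pthread_join(tx, NULL);
        puts("thread set active");
        return 0;
    }
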
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
index 26e6a95ec20..cc1eede5ab3 100644
--- a/drivers/target/iscsi/iscsi_target_tq.h
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -5,7 +5,6 @@
* Defines for thread sets.
*/
extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
-extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
extern int iscsi_allocate_thread_sets(u32);
extern void iscsi_deallocate_thread_sets(void);
extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
@@ -65,10 +64,6 @@ struct iscsi_thread_set {
struct iscsi_conn *conn;
/* used for controlling ts state accesses */
spinlock_t ts_state_lock;
- /* Used for rx side post startup */
- struct completion rx_post_start_comp;
- /* Used for tx side post startup */
- struct completion tx_post_start_comp;
/* used for restarting thread queue */
struct completion rx_restart_comp;
/* used for restarting thread queue */
@@ -83,6 +78,7 @@ struct iscsi_thread_set {
struct task_struct *tx_thread;
/* struct iscsi_thread_set in list list head*/
struct list_head ts_list;
+ struct semaphore ts_activate_sem;
};
#endif /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
new file mode 100644
index 00000000000..882728fac30
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -0,0 +1,55 @@
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <target/iscsi/iscsi_transport.h>
+
+static LIST_HEAD(g_transport_list);
+static DEFINE_MUTEX(transport_mutex);
+
+struct iscsit_transport *iscsit_get_transport(int type)
+{
+ struct iscsit_transport *t;
+
+ mutex_lock(&transport_mutex);
+ list_for_each_entry(t, &g_transport_list, t_node) {
+ if (t->transport_type == type) {
+ if (t->owner && !try_module_get(t->owner)) {
+ t = NULL;
+ }
+ mutex_unlock(&transport_mutex);
+ return t;
+ }
+ }
+ mutex_unlock(&transport_mutex);
+
+ return NULL;
+}
+
+void iscsit_put_transport(struct iscsit_transport *t)
+{
+ if (t->owner)
+ module_put(t->owner);
+}
+
+int iscsit_register_transport(struct iscsit_transport *t)
+{
+ INIT_LIST_HEAD(&t->t_node);
+
+ mutex_lock(&transport_mutex);
+ list_add_tail(&t->t_node, &g_transport_list);
+ mutex_unlock(&transport_mutex);
+
+ pr_debug("Registered iSCSI transport: %s\n", t->name);
+
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_register_transport);
+
+void iscsit_unregister_transport(struct iscsit_transport *t)
+{
+ mutex_lock(&transport_mutex);
+ list_del(&t->t_node);
+ mutex_unlock(&transport_mutex);
+
+ pr_debug("Unregistered iSCSI transport: %s\n", t->name);
+}
+EXPORT_SYMBOL(iscsit_unregister_transport);
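
The new iscsi_target_transport.c gives transport modules a small registration API: iscsit_register_transport() and iscsit_unregister_transport() are exported, and iscsit_get_transport() pins the owning module via try_module_get(). A hedged sketch of how a transport module might use it; only the name, transport_type, and owner fields appear above, so any other field (and the type value 42) is an assumption for illustration:

    #include <linux/module.h>
    #include <target/iscsi/iscsi_transport.h>

    /* Hypothetical transport definition; fields beyond .name,
     * .transport_type and .owner are omitted here. */
    static struct iscsit_transport my_iscsit_transport = {
        .name           = "mytrans",
        .transport_type = 42,       /* hypothetical type value */
        .owner          = THIS_MODULE,
    };

    static int __init my_transport_init(void)
    {
        return iscsit_register_transport(&my_iscsit_transport);
    }

    static void __exit my_transport_exit(void)
    {
        iscsit_unregister_transport(&my_iscsit_transport);
    }

    module_init(my_transport_init);
    module_exit(my_transport_exit);
    MODULE_LICENSE("GPL");
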
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 11287e1ece1..fd90b28f1d9 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Target specific utility functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -19,11 +17,13 @@
******************************************************************************/
#include <linux/list.h>
+#include <linux/percpu_ida.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include <target/iscsi/iscsi_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -152,21 +152,25 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
*/
-struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
+ struct se_session *se_sess = conn->sess->se_sess;
+ int size, tag;
- cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
- if (!cmd) {
- pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ if (tag < 0)
return NULL;
- }
- cmd->conn = conn;
- INIT_LIST_HEAD(&cmd->i_list);
+ size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
+ cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
+ memset(cmd, 0, size);
+
+ cmd->se_cmd.map_tag = tag;
+ cmd->conn = conn;
+ INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
- init_completion(&cmd->reject_comp);
spin_lock_init(&cmd->datain_lock);
spin_lock_init(&cmd->dataout_timeout_lock);
spin_lock_init(&cmd->istate_lock);
@@ -175,175 +179,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
return cmd;
}
-
-/*
- * Called from iscsi_handle_scsi_cmd()
- */
-struct iscsi_cmd *iscsit_allocate_se_cmd(
- struct iscsi_conn *conn,
- u32 data_length,
- int data_direction,
- int iscsi_task_attr)
-{
- struct iscsi_cmd *cmd;
- struct se_cmd *se_cmd;
- int sam_task_attr;
-
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
- if (!cmd)
- return NULL;
-
- cmd->data_direction = data_direction;
- cmd->data_length = data_length;
- /*
- * Figure out the SAM Task Attribute for the incoming SCSI CDB
- */
- if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
- (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
- sam_task_attr = MSG_SIMPLE_TAG;
- else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
- sam_task_attr = MSG_ORDERED_TAG;
- else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
- sam_task_attr = MSG_HEAD_TAG;
- else if (iscsi_task_attr == ISCSI_ATTR_ACA)
- sam_task_attr = MSG_ACA_TAG;
- else {
- pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
- " MSG_SIMPLE_TAG\n", iscsi_task_attr);
- sam_task_attr = MSG_SIMPLE_TAG;
- }
-
- se_cmd = &cmd->se_cmd;
- /*
- * Initialize struct se_cmd descriptor from target_core_mod infrastructure
- */
- transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
- conn->sess->se_sess, data_length, data_direction,
- sam_task_attr, &cmd->sense_buffer[0]);
- return cmd;
-}
-
-struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
- struct iscsi_conn *conn,
- u8 function)
-{
- struct iscsi_cmd *cmd;
- struct se_cmd *se_cmd;
- u8 tcm_function;
-
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
- if (!cmd)
- return NULL;
-
- cmd->data_direction = DMA_NONE;
-
- cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
- if (!cmd->tmr_req) {
- pr_err("Unable to allocate memory for"
- " Task Management command!\n");
- goto out;
- }
- /*
- * TASK_REASSIGN for ERL=2 / connection stays inside of
- * LIO-Target $FABRIC_MOD
- */
- if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
- return cmd;
-
- se_cmd = &cmd->se_cmd;
- /*
- * Initialize struct se_cmd descriptor from target_core_mod infrastructure
- */
- transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
-
- switch (function) {
- case ISCSI_TM_FUNC_ABORT_TASK:
- tcm_function = TMR_ABORT_TASK;
- break;
- case ISCSI_TM_FUNC_ABORT_TASK_SET:
- tcm_function = TMR_ABORT_TASK_SET;
- break;
- case ISCSI_TM_FUNC_CLEAR_ACA:
- tcm_function = TMR_CLEAR_ACA;
- break;
- case ISCSI_TM_FUNC_CLEAR_TASK_SET:
- tcm_function = TMR_CLEAR_TASK_SET;
- break;
- case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
- tcm_function = TMR_LUN_RESET;
- break;
- case ISCSI_TM_FUNC_TARGET_WARM_RESET:
- tcm_function = TMR_TARGET_WARM_RESET;
- break;
- case ISCSI_TM_FUNC_TARGET_COLD_RESET:
- tcm_function = TMR_TARGET_COLD_RESET;
- break;
- default:
- pr_err("Unknown iSCSI TMR Function:"
- " 0x%02x\n", function);
- goto out;
- }
-
- se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
- cmd->tmr_req, tcm_function,
- GFP_KERNEL);
- if (!se_cmd->se_tmr_req)
- goto out;
-
- cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
-
- return cmd;
-out:
- iscsit_release_cmd(cmd);
- return NULL;
-}
-
-int iscsit_decide_list_to_build(
- struct iscsi_cmd *cmd,
- u32 immediate_data_length)
-{
- struct iscsi_build_list bl;
- struct iscsi_conn *conn = cmd->conn;
- struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na;
-
- if (sess->sess_ops->DataSequenceInOrder &&
- sess->sess_ops->DataPDUInOrder)
- return 0;
-
- if (cmd->data_direction == DMA_NONE)
- return 0;
-
- na = iscsit_tpg_get_node_attrib(sess);
- memset(&bl, 0, sizeof(struct iscsi_build_list));
-
- if (cmd->data_direction == DMA_FROM_DEVICE) {
- bl.data_direction = ISCSI_PDU_READ;
- bl.type = PDULIST_NORMAL;
- if (na->random_datain_pdu_offsets)
- bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
- if (na->random_datain_seq_offsets)
- bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
- } else {
- bl.data_direction = ISCSI_PDU_WRITE;
- bl.immediate_data_length = immediate_data_length;
- if (na->random_r2t_offsets)
- bl.randomize |= RANDOM_R2T_OFFSETS;
-
- if (!cmd->immediate_data && !cmd->unsolicited_data)
- bl.type = PDULIST_NORMAL;
- else if (cmd->immediate_data && !cmd->unsolicited_data)
- bl.type = PDULIST_IMMEDIATE;
- else if (!cmd->immediate_data && cmd->unsolicited_data)
- bl.type = PDULIST_UNSOLICITED;
- else if (cmd->immediate_data && cmd->unsolicited_data)
- bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
- }
-
- return iscsit_do_build_list(cmd, &bl);
-}
+EXPORT_SYMBOL(iscsit_allocate_cmd);
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
struct iscsi_cmd *cmd,
@@ -409,9 +245,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
*/
if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
- " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+ " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
sess->max_cmd_sn);
- ret = CMDSN_ERROR_CANNOT_RECOVER;
+ ret = CMDSN_MAXCMDSN_OVERRUN;
} else if (cmdsn == sess->exp_cmd_sn) {
sess->exp_cmd_sn++;
@@ -440,39 +276,56 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
* Commands may be received out of order if MC/S is in use.
* Ensure they are executed in CmdSN order.
*/
-int iscsit_sequence_cmd(
- struct iscsi_conn *conn,
- struct iscsi_cmd *cmd,
- u32 cmdsn)
+int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf, __be32 cmdsn)
{
- int ret;
- int cmdsn_ret;
+ int ret, cmdsn_ret;
+ bool reject = false;
+ u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
mutex_lock(&conn->sess->cmdsn_mutex);
- cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
+ cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
switch (cmdsn_ret) {
case CMDSN_NORMAL_OPERATION:
ret = iscsit_execute_cmd(cmd, 0);
if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
iscsit_execute_ooo_cmdsns(conn->sess);
+ else if (ret < 0) {
+ reject = true;
+ ret = CMDSN_ERROR_CANNOT_RECOVER;
+ }
break;
case CMDSN_HIGHER_THAN_EXP:
- ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
+ ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
+ if (ret < 0) {
+ reject = true;
+ ret = CMDSN_ERROR_CANNOT_RECOVER;
+ break;
+ }
+ ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
+ case CMDSN_MAXCMDSN_OVERRUN:
+ default:
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
- ret = cmdsn_ret;
- break;
- default:
- ret = cmdsn_ret;
+ /*
+ * Existing callers for iscsit_sequence_cmd() will silently
+ * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
+ * return for CMDSN_MAXCMDSN_OVERRUN as well.
+ */
+ ret = CMDSN_LOWER_THAN_EXP;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);
+ if (reject)
+ iscsit_reject_cmd(cmd, reason, buf);
+
return ret;
}
+EXPORT_SYMBOL(iscsit_sequence_cmd);
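
The CMDSN_MAXCMDSN_OVERRUN handling above depends on wrap-safe serial-number arithmetic: iscsi_sna_gt() must treat 0x00000001 as logically after 0xfffffffe. A small self-contained sketch of that RFC 1982-style comparison, assuming 32-bit serial numbers:

    #include <stdint.h>
    #include <assert.h>

    /* Wrap-safe serial-number compare, as assumed for iscsi_sna_gt():
     * a is "greater" when the signed 32-bit difference is positive. */
    static int sna_gt(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
        assert(sna_gt(2, 1));
        assert(!sna_gt(1, 2));
        /* Wrap-around: 0x00000001 is logically after 0xfffffffe. */
        assert(sna_gt(0x00000001u, 0xfffffffeu));
        return 0;
    }
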
int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
@@ -503,14 +356,14 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
return 0;
- if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
+ if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
((cmd->first_burst_len + payload_length) !=
conn->sess->sess_ops->FirstBurstLength)) {
pr_err("Unsolicited non-immediate data received %u"
" does not equal FirstBurstLength: %u, and does"
" not equal ExpXferLen %u.\n",
(cmd->first_burst_len + payload_length),
- conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
+ conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
transport_send_check_condition_and_sense(se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return -1;
@@ -520,12 +373,12 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
struct iscsi_cmd *iscsit_find_cmd_from_itt(
struct iscsi_conn *conn,
- u32 init_task_tag)
+ itt_t init_task_tag)
{
struct iscsi_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
@@ -540,13 +393,13 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
struct iscsi_conn *conn,
- u32 init_task_tag,
+ itt_t init_task_tag,
u32 length)
{
struct iscsi_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
@@ -569,7 +422,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_ttt(
struct iscsi_cmd *cmd = NULL;
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->targ_xfer_tag == targ_xfer_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
@@ -586,7 +439,7 @@ int iscsit_find_cmd_for_recovery(
struct iscsi_session *sess,
struct iscsi_cmd **cmd_ptr,
struct iscsi_conn_recovery **cr_ptr,
- u32 init_task_tag)
+ itt_t init_task_tag)
{
struct iscsi_cmd *cmd = NULL;
struct iscsi_conn_recovery *cr;
@@ -597,7 +450,7 @@ int iscsit_find_cmd_for_recovery(
spin_lock(&sess->cr_i_lock);
list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
- list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_i_lock);
@@ -617,7 +470,7 @@ int iscsit_find_cmd_for_recovery(
spin_lock(&sess->cr_a_lock);
list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
- list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_a_lock);
@@ -657,7 +510,7 @@ void iscsit_add_cmd_to_immediate_queue(
atomic_set(&conn->check_immediate_queue, 1);
spin_unlock_bh(&conn->immed_queue_lock);
- wake_up_process(conn->thread_set->tx_thread);
+ wake_up(&conn->queues_wq);
}
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
@@ -669,8 +522,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
spin_unlock_bh(&conn->immed_queue_lock);
return NULL;
}
- list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
- break;
+ qr = list_first_entry(&conn->immed_queue_list,
+ struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
@@ -731,7 +584,7 @@ void iscsit_add_cmd_to_response_queue(
atomic_inc(&cmd->response_queue_count);
spin_unlock_bh(&conn->response_queue_lock);
- wake_up_process(conn->thread_set->tx_thread);
+ wake_up(&conn->queues_wq);
}
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -744,8 +597,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
return NULL;
}
- list_for_each_entry(qr, &conn->response_queue_list, qr_list)
- break;
+ qr = list_first_entry(&conn->response_queue_list,
+ struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
@@ -785,6 +638,24 @@ static void iscsit_remove_cmd_from_response_queue(
}
}
+bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
+{
+ bool empty;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ empty = list_empty(&conn->immed_queue_list);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ if (!empty)
+ return empty;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ empty = list_empty(&conn->response_queue_list);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ return empty;
+}
+
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
@@ -813,41 +684,68 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
- struct iscsi_conn *conn = cmd->conn;
- int i;
+ struct iscsi_session *sess;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
- iscsit_free_r2ts_from_list(cmd);
- iscsit_free_all_datain_reqs(cmd);
+ if (cmd->conn)
+ sess = cmd->conn->sess;
+ else
+ sess = cmd->sess;
+
+ BUG_ON(!sess || !sess->se_sess);
kfree(cmd->buf_ptr);
kfree(cmd->pdu_list);
kfree(cmd->seq_list);
kfree(cmd->tmr_req);
kfree(cmd->iov_data);
+ kfree(cmd->text_in_ptr);
- for (i = 0; i < cmd->t_mem_sg_nents; i++)
- __free_page(sg_page(&cmd->t_mem_sg[i]));
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
+}
+EXPORT_SYMBOL(iscsit_release_cmd);
- kfree(cmd->t_mem_sg);
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+ bool check_queues)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ if (scsi_cmd) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ iscsit_stop_dataout_timer(cmd);
+ iscsit_free_r2ts_from_list(cmd);
+ }
+ if (cmd->data_direction == DMA_FROM_DEVICE)
+ iscsit_free_all_datain_reqs(cmd);
+ }
- if (conn) {
+ if (conn && check_queues) {
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
-
- kmem_cache_free(lio_cmd_cache, cmd);
}
-void iscsit_free_cmd(struct iscsi_cmd *cmd)
+void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
+ struct se_cmd *se_cmd = NULL;
+ int rc;
/*
- * Determine if a struct se_cmd is assoicated with
+ * Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, true, shutdown);
+ /*
+ * Fallthrough
+ */
case ISCSI_OP_SCSI_TMFUNC:
- transport_generic_free_cmd(&cmd->se_cmd, 1);
+ rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, true, shutdown);
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ }
break;
case ISCSI_OP_REJECT:
/*
@@ -856,11 +754,19 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
- transport_generic_free_cmd(&cmd->se_cmd, 1);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, true, shutdown);
+
+ rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, true, shutdown);
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ }
break;
}
/* Fall-through */
default:
+ __iscsit_free_cmd(cmd, false, shutdown);
iscsit_release_cmd(cmd);
break;
}
@@ -1023,14 +929,14 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
u8 state;
struct iscsi_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+ cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
if (!cmd)
return -1;
cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
ISTATE_SEND_NOPIN_NO_RESPONSE;
- cmd->init_task_tag = 0xFFFFFFFF;
+ cmd->init_task_tag = RESERVED_ITT;
spin_lock_bh(&conn->sess->ttt_lock);
cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
0xFFFFFFFF;
@@ -1039,7 +945,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
spin_unlock_bh(&conn->sess->ttt_lock);
spin_lock_bh(&conn->cmd_lock);
- list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (want_response)
@@ -1079,7 +985,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
- conn->sess->conn_timeout_errors++;
+ atomic_long_inc(&conn->sess->conn_timeout_errors);
spin_unlock_bh(&tiqn->sess_err_stats.lock);
}
}
@@ -1383,34 +1289,21 @@ send_datacrc:
*/
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
- u8 iscsi_hdr[ISCSI_HDR_LEN];
- int err;
- struct kvec iov;
struct iscsi_login_rsp *hdr;
+ struct iscsi_login *login = conn->conn_login;
+ login->login_failed = 1;
iscsit_collect_login_stats(conn, status_class, status_detail);
- memset(&iov, 0, sizeof(struct kvec));
- memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
+ memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
- hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
+ hdr = (struct iscsi_login_rsp *)&login->rsp[0];
hdr->opcode = ISCSI_OP_LOGIN_RSP;
hdr->status_class = status_class;
hdr->status_detail = status_detail;
- hdr->itt = cpu_to_be32(conn->login_itt);
+ hdr->itt = conn->login_itt;
- iov.iov_base = &iscsi_hdr;
- iov.iov_len = ISCSI_HDR_LEN;
-
- PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
-
- err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
- if (err != ISCSI_HDR_LEN) {
- pr_err("tx_data returned less than expected\n");
- return -1;
- }
-
- return 0;
+ return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}
void iscsit_print_session_params(struct iscsi_session *sess)
@@ -1589,7 +1482,8 @@ void iscsit_collect_login_stats(
strcpy(ls->last_intr_fail_name,
(intrname ? intrname->value : "Unknown"));
- ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
+ ls->last_intr_fail_ip_family = conn->login_family;
+
snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
"%s", conn->login_ip);
ls->last_fail_time = get_jiffies_64();
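
iscsit_allocate_cmd() now carves each descriptor out of a preallocated, per-session sess_cmd_map indexed by a percpu_ida tag, instead of a dedicated kmem_cache. A minimal userspace model of that scheme follows, with a trivial stack of free tags standing in for percpu_ida; the indexing line mirrors sess_cmd_map + (tag * size) from the patch above:

    #include <stdlib.h>
    #include <string.h>

    struct cmd {        /* stand-in for iscsi_cmd + transport priv area */
        int map_tag;
        /* ... per-command state ... */
    };

    struct tag_pool {
        int *free_tags, top, depth;
        char *cmd_map;      /* depth * cmd_size bytes, allocated up front */
        size_t cmd_size;
    };

    static struct cmd *pool_alloc_cmd(struct tag_pool *p)
    {
        struct cmd *c;
        int tag;

        if (p->top == 0)
            return NULL;    /* percpu_ida_alloc() would block or fail here */
        tag = p->free_tags[--p->top];

        c = (struct cmd *)(p->cmd_map + (size_t)tag * p->cmd_size);
        memset(c, 0, p->cmd_size);
        c->map_tag = tag;
        return c;
    }

    static void pool_free_cmd(struct tag_pool *p, struct cmd *c)
    {
        p->free_tags[p->top++] = c->map_tag;   /* like percpu_ida_free() */
    }

    int main(void)
    {
        struct tag_pool p;
        int i, depth = 4;

        p.depth = depth;
        p.top = depth;
        p.cmd_size = sizeof(struct cmd);
        p.cmd_map = calloc(depth, p.cmd_size);
        p.free_tags = malloc(depth * sizeof(int));
        for (i = 0; i < depth; i++)
            p.free_tags[i] = i;

        struct cmd *c = pool_alloc_cmd(&p);
        if (c)
            pool_free_cmd(&p, c);
        free(p.cmd_map);
        free(p.free_tags);
        return 0;
    }
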
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 835bf7de028..a68508c4fec 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -8,29 +8,30 @@ extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
-extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
-extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
+extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
-int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn);
+extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *, __be32 cmdsn);
extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
- u32, u32);
+ itt_t, u32);
extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
- struct iscsi_conn_recovery **, u32);
+ struct iscsi_conn_recovery **, itt_t);
extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
extern void iscsit_inc_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c47ff7f59e5..8c64b8776a9 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -3,7 +3,7 @@
* This file contains the Linux/SCSI LLD virtual SCSI initiator driver
* for emulated SAS initiator ports
*
- * © Copyright 2011 RisingTide Systems LLC.
+ * © Copyright 2011-2013 Datera, Inc.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
@@ -44,138 +44,12 @@
/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_loop_fabric_configfs;
+static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
static int tcm_loop_hba_no_cnt;
-/*
- * Allocate a tcm_loop cmd descriptor from target_core_mod code
- *
- * Can be called from interrupt context in tcm_loop_queuecommand() below
- */
-static struct se_cmd *tcm_loop_allocate_core_cmd(
- struct tcm_loop_hba *tl_hba,
- struct se_portal_group *se_tpg,
- struct scsi_cmnd *sc)
-{
- struct se_cmd *se_cmd;
- struct se_session *se_sess;
- struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
- struct tcm_loop_cmd *tl_cmd;
- int sam_task_attr;
-
- if (!tl_nexus) {
- scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
- " does not exist\n");
- set_host_byte(sc, DID_ERROR);
- return NULL;
- }
- se_sess = tl_nexus->se_sess;
-
- tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
- if (!tl_cmd) {
- pr_err("Unable to allocate struct tcm_loop_cmd\n");
- set_host_byte(sc, DID_ERROR);
- return NULL;
- }
- se_cmd = &tl_cmd->tl_se_cmd;
- /*
- * Save the pointer to struct scsi_cmnd *sc
- */
- tl_cmd->sc = sc;
- /*
- * Locate the SAM Task Attr from struct scsi_cmnd *
- */
- if (sc->device->tagged_supported) {
- switch (sc->tag) {
- case HEAD_OF_QUEUE_TAG:
- sam_task_attr = MSG_HEAD_TAG;
- break;
- case ORDERED_QUEUE_TAG:
- sam_task_attr = MSG_ORDERED_TAG;
- break;
- default:
- sam_task_attr = MSG_SIMPLE_TAG;
- break;
- }
- } else
- sam_task_attr = MSG_SIMPLE_TAG;
-
- /*
- * Initialize struct se_cmd descriptor from target_core_mod infrastructure
- */
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
- &tl_cmd->tl_sense_buf[0]);
-
- if (scsi_bidi_cmnd(sc))
- se_cmd->se_cmd_flags |= SCF_BIDI;
-
- /*
- * Locate the struct se_lun pointer and attach it to struct se_cmd
- */
- if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
- set_host_byte(sc, DID_NO_CONNECT);
- return NULL;
- }
-
- return se_cmd;
-}
-
-/*
- * Called by struct target_core_fabric_ops->new_cmd_map()
- *
- * Always called in process context. A non zero return value
- * here will signal to handle an exception based on the return code.
- */
-static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
-{
- struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
- struct tcm_loop_cmd, tl_se_cmd);
- struct scsi_cmnd *sc = tl_cmd->sc;
- struct scatterlist *sgl_bidi = NULL;
- u32 sgl_bidi_count = 0;
- int ret;
- /*
- * Allocate the necessary tasks to complete the received CDB+data
- */
- ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret != 0)
- return ret;
- /*
- * For BIDI commands, pass in the extra READ buffer
- * to transport_generic_map_mem_to_cmd() below..
- */
- if (se_cmd->se_cmd_flags & SCF_BIDI) {
- struct scsi_data_buffer *sdb = scsi_in(sc);
-
- sgl_bidi = sdb->table.sgl;
- sgl_bidi_count = sdb->table.nents;
- }
- /*
- * Because some userspace code via scsi-generic do not memset their
- * associated read buffers, go ahead and do that here for type
- * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
- * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
- * by target core in transport_generic_allocate_tasks() ->
- * transport_generic_cmd_sequencer().
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
- se_cmd->data_direction == DMA_FROM_DEVICE) {
- struct scatterlist *sg = scsi_sglist(sc);
- unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
-
- if (buf != NULL) {
- memset(buf, 0, sg->length);
- kunmap(sg_page(sg));
- }
- }
-
- /* Tell the core about our preallocated memory */
- return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
- scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-}
+static int tcm_loop_queue_status(struct se_cmd *se_cmd);
/*
* Called from struct target_core_fabric_ops->check_stop_free()
@@ -187,7 +61,7 @@ static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* pointer. These will be released directly in tcm_loop_device_reset()
* with transport_generic_free_cmd().
*/
- if (se_cmd->se_tmr_req)
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
return 0;
/*
* Release the struct se_cmd, which will make a callback to release
@@ -205,11 +79,10 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}
-static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
- char **start, off_t offset,
- int length, int inout)
+static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- return sprintf(buffer, "tcm_loop_proc_info()\n");
+ seq_printf(m, "tcm_loop_proc_info()\n");
+ return 0;
}
static int tcm_loop_driver_probe(struct device *);
@@ -262,51 +135,140 @@ static int tcm_loop_change_queue_depth(
return sdev->queue_depth;
}
+static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag);
+
+ if (tag)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag = 0;
+
+ return tag;
+}
+
/*
- * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
- * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
+ * Locate the SAM Task Attr from struct scsi_cmnd *
*/
-static int tcm_loop_queuecommand(
- struct Scsi_Host *sh,
- struct scsi_cmnd *sc)
+static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
{
- struct se_cmd *se_cmd;
- struct se_portal_group *se_tpg;
+ if (sc->device->tagged_supported) {
+ switch (sc->tag) {
+ case HEAD_OF_QUEUE_TAG:
+ return MSG_HEAD_TAG;
+ case ORDERED_QUEUE_TAG:
+ return MSG_ORDERED_TAG;
+ default:
+ break;
+ }
+ }
+
+ return MSG_SIMPLE_TAG;
+}
+
+static void tcm_loop_submission_work(struct work_struct *work)
+{
+ struct tcm_loop_cmd *tl_cmd =
+ container_of(work, struct tcm_loop_cmd, work);
+ struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
+ struct scsi_cmnd *sc = tl_cmd->sc;
+ struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
+ struct scatterlist *sgl_bidi = NULL;
+ u32 sgl_bidi_count = 0, transfer_length;
+ int rc;
- pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
- " scsi_buf_len: %u\n", sc->device->host->host_no,
- sc->device->id, sc->device->channel, sc->device->lun,
- sc->cmnd[0], scsi_bufflen(sc));
- /*
- * Locate the tcm_loop_hba_t pointer
- */
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+
/*
* Ensure that this tl_tpg reference from the incoming sc->device->id
* has already been configured via tcm_loop_make_naa_tpg().
*/
if (!tl_tpg->tl_hba) {
set_host_byte(sc, DID_NO_CONNECT);
- sc->scsi_done(sc);
- return 0;
+ goto out_done;
}
- se_tpg = &tl_tpg->tl_se_tpg;
- /*
- * Determine the SAM Task Attribute and allocate tl_cmd and
- * tl_cmd->tl_se_cmd from TCM infrastructure
- */
- se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
- if (!se_cmd) {
+ if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
+ set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+ goto out_done;
+ }
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+ scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+ " does not exist\n");
+ set_host_byte(sc, DID_ERROR);
+ goto out_done;
+ }
+ if (scsi_bidi_cmnd(sc)) {
+ struct scsi_data_buffer *sdb = scsi_in(sc);
+
+ sgl_bidi = sdb->table.sgl;
+ sgl_bidi_count = sdb->table.nents;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
+ }
+
+ transfer_length = scsi_transfer_length(sc);
+ if (!scsi_prot_sg_count(sc) &&
+ scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
+ se_cmd->prot_pto = true;
+ /*
+ * loopback transport doesn't support
+ * WRITE_GENERATE, READ_STRIP protection
+ * information operations, go ahead unprotected.
+ */
+ transfer_length = scsi_bufflen(sc);
+ }
+
+ rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
+ &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
+ transfer_length, tcm_loop_sam_attr(sc),
+ sc->sc_data_direction, 0,
+ scsi_sglist(sc), scsi_sg_count(sc),
+ sgl_bidi, sgl_bidi_count,
+ scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
+ if (rc < 0) {
+ set_host_byte(sc, DID_NO_CONNECT);
+ goto out_done;
+ }
+ return;
+
+out_done:
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ sc->scsi_done(sc);
+ return;
+}
+
+/*
+ * ->queuecommand can be and usually is called from interrupt context, so
+ * defer the actual submission to a workqueue.
+ */
+static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+{
+ struct tcm_loop_cmd *tl_cmd;
+
+ pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+ " scsi_buf_len: %u\n", sc->device->host->host_no,
+ sc->device->id, sc->device->channel, sc->device->lun,
+ sc->cmnd[0], scsi_bufflen(sc));
+
+ tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
+ if (!tl_cmd) {
+ pr_err("Unable to allocate struct tcm_loop_cmd\n");
+ set_host_byte(sc, DID_ERROR);
sc->scsi_done(sc);
return 0;
}
- /*
- * Queue up the newly allocated to be processed in TCM thread context.
- */
- transport_generic_handle_cdb_map(se_cmd);
+
+ tl_cmd->sc = sc;
+ tl_cmd->sc_cmd_tag = sc->tag;
+ INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
+ queue_work(tcm_loop_workqueue, &tl_cmd->work);
return 0;
}
@@ -314,41 +276,21 @@ static int tcm_loop_queuecommand(
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
-static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
+ struct tcm_loop_nexus *tl_nexus,
+ int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
- struct se_portal_group *se_tpg;
struct se_session *se_sess;
+ struct se_portal_group *se_tpg;
struct tcm_loop_cmd *tl_cmd = NULL;
- struct tcm_loop_hba *tl_hba;
- struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tmr *tl_tmr = NULL;
- struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED;
- /*
- * Locate the tcm_loop_hba_t pointer
- */
- tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
- /*
- * Locate the tl_nexus and se_sess pointers
- */
- tl_nexus = tl_hba->tl_nexus;
- if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
- }
- se_sess = tl_nexus->se_sess;
- /*
- * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
- */
- tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- se_tpg = &tl_tpg->tl_se_tpg;
+ int ret = TMR_FUNCTION_FAILED, rc;
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
- return FAILED;
+ return ret;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
@@ -359,27 +301,32 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
se_cmd = &tl_cmd->tl_se_cmd;
+ se_tpg = &tl_tpg->tl_se_tpg;
+ se_sess = tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Allocate the LUN_RESET TMR
- */
- se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
- TMR_LUN_RESET, GFP_KERNEL);
- if (IS_ERR(se_cmd->se_tmr_req))
+
+ rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
+ if (rc < 0)
goto release;
+
+ if (tmr == TMR_ABORT_TASK)
+ se_cmd->se_tmr_req->ref_task_tag = task;
+
/*
- * Locate the underlying TCM struct se_lun from sc->device->lun
+ * Locate the underlying TCM struct se_lun
*/
- if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
+ if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
+ ret = TMR_LUN_DOES_NOT_EXIST;
goto release;
+ }
/*
- * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
- * to wake us up.
+ * Queue the TMR to TCM Core and sleep waiting for
+ * tcm_loop_queue_tm_rsp() to wake us up.
*/
transport_generic_handle_tmr(se_cmd);
wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
@@ -387,8 +334,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* The TMR LUN_RESET has completed, check the response status and
* then release allocations.
*/
- ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
- SUCCESS : FAILED;
+ ret = se_cmd->se_tmr_req->response;
release:
if (se_cmd)
transport_generic_free_cmd(se_cmd, 1);
@@ -398,6 +344,94 @@ release:
return ret;
}
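+/*
+ * Called from SCSI EH process context to issue an ABORT_TASK TMR
+ * for an outstanding struct scsi_cmnd
+ */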
+static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret = FAILED;
+
+ /*
+	 * Locate the struct tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+		pr_err("Unable to perform ABORT_TASK without"
+			" active I_T Nexus\n");
+ return FAILED;
+ }
+
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ sc->tag, TMR_ABORT_TASK);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret = FAILED;
+
+ /*
+	 * Locate the struct tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ 0, TMR_LUN_RESET);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
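+/*
+ * Called from SCSI EH process context to recover the target by forcing
+ * the TPG transport status back to online
+ */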
+static int tcm_loop_target_reset(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+
+ /*
+	 * Locate the struct tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ if (!tl_hba) {
+		pr_err("Unable to perform target reset without"
+			" an active tcm_loop HBA\n");
+ return FAILED;
+ }
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ if (tl_tpg) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return SUCCESS;
+ }
+ return FAILED;
+}
+
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
@@ -406,16 +440,28 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)
static int tcm_loop_slave_configure(struct scsi_device *sd)
{
+ if (sd->tagged_supported) {
+ scsi_activate_tcq(sd, sd->queue_depth);
+ scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
+ sd->host->cmd_per_lun);
+ } else {
+ scsi_adjust_queue_depth(sd, 0,
+ sd->host->cmd_per_lun);
+ }
+
return 0;
}
static struct scsi_host_template tcm_loop_driver_template = {
- .proc_info = tcm_loop_proc_info,
+ .show_info = tcm_loop_show_info,
.proc_name = "tcm_loopback",
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
+ .change_queue_type = tcm_loop_change_queue_type,
+ .eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
+ .eh_target_reset_handler = tcm_loop_target_reset,
.can_queue = 1024,
.this_id = -1,
.sg_tablesize = 256,
@@ -431,7 +477,7 @@ static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
- int error;
+ int error, host_prot;
tl_hba = to_tcm_loop_hba(dev);
@@ -455,6 +501,13 @@ static int tcm_loop_driver_probe(struct device *dev)
sh->max_channel = 0;
sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+ host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+ scsi_host_set_prot(sh, host_prot);
+ scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
+
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
pr_err("%s: scsi_add_host failed\n", __func__);
@@ -762,22 +815,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
-{
- /*
- * Assume struct scsi_cmnd is not in remove state..
- */
- return 0;
-}
-
-static int tcm_loop_sess_logged_in(struct se_session *se_sess)
-{
- /*
- * Assume that TL Nexus is always active
- */
- return 1;
-}
-
static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
return 1;
@@ -790,7 +827,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
- return 1;
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+
+ return tl_cmd->sc_cmd_tag;
}
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
@@ -811,19 +851,6 @@ static void tcm_loop_close_session(struct se_session *se_sess)
return;
};
-static void tcm_loop_stop_session(
- struct se_session *se_sess,
- int sess_sleep,
- int conn_sleep)
-{
- return;
-}
-
-static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess)
-{
- return;
-}
-
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
/*
@@ -835,7 +862,7 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
* We now tell TCM to add this WRITE CDB directly into the TCM storage
* object execution queue.
*/
- transport_generic_process_write(se_cmd);
+ target_execute_cmd(se_cmd);
return 0;
}
@@ -855,6 +882,9 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
sc->result = SAM_STAT_GOOD;
set_host_byte(sc, DID_OK);
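+	/* Propagate any residual over-/under-run back to the SCSI midlayer */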
+ if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+ (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+ scsi_set_resid(sc, se_cmd->residual_count);
sc->scsi_done(sc);
return 0;
}
@@ -880,11 +910,14 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
sc->result = se_cmd->scsi_status;
set_host_byte(sc, DID_OK);
+ if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+ (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+ scsi_set_resid(sc, se_cmd->residual_count);
sc->scsi_done(sc);
return 0;
}
-static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
+static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
@@ -894,17 +927,11 @@ static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
*/
atomic_set(&tl_tmr->tmr_complete, 1);
wake_up(&tl_tmr->tl_tmr_wait);
- return 0;
-}
-
-static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
-{
- return 0;
}
-static u16 tcm_loop_get_fabric_sense_len(void)
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
- return 0;
+ return;
}
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
@@ -934,7 +961,7 @@ static int tcm_loop_port_link(
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
atomic_inc(&tl_tpg->tl_tpg_port_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
/*
* Add Linux/SCSI struct scsi_device by HCTL
*/
@@ -969,7 +996,7 @@ static void tcm_loop_port_unlink(
scsi_device_put(sd);
atomic_dec(&tl_tpg->tl_tpg_port_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
@@ -1001,7 +1028,7 @@ static int tcm_loop_make_nexus(
/*
* Initialize the struct se_session pointer
*/
- tl_nexus->se_sess = transport_init_session();
+ tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
goto out;
@@ -1041,7 +1068,10 @@ static int tcm_loop_drop_nexus(
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba = tpg->tl_hba;
- tl_nexus = tpg->tl_hba->tl_nexus;
+ if (!tl_hba)
+ return -ENODEV;
+
+ tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -1170,14 +1200,62 @@ check_newline:
TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
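+/*
+ * The transport_status attribute lets userspace force a TPG offline and
+ * back online, e.g. (assuming the usual configfs mount point and a
+ * hypothetical NAA WWN):
+ *
+ *   echo offline > /sys/kernel/config/target/loopback/naa.<wwn>/tpgt_1/transport_status
+ *   echo online  > /sys/kernel/config/target/loopback/naa.<wwn>/tpgt_1/transport_status
+ */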
+static ssize_t tcm_loop_tpg_show_transport_status(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ const char *status = NULL;
+ ssize_t ret = -EINVAL;
+
+ switch (tl_tpg->tl_transport_status) {
+ case TCM_TRANSPORT_ONLINE:
+ status = "online";
+ break;
+ case TCM_TRANSPORT_OFFLINE:
+ status = "offline";
+ break;
+ default:
+ break;
+ }
+
+ if (status)
+ ret = snprintf(page, PAGE_SIZE, "%s\n", status);
+
+ return ret;
+}
+
+static ssize_t tcm_loop_tpg_store_transport_status(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+
+ if (!strncmp(page, "online", 6)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return count;
+ }
+ if (!strncmp(page, "offline", 7)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
+ return count;
+ }
+ return -EINVAL;
+}
+
+TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
&tcm_loop_tpg_nexus.attr,
+ &tcm_loop_tpg_transport_status.attr,
NULL,
};
/* Start items for tcm_loop_naa_cit */
-struct se_portal_group *tcm_loop_make_naa_tpg(
+static struct se_portal_group *tcm_loop_make_naa_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
@@ -1222,7 +1300,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
return &tl_tpg->tl_se_tpg;
}
-void tcm_loop_drop_naa_tpg(
+static void tcm_loop_drop_naa_tpg(
struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
@@ -1254,7 +1332,7 @@ void tcm_loop_drop_naa_tpg(
/* Start items for tcm_loop_cit */
-struct se_wwn *tcm_loop_make_scsi_hba(
+static struct se_wwn *tcm_loop_make_scsi_hba(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
@@ -1324,7 +1402,7 @@ out:
return ERR_PTR(ret);
}
-void tcm_loop_drop_scsi_hba(
+static void tcm_loop_drop_scsi_hba(
struct se_wwn *wwn)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
@@ -1361,7 +1439,6 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
static int tcm_loop_register_configfs(void)
{
struct target_fabric_configfs *fabric;
- struct config_group *tf_cg;
int ret;
/*
* Set the TCM Loop HBA counter to zero
@@ -1407,14 +1484,10 @@ static int tcm_loop_register_configfs(void)
/*
* Used for setting up remaining TCM resources in process context
*/
- fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
fabric->tf_ops.close_session = &tcm_loop_close_session;
- fabric->tf_ops.stop_session = &tcm_loop_stop_session;
- fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0;
- fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in;
fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
fabric->tf_ops.sess_get_initiator_sid = NULL;
fabric->tf_ops.write_pending = &tcm_loop_write_pending;
@@ -1429,11 +1502,8 @@ static int tcm_loop_register_configfs(void)
fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
fabric->tf_ops.queue_status = &tcm_loop_queue_status;
fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
- fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
- fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
- fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
+ fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
- tf_cg = &fabric->tf_group;
/*
* Setup function pointers for generic logic in target_core_fabric_configfs.c
*/
@@ -1452,11 +1522,11 @@ static int tcm_loop_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
/*
* Once fabric->tf_ops has been setup, now register the fabric for
* use within TCM
@@ -1490,7 +1560,11 @@ static void tcm_loop_deregister_configfs(void)
static int __init tcm_loop_fabric_init(void)
{
- int ret;
+ int ret = -ENOMEM;
+
+ tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
+ if (!tcm_loop_workqueue)
+ goto out;
tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
sizeof(struct tcm_loop_cmd),
@@ -1499,20 +1573,27 @@ static int __init tcm_loop_fabric_init(void)
if (!tcm_loop_cmd_cache) {
pr_debug("kmem_cache_create() for"
" tcm_loop_cmd_cache failed\n");
- return -ENOMEM;
+ goto out_destroy_workqueue;
}
ret = tcm_loop_alloc_core_bus();
if (ret)
- return ret;
+ goto out_destroy_cache;
ret = tcm_loop_register_configfs();
- if (ret) {
- tcm_loop_release_core_bus();
- return ret;
- }
+ if (ret)
+ goto out_release_core_bus;
return 0;
+
+out_release_core_bus:
+ tcm_loop_release_core_bus();
+out_destroy_cache:
+ kmem_cache_destroy(tcm_loop_cmd_cache);
+out_destroy_workqueue:
+ destroy_workqueue(tcm_loop_workqueue);
+out:
+ return ret;
}
static void __exit tcm_loop_fabric_exit(void)
@@ -1520,6 +1601,7 @@ static void __exit tcm_loop_fabric_exit(void)
tcm_loop_deregister_configfs();
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
+ destroy_workqueue(tcm_loop_workqueue);
}
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 15a03644147..54c59d0b660 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,4 +1,4 @@
-#define TCM_LOOP_VERSION "v2.1-rc1"
+#define TCM_LOOP_VERSION "v2.1-rc2"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
@@ -10,11 +10,13 @@
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
+ /* Tagged command queueing */
+ u32 sc_cmd_tag;
/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
struct scsi_cmnd *sc;
- struct list_head *tl_cmd_list;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tl_se_cmd;
+ struct work_struct work;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
};
@@ -40,8 +42,12 @@ struct tcm_loop_nacl {
struct se_node_acl se_node_acl;
};
+#define TCM_TRANSPORT_ONLINE 0
+#define TCM_TRANSPORT_OFFLINE 1
+
struct tcm_loop_tpg {
unsigned short tl_tpgt;
+ unsigned short tl_transport_status;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
@@ -53,7 +59,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
- struct se_device_s *se_dev_hba_ptr;
struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig
new file mode 100644
index 00000000000..1614bc710d4
--- /dev/null
+++ b/drivers/target/sbp/Kconfig
@@ -0,0 +1,11 @@
+config SBP_TARGET
+ tristate "FireWire SBP-2 fabric module"
+ depends on FIREWIRE
+ help
+ Say Y or M here to enable SCSI target functionality over FireWire.
+	  This enables you to expose SCSI devices, for example hard disks, to
+	  other nodes on the FireWire bus, similar to the FireWire Target Disk
+	  mode found on many Apple computers.
+
+	  To compile this driver as a module, say M here: the module will be
+	  called sbp-target.
diff --git a/drivers/target/sbp/Makefile b/drivers/target/sbp/Makefile
new file mode 100644
index 00000000000..27747ad054c
--- /dev/null
+++ b/drivers/target/sbp/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SBP_TARGET) += sbp_target.o
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
new file mode 100644
index 00000000000..e7e93727553
--- /dev/null
+++ b/drivers/target/sbp/sbp_target.c
@@ -0,0 +1,2614 @@
+/*
+ * SBP2 target driver (SCSI over IEEE1394 in target mode)
+ *
+ * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define KMSG_COMPONENT "sbp_target"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <asm/unaligned.h>
+
+#include "sbp_target.h"
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *sbp_fabric_configfs;
+
+/* FireWire address region for management and command block address handlers */
+static const struct fw_address_region sbp_register_region = {
+ .start = CSR_REGISTER_BASE + 0x10000,
+ .end = 0x1000000000000ULL,
+};
+
+static const u32 sbp_unit_directory_template[] = {
+ 0x1200609e, /* unit_specifier_id: NCITS/T10 */
+ 0x13010483, /* unit_sw_version: 1155D Rev 4 */
+ 0x3800609e, /* command_set_specifier_id: NCITS/T10 */
+ 0x390104d8, /* command_set: SPC-2 */
+ 0x3b000000, /* command_set_revision: 0 */
+ 0x3c000001, /* firmware_revision: 1 */
+};
+
+#define SESSION_MAINTENANCE_INTERVAL HZ
+
+static atomic_t login_id = ATOMIC_INIT(0);
+
+static void session_maintenance_work(struct work_struct *);
+static int sbp_run_transaction(struct fw_card *, int, int, int, int,
+ unsigned long long, void *, size_t);
+
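+/*
+ * Assemble the peer's EUI-64 GUID from quadlets 3 and 4 of the bus
+ * information block in its config ROM, so sessions can be matched to a
+ * stable identifier across bus resets.
+ */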
+static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
+{
+ int ret;
+ __be32 high, low;
+
+ ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
+ &high, sizeof(high));
+ if (ret != RCODE_COMPLETE)
+ return ret;
+
+ ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
+ &low, sizeof(low));
+ if (ret != RCODE_COMPLETE)
+ return ret;
+
+ *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
+
+ return RCODE_COMPLETE;
+}
+
+static struct sbp_session *sbp_session_find_by_guid(
+ struct sbp_tpg *tpg, u64 guid)
+{
+ struct se_session *se_sess;
+ struct sbp_session *sess, *found = NULL;
+
+ spin_lock_bh(&tpg->se_tpg.session_lock);
+ list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+ sess = se_sess->fabric_sess_ptr;
+ if (sess->guid == guid)
+ found = sess;
+ }
+ spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+ return found;
+}
+
+static struct sbp_login_descriptor *sbp_login_find_by_lun(
+ struct sbp_session *session, struct se_lun *lun)
+{
+ struct sbp_login_descriptor *login, *found = NULL;
+
+ spin_lock_bh(&session->lock);
+ list_for_each_entry(login, &session->login_list, link) {
+ if (login->lun == lun)
+ found = login;
+ }
+ spin_unlock_bh(&session->lock);
+
+ return found;
+}
+
+static int sbp_login_count_all_by_lun(
+ struct sbp_tpg *tpg,
+ struct se_lun *lun,
+ int exclusive)
+{
+ struct se_session *se_sess;
+ struct sbp_session *sess;
+ struct sbp_login_descriptor *login;
+ int count = 0;
+
+ spin_lock_bh(&tpg->se_tpg.session_lock);
+ list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+ sess = se_sess->fabric_sess_ptr;
+
+ spin_lock_bh(&sess->lock);
+ list_for_each_entry(login, &sess->login_list, link) {
+ if (login->lun != lun)
+ continue;
+
+ if (!exclusive || login->exclusive)
+ count++;
+ }
+ spin_unlock_bh(&sess->lock);
+ }
+ spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+ return count;
+}
+
+static struct sbp_login_descriptor *sbp_login_find_by_id(
+ struct sbp_tpg *tpg, int login_id)
+{
+ struct se_session *se_sess;
+ struct sbp_session *sess;
+ struct sbp_login_descriptor *login, *found = NULL;
+
+ spin_lock_bh(&tpg->se_tpg.session_lock);
+ list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+ sess = se_sess->fabric_sess_ptr;
+
+ spin_lock_bh(&sess->lock);
+ list_for_each_entry(login, &sess->login_list, link) {
+ if (login->login_id == login_id)
+ found = login;
+ }
+ spin_unlock_bh(&sess->lock);
+ }
+ spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+ return found;
+}
+
+static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
+{
+ struct se_portal_group *se_tpg = &tpg->se_tpg;
+ struct se_lun *se_lun;
+
+ if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
+ return ERR_PTR(-EINVAL);
+
+ spin_lock(&se_tpg->tpg_lun_lock);
+ se_lun = se_tpg->tpg_lun_list[lun];
+
+ if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+ se_lun = ERR_PTR(-ENODEV);
+
+ spin_unlock(&se_tpg->tpg_lun_lock);
+
+ return se_lun;
+}
+
+static struct sbp_session *sbp_session_create(
+ struct sbp_tpg *tpg,
+ u64 guid)
+{
+ struct sbp_session *sess;
+ int ret;
+ char guid_str[17];
+ struct se_node_acl *se_nacl;
+
+ sess = kmalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ pr_err("failed to allocate session descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ if (IS_ERR(sess->se_sess)) {
+ pr_err("failed to init se_session\n");
+
+ ret = PTR_ERR(sess->se_sess);
+ kfree(sess);
+ return ERR_PTR(ret);
+ }
+
+ snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
+
+ se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
+ if (!se_nacl) {
+ pr_warn("Node ACL not found for %s\n", guid_str);
+
+ transport_free_session(sess->se_sess);
+ kfree(sess);
+
+ return ERR_PTR(-EPERM);
+ }
+
+ sess->se_sess->se_node_acl = se_nacl;
+
+ spin_lock_init(&sess->lock);
+ INIT_LIST_HEAD(&sess->login_list);
+ INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+
+ sess->guid = guid;
+
+ transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
+
+ return sess;
+}
+
+static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
+{
+ spin_lock_bh(&sess->lock);
+ if (!list_empty(&sess->login_list)) {
+ spin_unlock_bh(&sess->lock);
+ return;
+ }
+ spin_unlock_bh(&sess->lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&sess->maint_work);
+
+ transport_deregister_session_configfs(sess->se_sess);
+ transport_deregister_session(sess->se_sess);
+
+ if (sess->card)
+ fw_card_put(sess->card);
+
+ kfree(sess);
+}
+
+static void sbp_target_agent_unregister(struct sbp_target_agent *);
+
+static void sbp_login_release(struct sbp_login_descriptor *login,
+ bool cancel_work)
+{
+ struct sbp_session *sess = login->sess;
+
+ /* FIXME: abort/wait on tasks */
+
+ sbp_target_agent_unregister(login->tgt_agt);
+
+ if (sess) {
+ spin_lock_bh(&sess->lock);
+ list_del(&login->link);
+ spin_unlock_bh(&sess->lock);
+
+ sbp_session_release(sess, cancel_work);
+ }
+
+ kfree(login);
+}
+
+static struct sbp_target_agent *sbp_target_agent_register(
+ struct sbp_login_descriptor *);
+
+static void sbp_management_request_login(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ struct sbp_tport *tport = agent->tport;
+ struct sbp_tpg *tpg = tport->tpg;
+ struct se_lun *se_lun;
+ int ret;
+ u64 guid;
+ struct sbp_session *sess;
+ struct sbp_login_descriptor *login;
+ struct sbp_login_response_block *response;
+ int login_response_len;
+
+ se_lun = sbp_get_lun_from_tpg(tpg,
+ LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
+ if (IS_ERR(se_lun)) {
+ pr_notice("login to unknown LUN: %d\n",
+ LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
+ return;
+ }
+
+ ret = read_peer_guid(&guid, req);
+ if (ret != RCODE_COMPLETE) {
+ pr_warn("failed to read peer GUID: %d\n", ret);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ return;
+ }
+
+ pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
+ se_lun->unpacked_lun, guid);
+
+ sess = sbp_session_find_by_guid(tpg, guid);
+ if (sess) {
+ login = sbp_login_find_by_lun(sess, se_lun);
+ if (login) {
+ pr_notice("initiator already logged-in\n");
+
+ /*
+ * SBP-2 R4 says we should return access denied, but
+ * that can confuse initiators. Instead we need to
+ * treat this like a reconnect, but send the login
+ * response block like a fresh login.
+ *
+ * This is required particularly in the case of Apple
+ * devices booting off the FireWire target, where
+ * the firmware has an active login to the target. When
+ * the OS takes control of the session it issues its own
+ * LOGIN rather than a RECONNECT. To avoid the machine
+ * waiting until the reconnect_hold expires, we can skip
+ * the ACCESS_DENIED errors to speed things up.
+ */
+
+ goto already_logged_in;
+ }
+ }
+
+ /*
+ * check exclusive bit in login request
+ * reject with access_denied if any logins present
+ */
+ if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
+ sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
+ pr_warn("refusing exclusive login with other active logins\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ /*
+ * check exclusive bit in any existing login descriptor
+ * reject with access_denied if any exclusive logins present
+ */
+ if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
+ pr_warn("refusing login while another exclusive login present\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ /*
+ * check we haven't exceeded the number of allowed logins
+ * reject with resources_unavailable if we have
+ */
+ if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
+ tport->max_logins_per_lun) {
+ pr_warn("max number of logins reached\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ if (!sess) {
+ sess = sbp_session_create(tpg, guid);
+ if (IS_ERR(sess)) {
+ switch (PTR_ERR(sess)) {
+ case -EPERM:
+ ret = SBP_STATUS_ACCESS_DENIED;
+ break;
+ default:
+ ret = SBP_STATUS_RESOURCES_UNAVAIL;
+ break;
+ }
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(ret));
+ return;
+ }
+
+ sess->node_id = req->node_addr;
+ sess->card = fw_card_get(req->card);
+ sess->generation = req->generation;
+ sess->speed = req->speed;
+
+ schedule_delayed_work(&sess->maint_work,
+ SESSION_MAINTENANCE_INTERVAL);
+ }
+
+ /* only take the latest reconnect_hold into account */
+ sess->reconnect_hold = min(
+ 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
+ tport->max_reconnect_timeout) - 1;
+
+ login = kmalloc(sizeof(*login), GFP_KERNEL);
+ if (!login) {
+ pr_err("failed to allocate login descriptor\n");
+
+ sbp_session_release(sess, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ login->sess = sess;
+ login->lun = se_lun;
+ login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
+ login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
+ login->login_id = atomic_inc_return(&login_id);
+
+ login->tgt_agt = sbp_target_agent_register(login);
+ if (IS_ERR(login->tgt_agt)) {
+ ret = PTR_ERR(login->tgt_agt);
+ pr_err("failed to map command block handler: %d\n", ret);
+
+ sbp_session_release(sess, true);
+ kfree(login);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ spin_lock_bh(&sess->lock);
+ list_add_tail(&login->link, &sess->login_list);
+ spin_unlock_bh(&sess->lock);
+
+already_logged_in:
+ response = kzalloc(sizeof(*response), GFP_KERNEL);
+ if (!response) {
+ pr_err("failed to allocate login response block\n");
+
+ sbp_login_release(login, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ login_response_len = clamp_val(
+ LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
+ 12, sizeof(*response));
+ response->misc = cpu_to_be32(
+ ((login_response_len & 0xffff) << 16) |
+ (login->login_id & 0xffff));
+ response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
+ addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
+ &response->command_block_agent);
+
+ ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
+ sess->node_id, sess->generation, sess->speed,
+ sbp2_pointer_to_addr(&req->orb.ptr2), response,
+ login_response_len);
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("failed to write login response block: %x\n", ret);
+
+ kfree(response);
+ sbp_login_release(login, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ return;
+ }
+
+ kfree(response);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void sbp_management_request_query_logins(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ pr_notice("QUERY LOGINS not implemented\n");
+ /* FIXME: implement */
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+}
+
+static void sbp_management_request_reconnect(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ struct sbp_tport *tport = agent->tport;
+ struct sbp_tpg *tpg = tport->tpg;
+ int ret;
+ u64 guid;
+ struct sbp_login_descriptor *login;
+
+ ret = read_peer_guid(&guid, req);
+ if (ret != RCODE_COMPLETE) {
+ pr_warn("failed to read peer GUID: %d\n", ret);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ return;
+ }
+
+ pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
+
+ login = sbp_login_find_by_id(tpg,
+ RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
+
+ if (!login) {
+ pr_err("mgt_agent RECONNECT unknown login ID\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ if (login->sess->guid != guid) {
+ pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ spin_lock_bh(&login->sess->lock);
+ if (login->sess->card)
+ fw_card_put(login->sess->card);
+
+ /* update the node details */
+ login->sess->generation = req->generation;
+ login->sess->node_id = req->node_addr;
+ login->sess->card = fw_card_get(req->card);
+ login->sess->speed = req->speed;
+ spin_unlock_bh(&login->sess->lock);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void sbp_management_request_logout(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ struct sbp_tport *tport = agent->tport;
+ struct sbp_tpg *tpg = tport->tpg;
+ int id;
+ struct sbp_login_descriptor *login;
+
+ id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+
+ login = sbp_login_find_by_id(tpg, id);
+ if (!login) {
+ pr_warn("cannot find login: %d\n", id);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
+ return;
+ }
+
+ pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
+ login->lun->unpacked_lun, login->login_id);
+
+ if (req->node_addr != login->sess->node_id) {
+ pr_warn("logout from different node ID\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ sbp_login_release(login, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
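+/*
+ * After a bus reset the card generation changes and node IDs become
+ * invalid; drop the stale node ID and arm the reconnect window so the
+ * initiator has reconnect_hold + 1 seconds to issue a RECONNECT.
+ */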
+static void session_check_for_reset(struct sbp_session *sess)
+{
+ bool card_valid = false;
+
+ spin_lock_bh(&sess->lock);
+
+ if (sess->card) {
+ spin_lock_irq(&sess->card->lock);
+ card_valid = (sess->card->local_node != NULL);
+ spin_unlock_irq(&sess->card->lock);
+
+ if (!card_valid) {
+ fw_card_put(sess->card);
+ sess->card = NULL;
+ }
+ }
+
+ if (!card_valid || (sess->generation != sess->card->generation)) {
+ pr_info("Waiting for reconnect from node: %016llx\n",
+ sess->guid);
+
+ sess->node_id = -1;
+ sess->reconnect_expires = get_jiffies_64() +
+ ((sess->reconnect_hold + 1) * HZ);
+ }
+
+ spin_unlock_bh(&sess->lock);
+}
+
+static void session_reconnect_expired(struct sbp_session *sess)
+{
+ struct sbp_login_descriptor *login, *temp;
+ LIST_HEAD(login_list);
+
+ pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
+
+ spin_lock_bh(&sess->lock);
+ list_for_each_entry_safe(login, temp, &sess->login_list, link) {
+ login->sess = NULL;
+ list_move_tail(&login->link, &login_list);
+ }
+ spin_unlock_bh(&sess->lock);
+
+ list_for_each_entry_safe(login, temp, &login_list, link) {
+ list_del(&login->link);
+ sbp_login_release(login, false);
+ }
+
+ sbp_session_release(sess, false);
+}
+
+static void session_maintenance_work(struct work_struct *work)
+{
+ struct sbp_session *sess = container_of(work, struct sbp_session,
+ maint_work.work);
+
+ /* could be called while tearing down the session */
+ spin_lock_bh(&sess->lock);
+ if (list_empty(&sess->login_list)) {
+ spin_unlock_bh(&sess->lock);
+ return;
+ }
+ spin_unlock_bh(&sess->lock);
+
+ if (sess->node_id != -1) {
+ /* check for bus reset and make node_id invalid */
+ session_check_for_reset(sess);
+
+ schedule_delayed_work(&sess->maint_work,
+ SESSION_MAINTENANCE_INTERVAL);
+ } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
+ /* still waiting for reconnect */
+ schedule_delayed_work(&sess->maint_work,
+ SESSION_MAINTENANCE_INTERVAL);
+ } else {
+ /* reconnect timeout has expired */
+ session_reconnect_expired(sess);
+ }
+}
+
+static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ int state;
+
+ switch (tcode) {
+ case TCODE_READ_QUADLET_REQUEST:
+ pr_debug("tgt_agent AGENT_STATE READ\n");
+
+ spin_lock_bh(&agent->lock);
+ state = agent->state;
+ spin_unlock_bh(&agent->lock);
+
+ *(__be32 *)data = cpu_to_be32(state);
+
+ return RCODE_COMPLETE;
+
+ case TCODE_WRITE_QUADLET_REQUEST:
+ /* ignored */
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ pr_debug("tgt_agent AGENT_RESET\n");
+ spin_lock_bh(&agent->lock);
+ agent->state = AGENT_STATE_RESET;
+ spin_unlock_bh(&agent->lock);
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ struct sbp2_pointer *ptr = data;
+
+ switch (tcode) {
+ case TCODE_WRITE_BLOCK_REQUEST:
+ spin_lock_bh(&agent->lock);
+ if (agent->state != AGENT_STATE_SUSPENDED &&
+ agent->state != AGENT_STATE_RESET) {
+ spin_unlock_bh(&agent->lock);
+ pr_notice("Ignoring ORB_POINTER write while active.\n");
+ return RCODE_CONFLICT_ERROR;
+ }
+ agent->state = AGENT_STATE_ACTIVE;
+ spin_unlock_bh(&agent->lock);
+
+ agent->orb_pointer = sbp2_pointer_to_addr(ptr);
+ agent->doorbell = false;
+
+ pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
+ agent->orb_pointer);
+
+ queue_work(system_unbound_wq, &agent->work);
+
+ return RCODE_COMPLETE;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ pr_debug("tgt_agent ORB_POINTER READ\n");
+ spin_lock_bh(&agent->lock);
+ addr_to_sbp2_pointer(agent->orb_pointer, ptr);
+ spin_unlock_bh(&agent->lock);
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ spin_lock_bh(&agent->lock);
+ if (agent->state != AGENT_STATE_SUSPENDED) {
+ spin_unlock_bh(&agent->lock);
+ pr_debug("Ignoring DOORBELL while active.\n");
+ return RCODE_CONFLICT_ERROR;
+ }
+ agent->state = AGENT_STATE_ACTIVE;
+ spin_unlock_bh(&agent->lock);
+
+ agent->doorbell = true;
+
+ pr_debug("tgt_agent DOORBELL\n");
+
+ queue_work(system_unbound_wq, &agent->work);
+
+ return RCODE_COMPLETE;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
+ int tcode, void *data, struct sbp_target_agent *agent)
+{
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
+ /* ignored as we don't send unsolicited status */
+ return RCODE_COMPLETE;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
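+/*
+ * Top-level FireWire address handler for the command block agent: reject
+ * requests from the wrong generation or a foreign node, then dispatch on
+ * the register offset within the agent's address block.
+ */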
+static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source, int generation,
+ unsigned long long offset, void *data, size_t length,
+ void *callback_data)
+{
+ struct sbp_target_agent *agent = callback_data;
+ struct sbp_session *sess = agent->login->sess;
+ int sess_gen, sess_node, rcode;
+
+ spin_lock_bh(&sess->lock);
+ sess_gen = sess->generation;
+ sess_node = sess->node_id;
+ spin_unlock_bh(&sess->lock);
+
+ if (generation != sess_gen) {
+ pr_notice("ignoring request with wrong generation\n");
+ rcode = RCODE_TYPE_ERROR;
+ goto out;
+ }
+
+ if (source != sess_node) {
+ pr_notice("ignoring request from foreign node (%x != %x)\n",
+ source, sess_node);
+ rcode = RCODE_TYPE_ERROR;
+ goto out;
+ }
+
+ /* turn offset into the offset from the start of the block */
+ offset -= agent->handler.offset;
+
+ if (offset == 0x00 && length == 4) {
+ /* AGENT_STATE */
+ rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
+ } else if (offset == 0x04 && length == 4) {
+ /* AGENT_RESET */
+ rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
+ } else if (offset == 0x08 && length == 8) {
+ /* ORB_POINTER */
+ rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
+ } else if (offset == 0x10 && length == 4) {
+ /* DOORBELL */
+ rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
+ } else if (offset == 0x14 && length == 4) {
+ /* UNSOLICITED_STATUS_ENABLE */
+ rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
+ data, agent);
+ } else {
+ rcode = RCODE_ADDRESS_ERROR;
+ }
+
+out:
+ fw_send_response(card, request, rcode);
+}
+
+static void sbp_handle_command(struct sbp_target_request *);
+static int sbp_send_status(struct sbp_target_request *);
+static void sbp_free_request(struct sbp_target_request *);
+
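+/*
+ * Dispatch a fetched ORB by request format: normal command blocks go down
+ * the SCSI path, dummy ORBs complete immediately, and reserved or
+ * vendor-dependent formats are rejected as unsupported.
+ */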
+static void tgt_agent_process_work(struct work_struct *work)
+{
+ struct sbp_target_request *req =
+ container_of(work, struct sbp_target_request, work);
+
+ pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
+ req->orb_pointer,
+ sbp2_pointer_to_addr(&req->orb.next_orb),
+ sbp2_pointer_to_addr(&req->orb.data_descriptor),
+ be32_to_cpu(req->orb.misc));
+
+ if (req->orb_pointer >> 32)
+ pr_debug("ORB with high bits set\n");
+
+ switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
+ case 0:/* Format specified by this standard */
+ sbp_handle_command(req);
+ return;
+ case 1: /* Reserved for future standardization */
+ case 2: /* Vendor-dependent */
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_REQ_TYPE_NOTSUPP));
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ case 3: /* Dummy ORB */
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_DUMMY_ORB_COMPLETE));
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ default:
+ BUG();
+ }
+}
+
+/* used to double-check we haven't been issued an AGENT_RESET */
+static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
+{
+ bool active;
+
+ spin_lock_bh(&agent->lock);
+ active = (agent->state == AGENT_STATE_ACTIVE);
+ spin_unlock_bh(&agent->lock);
+
+ return active;
+}
+
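+/*
+ * Fetch agent main loop: read each ORB in the linked list starting at
+ * orb_pointer and queue it for processing, until the next_ORB pointer is
+ * flagged NULL or the agent leaves the ACTIVE state.
+ */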
+static void tgt_agent_fetch_work(struct work_struct *work)
+{
+ struct sbp_target_agent *agent =
+ container_of(work, struct sbp_target_agent, work);
+ struct sbp_session *sess = agent->login->sess;
+ struct sbp_target_request *req;
+ int ret;
+ bool doorbell = agent->doorbell;
+ u64 next_orb = agent->orb_pointer;
+
+ while (next_orb && tgt_agent_check_active(agent)) {
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ spin_lock_bh(&agent->lock);
+ agent->state = AGENT_STATE_DEAD;
+ spin_unlock_bh(&agent->lock);
+ return;
+ }
+
+ req->login = agent->login;
+ req->orb_pointer = next_orb;
+
+ req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
+ req->orb_pointer >> 32));
+ req->status.orb_low = cpu_to_be32(
+ req->orb_pointer & 0xfffffffc);
+
+ /* read in the ORB */
+ ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
+ sess->node_id, sess->generation, sess->speed,
+ req->orb_pointer, &req->orb, sizeof(req->orb));
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("tgt_orb fetch failed: %x\n", ret);
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_SRC(
+ STATUS_SRC_ORB_FINISHED) |
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(1) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_UNSPECIFIED_ERROR));
+ spin_lock_bh(&agent->lock);
+ agent->state = AGENT_STATE_DEAD;
+ spin_unlock_bh(&agent->lock);
+
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ }
+
+ /* check the next_ORB field */
+ if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
+ next_orb = 0;
+ req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
+ STATUS_SRC_ORB_FINISHED));
+ } else {
+ next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
+ req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
+ STATUS_SRC_ORB_CONTINUING));
+ }
+
+ if (tgt_agent_check_active(agent) && !doorbell) {
+ INIT_WORK(&req->work, tgt_agent_process_work);
+ queue_work(system_unbound_wq, &req->work);
+ } else {
+ /* don't process this request, just check next_ORB */
+ sbp_free_request(req);
+ }
+
+ spin_lock_bh(&agent->lock);
+ doorbell = agent->doorbell = false;
+
+ /* check if we should carry on processing */
+ if (next_orb)
+ agent->orb_pointer = next_orb;
+ else
+ agent->state = AGENT_STATE_SUSPENDED;
+
+ spin_unlock_bh(&agent->lock);
+	}
+}
+
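+/*
+ * Allocate a per-login command block agent and expose its 0x20-byte
+ * register file (AGENT_STATE through UNSOLICITED_STATUS_ENABLE) as a
+ * FireWire address handler.
+ */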
+static struct sbp_target_agent *sbp_target_agent_register(
+ struct sbp_login_descriptor *login)
+{
+ struct sbp_target_agent *agent;
+ int ret;
+
+ agent = kmalloc(sizeof(*agent), GFP_KERNEL);
+ if (!agent)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&agent->lock);
+
+ agent->handler.length = 0x20;
+ agent->handler.address_callback = tgt_agent_rw;
+ agent->handler.callback_data = agent;
+
+ agent->login = login;
+ agent->state = AGENT_STATE_RESET;
+ INIT_WORK(&agent->work, tgt_agent_fetch_work);
+ agent->orb_pointer = 0;
+ agent->doorbell = false;
+
+ ret = fw_core_add_address_handler(&agent->handler,
+ &sbp_register_region);
+ if (ret < 0) {
+ kfree(agent);
+ return ERR_PTR(ret);
+ }
+
+ return agent;
+}
+
+static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
+{
+ fw_core_remove_address_handler(&agent->handler);
+ cancel_work_sync(&agent->work);
+ kfree(agent);
+}
+
+/*
+ * Simple wrapper around fw_run_transaction that retries the transaction several
+ * times in case of failure, with an exponential backoff.
+ */
+static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
+ int generation, int speed, unsigned long long offset,
+ void *payload, size_t length)
+{
+ int attempt, ret, delay;
+
+ for (attempt = 1; attempt <= 5; attempt++) {
+ ret = fw_run_transaction(card, tcode, destination_id,
+ generation, speed, offset, payload, length);
+
+ switch (ret) {
+ case RCODE_COMPLETE:
+ case RCODE_TYPE_ERROR:
+ case RCODE_ADDRESS_ERROR:
+ case RCODE_GENERATION:
+ return ret;
+
+ default:
+ delay = 5 * attempt * attempt;
+ usleep_range(delay, delay * 2);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Wrapper around sbp_run_transaction that gets the card, destination,
+ * generation and speed out of the request's session.
+ */
+static int sbp_run_request_transaction(struct sbp_target_request *req,
+ int tcode, unsigned long long offset, void *payload,
+ size_t length)
+{
+ struct sbp_login_descriptor *login = req->login;
+ struct sbp_session *sess = login->sess;
+ struct fw_card *card;
+ int node_id, generation, speed, ret;
+
+ spin_lock_bh(&sess->lock);
+ card = fw_card_get(sess->card);
+ node_id = sess->node_id;
+ generation = sess->generation;
+ speed = sess->speed;
+ spin_unlock_bh(&sess->lock);
+
+ ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
+ offset, payload, length);
+
+ fw_card_put(card);
+
+ return ret;
+}
+
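+/*
+ * Copy the CDB out of the ORB; CDBs that do not fit in the ORB's inline
+ * command_block field are completed with a second block read starting
+ * just past the ORB.
+ */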
+static int sbp_fetch_command(struct sbp_target_request *req)
+{
+ int ret, cmd_len, copy_len;
+
+ cmd_len = scsi_command_size(req->orb.command_block);
+
+ req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
+ if (!req->cmd_buf)
+ return -ENOMEM;
+
+ memcpy(req->cmd_buf, req->orb.command_block,
+ min_t(int, cmd_len, sizeof(req->orb.command_block)));
+
+ if (cmd_len > sizeof(req->orb.command_block)) {
+ pr_debug("sbp_fetch_command: filling in long command\n");
+ copy_len = cmd_len - sizeof(req->orb.command_block);
+
+ ret = sbp_run_request_transaction(req,
+ TCODE_READ_BLOCK_REQUEST,
+ req->orb_pointer + sizeof(req->orb),
+ req->cmd_buf + sizeof(req->orb.command_block),
+ copy_len);
+ if (ret != RCODE_COMPLETE)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sbp_fetch_page_table(struct sbp_target_request *req)
+{
+ int pg_tbl_sz, ret;
+ struct sbp_page_table_entry *pg_tbl;
+
+ if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
+ return 0;
+
+ pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
+ sizeof(struct sbp_page_table_entry);
+
+ pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+
+ ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
+ sbp2_pointer_to_addr(&req->orb.data_descriptor),
+ pg_tbl, pg_tbl_sz);
+ if (ret != RCODE_COMPLETE) {
+ kfree(pg_tbl);
+ return -EIO;
+ }
+
+ req->pg_tbl = pg_tbl;
+ return 0;
+}
+
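+/*
+ * Without a page table, data_size is the transfer length in bytes; with
+ * one, it counts page table entries, so the segment lengths must be
+ * summed to get the total.
+ */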
+static void sbp_calc_data_length_direction(struct sbp_target_request *req,
+ u32 *data_len, enum dma_data_direction *data_dir)
+{
+ int data_size, direction, idx;
+
+ data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+ direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
+
+ if (!data_size) {
+ *data_len = 0;
+ *data_dir = DMA_NONE;
+ return;
+ }
+
+ *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (req->pg_tbl) {
+ *data_len = 0;
+ for (idx = 0; idx < data_size; idx++) {
+ *data_len += be16_to_cpu(
+ req->pg_tbl[idx].segment_length);
+ }
+ } else {
+ *data_len = data_size;
+ }
+}
+
+static void sbp_handle_command(struct sbp_target_request *req)
+{
+ struct sbp_login_descriptor *login = req->login;
+ struct sbp_session *sess = login->sess;
+ int ret, unpacked_lun;
+ u32 data_length;
+ enum dma_data_direction data_dir;
+
+ ret = sbp_fetch_command(req);
+ if (ret) {
+ pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = sbp_fetch_page_table(req);
+ if (ret) {
+ pr_debug("sbp_handle_command: fetch page table failed: %d\n",
+ ret);
+ goto err;
+ }
+
+ unpacked_lun = req->login->lun->unpacked_lun;
+ sbp_calc_data_length_direction(req, &data_length, &data_dir);
+
+ pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
+ req->orb_pointer, unpacked_lun, data_length, data_dir);
+
+ if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+ req->sense_buf, unpacked_lun, data_length,
+ MSG_SIMPLE_TAG, data_dir, 0))
+ goto err;
+
+ return;
+
+err:
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ sbp_free_request(req);
+}
+
+/*
+ * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
+ * DMA_FROM_DEVICE = write to initiator (SCSI READ)
+ */
+static int sbp_rw_data(struct sbp_target_request *req)
+{
+ struct sbp_session *sess = req->login->sess;
+ int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
+ generation, num_pte, length, tfr_length,
+ rcode = RCODE_COMPLETE;
+ struct sbp_page_table_entry *pte;
+ unsigned long long offset;
+ struct fw_card *card;
+ struct sg_mapping_iter iter;
+
+ if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
+ tcode = TCODE_WRITE_BLOCK_REQUEST;
+ sg_miter_flags = SG_MITER_FROM_SG;
+ } else {
+ tcode = TCODE_READ_BLOCK_REQUEST;
+ sg_miter_flags = SG_MITER_TO_SG;
+ }
+
+ max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
+ speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
+
+ pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
+ if (pg_size) {
+		pr_err("sbp_rw_data: page size ignored\n");
+ pg_size = 0x100 << pg_size;
+ }
+
+ spin_lock_bh(&sess->lock);
+ card = fw_card_get(sess->card);
+ node_id = sess->node_id;
+ generation = sess->generation;
+ spin_unlock_bh(&sess->lock);
+
+ if (req->pg_tbl) {
+ pte = req->pg_tbl;
+ num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+
+ offset = 0;
+ length = 0;
+ } else {
+ pte = NULL;
+ num_pte = 0;
+
+ offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
+ length = req->se_cmd.data_length;
+ }
+
+ sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
+ sg_miter_flags);
+
+ while (length || num_pte) {
+ if (!length) {
+ offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
+ be32_to_cpu(pte->segment_base_lo);
+ length = be16_to_cpu(pte->segment_length);
+
+ pte++;
+ num_pte--;
+ }
+
+ sg_miter_next(&iter);
+
+ tfr_length = min3(length, max_payload, (int)iter.length);
+
+ /* FIXME: take page_size into account */
+
+ rcode = sbp_run_transaction(card, tcode, node_id,
+ generation, speed,
+ offset, iter.addr, tfr_length);
+
+ if (rcode != RCODE_COMPLETE)
+ break;
+
+ length -= tfr_length;
+ offset += tfr_length;
+ iter.consumed = tfr_length;
+ }
+
+ sg_miter_stop(&iter);
+ fw_card_put(card);
+
+ if (rcode == RCODE_COMPLETE) {
+ WARN_ON(length != 0);
+ return 0;
+ } else {
+ return -EIO;
+ }
+}
+
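+/*
+ * STATUS_BLOCK_LEN holds the status block length in quadlets minus
+ * one, so the number of bytes to post to the status FIFO works out to
+ * (LEN + 1) * 4. Illustrative values: LEN=1 posts just the 8-byte
+ * header, LEN=5 posts the header plus 16 bytes of sense data.
+ */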
+static int sbp_send_status(struct sbp_target_request *req)
+{
+ int ret, length;
+ struct sbp_login_descriptor *login = req->login;
+
+ length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
+
+ ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+ login->status_fifo_addr, &req->status, length);
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
+ return -EIO;
+ }
+
+ pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
+ req->orb_pointer);
+
+ return 0;
+}
+
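+/*
+ * Repack fixed-format SCSI sense data into the SBP-2 status block:
+ * the first quadlet of data[] carries sfmt/status,
+ * valid/mark/eom/ili/sense_key, ASC and ASCQ, followed by the
+ * information, CDB-dependent, FRU and sense-key-dependent bytes
+ * copied from their fixed-format offsets. Only the first 18 bytes of
+ * sense data are consumed, hence the WARN_ON below.
+ */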
+static void sbp_sense_mangle(struct sbp_target_request *req)
+{
+ struct se_cmd *se_cmd = &req->se_cmd;
+ u8 *sense = req->sense_buf;
+ u8 *status = req->status.data;
+
+ WARN_ON(se_cmd->scsi_sense_length < 18);
+
+ switch (sense[0] & 0x7f) { /* sfmt */
+ case 0x70: /* current, fixed */
+ status[0] = 0 << 6;
+ break;
+ case 0x71: /* deferred, fixed */
+ status[0] = 1 << 6;
+ break;
+ case 0x72: /* current, descriptor */
+ case 0x73: /* deferred, descriptor */
+ default:
+ /*
+ * TODO: SBP-3 specifies what we should do with descriptor
+ * format sense data
+ */
+ pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
+ sense[0]);
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
+ return;
+ }
+
+ status[0] |= se_cmd->scsi_status & 0x3f;/* status */
+ status[1] =
+ (sense[0] & 0x80) | /* valid */
+ ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
+ (sense[2] & 0x0f); /* sense_key */
+ status[2] = se_cmd->scsi_asc; /* sense_code */
+ status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
+
+ /* information */
+ status[4] = sense[3];
+ status[5] = sense[4];
+ status[6] = sense[5];
+ status[7] = sense[6];
+
+ /* CDB-dependent */
+ status[8] = sense[8];
+ status[9] = sense[9];
+ status[10] = sense[10];
+ status[11] = sense[11];
+
+ /* fru */
+ status[12] = sense[14];
+
+ /* sense_key-dependent */
+ status[13] = sense[15];
+ status[14] = sense[16];
+ status[15] = sense[17];
+
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(5) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static int sbp_send_sense(struct sbp_target_request *req)
+{
+ struct se_cmd *se_cmd = &req->se_cmd;
+
+ if (se_cmd->scsi_sense_length) {
+ sbp_sense_mangle(req);
+ } else {
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+ }
+
+ return sbp_send_status(req);
+}
+
+static void sbp_free_request(struct sbp_target_request *req)
+{
+ kfree(req->pg_tbl);
+ kfree(req->cmd_buf);
+ kfree(req);
+}
+
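+/*
+ * Management ORB processing: read the ORB from the initiator's memory
+ * at agent->orb_offset, dispatch on the function field, then write a
+ * status block back to the ORB's status FIFO address. Unimplemented
+ * functions still get a REQUEST_COMPLETE response with
+ * SBP_STATUS_REQ_TYPE_NOTSUPP so the initiator does not have to wait
+ * for a timeout.
+ */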
+static void sbp_mgt_agent_process(struct work_struct *work)
+{
+ struct sbp_management_agent *agent =
+ container_of(work, struct sbp_management_agent, work);
+ struct sbp_management_request *req = agent->request;
+ int ret;
+ int status_data_len = 0;
+
+ /* fetch the ORB from the initiator */
+ ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ agent->orb_offset, &req->orb, sizeof(req->orb));
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("mgt_orb fetch failed: %x\n", ret);
+ goto out;
+ }
+
+ pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
+ sbp2_pointer_to_addr(&req->orb.ptr1),
+ sbp2_pointer_to_addr(&req->orb.ptr2),
+ be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
+ sbp2_pointer_to_addr(&req->orb.status_fifo));
+
+ if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
+ ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
+ pr_err("mgt_orb bad request\n");
+ goto out;
+ }
+
+ switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
+ case MANAGEMENT_ORB_FUNCTION_LOGIN:
+ sbp_management_request_login(agent, req, &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
+ sbp_management_request_query_logins(agent, req,
+ &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_RECONNECT:
+ sbp_management_request_reconnect(agent, req, &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
+ pr_notice("SET PASSWORD not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_LOGOUT:
+ sbp_management_request_logout(agent, req, &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
+ pr_notice("ABORT TASK not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
+ pr_notice("ABORT TASK SET not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
+ pr_notice("LOGICAL UNIT RESET not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
+ pr_notice("TARGET RESET not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ default:
+ pr_notice("unknown management function 0x%x\n",
+ MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+ }
+
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
+ STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
+ STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
+ req->status.orb_low = cpu_to_be32(agent->orb_offset);
+
+ /* write the status block back to the initiator */
+ ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ sbp2_pointer_to_addr(&req->orb.status_fifo),
+ &req->status, 8 + status_data_len);
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("mgt_orb status write failed: %x\n", ret);
+ goto out;
+ }
+
+out:
+ fw_card_put(req->card);
+ kfree(req);
+
+ spin_lock_bh(&agent->lock);
+ agent->state = MANAGEMENT_AGENT_STATE_IDLE;
+ spin_unlock_bh(&agent->lock);
+}
+
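+/*
+ * Address handler for the MANAGEMENT_AGENT register: an 8-byte block
+ * write of an ORB pointer kicks off a management request, a block
+ * read returns the last ORB pointer written, and anything else is
+ * rejected. The ORB itself is fetched later from workqueue context;
+ * this handler runs in atomic context, hence the GFP_ATOMIC
+ * allocation below.
+ */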
+static void sbp_mgt_agent_rw(struct fw_card *card,
+ struct fw_request *request, int tcode, int destination, int source,
+ int generation, unsigned long long offset, void *data, size_t length,
+ void *callback_data)
+{
+ struct sbp_management_agent *agent = callback_data;
+ struct sbp2_pointer *ptr = data;
+ int rcode = RCODE_ADDRESS_ERROR;
+
+ if (!agent->tport->enable)
+ goto out;
+
+ if ((offset != agent->handler.offset) || (length != 8))
+ goto out;
+
+ if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
+ struct sbp_management_request *req;
+ int prev_state;
+
+ spin_lock_bh(&agent->lock);
+ prev_state = agent->state;
+ agent->state = MANAGEMENT_AGENT_STATE_BUSY;
+ spin_unlock_bh(&agent->lock);
+
+ if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
+ pr_notice("ignoring management request while busy\n");
+ rcode = RCODE_CONFLICT_ERROR;
+ goto out;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ rcode = RCODE_CONFLICT_ERROR;
+ goto out;
+ }
+
+ req->card = fw_card_get(card);
+ req->generation = generation;
+ req->node_addr = source;
+ req->speed = fw_get_request_speed(request);
+
+ agent->orb_offset = sbp2_pointer_to_addr(ptr);
+ agent->request = req;
+
+ queue_work(system_unbound_wq, &agent->work);
+ rcode = RCODE_COMPLETE;
+ } else if (tcode == TCODE_READ_BLOCK_REQUEST) {
+ addr_to_sbp2_pointer(agent->orb_offset, ptr);
+ rcode = RCODE_COMPLETE;
+ } else {
+ rcode = RCODE_TYPE_ERROR;
+ }
+
+out:
+ fw_send_response(card, request, rcode);
+}
+
+static struct sbp_management_agent *sbp_management_agent_register(
+ struct sbp_tport *tport)
+{
+ int ret;
+ struct sbp_management_agent *agent;
+
+ agent = kmalloc(sizeof(*agent), GFP_KERNEL);
+ if (!agent)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&agent->lock);
+ agent->tport = tport;
+ agent->handler.length = 0x08;
+ agent->handler.address_callback = sbp_mgt_agent_rw;
+ agent->handler.callback_data = agent;
+ agent->state = MANAGEMENT_AGENT_STATE_IDLE;
+ INIT_WORK(&agent->work, sbp_mgt_agent_process);
+ agent->orb_offset = 0;
+ agent->request = NULL;
+
+ ret = fw_core_add_address_handler(&agent->handler,
+ &sbp_register_region);
+ if (ret < 0) {
+ kfree(agent);
+ return ERR_PTR(ret);
+ }
+
+ return agent;
+}
+
+static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
+{
+ fw_core_remove_address_handler(&agent->handler);
+ cancel_work_sync(&agent->work);
+ kfree(agent);
+}
+
+static int sbp_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int sbp_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *sbp_get_fabric_name(void)
+{
+ return "sbp";
+}
+
+static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+
+ return &tport->tport_name[0];
+}
+
+static u16 sbp_get_tag(struct se_portal_group *se_tpg)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ return tpg->tport_tpgt;
+}
+
+static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+ struct sbp_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
+ if (!nacl) {
+ pr_err("Unable to allocate struct sbp_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void sbp_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct sbp_nacl *nacl =
+ container_of(se_nacl, struct sbp_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void sbp_release_cmd(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ sbp_free_request(req);
+}
+
+static int sbp_shutdown_session(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void sbp_close_session(struct se_session *se_sess)
+{
+ return;
+}
+
+static u32 sbp_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int sbp_write_pending(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+ int ret;
+
+ ret = sbp_rw_data(req);
+ if (ret) {
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ return ret;
+ }
+
+ target_execute_cmd(se_cmd);
+ return 0;
+}
+
+static int sbp_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ /* only used for printk until we do TMRs */
+ return (u32)req->orb_pointer;
+}
+
+static int sbp_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int sbp_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+ int ret;
+
+ ret = sbp_rw_data(req);
+ if (ret) {
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ return ret;
+ }
+
+ return sbp_send_sense(req);
+}
+
+/*
+ * Called after command (no data transfer) or after the write (to device)
+ * operation is completed
+ */
+static int sbp_queue_status(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ return sbp_send_sense(req);
+}
+
+static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+}
+
+static void sbp_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
+}
+
+static int sbp_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ transport_generic_free_cmd(&req->se_cmd, 0);
+ return 1;
+}
+
+/*
+ * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
+ */
+static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ /*
+ * Return the IEEE 1394 (SBP) SCSI protocol identifier.
+ * This is defined in spc4r17, section 7.5.1 Table 362.
+ */
+ return SCSI_PROTOCOL_SBP;
+}
+
+static u32 sbp_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ int ret;
+
+ /*
+ * Set PROTOCOL IDENTIFIER to 3h for SBP
+ */
+ buf[0] = SCSI_PROTOCOL_SBP;
+ /*
+ * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
+ * over IEEE 1394
+ */
+ ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
+ if (ret < 0)
+ pr_debug("sbp transport_id: invalid hex string\n");
+
+ /*
+ * The IEEE 1394 TransportID has a fixed length of 24 bytes
+ */
+ return 24;
+}
+
+static u32 sbp_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ /*
+ * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
+ * over IEEE 1394
+ *
+ * The SBP TransportID has a fixed length of 24 bytes
+ */
+ return 24;
+}
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+static char *sbp_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ /*
+ * Assume FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
+ * for initiator ports using SCSI over IEEE 1394 (SBP).
+ *
+ * The TransportID for an IEEE 1394 initiator port has a fixed size
+ * of 24 bytes, and IEEE 1394 does not define an I_T nexus
+ * identifier, so *port_nexus_ptr is set to NULL.
+ */
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+
+ return (char *)&buf[8];
+}
+
+static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
+{
+ int i, count = 0;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ struct se_lun *se_lun = tpg->tpg_lun_list[i];
+
+ if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
+ continue;
+
+ count++;
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return count;
+}
+
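+/*
+ * Config ROM directory entries are quadlets of the form
+ * (key << 24) | 24-bit value. In the function below, for example, the
+ * 0x54 (management agent) entry carries the register offset in
+ * quadlets relative to CSR_REGISTER_BASE, and each LUN gets a 0x14
+ * entry combining the device type and the LUN number.
+ */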
+static int sbp_update_unit_directory(struct sbp_tport *tport)
+{
+ int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
+ u32 *data;
+
+ if (tport->unit_directory.data) {
+ fw_core_remove_descriptor(&tport->unit_directory);
+ kfree(tport->unit_directory.data);
+ tport->unit_directory.data = NULL;
+ }
+
+ if (!tport->enable || !tport->tpg)
+ return 0;
+
+ num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
+
+ /*
+ * Number of entries in the final unit directory:
+ * - all of those in the template
+ * - management_agent
+ * - unit_characteristics
+ * - reconnect_timeout
+ * - unit unique ID
+ * - one for each LUN
+ *
+ * MUST NOT include leaf or sub-directory entries
+ */
+ num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
+
+ if (tport->directory_id != -1)
+ num_entries++;
+
+ /* allocate num_entries + 4 for the header and unique ID leaf */
+ data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* directory_length */
+ data[idx++] = num_entries << 16;
+
+ /* directory_id */
+ if (tport->directory_id != -1)
+ data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
+
+ /* unit directory template */
+ memcpy(&data[idx], sbp_unit_directory_template,
+ sizeof(sbp_unit_directory_template));
+ idx += ARRAY_SIZE(sbp_unit_directory_template);
+
+ /* management_agent */
+ mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
+ data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
+
+ /* unit_characteristics */
+ data[idx++] = 0x3a000000 |
+ (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
+ SBP_ORB_FETCH_SIZE;
+
+ /* reconnect_timeout */
+ data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
+
+ /* unit unique ID (leaf is just after LUNs) */
+ data[idx++] = 0x8d000000 | (num_luns + 1);
+
+ spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
+ struct se_device *dev;
+ int type;
+
+ if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
+ continue;
+
+ spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
+
+ dev = se_lun->lun_se_dev;
+ type = dev->transport->get_device_type(dev);
+
+ /* logical_unit_number */
+ data[idx++] = 0x14000000 |
+ ((type << 16) & 0x1f0000) |
+ (se_lun->unpacked_lun & 0xffff);
+
+ spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
+ }
+ spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
+
+ /* unit unique ID leaf */
+ data[idx++] = 2 << 16;
+ data[idx++] = tport->guid >> 32;
+ data[idx++] = tport->guid;
+
+ tport->unit_directory.length = idx;
+ tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
+ tport->unit_directory.data = data;
+
+ ret = fw_core_add_descriptor(&tport->unit_directory);
+ if (ret < 0) {
+ kfree(tport->unit_directory.data);
+ tport->unit_directory.data = NULL;
+ }
+
+ return ret;
+}
+
+static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
+{
+ const char *cp;
+ char c, nibble;
+ int pos = 0, err;
+
+ *wwn = 0;
+ for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
+ c = *cp;
+ if (c == '\n' && cp[1] == '\0')
+ continue;
+ if (c == '\0') {
+ err = 2;
+ if (pos != 16)
+ goto fail;
+ return cp - name;
+ }
+ err = 3;
+ if (isdigit(c))
+ nibble = c - '0';
+ else if (isxdigit(c))
+ nibble = tolower(c) - 'a' + 10;
+ else
+ goto fail;
+ *wwn = (*wwn << 4) | nibble;
+ pos++;
+ }
+ err = 4;
+fail:
+ printk(KERN_INFO "err %u len %zu pos %u\n",
+ err, cp - name, pos);
+ return -1;
+}
+
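+/*
+ * WWNs are written as 16 hex digits (an EUI-64) with no separators.
+ * Illustrative round trip, assuming a GUID of 0x0001020304050607:
+ *
+ *   u64 guid;
+ *   if (sbp_parse_wwn("0001020304050607", &guid) < 0)
+ *           return -EINVAL;
+ *   sbp_format_wwn(buf, SBP_NAMELEN, guid);
+ *
+ * which writes "0001020304050607" back into buf.
+ */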
+static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
+{
+ return snprintf(buf, len, "%016llx", wwn);
+}
+
+static struct se_node_acl *sbp_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct sbp_nacl *nacl;
+ u64 guid = 0;
+ u32 nexus_depth = 1;
+
+ if (sbp_parse_wwn(name, &guid) < 0)
+ return ERR_PTR(-EINVAL);
+
+ se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ sbp_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+
+ nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
+ nacl->guid = guid;
+ sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
+
+ return se_nacl;
+}
+
+static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct sbp_nacl *nacl =
+ container_of(se_acl, struct sbp_nacl, se_node_acl);
+
+ core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+static int sbp_post_link_lun(
+ struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+
+ return sbp_update_unit_directory(tpg->tport);
+}
+
+static void sbp_pre_unlink_lun(
+ struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ int ret;
+
+ if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
+ tport->enable = 0;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0)
+ pr_err("unlink LUN: failed to update unit directory\n");
+}
+
+static struct se_portal_group *sbp_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct sbp_tport *tport =
+ container_of(wwn, struct sbp_tport, tport_wwn);
+
+ struct sbp_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (tport->tpg) {
+ pr_err("Only one TPG per Unit is possible.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct sbp_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ tpg->tport = tport;
+ tpg->tport_tpgt = tpgt;
+ tport->tpg = tpg;
+
+ /* default attribute values */
+ tport->enable = 0;
+ tport->directory_id = -1;
+ tport->mgt_orb_timeout = 15;
+ tport->max_reconnect_timeout = 5;
+ tport->max_logins_per_lun = 1;
+
+ tport->mgt_agt = sbp_management_agent_register(tport);
+ if (IS_ERR(tport->mgt_agt)) {
+ ret = PTR_ERR(tport->mgt_agt);
+ goto out_free_tpg;
+ }
+
+ ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0)
+ goto out_unreg_mgt_agt;
+
+ return &tpg->se_tpg;
+
+out_unreg_mgt_agt:
+ sbp_management_agent_unregister(tport->mgt_agt);
+out_free_tpg:
+ tport->tpg = NULL;
+ kfree(tpg);
+ return ERR_PTR(ret);
+}
+
+static void sbp_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+
+ core_tpg_deregister(se_tpg);
+ sbp_management_agent_unregister(tport->mgt_agt);
+ tport->tpg = NULL;
+ kfree(tpg);
+}
+
+static struct se_wwn *sbp_make_tport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct sbp_tport *tport;
+ u64 guid = 0;
+
+ if (sbp_parse_wwn(name, &guid) < 0)
+ return ERR_PTR(-EINVAL);
+
+ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
+ if (!tport) {
+ pr_err("Unable to allocate struct sbp_tport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ tport->guid = guid;
+ sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
+
+ return &tport->tport_wwn;
+}
+
+static void sbp_drop_tport(struct se_wwn *wwn)
+{
+ struct sbp_tport *tport =
+ container_of(wwn, struct sbp_tport, tport_wwn);
+
+ kfree(tport);
+}
+
+static ssize_t sbp_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
+}
+
+TF_WWN_ATTR_RO(sbp, version);
+
+static struct configfs_attribute *sbp_wwn_attrs[] = {
+ &sbp_wwn_version.attr,
+ NULL,
+};
+
+static ssize_t sbp_tpg_show_directory_id(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+
+ if (tport->directory_id == -1)
+ return sprintf(page, "implicit\n");
+ else
+ return sprintf(page, "%06x\n", tport->directory_id);
+}
+
+static ssize_t sbp_tpg_store_directory_id(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+
+ if (tport->enable) {
+ pr_err("Cannot change the directory_id on an active target.\n");
+ return -EBUSY;
+ }
+
+ if (strstr(page, "implicit") == page) {
+ tport->directory_id = -1;
+ } else {
+ if (kstrtoul(page, 16, &val) < 0)
+ return -EINVAL;
+ if (val > 0xffffff)
+ return -EINVAL;
+
+ tport->directory_id = val;
+ }
+
+ return count;
+}
+
+static ssize_t sbp_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->enable);
+}
+
+static ssize_t sbp_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (tport->enable == val)
+ return count;
+
+ if (val) {
+ if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
+ pr_err("Cannot enable a target with no LUNs!\n");
+ return -EINVAL;
+ }
+ } else {
+ /* XXX: force-shutdown sessions instead? */
+ spin_lock_bh(&se_tpg->session_lock);
+ if (!list_empty(&se_tpg->tpg_sess_list)) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return -EBUSY;
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+ }
+
+ tport->enable = val;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0) {
+ pr_err("Could not update Config ROM\n");
+ return ret;
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
+TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *sbp_tpg_base_attrs[] = {
+ &sbp_tpg_directory_id.attr,
+ &sbp_tpg_enable.attr,
+ NULL,
+};
+
+static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->mgt_orb_timeout);
+}
+
+static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val < 1) || (val > 127))
+ return -EINVAL;
+
+ if (tport->mgt_orb_timeout == val)
+ return count;
+
+ tport->mgt_orb_timeout = val;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->max_reconnect_timeout);
+}
+
+static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val < 1) || (val > 32767))
+ return -EINVAL;
+
+ if (tport->max_reconnect_timeout == val)
+ return count;
+
+ tport->max_reconnect_timeout = val;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->max_logins_per_lun);
+}
+
+static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val < 1) || (val > 127))
+ return -EINVAL;
+
+ /* XXX: also check against current count? */
+
+ tport->max_logins_per_lun = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
+TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
+TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
+ &sbp_tpg_attrib_mgt_orb_timeout.attr,
+ &sbp_tpg_attrib_max_reconnect_timeout.attr,
+ &sbp_tpg_attrib_max_logins_per_lun.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops sbp_ops = {
+ .get_fabric_name = sbp_get_fabric_name,
+ .get_fabric_proto_ident = sbp_get_fabric_proto_ident,
+ .tpg_get_wwn = sbp_get_fabric_wwn,
+ .tpg_get_tag = sbp_get_tag,
+ .tpg_get_default_depth = sbp_get_default_depth,
+ .tpg_get_pr_transport_id = sbp_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = sbp_check_true,
+ .tpg_check_demo_mode_cache = sbp_check_true,
+ .tpg_check_demo_mode_write_protect = sbp_check_false,
+ .tpg_check_prod_mode_write_protect = sbp_check_false,
+ .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl,
+ .tpg_release_fabric_acl = sbp_release_fabric_acl,
+ .tpg_get_inst_index = sbp_tpg_get_inst_index,
+ .release_cmd = sbp_release_cmd,
+ .shutdown_session = sbp_shutdown_session,
+ .close_session = sbp_close_session,
+ .sess_get_index = sbp_sess_get_index,
+ .write_pending = sbp_write_pending,
+ .write_pending_status = sbp_write_pending_status,
+ .set_default_node_attributes = sbp_set_default_node_attrs,
+ .get_task_tag = sbp_get_task_tag,
+ .get_cmd_state = sbp_get_cmd_state,
+ .queue_data_in = sbp_queue_data_in,
+ .queue_status = sbp_queue_status,
+ .queue_tm_rsp = sbp_queue_tm_rsp,
+ .aborted_task = sbp_aborted_task,
+ .check_stop_free = sbp_check_stop_free,
+
+ .fabric_make_wwn = sbp_make_tport,
+ .fabric_drop_wwn = sbp_drop_tport,
+ .fabric_make_tpg = sbp_make_tpg,
+ .fabric_drop_tpg = sbp_drop_tpg,
+ .fabric_post_link = sbp_post_link_lun,
+ .fabric_pre_unlink = sbp_pre_unlink_lun,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = sbp_make_nodeacl,
+ .fabric_drop_nodeacl = sbp_drop_nodeacl,
+};
+
+static int sbp_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ return PTR_ERR(fabric);
+ }
+
+ fabric->tf_ops = sbp_ops;
+
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for SBP\n");
+ return ret;
+ }
+
+ sbp_fabric_configfs = fabric;
+
+ return 0;
+}
+
+static void sbp_deregister_configfs(void)
+{
+ if (!sbp_fabric_configfs)
+ return;
+
+ target_fabric_configfs_deregister(sbp_fabric_configfs);
+ sbp_fabric_configfs = NULL;
+}
+
+static int __init sbp_init(void)
+{
+ int ret;
+
+ ret = sbp_register_configfs();
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit sbp_exit(void)
+{
+ sbp_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("FireWire SBP fabric driver");
+MODULE_LICENSE("GPL");
+module_init(sbp_init);
+module_exit(sbp_exit);
diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h
new file mode 100644
index 00000000000..6d0d74a2c54
--- /dev/null
+++ b/drivers/target/sbp/sbp_target.h
@@ -0,0 +1,251 @@
+#ifndef _SBP_BASE_H
+#define _SBP_BASE_H
+
+#include <linux/firewire.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <target/target_core_base.h>
+
+#define SBP_VERSION "v0.1"
+#define SBP_NAMELEN 32
+
+#define SBP_ORB_FETCH_SIZE 8
+
+#define MANAGEMENT_AGENT_STATE_IDLE 0
+#define MANAGEMENT_AGENT_STATE_BUSY 1
+
+#define ORB_NOTIFY(v) (((v) >> 31) & 0x01)
+#define ORB_REQUEST_FORMAT(v) (((v) >> 29) & 0x03)
+
+#define MANAGEMENT_ORB_FUNCTION(v) (((v) >> 16) & 0x0f)
+
+#define MANAGEMENT_ORB_FUNCTION_LOGIN 0x0
+#define MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS 0x1
+#define MANAGEMENT_ORB_FUNCTION_RECONNECT 0x3
+#define MANAGEMENT_ORB_FUNCTION_SET_PASSWORD 0x4
+#define MANAGEMENT_ORB_FUNCTION_LOGOUT 0x7
+#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK 0xb
+#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET 0xc
+#define MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET 0xe
+#define MANAGEMENT_ORB_FUNCTION_TARGET_RESET 0xf
+
+#define LOGIN_ORB_EXCLUSIVE(v) (((v) >> 28) & 0x01)
+#define LOGIN_ORB_RESERVED(v) (((v) >> 24) & 0x0f)
+#define LOGIN_ORB_RECONNECT(v) (((v) >> 20) & 0x0f)
+#define LOGIN_ORB_LUN(v) (((v) >> 0) & 0xffff)
+#define LOGIN_ORB_PASSWORD_LENGTH(v) (((v) >> 16) & 0xffff)
+#define LOGIN_ORB_RESPONSE_LENGTH(v) (((v) >> 0) & 0xffff)
+
+#define RECONNECT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff)
+#define LOGOUT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff)
+
+#define CMDBLK_ORB_DIRECTION(v) (((v) >> 27) & 0x01)
+#define CMDBLK_ORB_SPEED(v) (((v) >> 24) & 0x07)
+#define CMDBLK_ORB_MAX_PAYLOAD(v) (((v) >> 20) & 0x0f)
+#define CMDBLK_ORB_PG_TBL_PRESENT(v) (((v) >> 19) & 0x01)
+#define CMDBLK_ORB_PG_SIZE(v) (((v) >> 16) & 0x07)
+#define CMDBLK_ORB_DATA_SIZE(v) (((v) >> 0) & 0xffff)
+
+#define STATUS_BLOCK_SRC(v) (((v) & 0x03) << 30)
+#define STATUS_BLOCK_RESP(v) (((v) & 0x03) << 28)
+#define STATUS_BLOCK_DEAD(v) (((v) ? 1 : 0) << 27)
+#define STATUS_BLOCK_LEN(v) (((v) & 0x07) << 24)
+#define STATUS_BLOCK_SBP_STATUS(v) (((v) & 0xff) << 16)
+#define STATUS_BLOCK_ORB_OFFSET_HIGH(v) (((v) & 0xffff) << 0)
+
+#define STATUS_SRC_ORB_CONTINUING 0
+#define STATUS_SRC_ORB_FINISHED 1
+#define STATUS_SRC_UNSOLICITED 2
+
+#define STATUS_RESP_REQUEST_COMPLETE 0
+#define STATUS_RESP_TRANSPORT_FAILURE 1
+#define STATUS_RESP_ILLEGAL_REQUEST 2
+#define STATUS_RESP_VENDOR_DEPENDENT 3
+
+#define SBP_STATUS_OK 0
+#define SBP_STATUS_REQ_TYPE_NOTSUPP 1
+#define SBP_STATUS_SPEED_NOTSUPP 2
+#define SBP_STATUS_PAGE_SIZE_NOTSUPP 3
+#define SBP_STATUS_ACCESS_DENIED 4
+#define SBP_STATUS_LUN_NOTSUPP 5
+#define SBP_STATUS_PAYLOAD_TOO_SMALL 6
+/* 7 is reserved */
+#define SBP_STATUS_RESOURCES_UNAVAIL 8
+#define SBP_STATUS_FUNCTION_REJECTED 9
+#define SBP_STATUS_LOGIN_ID_UNKNOWN 10
+#define SBP_STATUS_DUMMY_ORB_COMPLETE 11
+#define SBP_STATUS_REQUEST_ABORTED 12
+#define SBP_STATUS_UNSPECIFIED_ERROR 0xff
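+
+/*
+ * Illustrative encoding using the helpers above: a final status block
+ * for a successfully completed ORB with no sense data is
+ * STATUS_BLOCK_SRC(STATUS_SRC_ORB_FINISHED) |
+ * STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ * STATUS_BLOCK_LEN(1) | STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK),
+ * which evaluates to 0x41000000.
+ */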
+
+#define AGENT_STATE_RESET 0
+#define AGENT_STATE_ACTIVE 1
+#define AGENT_STATE_SUSPENDED 2
+#define AGENT_STATE_DEAD 3
+
+struct sbp2_pointer {
+ __be32 high;
+ __be32 low;
+};
+
+struct sbp_command_block_orb {
+ struct sbp2_pointer next_orb;
+ struct sbp2_pointer data_descriptor;
+ __be32 misc;
+ u8 command_block[12];
+};
+
+struct sbp_page_table_entry {
+ __be16 segment_length;
+ __be16 segment_base_hi;
+ __be32 segment_base_lo;
+};
+
+struct sbp_management_orb {
+ struct sbp2_pointer ptr1;
+ struct sbp2_pointer ptr2;
+ __be32 misc;
+ __be32 length;
+ struct sbp2_pointer status_fifo;
+};
+
+struct sbp_status_block {
+ __be32 status;
+ __be32 orb_low;
+ u8 data[24];
+};
+
+struct sbp_login_response_block {
+ __be32 misc;
+ struct sbp2_pointer command_block_agent;
+ __be32 reconnect_hold;
+};
+
+struct sbp_login_descriptor {
+ struct sbp_session *sess;
+ struct list_head link;
+
+ struct se_lun *lun;
+
+ u64 status_fifo_addr;
+ int exclusive;
+ u16 login_id;
+
+ struct sbp_target_agent *tgt_agt;
+};
+
+struct sbp_session {
+ spinlock_t lock;
+ struct se_session *se_sess;
+ struct list_head login_list;
+ struct delayed_work maint_work;
+
+ u64 guid; /* login_owner_EUI_64 */
+ int node_id; /* login_owner_ID */
+
+ struct fw_card *card;
+ int generation;
+ int speed;
+
+ int reconnect_hold;
+ u64 reconnect_expires;
+};
+
+struct sbp_nacl {
+ /* Initiator EUI-64 */
+ u64 guid;
+ /* ASCII formatted GUID for SBP Initiator port */
+ char iport_name[SBP_NAMELEN];
+ /* Returned by sbp_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct sbp_tpg {
+ /* Target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* Pointer back to sbp_tport */
+ struct sbp_tport *tport;
+ /* Returned by sbp_make_tpg() */
+ struct se_portal_group se_tpg;
+};
+
+struct sbp_tport {
+ /* Target Unit Identifier (EUI-64) */
+ u64 guid;
+ /* Target port name */
+ char tport_name[SBP_NAMELEN];
+ /* Returned by sbp_make_tport() */
+ struct se_wwn tport_wwn;
+
+ struct sbp_tpg *tpg;
+
+ /* FireWire unit directory */
+ struct fw_descriptor unit_directory;
+
+ /* SBP Management Agent */
+ struct sbp_management_agent *mgt_agt;
+
+ /* Parameters */
+ int enable;
+ s32 directory_id;
+ int mgt_orb_timeout;
+ int max_reconnect_timeout;
+ int max_logins_per_lun;
+};
+
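+/*
+ * An sbp2_pointer packs a 48-bit 1394 memory address into two
+ * quadlets. The top 16 bits of 'high' carry the destination node ID
+ * on the wire and are masked off here, and the low two address bits
+ * are forced to quadlet alignment. Illustrative example: a pointer
+ * whose decoded high/low quadlets are 0xffc00001 and 0xf0000234
+ * yields address 0x1f0000234.
+ */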
+static inline u64 sbp2_pointer_to_addr(const struct sbp2_pointer *ptr)
+{
+ return (u64)(be32_to_cpu(ptr->high) & 0x0000ffff) << 32 |
+ (be32_to_cpu(ptr->low) & 0xfffffffc);
+}
+
+static inline void addr_to_sbp2_pointer(u64 addr, struct sbp2_pointer *ptr)
+{
+ ptr->high = cpu_to_be32(addr >> 32);
+ ptr->low = cpu_to_be32(addr);
+}
+
+struct sbp_target_agent {
+ spinlock_t lock;
+ struct fw_address_handler handler;
+ struct sbp_login_descriptor *login;
+ int state;
+ struct work_struct work;
+ u64 orb_pointer;
+ bool doorbell;
+};
+
+struct sbp_target_request {
+ struct sbp_login_descriptor *login;
+ u64 orb_pointer;
+ struct sbp_command_block_orb orb;
+ struct sbp_status_block status;
+ struct work_struct work;
+
+ struct se_cmd se_cmd;
+ struct sbp_page_table_entry *pg_tbl;
+ void *cmd_buf;
+
+ unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
+};
+
+struct sbp_management_agent {
+ spinlock_t lock;
+ struct sbp_tport *tport;
+ struct fw_address_handler handler;
+ int state;
+ struct work_struct work;
+ u64 orb_offset;
+ struct sbp_management_request *request;
+};
+
+struct sbp_management_request {
+ struct sbp_management_orb orb;
+ struct sbp_status_block status;
+ struct fw_card *card;
+ int generation;
+ int node_addr;
+ int speed;
+};
+
+#endif
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 01a2691dfb4..fbc5ebb5f76 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -3,8 +3,7 @@
*
* This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
*
- * Copyright (c) 2009-2010 Rising Tide Systems
- * Copyright (c) 2009-2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -28,8 +27,10 @@
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/export.h>
+#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -40,10 +41,13 @@
#include "target_core_alua.h"
#include "target_core_ua.h"
-static int core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int valid,
+ int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port, int explict, int offline);
+ struct se_port *port, int explicit, int offline);
+
+static char *core_alua_dump_state(int state);
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
@@ -54,34 +58,122 @@ static LIST_HEAD(lu_gps_list);
struct t10_alua_lu_gp *default_lu_gp;
/*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *map_mem;
+ unsigned char *buf;
+ u32 rd_len = 0, off;
+
+ if (cmd->data_length < 4) {
+ pr_warn("REPORT REFERRALS allocation length %u too"
+ " small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ off = 4;
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (list_empty(&dev->t10_alua.lba_map_list)) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ transport_kunmap_data_sg(cmd);
+
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ int desc_num = off + 3;
+ int pg_num;
+
+ off += 4;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+ off += 8;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+ off += 8;
+ rd_len += 20;
+ pg_num = 0;
+ list_for_each_entry(map_mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ int alua_state = map_mem->lba_map_mem_alua_state;
+ int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+ if (cmd->data_length > off)
+ buf[off] = alua_state & 0x0f;
+ off += 2;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id >> 8) & 0xff;
+ off++;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id & 0xff);
+ off++;
+ rd_len += 4;
+ pg_num++;
+ }
+ if (cmd->data_length > desc_num)
+ buf[desc_num] = pg_num;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+
+ /*
+ * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+ */
+ put_unaligned_be16(rd_len, &buf[2]);
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
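+
+/*
+ * Each referral descriptor written above is 20 bytes (a 4-byte header
+ * plus two 8-byte LBA fields) followed by 4 bytes per port group
+ * member, and every store is guarded by a data_length check, so a
+ * short allocation length truncates the returned data while rd_len
+ * still reports the full length required.
+ */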
+
+/*
* REPORT_TARGET_PORT_GROUPS
*
* See spc4r17 section 6.27
*/
-int target_emulate_report_target_port_groups(struct se_task *task)
+sense_reason_t
+target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *buf;
- u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
- Target port group descriptor */
+ u32 rd_len = 0, off;
+ int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
+
/*
- * Need at least 4 bytes of response data or else we can't
- * even fit the return data length.
+ * Skip over the RESERVED area to the first Target port group
+ * descriptor, depending on the PARAMETER DATA FORMAT type.
*/
- if (cmd->data_length < 4) {
- pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
- " too small\n", cmd->data_length);
- return -EINVAL;
- }
+ if (ext_hdr != 0)
+ off = 8;
+ else
+ off = 4;
+ if (cmd->data_length < off) {
+ pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
+ " small for %s header\n", cmd->data_length,
+ (ext_hdr) ? "extended" : "normal");
+ return TCM_INVALID_CDB_FIELD;
+ }
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
* Check if the Target port group and Target port descriptor list
@@ -108,12 +200,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
- buf[off] = 0x80; /* T_SUP */
- buf[off] |= 0x40; /* O_SUP */
- buf[off] |= 0x8; /* U_SUP */
- buf[off] |= 0x4; /* S_SUP */
- buf[off] |= 0x2; /* AN_SUP */
- buf[off++] |= 0x1; /* AO_SUP */
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
/*
* TARGET PORT GROUP
*/
@@ -154,57 +241,82 @@ int target_emulate_report_target_port_groups(struct se_task *task)
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
- buf[0] = ((rd_len >> 24) & 0xff);
- buf[1] = ((rd_len >> 16) & 0xff);
- buf[2] = ((rd_len >> 8) & 0xff);
- buf[3] = (rd_len & 0xff);
+ put_unaligned_be32(rd_len, &buf[0]);
+ /*
+ * Fill in the Extended header parameter data format if requested
+ */
+ if (ext_hdr != 0) {
+ buf[4] = 0x10;
+ /*
+ * Set the implicit transition time (in seconds) for the application
+ * client to use as a base for its transition timeout value.
+ *
+ * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
+ * this CDB was received upon to determine this value individually
+ * for the ALUA target port group.
+ */
+ port = cmd->se_lun->lun_sep;
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (tg_pt_gp_mem) {
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (tg_pt_gp)
+ buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ }
+ }
transport_kunmap_data_sg(cmd);
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
+ target_complete_cmd(cmd, GOOD);
return 0;
}
/*
- * SET_TARGET_PORT_GROUPS for explict ALUA operation.
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
*
* See spc4r17 section 6.35
*/
-int target_emulate_set_target_port_groups(struct se_task *task)
+sense_reason_t
+target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_port *port, *l_port = cmd->se_lun->lun_sep;
struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf;
unsigned char *ptr;
+ sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
- int alua_access_state, primary = 0, rc;
+ int alua_access_state, primary = 0, valid_states;
u16 tg_pt_id, rtpi;
- if (!l_port) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ if (!l_port)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (cmd->data_length < 4) {
+ pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
+ " small\n", cmd->data_length);
+ return TCM_INVALID_PARAMETER_LIST;
}
+
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
- * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
+ * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -212,32 +324,32 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
- rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!rc) {
+ if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
- " while TPGS_EXPLICT_ALUA is disabled\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ " while TPGS_EXPLICIT_ALUA is disabled\n");
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
ptr = &buf[4]; /* Skip over RESERVED area in header */
while (len < cmd->data_length) {
+ bool found = false;
alua_access_state = (ptr[0] & 0x0f);
/*
* Check the received ALUA access state, and determine if
* the state is a primary or secondary target port asymmetric
* access state.
*/
- rc = core_alua_check_transition(alua_access_state, &primary);
- if (rc != 0) {
+ rc = core_alua_check_transition(alua_access_state,
+ valid_states, &primary);
+ if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
* an invalid combination of target port asymmetric
@@ -248,11 +360,9 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
goto out;
}
- rc = -1;
+
/*
* If the ASYMMETRIC ACCESS STATE field (see table 267)
* specifies a primary target port asymmetric access state,
@@ -267,15 +377,14 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* changed.
*/
if (primary) {
- tg_pt_id = ((ptr[2] << 8) & 0xff);
- tg_pt_id |= (ptr[3] & 0xff);
+ tg_pt_id = get_unaligned_be16(ptr + 2);
/*
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &su_dev->t10_alua.tg_pt_gps_list,
+ &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
@@ -284,38 +393,30 @@ int target_emulate_set_target_port_groups(struct se_task *task)
continue;
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_inc();
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ smp_mb__after_atomic();
+
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- rc = core_alua_do_port_transition(tg_pt_gp,
+ if (!core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
- alua_access_state, 1);
+ alua_access_state, 1))
+ found = true;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
break;
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
- /*
- * If not matching target port group ID can be located
- * throw an exception with ASCQ: INVALID_PARAMETER_LIST
- */
- if (rc != 0) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
- goto out;
- }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
/*
- * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
+ * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
* the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
- rtpi = ((ptr[2] << 8) & 0xff);
- rtpi |= (ptr[3] & 0xff);
+ rtpi = get_unaligned_be16(ptr + 2);
/*
- * Locate the matching relative target port identifer
+ * Locate the matching relative target port identifier
* for the struct se_device storage object.
*/
spin_lock(&dev->se_port_lock);
@@ -325,25 +426,22 @@ int target_emulate_set_target_port_groups(struct se_task *task)
continue;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+
spin_unlock(&dev->se_port_lock);
- rc = core_alua_set_tg_pt_secondary_state(
- tg_pt_gp_mem, port, 1, 1);
+ if (!core_alua_set_tg_pt_secondary_state(
+ tg_pt_gp_mem, port, 1, 1))
+ found = true;
spin_lock(&dev->se_port_lock);
break;
}
spin_unlock(&dev->se_port_lock);
- /*
- * If not matching relative target port identifier can
- * be located, throw an exception with ASCQ:
- * INVALID_PARAMETER_LIST
- */
- if (rc != 0) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
- goto out;
- }
+ }
+
+ if (!found) {
+ rc = TCM_INVALID_PARAMETER_LIST;
+ goto out;
}
ptr += 4;
@@ -352,16 +450,31 @@ int target_emulate_set_target_port_groups(struct se_task *task)
out:
transport_kunmap_data_sg(cmd);
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
+ if (!rc)
+ target_complete_cmd(cmd, GOOD);
+ return rc;
}
-static inline int core_alua_state_nonoptimized(
+static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
+{
+ /*
+ * Set the SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * the ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state.
+ */
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/ASCQ: "
+ "0x04/0x%02x\n",
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+ cmd->scsi_asc = 0x04;
+ cmd->scsi_ascq = alua_ascq;
+}
+
+static inline void core_alua_state_nonoptimized(
struct se_cmd *cmd,
unsigned char *cdb,
- int nonop_delay_msecs,
- u8 *alua_ascq)
+ int nonop_delay_msecs)
{
/*
* Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -370,13 +483,85 @@ static inline int core_alua_state_nonoptimized(
*/
cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
cmd->alua_nonop_delay = nonop_delay_msecs;
+}
+
+static inline int core_alua_state_lba_dependent(
+ struct se_cmd *cmd,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ struct se_device *dev = cmd->se_dev;
+ u64 segment_size, segment_mult, sectors, lba;
+
+ /* Only need to check for cdb actually containing LBAs */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+ return 0;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ segment_size = dev->t10_alua.lba_map_segment_size;
+ segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+ sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ lba = cmd->t_task_lba;
+ while (lba < cmd->t_task_lba + sectors) {
+ struct t10_alua_lba_map *cur_map = NULL, *map;
+ struct t10_alua_lba_map_member *map_mem;
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ u64 start_lba, last_lba;
+ u64 first_lba = map->lba_map_first_lba;
+
+ if (segment_mult) {
+ u64 tmp = lba;
+ start_lba = do_div(tmp, segment_size * segment_mult);
+
+ last_lba = first_lba + segment_size - 1;
+ if (start_lba >= first_lba &&
+ start_lba <= last_lba) {
+ lba += segment_size;
+ cur_map = map;
+ break;
+ }
+ } else {
+ last_lba = map->lba_map_last_lba;
+ if (lba >= first_lba && lba <= last_lba) {
+ lba = last_lba + 1;
+ cur_map = map;
+ break;
+ }
+ }
+ }
+ if (!cur_map) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
+ return 1;
+ }
+ list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (map_mem->lba_map_mem_alua_pg_id !=
+ tg_pt_gp->tg_pt_gp_id)
+ continue;
+ switch(map_mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_STANDBY:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
+ return 1;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
+ return 1;
+ default:
+ break;
+ }
+ }
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
return 0;
}
static inline int core_alua_state_standby(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -391,12 +576,22 @@ static inline int core_alua_state_standby(
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
+ case READ_CAPACITY:
+ return 0;
+ case SERVICE_ACTION_IN:
+ switch (cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ return 0;
+ default:
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
+ return 1;
+ }
case MAINTENANCE_IN:
- switch (cdb[1]) {
+ switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
return 1;
}
case MAINTENANCE_OUT:
@@ -404,7 +599,7 @@ static inline int core_alua_state_standby(
case MO_SET_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
return 1;
}
case REQUEST_SENSE:
@@ -414,7 +609,7 @@ static inline int core_alua_state_standby(
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
return 1;
}
@@ -423,8 +618,7 @@ static inline int core_alua_state_standby(
static inline int core_alua_state_unavailable(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -433,12 +627,13 @@ static inline int core_alua_state_unavailable(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
+ return 0;
case MAINTENANCE_IN:
- switch (cdb[1]) {
+ switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
}
case MAINTENANCE_OUT:
@@ -446,7 +641,7 @@ static inline int core_alua_state_unavailable(
case MO_SET_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
}
case REQUEST_SENSE:
@@ -454,7 +649,7 @@ static inline int core_alua_state_unavailable(
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
return 1;
}
@@ -463,22 +658,22 @@ static inline int core_alua_state_unavailable(
static inline int core_alua_state_transition(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
- * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by
+ * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
* spc4r17 section 5.9.2.5
*/
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
+ return 0;
case MAINTENANCE_IN:
- switch (cdb[1]) {
+ switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
return 1;
}
case REQUEST_SENSE:
@@ -486,7 +681,7 @@ static inline int core_alua_state_transition(
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
return 1;
}
@@ -494,41 +689,26 @@ static inline int core_alua_state_transition(
}
/*
- * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
- * in transport_cmd_sequencer(). This function is assigned to
- * struct t10_alua *->state_check() in core_setup_alua()
- */
-static int core_alua_state_check_nop(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
-{
- return 0;
-}
-
-/*
- * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
- * This function is assigned to struct t10_alua *->state_check() in
- * core_setup_alua()
- *
- * Also, this function can return three different return codes to
- * signal transport_generic_cmd_sequencer()
- *
- * return 1: Is used to signal LUN not accecsable, and check condition/not ready
+ * return 1: Is used to signal LUN not accessible, and check condition/not ready
* return 0: Used to signal success
- * reutrn -1: Used to signal failure, and invalid cdb field
+ * return -1: Used to signal failure, and invalid cdb field
*/
-static int core_alua_state_check(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+sense_reason_t
+target_alua_state_check(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
struct se_lun *lun = cmd->se_lun;
struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
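+
+ /*
+ * ALUA emulation does not apply to internal-use HBAs or to pSCSI
+ * passthrough devices, which defer to the underlying SCSI hardware.
+ */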
+ if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+ return 0;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return 0;
+
if (!port)
return 0;
/*
@@ -536,11 +716,10 @@ static int core_alua_state_check(
* access state: OFFLINE
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
- *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
- *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- return 1;
+ set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
+ return TCM_CHECK_CONDITION_NOT_READY;
}
/*
* Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -549,30 +728,43 @@ static int core_alua_state_check(
* a ALUA logical unit group.
*/
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
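+ /*
+ * A port that is not a member of any target port group has no
+ * ALUA state to enforce.
+ */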
+ if (!tg_pt_gp_mem)
+ return 0;
+
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
/*
- * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+ * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
* For the Optimized ALUA access state case, we want to process the
* incoming fabric cmd ASAP..
*/
- if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+ if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
return 0;
switch (out_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
- return core_alua_state_nonoptimized(cmd, cdb,
- nonop_delay_msecs, alua_ascq);
+ core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
+ break;
case ALUA_ACCESS_STATE_STANDBY:
- return core_alua_state_standby(cmd, cdb, alua_ascq);
+ if (core_alua_state_standby(cmd, cdb))
+ return TCM_CHECK_CONDITION_NOT_READY;
+ break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+ if (core_alua_state_unavailable(cmd, cdb))
+ return TCM_CHECK_CONDITION_NOT_READY;
+ break;
case ALUA_ACCESS_STATE_TRANSITION:
- return core_alua_state_transition(cmd, cdb, alua_ascq);
+ if (core_alua_state_transition(cmd, cdb))
+ return TCM_CHECK_CONDITION_NOT_READY;
+ break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
+ return TCM_CHECK_CONDITION_NOT_READY;
+ break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -581,26 +773,46 @@ static int core_alua_state_check(
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
return 0;
}
/*
- * Check implict and explict ALUA state change request.
+ * Check implicit and explicit ALUA state change request.
*/
-static int core_alua_check_transition(int state, int *primary)
+static sense_reason_t
+core_alua_check_transition(int state, int valid, int *primary)
{
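+ /*
+ * 'valid' is the ALUA_*_SUP bitmask of states this target port
+ * group has been configured to support.
+ */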
+ /*
+ * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+ * defined as primary target port asymmetric access states.
+ */
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ if (!(valid & ALUA_AO_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ if (!(valid & ALUA_AN_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_STANDBY:
+ if (!(valid & ALUA_S_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- /*
- * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
- * defined as primary target port asymmetric access states.
- */
+ if (!(valid & ALUA_U_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ if (!(valid & ALUA_LBD_SUP))
+ goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_OFFLINE:
@@ -608,29 +820,46 @@ static int core_alua_check_transition(int state, int *primary)
* OFFLINE state is defined as a secondary target port
* asymmetric access state.
*/
+ if (!(valid & ALUA_O_SUP))
+ goto not_supported;
*primary = 0;
break;
+ case ALUA_ACCESS_STATE_TRANSITION:
+ /*
+ * Transitioning is set internally, and
+ * cannot be selected manually.
+ */
+ goto not_supported;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
return 0;
+
+not_supported:
+ pr_err("ALUA access state %s not supported",
+ core_alua_dump_state(state));
+ return TCM_INVALID_PARAMETER_LIST;
}
static char *core_alua_dump_state(int state)
{
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ return "LBA Dependent";
case ALUA_ACCESS_STATE_STANDBY:
return "Standby";
case ALUA_ACCESS_STATE_UNAVAILABLE:
return "Unavailable";
case ALUA_ACCESS_STATE_OFFLINE:
return "Offline";
+ case ALUA_ACCESS_STATE_TRANSITION:
+ return "Transitioning";
default:
return "Unknown";
}
@@ -643,10 +872,10 @@ char *core_alua_dump_status(int status)
switch (status) {
case ALUA_STATUS_NONE:
return "None";
- case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
- return "Altered by Explict STPG";
- case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
- return "Altered by Implict ALUA";
+ case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
+ return "Altered by Explicit STPG";
+ case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
+ return "Altered by Implicit ALUA";
default:
return "Unknown";
}
@@ -689,95 +918,67 @@ static int core_alua_write_tpg_metadata(
unsigned char *md_buf,
u32 md_buf_len)
{
- mm_segment_t old_fs;
- struct file *file;
- struct iovec iov[1];
- int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
-
- memset(iov, 0, sizeof(struct iovec));
+ struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
+ int ret;
- file = filp_open(path, flags, 0600);
- if (IS_ERR(file) || !file || !file->f_dentry) {
- pr_err("filp_open(%s) for ALUA metadata failed\n",
- path);
+ if (IS_ERR(file)) {
+ pr_err("filp_open(%s) for ALUA metadata failed\n", path);
return -ENODEV;
}
-
- iov[0].iov_base = &md_buf[0];
- iov[0].iov_len = md_buf_len;
-
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
- set_fs(old_fs);
-
- if (ret < 0) {
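+ /*
+ * kernel_write() takes a kernel-space buffer directly, so the
+ * old set_fs()/vfs_writev() juggling is no longer needed.
+ */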
+ ret = kernel_write(file, md_buf, md_buf_len, 0);
+ if (ret < 0)
pr_err("Error writing ALUA metadata file: %s\n", path);
- filp_close(file, NULL);
- return -EIO;
- }
- filp_close(file, NULL);
-
- return 0;
+ fput(file);
+ return (ret < 0) ? -EIO : 0;
}
/*
* Called with tg_pt_gp->tg_pt_gp_md_mutex held
*/
static int core_alua_update_tpg_primary_metadata(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- int primary_state,
- unsigned char *md_buf)
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
- struct t10_wwn *wwn = &su_dev->t10_wwn;
+ unsigned char *md_buf;
+ struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
char path[ALUA_METADATA_PATH_LEN];
- int len;
+ int len, rc;
+
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
memset(path, 0, ALUA_METADATA_PATH_LEN);
- len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
- tg_pt_gp->tg_pt_gp_id, primary_state,
+ tg_pt_gp->tg_pt_gp_id,
+ tg_pt_gp->tg_pt_gp_alua_pending_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(md_buf);
+ return rc;
}
-static int core_alua_do_transition_tg_pt(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- struct se_port *l_port,
- struct se_node_acl *nacl,
- unsigned char *md_buf,
- int new_state,
- int explict)
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;
struct se_port *port;
struct t10_alua_tg_pt_gp_member *mem;
- int old_state = 0;
- /*
- * Save the old primary ALUA access state, and set the current state
- * to ALUA_ACCESS_STATE_TRANSITION.
- */
- old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_TRANSITION);
- tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
- /*
- * Check for the optional ALUA primary state transition delay
- */
- if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
- msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+ bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
@@ -788,7 +989,7 @@ static int core_alua_do_transition_tg_pt(
* change, a device server shall establish a unit attention
* condition for the initiator port associated with every I_T
* nexus with the additional sense code set to ASYMMETRIC
- * ACCESS STATE CHAGED.
+ * ACCESS STATE CHANGED.
*
* After an explicit target port asymmetric access state
* change, a device server shall establish a unit attention
@@ -798,7 +999,7 @@ static int core_alua_do_transition_tg_pt(
* TARGET PORT GROUPS command
*/
atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
spin_lock_bh(&port->sep_alua_lock);
@@ -807,14 +1008,17 @@ static int core_alua_do_transition_tg_pt(
lacl = se_deve->se_lun_acl;
/*
* se_deve->se_lun_acl pointer may be NULL for a
- * entry created without explict Node+MappedLUN ACLs
+ * entry created without explicit Node+MappedLUN ACLs
*/
if (!lacl)
continue;
- if (explict &&
- (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
- (l_port != NULL) && (l_port == port))
+ if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
+ (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_port == port))
continue;
core_scsi3_ua_allocate(lacl->se_lun_nacl,
@@ -825,7 +1029,7 @@ static int core_alua_do_transition_tg_pt(
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/*
@@ -842,20 +1046,102 @@ static int core_alua_do_transition_tg_pt(
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
- core_alua_update_tpg_primary_metadata(tg_pt_gp,
- new_state, md_buf);
+ core_alua_update_tpg_primary_metadata(tg_pt_gp);
mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
}
/*
* Set the current primary ALUA access state to the requested new state
*/
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ tg_pt_gp->tg_pt_gp_alua_pending_state);
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " from primary access state %s to %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
- tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
- core_alua_dump_state(new_state));
+ " from primary access state %s to %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id,
+ core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
+ core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic();
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+ if (tg_pt_gp->tg_pt_gp_transition_complete)
+ complete(tg_pt_gp->tg_pt_gp_transition_complete);
+}
+
+static int core_alua_do_transition_tg_pt(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ int new_state,
+ int explicit)
+{
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ /* Nothing to be done here */
+ if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+ return 0;
+
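+ /*
+ * Transition is entered only internally while a state change is
+ * in flight; it cannot be requested as a target state.
+ */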
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ return -EAGAIN;
+
+ /*
+ * Flush any pending transitions
+ */
+ if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
+ ALUA_ACCESS_STATE_TRANSITION) {
+ /* Make sure the flushed work lands on the newly requested state */
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+ tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+ flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ wait_for_completion(&wait);
+ tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ return 0;
+ }
+
+ /*
+ * Save the old primary ALUA access state, and set the current state
+ * to ALUA_ACCESS_STATE_TRANSITION.
+ */
+ tg_pt_gp->tg_pt_gp_alua_previous_state =
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ ALUA_ACCESS_STATE_TRANSITION);
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+ /*
+ * Check for the optional ALUA primary state transition delay
+ */
+ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+ msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+ /*
+ * Take a reference for workqueue item
+ */
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic();
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
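+ /*
+ * Implicit transitions are delayed by implicit_trans_secs so
+ * initiators polling REPORT TARGET PORT GROUPS can observe the
+ * Transitioning state; explicit STPG requests run immediately
+ * and wait for the work to complete.
+ */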
+ if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
+ unsigned long transition_tmo;
+
+ transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
+ queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+ &tg_pt_gp->tg_pt_gp_transition_work,
+ transition_tmo);
+ } else {
+ tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+ queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+ &tg_pt_gp->tg_pt_gp_transition_work, 0);
+ wait_for_completion(&wait);
+ tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ }
return 0;
}
@@ -866,32 +1152,23 @@ int core_alua_do_port_transition(
struct se_port *l_port,
struct se_node_acl *l_nacl,
int new_state,
- int explict)
+ int explicit)
{
struct se_device *dev;
- struct se_port *port;
- struct se_subsystem_dev *su_dev;
- struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- int primary;
+ int primary, valid_states, rc = 0;
- if (core_alua_check_transition(new_state, &primary) != 0)
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
return -EINVAL;
- md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate buf for ALUA metadata\n");
- return -ENOMEM;
- }
-
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;
atomic_inc(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
/*
* For storage objects that are members of the 'default_lu_gp',
@@ -903,12 +1180,13 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
- md_buf, new_state, explict);
+ l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+ rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
+ new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ smp_mb__after_atomic();
+ return rc;
}
/*
* For all other LU groups aside from 'default_lu_gp', walk all of
@@ -920,21 +1198,20 @@ int core_alua_do_port_transition(
lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
- su_dev = dev->se_sub_dev;
atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&lu_gp->lu_gp_lock);
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &su_dev->t10_alua.tg_pt_gps_list,
+ &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
/*
* If the target behavior port asymmetric access state
- * is changed for any target port group accessiable via
+ * is changed for any target port group accessible via
* a logical unit within a LU group, the target port
* behavior group asymmetric access states for the same
* target port group accessible via other logical units
@@ -944,44 +1221,48 @@ int core_alua_do_port_transition(
continue;
if (l_tg_pt_gp == tg_pt_gp) {
- port = l_port;
- nacl = l_nacl;
+ tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
} else {
- port = NULL;
- nacl = NULL;
+ tg_pt_gp->tg_pt_gp_alua_port = NULL;
+ tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_inc();
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ smp_mb__after_atomic();
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(tg_pt_gp, port,
- nacl, md_buf, new_state, explict);
+ rc = core_alua_do_transition_tg_pt(tg_pt_gp,
+ new_state, explicit);
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
+ if (rc)
+ break;
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&lu_gp->lu_gp_lock);
- pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
- " Group IDs: %hu %s transition to primary state: %s\n",
- config_item_name(&lu_gp->lu_gp_group.cg_item),
- l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
- core_alua_dump_state(new_state));
+ if (!rc) {
+ pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+ " Group IDs: %hu %s transition to primary state: %s\n",
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ l_tg_pt_gp->tg_pt_gp_id,
+ (explicit) ? "explicit" : "implicit",
+ core_alua_dump_state(new_state));
+ }
atomic_dec(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ smp_mb__after_atomic();
+ return rc;
}
/*
@@ -989,13 +1270,18 @@ int core_alua_do_port_transition(
*/
static int core_alua_update_tpg_secondary_metadata(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port,
- unsigned char *md_buf,
- u32 md_buf_len)
+ struct se_port *port)
{
+ unsigned char *md_buf;
struct se_portal_group *se_tpg = port->sep_tpg;
char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
- int len;
+ int len, rc;
+
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
memset(path, 0, ALUA_METADATA_PATH_LEN);
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
@@ -1007,7 +1293,7 @@ static int core_alua_update_tpg_secondary_metadata(
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&port->sep_tg_pt_secondary_offline),
port->sep_tg_pt_secondary_stat);
@@ -1016,18 +1302,19 @@ static int core_alua_update_tpg_secondary_metadata(
se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(md_buf);
+
+ return rc;
}
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port,
- int explict,
+ int explicit,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- u32 md_buf_len;
int trans_delay_msecs;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1048,14 +1335,13 @@ static int core_alua_set_tg_pt_secondary_state(
else
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
- md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
- port->sep_tg_pt_secondary_stat = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ port->sep_tg_pt_secondary_stat = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " to secondary access state: %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ " to secondary access state: %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1070,23 +1356,115 @@ static int core_alua_set_tg_pt_secondary_state(
* secondary state and status
*/
if (port->sep_tg_pt_secondary_write_md) {
- md_buf = kzalloc(md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate md_buf for"
- " secondary ALUA access metadata\n");
- return -ENOMEM;
- }
mutex_lock(&port->sep_tg_pt_md_mutex);
- core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
- md_buf, md_buf_len);
+ core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
mutex_unlock(&port->sep_tg_pt_md_mutex);
+ }
+
+ return 0;
+}
+
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+ u64 first_lba, u64 last_lba)
+{
+ struct t10_alua_lba_map *lba_map;
+
+ lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+ if (!lba_map) {
+ pr_err("Unable to allocate struct t10_alua_lba_map\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+ lba_map->lba_map_first_lba = first_lba;
+ lba_map->lba_map_last_lba = last_lba;
+
+ list_add_tail(&lba_map->lba_map_list, list);
+ return lba_map;
+}
- kfree(md_buf);
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+ int pg_id, int state)
+{
+ struct t10_alua_lba_map_member *lba_map_mem;
+
+ list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+ pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+ return -EINVAL;
+ }
}
+ lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+ if (!lba_map_mem) {
+ pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+ return -ENOMEM;
+ }
+ lba_map_mem->lba_map_mem_alua_state = state;
+ lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
+
+ list_add_tail(&lba_map_mem->lba_map_mem_list,
+ &lba_map->lba_map_mem_list);
return 0;
}
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+ struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+ struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+ list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+ lba_map_list) {
+ list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+ &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ list_del(&lba_map_mem->lba_map_mem_list);
+ kmem_cache_free(t10_alua_lba_map_mem_cache,
+ lba_map_mem);
+ }
+ list_del(&lba_map->lba_map_list);
+ kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+ }
+}
+
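+/*
+ * Swap in a new LBA map for the device and toggle the LBA-dependent
+ * (LBD) supported-states bit on every valid target port group to match.
+ */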
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+ int segment_size, int segment_mult)
+{
+ struct list_head old_lba_map_list;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ int activate = 0, supported;
+
+ INIT_LIST_HEAD(&old_lba_map_list);
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ dev->t10_alua.lba_map_segment_size = segment_size;
+ dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+ list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+ if (lba_map_list) {
+ list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+ activate = 1;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+ tg_pt_gp_list) {
+
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
+ continue;
+ supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (activate)
+ supported |= ALUA_LBD_SUP;
+ else
+ supported &= ~ALUA_LBD_SUP;
+ tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+ }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ core_alua_free_lba_map(&old_lba_map_list);
+}
+
struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
@@ -1220,7 +1598,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct se_device is released via core_alua_free_lu_gp_mem().
*
* If the passed lu_gp does NOT match the default_lu_gp, assume
- * we want to re-assocate a given lu_gp_mem with default_lu_gp.
+ * we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
if (lu_gp != default_lu_gp)
@@ -1239,14 +1617,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return;
-
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
return;
@@ -1329,10 +1702,8 @@ void __core_alua_drop_lu_gp_mem(
spin_unlock(&lu_gp->lu_gp_lock);
}
-struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
- struct se_subsystem_dev *su_dev,
- const char *name,
- int def_group)
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+ const char *name, int def_group)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1346,30 +1717,39 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
- tg_pt_gp->tg_pt_gp_su_dev = su_dev;
- tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
+ INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+ core_alua_do_transition_tg_pt_work);
+ tg_pt_gp->tg_pt_gp_dev = dev;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+ ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
/*
- * Enable both explict and implict ALUA support by default
+ * Enable both explicit and implicit ALUA support by default
*/
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+ TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
/*
* Set the default Active/NonOptimized Delay in milliseconds
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
+
+ /*
+ * Enable all supported states
+ */
+ tg_pt_gp->tg_pt_gp_alua_supported_states =
+ ALUA_T_SUP | ALUA_O_SUP |
+ ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
if (def_group) {
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
- su_dev->t10_alua.alua_tg_pt_gps_counter++;
+ dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
- su_dev->t10_alua.alua_tg_pt_gps_count++;
+ dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &su_dev->t10_alua.tg_pt_gps_list);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ &dev->t10_alua.tg_pt_gps_list);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
return tg_pt_gp;
@@ -1379,9 +1759,10 @@ int core_alua_set_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
u16 tg_pt_gp_id)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
u16 tg_pt_gp_id_tmp;
+
/*
* The tg_pt_gp->tg_pt_gp_id may only be set once..
*/
@@ -1391,19 +1772,19 @@ int core_alua_set_tg_pt_gp_id(
return -EINVAL;
}
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
return -ENOSPC;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
- su_dev->t10_alua.alua_tg_pt_gps_counter++;
+ dev->t10_alua.alua_tg_pt_gps_counter++;
- list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
+ list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
if (!tg_pt_gp_id)
@@ -1411,7 +1792,7 @@ again:
pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return -EINVAL;
}
}
@@ -1419,9 +1800,9 @@ again:
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &su_dev->t10_alua.tg_pt_gps_list);
- su_dev->t10_alua.alua_tg_pt_gps_count++;
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ &dev->t10_alua.tg_pt_gps_list);
+ dev->t10_alua.alua_tg_pt_gps_count++;
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return 0;
}
@@ -1450,20 +1831,24 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+
/*
* Once we have reached this point, config_item_put() has already
* been called from target_core_alua_drop_tg_pt_gp().
*
* Here we remove *tg_pt_gp from the global list so that
- * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
+ * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list);
- su_dev->t10_alua.alua_tg_pt_gps_counter--;
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ dev->t10_alua.alua_tg_pt_gps_counter--;
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
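+ /*
+ * Ensure any queued transition work has completed before the
+ * group is torn down.
+ */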
+ flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
@@ -1472,6 +1857,7 @@ void core_alua_free_tg_pt_gp(
*/
while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
cpu_relax();
+
/*
* Release reference to struct t10_alua_tg_pt_gp from all associated
* struct se_port.
@@ -1491,13 +1877,13 @@ void core_alua_free_tg_pt_gp(
* core_alua_free_tg_pt_gp_mem().
*
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
- * assume we want to re-assocate a given tg_pt_gp_mem with
+ * assume we want to re-associate a given tg_pt_gp_mem with
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
+ if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
} else
tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1511,14 +1897,9 @@ void core_alua_free_tg_pt_gp(
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
return;
@@ -1544,25 +1925,24 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
}
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
- struct se_subsystem_dev *su_dev,
- const char *name)
+ struct se_device *dev, const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
}
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return NULL;
}
@@ -1570,11 +1950,11 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
static void core_alua_put_tg_pt_gp_from_name(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
/*
@@ -1610,16 +1990,11 @@ static void __core_alua_drop_tg_pt_gp_mem(
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return len;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
return len;
@@ -1653,7 +2028,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
{
struct se_portal_group *tpg;
struct se_lun *lun;
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct se_device *dev = port->sep_lun->lun_se_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
@@ -1662,13 +2037,9 @@ ssize_t core_alua_store_tg_pt_gp_info(
tpg = port->sep_tpg;
lun = port->sep_lun;
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
- pr_warn("SPC3_ALUA_EMULATED not enabled for"
- " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
- tpg->se_tpg_tfo->tpg_get_tag(tpg),
- config_item_name(&lun->lun_group.cg_item));
- return -EINVAL;
- }
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ return 0;
if (count > TG_PT_GROUP_NAME_BUF) {
pr_err("ALUA Target Port Group alias too large!\n");
@@ -1686,18 +2057,11 @@ ssize_t core_alua_store_tg_pt_gp_info(
* struct t10_alua_tg_pt_gp. This reference is released with
* core_alua_put_tg_pt_gp_from_name() below.
*/
- tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+ tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
strstrip(buf));
if (!tg_pt_gp_new)
return -ENODEV;
}
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem) {
- if (tg_pt_gp_new)
- core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
- pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
- return -EINVAL;
- }
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
@@ -1720,7 +2084,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count;
@@ -1752,13 +2116,13 @@ ssize_t core_alua_show_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
- (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
- return sprintf(page, "Implict and Explict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
- return sprintf(page, "Implict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
- return sprintf(page, "Explict\n");
+ if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
+ (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
+ return sprintf(page, "Implicit and Explicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
+ return sprintf(page, "Implicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
+ return sprintf(page, "Explicit\n");
else
return sprintf(page, "None\n");
}
@@ -1771,10 +2135,10 @@ ssize_t core_alua_store_access_type(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_access_type\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
pr_err("Illegal value for alua_access_type:"
@@ -1783,11 +2147,11 @@ ssize_t core_alua_store_access_type(
}
if (tmp == 3)
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+ TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
else if (tmp == 2)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
else if (tmp == 1)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
else
tg_pt_gp->tg_pt_gp_alua_access_type = 0;
@@ -1809,10 +2173,10 @@ ssize_t core_alua_store_nonop_delay_msecs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract nonop_delay_msecs\n");
- return -EINVAL;
+ return ret;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
pr_err("Passed nonop_delay_msecs: %lu, exceeds"
@@ -1840,10 +2204,10 @@ ssize_t core_alua_store_trans_delay_msecs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract trans_delay_msecs\n");
- return -EINVAL;
+ return ret;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
pr_err("Passed trans_delay_msecs: %lu, exceeds"
@@ -1856,6 +2220,37 @@ ssize_t core_alua_store_trans_delay_msecs(
return count;
}
+ssize_t core_alua_show_implicit_trans_secs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
+}
+
+ssize_t core_alua_store_implicit_trans_secs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = kstrtoul(page, 0, &tmp);
+ if (ret < 0) {
+ pr_err("Unable to extract implicit_trans_secs\n");
+ return ret;
+ }
+ if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
+ pr_err("Passed implicit_trans_secs: %lu, exceeds"
+ " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
+ ALUA_MAX_IMPLICIT_TRANS_SECS);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
+
+ return count;
+}
+
ssize_t core_alua_show_preferred_bit(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
@@ -1871,10 +2266,10 @@ ssize_t core_alua_store_preferred_bit(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract preferred ALUA value\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
@@ -1906,10 +2301,10 @@ ssize_t core_alua_store_offline_bit(
if (!lun->lun_sep)
return -ENODEV;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_offline value\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
@@ -1945,14 +2340,14 @@ ssize_t core_alua_store_secondary_status(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_status\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != ALUA_STATUS_NONE) &&
- (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
@@ -1978,10 +2373,10 @@ ssize_t core_alua_store_secondary_write_metadata(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_write_md\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_write_md:"
@@ -1993,32 +2388,12 @@ ssize_t core_alua_store_secondary_write_metadata(
return count;
}
-int core_setup_alua(struct se_device *dev, int force_pt)
+int core_setup_alua(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
- struct t10_alua_lu_gp_member *lu_gp_mem;
- /*
- * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
- * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
- * cause a problem because libata and some SATA RAID HBAs appear
- * under Linux/SCSI, but emulate SCSI logic themselves.
- */
- if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
- alua->alua_type = SPC_ALUA_PASSTHROUGH;
- alua->alua_state_check = &core_alua_state_check_nop;
- pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
- " emulation\n", dev->transport->name);
- return 0;
- }
- /*
- * If SPC-3 or above is reported by real or emulated struct se_device,
- * use emulated ALUA.
- */
- if (dev->transport->get_device_rev(dev) >= SCSI_3) {
- pr_debug("%s: Enabling ALUA Emulation for SPC-3"
- " device\n", dev->transport->name);
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
@@ -2027,8 +2402,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
if (IS_ERR(lu_gp_mem))
return PTR_ERR(lu_gp_mem);
- alua->alua_type = SPC3_ALUA_EMULATED;
- alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
default_lu_gp);
@@ -2037,11 +2410,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
dev->transport->name);
- } else {
- alua->alua_type = SPC2_ALUA_DISABLED;
- alua->alua_state_check = &core_alua_state_check_nop;
- pr_debug("%s: Disabling ALUA Emulation for SPC-2"
- " device\n", dev->transport->name);
}
return 0;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index c5b4ecd3e74..0a7d65e8040 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -7,29 +7,41 @@
* from spc4r17 section 6.4.2 Table 135
*/
#define TPGS_NO_ALUA 0x00
-#define TPGS_IMPLICT_ALUA 0x10
-#define TPGS_EXPLICT_ALUA 0x20
+#define TPGS_IMPLICIT_ALUA 0x10
+#define TPGS_EXPLICIT_ALUA 0x20
/*
* ASYMMETRIC ACCESS STATE field
*
- * from spc4r17 section 6.27 Table 245
+ * from spc4r36j section 6.37 Table 307
*/
-#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
+#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf
/*
+ * Supported asymmetric access states bitmask, from spc4r36j section 6.37 Table 306
+ */
+#define ALUA_T_SUP 0x80
+#define ALUA_O_SUP 0x40
+#define ALUA_LBD_SUP 0x10
+#define ALUA_U_SUP 0x08
+#define ALUA_S_SUP 0x04
+#define ALUA_AN_SUP 0x02
+#define ALUA_AO_SUP 0x01
+
+/*
* REPORT_TARGET_PORT_GROUP STATUS CODE
*
* from spc4r17 section 6.27 Table 246
*/
#define ALUA_STATUS_NONE 0x00
-#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
-#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
+#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
@@ -46,12 +58,18 @@
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
- * Used for implict and explict ALUA transitional delay, that is disabled
+ * Used for implicit and explicit ALUA transitional delay, that is disabled
* by default, and is intended to be used for debugging client side ALUA code.
*/
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
+ * Used for the recommended application client implicit transition timeout
+ * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
+ */
+#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
+#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
+/*
* Used by core_alua_update_tpg_primary_metadata() and
* core_alua_update_tpg_secondary_metadata()
*/
@@ -61,18 +79,30 @@
*/
#define ALUA_SECONDARY_METADATA_WWN_LEN 256
+/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
+#define ALUA_MD_BUF_LEN 1024
+
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+extern struct kmem_cache *t10_alua_lba_map_cache;
+extern struct kmem_cache *t10_alua_lba_map_mem_cache;
-extern int target_emulate_report_target_port_groups(struct se_task *);
-extern int target_emulate_set_target_port_groups(struct se_task *);
+extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
+extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
+ struct list_head *, u64, u64);
+extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
+extern void core_alua_free_lba_map(struct list_head *);
+extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
+ int, int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
@@ -85,7 +115,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
- struct se_subsystem_dev *, const char *, int);
+ struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *);
@@ -107,6 +137,10 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
+extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
+ char *);
+extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
@@ -121,6 +155,7 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);
+extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
deleted file mode 100644
index f3d71fa88a2..00000000000
--- a/drivers/target/target_core_cdb.c
+++ /dev/null
@@ -1,1279 +0,0 @@
-/*
- * CDB emulation for non-READ/WRITE commands.
- *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_backend.h>
-#include <target/target_core_fabric.h>
-
-#include "target_core_internal.h"
-#include "target_core_ua.h"
-
-static void
-target_fill_alua_data(struct se_port *port, unsigned char *buf)
-{
- struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-
- /*
- * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
- */
- buf[5] = 0x80;
-
- /*
- * Set TPGS field for explict and/or implict ALUA access type
- * and opteration.
- *
- * See spc4r17 section 6.4.2 Table 135
- */
- if (!port)
- return;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (tg_pt_gp)
- buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-}
-
-static int
-target_emulate_inquiry_std(struct se_cmd *cmd)
-{
- struct se_lun *lun = cmd->se_lun;
- struct se_device *dev = cmd->se_dev;
- struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
- unsigned char *buf;
-
- /*
- * Make sure we at least have 6 bytes of INQUIRY response
- * payload going back for EVPD=0
- */
- if (cmd->data_length < 6) {
- pr_err("SCSI Inquiry payload length: %u"
- " too small for EVPD=0\n", cmd->data_length);
- return -EINVAL;
- }
-
- buf = transport_kmap_data_sg(cmd);
-
- if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
- buf[0] = 0x3f; /* Not connected */
- } else {
- buf[0] = dev->transport->get_device_type(dev);
- if (buf[0] == TYPE_TAPE)
- buf[1] = 0x80;
- }
- buf[2] = dev->transport->get_device_rev(dev);
-
- /*
- * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
- *
- * SPC4 says:
- * A RESPONSE DATA FORMAT field set to 2h indicates that the
- * standard INQUIRY data is in the format defined in this
- * standard. Response data format values less than 2h are
- * obsolete. Response data format values greater than 2h are
- * reserved.
- */
- buf[3] = 2;
-
- /*
- * Enable SCCS and TPGS fields for Emulated ALUA
- */
- if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
- target_fill_alua_data(lun->lun_sep, buf);
-
- if (cmd->data_length < 8) {
- buf[4] = 1; /* Set additional length to 1 */
- goto out;
- }
-
- buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
-
- /*
- * Do not include vendor, product, reversion info in INQUIRY
- * response payload for cdbs with a small allocation length.
- */
- if (cmd->data_length < 36) {
- buf[4] = 3; /* Set additional length to 3 */
- goto out;
- }
-
- snprintf(&buf[8], 8, "LIO-ORG");
- snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
- snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
- buf[4] = 31; /* Set additional length to 31 */
-
-out:
- transport_kunmap_data_sg(cmd);
- return 0;
-}
-
-/* unit serial number */
-static int
-target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
-{
- struct se_device *dev = cmd->se_dev;
- u16 len = 0;
-
- if (dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- u32 unit_serial_len;
-
- unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
- unit_serial_len++; /* For NULL Terminator */
-
- if (((len + 4) + unit_serial_len) > cmd->data_length) {
- len += unit_serial_len;
- buf[2] = ((len >> 8) & 0xff);
- buf[3] = (len & 0xff);
- return 0;
- }
- len += sprintf(&buf[4], "%s",
- dev->se_sub_dev->t10_wwn.unit_serial);
- len++; /* Extra Byte for NULL Terminator */
- buf[3] = len;
- }
- return 0;
-}
-
-static void
-target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
-{
- unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
- int cnt;
- bool next = true;
-
- /*
- * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
- * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
- * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
- * to complete the payload. These are based on the VPD=0x80 PRODUCT SERIAL
- * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
- * per-device uniqueness.
- */
- for (cnt = 0; *p && cnt < 13; p++) {
- int val = hex_to_bin(*p);
-
- if (val < 0)
- continue;
-
- if (next) {
- next = false;
- buf[cnt++] |= val;
- } else {
- next = true;
- buf[cnt] = val << 4;
- }
- }
-}
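A standalone sketch of the nibble-packing loop above: hex characters from the unit serial fill the low nibble of the current byte (whose high nibble the caller preset), then the high nibble of the next byte, skipping non-hex characters. hex_to_bin_sketch() is a user-space stand-in for the kernel's hex_to_bin():

#include <ctype.h>
#include <stdio.h>

static int hex_to_bin_sketch(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower((unsigned char)ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	unsigned char buf[13] = { 0x50 };	/* high nibble preset by caller */
	const char *p = "abc-123";		/* '-' is skipped, as in a serial */
	int cnt = 0, next_low = 1;

	for (; *p && cnt < 13; p++) {
		int val = hex_to_bin_sketch(*p);

		if (val < 0)
			continue;
		if (next_low) {
			next_low = 0;
			buf[cnt++] |= val;	/* fill low nibble, advance */
		} else {
			next_low = 1;
			buf[cnt] = val << 4;	/* start next byte's high nibble */
		}
	}
	for (int i = 0; i <= cnt; i++)
		printf("%02x", buf[i]);
	printf("\n");	/* prints "5abc1230" */
	return 0;
}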
-
-/*
- * Device identification VPD, for a complete list of
- * DESIGNATOR TYPEs see spc4r17 Table 459.
- */
-static int
-target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_lun *lun = cmd->se_lun;
- struct se_port *port = NULL;
- struct se_portal_group *tpg = NULL;
- struct t10_alua_lu_gp_member *lu_gp_mem;
- struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
- u32 prod_len;
- u32 unit_serial_len, off = 0;
- u16 len = 0, id_len;
-
- off = 4;
-
- /*
- * NAA IEEE Registered Extended Assigned designator format, see
- * spc4r17 section 7.7.3.6.5
- *
- * We depend upon a target_core_mod/ConfigFS provided
- * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
- * value in order to return the NAA id.
- */
- if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
- goto check_t10_vend_desc;
-
- if (off + 20 > cmd->data_length)
- goto check_t10_vend_desc;
-
- /* CODE SET == Binary */
- buf[off++] = 0x1;
-
- /* Set ASSOCIATION == addressed logical unit: 00b */
- buf[off] = 0x00;
-
- /* Identifier/Designator type == NAA identifier */
- buf[off++] |= 0x3;
- off++;
-
- /* Identifier/Designator length */
- buf[off++] = 0x10;
-
- /*
- * Start NAA IEEE Registered Extended Identifier/Designator
- */
- buf[off++] = (0x6 << 4);
-
- /*
- * Use OpenFabrics IEEE Company ID: 00 14 05
- */
- buf[off++] = 0x01;
- buf[off++] = 0x40;
- buf[off] = (0x5 << 4);
-
- /*
- * Return ConfigFS Unit Serial Number information for
- * VENDOR_SPECIFIC_IDENTIFIER and
- * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
- */
- target_parse_naa_6h_vendor_specific(dev, &buf[off]);
-
- len = 20;
- off = (len + 4);
-
-check_t10_vend_desc:
- /*
- * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
- */
- id_len = 8; /* For Vendor field */
- prod_len = 4; /* For VPD Header */
- prod_len += 8; /* For Vendor field */
- prod_len += strlen(prod);
- prod_len++; /* For : */
-
- if (dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- unit_serial_len =
- strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
- unit_serial_len++; /* For NULL Terminator */
-
- if ((len + (id_len + 4) +
- (prod_len + unit_serial_len)) >
- cmd->data_length) {
- len += (prod_len + unit_serial_len);
- goto check_port;
- }
- id_len += sprintf(&buf[off+12], "%s:%s", prod,
- &dev->se_sub_dev->t10_wwn.unit_serial[0]);
- }
- buf[off] = 0x2; /* ASCII */
- buf[off+1] = 0x1; /* T10 Vendor ID */
- buf[off+2] = 0x0;
- memcpy(&buf[off+4], "LIO-ORG", 8);
- /* Extra Byte for NULL Terminator */
- id_len++;
- /* Identifier Length */
- buf[off+3] = id_len;
- /* Header size for Designation descriptor */
- len += (id_len + 4);
- off += (id_len + 4);
- /*
- * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
- */
-check_port:
- port = lun->lun_sep;
- if (port) {
- struct t10_alua_lu_gp *lu_gp;
- u32 padding, scsi_name_len;
- u16 lu_gp_id = 0;
- u16 tg_pt_gp_id = 0;
- u16 tpgt;
-
- tpg = port->sep_tpg;
- /*
- * Relative target port identifier, see spc4r17
- * section 7.7.3.7
- *
- * Get the PROTOCOL IDENTIFIER as defined by spc4r17
- * section 7.5.1 Table 362
- */
- if (((len + 4) + 8) > cmd->data_length) {
- len += 8;
- goto check_tpgi;
- }
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
- buf[off++] |= 0x1; /* CODE SET == Binary */
- buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOCIATION == target port: 01b */
- buf[off] |= 0x10;
- /* DESIGNATOR TYPE == Relative target port identifier */
- buf[off++] |= 0x4;
- off++; /* Skip over Reserved */
- buf[off++] = 4; /* DESIGNATOR LENGTH */
- /* Skip over Obsolete field in RTPI payload
- * in Table 472 */
- off += 2;
- buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
- buf[off++] = (port->sep_rtpi & 0xff);
- len += 8; /* Header size + Designation descriptor */
- /*
- * Target port group identifier, see spc4r17
- * section 7.7.3.8
- *
- * Get the PROTOCOL IDENTIFIER as defined by spc4r17
- * section 7.5.1 Table 362
- */
-check_tpgi:
- if (dev->se_sub_dev->t10_alua.alua_type !=
- SPC3_ALUA_EMULATED)
- goto check_scsi_name;
-
- if (((len + 4) + 8) > cmd->data_length) {
- len += 8;
- goto check_lu_gp;
- }
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- goto check_lu_gp;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (!tg_pt_gp) {
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- goto check_lu_gp;
- }
- tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
- buf[off++] |= 0x1; /* CODE SET == Binary */
- buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOCIATION == target port: 01b */
- buf[off] |= 0x10;
- /* DESIGNATOR TYPE == Target port group identifier */
- buf[off++] |= 0x5;
- off++; /* Skip over Reserved */
- buf[off++] = 4; /* DESIGNATOR LENGTH */
- off += 2; /* Skip over Reserved Field */
- buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
- buf[off++] = (tg_pt_gp_id & 0xff);
- len += 8; /* Header size + Designation descriptor */
- /*
- * Logical Unit Group identifier, see spc4r17
- * section 7.7.3.8
- */
-check_lu_gp:
- if (((len + 4) + 8) > cmd->data_length) {
- len += 8;
- goto check_scsi_name;
- }
- lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!lu_gp_mem)
- goto check_scsi_name;
-
- spin_lock(&lu_gp_mem->lu_gp_mem_lock);
- lu_gp = lu_gp_mem->lu_gp;
- if (!lu_gp) {
- spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
- goto check_scsi_name;
- }
- lu_gp_id = lu_gp->lu_gp_id;
- spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
-
- buf[off++] |= 0x1; /* CODE SET == Binary */
- /* DESIGNATOR TYPE == Logical Unit Group identifier */
- buf[off++] |= 0x6;
- off++; /* Skip over Reserved */
- buf[off++] = 4; /* DESIGNATOR LENGTH */
- off += 2; /* Skip over Reserved Field */
- buf[off++] = ((lu_gp_id >> 8) & 0xff);
- buf[off++] = (lu_gp_id & 0xff);
- len += 8; /* Header size + Designation descriptor */
- /*
- * SCSI name string designator, see spc4r17
- * section 7.7.3.11
- *
- * Get the PROTOCOL IDENTIFIER as defined by spc4r17
- * section 7.5.1 Table 362
- */
-check_scsi_name:
- scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
- /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
- scsi_name_len += 10;
- /* Check for 4-byte padding */
- padding = ((-scsi_name_len) & 3);
- if (padding != 0)
- scsi_name_len += padding;
- /* Header size + Designation descriptor */
- scsi_name_len += 4;
-
- if (((len + 4) + scsi_name_len) > cmd->data_length) {
- len += scsi_name_len;
- goto set_len;
- }
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
- buf[off++] |= 0x3; /* CODE SET == UTF-8 */
- buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOCIATION == target port: 01b */
- buf[off] |= 0x10;
- /* DESIGNATOR TYPE == SCSI name string */
- buf[off++] |= 0x8;
- off += 2; /* Skip over Reserved and length */
- /*
- * SCSI name string identifier containing $FABRIC_MOD
- * dependent information. For LIO-Target and iSCSI
- * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
- * UTF-8 encoding.
- */
- tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
- scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
- tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
- scsi_name_len += 1 /* Include NULL terminator */;
- /*
- * The null-terminated, null-padded (see 4.4.2) SCSI
- * NAME STRING field contains a UTF-8 format string.
- * The number of bytes in the SCSI NAME STRING field
- * (i.e., the value in the DESIGNATOR LENGTH field)
- * shall be no larger than 256 and shall be a multiple
- * of four.
- */
- if (padding)
- scsi_name_len += padding;
-
- buf[off-1] = scsi_name_len;
- off += scsi_name_len;
- /* Header size + Designation descriptor */
- len += (scsi_name_len + 4);
- }
-set_len:
- buf[2] = ((len >> 8) & 0xff);
- buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
- return 0;
-}
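A quick illustration of the padding computation used above for the SCSI name string designator: (-len) & 3 yields the number of bytes needed to round len up to the next multiple of four, which is what spc4r17 requires of the DESIGNATOR LENGTH here:

#include <stdio.h>

int main(void)
{
	for (unsigned int len = 20; len <= 24; len++)
		printf("len=%u padding=%u padded=%u\n",
		       len, (-len) & 3, len + ((-len) & 3));
	return 0;	/* padding is 0, 3, 2, 1, 0 for len 20..24 */
}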
-
-/* Extended INQUIRY Data VPD Page */
-static int
-target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
-{
- if (cmd->data_length < 60)
- return 0;
-
- buf[3] = 0x3c;
- /* Set HEADSUP, ORDSUP, SIMPSUP */
- buf[5] = 0x07;
-
- /* If WriteCache emulation is enabled, set V_SUP */
- if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
- buf[6] = 0x01;
- return 0;
-}
-
-/* Block Limits VPD page */
-static int
-target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
-{
- struct se_device *dev = cmd->se_dev;
- int have_tp = 0;
-
- /*
- * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
- * emulate_tpu=1 or emulate_tpws=1 we expect a
- * different page length for Thin Provisioning.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
- have_tp = 1;
-
- if (cmd->data_length < (0x10 + 4)) {
- pr_debug("Received data_length: %u"
- " too small for EVPD 0xb0\n",
- cmd->data_length);
- return -EINVAL;
- }
-
- if (have_tp && cmd->data_length < (0x3c + 4)) {
- pr_debug("Received data_length: %u"
- " too small for TPE=1 EVPD 0xb0\n",
- cmd->data_length);
- have_tp = 0;
- }
-
- buf[0] = dev->transport->get_device_type(dev);
- buf[3] = have_tp ? 0x3c : 0x10;
-
- /* Set WSNZ to 1 */
- buf[4] = 0x01;
-
- /*
- * Set OPTIMAL TRANSFER LENGTH GRANULARITY
- */
- put_unaligned_be16(1, &buf[6]);
-
- /*
- * Set MAXIMUM TRANSFER LENGTH
- */
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
-
- /*
- * Set OPTIMAL TRANSFER LENGTH
- */
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
-
- /*
- * Exit now if we don't support TP or the initiator sent too
- * short a buffer.
- */
- if (!have_tp || cmd->data_length < (0x3c + 4))
- return 0;
-
- /*
- * Set MAXIMUM UNMAP LBA COUNT
- */
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
-
- /*
- * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
- */
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
- &buf[24]);
-
- /*
- * Set OPTIMAL UNMAP GRANULARITY
- */
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
-
- /*
- * UNMAP GRANULARITY ALIGNMENT
- */
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
- &buf[32]);
- if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
- buf[32] |= 0x80; /* Set the UGAVALID bit */
-
- return 0;
-}
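For illustration, a user-space sketch of reading back the big-endian Block Limits fields written above; get_be32() mimics the kernel's get_unaligned_be32(), and the sample values are arbitrary:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	uint8_t vpd[64] = { 0 };

	/* mimic the writes above: max 1024 sectors, optimal 128 */
	vpd[10] = 0x04;
	vpd[15] = 0x80;

	printf("MAXIMUM TRANSFER LENGTH: %u\n", get_be32(&vpd[8]));	/* 1024 */
	printf("OPTIMAL TRANSFER LENGTH: %u\n", get_be32(&vpd[12]));	/* 128 */
	return 0;
}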
-
-/* Block Device Characteristics VPD page */
-static int
-target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
-{
- struct se_device *dev = cmd->se_dev;
-
- buf[0] = dev->transport->get_device_type(dev);
- buf[3] = 0x3c;
-
- if (cmd->data_length >= 5 &&
- dev->se_sub_dev->se_dev_attrib.is_nonrot)
- buf[5] = 1;
-
- return 0;
-}
-
-/* Thin Provisioning VPD */
-static int
-target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
-{
- struct se_device *dev = cmd->se_dev;
-
- /*
- * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
- *
- * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
- * zero, then the page length shall be set to 0004h. If the DP bit
- * is set to one, then the page length shall be set to the value
- * defined in table 162.
- */
- buf[0] = dev->transport->get_device_type(dev);
-
- /*
- * Set Hardcoded length mentioned above for DP=0
- */
- put_unaligned_be16(0x0004, &buf[2]);
-
- /*
- * The THRESHOLD EXPONENT field indicates the threshold set size in
- * LBAs as a power of 2 (i.e., the threshold set size is equal to
- * 2^(threshold exponent)).
- *
- * Note that this is currently set to 0x00 as mkp says it will be
- * changing again. We can enable this once it has settled in T10
- * and is actually used by Linux/SCSI ML code.
- */
- buf[4] = 0x00;
-
- /*
- * A TPU bit set to one indicates that the device server supports
- * the UNMAP command (see 5.25). A TPU bit set to zero indicates
- * that the device server does not support the UNMAP command.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
- buf[5] = 0x80;
-
- /*
- * A TPWS bit set to one indicates that the device server supports
- * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
- * A TPWS bit set to zero indicates that the device server does not
- * support the use of the WRITE SAME (16) command to unmap LBAs.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
- buf[5] |= 0x40;
-
- return 0;
-}
-
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
-
-static struct {
- uint8_t page;
- int (*emulate)(struct se_cmd *, unsigned char *);
-} evpd_handlers[] = {
- { .page = 0x00, .emulate = target_emulate_evpd_00 },
- { .page = 0x80, .emulate = target_emulate_evpd_80 },
- { .page = 0x83, .emulate = target_emulate_evpd_83 },
- { .page = 0x86, .emulate = target_emulate_evpd_86 },
- { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
- { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
- { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
-};
-
-/* supported vital product data pages */
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
-{
- int p;
-
- if (cmd->data_length < 8)
- return 0;
- /*
- * Only report the INQUIRY EVPD=1 pages after a valid NAA
- * Registered Extended LUN WWN has been set via ConfigFS
- * during device creation/restart.
- */
- if (cmd->se_dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- buf[3] = ARRAY_SIZE(evpd_handlers);
- for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
- cmd->data_length - 4); ++p)
- buf[p + 4] = evpd_handlers[p].page;
- }
-
- return 0;
-}
-
-int target_emulate_inquiry(struct se_task *task)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf;
- unsigned char *cdb = cmd->t_task_cdb;
- int p, ret;
-
- if (!(cdb[1] & 0x1)) {
- if (cdb[2]) {
- pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
- cdb[2]);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
-
- ret = target_emulate_inquiry_std(cmd);
- goto out;
- }
-
- /*
- * Make sure we at least have 4 bytes of INQUIRY response
- * payload for 0x00 going back for EVPD=1. Note that 0x80
- * and 0x83 will check for enough payload data length and
- * jump to set_len: label when there is not enough inquiry EVPD
- * payload length left for the next outgoing EVPD metadata.
- */
- if (cmd->data_length < 4) {
- pr_err("SCSI Inquiry payload length: %u"
- " too small for EVPD=1\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
-
- buf = transport_kmap_data_sg(cmd);
-
- buf[0] = dev->transport->get_device_type(dev);
-
- for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
- if (cdb[2] == evpd_handlers[p].page) {
- buf[1] = cdb[2];
- ret = evpd_handlers[p].emulate(cmd, buf);
- goto out_unmap;
- }
- }
-
- pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
-
-out_unmap:
- transport_kunmap_data_sg(cmd);
-out:
- if (!ret) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
- return ret;
-}
-
-int target_emulate_readcapacity(struct se_task *task)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf;
- unsigned long long blocks_long = dev->transport->get_blocks(dev);
- u32 blocks;
-
- if (blocks_long >= 0x00000000ffffffff)
- blocks = 0xffffffff;
- else
- blocks = (u32)blocks_long;
-
- buf = transport_kmap_data_sg(cmd);
-
- buf[0] = (blocks >> 24) & 0xff;
- buf[1] = (blocks >> 16) & 0xff;
- buf[2] = (blocks >> 8) & 0xff;
- buf[3] = blocks & 0xff;
- buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
- /*
- * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
- put_unaligned_be32(0xFFFFFFFF, &buf[0]);
-
- transport_kunmap_data_sg(cmd);
-
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
-}
-
-int target_emulate_readcapacity_16(struct se_task *task)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf;
- unsigned long long blocks = dev->transport->get_blocks(dev);
-
- buf = transport_kmap_data_sg(cmd);
-
- buf[0] = (blocks >> 56) & 0xff;
- buf[1] = (blocks >> 48) & 0xff;
- buf[2] = (blocks >> 40) & 0xff;
- buf[3] = (blocks >> 32) & 0xff;
- buf[4] = (blocks >> 24) & 0xff;
- buf[5] = (blocks >> 16) & 0xff;
- buf[6] = (blocks >> 8) & 0xff;
- buf[7] = blocks & 0xff;
- buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
- /*
- * Set Thin Provisioning Enable bit following sbc3r22 in section
- * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
- buf[14] = 0x80;
-
- transport_kunmap_data_sg(cmd);
-
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
-}
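A sketch of the initiator-side convention the two functions above rely on: READ CAPACITY (10) returning 0xffffffff as the last LBA signals that the initiator must retry with READ CAPACITY (16). This is hypothetical user-space code, not part of the driver:

#include <stdint.h>
#include <stdio.h>

static uint32_t last_lba_from_rc10(const uint8_t *rc, int *need_rc16)
{
	uint32_t lba = ((uint32_t)rc[0] << 24) | ((uint32_t)rc[1] << 16) |
		       ((uint32_t)rc[2] << 8) | rc[3];

	*need_rc16 = (lba == 0xffffffff);	/* sentinel set above */
	return lba;
}

int main(void)
{
	uint8_t rc[8] = { 0xff, 0xff, 0xff, 0xff };	/* as written for TP devices */
	int need_rc16;

	last_lba_from_rc10(rc, &need_rc16);
	printf("need READ CAPACITY (16): %s\n", need_rc16 ? "yes" : "no");
	return 0;
}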
-
-static int
-target_modesense_rwrecovery(unsigned char *p)
-{
- p[0] = 0x01;
- p[1] = 0x0a;
-
- return 12;
-}
-
-static int
-target_modesense_control(struct se_device *dev, unsigned char *p)
-{
- p[0] = 0x0a;
- p[1] = 0x0a;
- p[2] = 2;
- /*
- * From spc4r23, 7.4.7 Control mode page
- *
- * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
- * restrictions on the algorithm used for reordering commands
- * having the SIMPLE task attribute (see SAM-4).
- *
- * Table 368 -- QUEUE ALGORITHM MODIFIER field
- * Code Description
- * 0h Restricted reordering
- * 1h Unrestricted reordering allowed
- * 2h to 7h Reserved
- * 8h to Fh Vendor specific
- *
- * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
- * the device server shall order the processing sequence of commands
- * having the SIMPLE task attribute such that data integrity is maintained
- * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
- * requests is halted at any time, the final value of all data observable
- * on the medium shall be the same as if all the commands had been processed
- * with the ORDERED task attribute).
- *
- * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
- * device server may reorder the processing sequence of commands having the
- * SIMPLE task attribute in any manner. Any data integrity exposures related to
- * command sequence order shall be explicitly handled by the application client
- * through the selection of appropriate commands and task attributes.
- */
- p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
- /*
- * From spc4r17, section 7.4.6 Control mode Page
- *
- * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
- *
- * 00b: The logical unit shall clear any unit attention condition
- * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
- * status and shall not establish a unit attention condition when a com-
- * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
- * status.
- *
- * 10b: The logical unit shall not clear any unit attention condition
- * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
- * status and shall not establish a unit attention condition when
- * a command is completed with BUSY, TASK SET FULL, or RESERVATION
- * CONFLICT status.
- *
- * 11b: The logical unit shall not clear any unit attention condition
- * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
- * status and shall establish a unit attention condition for the
- * initiator port associated with the I_T nexus on which the BUSY,
- * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
- * Depending on the status, the additional sense code shall be set to
- * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
- * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
- * command, a unit attention condition shall be established only once
- * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
- * of the number of commands completed with one of those status codes.
- */
- p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
- /*
- * From spc4r17, section 7.4.6 Control mode Page
- *
- * Task Aborted Status (TAS) bit set to zero.
- *
- * A task aborted status (TAS) bit set to zero specifies that aborted
- * tasks shall be terminated by the device server without any response
- * to the application client. A TAS bit set to one specifies that tasks
- * aborted by the actions of an I_T nexus other than the I_T nexus on
- * which the command was received shall be completed with TASK ABORTED
- * status (see SAM-4).
- */
- p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
- p[8] = 0xff;
- p[9] = 0xff;
- p[11] = 30;
-
- return 12;
-}
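To make the bit layout above concrete, a small user-space sketch decoding the Control mode page bytes this function sets: QUEUE ALGORITHM MODIFIER in byte 3 bits 7:4, UN_INTLCK_CTRL in byte 4 bits 5:4, and TAS in byte 5 bit 6 (spc4r23 section 7.4.7):

#include <stdio.h>

int main(void)
{
	unsigned char p3 = 0x10, p4 = 0x00, p5 = 0x40;	/* sample values from above */

	printf("QUEUE ALGORITHM MODIFIER: %u\n", (p3 >> 4) & 0xf); /* 1 = unrestricted */
	printf("UN_INTLCK_CTRL:           %u\n", (p4 >> 4) & 0x3); /* 0 = clear UA */
	printf("TAS:                      %u\n", (p5 >> 6) & 0x1); /* 1 = TASK ABORTED */
	return 0;
}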
-
-static int
-target_modesense_caching(struct se_device *dev, unsigned char *p)
-{
- p[0] = 0x08;
- p[1] = 0x12;
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
- p[2] = 0x04; /* Write Cache Enable */
- p[12] = 0x20; /* Disabled Read Ahead */
-
- return 20;
-}
-
-static void
-target_modesense_write_protect(unsigned char *buf, int type)
-{
- /*
- * I believe that the WP bit (bit 7) in the mode header is the same for
- * all device types..
- */
- switch (type) {
- case TYPE_DISK:
- case TYPE_TAPE:
- default:
- buf[0] |= 0x80; /* WP bit */
- break;
- }
-}
-
-static void
-target_modesense_dpofua(unsigned char *buf, int type)
-{
- switch (type) {
- case TYPE_DISK:
- buf[0] |= 0x10; /* DPOFUA bit */
- break;
- default:
- break;
- }
-}
-
-int target_emulate_modesense(struct se_task *task)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = cmd->se_dev;
- char *cdb = cmd->t_task_cdb;
- unsigned char *rbuf;
- int type = dev->transport->get_device_type(dev);
- int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
- int offset = ten ? 8 : 4;
- int length = 0;
- unsigned char buf[SE_MODE_PAGE_BUF];
-
- memset(buf, 0, SE_MODE_PAGE_BUF);
-
- switch (cdb[2] & 0x3f) {
- case 0x01:
- length = target_modesense_rwrecovery(&buf[offset]);
- break;
- case 0x08:
- length = target_modesense_caching(dev, &buf[offset]);
- break;
- case 0x0a:
- length = target_modesense_control(dev, &buf[offset]);
- break;
- case 0x3f:
- length = target_modesense_rwrecovery(&buf[offset]);
- length += target_modesense_caching(dev, &buf[offset+length]);
- length += target_modesense_control(dev, &buf[offset+length]);
- break;
- default:
- pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
- cdb[2] & 0x3f, cdb[3]);
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- return -EINVAL;
- }
- offset += length;
-
- if (ten) {
- offset -= 2;
- buf[0] = (offset >> 8) & 0xff;
- buf[1] = offset & 0xff;
-
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- target_modesense_write_protect(&buf[3], type);
-
- if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- target_modesense_dpofua(&buf[3], type);
-
- if ((offset + 2) > cmd->data_length)
- offset = cmd->data_length;
-
- } else {
- offset -= 1;
- buf[0] = offset & 0xff;
-
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- target_modesense_write_protect(&buf[2], type);
-
- if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- target_modesense_dpofua(&buf[2], type);
-
- if ((offset + 1) > cmd->data_length)
- offset = cmd->data_length;
- }
-
- rbuf = transport_kmap_data_sg(cmd);
- memcpy(rbuf, buf, offset);
- transport_kunmap_data_sg(cmd);
-
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
-}
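The offset arithmetic above encodes the two mode parameter header layouts: MODE SENSE (6) uses a 4-byte header with a one-byte MODE DATA LENGTH, MODE SENSE (10) an 8-byte header with a two-byte big-endian length, and both lengths exclude the length field itself. A minimal sketch, assuming a 12-byte mode page payload:

#include <stdint.h>
#include <stdio.h>

static void set_mode_data_len(uint8_t *buf, int ten, unsigned int total)
{
	if (ten) {		/* 8-byte header, 2-byte big-endian length */
		unsigned int len = total - 2;
		buf[0] = (len >> 8) & 0xff;
		buf[1] = len & 0xff;
	} else {		/* 4-byte header, 1-byte length */
		buf[0] = (total - 1) & 0xff;
	}
}

int main(void)
{
	uint8_t h6[4] = { 0 }, h10[8] = { 0 };

	set_mode_data_len(h6, 0, 4 + 12);
	set_mode_data_len(h10, 1, 8 + 12);
	printf("MODE SENSE (6) length byte: %u\n", h6[0]);		  /* 15 */
	printf("MODE SENSE (10) length:     %u\n", (h10[0] << 8) | h10[1]); /* 18 */
	return 0;
}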
-
-int target_emulate_request_sense(struct se_task *task)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- unsigned char *cdb = cmd->t_task_cdb;
- unsigned char *buf;
- u8 ua_asc = 0, ua_ascq = 0;
- int err = 0;
-
- if (cdb[1] & 0x01) {
- pr_err("REQUEST_SENSE description emulation not"
- " supported\n");
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -ENOSYS;
- }
-
- buf = transport_kmap_data_sg(cmd);
-
- if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
- /*
- * CURRENT ERROR, UNIT ATTENTION
- */
- buf[0] = 0x70;
- buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
-
- if (cmd->data_length < 18) {
- buf[7] = 0x00;
- err = -EINVAL;
- goto end;
- }
- /*
- * The Additional Sense Code (ASC) from the UNIT ATTENTION
- */
- buf[SPC_ASC_KEY_OFFSET] = ua_asc;
- buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
- buf[7] = 0x0A;
- } else {
- /*
- * CURRENT ERROR, NO SENSE
- */
- buf[0] = 0x70;
- buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
-
- if (cmd->data_length < 18) {
- buf[7] = 0x00;
- err = -EINVAL;
- goto end;
- }
- /*
- * NO ADDITIONAL SENSE INFORMATION
- */
- buf[SPC_ASC_KEY_OFFSET] = 0x00;
- buf[7] = 0x0A;
- }
-
-end:
- transport_kunmap_data_sg(cmd);
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
-}
-
-/*
- * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
- * Note this is not used for TCM/pSCSI passthrough.
- */
-int target_emulate_unmap(struct se_task *task)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf, *ptr = NULL;
- unsigned char *cdb = &cmd->t_task_cdb[0];
- sector_t lba;
- unsigned int size = cmd->data_length, range;
- int ret = 0, offset;
- unsigned short dl, bd_dl;
-
- if (!dev->transport->do_discard) {
- pr_err("UNMAP emulation not supported for: %s\n",
- dev->transport->name);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
- }
-
- /* First UNMAP block descriptor starts at 8 byte offset */
- offset = 8;
- size -= 8;
- dl = get_unaligned_be16(&cdb[0]);
- bd_dl = get_unaligned_be16(&cdb[2]);
-
- buf = transport_kmap_data_sg(cmd);
-
- ptr = &buf[offset];
- pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
- " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
-
- while (size) {
- lba = get_unaligned_be64(&ptr[0]);
- range = get_unaligned_be32(&ptr[8]);
- pr_debug("UNMAP: Using lba: %llu and range: %u\n",
- (unsigned long long)lba, range);
-
- ret = dev->transport->do_discard(dev, lba, range);
- if (ret < 0) {
- pr_err("blkdev_issue_discard() failed: %d\n",
- ret);
- goto err;
- }
-
- ptr += 16;
- size -= 16;
- }
-
-err:
- transport_kunmap_data_sg(cmd);
- if (!ret) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
- return ret;
-}
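As a companion to the parsing loop above, a hedged sketch of how an initiator would build the UNMAP parameter list this code consumes: an 8-byte header followed by 16-byte block descriptors (8-byte LBA, 4-byte block count, 4 reserved bytes), all big-endian:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_unmap_descriptor(uint8_t *d, uint64_t lba, uint32_t nblocks)
{
	int i;

	for (i = 0; i < 8; i++)		/* 8-byte big-endian UNMAP LBA */
		d[i] = (lba >> (56 - 8 * i)) & 0xff;
	for (i = 0; i < 4; i++)		/* 4-byte big-endian block count */
		d[8 + i] = (nblocks >> (24 - 8 * i)) & 0xff;
	memset(&d[12], 0, 4);		/* reserved */
}

int main(void)
{
	uint8_t desc[16];

	build_unmap_descriptor(desc, 0x1000, 256);
	for (int i = 0; i < 16; i++)
		printf("%02x ", desc[i]);
	printf("\n");
	return 0;
}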
-
-/*
- * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
- * Note this is not used for TCM/pSCSI passthrough.
- */
-int target_emulate_write_same(struct se_task *task)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = cmd->se_dev;
- sector_t range;
- sector_t lba = cmd->t_task_lba;
- u32 num_blocks;
- int ret;
-
- if (!dev->transport->do_discard) {
- pr_err("WRITE_SAME emulation not supported"
- " for: %s\n", dev->transport->name);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
- }
-
- if (cmd->t_task_cdb[0] == WRITE_SAME)
- num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
- else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
- else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
-
- /*
- * Use the explicit range when non-zero is supplied, otherwise calculate
- * the remaining range based on ->get_blocks() - starting LBA.
- */
- if (num_blocks != 0)
- range = num_blocks;
- else
- range = (dev->transport->get_blocks(dev) - lba);
-
- pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
- (unsigned long long)lba, (unsigned long long)range);
-
- ret = dev->transport->do_discard(dev, lba, range);
- if (ret < 0) {
- pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
- return ret;
- }
-
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
-}
-
-int target_emulate_synchronize_cache(struct se_task *task)
-{
- struct se_device *dev = task->task_se_cmd->se_dev;
- struct se_cmd *cmd = task->task_se_cmd;
-
- if (!dev->transport->do_sync_cache) {
- pr_err("SYNCHRONIZE_CACHE emulation not supported"
- " for: %s\n", dev->transport->name);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
- }
-
- dev->transport->do_sync_cache(task);
- return 0;
-}
-
-int target_emulate_noop(struct se_task *task)
-{
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
-}
-
-/*
- * Write a CDB into @cdb that is based on the one the initiator sent us,
- * but updated to only cover the sectors that the current task handles.
- */
-void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
-
- memcpy(cdb, cmd->t_task_cdb, cdb_len);
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- unsigned long long lba = task->task_lba;
- u32 sectors = task->task_sectors;
-
- switch (cdb_len) {
- case 6:
- /* 21-bit LBA and 8-bit sectors */
- cdb[1] = (lba >> 16) & 0x1f;
- cdb[2] = (lba >> 8) & 0xff;
- cdb[3] = lba & 0xff;
- cdb[4] = sectors & 0xff;
- break;
- case 10:
- /* 32-bit LBA and 16-bit sectors */
- put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be16(sectors, &cdb[7]);
- break;
- case 12:
- /* 32-bit LBA and 32-bit sectors */
- put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be32(sectors, &cdb[6]);
- break;
- case 16:
- /* 64-bit LBA and 32-bit sectors */
- put_unaligned_be64(lba, &cdb[2]);
- put_unaligned_be32(sectors, &cdb[10]);
- break;
- case 32:
- /* 64-bit LBA and 32-bit sectors, extended CDB */
- put_unaligned_be64(lba, &cdb[12]);
- put_unaligned_be32(sectors, &cdb[28]);
- break;
- default:
- BUG();
- }
- }
-}
-EXPORT_SYMBOL(target_get_task_cdb);
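A worked example of the 10-byte case above: a READ (10) CDB carries a 32-bit big-endian LBA in bytes 2-5 and a 16-bit transfer length in bytes 7-8, so splitting a command at LBA 0x1000 for 8 sectors rewrites only those fields. A user-space sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cdb[10] = { 0x28 };	/* READ (10) opcode */
	uint32_t lba = 0x1000;
	uint16_t sectors = 8;

	cdb[2] = lba >> 24; cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;  cdb[5] = lba;
	cdb[7] = sectors >> 8; cdb[8] = sectors;

	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");	/* 28 00 00 00 10 00 00 00 08 00 */
	return 0;
}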
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 6e043eeb1db..bf55c5a04cf 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3,8 +3,7 @@
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
- * Copyright (c) 2008-2011 Rising Tide Systems
- * Copyright (c) 2008-2011 Linux-iSCSI.org
+ * (c) Copyright 2008-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -49,11 +48,12 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
+#include "target_core_xcopy.h"
extern struct t10_alua_lu_gp *default_lu_gp;
-static struct list_head g_tf_list;
-static struct mutex g_tf_lock;
+static LIST_HEAD(g_tf_list);
+static DEFINE_MUTEX(g_tf_lock);
struct target_core_configfs_attribute {
struct configfs_attribute attr;
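The hunk above replaces open-coded runtime initialization of the fabric list and mutex with static initializers. A minimal kernel-style sketch of the idiom, illustrative only and not buildable outside a kernel tree:

#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(g_example_list);	/* list head initialized at compile time */
static DEFINE_MUTEX(g_example_lock);	/* mutex likewise; no module-init code needed */

/*
 * The replaced pattern needed explicit calls during module init:
 *	INIT_LIST_HEAD(&g_tf_list);
 *	mutex_init(&g_tf_lock);
 */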
@@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric(
* struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
tf->tf_group.default_groups[0] = &tf->tf_disc_group;
tf->tf_group.default_groups[1] = NULL;
config_group_init_type_name(&tf->tf_group, name,
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
- &TF_CIT_TMPL(tf)->tfc_discovery_cit);
+ &tf->tf_cit_tmpl.tfc_discovery_cit);
pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
@@ -269,7 +269,7 @@ static struct configfs_subsystem target_core_fabrics = {
},
};
-static struct configfs_subsystem *target_core_subsystem[] = {
+struct configfs_subsystem *target_core_subsystem[] = {
&target_core_fabrics,
NULL,
};
@@ -421,18 +421,6 @@ static int target_fabric_tf_ops_check(
pr_err("Missing tfo->close_session()\n");
return -EINVAL;
}
- if (!tfo->stop_session) {
- pr_err("Missing tfo->stop_session()\n");
- return -EINVAL;
- }
- if (!tfo->fall_back_to_erl0) {
- pr_err("Missing tfo->fall_back_to_erl0()\n");
- return -EINVAL;
- }
- if (!tfo->sess_logged_in) {
- pr_err("Missing tfo->sess_logged_in()\n");
- return -EINVAL;
- }
if (!tfo->sess_get_index) {
pr_err("Missing tfo->sess_get_index()\n");
return -EINVAL;
@@ -469,16 +457,8 @@ static int target_fabric_tf_ops_check(
pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
- if (!tfo->set_fabric_sense_len) {
- pr_err("Missing tfo->set_fabric_sense_len()\n");
- return -EINVAL;
- }
- if (!tfo->get_fabric_sense_len) {
- pr_err("Missing tfo->get_fabric_sense_len()\n");
- return -EINVAL;
- }
- if (!tfo->is_state_remove) {
- pr_err("Missing tfo->is_state_remove()\n");
+ if (!tfo->aborted_task) {
+ pr_err("Missing tfo->aborted_task()\n");
return -EINVAL;
}
/*
@@ -589,21 +569,8 @@ static ssize_t target_core_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
- struct se_device *dev; \
- struct se_subsystem_dev *se_dev = da->da_sub_dev; \
- ssize_t rb; \
- \
- spin_lock(&se_dev->se_dev_lock); \
- dev = se_dev->se_dev_ptr; \
- if (!dev) { \
- spin_unlock(&se_dev->se_dev_lock); \
- return -ENODEV; \
- } \
- rb = snprintf(page, PAGE_SIZE, "%u\n", \
- (u32)dev->se_sub_dev->se_dev_attrib._name); \
- spin_unlock(&se_dev->se_dev_lock); \
- \
- return rb; \
+ return snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)da->da_dev->dev_attrib._name); \
}
#define DEF_DEV_ATTRIB_STORE(_name) \
@@ -612,26 +579,16 @@ static ssize_t target_core_dev_store_attr_##_name( \
const char *page, \
size_t count) \
{ \
- struct se_device *dev; \
- struct se_subsystem_dev *se_dev = da->da_sub_dev; \
unsigned long val; \
int ret; \
\
- spin_lock(&se_dev->se_dev_lock); \
- dev = se_dev->se_dev_ptr; \
- if (!dev) { \
- spin_unlock(&se_dev->se_dev_lock); \
- return -ENODEV; \
- } \
- ret = strict_strtoul(page, 0, &val); \
+ ret = kstrtoul(page, 0, &val); \
if (ret < 0) { \
- spin_unlock(&se_dev->se_dev_lock); \
- pr_err("strict_strtoul() failed with" \
+ pr_err("kstrtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
- ret = se_dev_set_##_name(dev, (u32)val); \
- spin_unlock(&se_dev->se_dev_lock); \
+ ret = se_dev_set_##_name(da->da_dev, (u32)val); \
\
return (!ret) ? count : -EINVAL; \
}
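The store macro above now parses with kstrtoul() instead of the deprecated strict_strtoul(). A hedged kernel-style sketch of the pattern, with a hypothetical handler name:

#include <linux/kernel.h>

static ssize_t example_store(const char *page, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);	/* base 0 auto-detects 0x/0 prefixes */
	if (ret < 0)
		return ret;		/* -EINVAL or -ERANGE */
	/* ... apply val, then report the bytes consumed ... */
	return count;
}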
@@ -657,6 +614,9 @@ static struct target_core_dev_attrib_attribute \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_show_attr_##_name);
+DEF_DEV_ATTRIB(emulate_model_alias);
+SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(emulate_dpo);
SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
@@ -681,6 +641,21 @@ SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_tpws);
SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(emulate_caw);
+SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_3pc);
+SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(pi_prot_type);
+SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
+SE_DEV_ATTR_RO(hw_pi_prot_type);
+
+DEF_DEV_ATTRIB(pi_prot_format);
+SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
@@ -699,8 +674,8 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB_RO(hw_max_sectors);
SE_DEV_ATTR_RO(hw_max_sectors);
-DEF_DEV_ATTRIB(max_sectors);
-SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(fabric_max_sectors);
+SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(optimal_sectors);
SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
@@ -723,9 +698,13 @@ SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(unmap_granularity_alignment);
SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(max_write_same_len);
+SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
+
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+ &target_core_dev_attrib_emulate_model_alias.attr,
&target_core_dev_attrib_emulate_dpo.attr,
&target_core_dev_attrib_emulate_fua_write.attr,
&target_core_dev_attrib_emulate_fua_read.attr,
@@ -734,13 +713,18 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tas.attr,
&target_core_dev_attrib_emulate_tpu.attr,
&target_core_dev_attrib_emulate_tpws.attr,
+ &target_core_dev_attrib_emulate_caw.attr,
+ &target_core_dev_attrib_emulate_3pc.attr,
+ &target_core_dev_attrib_pi_prot_type.attr,
+ &target_core_dev_attrib_hw_pi_prot_type.attr,
+ &target_core_dev_attrib_pi_prot_format.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
&target_core_dev_attrib_is_nonrot.attr,
&target_core_dev_attrib_emulate_rest_reord.attr,
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
- &target_core_dev_attrib_max_sectors.attr,
+ &target_core_dev_attrib_fabric_max_sectors.attr,
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
&target_core_dev_attrib_queue_depth.attr,
@@ -748,6 +732,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
&target_core_dev_attrib_unmap_granularity.attr,
&target_core_dev_attrib_unmap_granularity_alignment.attr,
+ &target_core_dev_attrib_max_write_same_len.attr,
NULL,
};
@@ -788,13 +773,6 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
struct t10_wwn *t10_wwn,
char *page)
{
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
-
- dev = se_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
&t10_wwn->unit_serial[0]);
}
@@ -804,8 +782,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
const char *page,
size_t count)
{
- struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
+ struct se_device *dev = t10_wwn->t10_dev;
unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
/*
@@ -818,7 +795,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* it is doing 'the right thing' wrt a world wide unique
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
- if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+ if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
pr_err("Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
@@ -835,15 +812,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* (underneath the initiator side OS dependent multipath code)
* could cause negative effects.
*/
- dev = su_dev->se_dev_ptr;
- if (dev) {
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- pr_err("Unable to set VPD Unit Serial while"
- " active %d $FABRIC_MOD exports exist\n",
- atomic_read(&dev->dev_export_obj.obj_access_count));
- return -EINVAL;
- }
+ if (dev->export_count) {
+ pr_err("Unable to set VPD Unit Serial while"
+ " active %d $FABRIC_MOD exports exist\n",
+ dev->export_count);
+ return -EINVAL;
}
+
/*
* This currently assumes ASCII encoding for emulated VPD Unit Serial.
*
@@ -852,12 +827,12 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
*/
memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
- snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+ snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
- su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+ dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
- " %s\n", su_dev->t10_wwn.unit_serial);
+ " %s\n", dev->t10_wwn.unit_serial);
return count;
}
@@ -871,16 +846,10 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
struct t10_wwn *t10_wwn,
char *page)
{
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
struct t10_vpd *vpd;
unsigned char buf[VPD_TMP_BUF_SIZE];
ssize_t len = 0;
- dev = se_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
memset(buf, 0, VPD_TMP_BUF_SIZE);
spin_lock(&t10_wwn->t10_vpd_lock);
@@ -918,16 +887,10 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
struct t10_wwn *t10_wwn, \
char *page) \
{ \
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
- struct se_device *dev; \
struct t10_vpd *vpd; \
unsigned char buf[VPD_TMP_BUF_SIZE]; \
ssize_t len = 0; \
\
- dev = se_dev->se_dev_ptr; \
- if (!dev) \
- return -ENODEV; \
- \
spin_lock(&t10_wwn->t10_vpd_lock); \
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
if (vpd->association != _assoc) \
@@ -1027,7 +990,7 @@ static struct config_item_type target_core_dev_wwn_cit = {
/* Start functions for struct config_item_type target_core_dev_pr_cit */
-CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
#define SE_DEV_PR_ATTR(_name, _mode) \
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
@@ -1039,149 +1002,90 @@ static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_pr_show_attr_##_name);
-/*
- * res_holder
- */
-static ssize_t target_core_dev_pr_show_spc3_res(
- struct se_device *dev,
- char *page,
- ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
+ char *page)
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
char i_buf[PR_REG_ISID_ID_LEN];
- int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
- *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return *len;
- }
+ if (!pr_reg)
+ return sprintf(page, "No SPC-3 Reservation holder\n");
+
se_nacl = pr_reg->pr_reg_nacl;
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
- *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+ return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
- spin_unlock(&dev->dev_reservation_lock);
-
- return *len;
+ se_nacl->initiatorname, i_buf);
}
-static ssize_t target_core_dev_pr_show_spc2_res(
- struct se_device *dev,
- char *page,
- ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
+ char *page)
{
struct se_node_acl *se_nacl;
+ ssize_t len;
- spin_lock(&dev->dev_reservation_lock);
se_nacl = dev->dev_reserved_node_acl;
- if (!se_nacl) {
- *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return *len;
+ if (se_nacl) {
+ len = sprintf(page,
+ "SPC-2 Reservation: %s Initiator: %s\n",
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_nacl->initiatorname);
+ } else {
+ len = sprintf(page, "No SPC-2 Reservation holder\n");
}
- *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
- se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- se_nacl->initiatorname);
- spin_unlock(&dev->dev_reservation_lock);
-
- return *len;
+ return len;
}
-static ssize_t target_core_dev_pr_show_attr_res_holder(
- struct se_subsystem_dev *su_dev,
- char *page)
+static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
+ char *page)
{
- ssize_t len = 0;
-
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
+ int ret;
- switch (su_dev->t10_pr.res_type) {
- case SPC3_PERSISTENT_RESERVATIONS:
- target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
- page, &len);
- break;
- case SPC2_RESERVATIONS:
- target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
- page, &len);
- break;
- case SPC_PASSTHROUGH:
- len += sprintf(page+len, "Passthrough\n");
- break;
- default:
- len += sprintf(page+len, "Unknown\n");
- break;
- }
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return sprintf(page, "Passthrough\n");
- return len;
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ ret = target_core_dev_pr_show_spc2_res(dev, page);
+ else
+ ret = target_core_dev_pr_show_spc3_res(dev, page);
+ spin_unlock(&dev->dev_reservation_lock);
+ return ret;
}
SE_DEV_PR_ATTR_RO(res_holder);
-/*
- * res_pr_all_tgt_pts
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
- struct t10_pr_registration *pr_reg;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
- pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
+ if (!dev->dev_pr_res_holder) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
- }
- /*
- * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
- * Basic PERSISTENT RESERVE OUT parameter list, page 290
- */
- if (pr_reg->pr_reg_all_tg_pt)
+ } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
len = sprintf(page, "SPC-3 Reservation: All Target"
" Ports registration\n");
- else
+ } else {
len = sprintf(page, "SPC-3 Reservation: Single"
" Target Port registration\n");
- spin_unlock(&dev->dev_reservation_lock);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
-/*
- * res_pr_generation
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return 0;
-
- return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
+ return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1190,10 +1094,8 @@ SE_DEV_PR_ATTR_RO(res_pr_generation);
* res_pr_holder_tg_port
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
struct se_node_acl *se_nacl;
struct se_lun *lun;
struct se_portal_group *se_tpg;
@@ -1201,20 +1103,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
struct target_core_fabric_ops *tfo;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
+ goto out_unlock;
}
+
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
@@ -1224,52 +1119,42 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
tfo->tpg_get_wwn(se_tpg));
len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
- " Identifer Tag: %hu %s Portal Group Tag: %hu"
+ " Identifier Tag: %hu %s Portal Group Tag: %hu"
" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
tfo->get_fabric_name(), lun->unpacked_lun);
- spin_unlock(&dev->dev_reservation_lock);
+out_unlock:
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
-/*
- * res_pr_registered_i_pts
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
unsigned char buf[384];
char i_buf[PR_REG_ISID_ID_LEN];
ssize_t len = 0;
- int reg_count = 0, prf_isid;
-
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
+ int reg_count = 0;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ core_pr_dump_initiator_port(pr_reg, i_buf,
PR_REG_ISID_ID_LEN);
sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
tfo->get_fabric_name(),
- pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
- &i_buf[0] : "", pr_reg->pr_res_key,
+ pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
pr_reg->pr_res_generation);
if (len + strlen(buf) >= PAGE_SIZE)
@@ -1278,7 +1163,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
len += sprintf(page+len, "%s", buf);
reg_count++;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(page+len, "None\n");
@@ -1288,88 +1173,48 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
-/*
- * res_pr_type
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_type(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
struct t10_pr_registration *pr_reg;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
+ if (pr_reg) {
+ len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+ } else {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
}
- len = sprintf(page, "SPC-3 Reservation Type: %s\n",
- core_scsi3_pr_dump_type(pr_reg->pr_res_type));
- spin_unlock(&dev->dev_reservation_lock);
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_type);
-/*
- * res_type
- */
static ssize_t target_core_dev_pr_show_attr_res_type(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- ssize_t len = 0;
-
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- switch (su_dev->t10_pr.res_type) {
- case SPC3_PERSISTENT_RESERVATIONS:
- len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
- break;
- case SPC2_RESERVATIONS:
- len = sprintf(page, "SPC2_RESERVATIONS\n");
- break;
- case SPC_PASSTHROUGH:
- len = sprintf(page, "SPC_PASSTHROUGH\n");
- break;
- default:
- len = sprintf(page, "UNKNOWN\n");
- break;
- }
-
- return len;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return sprintf(page, "SPC_PASSTHROUGH\n");
+ else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ return sprintf(page, "SPC2_RESERVATIONS\n");
+ else
+ return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
SE_DEV_PR_ATTR_RO(res_type);
-/*
- * res_aptpl_active
- */
-
static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
- (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+ (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1378,13 +1223,9 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
* res_aptpl_metadata
*/
static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1416,11 +1257,10 @@ static match_table_t tokens = {
};
static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
- struct se_subsystem_dev *su_dev,
+ struct se_device *dev,
const char *page,
size_t count)
{
- struct se_device *dev;
unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
unsigned char *t_fabric = NULL, *t_port = NULL;
char *orig, *ptr, *arg_p, *opts;
@@ -1432,14 +1272,12 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u16 port_rpti = 0, tpgt = 0;
u8 type = 0, scope;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return 0;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_debug("Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
@@ -1497,9 +1335,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- ret = strict_strtoull(arg_p, 0, &tmp_ll);
+ ret = kstrtoull(arg_p, 0, &tmp_ll);
if (ret < 0) {
- pr_err("strict_strtoull() failed for"
+ pr_err("kstrtoull() failed for"
" sa_res_key=\n");
goto out;
}
@@ -1582,7 +1420,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
goto out;
}
- ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
+ ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
@@ -1597,7 +1435,7 @@ out:
SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
-CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
&target_core_dev_pr_res_holder.attr,
@@ -1629,18 +1467,14 @@ static struct config_item_type target_core_dev_pr_cit = {
static ssize_t target_core_show_dev_info(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
+ struct se_subsystem_api *t = dev->transport;
int bl = 0;
ssize_t read_bytes = 0;
- if (!se_dev->se_dev_ptr)
- return -ENODEV;
-
- transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+ transport_dump_dev_state(dev, page, &bl);
read_bytes += bl;
- read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+ read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
return read_bytes;
}
@@ -1657,17 +1491,10 @@ static ssize_t target_core_store_dev_control(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
+ struct se_subsystem_api *t = dev->transport;
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate struct se_subsystem_dev>se"
- "_dev_su_ptr\n");
- return -EINVAL;
- }
-
- return t->set_configfs_dev_params(hba, se_dev, page, count);
+ return t->set_configfs_dev_params(dev, page, count);
}
static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -1680,12 +1507,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
+ struct se_device *dev = p;
- if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+ if (!(dev->dev_flags & DF_USING_ALIAS))
return 0;
- return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+ return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
static ssize_t target_core_store_dev_alias(
@@ -1693,8 +1520,8 @@ static ssize_t target_core_store_dev_alias(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
@@ -1704,19 +1531,18 @@ static ssize_t target_core_store_dev_alias(
return -EINVAL;
}
- read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
- "%s", page);
+ read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
if (!read_bytes)
return -EINVAL;
- if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
- se_dev->se_dev_alias[read_bytes - 1] = '\0';
+ if (dev->dev_alias[read_bytes - 1] == '\n')
+ dev->dev_alias[read_bytes - 1] = '\0';
- se_dev->su_dev_flags |= SDF_USING_ALIAS;
+ dev->dev_flags |= DF_USING_ALIAS;
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&se_dev->se_dev_group.cg_item),
- se_dev->se_dev_alias);
+ config_item_name(&dev->dev_group.cg_item),
+ dev->dev_alias);
return read_bytes;
}
@@ -1731,12 +1557,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
+ struct se_device *dev = p;
- if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+ if (!(dev->dev_flags & DF_USING_UDEV_PATH))
return 0;
- return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+ return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}
static ssize_t target_core_store_dev_udev_path(
@@ -1744,8 +1570,8 @@ static ssize_t target_core_store_dev_udev_path(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
@@ -1755,19 +1581,19 @@ static ssize_t target_core_store_dev_udev_path(
return -EINVAL;
}
- read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+ read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
if (!read_bytes)
return -EINVAL;
- if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
- se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+ if (dev->udev_path[read_bytes - 1] == '\n')
+ dev->udev_path[read_bytes - 1] = '\0';
- se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+ dev->dev_flags |= DF_USING_UDEV_PATH;
pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&se_dev->se_dev_group.cg_item),
- se_dev->se_dev_udev_path);
+ config_item_name(&dev->dev_group.cg_item),
+ dev->udev_path);
return read_bytes;
}
@@ -1780,16 +1606,21 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
.store = target_core_store_dev_udev_path,
};
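+/*
+ * The "enable" attribute is now readable as well: report whether
+ * target_configure_device() has already marked this device DF_CONFIGURED.
+ */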
+static ssize_t target_core_show_dev_enable(void *p, char *page)
+{
+ struct se_device *dev = p;
+
+ return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+}
+
static ssize_t target_core_store_dev_enable(
void *p,
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_device *dev;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
char *ptr;
+ int ret;
ptr = strstr(page, "1");
if (!ptr) {
@@ -1797,58 +1628,32 @@ static ssize_t target_core_store_dev_enable(
" is \"1\"\n");
return -EINVAL;
}
- if (se_dev->se_dev_ptr) {
- pr_err("se_dev->se_dev_ptr already set for storage"
- " object\n");
- return -EEXIST;
- }
-
- if (t->check_configfs_dev_params(hba, se_dev) < 0)
- return -EINVAL;
-
- dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- else if (!dev)
- return -EINVAL;
-
- se_dev->se_dev_ptr = dev;
- pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
- " %p\n", se_dev->se_dev_ptr);
+ ret = target_configure_device(dev);
+ if (ret)
+ return ret;
return count;
}
static struct target_core_configfs_attribute target_core_attr_dev_enable = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "enable",
- .ca_mode = S_IWUSR },
- .show = NULL,
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_enable,
.store = target_core_store_dev_enable,
};
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
- struct se_device *dev;
- struct se_subsystem_dev *su_dev = p;
+ struct se_device *dev = p;
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
- return len;
-
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!lu_gp_mem) {
- pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
- " pointer\n");
- return -EINVAL;
- }
+ if (!lu_gp_mem)
+ return 0;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
@@ -1867,24 +1672,17 @@ static ssize_t target_core_store_alua_lu_gp(
const char *page,
size_t count)
{
- struct se_device *dev;
- struct se_subsystem_dev *su_dev = p;
- struct se_hba *hba = su_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
unsigned char buf[LU_GROUP_NAME_BUF];
int move = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!lu_gp_mem)
+ return 0;
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
- pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
- config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item));
- return -EINVAL;
- }
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
@@ -1905,14 +1703,6 @@ static ssize_t target_core_store_alua_lu_gp(
if (!lu_gp_new)
return -ENODEV;
}
- lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!lu_gp_mem) {
- if (lu_gp_new)
- core_alua_put_lu_gp_from_name(lu_gp_new);
- pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
- " pointer\n");
- return -EINVAL;
- }
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
@@ -1926,7 +1716,7 @@ static ssize_t target_core_store_alua_lu_gp(
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp->lu_gp_group.cg_item),
lu_gp->lu_gp_id);
@@ -1951,7 +1741,7 @@ static ssize_t target_core_store_alua_lu_gp(
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp_new->lu_gp_group.cg_item),
lu_gp_new->lu_gp_id);
@@ -1967,6 +1757,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
.store = target_core_store_alua_lu_gp,
};
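+/*
+ * Dump the referrals LBA map under lba_map_lock: the first line reports
+ * "<segment_size> <segment_multiplier>", then one line per LBA range with
+ * a "<pg_id>:<state>" pair per target port group (O/A/S/U, '.' = unknown).
+ */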
+static ssize_t target_core_show_dev_lba_map(void *p, char *page)
+{
+ struct se_device *dev = p;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *mem;
+ char *b = page;
+ int bl = 0;
+ char state;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ bl += sprintf(b + bl, "%u %u\n",
+ dev->t10_alua.lba_map_segment_size,
+ dev->t10_alua.lba_map_segment_multiplier);
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+ bl += sprintf(b + bl, "%llu %llu",
+ map->lba_map_first_lba, map->lba_map_last_lba);
+ list_for_each_entry(mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ switch (mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ state = 'O';
+ break;
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ state = 'A';
+ break;
+ case ALUA_ACCESS_STATE_STANDBY:
+ state = 'S';
+ break;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ state = 'U';
+ break;
+ default:
+ state = '.';
+ break;
+ }
+ bl += sprintf(b + bl, " %d:%c",
+ mem->lba_map_mem_alua_pg_id, state);
+ }
+ bl += sprintf(b + bl, "\n");
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return bl;
+}
+
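+/*
+ * Parse an LBA map in the same format the show routine above emits: line 0
+ * carries segment size and multiplier, each later line an LBA range plus
+ * per-group "<pg_id>:<state>" descriptors. Every line must name the same
+ * number of port groups; on any parse error the partial list is freed again.
+ */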
+static ssize_t target_core_store_dev_lba_map(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev = p;
+ struct t10_alua_lba_map *lba_map = NULL;
+ struct list_head lba_list;
+ char *map_entries, *ptr;
+ char state;
+ int pg_num = -1, pg;
+ int ret = 0, num = 0, pg_id, alua_state;
+ unsigned long start_lba = -1, end_lba = -1;
+ unsigned long segment_size = -1, segment_mult = -1;
+
+ map_entries = kstrdup(page, GFP_KERNEL);
+ if (!map_entries)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&lba_list);
+ while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ if (num == 0) {
+ if (sscanf(ptr, "%lu %lu\n",
+ &segment_size, &segment_mult) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ continue;
+ }
+ if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing end lba\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing state definitions\n",
+ num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ lba_map = core_alua_allocate_lba_map(&lba_list,
+ start_lba, end_lba);
+ if (IS_ERR(lba_map)) {
+ ret = PTR_ERR(lba_map);
+ break;
+ }
+ pg = 0;
+ while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
+ switch (state) {
+ case 'O':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
+ break;
+ case 'A':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
+ break;
+ case 'S':
+ alua_state = ALUA_ACCESS_STATE_STANDBY;
+ break;
+ case 'U':
+ alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
+ break;
+ default:
+ pr_err("Invalid ALUA state '%c'\n", state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = core_alua_allocate_lba_map_mem(lba_map,
+ pg_id, alua_state);
+ if (ret) {
+ pr_err("Invalid target descriptor %d:%c "
+ "at line %d\n",
+ pg_id, state, num);
+ break;
+ }
+ pg++;
+ ptr = strchr(ptr, ' ');
+ if (ptr)
+ ptr++;
+ else
+ break;
+ }
+ if (pg_num == -1)
+ pg_num = pg;
+ else if (pg != pg_num) {
+			pr_err("Only %d of %d port group definitions "
+ "at line %d\n", pg, pg_num, num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ }
+out:
+ if (ret) {
+ core_alua_free_lba_map(&lba_list);
+ count = ret;
+ } else
+ core_alua_set_lba_map(dev, &lba_list,
+ segment_size, segment_mult);
+ kfree(map_entries);
+ return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "lba_map",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_lba_map,
+ .store = target_core_store_dev_lba_map,
+};
+
static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,
@@ -1974,74 +1934,50 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_udev_path.attr,
&target_core_attr_dev_enable.attr,
&target_core_attr_dev_alua_lu_gp.attr,
+ &target_core_attr_dev_lba_map.attr,
NULL,
};
static void target_core_dev_release(struct config_item *item)
{
- struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
- struct se_subsystem_dev, se_dev_group);
- struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
- struct se_subsystem_api *t = hba->transport;
- struct config_group *dev_cg = &se_dev->se_dev_group;
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
kfree(dev_cg->default_groups);
- /*
- * This pointer will set when the storage is enabled with:
- *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
- */
- if (se_dev->se_dev_ptr) {
- pr_debug("Target_Core_ConfigFS: Calling se_free_"
- "virtual_device() for se_dev_ptr: %p\n",
- se_dev->se_dev_ptr);
-
- se_free_virtual_device(se_dev->se_dev_ptr, hba);
- } else {
- /*
- * Release struct se_subsystem_dev->se_dev_su_ptr..
- */
- pr_debug("Target_Core_ConfigFS: Calling t->free_"
- "device() for se_dev_su_ptr: %p\n",
- se_dev->se_dev_su_ptr);
-
- t->free_device(se_dev->se_dev_su_ptr);
- }
-
- pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
- "_dev_t: %p\n", se_dev);
- kfree(se_dev);
+ target_free_device(dev);
}
static ssize_t target_core_dev_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(item), struct se_subsystem_dev,
- se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!tc_attr->show)
return -EINVAL;
- return tc_attr->show(se_dev, page);
+ return tc_attr->show(dev, page);
}
static ssize_t target_core_dev_store(struct config_item *item,
struct configfs_attribute *attr,
const char *page, size_t count)
{
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(item), struct se_subsystem_dev,
- se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!tc_attr->store)
return -EINVAL;
- return tc_attr->store(se_dev, page, count);
+ return tc_attr->store(dev, page, count);
}
static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2096,11 +2032,11 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
unsigned long lu_gp_id;
int ret;
- ret = strict_strtoul(page, 0, &lu_gp_id);
+ ret = kstrtoul(page, 0, &lu_gp_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" lu_gp_id\n", ret);
- return -EINVAL;
+ return ret;
}
if (lu_gp_id > 0x0000ffff) {
pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
@@ -2131,7 +2067,6 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
{
struct se_device *dev;
struct se_hba *hba;
- struct se_subsystem_dev *su_dev;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[LU_GROUP_NAME_BUF];
@@ -2141,12 +2076,11 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
- su_dev = dev->se_sub_dev;
- hba = su_dev->se_dev_hba;
+ hba = dev->se_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item));
+ config_item_name(&dev->dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
@@ -2284,31 +2218,43 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
const char *page,
size_t count)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
unsigned long tmp;
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do implict ALUA on non valid"
+ pr_err("Unable to do implicit ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("Unable to set alua_access_state while device is"
+ " not configured\n");
+ return -ENODEV;
+ }
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
- return -EINVAL;
+ return ret;
}
new_state = (int)tmp;
- if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
- pr_err("Unable to process implict configfs ALUA"
- " transition while TPGS_IMPLICT_ALUA is diabled\n");
+ if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
+ pr_err("Unable to process implicit configfs ALUA"
+ " transition while TPGS_IMPLICIT_ALUA is disabled\n");
+ return -EINVAL;
+ }
+ if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+ new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+ /* LBA DEPENDENT is only allowed with implicit ALUA */
+ pr_err("Unable to process implicit configfs ALUA transition"
+ " while explicit ALUA management is enabled\n");
return -EINVAL;
}
- ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+ ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);
return (!ret) ? count : -EINVAL;
}
@@ -2341,17 +2287,17 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
return -EINVAL;
}
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access status"
" from %s\n", page);
- return -EINVAL;
+ return ret;
}
new_status = (int)tmp;
if ((new_status != ALUA_STATUS_NONE) &&
- (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
@@ -2384,6 +2330,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
/*
+ * alua_supported_states
+ */
+
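+/*
+ * Generate a show/store handler pair per supported ALUA state bit in
+ * tg_pt_gp_alua_supported_states; store accepts only '0' or '1' and
+ * requires a valid tg_pt_gp ID.
+ */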
+#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
+ struct t10_alua_tg_pt_gp *t, char *p) \
+{ \
+ return sprintf(p, "%d\n", !!(t->_var & _bit)); \
+}
+
+#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
+ struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
+{ \
+ unsigned long tmp; \
+ int ret; \
+ \
+ if (!t->tg_pt_gp_valid_id) { \
+		pr_err("Unable to set " #_name " ALUA state on non" \
+ " valid tg_pt_gp ID: %hu\n", \
+ t->tg_pt_gp_valid_id); \
+ return -EINVAL; \
+ } \
+ \
+ ret = kstrtoul(p, 0, &tmp); \
+ if (ret < 0) { \
+ pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
+ return -EINVAL; \
+ } \
+ if (tmp > 1) { \
+		pr_err("Invalid value '%lu', must be '0' or '1'\n", tmp); \
+ return -EINVAL; \
+ } \
+	if (tmp) \
+		t->_var |= _bit; \
+	else \
+		t->_var &= ~_bit; \
+ \
+ return c; \
+}
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
+ tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
+ tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
+ tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
+ tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
+ tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
+ tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
+ tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
+ tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
+ tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
+ tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
+ tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
+ tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
+ tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
+ tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
+
+/*
* alua_write_metadata
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
@@ -2401,10 +2431,10 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_write_metadata\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
@@ -2463,6 +2493,26 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
/*
+ * implicit_trans_secs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);
+
+/*
* preferred
*/
@@ -2505,11 +2555,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
unsigned long tg_pt_gp_id;
int ret;
- ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+ ret = kstrtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" tg_pt_gp_id\n", ret);
- return -EINVAL;
+ return ret;
}
if (tg_pt_gp_id > 0x0000ffff) {
pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
@@ -2583,9 +2633,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
&target_core_alua_tg_pt_gp_alua_access_state.attr,
&target_core_alua_tg_pt_gp_alua_access_status.attr,
&target_core_alua_tg_pt_gp_alua_access_type.attr,
+ &target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
+ &target_core_alua_tg_pt_gp_alua_support_offline.attr,
+ &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
+ &target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
+ &target_core_alua_tg_pt_gp_alua_support_standby.attr,
+ &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
+ &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+ &target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
&target_core_alua_tg_pt_gp_preferred.attr,
&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
&target_core_alua_tg_pt_gp_members.attr,
@@ -2623,11 +2681,10 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
struct t10_alua *alua = container_of(group, struct t10_alua,
alua_tg_pt_gps_group);
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
struct config_group *alua_tg_pt_gp_cg = NULL;
struct config_item *alua_tg_pt_gp_ci = NULL;
- tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
if (!tg_pt_gp)
return NULL;
@@ -2724,10 +2781,10 @@ static struct config_group *target_core_make_subdev(
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_dev *se_dev;
struct se_subsystem_api *t;
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
+ struct se_device *dev;
struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
struct config_group *dev_stat_grp = NULL;
int errno = -ENOMEM, ret;
@@ -2740,120 +2797,80 @@ static struct config_group *target_core_make_subdev(
*/
t = hba->transport;
- se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!se_dev) {
- pr_err("Unable to allocate memory for"
- " struct se_subsystem_dev\n");
- goto unlock;
- }
- INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
- spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_pr.registration_lock);
- spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
- INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
- spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
- spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
- se_dev->t10_wwn.t10_sub_dev = se_dev;
- se_dev->t10_alua.t10_sub_dev = se_dev;
- se_dev->se_dev_attrib.da_sub_dev = se_dev;
-
- se_dev->se_dev_hba = hba;
- dev_cg = &se_dev->se_dev_group;
-
- dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
+ dev = target_alloc_device(hba, name);
+ if (!dev)
+ goto out_unlock;
+
+ dev_cg = &dev->dev_group;
+
+ dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!dev_cg->default_groups)
- goto out;
- /*
- * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
- * for ->allocate_virtdevice()
- *
- * se_dev->se_dev_ptr will be set after ->create_virtdev()
- * has been called successfully in the next level up in the
- * configfs tree for device object's struct config_group.
- */
- se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate subsystem dependent pointer"
- " from allocate_virtdevice()\n");
- goto out;
- }
+ goto out_free_device;
- config_group_init_type_name(&se_dev->se_dev_group, name,
- &target_core_dev_cit);
- config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+ config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+ config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
&target_core_dev_attrib_cit);
- config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+ config_group_init_type_name(&dev->dev_pr_group, "pr",
&target_core_dev_pr_cit);
- config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+ config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
&target_core_dev_wwn_cit);
- config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+ config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
"alua", &target_core_alua_tg_pt_gps_cit);
- config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+ config_group_init_type_name(&dev->dev_stat_grps.stat_group,
"statistics", &target_core_stat_cit);
- dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
- dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
- dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
- dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
- dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+ dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
+ dev_cg->default_groups[1] = &dev->dev_pr_group;
+ dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
+ dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
+ dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
dev_cg->default_groups[5] = NULL;
/*
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
- tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
if (!tg_pt_gp)
- goto out;
+ goto out_free_dev_cg_default_groups;
+ dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
- tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
- tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
+ tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!tg_pt_gp_cg->default_groups) {
pr_err("Unable to allocate tg_pt_gp_cg->"
"default_groups\n");
- goto out;
+ goto out_free_tg_pt_gp;
}
config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
- se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
- dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
+ dev_stat_grp = &dev->dev_stat_grps.stat_group;
+ dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
GFP_KERNEL);
if (!dev_stat_grp->default_groups) {
pr_err("Unable to allocate dev_stat_grp->default_groups\n");
- goto out;
+ goto out_free_tg_pt_gp_cg_default_groups;
}
- target_stat_setup_dev_default_groups(se_dev);
-
- pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
- " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+ target_stat_setup_dev_default_groups(dev);
mutex_unlock(&hba->hba_access_mutex);
- return &se_dev->se_dev_group;
-out:
- if (se_dev->t10_alua.default_tg_pt_gp) {
- core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
- se_dev->t10_alua.default_tg_pt_gp = NULL;
- }
- if (dev_stat_grp)
- kfree(dev_stat_grp->default_groups);
- if (tg_pt_gp_cg)
- kfree(tg_pt_gp_cg->default_groups);
- if (dev_cg)
- kfree(dev_cg->default_groups);
- if (se_dev->se_dev_su_ptr)
- t->free_device(se_dev->se_dev_su_ptr);
- kfree(se_dev);
-unlock:
+ return dev_cg;
+
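+/* Unwind in the reverse order of the setup above. */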
+out_free_tg_pt_gp_cg_default_groups:
+ kfree(tg_pt_gp_cg->default_groups);
+out_free_tg_pt_gp:
+ core_alua_free_tg_pt_gp(tg_pt_gp);
+out_free_dev_cg_default_groups:
+ kfree(dev_cg->default_groups);
+out_free_device:
+ target_free_device(dev);
+out_unlock:
mutex_unlock(&hba->hba_access_mutex);
return ERR_PTR(errno);
}
@@ -2862,20 +2879,19 @@ static void target_core_drop_subdev(
struct config_group *group,
struct config_item *item)
{
- struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
- struct se_subsystem_dev, se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct se_hba *hba;
- struct se_subsystem_api *t;
struct config_item *df_item;
- struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
+ struct config_group *tg_pt_gp_cg, *dev_stat_grp;
int i;
- hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+ hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
mutex_lock(&hba->hba_access_mutex);
- t = hba->transport;
- dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+ dev_stat_grp = &dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
dev_stat_grp->default_groups[i] = NULL;
@@ -2883,7 +2899,7 @@ static void target_core_drop_subdev(
}
kfree(dev_stat_grp->default_groups);
- tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2894,17 +2910,15 @@ static void target_core_drop_subdev(
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
- se_dev->t10_alua.default_tg_pt_gp = NULL;
+ dev->t10_alua.default_tg_pt_gp = NULL;
- dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
df_item = &dev_cg->default_groups[i]->cg_item;
dev_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
/*
- * The releasing of se_dev and associated se_dev->se_dev_ptr is done
- * from target_core_dev_item_ops->release() ->target_core_dev_release().
+	 * The se_device is released from target_core_dev_item_ops->release().
*/
config_item_put(item);
mutex_unlock(&hba->hba_access_mutex);
@@ -2961,19 +2975,16 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
if (transport->pmode_enable_hba == NULL)
return -EINVAL;
- ret = strict_strtoul(page, 0, &mode_flag);
+ ret = kstrtoul(page, 0, &mode_flag);
if (ret < 0) {
pr_err("Unable to extract hba mode flag: %d\n", ret);
- return -EINVAL;
+ return ret;
}
- spin_lock(&hba->device_lock);
- if (!list_empty(&hba->hba_dev_list)) {
+ if (hba->dev_count) {
pr_err("Unable to set hba_mode with active devices\n");
- spin_unlock(&hba->device_lock);
return -EINVAL;
}
- spin_unlock(&hba->device_lock);
ret = transport->pmode_enable_hba(hba, mode_flag);
if (ret < 0)
@@ -3055,11 +3066,11 @@ static struct config_group *target_core_call_addhbatotarget(
str++; /* Skip to start of plugin dependent ID */
}
- ret = strict_strtoul(str, 0, &plugin_dep_id);
+ ret = kstrtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" plugin_dep_id\n", ret);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(ret);
}
/*
* Load up TCM subsystem plugins if they have not already been loaded.
@@ -3117,8 +3128,6 @@ static int __init target_core_init_configfs(void)
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
- INIT_LIST_HEAD(&g_tf_list);
- mutex_init(&g_tf_lock);
ret = init_se_kmem_caches();
if (ret < 0)
return ret;
@@ -3127,10 +3136,11 @@ static int __init target_core_init_configfs(void)
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
target_cg = &subsys->su_group;
- target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!target_cg->default_groups) {
pr_err("Unable to allocate target_cg->default_groups\n");
+ ret = -ENOMEM;
goto out_global;
}
@@ -3142,10 +3152,11 @@ static int __init target_core_init_configfs(void)
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
hba_cg = &target_core_hbagroup;
- hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!hba_cg->default_groups) {
pr_err("Unable to allocate hba_cg->default_groups\n");
+ ret = -ENOMEM;
goto out_global;
}
config_group_init_type_name(&alua_group,
@@ -3157,10 +3168,11 @@ static int __init target_core_init_configfs(void)
* groups under /sys/kernel/config/target/core/alua/
*/
alua_cg = &alua_group;
- alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!alua_cg->default_groups) {
pr_err("Unable to allocate alua_cg->default_groups\n");
+ ret = -ENOMEM;
goto out_global;
}
@@ -3172,14 +3184,17 @@ static int __init target_core_init_configfs(void)
* Add core/alua/lu_gps/default_lu_gp
*/
lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
- if (IS_ERR(lu_gp))
+ if (IS_ERR(lu_gp)) {
+ ret = -ENOMEM;
goto out_global;
+ }
lu_gp_cg = &alua_lu_gps_group;
- lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lu_gp_cg->default_groups) {
pr_err("Unable to allocate lu_gp_cg->default_groups\n");
+ ret = -ENOMEM;
goto out_global;
}
@@ -3207,7 +3222,12 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
- if (core_dev_setup_virtual_lun0() < 0)
+ ret = core_dev_setup_virtual_lun0();
+ if (ret < 0)
+ goto out;
+
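+	/* Set up the passthrough session used to drive EXTENDED_COPY I/O. */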
+ ret = target_xcopy_setup_pt();
+ if (ret < 0)
goto out;
return 0;
@@ -3282,6 +3302,7 @@ static void __exit target_core_exit_configfs(void)
core_dev_release_virtual_lun0();
rd_module_exit();
+ target_xcopy_release_pt();
release_se_kmem_caches();
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 0b25b50900e..98da9016715 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -4,10 +4,7 @@
* This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -50,45 +47,39 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-static void se_dev_start(struct se_device *dev);
-static void se_dev_stop(struct se_device *dev);
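+/* Global registry of all configured backend devices (e.g. for XCOPY lookup). */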
+DEFINE_MUTEX(g_device_mutex);
+LIST_HEAD(g_device_list);
static struct se_hba *lun0_hba;
-static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
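+/*
+ * LUN lookup failures are now reported as sense_reason_t codes instead of
+ * errno values plus SCF_SCSI_CDB_EXCEPTION flagging on the command.
+ */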
-int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+sense_reason_t
+transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
struct se_device *dev;
unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
+ return TCM_NON_EXISTENT_LUN;
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
- se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
struct se_dev_entry *deve = se_cmd->se_deve;
deve->total_cmds++;
- deve->total_bytes += se_cmd->data_length;
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
- return -EACCES;
+ return TCM_WRITE_PROTECTED;
}
if (se_cmd->data_direction == DMA_TO_DEVICE)
@@ -96,13 +87,14 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
deve->read_bytes += se_cmd->data_length;
- deve->deve_cmds++;
-
se_lun = deve->se_lun;
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+
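+	/* Pin the LUN; the reference is dropped at command completion. */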
+ percpu_ref_get(&se_lun->lun_ref);
+ se_cmd->lun_ref_active = true;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -113,55 +105,37 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
* MappedLUN=0 exists for this Initiator Port.
*/
if (unpacked_lun != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- return -ENODEV;
+ return TCM_NON_EXISTENT_LUN;
}
/*
* Force WRITE PROTECT for virtual LUN 0
*/
if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -EACCES;
- }
+ (se_cmd->data_direction != DMA_NONE))
+ return TCM_WRITE_PROTECTED;
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
- }
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
+
+ percpu_ref_get(&se_lun->lun_ref);
+ se_cmd->lun_ref_active = true;
}
/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;
- /* TODO: get rid of this and use atomics for stats */
dev = se_lun->lun_se_dev;
- spin_lock_irqsave(&dev->stats_lock, flags);
- dev->num_cmds++;
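+	/* Per-device stats are lockless atomic counters now. */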
+ atomic_long_inc(&dev->num_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- dev->write_bytes += se_cmd->data_length;
+ atomic_long_add(se_cmd->data_length, &dev->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- dev->read_bytes += se_cmd->data_length;
- spin_unlock_irqrestore(&dev->stats_lock, flags);
-
- spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
- list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
- spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+ atomic_long_add(se_cmd->data_length, &dev->read_bytes);
return 0;
}
@@ -175,14 +149,11 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
return -ENODEV;
- }
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
- se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
deve = se_cmd->se_deve;
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
@@ -199,15 +170,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -ENODEV;
}
@@ -240,7 +202,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &nacl->device_list[i];
+ deve = nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
@@ -263,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
continue;
atomic_inc(&deve->pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock_irq(&nacl->device_list_lock);
return deve;
@@ -286,7 +248,7 @@ int core_free_device_list_for_node(
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &nacl->device_list[i];
+ deve = nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
@@ -300,29 +262,18 @@ int core_free_device_list_for_node(
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
spin_unlock_irq(&nacl->device_list_lock);
- kfree(nacl->device_list);
+ array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
nacl->device_list = NULL;
return 0;
}
-void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
-{
- struct se_dev_entry *deve;
- unsigned long flags;
-
- spin_lock_irqsave(&se_nacl->device_list_lock, flags);
- deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
- deve->deve_cmds--;
- spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
-}
-
void core_update_device_list_access(
u32 mapped_lun,
u32 lun_access,
@@ -331,7 +282,7 @@ void core_update_device_list_access(
struct se_dev_entry *deve;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[mapped_lun];
+ deve = nacl->device_list[mapped_lun];
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
@@ -342,72 +293,46 @@ void core_update_device_list_access(
spin_unlock_irq(&nacl->device_list_lock);
}
-/* core_update_device_list_for_node():
+/* core_enable_device_list_for_node():
*
*
*/
-int core_update_device_list_for_node(
+int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u32 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl,
- struct se_portal_group *tpg,
- int enable)
+ struct se_portal_group *tpg)
{
struct se_port *port = lun->lun_sep;
- struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
- int trans = 0;
- /*
- * If the MappedLUN entry is being disabled, the entry in
- * port->sep_alua_list must be removed now before clearing the
- * struct se_dev_entry pointers below as logic in
- * core_alua_do_transition_tg_pt() depends on these being present.
- */
- if (!enable) {
- /*
- * deve->se_lun_acl will be NULL for demo-mode created LUNs
- * that have not been explicitly concerted to MappedLUNs ->
- * struct se_lun_acl, but we remove deve->alua_port_list from
- * port->sep_alua_list. This also means that active UAs and
- * NodeACL context specific PR metadata for demo-mode
- * MappedLUN *deve will be released below..
- */
- spin_lock_bh(&port->sep_alua_lock);
- list_del(&deve->alua_port_list);
- spin_unlock_bh(&port->sep_alua_lock);
- }
+ struct se_dev_entry *deve;
spin_lock_irq(&nacl->device_list_lock);
- if (enable) {
- /*
- * Check if the call is handling demo mode -> explict LUN ACL
- * transition. This transition must be for the same struct se_lun
- * + mapped_lun that was setup in demo mode..
- */
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (deve->se_lun_acl != NULL) {
- pr_err("struct se_dev_entry->se_lun_acl"
- " already set for demo mode -> explict"
- " LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- if (deve->se_lun != lun) {
- pr_err("struct se_dev_entry->se_lun does"
- " match passed struct se_lun for demo mode"
- " -> explict LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- deve->se_lun_acl = lun_acl;
- trans = 1;
- } else {
- deve->se_lun = lun;
- deve->se_lun_acl = lun_acl;
- deve->mapped_lun = mapped_lun;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+
+ deve = nacl->device_list[mapped_lun];
+
+ /*
+ * Check if the call is handling demo mode -> explicit LUN ACL
+ * transition. This transition must be for the same struct se_lun
+ * + mapped_lun that was setup in demo mode..
+ */
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ if (deve->se_lun_acl != NULL) {
+ pr_err("struct se_dev_entry->se_lun_acl"
+ " already set for demo mode -> explicit"
+ " LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -EINVAL;
}
+ if (deve->se_lun != lun) {
+			pr_err("struct se_dev_entry->se_lun does"
+			" not match passed struct se_lun for demo mode"
+ " -> explicit LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -EINVAL;
+ }
+ deve->se_lun_acl = lun_acl;
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
@@ -417,27 +342,72 @@ int core_update_device_list_for_node(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
- if (trans) {
- spin_unlock_irq(&nacl->device_list_lock);
- return 0;
- }
- deve->creation_time = get_jiffies_64();
- deve->attach_count++;
spin_unlock_irq(&nacl->device_list_lock);
+ return 0;
+ }
- spin_lock_bh(&port->sep_alua_lock);
- list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
- spin_unlock_bh(&port->sep_alua_lock);
+ deve->se_lun = lun;
+ deve->se_lun_acl = lun_acl;
+ deve->mapped_lun = mapped_lun;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
- return 0;
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
+
+ deve->creation_time = get_jiffies_64();
+ deve->attach_count++;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ return 0;
+}
+
+/* core_disable_device_list_for_node():
+ *
+ * Tear down a MappedLUN entry: remove it from port->sep_alua_list first,
+ * wait out any in-flight PR references, then clear the se_dev_entry
+ * LUN ACL mapping.
+ */
+int core_disable_device_list_for_node(
+ struct se_lun *lun,
+ struct se_lun_acl *lun_acl,
+ u32 mapped_lun,
+ u32 lun_access,
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg)
+{
+ struct se_port *port = lun->lun_sep;
+ struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+
+ /*
+ * If the MappedLUN entry is being disabled, the entry in
+ * port->sep_alua_list must be removed now before clearing the
+ * struct se_dev_entry pointers below as logic in
+ * core_alua_do_transition_tg_pt() depends on these being present.
+ *
+ * deve->se_lun_acl will be NULL for demo-mode created LUNs
+ * that have not been explicitly converted to MappedLUNs ->
+ * struct se_lun_acl, but we remove deve->alua_port_list from
+ * port->sep_alua_list. This also means that active UAs and
+ * NodeACL context specific PR metadata for demo-mode
+ * MappedLUN *deve will be released below..
+ */
+ spin_lock_bh(&port->sep_alua_lock);
+ list_del(&deve->alua_port_list);
+ spin_unlock_bh(&port->sep_alua_lock);
/*
* Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
* PR operation to complete.
*/
- spin_unlock_irq(&nacl->device_list_lock);
while (atomic_read(&deve->pr_ref_count) != 0)
cpu_relax();
+
spin_lock_irq(&nacl->device_list_lock);
/*
* Disable struct se_dev_entry LUN ACL mapping
@@ -470,14 +440,14 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &nacl->device_list[i];
+ deve = nacl->device_list[i];
if (lun != deve->se_lun)
continue;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL,
+ core_disable_device_list_for_node(lun, NULL,
deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
- nacl, tpg, 0);
+ nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
@@ -512,7 +482,7 @@ static struct se_port *core_alloc_port(struct se_device *dev)
}
again:
/*
- * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device
+ * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
* Here is the table from spc4r17 section 7.7.3.8.
*
* Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
@@ -529,7 +499,7 @@ again:
list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
/*
- * Make sure RELATIVE TARGET PORT IDENTIFER is unique
+ * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
* for 16-bit wrap..
*/
if (port->sep_rtpi == port_tmp->sep_rtpi)
@@ -546,7 +516,6 @@ static void core_export_port(
struct se_port *port,
struct se_lun *lun)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
@@ -559,7 +528,8 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
- if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
pr_err("Unable to allocate t10_alua_tg_pt"
@@ -568,7 +538,7 @@ static void core_export_port(
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
@@ -576,7 +546,7 @@ static void core_export_port(
}
dev->dev_port_count++;
- port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */
+ port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
@@ -606,6 +576,7 @@ int core_dev_export(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port;
port = core_alloc_port(dev);
@@ -613,9 +584,11 @@ int core_dev_export(
return PTR_ERR(port);
lun->lun_se_dev = dev;
- se_dev_start(dev);
- atomic_inc(&dev->dev_export_obj.obj_access_count);
+ spin_lock(&hba->device_lock);
+ dev->export_count++;
+ spin_unlock(&hba->device_lock);
+
core_export_port(dev, tpg, port, lun);
return 0;
}
@@ -625,6 +598,7 @@ void core_dev_unexport(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port = lun->lun_sep;
spin_lock(&lun->lun_sep_lock);
@@ -635,278 +609,55 @@ void core_dev_unexport(
spin_unlock(&lun->lun_sep_lock);
spin_lock(&dev->se_port_lock);
- atomic_dec(&dev->dev_export_obj.obj_access_count);
core_release_port(dev, port);
spin_unlock(&dev->se_port_lock);
- se_dev_stop(dev);
- lun->lun_se_dev = NULL;
-}
-
-int target_report_luns(struct se_task *se_task)
-{
- struct se_cmd *se_cmd = se_task->task_se_cmd;
- struct se_dev_entry *deve;
- struct se_lun *se_lun;
- struct se_session *se_sess = se_cmd->se_sess;
- unsigned char *buf;
- u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
-
- buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
-
- /*
- * If no struct se_session pointer is present, this struct se_cmd is
- * coming via a target_core_mod PASSTHROUGH op, and not through
- * a $FABRIC_MOD. In that case, report LUN=0 only.
- */
- if (!se_sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
- goto done;
- }
-
- spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &se_sess->se_node_acl->device_list[i];
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
- se_lun = deve->se_lun;
- /*
- * We determine the correct LUN LIST LENGTH even once we
- * have reached the initial allocation length.
- * See SPC2-R20 7.19.
- */
- lun_count++;
- if ((cdb_offset + 8) >= se_cmd->data_length)
- continue;
-
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
- offset += 8;
- cdb_offset += 8;
- }
- spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
-
- /*
- * See SPC3 r07, page 159.
- */
-done:
- transport_kunmap_data_sg(se_cmd);
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
-
- se_task->task_scsi_status = GOOD;
- transport_complete_task(se_task, 1);
- return 0;
-}
-
-/* se_release_device_for_hba():
- *
- *
- */
-void se_release_device_for_hba(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
- se_dev_stop(dev);
-
- if (dev->dev_ptr) {
- kthread_stop(dev->process_thread);
- if (dev->transport->free_device)
- dev->transport->free_device(dev->dev_ptr);
- }
-
spin_lock(&hba->device_lock);
- list_del(&dev->dev_list);
- hba->dev_count--;
+ dev->export_count--;
spin_unlock(&hba->device_lock);
- core_scsi3_free_all_registrations(dev);
- se_release_vpd_for_dev(dev);
-
- kfree(dev);
+ lun->lun_sep = NULL;
+ lun->lun_se_dev = NULL;
}
-void se_release_vpd_for_dev(struct se_device *dev)
+static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
- spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
+ spin_lock(&dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
- &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
+ &dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
- spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
-}
-
-/* se_free_virtual_device():
- *
- * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
- */
-int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
-{
- if (!list_empty(&dev->dev_sep_list))
- dump_stack();
-
- core_alua_free_lu_gp_mem(dev);
- se_release_device_for_hba(dev);
-
- return 0;
-}
-
-static void se_dev_start(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_inc(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
- if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
- dev->dev_status &=
- ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
+ spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
-static void se_dev_stop(struct se_device *dev)
+static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_dec(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
- if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
-}
-
-int se_dev_check_online(struct se_device *dev)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dev->dev_status_lock, flags);
- ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
- spin_unlock_irqrestore(&dev->dev_status_lock, flags);
-
- return ret;
-}
-
-int se_dev_check_shutdown(struct se_device *dev)
-{
- int ret;
-
- spin_lock_irq(&dev->dev_status_lock);
- ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
- spin_unlock_irq(&dev->dev_status_lock);
-
- return ret;
-}
-
-u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
-{
- u32 tmp, aligned_max_sectors;
+ u32 aligned_max_sectors;
+ u32 alignment;
/*
* Limit max_sectors to a PAGE_SIZE aligned value for modern
* transport_allocate_data_tasks() operation.
*/
- tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
- aligned_max_sectors = (tmp / block_size);
- if (max_sectors != aligned_max_sectors) {
- printk(KERN_INFO "Rounding down aligned max_sectors from %u"
- " to %u\n", max_sectors, aligned_max_sectors);
- return aligned_max_sectors;
- }
+ alignment = max(1ul, PAGE_SIZE / block_size);
+ aligned_max_sectors = rounddown(max_sectors, alignment);
- return max_sectors;
-}
+ if (max_sectors != aligned_max_sectors)
+ pr_info("Rounding down aligned max_sectors from %u to %u\n",
+ max_sectors, aligned_max_sectors);
-void se_dev_set_default_attribs(
- struct se_device *dev,
- struct se_dev_limits *dev_limits)
-{
- struct queue_limits *limits = &dev_limits->limits;
-
- dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
- dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
- dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
- dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
- dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
- dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
- dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
- /*
- * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
- * iblock_create_virtdevice() from struct queue_limits values
- * if blk_queue_discard()==1
- */
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
- DA_MAX_UNMAP_BLOCK_DESC_COUNT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
- DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
- /*
- * block_size is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
- dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
- /*
- * max_sectors is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
- limits->logical_block_size);
- dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
- /*
- * Set optimal_sectors from max_sectors, which can be lowered via
- * configfs.
- */
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
- /*
- * queue_depth is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
- dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
+ return aligned_max_sectors;
}
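/*
 * Editor's note: worked example for se_dev_align_max_sectors(), assuming
 * PAGE_SIZE == 4096. A standalone sketch of the same arithmetic:
 */
#include <stdio.h>

static unsigned int align_max_sectors(unsigned int max_sectors,
				      unsigned int block_size,
				      unsigned long page_size)
{
	/* alignment = max(1, PAGE_SIZE / block_size), as above */
	unsigned long alignment = page_size / block_size;

	if (!alignment)
		alignment = 1;
	return max_sectors - (max_sectors % alignment);	/* rounddown() */
}

int main(void)
{
	/* 512-byte blocks: alignment is 8, so 1023 rounds down to 1016 */
	printf("%u\n", align_max_sectors(1023, 512, 4096));
	/* block_size >= PAGE_SIZE: alignment clamps to 1, value unchanged */
	printf("%u\n", align_max_sectors(1023, 4096, 4096));
	return 0;
}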
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+ dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
+ dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}
@@ -914,10 +665,10 @@ int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ dev->dev_attrib.max_unmap_block_desc_count =
max_unmap_block_desc_count;
pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
+ dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}
@@ -925,9 +676,9 @@ int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+ dev->dev_attrib.unmap_granularity = unmap_granularity;
pr_debug("dev[%p]: Set unmap_granularity: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
+ dev, dev->dev_attrib.unmap_granularity);
return 0;
}
@@ -935,9 +686,57 @@ int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+ dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
+ dev, dev->dev_attrib.unmap_granularity_alignment);
+ return 0;
+}
+
+int se_dev_set_max_write_same_len(
+ struct se_device *dev,
+ u32 max_write_same_len)
+{
+ dev->dev_attrib.max_write_same_len = max_write_same_len;
+ pr_debug("dev[%p]: Set max_write_same_len: %u\n",
+ dev, dev->dev_attrib.max_write_same_len);
+ return 0;
+}
+
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+ const char *configname;
+
+ configname = config_item_name(&dev->dev_group.cg_item);
+ if (strlen(configname) >= 16) {
+ pr_warn("dev[%p]: Backstore name '%s' is too long for "
+ "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+ configname);
+ }
+ snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
+{
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change model alias"
+ " while export_count is %d\n",
+ dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ if (flag != 0 && flag != 1) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ if (flag) {
+ dev_set_t10_wwn_model_alias(dev);
+ } else {
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ }
+ dev->dev_attrib.emulate_model_alias = flag;
+
return 0;
}
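/*
 * Editor's note: hedged usage sketch; paths are illustrative, adjust for
 * your fabric/backstore. With the code above, the alias is toggled via
 * configfs, e.g.:
 *
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/attrib/emulate_model_alias
 *
 * 1 copies the configfs item name ("disk0") into the INQUIRY model field,
 * truncated to 16 bytes; 0 restores dev->transport->inquiry_prod. The
 * write fails with -EINVAL while export_count is non-zero.
 */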
@@ -963,13 +762,14 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (flag && dev->transport->fua_write_emulated == 0) {
- pr_err("fua_write_emulated not supported\n");
+ if (flag &&
+ dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("emulate_fua_write not supported for pSCSI\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+ dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
+ dev, dev->dev_attrib.emulate_fua_write);
return 0;
}
@@ -994,13 +794,20 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (flag && dev->transport->write_cache_emulated == 0) {
- pr_err("write_cache_emulated not supported\n");
+ if (flag &&
+ dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("emulate_write_cache not supported for pSCSI\n");
+ return -EINVAL;
+ }
+ if (flag &&
+ dev->transport->get_write_cache) {
+ pr_err("emulate_write_cache not supported for this device\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+
+ dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
+ dev, dev->dev_attrib.emulate_write_cache);
return 0;
}
@@ -1011,16 +818,15 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
return -EINVAL;
}
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " UA_INTRLCK_CTRL while dev_export_obj: %d count"
- " exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " UA_INTRLCK_CTRL while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
+ dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
return 0;
}
@@ -1032,15 +838,15 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
return -EINVAL;
}
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TAS while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+ dev->dev_attrib.emulate_tas = flag;
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
+ dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
@@ -1055,12 +861,12 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+ dev->dev_attrib.emulate_tpu = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
@@ -1076,26 +882,140 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+ dev->dev_attrib.emulate_tpws = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
}
+int se_dev_set_emulate_caw(struct se_device *dev, int flag)
+{
+ if (flag != 0 && flag != 1) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+ dev->dev_attrib.emulate_caw = flag;
+ pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
+ dev, flag);
+
+ return 0;
+}
+
+int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
+{
+ if (flag != 0 && flag != 1) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+ dev->dev_attrib.emulate_3pc = flag;
+ pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
+ dev, flag);
+
+ return 0;
+}
+
+int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
+{
+ int rc, old_prot = dev->dev_attrib.pi_prot_type;
+
+ if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+ pr_err("Illegal value %d for pi_prot_type\n", flag);
+ return -EINVAL;
+ }
+ if (flag == 2) {
+ pr_err("DIF TYPE2 protection currently not supported\n");
+ return -ENOSYS;
+ }
+ if (dev->dev_attrib.hw_pi_prot_type) {
+ pr_warn("DIF protection enabled on underlying hardware,"
+ " ignoring\n");
+ return 0;
+ }
+ if (!dev->transport->init_prot || !dev->transport->free_prot) {
+		/* 0 is the only allowed value for non-supporting backends */
+ if (flag == 0)
+ return 0;
+
+ pr_err("DIF protection not supported by backend: %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ dev->dev_attrib.pi_prot_type = flag;
+
+ if (flag && !old_prot) {
+ rc = dev->transport->init_prot(dev);
+ if (rc) {
+ dev->dev_attrib.pi_prot_type = old_prot;
+ return rc;
+ }
+
+ } else if (!flag && old_prot) {
+ dev->transport->free_prot(dev);
+ }
+ pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+
+ return 0;
+}
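/*
 * Editor's note: hedged summary of the transition logic above. Writes to
 * pi_prot_type behave as a small state machine:
 *
 *   0 -> {1,3}: backend init_prot() runs; on failure the old value is
 *               restored and the error propagated.
 *   {1,3} -> 0: backend free_prot() releases protection state.
 *   flag == 2:  DIF TYPE2 is rejected with -ENOSYS.
 *
 * Backends lacking init_prot/free_prot accept only 0, and a non-zero
 * hw_pi_prot_type (hardware DIF) short-circuits the software path.
 */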
+
+int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
+{
+ int rc;
+
+ if (!flag)
+ return 0;
+
+ if (flag != 1) {
+ pr_err("Illegal value %d for pi_prot_format\n", flag);
+ return -EINVAL;
+ }
+ if (!dev->transport->format_prot) {
+ pr_err("DIF protection format not supported by backend %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection format requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to format SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ rc = dev->transport->format_prot(dev);
+ if (rc)
+ return rc;
+
+ pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+
+ return 0;
+}
+
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+ dev->dev_attrib.enforce_pr_isids = flag;
pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+ (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
@@ -1105,7 +1025,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
printk(KERN_ERR "Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+ dev->dev_attrib.is_nonrot = flag;
pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
dev, flag);
return 0;
@@ -1118,7 +1038,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
" reordering not implemented\n", dev);
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+ dev->dev_attrib.emulate_rest_reord = flag;
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
@@ -1128,10 +1048,10 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (!queue_depth) {
@@ -1141,73 +1061,65 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
} else {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
}
}
- dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
+ dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
-int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
- int force = 0; /* Force setting for VDEVS */
+ int block_size = dev->dev_attrib.block_size;
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " max_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " fabric_max_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- if (!max_sectors) {
+ if (!fabric_max_sectors) {
pr_err("dev[%p]: Illegal ZERO value for"
- " max_sectors\n", dev);
+ " fabric_max_sectors\n", dev);
return -EINVAL;
}
- if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- pr_err("dev[%p]: Passed max_sectors: %u less than"
- " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
+ if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
+ " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
DA_STATUS_MAX_SECTORS_MIN);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
- pr_err("dev[%p]: Passed max_sectors: %u"
+ if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
- " %u\n", dev, max_sectors,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ " %u\n", dev, fabric_max_sectors,
+ dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
} else {
- if (!force && (max_sectors >
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
- pr_err("dev[%p]: Passed max_sectors: %u"
- " greater than TCM/SE_Device max_sectors"
- ": %u, use force=1 to override.\n", dev,
- max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
- return -EINVAL;
- }
- if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed max_sectors: %u"
+ if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, max_sectors,
+ " %u\n", dev, fabric_max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
return -EINVAL;
}
@@ -1215,21 +1127,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
*/
- max_sectors = se_dev_align_max_sectors(max_sectors,
- dev->se_sub_dev->se_dev_attrib.block_size);
+ if (!block_size) {
+ block_size = 512;
+ pr_warn("Defaulting to 512 for zero block_size\n");
+ }
+ fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
+ block_size);
- dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
+ dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
- dev, max_sectors);
+ dev, fabric_max_sectors);
return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " optimal_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " optimal_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
@@ -1237,14 +1153,14 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than max_sectors: %u\n", dev,
- optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
+ " greater than fabric_max_sectors: %u\n", dev,
+ optimal_sectors, dev->dev_attrib.fabric_max_sectors);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+ dev->dev_attrib.optimal_sectors = optimal_sectors;
pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
@@ -1252,10 +1168,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device block_size"
- " while dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
@@ -1276,45 +1192,38 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+ dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
+
+ if (dev->dev_attrib.max_bytes_per_io)
+ dev->dev_attrib.hw_max_sectors =
+ dev->dev_attrib.max_bytes_per_io / block_size;
+
return 0;
}
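/*
 * Editor's note: worked example, assuming FILEIO's FD_MAX_BYTES of
 * 8388608 (8 MiB) feeds max_bytes_per_io (see target_core_file.c below).
 * Changing block_size then rescales hw_max_sectors so the per-I/O byte
 * budget stays constant:
 *
 *   8388608 / 512  == 16384 sectors
 *   8388608 / 4096 ==  2048 sectors
 */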
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
- struct se_hba *hba,
struct se_device *dev,
- u32 lun)
+ u32 unpacked_lun)
{
- struct se_lun *lun_p;
- u32 lun_access = 0;
+ struct se_lun *lun;
int rc;
- if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
- pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
- atomic_read(&dev->dev_access_obj.obj_access_count));
- return ERR_PTR(-EACCES);
- }
-
- lun_p = core_tpg_pre_addlun(tpg, lun);
- if (IS_ERR(lun_p))
- return lun_p;
-
- if (dev->dev_flags & DF_READ_ONLY)
- lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
- else
- lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ lun = core_tpg_alloc_lun(tpg, unpacked_lun);
+ if (IS_ERR(lun))
+ return lun;
- rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+ rc = core_tpg_add_lun(tpg, lun,
+ TRANSPORT_LUNFLAGS_READ_WRITE, dev);
if (rc < 0)
return ERR_PTR(rc);
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
- tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
@@ -1334,7 +1243,7 @@ struct se_lun *core_dev_add_lun(
spin_unlock_irq(&tpg->acl_node_lock);
}
- return lun_p;
+ return lun;
}
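/*
 * Editor's note: behavioral observation, hedged. The removed path mapped
 * DF_READ_ONLY to TRANSPORT_LUNFLAGS_READ_ONLY; the rewrite always
 * registers the LUN as TRANSPORT_LUNFLAGS_READ_WRITE and leaves write
 * protection to the per-ACL mapped-LUN attributes (see
 * target_fabric_mappedlun_store_write_protect() later in this patch).
 */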
/* core_dev_del_lun():
@@ -1375,7 +1284,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
- lun = &tpg->tpg_lun_list[unpacked_lun];
+ lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
pr_err("%s Logical Unit Number: %u is not free on"
@@ -1408,7 +1317,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
- lun = &tpg->tpg_lun_list[unpacked_lun];
+ lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
pr_err("%s Logical Unit Number: %u is not active on"
@@ -1425,24 +1334,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
+ struct se_node_acl *nacl,
u32 mapped_lun,
- char *initiatorname,
int *ret)
{
struct se_lun_acl *lacl;
- struct se_node_acl *nacl;
- if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+ if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
- nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!nacl) {
- *ret = -EINVAL;
- return NULL;
- }
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!lacl) {
pr_err("Unable to allocate memory for struct se_lun_acl.\n");
@@ -1453,7 +1356,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
INIT_LIST_HEAD(&lacl->lacl_list);
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
+ nacl->initiatorname);
return lacl;
}
@@ -1486,14 +1390,14 @@ int core_dev_add_initiator_node_lun_acl(
lacl->se_lun = lun;
- if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
- lun_access, nacl, tpg, 1) < 0)
+ if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
+ lun_access, nacl, tpg) < 0)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
atomic_inc(&lun->lun_acl_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&lun->lun_acl_lock);
pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1527,11 +1431,11 @@ int core_dev_del_initiator_node_lun_acl(
spin_lock(&lun->lun_acl_lock);
list_del(&lacl->lacl_list);
atomic_dec(&lun->lun_acl_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
spin_unlock(&lun->lun_acl_lock);
- core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
lacl->se_lun = NULL;
@@ -1557,73 +1461,267 @@ void core_dev_free_initiator_node_lun_acl(
kfree(lacl);
}
-int core_dev_setup_virtual_lun0(void)
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+ struct t10_wwn *wwn = &dev->t10_wwn;
+ char buf[17];
+ int i, device_type;
+ /*
+ * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+ */
+ for (i = 0; i < 8; i++)
+ if (wwn->vendor[i] >= 0x20)
+ buf[i] = wwn->vendor[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Vendor: %s\n", buf);
+
+ for (i = 0; i < 16; i++)
+ if (wwn->model[i] >= 0x20)
+ buf[i] = wwn->model[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Model: %s\n", buf);
+
+ for (i = 0; i < 4; i++)
+ if (wwn->revision[i] >= 0x20)
+ buf[i] = wwn->revision[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Revision: %s\n", buf);
+
+ device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Type: %s ", scsi_device_type(device_type));
+}
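/*
 * Editor's note: illustrative only; exact strings come from
 * dev->transport->inquiry_prod/inquiry_rev. For a virtual backend
 * preloaded with the LIO-ORG defaults (see target_configure_device()
 * below), the dump reads roughly:
 *
 *     Vendor: LIO-ORG
 *     Model: FILEIO
 *     Revision: 4.0
 *     Type: Direct-Access
 */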
+
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
- struct se_hba *hba;
struct se_device *dev;
- struct se_subsystem_dev *se_dev = NULL;
- struct se_subsystem_api *t;
- char buf[16];
+ struct se_lun *xcopy_lun;
+
+ dev = hba->transport->alloc_device(hba, name);
+ if (!dev)
+ return NULL;
+
+ dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+ dev->se_hba = hba;
+ dev->transport = hba->transport;
+ dev->prot_length = sizeof(struct se_dif_v1_tuple);
+
+ INIT_LIST_HEAD(&dev->dev_list);
+ INIT_LIST_HEAD(&dev->dev_sep_list);
+ INIT_LIST_HEAD(&dev->dev_tmr_list);
+ INIT_LIST_HEAD(&dev->delayed_cmd_list);
+ INIT_LIST_HEAD(&dev->state_list);
+ INIT_LIST_HEAD(&dev->qf_cmd_list);
+ INIT_LIST_HEAD(&dev->g_dev_node);
+ spin_lock_init(&dev->execute_task_lock);
+ spin_lock_init(&dev->delayed_cmd_lock);
+ spin_lock_init(&dev->dev_reservation_lock);
+ spin_lock_init(&dev->se_port_lock);
+ spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
+ sema_init(&dev->caw_sem, 1);
+ atomic_set(&dev->dev_ordered_id, 0);
+ INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&dev->t10_pr.registration_lock);
+ spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+ spin_lock_init(&dev->t10_alua.lba_map_lock);
+
+ dev->t10_wwn.t10_dev = dev;
+ dev->t10_alua.t10_dev = dev;
+
+ dev->dev_attrib.da_dev = dev;
+ dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
+ dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+ dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
+ dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+ dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
+ dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
+ dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->dev_attrib.is_nonrot = DA_IS_NONROT;
+ dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
+ dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ dev->dev_attrib.max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ dev->dev_attrib.unmap_granularity_alignment =
+ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+ dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+ dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+
+ xcopy_lun = &dev->xcopy_lun;
+ xcopy_lun->lun_se_dev = dev;
+ init_completion(&xcopy_lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
+ spin_lock_init(&xcopy_lun->lun_acl_lock);
+ spin_lock_init(&xcopy_lun->lun_sep_lock);
+ init_completion(&xcopy_lun->lun_ref_comp);
+
+ return dev;
+}
+
+int target_configure_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
int ret;
- hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
- if (IS_ERR(hba))
- return PTR_ERR(hba);
+ if (dev->dev_flags & DF_CONFIGURED) {
+		pr_err("se_device already configured for storage"
+		       " object\n");
+ return -EEXIST;
+ }
- lun0_hba = hba;
- t = hba->transport;
+ ret = dev->transport->configure_device(dev);
+ if (ret)
+ goto out;
+ dev->dev_flags |= DF_CONFIGURED;
- se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!se_dev) {
- pr_err("Unable to allocate memory for"
- " struct se_subsystem_dev\n");
- ret = -ENOMEM;
+ /*
+ * XXX: there is not much point to have two different values here..
+ */
+ dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
+ dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
+
+ /*
+ * Align max_hw_sectors down to PAGE_SIZE I/O transfers
+ */
+ dev->dev_attrib.hw_max_sectors =
+ se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+ dev->dev_attrib.hw_block_size);
+
+ dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ dev->creation_time = get_jiffies_64();
+
+ ret = core_setup_alua(dev);
+ if (ret)
goto out;
- }
- INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
- spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_pr.registration_lock);
- spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
- INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
- spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
- spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
- se_dev->t10_wwn.t10_sub_dev = se_dev;
- se_dev->t10_alua.t10_sub_dev = se_dev;
- se_dev->se_dev_attrib.da_sub_dev = se_dev;
- se_dev->se_dev_hba = hba;
-
- se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate subsystem dependent pointer"
- " from allocate_virtdevice()\n");
+
+ /*
+ * Startup the struct se_device processing thread
+ */
+ dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ dev->transport->name);
+ if (!dev->tmr_wq) {
+ pr_err("Unable to create tmr workqueue for %s\n",
+ dev->transport->name);
ret = -ENOMEM;
- goto out;
+ goto out_free_alua;
}
- lun0_su_dev = se_dev;
- memset(buf, 0, 16);
- sprintf(buf, "rd_pages=8");
- t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+ /*
+ * Setup work_queue for QUEUE_FULL
+ */
+ INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
- dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto out;
+ /*
+ * Preload the initial INQUIRY const values if we are doing
+ * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+ * passthrough because this is being provided by the backend LLD.
+ */
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ strncpy(&dev->t10_wwn.revision[0],
+ dev->transport->inquiry_rev, 4);
}
- se_dev->se_dev_ptr = dev;
- g_lun0_dev = dev;
+
+ scsi_dump_inquiry(dev);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count++;
+ spin_unlock(&hba->device_lock);
+
+ mutex_lock(&g_device_mutex);
+ list_add_tail(&dev->g_dev_node, &g_device_list);
+ mutex_unlock(&g_device_mutex);
return 0;
+
+out_free_alua:
+ core_alua_free_lu_gp_mem(dev);
out:
- lun0_su_dev = NULL;
- kfree(se_dev);
- if (lun0_hba) {
- core_delete_hba(lun0_hba);
- lun0_hba = NULL;
+ se_release_vpd_for_dev(dev);
+ return ret;
+}
+
+void target_free_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ WARN_ON(!list_empty(&dev->dev_sep_list));
+
+ if (dev->dev_flags & DF_CONFIGURED) {
+ destroy_workqueue(dev->tmr_wq);
+
+ mutex_lock(&g_device_mutex);
+ list_del(&dev->g_dev_node);
+ mutex_unlock(&g_device_mutex);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
}
+
+ core_alua_free_lu_gp_mem(dev);
+ core_alua_set_lba_map(dev, NULL, 0, 0);
+ core_scsi3_free_all_registrations(dev);
+ se_release_vpd_for_dev(dev);
+
+ if (dev->transport->free_prot)
+ dev->transport->free_prot(dev);
+
+ dev->transport->free_device(dev);
+}
+
+int core_dev_setup_virtual_lun0(void)
+{
+ struct se_hba *hba;
+ struct se_device *dev;
+ char buf[] = "rd_pages=8,rd_nullio=1";
+ int ret;
+
+ hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
+ if (IS_ERR(hba))
+ return PTR_ERR(hba);
+
+ dev = target_alloc_device(hba, "virt_lun0");
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_free_hba;
+ }
+
+ hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
+
+ ret = target_configure_device(dev);
+ if (ret)
+ goto out_free_se_dev;
+
+ lun0_hba = hba;
+ g_lun0_dev = dev;
+ return 0;
+
+out_free_se_dev:
+ target_free_device(dev);
+out_free_hba:
+ core_delete_hba(hba);
return ret;
}
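/*
 * Editor's note: hedged. "rd_pages=8,rd_nullio=1" configures the internal
 * rd_mcp ramdisk backing the virtual LUN 0: eight pages of backing store,
 * with rd_nullio=1 completing I/O without touching memory. Sizing buf[]
 * from its initializer also makes sizeof(buf) cover the full string,
 * which the old fixed char buf[16] could not have held.
 */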
@@ -1631,14 +1729,11 @@ out:
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = lun0_hba;
- struct se_subsystem_dev *su_dev = lun0_su_dev;
if (!hba)
return;
if (g_lun0_dev)
- se_free_virtual_device(g_lun0_dev, hba);
-
- kfree(su_dev);
+ target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 9a2ce11e1a6..7de9f0475d0 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -4,10 +4,9 @@
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
- * Copyright (c) 2010,2011 Rising Tide Systems
- * Copyright (c) 2010,2011 Linux-iSCSI.org
+ * (c) Copyright 2010-2013 Datera, Inc.
*
- * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,7 +21,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
@@ -72,6 +70,12 @@ static int target_fabric_mappedlun_link(
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
int ret = 0, lun_access;
+
+ if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+ pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+ " %p to struct lun: %p\n", lun_ci, lun);
+ return -EFAULT;
+ }
/*
* Ensure that the source port exists
*/
@@ -108,7 +112,7 @@ static int target_fabric_mappedlun_link(
* tpg_1/attrib/demo_mode_write_protect=1
*/
spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
- deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+ deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
lun_access = deve->lun_flags;
else
@@ -137,7 +141,7 @@ static int target_fabric_mappedlun_unlink(
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
+ struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun];
struct se_portal_group *se_tpg;
/*
* Determine if the underlying MappedLUN has already been released..
@@ -168,7 +172,7 @@ static ssize_t target_fabric_mappedlun_show_write_protect(
ssize_t len;
spin_lock_irq(&se_nacl->device_list_lock);
- deve = &se_nacl->device_list[lacl->mapped_lun];
+ deve = se_nacl->device_list[lacl->mapped_lun];
len = sprintf(page, "%d\n",
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
1 : 0);
@@ -185,9 +189,11 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
unsigned long op;
+ int ret;
- if (strict_strtoul(page, 0, &op))
- return -EINVAL;
+ ret = kstrtoul(page, 0, &op);
+ if (ret)
+ return ret;
if ((op != 1) && (op != 0))
return -EINVAL;
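/*
 * Editor's note: hedged rationale for the strict_strtoul() -> kstrtoul()
 * conversions throughout this file: kstrtoul() returns 0 or a -errno that
 * can be propagated directly, preserving -ERANGE on overflow instead of
 * collapsing every failure into -EINVAL as the old code did.
 */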
@@ -346,20 +352,31 @@ static struct config_group *target_fabric_make_mappedlun(
* Determine the Mapped LUN value. This is what the SCSI Initiator
* Port will actually see.
*/
- if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+ ret = kstrtoul(buf + 4, 0, &mapped_lun);
+ if (ret)
+ goto out;
+ if (mapped_lun > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ "-1: %u for Target Portal Group: %u\n", mapped_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
ret = -EINVAL;
goto out;
}
- lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
- config_item_name(acl_ci), &ret);
+ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
+ mapped_lun, &ret);
if (!lacl) {
ret = -EINVAL;
goto out;
}
lacl_cg = &lacl->se_lun_group;
- lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
pr_err("Unable to allocate lacl_cg->default_groups\n");
@@ -368,14 +385,14 @@ static struct config_group *target_fabric_make_mappedlun(
}
config_group_init_type_name(&lacl->se_lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
+ "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
pr_err("Unable to allocate ml_stat_grp->default_groups\n");
@@ -487,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl(
nacl_cg->default_groups[4] = NULL;
config_group_init_type_name(&se_nacl->acl_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
"fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);
return &se_nacl->acl_group;
}
@@ -578,7 +595,7 @@ static struct config_group *target_fabric_make_np(
se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_np_base_cit);
return &se_tpg_np->tpg_np_group;
}
@@ -735,17 +752,26 @@ static int target_fabric_port_link(
struct config_item *se_dev_ci)
{
struct config_item *tpg_ci;
- struct se_device *dev;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_lun *lun_p;
struct se_portal_group *se_tpg;
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(se_dev_ci), struct se_subsystem_dev,
- se_dev_group);
+ struct se_device *dev =
+ container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
struct target_fabric_configfs *tf;
int ret;
+ if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+ pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+ " %p to struct se_device: %p\n", se_dev_ci, dev);
+ return -EFAULT;
+ }
+
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("se_device not configured yet, cannot port link\n");
+ return -ENODEV;
+ }
+
tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
se_tpg = container_of(to_config_group(tpg_ci),
struct se_portal_group, tpg_group);
@@ -756,16 +782,7 @@ static int target_fabric_port_link(
return -EEXIST;
}
- dev = se_dev->se_dev_ptr;
- if (!dev) {
- pr_err("Unable to locate struct se_device pointer from"
- " %s\n", config_item_name(se_dev_ci));
- ret = -ENODEV;
- goto out;
- }
-
- lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
- lun->unpacked_lun);
+ lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n");
ret = PTR_ERR(lun_p);
@@ -863,7 +880,10 @@ static struct config_group *target_fabric_make_lun(
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
- if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+ errno = kstrtoul(name + 4, 0, &unpacked_lun);
+ if (errno)
+ return ERR_PTR(errno);
+ if (unpacked_lun > UINT_MAX)
return ERR_PTR(-EINVAL);
lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
@@ -871,7 +891,7 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
- lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
pr_err("Unable to allocate lun_cg->default_groups\n");
@@ -879,14 +899,14 @@ static struct config_group *target_fabric_make_lun(
}
config_group_init_type_name(&lun->lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_port_cit);
config_group_init_type_name(&lun->port_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
+ "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
port_stat_grp = &lun->port_stat_grps.stat_group;
- port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
pr_err("Unable to allocate port_stat_grp->default_groups\n");
@@ -953,6 +973,19 @@ TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
/* End of tfc_tpg_attrib_cit */
+/* Start of tfc_tpg_auth_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_auth, se_portal_group, tpg_auth_group);
+
+static struct configfs_item_operations target_fabric_tpg_auth_item_ops = {
+ .show_attribute = target_fabric_tpg_auth_attr_show,
+ .store_attribute = target_fabric_tpg_auth_attr_store,
+};
+
+TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_auth_cit */
+
/* Start of tfc_tpg_param_cit */
CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
@@ -1018,21 +1051,24 @@ static struct config_group *target_fabric_make_tpg(
se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
- se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
- se_tpg->tpg_group.default_groups[5] = NULL;
+ se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group;
+ se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group;
+ se_tpg->tpg_group.default_groups[6] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_base_cit);
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
- &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_lun_cit);
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
- &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_np_cit);
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
+ config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
+ &tf->tf_cit_tmpl.tfc_tpg_auth_cit);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_param_cit);
return &se_tpg->tpg_group;
}
@@ -1119,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn(
wwn->wwn_group.default_groups[1] = NULL;
config_group_init_type_name(&wwn->wwn_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_cit);
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);
return &wwn->wwn_group;
}
@@ -1190,6 +1226,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
target_fabric_setup_tpg_np_cit(tf);
target_fabric_setup_tpg_np_base_cit(tf);
target_fabric_setup_tpg_attrib_cit(tf);
+ target_fabric_setup_tpg_auth_cit(tf);
target_fabric_setup_tpg_param_cit(tf);
target_fabric_setup_tpg_nacl_cit(tf);
target_fabric_setup_tpg_nacl_base_cit(tf);
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 283a36e464e..0d1cf8b4f49 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -4,8 +4,7 @@
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
- * Copyright (c) 2010 Rising Tide Systems, Inc.
- * Copyright (c) 2010 Linux-iSCSI.org
+ * (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -338,7 +337,7 @@ u32 iscsi_get_pr_transport_id_len(
* 00b: iSCSI Initiator device TransportID format
*/
if (pr_reg->isid_present_at_reg) {
- len += 5; /* For ",i,0x" ASCII seperator */
+ len += 5; /* For ",i,0x" ASCII separator */
len += 7; /* For iSCSI Initiator Session ID + Null terminator */
*format_code = 1;
} else
@@ -415,20 +414,20 @@ char *iscsi_parse_pr_out_transport_id(
*out_tid_len = (add_len + 4);
}
/*
- * Check for ',i,0x' seperator between iSCSI Name and iSCSI Initiator
+ * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
* Session ID as defined in Table 390 - iSCSI initiator port TransportID
* format.
*/
if (format_code == 0x40) {
p = strstr(&buf[4], ",i,0x");
if (!p) {
- pr_err("Unable to locate \",i,0x\" seperator"
+ pr_err("Unable to locate \",i,0x\" separator"
" for Initiator port identifier: %s\n",
&buf[4]);
return NULL;
}
*p = '\0'; /* Terminate iSCSI Name */
- p += 5; /* Skip over ",i,0x" seperator */
+ p += 5; /* Skip over ",i,0x" separator */
*port_nexus_ptr = p;
/*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7ed58e2df79..7d6cddaec52 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -3,10 +3,7 @@
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
- * Copyright (c) 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2005-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -33,15 +30,20 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
+#include <linux/falloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
+#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_file.h"
-static struct se_subsystem_api fileio_template;
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct fd_dev, dev);
+}
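/*
 * Editor's note: hedged sketch of the embedding this refactor relies on.
 * struct fd_dev now embeds its struct se_device (roughly:
 * struct fd_dev { struct se_device dev; ... };), so FD_DEV() is a
 * zero-cost container_of() instead of a lookup through the old void
 * *dev_ptr cookie. The other backends converted in this series follow
 * the same pattern.
 */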
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -64,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
- pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic Target Core\n",
+ hba->hba_id, fd_host->fd_host_id);
return 0;
}
@@ -82,7 +83,7 @@ static void fd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
-static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
struct fd_host *fd_host = hba->hba_ptr;
@@ -97,68 +98,48 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
- return fd_dev;
+ return &fd_dev->dev;
}
-/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static struct se_device *fd_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int fd_configure_device(struct se_device *dev)
{
- char *dev_p = NULL;
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct queue_limits *limits;
- struct fd_dev *fd_dev = p;
- struct fd_host *fd_host = hba->hba_ptr;
- mm_segment_t old_fs;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct fd_host *fd_host = dev->se_hba->hba_ptr;
struct file *file;
struct inode *inode = NULL;
- int dev_flags = 0, flags, ret = -EINVAL;
+ int flags, ret = -EINVAL;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+ pr_err("Missing fd_dev_name=\n");
+ return -EINVAL;
+ }
- old_fs = get_fs();
- set_fs(get_ds());
- dev_p = getname(fd_dev->fd_dev_name);
- set_fs(old_fs);
+ /*
+ * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+ * of pure timestamp updates.
+ */
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
- if (IS_ERR(dev_p)) {
- pr_err("getname(%s) failed: %lu\n",
- fd_dev->fd_dev_name, IS_ERR(dev_p));
- ret = PTR_ERR(dev_p);
- goto fail;
- }
-#if 0
- if (di->no_create_file)
- flags = O_RDWR | O_LARGEFILE;
- else
- flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#else
- flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#endif
-/* flags |= O_DIRECT; */
/*
- * If fd_buffered_io=1 has not been set explicitly (the default),
- * use O_SYNC to force FILEIO writes to disk.
+ * Optionally allow fd_buffered_io=1 to be enabled for people
+	 * who want to use the fs buffer cache as a WriteCache mechanism.
+	 *
+	 * This means that in the event of a hard failure, there is a risk
+ * of silent data-loss if the SCSI client has *not* performed a
+ * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
+ * to write-out the entire device cache.
*/
- if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
- flags |= O_SYNC;
+ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+ pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
+ flags &= ~O_DSYNC;
+ }
- file = filp_open(dev_p, flags, 0600);
+ file = filp_open(fd_dev->fd_dev_name, flags, 0600);
if (IS_ERR(file)) {
- pr_err("filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
ret = PTR_ERR(file);
goto fail;
}
- if (!file || !file->f_dentry) {
- pr_err("filp_open(%s) failed\n", dev_p);
- goto fail;
- }
fd_dev->fd_file = file;
/*
* If using a block backend with this struct file, we extract
@@ -168,29 +149,48 @@ static struct se_device *fd_create_virtdevice(
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q;
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = bdev_get_queue(inode->i_bdev);
- limits = &dev_limits.limits;
- limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
- limits->max_hw_sectors = queue_max_hw_sectors(q);
- limits->max_sectors = queue_max_sectors(q);
+ struct request_queue *q = bdev_get_queue(inode->i_bdev);
+ unsigned long long dev_size;
+
+ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
*/
- fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
- fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+ dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
- fd_dev->fd_dev_size,
- div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+ dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
+ /*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+ * in ATA, in which case we need to set TPE=1
+ */
+ if (blk_queue_discard(q)) {
+ dev->dev_attrib.max_unmap_lba_count =
+ q->limits.max_discard_sectors;
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code..
+ */
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity =
+ q->limits.discard_granularity >> 9;
+ dev->dev_attrib.unmap_granularity_alignment =
+ q->limits.discard_alignment;
+ pr_debug("IFILE: BLOCK Discard support available,"
+ " disabled by default\n");
+ }
+ /*
+ * Enable write same emulation for FILEIO and use 0xFFFF as
+ * the smaller WRITE_SAME(10) only has a two-byte block count.
+ */
+ dev->dev_attrib.max_write_same_len = 0xFFFF;
+
+ if (blk_queue_nonrot(q))
+ dev->dev_attrib.is_nonrot = 1;
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
pr_err("FILEIO: Missing fd_dev_size="
@@ -199,21 +199,35 @@ static struct se_device *fd_create_virtdevice(
goto fail;
}
- limits = &dev_limits.limits;
- limits->logical_block_size = FD_BLOCKSIZE;
- limits->max_hw_sectors = FD_MAX_SECTORS;
- limits->max_sectors = FD_MAX_SECTORS;
fd_dev->fd_block_size = FD_BLOCKSIZE;
+ /*
+ * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
+ */
+ dev->dev_attrib.max_unmap_lba_count = 0x2000;
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code..
+ */
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity = 1;
+ dev->dev_attrib.unmap_granularity_alignment = 0;
+
+ /*
+ * Limit WRITE_SAME w/ UNMAP=0 emulation to 4k Number of LBAs (NoLB)
+ * based upon struct iovec limit for vfs_writev()
+ */
+ dev->dev_attrib.max_write_same_len = 0x1000;
}
- dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+ dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+ dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
+ dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
- dev = transport_add_device_to_core_hba(hba, &fileio_template,
- se_dev, dev_flags, fd_dev,
- &dev_limits, "FILEIO", FD_VERSION);
- if (!dev)
- goto fail;
+ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+ pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
+ " with FDBD_HAS_BUFFERED_IO_WCE\n");
+ dev->dev_attrib.emulate_write_cache = 1;
+ }
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
@@ -222,24 +236,18 @@ static struct se_device *fd_create_virtdevice(
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
- putname(dev_p);
- return dev;
+ return 0;
fail:
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
- putname(dev_p);
- return ERR_PTR(ret);
+ return ret;
}
-/* fd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_device(void *p)
+static void fd_free_device(struct se_device *dev)
{
- struct fd_dev *fd_dev = p;
+ struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
@@ -249,123 +257,144 @@ static void fd_free_device(void *p)
kfree(fd_dev);
}
-static inline struct fd_request *FILE_REQ(struct se_task *task)
+static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ int is_write)
{
- return container_of(task, struct fd_request, fd_task);
-}
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
+ struct file *prot_fd = dev->fd_prot_file;
+ struct scatterlist *sg;
+ loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ unsigned char *buf;
+ u32 prot_size, len, size;
+ int rc, ret = 1, i;
+
+ prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ se_dev->prot_length;
+
+ if (!is_write) {
+ fd_prot->prot_buf = vzalloc(prot_size);
+ if (!fd_prot->prot_buf) {
+ pr_err("Unable to allocate fd_prot->prot_buf\n");
+ return -ENOMEM;
+ }
+ buf = fd_prot->prot_buf;
+
+ fd_prot->prot_sg_nents = cmd->t_prot_nents;
+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+ fd_prot->prot_sg_nents, GFP_KERNEL);
+ if (!fd_prot->prot_sg) {
+ pr_err("Unable to allocate fd_prot->prot_sg\n");
+ vfree(fd_prot->prot_buf);
+ return -ENOMEM;
+ }
+ size = prot_size;
+ for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
-static struct se_task *
-fd_alloc_task(unsigned char *cdb)
-{
- struct fd_request *fd_req;
+ len = min_t(u32, PAGE_SIZE, size);
+ sg_set_buf(sg, buf, len);
+ size -= len;
+ buf += len;
+ }
+ }
- fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
- if (!fd_req) {
- pr_err("Unable to allocate struct fd_request\n");
- return NULL;
+ if (is_write) {
+ rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
+ if (rc < 0 || prot_size != rc) {
+ pr_err("kernel_write() for fd_do_prot_rw failed:"
+ " %d\n", rc);
+ ret = -EINVAL;
+ }
+ } else {
+ rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
+ if (rc < 0) {
+ pr_err("kernel_read() for fd_do_prot_rw failed:"
+ " %d\n", rc);
+ ret = -EINVAL;
+ }
+ }
+
+ if (is_write || ret < 0) {
+ kfree(fd_prot->prot_sg);
+ vfree(fd_prot->prot_buf);
}
- return &fd_req->fd_task;
+ return ret;
}
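
A worked sizing example for the helper above, assuming 512-byte blocks
and the 8-byte T10 DIF tuple (se_dev->prot_length == 8): a 4096-byte
data transfer spans 8 sectors, so prot_size = 8 * 8 = 64 bytes, located
at byte offset t_task_lba * 8 in the protection file.
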
-static int fd_do_readv(struct se_task *task)
+static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
+ u32 sgl_nents, int is_write)
{
- struct fd_request *req = FILE_REQ(task);
- struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
- struct fd_dev *dev = se_dev->dev_ptr;
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file;
- struct scatterlist *sg = task->task_sg;
+ struct scatterlist *sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (task->task_lba *
- se_dev->se_sub_dev->se_dev_attrib.block_size);
+ loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
int ret = 0, i;
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+ iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}
- for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ for_each_sg(sgl, sg, sgl_nents, i) {
iov[i].iov_len = sg->length;
- iov[i].iov_base = sg_virt(sg);
+ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+
+ if (is_write)
+ ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+ else
+ ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
set_fs(old_fs);
+ for_each_sg(sgl, sg, sgl_nents, i)
+ kunmap(sg_page(sg));
+
kfree(iov);
- /*
- * Return zeros and GOOD status even if the READ did not return
- * the expected virt_size for struct file w/o a backing struct
- * block_device.
- */
- if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
- if (ret < 0 || ret != task->task_size) {
- pr_err("vfs_readv() returned %d,"
- " expecting %d for S_ISBLK\n", ret,
- (int)task->task_size);
+
+ if (is_write) {
+ if (ret < 0 || ret != cmd->data_length) {
+ pr_err("%s() write returned %d\n", __func__, ret);
return (ret < 0 ? ret : -EINVAL);
}
} else {
- if (ret < 0) {
- pr_err("vfs_readv() returned %d for non"
- " S_ISBLK\n", ret);
- return ret;
+ /*
+ * Return zeros and GOOD status even if the READ did not return
+ * the expected virt_size for struct file w/o a backing struct
+ * block_device.
+ */
+ if (S_ISBLK(file_inode(fd)->i_mode)) {
+ if (ret < 0 || ret != cmd->data_length) {
+ pr_err("%s() returned %d, expecting %u for "
+ "S_ISBLK\n", __func__, ret,
+ cmd->data_length);
+ return (ret < 0 ? ret : -EINVAL);
+ }
+ } else {
+ if (ret < 0) {
+ pr_err("%s() returned %d for non S_ISBLK\n",
+ __func__, ret);
+ return ret;
+ }
}
}
-
- return 1;
-}
-
-static int fd_do_writev(struct se_task *task)
-{
- struct fd_request *req = FILE_REQ(task);
- struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
- struct fd_dev *dev = se_dev->dev_ptr;
- struct file *fd = dev->fd_file;
- struct scatterlist *sg = task->task_sg;
- struct iovec *iov;
- mm_segment_t old_fs;
- loff_t pos = (task->task_lba *
- se_dev->se_sub_dev->se_dev_attrib.block_size);
- int ret, i = 0;
-
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
- if (!iov) {
- pr_err("Unable to allocate fd_do_writev iov[]\n");
- return -ENOMEM;
- }
-
- for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
- iov[i].iov_len = sg->length;
- iov[i].iov_base = sg_virt(sg);
- }
-
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
- set_fs(old_fs);
-
- kfree(iov);
-
- if (ret < 0 || ret != task->task_size) {
- pr_err("vfs_writev() returned %d\n", ret);
- return (ret < 0 ? ret : -EINVAL);
- }
-
return 1;
}
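
The same gather pattern as a standalone userspace sketch (hypothetical
names; preadv(2)/pwritev(2) stand in for vfs_readv()/vfs_writev()):

#define _GNU_SOURCE
#include <sys/uio.h>
#include <sys/types.h>
#include <stdlib.h>

struct seg { void *buf; size_t len; };	/* stand-in for one scatterlist entry */

static ssize_t seg_rw(int fd, struct seg *segs, int nsegs, off_t pos,
		      int is_write)
{
	struct iovec *iov = calloc(nsegs, sizeof(*iov));
	ssize_t ret;
	int i;

	if (!iov)
		return -1;
	for (i = 0; i < nsegs; i++) {
		iov[i].iov_base = segs[i].buf;
		iov[i].iov_len = segs[i].len;
	}
	ret = is_write ? pwritev(fd, iov, nsegs, pos)
		       : preadv(fd, iov, nsegs, pos);
	free(iov);
	return ret;
}
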
-static void fd_emulate_sync_cache(struct se_task *task)
+static sense_reason_t
+fd_execute_sync_cache(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
- struct fd_dev *fd_dev = dev->dev_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
@@ -375,7 +404,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
* for this SYNCHRONIZE_CACHE op
*/
if (immed)
- transport_complete_sync_cache(cmd, 1);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
/*
* Determine if we will be flushing the entire device.
@@ -384,7 +413,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
start = 0;
end = LLONG_MAX;
} else {
- start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ start = cmd->t_task_lba * dev->dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
@@ -395,79 +424,283 @@ static void fd_emulate_sync_cache(struct se_task *task)
if (ret != 0)
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
- if (!immed)
- transport_complete_sync_cache(cmd, ret == 0);
+ if (immed)
+ return 0;
+
+ if (ret)
+ target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+ else
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+
+ return 0;
}
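
For reference, the immed test above keys off the IMMED bit of
SYNCHRONIZE CACHE (CDB byte 1, bit 1, hence the 0x2 mask): with IMMED
set, GOOD status is returned before the flush and a later flush failure
goes unreported; with IMMED clear, the vfs_fsync_range() result selects
GOOD or CHECK CONDITION.
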
-/*
- * WRITE Force Unit Access (FUA) emulation on a per struct se_task
- * LBA range basis..
- */
-static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+static unsigned char *
+fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
+ unsigned int len)
{
- struct se_device *dev = cmd->se_dev;
- struct fd_dev *fd_dev = dev->dev_ptr;
- loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
- loff_t end = start + task->task_size;
+ struct se_device *se_dev = cmd->se_dev;
+ unsigned int block_size = se_dev->dev_attrib.block_size;
+ unsigned int i = 0, end;
+ unsigned char *buf, *p, *kmap_buf;
+
+ buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate fd_execute_write_same buf\n");
+ return NULL;
+ }
+
+ kmap_buf = kmap(sg_page(sg)) + sg->offset;
+ if (!kmap_buf) {
+ pr_err("kmap() failed in fd_setup_write_same\n");
+ kfree(buf);
+ return NULL;
+ }
+ /*
+ * Fill local *buf to contain multiple WRITE_SAME blocks up to
+ * min(len, PAGE_SIZE)
+ */
+ p = buf;
+ end = min_t(unsigned int, len, PAGE_SIZE);
+
+ while (i < end) {
+ memcpy(p, kmap_buf, block_size);
+
+ i += block_size;
+ p += block_size;
+ }
+ kunmap(sg_page(sg));
+
+ return buf;
+}
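
A minimal sketch of the tiling step above (illustrative, hypothetical
names): replicate one logical block across a staging buffer so a single
allocation can back every iovec of the WRITE_SAME payload.

#include <string.h>
#include <stddef.h>

static void fill_write_same_buf(unsigned char *dst, size_t dst_len,
				const unsigned char *blk, size_t blk_size)
{
	size_t off;

	/* tile the source block; assumes dst_len is a multiple of blk_size */
	for (off = 0; off + blk_size <= dst_len; off += blk_size)
		memcpy(dst + off, blk, blk_size);
}
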
+
+static sense_reason_t
+fd_execute_write_same(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *f = fd_dev->fd_file;
+ struct scatterlist *sg;
+ struct iovec *iov;
+ mm_segment_t old_fs;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+ unsigned int len, len_tmp, iov_num;
+ int i, rc;
+ unsigned char *buf;
+
+ if (!nolb) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+ sg = &cmd->t_data_sg[0];
+
+ if (cmd->t_data_nents > 1 ||
+ sg->length != cmd->se_dev->dev_attrib.block_size) {
+ pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+ " block_size: %u\n", cmd->t_data_nents, sg->length,
+ cmd->se_dev->dev_attrib.block_size);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ len = len_tmp = nolb * se_dev->dev_attrib.block_size;
+ iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ buf = fd_setup_write_same_buf(cmd, sg, len);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ iov = vzalloc(sizeof(struct iovec) * iov_num);
+ if (!iov) {
+ pr_err("Unable to allocate fd_execute_write_same iovecs\n");
+ kfree(buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * Map the single fabric-received scatterlist block, now populated
+ * in *buf, into each iovec for I/O submission.
+ */
+ for (i = 0; i < iov_num; i++) {
+ iov[i].iov_base = buf;
+ iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
+ len_tmp -= iov[i].iov_len;
+ }
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ rc = vfs_writev(f, &iov[0], iov_num, &pos);
+ set_fs(old_fs);
+
+ vfree(iov);
+ kfree(buf);
+
+ if (rc < 0 || rc != len) {
+ pr_err("vfs_writev() returned %d for write same\n", rc);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
+
+static sense_reason_t
+fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+{
+ struct file *file = priv;
+ struct inode *inode = file->f_mapping->host;
int ret;
- pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
- task->task_lba, task->task_size);
+ if (S_ISBLK(inode->i_mode)) {
+ /* The backend is a block device: use discard */
+ struct block_device *bdev = inode->i_bdev;
+
+ ret = blkdev_issue_discard(bdev, lba,
+ nolb, GFP_KERNEL, 0);
+ if (ret < 0) {
+ pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
+ ret);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ } else {
+ /* The backend is a regular file: use fallocate */
+ struct se_device *se_dev = cmd->se_dev;
+ loff_t pos = lba * se_dev->dev_attrib.block_size;
+ unsigned int len = nolb * se_dev->dev_attrib.block_size;
+ int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+
+ if (!file->f_op->fallocate)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
- if (ret != 0)
- pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ ret = file->f_op->fallocate(file, mode, pos, len);
+ if (ret < 0) {
+ pr_warn("FILEIO: fallocate() failed: %d\n", ret);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ }
+
+ return 0;
+}
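
The regular-file branch in userspace terms (a sketch, not driver code):
UNMAP on a plain file amounts to hole punching, i.e. fallocate(2) with
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE.

#define _GNU_SOURCE
#include <fcntl.h>

static int punch_hole(int fd, off_t pos, off_t len)
{
	/* deallocate [pos, pos + len) without changing the file size */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 pos, len);
}
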
+
+static sense_reason_t
+fd_execute_write_same_unmap(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *file = fd_dev->fd_file;
+ sector_t lba = cmd->t_task_lba;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ int ret;
+
+ if (!nolb) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+
+ ret = fd_do_unmap(cmd, file, lba, nolb);
+ if (ret)
+ return ret;
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static sense_reason_t
+fd_execute_unmap(struct se_cmd *cmd)
+{
+ struct file *file = FD_DEV(cmd->se_dev)->fd_file;
+
+ return sbc_execute_unmap(cmd, fd_do_unmap, file);
}
-static int fd_do_task(struct se_task *task)
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
+ struct fd_prot fd_prot;
+ sense_reason_t rc;
int ret = 0;
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
*/
- if (task->task_data_direction == DMA_FROM_DEVICE) {
- ret = fd_do_readv(task);
+ if (data_direction == DMA_FROM_DEVICE) {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+ if (cmd->prot_type) {
+ ret = fd_do_prot_rw(cmd, &fd_prot, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+
+ if (ret > 0 && cmd->prot_type) {
+ u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ return rc;
+ }
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ }
} else {
- ret = fd_do_writev(task);
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+ if (cmd->prot_type) {
+ u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ ret = fd_do_prot_rw(cmd, &fd_prot, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ return rc;
+ }
+ }
+
+ ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
+ /*
+ * Perform implicit vfs_fsync_range() for fd_do_rw() write ops
+ * for SCSI WRITEs with Forced Unit Access (FUA) set.
+ * Allow this to happen independent of WCE=0 setting.
+ */
if (ret > 0 &&
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) {
- /*
- * We might need to be a bit smarter here
- * and return some sense data to let the initiator
- * know the FUA WRITE cache sync failed..?
- */
- fd_emulate_write_fua(cmd, task);
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ loff_t start = cmd->t_task_lba *
+ dev->dev_attrib.block_size;
+ loff_t end = start + cmd->data_length;
+
+ vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
+ if (ret > 0 && cmd->prot_type) {
+ ret = fd_do_prot_rw(cmd, &fd_prot, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
}
if (ret < 0) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return ret;
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- if (ret) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
- return 0;
-}
-
-/* fd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_task(struct se_task *task)
-{
- struct fd_request *req = FILE_REQ(task);
- kfree(req);
+ if (ret)
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
}
enum {
@@ -481,12 +714,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t fd_set_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page, ssize_t count)
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -504,14 +735,11 @@ static ssize_t fd_set_configfs_dev_params(
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_fd_dev_name:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
+ if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
+ FD_MAX_DEV_NAME) == 0) {
+ ret = -EINVAL;
break;
}
- snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
- "%s", arg_p);
- kfree(arg_p);
pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
@@ -522,10 +750,10 @@ static ssize_t fd_set_configfs_dev_params(
ret = -ENOMEM;
break;
}
- ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+ ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
- pr_err("strict_strtoull() failed for"
+ pr_err("kstrtoull() failed for"
" fd_dev_size=\n");
goto out;
}
@@ -544,7 +772,7 @@ static ssize_t fd_set_configfs_dev_params(
pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");
- fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+ fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
break;
default:
break;
@@ -556,82 +784,168 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
-{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
-
- if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
- pr_err("Missing fd_dev_name=\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t fd_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
fd_dev->fd_dev_name, fd_dev->fd_dev_size,
- (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
- "Buffered" : "Synchronous");
+ (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
+ "Buffered-WCE" : "O_DSYNC");
return bl;
}
-/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_rev(struct se_device *dev)
+static sector_t fd_get_blocks(struct se_device *dev)
{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *f = fd_dev->fd_file;
+ struct inode *i = f->f_mapping->host;
+ unsigned long long dev_size;
+ /*
+ * When using a file that references an underlying struct block_device,
+ * ensure dev_size is always based on the current inode size in order
+ * to handle underlying block_device resize operations.
+ */
+ if (S_ISBLK(i->i_mode))
+ dev_size = i_size_read(i);
+ else
+ dev_size = fd_dev->fd_dev_size;
+
+ return div_u64(dev_size - dev->dev_attrib.block_size,
+ dev->dev_attrib.block_size);
}
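
A worked example of the export math above, assuming a 1 GiB backing
store with 512-byte logical blocks: (1073741824 - 512) / 512 = 2097151,
i.e. the last addressable LBA, since one block is subtracted before
dividing. As a generic helper (illustrative only):

#include <stdint.h>

static uint64_t exported_blocks(uint64_t dev_size, uint32_t block_size)
{
	/* mirrors div_u64(dev_size - block_size, block_size) above */
	return (dev_size - block_size) / block_size;
}
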
-/* fd_get_device_type(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_type(struct se_device *dev)
+static int fd_init_prot(struct se_device *dev)
{
- return TYPE_DISK;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *prot_file, *file = fd_dev->fd_file;
+ struct inode *inode;
+ int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+ char buf[FD_MAX_DEV_PROT_NAME];
+
+ if (!file) {
+ pr_err("Unable to locate fd_dev->fd_file\n");
+ return -ENODEV;
+ }
+
+ inode = file->f_mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ pr_err("FILEIO Protection emulation only supported on"
+ " !S_ISBLK\n");
+ return -ENOSYS;
+ }
+
+ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+ flags &= ~O_DSYNC;
+
+ snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+ fd_dev->fd_dev_name);
+
+ prot_file = filp_open(buf, flags, 0600);
+ if (IS_ERR(prot_file)) {
+ pr_err("filp_open(%s) failed\n", buf);
+ ret = PTR_ERR(prot_file);
+ return ret;
+ }
+ fd_dev->fd_prot_file = prot_file;
+
+ return 0;
}
-static sector_t fd_get_blocks(struct se_device *dev)
+static int fd_format_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *prot_fd = fd_dev->fd_prot_file;
+ sector_t prot_length, prot;
+ unsigned char *buf;
+ loff_t pos = 0;
+ int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+ int rc, ret = 0, size, len;
+
+ if (!dev->dev_attrib.pi_prot_type) {
+ pr_err("Unable to format_prot while pi_prot_type == 0\n");
+ return -ENODEV;
+ }
+ if (!prot_fd) {
+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
+ return -ENODEV;
+ }
+
+ buf = vzalloc(unit_size);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+ prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+ size = prot_length;
+
+ pr_debug("Using FILEIO prot_length: %llu\n",
+ (unsigned long long)prot_length);
+
+ memset(buf, 0xff, unit_size);
+ for (prot = 0; prot < prot_length; prot += unit_size) {
+ len = min(unit_size, size);
+ rc = kernel_write(prot_fd, buf, len, pos);
+ if (rc != len) {
+ pr_err("vfs_write to prot file failed: %d\n", rc);
+ ret = -ENODEV;
+ goto out;
+ }
+ pos += len;
+ size -= len;
+ }
+
+out:
+ vfree(buf);
+ return ret;
+}
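
The same chunked-fill loop as a standalone sketch (hypothetical
userspace helper; pwrite(2) stands in for kernel_write()):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int format_fill(int fd, size_t total, size_t unit)
{
	unsigned char *buf = malloc(unit);
	off_t pos = 0;

	if (!buf)
		return -1;
	memset(buf, 0xff, unit);	/* same 0xff pattern as above */
	while (total) {
		size_t len = total < unit ? total : unit;

		if (pwrite(fd, buf, len, pos) != (ssize_t)len) {
			free(buf);
			return -1;
		}
		pos += len;
		total -= len;
	}
	free(buf);
	return 0;
}
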
+
+static void fd_free_prot(struct se_device *dev)
{
- struct fd_dev *fd_dev = dev->dev_ptr;
- unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
- dev->se_sub_dev->se_dev_attrib.block_size);
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ if (!fd_dev->fd_prot_file)
+ return;
+
+ filp_close(fd_dev->fd_prot_file, NULL);
+ fd_dev->fd_prot_file = NULL;
+}
+
+static struct sbc_ops fd_sbc_ops = {
+ .execute_rw = fd_execute_rw,
+ .execute_sync_cache = fd_execute_sync_cache,
+ .execute_write_same = fd_execute_write_same,
+ .execute_write_same_unmap = fd_execute_write_same_unmap,
+ .execute_unmap = fd_execute_unmap,
+};
- return blocks_long;
+static sense_reason_t
+fd_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
static struct se_subsystem_api fileio_template = {
.name = "fileio",
+ .inquiry_prod = "FILEIO",
+ .inquiry_rev = FD_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
- .write_cache_emulated = 1,
- .fua_write_emulated = 1,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
- .allocate_virtdevice = fd_allocate_virtdevice,
- .create_virtdevice = fd_create_virtdevice,
+ .alloc_device = fd_alloc_device,
+ .configure_device = fd_configure_device,
.free_device = fd_free_device,
- .alloc_task = fd_alloc_task,
- .do_task = fd_do_task,
- .do_sync_cache = fd_emulate_sync_cache,
- .free_task = fd_free_task,
- .check_configfs_dev_params = fd_check_configfs_dev_params,
+ .parse_cdb = fd_parse_cdb,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
- .get_device_rev = fd_get_device_rev,
- .get_device_type = fd_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
+ .init_prot = fd_init_prot,
+ .format_prot = fd_format_prot,
+ .free_prot = fd_free_prot,
};
static int __init fileio_module_init(void)
@@ -639,7 +953,7 @@ static int __init fileio_module_init(void)
return transport_subsystem_register(&fileio_template);
}
-static void fileio_module_exit(void)
+static void __exit fileio_module_exit(void)
{
transport_subsystem_release(&fileio_template);
}
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 59e6e73106c..182cbb29503 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,23 +4,32 @@
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
+#define FD_MAX_DEV_PROT_NAME (FD_MAX_DEV_NAME + 16)
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
-#define FD_MAX_SECTORS 1024
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call:
+ * 2048 iovecs * 4096-byte pages = 8388608 bytes (8 MiB)
+ */
+#define FD_MAX_BYTES 8388608
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
-struct fd_request {
- struct se_task fd_task;
-};
-
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
-#define FDBD_USE_BUFFERED_IO 0x04
+#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_FORMAT_UNIT_SIZE 2048
+
+struct fd_prot {
+ unsigned char *prot_buf;
+ struct scatterlist *prot_sg;
+ u32 prot_sg_nents;
+};
struct fd_dev {
+ struct se_device dev;
+
u32 fbd_flags;
unsigned char fd_dev_name[FD_MAX_DEV_NAME];
/* Unique Ramdisk Device ID in Ramdisk HBA */
@@ -31,6 +40,7 @@ struct fd_dev {
u32 fd_block_size;
unsigned long long fd_dev_size;
struct file *fd_file;
+ struct file *fd_prot_file;
/* FILEIO HBA device is connected to */
struct fd_host *fd_host;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 3dd1bd4b6f7..a25051a37dd 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -3,10 +3,7 @@
*
* This file contains the TCM HBA Transport related functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -113,7 +110,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex);
@@ -152,8 +148,7 @@ out_free_hba:
int
core_delete_hba(struct se_hba *hba)
{
- if (!list_empty(&hba->hba_dev_list))
- dump_stack();
+ WARN_ON(hba->dev_count);
hba->transport->detach_hba(hba);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8572eae62da..7e6b857c6b3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -4,10 +4,7 @@
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -40,15 +37,23 @@
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
+#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_iblock.h"
-static struct se_subsystem_api iblock_template;
+#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
+#define IBLOCK_BIO_POOL_SIZE 128
-static void iblock_bio_done(struct bio *, int);
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct iblock_dev, dev);
+}
+
+static struct se_subsystem_api iblock_template;
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -56,117 +61,70 @@ static void iblock_bio_done(struct bio *, int);
*/
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
- struct iblock_hba *ib_host;
-
- ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
- if (!ib_host) {
- pr_err("Unable to allocate memory for"
- " struct iblock_hba\n");
- return -ENOMEM;
- }
-
- ib_host->iblock_host_id = host_id;
-
- hba->hba_ptr = ib_host;
-
pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
-
- pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
- hba->hba_id, ib_host->iblock_host_id);
-
return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
- struct iblock_hba *ib_host = hba->hba_ptr;
-
- pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
- " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
-
- kfree(ib_host);
- hba->hba_ptr = NULL;
}
-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
struct iblock_dev *ib_dev = NULL;
- struct iblock_hba *ib_host = hba->hba_ptr;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
if (!ib_dev) {
pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
- ib_dev->ibd_host = ib_host;
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
- return ib_dev;
+ return &ib_dev->dev;
}
-static struct se_device *iblock_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int iblock_configure_device(struct se_device *dev)
{
- struct iblock_dev *ib_dev = p;
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct block_device *bd = NULL;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct queue_limits *limits;
- u32 dev_flags = 0;
- int ret = -EINVAL;
+ struct block_device *bd = NULL;
+ struct blk_integrity *bi;
+ fmode_t mode;
+ int ret = -ENOMEM;
- if (!ib_dev) {
- pr_err("Unable to locate struct iblock_dev parameter\n");
- return ERR_PTR(ret);
+ if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+ pr_err("Missing udev_path= parameters for IBLOCK\n");
+ return -EINVAL;
}
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
- /*
- * These settings need to be made tunable..
- */
- ib_dev->ibd_bio_set = bioset_create(32, 0);
+
+ ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
if (!ib_dev->ibd_bio_set) {
- pr_err("IBLOCK: Unable to create bioset()\n");
- return ERR_PTR(-ENOMEM);
+ pr_err("IBLOCK: Unable to create bioset\n");
+ goto out;
}
- pr_debug("IBLOCK: Created bio_set()\n");
- /*
- * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
- * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
- */
+
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
- bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
- FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+ mode = FMODE_READ|FMODE_EXCL;
+ if (!ib_dev->ibd_readonly)
+ mode |= FMODE_WRITE;
+
+ bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
- goto failed;
+ goto out_free_bioset;
}
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = bdev_get_queue(bd);
- limits = &dev_limits.limits;
- limits->logical_block_size = bdev_logical_block_size(bd);
- limits->max_hw_sectors = queue_max_hw_sectors(q);
- limits->max_sectors = queue_max_sectors(q);
- dev_limits.hw_queue_depth = q->nr_requests;
- dev_limits.queue_depth = q->nr_requests;
-
ib_dev->ibd_bd = bd;
- dev = transport_add_device_to_core_hba(hba,
- &iblock_template, se_dev, dev_flags, ib_dev,
- &dev_limits, "IBLOCK", IBLOCK_VERSION);
- if (!dev)
- goto failed;
+ q = bdev_get_queue(bd);
+
+ dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+ dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
* Check if the underlying struct block_device request_queue supports
@@ -174,64 +132,81 @@ static struct se_device *iblock_create_virtdevice(
* in ATA, in which case we need to set TPE=1
*/
if (blk_queue_discard(q)) {
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+ dev->dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
+
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity =
q->limits.discard_granularity >> 9;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+ dev->dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
+ /*
+ * Enable write same emulation for IBLOCK and use 0xFFFF as
+ * the smaller WRITE_SAME(10) only has a two-byte block count.
+ */
+ dev->dev_attrib.max_write_same_len = 0xFFFF;
if (blk_queue_nonrot(q))
- dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+ dev->dev_attrib.is_nonrot = 1;
+
+ bi = bdev_get_integrity(bd);
+ if (bi) {
+ struct bio_set *bs = ib_dev->ibd_bio_set;
+
+ if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
+ !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+ pr_err("IBLOCK export of blk_integrity: %s not"
+ " supported\n", bi->name);
+ ret = -ENOSYS;
+ goto out_blkdev_put;
+ }
- return dev;
+ if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+ } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+ }
-failed:
- if (ib_dev->ibd_bio_set) {
- bioset_free(ib_dev->ibd_bio_set);
- ib_dev->ibd_bio_set = NULL;
+ if (dev->dev_attrib.pi_prot_type) {
+ if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+ pr_err("Unable to allocate bioset for PI\n");
+ ret = -ENOMEM;
+ goto out_blkdev_put;
+ }
+ pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+ bs->bio_integrity_pool);
+ }
+ dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
}
- ib_dev->ibd_bd = NULL;
- return ERR_PTR(ret);
+
+ return 0;
+
+out_blkdev_put:
+ blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+out_free_bioset:
+ bioset_free(ib_dev->ibd_bio_set);
+ ib_dev->ibd_bio_set = NULL;
+out:
+ return ret;
}
-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
{
- struct iblock_dev *ib_dev = p;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
if (ib_dev->ibd_bio_set != NULL)
bioset_free(ib_dev->ibd_bio_set);
- kfree(ib_dev);
-}
-static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
-{
- return container_of(task, struct iblock_req, ib_task);
-}
-
-static struct se_task *
-iblock_alloc_task(unsigned char *cdb)
-{
- struct iblock_req *ib_req;
-
- ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
- if (!ib_req) {
- pr_err("Unable to allocate memory for struct iblock_req\n");
- return NULL;
- }
-
- atomic_set(&ib_req->ib_bio_cnt, 0);
- return &ib_req->ib_task;
+ kfree(ib_dev);
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -243,12 +218,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
- if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+ if (block_size == dev->dev_attrib.block_size)
return blocks_long;
switch (block_size) {
case 4096:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
@@ -262,7 +237,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
@@ -277,7 +252,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
@@ -292,7 +267,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
@@ -313,6 +288,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
return blocks_long;
}
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+ struct iblock_req *ibr = cmd->priv;
+ u8 status;
+
+ if (!atomic_dec_and_test(&ibr->pending))
+ return;
+
+ if (atomic_read(&ibr->ib_bio_err_cnt))
+ status = SAM_STAT_CHECK_CONDITION;
+ else
+ status = SAM_STAT_GOOD;
+
+ target_complete_cmd(cmd, status);
+ kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+ struct se_cmd *cmd = bio->bi_private;
+ struct iblock_req *ibr = cmd->priv;
+
+ /*
+ * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0
+ */
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+ err = -EIO;
+
+ if (err != 0) {
+ pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+ " err: %d\n", bio, err);
+ /*
+ * Bump the ib_bio_err_cnt and release bio.
+ */
+ atomic_inc(&ibr->ib_bio_err_cnt);
+ smp_mb__after_atomic();
+ }
+
+ bio_put(bio);
+
+ iblock_complete_cmd(cmd);
+}
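
The pending/err accounting above is the usual bias-count idiom for
fan-out I/O; a minimal sketch in C11 atomics (illustrative, hypothetical
names): pending starts at 1 plus one per extra bio, each completion
drops it once, and only the final drop reports status.

#include <stdatomic.h>

struct req {
	atomic_int pending;	/* outstanding bios + submitter's bias */
	atomic_int errors;
};

static int req_status;		/* stand-in for target_complete_cmd() */

static void req_complete_one(struct req *r, int err)
{
	if (err)
		atomic_fetch_add(&r->errors, 1);
	/* the last decrement observes the old value 1 */
	if (atomic_fetch_sub(&r->pending, 1) == 1)
		req_status = atomic_load(&r->errors) ? -1 : 0;
}
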
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+ struct bio *bio;
+
+ /*
+ * Only allocate as many vector entries as the bio code allows us to,
+ * we'll loop later on until we have handled the whole request.
+ */
+ if (sg_num > BIO_MAX_PAGES)
+ sg_num = BIO_MAX_PAGES;
+
+ bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ if (!bio) {
+ pr_err("Unable to allocate memory for bio\n");
+ return NULL;
+ }
+
+ bio->bi_bdev = ib_dev->ibd_bd;
+ bio->bi_private = cmd;
+ bio->bi_end_io = &iblock_bio_done;
+ bio->bi_iter.bi_sector = lba;
+
+ return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+ struct blk_plug plug;
+ struct bio *bio;
+
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(list)))
+ submit_bio(rw, bio);
+ blk_finish_plug(&plug);
+}
+
static void iblock_end_io_flush(struct bio *bio, int err)
{
struct se_cmd *cmd = bio->bi_private;
@@ -320,8 +376,13 @@ static void iblock_end_io_flush(struct bio *bio, int err)
if (err)
pr_err("IBLOCK: cache flush failed: %d\n", err);
- if (cmd)
- transport_complete_sync_cache(cmd, err == 0);
+ if (cmd) {
+ if (err)
+ target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+ else
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ }
+
bio_put(bio);
}
@@ -329,10 +390,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
* Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
-static void iblock_emulate_sync_cache(struct se_task *task)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
- struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
struct bio *bio;
@@ -341,7 +402,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
* for this SYNCHRONIZE_CACHE op.
*/
if (immed)
- transport_complete_sync_cache(cmd, 1);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
@@ -349,40 +410,131 @@ static void iblock_emulate_sync_cache(struct se_task *task)
if (!immed)
bio->bi_private = cmd;
submit_bio(WRITE_FLUSH, bio);
+ return 0;
+}
+
+static sense_reason_t
+iblock_do_unmap(struct se_cmd *cmd, void *priv,
+ sector_t lba, sector_t nolb)
+{
+ struct block_device *bdev = priv;
+ int ret;
+
+ ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+ if (ret < 0) {
+ pr_err("blkdev_issue_discard() failed: %d\n", ret);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ return 0;
}
-static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
{
- struct iblock_dev *ibd = dev->dev_ptr;
- struct block_device *bd = ibd->ibd_bd;
- int barrier = 0;
+ struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
- return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+ return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
}
-static void iblock_free_task(struct se_task *task)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
- kfree(IBLOCK_REQ(task));
+ struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+ sector_t lba = cmd->t_task_lba;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ int ret;
+
+ ret = iblock_do_unmap(cmd, bdev, lba, nolb);
+ if (ret)
+ return ret;
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+ struct iblock_req *ibr;
+ struct scatterlist *sg;
+ struct bio *bio;
+ struct bio_list list;
+ sector_t block_lba = cmd->t_task_lba;
+ sector_t sectors = sbc_get_write_same_sectors(cmd);
+
+ sg = &cmd->t_data_sg[0];
+
+ if (cmd->t_data_nents > 1 ||
+ sg->length != cmd->se_dev->dev_attrib.block_size) {
+ pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+ " block_size: %u\n", cmd->t_data_nents, sg->length,
+ cmd->se_dev->dev_attrib.block_size);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+ if (!ibr)
+ goto fail;
+ cmd->priv = ibr;
+
+ bio = iblock_get_bio(cmd, block_lba, 1);
+ if (!bio)
+ goto fail_free_ibr;
+
+ bio_list_init(&list);
+ bio_list_add(&list, bio);
+
+ atomic_set(&ibr->pending, 1);
+
+ while (sectors) {
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+
+ bio = iblock_get_bio(cmd, block_lba, 1);
+ if (!bio)
+ goto fail_put_bios;
+
+ atomic_inc(&ibr->pending);
+ bio_list_add(&list, bio);
+ }
+
+ /* Always in 512 byte units for Linux/Block */
+ block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ sectors -= 1;
+ }
+
+ iblock_submit_bios(&list, WRITE);
+ return 0;
+
+fail_put_bios:
+ while ((bio = bio_list_pop(&list)))
+ bio_put(bio);
+fail_free_ibr:
+ kfree(ibr);
+fail:
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
enum {
- Opt_udev_path, Opt_force, Opt_err
+ Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
+ {Opt_readonly, "readonly=%d"},
{Opt_force, "force=%d"},
{Opt_err, NULL}
};
-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
+ unsigned long tmp_readonly;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -403,17 +555,30 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
ret = -EEXIST;
goto out;
}
+ if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
+ SE_UDEV_PATH_LEN) == 0) {
+ ret = -EINVAL;
+ break;
+ }
+ pr_debug("IBLOCK: Referencing UDEV path: %s\n",
+ ib_dev->ibd_udev_path);
+ ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+ break;
+ case Opt_readonly:
arg_p = match_strdup(&args[0]);
if (!arg_p) {
ret = -ENOMEM;
break;
}
- snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
- "%s", arg_p);
+ ret = kstrtoul(arg_p, 0, &tmp_readonly);
kfree(arg_p);
- pr_debug("IBLOCK: Referencing UDEV path: %s\n",
- ib_dev->ibd_udev_path);
- ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for"
+ " readonly=\n");
+ goto out;
+ }
+ ib_dev->ibd_readonly = tmp_readonly;
+ pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
break;
case Opt_force:
break;
@@ -427,44 +592,26 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t iblock_check_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
- if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
- pr_err("Missing udev_path= parameters for IBLOCK\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
- struct block_device *bd = ibd->ibd_bd;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
- if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
- bl += sprintf(b + bl, " UDEV PATH: %s\n",
- ibd->ibd_udev_path);
- } else
- bl += sprintf(b + bl, "\n");
+ if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
+ bl += sprintf(b + bl, " UDEV PATH: %s",
+ ib_dev->ibd_udev_path);
+ bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
- "" : (bd->bd_holder == ibd) ?
+ "" : (bd->bd_holder == ib_dev) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -473,107 +620,128 @@ static ssize_t iblock_show_configfs_dev_params(
return bl;
}
-static void iblock_bio_destructor(struct bio *bio)
+static int
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
- struct se_task *task = bio->bi_private;
- struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+ struct se_device *dev = cmd->se_dev;
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct scatterlist *sg;
+ int i, rc;
- bio_free(bio, ib_dev->ibd_bio_set);
-}
+ bi = bdev_get_integrity(ib_dev->ibd_bd);
+ if (!bi) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
-static struct bio *
-iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
-{
- struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
- struct iblock_req *ib_req = IBLOCK_REQ(task);
- struct bio *bio;
+ bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+ if (!bip) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return -ENOMEM;
+ }
- /*
- * Only allocate as many vector entries as the bio code allows us to,
- * we'll loop later on until we have handled the whole request.
- */
- if (sg_num > BIO_MAX_PAGES)
- sg_num = BIO_MAX_PAGES;
+ bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
+ dev->prot_length;
+ bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
- if (!bio) {
- pr_err("Unable to allocate memory for bio\n");
- return NULL;
- }
+ pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+ (unsigned long long)bip->bip_iter.bi_sector);
- pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
- " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
- pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+ for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
- bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_private = task;
- bio->bi_destructor = iblock_bio_destructor;
- bio->bi_end_io = &iblock_bio_done;
- bio->bi_sector = lba;
- atomic_inc(&ib_req->ib_bio_cnt);
+ rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
+ sg->offset);
+ if (rc != sg->length) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ return -ENOMEM;
+ }
- pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
- pr_debug("Set ib_req->ib_bio_cnt: %d\n",
- atomic_read(&ib_req->ib_bio_cnt));
- return bio;
+ pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
+ sg_page(sg), sg->length, sg->offset);
+ }
+
+ return 0;
}
-static int iblock_do_task(struct se_task *task)
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
- struct bio *bio;
+ struct iblock_req *ibr;
+ struct bio *bio, *bio_start;
struct bio_list list;
struct scatterlist *sg;
- u32 i, sg_num = task->task_sg_nents;
+ u32 sg_num = sgl_nents;
sector_t block_lba;
- struct blk_plug plug;
- int rw;
+ unsigned bio_cnt;
+ int rw = 0;
+ int i;
- if (task->task_data_direction == DMA_TO_DEVICE) {
+ if (data_direction == DMA_TO_DEVICE) {
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
- * Force data to disk if we pretend to not have a volatile
- * write cache, or the initiator set the Force Unit Access bit.
+ * Force writethrough using WRITE_FUA if a volatile write cache
+ * is not enabled, or if the initiator set the Force Unit Access bit.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- (cmd->se_cmd_flags & SCF_FUA)))
- rw = WRITE_FUA;
- else
+ if (q->flush_flags & REQ_FUA) {
+ if (cmd->se_cmd_flags & SCF_FUA)
+ rw = WRITE_FUA;
+ else if (!(q->flush_flags & REQ_FLUSH))
+ rw = WRITE_FUA;
+ else
+ rw = WRITE;
+ } else {
rw = WRITE;
+ }
} else {
rw = READ;
}
/*
- * Do starting conversion up from non 512-byte blocksize with
- * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+ * Convert the blocksize advertised to the initiator to the 512 byte
+ * units unconditionally used by the Linux block layer.
*/
- if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
- block_lba = (task->task_lba << 3);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
- block_lba = (task->task_lba << 2);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
- block_lba = (task->task_lba << 1);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
- block_lba = task->task_lba;
+ if (dev->dev_attrib.block_size == 4096)
+ block_lba = (cmd->t_task_lba << 3);
+ else if (dev->dev_attrib.block_size == 2048)
+ block_lba = (cmd->t_task_lba << 2);
+ else if (dev->dev_attrib.block_size == 1024)
+ block_lba = (cmd->t_task_lba << 1);
+ else if (dev->dev_attrib.block_size == 512)
+ block_lba = cmd->t_task_lba;
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOSYS;
+ " %u\n", dev->dev_attrib.block_size);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+ if (!ibr)
+ goto fail;
+ cmd->priv = ibr;
+
+ if (!sgl_nents) {
+ atomic_set(&ibr->pending, 1);
+ iblock_complete_cmd(cmd);
+ return 0;
}
+ bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+ if (!bio)
+ goto fail_free_ibr;
+
+ bio_start = bio;
bio_list_init(&list);
bio_list_add(&list, bio);
- for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ atomic_set(&ibr->pending, 2);
+ bio_cnt = 1;
+
+ for_each_sg(sgl, sg, sgl_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
* length of the S/G list entry this will cause an
@@ -581,10 +749,18 @@ static int iblock_do_task(struct se_task *task)
*/
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
- bio = iblock_get_bio(task, block_lba, sg_num);
+ if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
+ iblock_submit_bios(&list, rw);
+ bio_cnt = 0;
+ }
+
+ bio = iblock_get_bio(cmd, block_lba, sg_num);
if (!bio)
- goto fail;
+ goto fail_put_bios;
+
+ atomic_inc(&ibr->pending);
bio_list_add(&list, bio);
+ bio_cnt++;
}
/* Always in 512 byte units for Linux/Block */
@@ -592,94 +768,117 @@ static int iblock_do_task(struct se_task *task)
sg_num--;
}
- blk_start_plug(&plug);
- while ((bio = bio_list_pop(&list)))
- submit_bio(rw, bio);
- blk_finish_plug(&plug);
+ if (cmd->prot_type) {
+ int rc = iblock_alloc_bip(cmd, bio_start);
+ if (rc)
+ goto fail_put_bios;
+ }
+ iblock_submit_bios(&list, rw);
+ iblock_complete_cmd(cmd);
return 0;
-fail:
+fail_put_bios:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+fail_free_ibr:
+ kfree(ibr);
+fail:
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
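
A worked example of the sector conversion above: with a 4096-byte
exported block size, LBA 10 becomes 512-byte sector 10 << 3 = 80; with
2048 bytes it is 10 << 2 = 40. The shift generalizes as below (an
illustrative helper assuming a power-of-two block size >= 512, using the
GCC/Clang ctz builtin):

#include <stdint.h>

static uint64_t to_512_byte_sectors(uint64_t lba, uint32_t block_size)
{
	/* e.g. 4096 / 512 = 8, ctz(8) = 3, so lba << 3 */
	return lba << __builtin_ctz(block_size / 512);
}
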
-static u32 iblock_get_device_rev(struct se_device *dev)
+static sector_t iblock_get_blocks(struct se_device *dev)
{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ struct request_queue *q = bdev_get_queue(bd);
+
+ return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
-static u32 iblock_get_device_type(struct se_device *dev)
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
- return TYPE_DISK;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ int ret;
+
+ ret = bdev_alignment_offset(bd);
+ if (ret == -1)
+ return 0;
+
+ /* convert offset-bytes to offset-lbas */
+ return ret / bdev_logical_block_size(bd);
}
-static sector_t iblock_get_blocks(struct se_device *dev)
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
- struct iblock_dev *ibd = dev->dev_ptr;
- struct block_device *bd = ibd->ibd_bd;
- struct request_queue *q = bdev_get_queue(bd);
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
- return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+ return ilog2(logs_per_phys);
}
-static void iblock_bio_done(struct bio *bio, int err)
+static unsigned int iblock_get_io_min(struct se_device *dev)
{
- struct se_task *task = bio->bi_private;
- struct iblock_req *ibr = IBLOCK_REQ(task);
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
- /*
- * Set -EIO if !BIO_UPTODATE and the passed is still err=0
- */
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
- err = -EIO;
+ return bdev_io_min(bd);
+}
- if (err != 0) {
- pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
- " err: %d\n", bio, err);
- /*
- * Bump the ib_bio_err_cnt and release bio.
- */
- atomic_inc(&ibr->ib_bio_err_cnt);
- smp_mb__after_atomic_inc();
- }
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
- bio_put(bio);
+ return bdev_io_opt(bd);
+}
- if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
- return;
+static struct sbc_ops iblock_sbc_ops = {
+ .execute_rw = iblock_execute_rw,
+ .execute_sync_cache = iblock_execute_sync_cache,
+ .execute_write_same = iblock_execute_write_same,
+ .execute_write_same_unmap = iblock_execute_write_same_unmap,
+ .execute_unmap = iblock_execute_unmap,
+};
+
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+}
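
(The sbc_ops table decouples opcode parsing from execution: sbc_parse_cdb() decodes the CDB once in the core and points cmd->execute_cmd at the matching backend callback. A rough sketch of the expected dispatch, not the verbatim core code:)

    switch (cdb[0]) {
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
            if (!ops->execute_sync_cache)
                    return TCM_UNSUPPORTED_SCSI_OPCODE;
            cmd->execute_cmd = ops->execute_sync_cache;
            break;
    /* READ_6/10/12/16 and WRITE_6/10/12/16 route to ops->execute_rw,
     * WRITE_SAME picks execute_write_same or execute_write_same_unmap
     * depending on the UNMAP bit, and UNMAP goes to execute_unmap. */
    }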
- pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
- task, bio, task->task_lba,
- (unsigned long long)bio->bi_sector, err);
+static bool iblock_get_write_cache(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ struct request_queue *q = bdev_get_queue(bd);
- transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
+ return q->flush_flags & REQ_FLUSH;
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
+ .inquiry_prod = "IBLOCK",
+ .inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
- .write_cache_emulated = 1,
- .fua_write_emulated = 1,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
- .allocate_virtdevice = iblock_allocate_virtdevice,
- .create_virtdevice = iblock_create_virtdevice,
+ .alloc_device = iblock_alloc_device,
+ .configure_device = iblock_configure_device,
.free_device = iblock_free_device,
- .alloc_task = iblock_alloc_task,
- .do_task = iblock_do_task,
- .do_discard = iblock_do_discard,
- .do_sync_cache = iblock_emulate_sync_cache,
- .free_task = iblock_free_task,
- .check_configfs_dev_params = iblock_check_configfs_dev_params,
+ .parse_cdb = iblock_parse_cdb,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
- .get_device_rev = iblock_get_device_rev,
- .get_device_type = iblock_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
+ .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+ .get_lbppbe = iblock_get_lbppbe,
+ .get_io_min = iblock_get_io_min,
+ .get_io_opt = iblock_get_io_opt,
+ .get_write_cache = iblock_get_write_cache,
};
static int __init iblock_module_init(void)
@@ -687,7 +886,7 @@ static int __init iblock_module_init(void)
return transport_subsystem_register(&iblock_template);
}
-static void iblock_module_exit(void)
+static void __exit iblock_module_exit(void)
{
transport_subsystem_release(&iblock_template);
}
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 5cf1860c10d..01c2afd8150 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -7,23 +7,19 @@
#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
- struct se_task ib_task;
- atomic_t ib_bio_cnt;
+ atomic_t pending;
atomic_t ib_bio_err_cnt;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
struct iblock_dev {
+ struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
- struct iblock_hba *ibd_host;
-} ____cacheline_aligned;
-
-struct iblock_hba {
- int iblock_host_id;
+ bool ibd_readonly;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */
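
(With struct se_device now embedded as the first member of struct iblock_dev, the IBLOCK_DEV() accessor used throughout the .c file is presumably the usual container_of() wrapper; the macro itself is not part of this hunk, so the following is an assumption:)

    static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
    {
            return container_of(dev, struct iblock_dev, dev);
    }

Embedding the core device removes both the old dev->dev_ptr cookie and the per-HBA iblock_hba bookkeeping that used to back it.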
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 45001364788..de9cab708f4 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -4,42 +4,27 @@
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
-/* target_core_cdb.c */
-int target_emulate_inquiry(struct se_task *task);
-int target_emulate_readcapacity(struct se_task *task);
-int target_emulate_readcapacity_16(struct se_task *task);
-int target_emulate_modesense(struct se_task *task);
-int target_emulate_request_sense(struct se_task *task);
-int target_emulate_unmap(struct se_task *task);
-int target_emulate_write_same(struct se_task *task);
-int target_emulate_synchronize_cache(struct se_task *task);
-int target_emulate_noop(struct se_task *task);
-
/* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
-void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
void core_update_device_list_access(u32, u32, struct se_node_acl *);
-int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *);
+int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
-int target_report_luns(struct se_task *);
-void se_release_device_for_hba(struct se_device *);
-void se_release_vpd_for_dev(struct se_device *);
-int se_free_virtual_device(struct se_device *, struct se_hba *);
-int se_dev_check_online(struct se_device *);
-int se_dev_check_shutdown(struct se_device *);
-void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_max_write_same_len(struct se_device *, u32);
+int se_dev_set_emulate_model_alias(struct se_device *, int);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -48,19 +33,23 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
int se_dev_set_emulate_tas(struct se_device *, int);
int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
+int se_dev_set_emulate_caw(struct se_device *, int);
+int se_dev_set_emulate_3pc(struct se_device *, int);
+int se_dev_set_pi_prot_type(struct se_device *, int);
+int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
-struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
- struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
int core_dev_del_lun(struct se_portal_group *, u32);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
- u32, char *, int *);
+ struct se_node_acl *, u32, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
@@ -69,12 +58,17 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
void core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int target_configure_device(struct se_device *dev);
+void target_free_device(struct se_device *);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
int core_delete_hba(struct se_hba *);
/* target_core_tmr.c */
+void core_tmr_abort_task(struct se_device *, struct se_tmr_req *,
+ struct se_session *);
int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
@@ -83,13 +77,11 @@ extern struct se_device *g_lun0_dev;
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
-struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
- unsigned char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
- u32, void *);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
+int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
+ u32, struct se_device *);
struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
@@ -101,8 +93,6 @@ void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
void transport_cmd_finish_abort(struct se_cmd *, int);
-void __transport_remove_task_from_execute_queue(struct se_task *,
- struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -111,12 +101,14 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool target_stop_task(struct se_task *task, unsigned long *flags);
-int transport_clear_lun_from_sessions(struct se_lun *);
+bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
+int transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
+sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void target_qf_do_work(struct work_struct *work);
/* target_core_stat.c */
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index b7c779389ee..df357862286 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4,8 +4,7 @@
* This file contains SPC-3 compliant persistent reservations and
* legacy SPC-2 reservations with compatible reservation handling (CRH=1)
*
- * Copyright (c) 2009, 2010 Rising Tide Systems
- * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -28,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>
@@ -53,82 +53,72 @@ struct pr_transport_id_holder {
struct list_head dest_list;
};
-int core_pr_dump_initiator_port(
+void core_pr_dump_initiator_port(
struct t10_pr_registration *pr_reg,
char *buf,
u32 size)
{
if (!pr_reg->isid_present_at_reg)
- return 0;
+ buf[0] = '\0';
- snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
- return 1;
+ snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
+enum register_type {
+ REGISTER,
+ REGISTER_AND_IGNORE_EXISTING_KEY,
+ REGISTER_AND_MOVE,
+};
+
+enum preempt_type {
+ PREEMPT,
+ PREEMPT_AND_ABORT,
+};
+
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int);
-static int core_scsi2_reservation_seq_non_holder(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u32 pr_reg_type)
+static sense_reason_t
+target_scsi2_reservation_check(struct se_cmd *cmd)
{
- switch (cdb[0]) {
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case RELEASE:
case RELEASE_10:
return 0;
default:
- return 1;
+ break;
}
- return 1;
-}
-
-static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- int ret;
-
- if (!sess)
+ if (!dev->dev_reserved_node_acl || !sess)
return 0;
- spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_reserved_node_acl || !sess) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- if (dev->dev_reserved_node_acl != sess->se_node_acl) {
- spin_unlock(&dev->dev_reservation_lock);
- return -EINVAL;
- }
- if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
+ if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ return TCM_RESERVATION_CONFLICT;
+
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+ if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+ return TCM_RESERVATION_CONFLICT;
}
- ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
- spin_unlock(&dev->dev_reservation_lock);
- return ret;
+ return 0;
}
static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
struct se_node_acl *, struct se_session *);
static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
-static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
+static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
{
struct se_session *se_sess = cmd->se_sess;
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
- struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
- int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
int conflict = 0;
- if (!crh)
- return false;
-
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
if (pr_reg) {
@@ -155,16 +145,14 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
*/
if (pr_reg->pr_res_holder) {
core_scsi3_put_pr_reg(pr_reg);
- *ret = 0;
- return false;
+ return 1;
}
if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
core_scsi3_put_pr_reg(pr_reg);
- *ret = 0;
- return true;
+ return 1;
}
core_scsi3_put_pr_reg(pr_reg);
conflict = 1;
@@ -188,27 +176,28 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return true;
+ return -EBUSY;
}
- return false;
+ return 0;
}
-int target_scsi2_reservation_release(struct se_task *task)
+sense_reason_t
+target_scsi2_reservation_release(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
- struct se_portal_group *tpg = sess->se_tpg;
- int ret = 0;
+ struct se_portal_group *tpg;
+ int rc;
- if (!sess || !tpg)
+ if (!sess || !sess->se_tpg)
goto out;
- if (target_check_scsi2_reservation_conflict(cmd, &ret))
+ rc = target_check_scsi2_reservation_conflict(cmd);
+ if (rc == 1)
goto out;
+ if (rc < 0)
+ return TCM_RESERVATION_CONFLICT;
- ret = 0;
spin_lock(&dev->dev_reservation_lock);
if (!dev->dev_reserved_node_acl || !sess)
goto out_unlock;
@@ -216,12 +205,16 @@ int target_scsi2_reservation_release(struct se_task *task)
if (dev->dev_reserved_node_acl != sess->se_node_acl)
goto out_unlock;
+ if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+ goto out_unlock;
+
dev->dev_reserved_node_acl = NULL;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
- if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
dev->dev_res_bin_isid = 0;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
}
+ tpg = sess->se_tpg;
pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
" MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
@@ -230,39 +223,39 @@ int target_scsi2_reservation_release(struct se_task *task)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
- if (!ret) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
- return ret;
+ target_complete_cmd(cmd, GOOD);
+ return 0;
}
-int target_scsi2_reservation_reserve(struct se_task *task)
+sense_reason_t
+target_scsi2_reservation_reserve(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
- struct se_portal_group *tpg = sess->se_tpg;
- int ret = 0;
+ struct se_portal_group *tpg;
+ sense_reason_t ret = 0;
+ int rc;
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- ret = -EINVAL;
- goto out;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
* This is currently the case for target_core_mod passthrough struct se_cmd
* ops
*/
- if (!sess || !tpg)
+ if (!sess || !sess->se_tpg)
goto out;
- if (target_check_scsi2_reservation_conflict(cmd, &ret))
+ rc = target_check_scsi2_reservation_conflict(cmd);
+ if (rc == 1)
goto out;
- ret = 0;
+ if (rc < 0)
+ return TCM_RESERVATION_CONFLICT;
+
+ tpg = sess->se_tpg;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
@@ -275,16 +268,15 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out_unlock;
}
dev->dev_reserved_node_acl = sess->se_node_acl;
- dev->dev_flags |= DF_SPC2_RESERVATIONS;
+ dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
if (sess->sess_bin_isid != 0) {
dev->dev_res_bin_isid = sess->sess_bin_isid;
- dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+ dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -294,10 +286,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
- if (!ret) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
return ret;
}
@@ -310,9 +300,9 @@ out:
*/
static int core_scsi3_pr_seq_non_holder(
struct se_cmd *cmd,
- unsigned char *cdb,
u32 pr_reg_type)
{
+ unsigned char *cdb = cmd->t_task_cdb;
struct se_dev_entry *se_deve;
struct se_session *se_sess = cmd->se_sess;
int other_cdb = 0, ignore_reg;
@@ -321,17 +311,11 @@ static int core_scsi3_pr_seq_non_holder(
int we = 0; /* Write Exclusive */
int legacy = 0; /* Act like a legacy device and return
* RESERVATION CONFLICT on some CDBs */
- /*
- * A legacy SPC-2 reservation is being held.
- */
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
- return core_scsi2_reservation_seq_non_holder(cmd,
- cdb, pr_reg_type);
- se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Determine if the registration should be ignored due to
- * non-matching ISIDs in core_scsi3_pr_reservation_check().
+ * non-matching ISIDs in target_scsi3_pr_reservation_check().
*/
ignore_reg = (pr_reg_type & 0x80000000);
if (ignore_reg)
@@ -490,18 +474,17 @@ static int core_scsi3_pr_seq_non_holder(
* statement.
*/
if (!ret && !other_cdb) {
-#if 0
- pr_debug("Allowing explict CDB: 0x%02x for %s"
+ pr_debug("Allowing explicit CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
-#endif
+
return ret;
}
/*
* Check if write exclusive initiator ports *NOT* holding the
* WRITE_EXCLUSIVE_* reservation.
*/
- if ((we) && !(registered_nexus)) {
+ if (we && !registered_nexus) {
if (cmd->data_direction == DMA_TO_DEVICE) {
/*
* Conflict for write exclusive
@@ -522,14 +505,14 @@ static int core_scsi3_pr_seq_non_holder(
* as we expect registered non-reservation holding
* nexuses to issue CDBs.
*/
-#if 0
+
if (!registered_nexus) {
- pr_debug("Allowing implict CDB: 0x%02x"
+ pr_debug("Allowing implicit CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
}
-#endif
+
return 0;
}
} else if ((reg_only) || (all_reg)) {
@@ -538,11 +521,11 @@ static int core_scsi3_pr_seq_non_holder(
* For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
* allow commands from registered nexuses.
*/
-#if 0
- pr_debug("Allowing implict CDB: 0x%02x for %s"
+
+ pr_debug("Allowing implicit CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
-#endif
+
return 0;
}
}
@@ -555,10 +538,41 @@ static int core_scsi3_pr_seq_non_holder(
return 1; /* Conflict by default */
}
+static sense_reason_t
+target_scsi3_pr_reservation_check(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ u32 pr_reg_type;
+
+ if (!dev->dev_pr_res_holder)
+ return 0;
+
+ pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+ cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+ if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl)
+ goto check_nonholder;
+
+ if (dev->dev_pr_res_holder->isid_present_at_reg) {
+ if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
+ sess->sess_bin_isid) {
+ pr_reg_type |= 0x80000000;
+ goto check_nonholder;
+ }
+ }
+
+ return 0;
+
+check_nonholder:
+ if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type))
+ return TCM_RESERVATION_CONFLICT;
+ return 0;
+}
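
(Note the in-band flag here: the top bit of pr_reg_type smuggles "registration exists, but the ISID does not match" down into core_scsi3_pr_seq_non_holder(), which strips it via the ignore_reg mask shown earlier in this file. Spelled out with a hypothetical macro name, since the code uses the bare constant:)

    #define PR_REG_TYPE_ISID_MISMATCH       0x80000000      /* hypothetical */

    bool ignore_reg = pr_reg_type & PR_REG_TYPE_ISID_MISMATCH;
    u32  res_type   = pr_reg_type & ~PR_REG_TYPE_ISID_MISMATCH;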
+
static u32 core_scsi3_pr_generation(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
u32 prg;
+
/*
* PRGeneration field shall contain the value of a 32-bit wrapping
* counter maintained by the device server.
@@ -569,56 +583,12 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
* See spc4r17 section 6.3.12 READ_KEYS service action
*/
spin_lock(&dev->dev_reservation_lock);
- prg = su_dev->t10_pr.pr_generation++;
+ prg = dev->t10_pr.pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
return prg;
}
-static int core_scsi3_pr_reservation_check(
- struct se_cmd *cmd,
- u32 *pr_reg_type)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- int ret;
-
- if (!sess)
- return 0;
- /*
- * A legacy SPC-2 reservation is being held.
- */
- if (dev->dev_flags & DF_SPC2_RESERVATIONS)
- return core_scsi2_reservation_check(cmd, pr_reg_type);
-
- spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_pr_res_holder) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
- cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
- if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
- spin_unlock(&dev->dev_reservation_lock);
- return -EINVAL;
- }
- if (!dev->dev_pr_res_holder->isid_present_at_reg) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
- sess->sess_bin_isid) ? 0 : -EINVAL;
- /*
- * Use bit in *pr_reg_type to notify ISID mismatch in
- * core_scsi3_pr_seq_non_holder().
- */
- if (ret != 0)
- *pr_reg_type |= 0x80000000;
- spin_unlock(&dev->dev_reservation_lock);
-
- return ret;
-}
-
static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
@@ -628,7 +598,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
int all_tg_pt,
int aptpl)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
@@ -637,14 +606,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
return NULL;
}
- pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
- GFP_ATOMIC);
- if (!pr_reg->pr_aptpl_buf) {
- pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
- kmem_cache_free(t10_pr_reg_cache, pr_reg);
- return NULL;
- }
-
INIT_LIST_HEAD(&pr_reg->pr_reg_list);
INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
@@ -714,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
atomic_inc(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock);
spin_lock_bh(&port->sep_alua_lock);
@@ -722,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
alua_port_list) {
/*
* This pointer will be NULL for demo mode MappedLUNs
- * that have not been make explict via a ConfigFS
+ * that have not been made explicit via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
if (!deve_tmp->se_lun_acl)
@@ -749,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
continue;
atomic_inc(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock_bh(&port->sep_alua_lock);
/*
* Grab a configfs group dependency that is released
@@ -762,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
atomic_dec(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
goto out;
}
/*
@@ -779,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
sa_res_key, all_tg_pt, aptpl);
if (!pr_reg_atp) {
atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
atomic_dec(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
core_scsi3_lunacl_undepend_item(deve_tmp);
goto out;
}
@@ -794,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
spin_lock(&dev->se_port_lock);
atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&dev->se_port_lock);
@@ -835,7 +796,6 @@ int core_scsi3_alloc_aptpl_registration(
pr_err("Unable to allocate struct t10_pr_registration\n");
return -ENOMEM;
}
- pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
INIT_LIST_HEAD(&pr_reg->pr_reg_list);
INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
@@ -889,11 +849,9 @@ static void core_scsi3_aptpl_reserve(
struct t10_pr_registration *pr_reg)
{
char i_buf[PR_REG_ISID_ID_LEN];
- int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
spin_lock(&dev->dev_reservation_lock);
dev->dev_pr_res_holder = pr_reg;
@@ -906,11 +864,11 @@ static void core_scsi3_aptpl_reserve(
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
- (prf_isid) ? &i_buf[0] : "");
+ i_buf);
}
static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
- struct t10_pr_registration *, int, int);
+ struct t10_pr_registration *, enum register_type, int);
static int __core_scsi3_check_aptpl_registration(
struct se_device *dev,
@@ -921,7 +879,7 @@ static int __core_scsi3_check_aptpl_registration(
struct se_dev_entry *deve)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
u16 tpgt;
@@ -988,11 +946,10 @@ int core_scsi3_check_aptpl_registration(
struct se_lun *lun,
struct se_lun_acl *lun_acl)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_node_acl *nacl = lun_acl->se_lun_nacl;
- struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
+ struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1004,21 +961,19 @@ static void __core_scsi3_dump_registration(
struct se_device *dev,
struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg,
- int register_type)
+ enum register_type register_type)
{
struct se_portal_group *se_tpg = nacl->se_tpg;
char i_buf[PR_REG_ISID_ID_LEN];
- int prf_isid;
memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
- " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
- "_AND_MOVE" : (register_type == 1) ?
+ " Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ?
+ "_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ?
"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
- (prf_isid) ? i_buf : "");
+ i_buf);
pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
tfo->tpg_get_tag(se_tpg));
@@ -1040,13 +995,12 @@ static void __core_scsi3_add_registration(
struct se_device *dev,
struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg,
- int register_type,
+ enum register_type register_type,
int register_move)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1058,7 +1012,7 @@ static void __core_scsi3_add_registration(
* for the REGISTER.
*/
pr_reg->pr_res_generation = (register_move) ?
- su_dev->t10_pr.pr_generation++ :
+ dev->t10_pr.pr_generation++ :
core_scsi3_pr_generation(dev);
spin_lock(&pr_tmpl->registration_lock);
@@ -1107,7 +1061,7 @@ static int core_scsi3_alloc_registration(
u64 sa_res_key,
int all_tg_pt,
int aptpl,
- int register_type,
+ enum register_type register_type,
int register_move)
{
struct t10_pr_registration *pr_reg;
@@ -1127,7 +1081,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
struct se_node_acl *nacl,
unsigned char *isid)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct se_portal_group *tpg;
@@ -1152,11 +1106,11 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* for fabric modules (iSCSI) requiring them.
*/
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
- if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
+ if (dev->dev_attrib.enforce_pr_isids)
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock);
return pr_reg;
}
@@ -1171,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
continue;
atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock);
return pr_reg;
}
@@ -1201,10 +1155,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
{
atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
-static int core_scsi3_check_implict_release(
+static int core_scsi3_check_implicit_release(
struct se_device *dev,
struct t10_pr_registration *pr_reg)
{
@@ -1220,7 +1174,7 @@ static int core_scsi3_check_implict_release(
}
if (pr_res_holder == pr_reg) {
/*
- * Perform an implict RELEASE if the registration that
+ * Perform an implicit RELEASE if the registration that
* is being released is holding the reservation.
*
* From spc4r17, section 5.7.11.1:
@@ -1238,7 +1192,7 @@ static int core_scsi3_check_implict_release(
* For 'All Registrants' reservation types, all existing
* registrations are still processed as reservation holders
* in core_scsi3_pr_seq_non_holder() after the initial
- * reservation holder is implictly released here.
+ * reservation holder is implicitly released here.
*/
} else if (pr_reg->pr_reg_all_tg_pt &&
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
@@ -1266,13 +1220,11 @@ static void __core_scsi3_free_registration(
{
struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
- int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_reg->pr_reg_deve->def_pr_registered = 0;
pr_reg->pr_reg_deve->pr_res_key = 0;
@@ -1300,7 +1252,7 @@ static void __core_scsi3_free_registration(
pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
" Node: %s%s\n", tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname,
- (prf_isid) ? &i_buf[0] : "");
+ i_buf);
pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
@@ -1312,7 +1264,6 @@ static void __core_scsi3_free_registration(
if (!preempt_and_abort_list) {
pr_reg->pr_reg_deve = NULL;
pr_reg->pr_reg_nacl = NULL;
- kfree(pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, pr_reg);
return;
}
@@ -1327,7 +1278,7 @@ void core_scsi3_free_pr_reg_from_nacl(
struct se_device *dev,
struct se_node_acl *nacl)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
/*
* If the passed se_node_acl matches the reservation holder,
@@ -1357,7 +1308,7 @@ void core_scsi3_free_pr_reg_from_nacl(
void core_scsi3_free_all_registrations(
struct se_device *dev)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
spin_lock(&dev->dev_reservation_lock);
@@ -1381,7 +1332,6 @@ void core_scsi3_free_all_registrations(
list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
pr_reg_aptpl_list) {
list_del(&pr_reg->pr_reg_aptpl_list);
- kfree(pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, pr_reg);
}
spin_unlock(&pr_tmpl->aptpl_reg_lock);
@@ -1399,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
&tpg->tpg_group.cg_item);
atomic_dec(&tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1419,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
if (nacl->dynamic_node_acl) {
atomic_dec(&nacl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
return;
}
@@ -1427,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
&nacl->acl_group.cg_item);
atomic_dec(&nacl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1458,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
*/
if (!lun_acl) {
atomic_dec(&se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
return;
}
nacl = lun_acl->se_lun_nacl;
@@ -1468,10 +1418,11 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
&lun_acl->se_lun_group.cg_item);
atomic_dec(&se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
-static int core_scsi3_decode_spec_i_port(
+static sense_reason_t
+core_scsi3_decode_spec_i_port(
struct se_cmd *cmd,
struct se_portal_group *tpg,
unsigned char *l_isid,
@@ -1487,20 +1438,20 @@ static int core_scsi3_decode_spec_i_port(
struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
- struct list_head tid_dest_list;
+ LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
struct target_core_fabric_ops *tmp_tf_ops;
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ sense_reason_t ret;
u32 tpdl, tid_len = 0;
- int ret, dest_local_nexus, prf_isid;
+ int dest_local_nexus;
u32 dest_rtpi = 0;
memset(dest_iport, 0, 64);
- INIT_LIST_HEAD(&tid_dest_list);
- local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Allocate a struct pr_transport_id_holder and setup the
* local_node_acl and local_se_deve pointers and add to
@@ -1510,8 +1461,7 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1523,8 +1473,7 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1535,7 +1484,19 @@ static int core_scsi3_decode_spec_i_port(
tidh_new->dest_local_nexus = 1;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+ if (cmd->data_length < 28) {
+ pr_warn("SPC-PR: Received PR OUT parameter list"
+ " length too small: %u\n", cmd->data_length);
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
@@ -1550,9 +1511,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
/*
* Start processing the received transport IDs using the
@@ -1592,22 +1552,19 @@ static int core_scsi3_decode_spec_i_port(
continue;
atomic_inc(&tmp_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock);
- ret = core_scsi3_tpg_depend_item(tmp_tpg);
- if (ret != 0) {
+ if (core_scsi3_tpg_depend_item(tmp_tpg)) {
pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ smp_mb__after_atomic();
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
/*
- * Locate the desination initiator ACL to be registered
+ * Locate the destination initiator ACL to be registered
* from the decoded fabric module specific TransportID
* at *i_str.
*/
@@ -1616,7 +1573,7 @@ static int core_scsi3_decode_spec_i_port(
tmp_tpg, i_str);
if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
spin_unlock_irq(&tmp_tpg->acl_node_lock);
@@ -1626,17 +1583,14 @@ static int core_scsi3_decode_spec_i_port(
continue;
}
- ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
- if (ret != 0) {
+ if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
core_scsi3_tpg_undepend_item(tmp_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
dest_tpg = tmp_tpg;
@@ -1653,24 +1607,22 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
-#if 0
+
pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
" tid_len: %d for %s + %s\n",
dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
tpdl, tid_len, i_str, iport_ptr);
-#endif
+
if (tid_len > tpdl) {
pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
/*
* Locate the destination struct se_dev_entry pointer for matching
@@ -1687,30 +1639,26 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
- ret = core_scsi3_lunacl_depend_item(dest_se_deve);
- if (ret < 0) {
+ if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
-#if 0
+
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
" dest_se_deve mapped_lun: %u\n",
dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
-#endif
+
/*
* Skip any TransportIDs that already have a registration for
* this target port.
@@ -1739,10 +1687,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -ENOMEM;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = dest_tpg;
@@ -1773,9 +1719,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
@@ -1812,8 +1757,7 @@ static int core_scsi3_decode_spec_i_port(
kfree(tidh);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(dest_pr_reg, i_buf, PR_REG_ISID_ID_LEN);
__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
dest_pr_reg, 0, 0);
@@ -1821,8 +1765,7 @@ static int core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
" %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
- dest_node_acl->initiatorname, (prf_isid) ?
- &i_buf[0] : "", dest_se_deve->mapped_lun);
+ dest_node_acl->initiatorname, i_buf, dest_se_deve->mapped_lun);
if (dest_local_nexus)
continue;
@@ -1833,8 +1776,9 @@ static int core_scsi3_decode_spec_i_port(
}
return 0;
-out:
+out_unmap:
transport_kunmap_data_sg(cmd);
+out:
/*
* For the failure case, release everything from tid_dest_list
* including *dest_pr_reg and the configfs dependencies..
@@ -1860,7 +1804,6 @@ out:
kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
}
- kfree(dest_pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
if (dest_local_nexus)
@@ -1873,37 +1816,25 @@ out:
return ret;
}
-/*
- * Called with struct se_device->dev_reservation_lock held
- */
-static int __core_scsi3_update_aptpl_buf(
+static int core_scsi3_update_aptpl_buf(
struct se_device *dev,
unsigned char *buf,
- u32 pr_aptpl_buf_len,
- int clear_aptpl_metadata)
+ u32 pr_aptpl_buf_len)
{
struct se_lun *lun;
struct se_portal_group *tpg;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
int reg_count = 0;
+ int ret = 0;
- memset(buf, 0, pr_aptpl_buf_len);
- /*
- * Called to clear metadata once APTPL has been deactivated.
- */
- if (clear_aptpl_metadata) {
- snprintf(buf, pr_aptpl_buf_len,
- "No Registrations or Reservations\n");
- return 0;
- }
+ spin_lock(&dev->dev_reservation_lock);
+ spin_lock(&dev->t10_pr.registration_lock);
/*
* Walk the registration list..
*/
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
tmp[0] = '\0';
@@ -1948,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&su_dev->t10_pr.registration_lock);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out;
}
len += sprintf(buf+len, "%s", tmp);
@@ -1966,53 +1897,34 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&su_dev->t10_pr.registration_lock);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
- return 0;
-}
-
-static int core_scsi3_update_aptpl_buf(
- struct se_device *dev,
- unsigned char *buf,
- u32 pr_aptpl_buf_len,
- int clear_aptpl_metadata)
-{
- int ret;
-
- spin_lock(&dev->dev_reservation_lock);
- ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
- clear_aptpl_metadata);
+out:
+ spin_unlock(&dev->t10_pr.registration_lock);
spin_unlock(&dev->dev_reservation_lock);
return ret;
}
-/*
- * Called with struct se_device->aptpl_file_mutex held
- */
static int __core_scsi3_write_aptpl_to_file(
struct se_device *dev,
- unsigned char *buf,
- u32 pr_aptpl_buf_len)
+ unsigned char *buf)
{
- struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+ struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
- struct iovec iov[1];
- mm_segment_t old_fs;
int flags = O_RDWR | O_CREAT | O_TRUNC;
char path[512];
+ u32 pr_aptpl_buf_len;
int ret;
- memset(iov, 0, sizeof(struct iovec));
memset(path, 0, 512);
if (strlen(&wwn->unit_serial[0]) >= 512) {
@@ -2023,101 +1935,88 @@ static int __core_scsi3_write_aptpl_to_file(
snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
- if (IS_ERR(file) || !file || !file->f_dentry) {
+ if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+ return PTR_ERR(file);
}
- iov[0].iov_base = &buf[0];
- if (!pr_aptpl_buf_len)
- iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
- else
- iov[0].iov_len = pr_aptpl_buf_len;
+ pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
- set_fs(old_fs);
+ ret = kernel_write(file, buf, pr_aptpl_buf_len, 0);
- if (ret < 0) {
+ if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
- filp_close(file, NULL);
- return -EIO;
- }
- filp_close(file, NULL);
+ fput(file);
- return 0;
+ return (ret < 0) ? -EIO : 0;
}
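
(This hunk also retires the set_fs(get_ds())/vfs_writev() sequence in favour of kernel_write(), which writes a kernel buffer at an explicit file offset. Its contract in this era is roughly the following; treat the exact parameter types as an approximation:)

    /* Returns bytes written or a negative errno; no set_fs() juggling. */
    int kernel_write(struct file *file, const char *buf,
                     size_t count, loff_t pos);

Any negative return is collapsed to -EIO by the helper above, and fput() replaces filp_close(file, NULL) for dropping the file reference.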
-static int core_scsi3_update_and_write_aptpl(
- struct se_device *dev,
- unsigned char *in_buf,
- u32 in_pr_aptpl_buf_len)
+/*
+ * Clear the APTPL metadata if APTPL has been disabled, otherwise
+ * write out the updated metadata to struct file for this SCSI device.
+ */
+static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
{
- unsigned char null_buf[64], *buf;
- u32 pr_aptpl_buf_len;
- int ret, clear_aptpl_metadata = 0;
- /*
- * Can be called with a NULL pointer from PROUT service action CLEAR
- */
- if (!in_buf) {
- memset(null_buf, 0, 64);
- buf = &null_buf[0];
- /*
- * This will clear the APTPL metadata to:
- * "No Registrations or Reservations" status
- */
- pr_aptpl_buf_len = 64;
- clear_aptpl_metadata = 1;
- } else {
- buf = in_buf;
- pr_aptpl_buf_len = in_pr_aptpl_buf_len;
+ unsigned char *buf;
+ int rc;
+
+ if (!aptpl) {
+ char *null_buf = "No Registrations or Reservations\n";
+
+ rc = __core_scsi3_write_aptpl_to_file(dev, null_buf);
+ dev->t10_pr.pr_aptpl_active = 0;
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated\n");
+
+ if (rc)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ return 0;
}
- ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
- clear_aptpl_metadata);
- if (ret != 0)
- return ret;
- /*
- * __core_scsi3_write_aptpl_to_file() will call strlen()
- * on the passed buf to determine pr_aptpl_buf_len.
- */
- ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
- if (ret != 0)
- return ret;
+ buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return TCM_OUT_OF_RESOURCES;
- return ret;
+ rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+ if (rc < 0) {
+ kfree(buf);
+ return TCM_OUT_OF_RESOURCES;
+ }
+
+ rc = __core_scsi3_write_aptpl_to_file(dev, buf);
+ if (rc != 0) {
+ pr_err("SPC-3 PR: Could not update APTPL\n");
+ kfree(buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ dev->t10_pr.pr_aptpl_active = 1;
+ kfree(buf);
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
+ return 0;
}
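
(After this rework, callers no longer thread a preallocated pr_aptpl_buf through the PR paths; they simply pass the APTPL bit and let the function allocate, serialize, and write internally. A minimal call-site sketch, matching the REGISTER path further down:)

    sense_reason_t ret;

    ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
    if (ret)
            return ret;     /* TCM_OUT_OF_RESOURCES or comms failure */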
-static int core_scsi3_emulate_pro_register(
- struct se_cmd *cmd,
- u64 res_key,
- u64 sa_res_key,
- int aptpl,
- int all_tg_pt,
- int spec_i_pt,
- int ignore_key)
+static sense_reason_t
+core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ bool aptpl, bool all_tg_pt, bool spec_i_pt, enum register_type register_type)
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
- struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
- /* Used for APTPL metadata w/ UNREGISTER */
- unsigned char *pr_aptpl_buf = NULL;
+ struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
- int pr_holder = 0, ret = 0, type;
+ sense_reason_t ret = TCM_NO_SENSE;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
se_tpg = se_sess->se_tpg;
- se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
@@ -2128,13 +2027,12 @@ static int core_scsi3_emulate_pro_register(
/*
* Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
*/
- pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
- if (!pr_reg_e) {
+ pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+ if (!pr_reg) {
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* Do nothing but return GOOD status.
@@ -2148,15 +2046,13 @@ static int core_scsi3_emulate_pro_register(
* Port Endpoint that the PRO was received from on the
* Logical Unit of the SCSI device server.
*/
- ret = core_scsi3_alloc_registration(cmd->se_dev,
+ if (core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, se_deve, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
- ignore_key, 0);
- if (ret != 0) {
+ register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
} else {
/*
@@ -2172,219 +2068,137 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0)
return ret;
}
+
+ return core_scsi3_update_and_write_aptpl(dev, aptpl);
+ }
+
+ /* ok, existing registration */
+
+ if ((register_type == REGISTER) && (res_key != pr_reg->pr_res_key)) {
+ pr_err("SPC-3 PR REGISTER: Received"
+ " res_key: 0x%016Lx does not match"
+ " existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key,
+ pr_reg->pr_res_key);
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out;
+ }
+
+ if (spec_i_pt) {
+ pr_err("SPC-3 PR REGISTER: SPEC_I_PT"
+ " set on a registered nexus\n");
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ /*
+ * An existing ALL_TG_PT=1 registration being released
+ * must also set ALL_TG_PT=1 in the incoming PROUT.
+ */
+ if (pr_reg->pr_reg_all_tg_pt && !all_tg_pt) {
+ pr_err("SPC-3 PR REGISTER: ALL_TG_PT=1"
+ " registration exists, but ALL_TG_PT=1 bit not"
+ " present in received PROUT\n");
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out;
+ }
+
+ /*
+ * sa_res_key=1 Change Reservation Key for registered I_T Nexus.
+ */
+ if (sa_res_key) {
/*
- * Nothing left to do for the APTPL=0 case.
+ * Increment PRgeneration counter for struct se_device
+ * upon a successful REGISTER, see spc4r17 section 6.3.2
+ * READ_KEYS service action.
*/
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
- " REGISTER\n");
- return 0;
- }
+ pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
+ pr_reg->pr_res_key = sa_res_key;
+ pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ " Key for %s to: 0x%016Lx PRgeneration:"
+ " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+ (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
+ pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+ } else {
/*
- * Locate the newly allocated local I_T Nexus *pr_reg, and
- * update the APTPL metadata information using its
- * preallocated *pr_reg->pr_aptpl_buf.
+ * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
- pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
- se_sess->se_node_acl, se_sess);
-
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
- pr_tmpl->pr_aptpl_active = 1;
- pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+ pr_holder = core_scsi3_check_implicit_release(
+ cmd->se_dev, pr_reg);
+ if (pr_holder < 0) {
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out;
}
+ type = pr_reg->pr_res_type;
- core_scsi3_put_pr_reg(pr_reg);
- return ret;
- } else {
+ spin_lock(&pr_tmpl->registration_lock);
/*
- * Locate the existing *pr_reg via struct se_node_acl pointers
+ * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+ * and matching pr_res_key.
*/
- pr_reg = pr_reg_e;
- type = pr_reg->pr_res_type;
+ if (pr_reg->pr_reg_all_tg_pt) {
+ list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ if (!pr_reg_p->pr_reg_all_tg_pt)
+ continue;
+ if (pr_reg_p->pr_res_key != res_key)
+ continue;
+ if (pr_reg == pr_reg_p)
+ continue;
+ if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg_p->pr_reg_nacl->initiatorname))
+ continue;
- if (!ignore_key) {
- if (res_key != pr_reg->pr_res_key) {
- pr_err("SPC-3 PR REGISTER: Received"
- " res_key: 0x%016Lx does not match"
- " existing SA REGISTER res_key:"
- " 0x%016Lx\n", res_key,
- pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ __core_scsi3_free_registration(dev,
+ pr_reg_p, NULL, 0);
}
}
- if (spec_i_pt) {
- pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
- " set while sa_res_key=0\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
- }
+
/*
- * An existing ALL_TG_PT=1 registration being released
- * must also set ALL_TG_PT=1 in the incoming PROUT.
+ * Release the calling I_T Nexus registration now..
*/
- if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
- pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
- " registration exists, but ALL_TG_PT=1 bit not"
- " present in received PROUT\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
+ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+ pr_reg = NULL;
+
/*
- * Allocate APTPL metadata buffer used for UNREGISTER ops
+ * From spc4r17, section 5.7.11.3 Unregistering
+ *
+ * If the persistent reservation is a registrants only
+ * type, the device server shall establish a unit
+ * attention condition for the initiator port associated
+ * with every registered I_T nexus except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, with the additional sense code set to
+ * RESERVATIONS RELEASED.
*/
- if (aptpl) {
- pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
- GFP_KERNEL);
- if (!pr_aptpl_buf) {
- pr_err("Unable to allocate"
- " pr_aptpl_buf\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ if (pr_holder &&
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ list_for_each_entry(pr_reg_p,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ core_scsi3_ua_allocate(
+ pr_reg_p->pr_reg_nacl,
+ pr_reg_p->pr_res_mapped_lun,
+ 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
}
}
- /*
- * sa_res_key=0 Unregister Reservation Key for registered I_T
- * Nexus sa_res_key=1 Change Reservation Key for registered I_T
- * Nexus.
- */
- if (!sa_res_key) {
- pr_holder = core_scsi3_check_implict_release(
- cmd->se_dev, pr_reg);
- if (pr_holder < 0) {
- kfree(pr_aptpl_buf);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
- }
-
- spin_lock(&pr_tmpl->registration_lock);
- /*
- * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
- * and matching pr_res_key.
- */
- if (pr_reg->pr_reg_all_tg_pt) {
- list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
- &pr_tmpl->registration_list,
- pr_reg_list) {
-
- if (!pr_reg_p->pr_reg_all_tg_pt)
- continue;
-
- if (pr_reg_p->pr_res_key != res_key)
- continue;
- if (pr_reg == pr_reg_p)
- continue;
-
- if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
- pr_reg_p->pr_reg_nacl->initiatorname))
- continue;
-
- __core_scsi3_free_registration(dev,
- pr_reg_p, NULL, 0);
- }
- }
- /*
- * Release the calling I_T Nexus registration now..
- */
- __core_scsi3_free_registration(cmd->se_dev, pr_reg,
- NULL, 1);
- /*
- * From spc4r17, section 5.7.11.3 Unregistering
- *
- * If the persistent reservation is a registrants only
- * type, the device server shall establish a unit
- * attention condition for the initiator port associated
- * with every registered I_T nexus except for the I_T
- * nexus on which the PERSISTENT RESERVE OUT command was
- * received, with the additional sense code set to
- * RESERVATIONS RELEASED.
- */
- if (pr_holder &&
- ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
- (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
- list_for_each_entry(pr_reg_p,
- &pr_tmpl->registration_list,
- pr_reg_list) {
-
- core_scsi3_ua_allocate(
- pr_reg_p->pr_reg_nacl,
- pr_reg_p->pr_res_mapped_lun,
- 0x2A,
- ASCQ_2AH_RESERVATIONS_RELEASED);
- }
- }
- spin_unlock(&pr_tmpl->registration_lock);
-
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
- " for UNREGISTER\n");
- return 0;
- }
-
- ret = core_scsi3_update_and_write_aptpl(dev,
- &pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
- pr_tmpl->pr_aptpl_active = 1;
- pr_debug("SPC-3 PR: Set APTPL Bit Activated"
- " for UNREGISTER\n");
- }
-
- kfree(pr_aptpl_buf);
- return ret;
- } else {
- /*
- * Increment PRgeneration counter for struct se_device"
- * upon a successful REGISTER, see spc4r17 section 6.3.2
- * READ_KEYS service action.
- */
- pr_reg->pr_res_generation = core_scsi3_pr_generation(
- cmd->se_dev);
- pr_reg->pr_res_key = sa_res_key;
- pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
- " Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
- (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
- pr_reg->pr_reg_nacl->initiatorname,
- pr_reg->pr_res_key, pr_reg->pr_res_generation);
-
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- core_scsi3_put_pr_reg(pr_reg);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
- " for REGISTER\n");
- return 0;
- }
+ spin_unlock(&pr_tmpl->registration_lock);
+ }
- ret = core_scsi3_update_and_write_aptpl(dev,
- &pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
- pr_tmpl->pr_aptpl_active = 1;
- pr_debug("SPC-3 PR: Set APTPL Bit Activated"
- " for REGISTER\n");
- }
+ ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
- kfree(pr_aptpl_buf);
- core_scsi3_put_pr_reg(pr_reg);
- }
- }
- return 0;
+out:
+ if (pr_reg)
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
}
unsigned char *core_scsi3_pr_dump_type(int type)
@@ -2409,31 +2223,23 @@ unsigned char *core_scsi3_pr_dump_type(int type)
return "Unknown SPC-3 PR Type";
}
-static int core_scsi3_pro_reserve(
- struct se_cmd *cmd,
- struct se_device *dev,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
{
+ struct se_device *dev = cmd->se_dev;
struct se_session *se_sess = cmd->se_sess;
- struct se_dev_entry *se_deve;
struct se_lun *se_lun = cmd->se_lun;
- struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
- int ret, prf_isid;
+ sense_reason_t ret;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- se_tpg = se_sess->se_tpg;
- se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
@@ -2442,8 +2248,7 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2458,9 +2263,8 @@ static int core_scsi3_pro_reserve(
pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2474,9 +2278,8 @@ static int core_scsi3_pro_reserve(
*/
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2485,7 +2288,7 @@ static int core_scsi3_pro_reserve(
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if ((pr_res_holder)) {
+ if (pr_res_holder) {
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -2507,9 +2310,8 @@ static int core_scsi3_pro_reserve(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2531,9 +2333,8 @@ static int core_scsi3_pro_reserve(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2546,8 +2347,8 @@ static int core_scsi3_pro_reserve(
	 * shall complete the command with GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ ret = 0;
+ goto out_put_pr_reg;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2557,8 +2358,7 @@ static int core_scsi3_pro_reserve(
pr_reg->pr_res_type = type;
pr_reg->pr_res_holder = 1;
dev->dev_pr_res_holder = pr_reg;
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
@@ -2567,31 +2367,22 @@ static int core_scsi3_pro_reserve(
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- (prf_isid) ? &i_buf[0] : "");
+ i_buf);
spin_unlock(&dev->dev_reservation_lock);
- if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
- pr_debug("SPC-3 PR: Updated APTPL metadata"
- " for RESERVE\n");
- }
+ if (pr_tmpl->pr_aptpl_active)
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+ ret = 0;
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ return ret;
}
-static int core_scsi3_emulate_pro_reserve(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
+ u64 res_key)
{
- struct se_device *dev = cmd->se_dev;
- int ret = 0;
-
switch (type) {
case PR_TYPE_WRITE_EXCLUSIVE:
case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -2599,16 +2390,12 @@ static int core_scsi3_emulate_pro_reserve(
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
- ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
- break;
+ return core_scsi3_pro_reserve(cmd, type, scope, res_key);
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
-
- return ret;
}
/*
@@ -2618,15 +2405,13 @@ static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
- int explict)
+ int explicit)
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
- int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
*/
@@ -2634,35 +2419,33 @@ static void __core_scsi3_complete_pro_release(
pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (explict) ? "explict" : "implict",
+ tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
- (prf_isid) ? &i_buf[0] : "");
+ i_buf);
/*
* Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
*/
pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
}
-static int core_scsi3_emulate_pro_release(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
+ u64 res_key)
{
struct se_device *dev = cmd->se_dev;
struct se_session *se_sess = cmd->se_sess;
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
- int ret, all_reg = 0;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ int all_reg = 0;
+ sense_reason_t ret = 0;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2671,8 +2454,7 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2693,8 +2475,7 @@ static int core_scsi3_emulate_pro_release(
* No persistent reservation, return GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ goto out_put_pr_reg;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2707,9 +2488,9 @@ static int core_scsi3_emulate_pro_release(
* persistent reservation holder. return GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ goto out_put_pr_reg;
}
+
/*
* From spc4r17 Section 5.7.11.2 Releasing:
*
@@ -2729,9 +2510,8 @@ static int core_scsi3_emulate_pro_release(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2752,9 +2532,8 @@ static int core_scsi3_emulate_pro_release(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* In response to a persistent reservation release request from the
@@ -2806,26 +2585,21 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&pr_tmpl->registration_lock);
write_aptpl:
- if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
- pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
- }
+ if (pr_tmpl->pr_aptpl_active)
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ return ret;
}
-static int core_scsi3_emulate_pro_clear(
- struct se_cmd *cmd,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
struct se_session *se_sess = cmd->se_sess;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
u32 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
@@ -2837,8 +2611,7 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2857,8 +2630,7 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* a) Release the persistent reservation, if any;
@@ -2899,11 +2671,7 @@ static int core_scsi3_emulate_pro_clear(
pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
cmd->se_tfo->get_fabric_name());
- if (pr_tmpl->pr_aptpl_active) {
- core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
- pr_debug("SPC-3 PR: Updated APTPL metadata"
- " for CLEAR\n");
- }
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, false);
core_scsi3_pr_generation(dev);
return 0;
@@ -2918,18 +2686,16 @@ static void __core_scsi3_complete_pro_preempt(
struct list_head *preempt_and_abort_list,
int type,
int scope,
- int abort)
+ enum preempt_type preempt_type)
{
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
- int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
- * Do an implict RELEASE of the existing reservation.
+ * Do an implicit RELEASE of the existing reservation.
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
@@ -2942,12 +2708,12 @@ static void __core_scsi3_complete_pro_preempt(
pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+ tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
- tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
- nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+ tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
+ nacl->initiatorname, i_buf);
/*
* For PREEMPT_AND_ABORT, add the preempting reservation's
* struct t10_pr_registration to the list that will be compared
@@ -2977,57 +2743,44 @@ static void core_scsi3_release_preempt_and_abort(
pr_reg->pr_reg_deve = NULL;
pr_reg->pr_reg_nacl = NULL;
- kfree(pr_reg->pr_aptpl_buf);
kmem_cache_free(t10_pr_reg_cache, pr_reg);
}
}
-static int core_scsi3_pro_preempt(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key,
- u64 sa_res_key,
- int abort)
+static sense_reason_t
+core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
+ u64 sa_res_key, enum preempt_type preempt_type)
{
struct se_device *dev = cmd->se_dev;
- struct se_dev_entry *se_deve;
struct se_node_acl *pr_reg_nacl;
struct se_session *se_sess = cmd->se_sess;
- struct list_head preempt_and_abort_list;
+ LIST_HEAD(preempt_and_abort_list);
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
- int prh_type = 0, prh_scope = 0, ret;
+ int prh_type = 0, prh_scope = 0;
- if (!se_sess) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
+ if (!se_sess)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
- (abort) ? "_AND_ABORT" : "");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "");
+ return TCM_RESERVATION_CONFLICT;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
- INIT_LIST_HEAD(&preempt_and_abort_list);
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
@@ -3039,8 +2792,7 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3086,7 +2838,7 @@ static int core_scsi3_pro_preempt(
pr_reg_nacl = pr_reg->pr_reg_nacl;
pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
__core_scsi3_free_registration(dev, pr_reg,
- (abort) ? &preempt_and_abort_list :
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, calling_it_nexus);
released_regs++;
} else {
@@ -3096,7 +2848,7 @@ static int core_scsi3_pro_preempt(
* 5.7.11.4 Preempting, Table 52 and Figure 7.
*
* For a ZERO SA Reservation key, release
- * all other registrations and do an implict
+ * all other registrations and do an implicit
* release of active persistent reservation.
*
* For a non-ZERO SA Reservation key, only
@@ -3114,7 +2866,7 @@ static int core_scsi3_pro_preempt(
pr_reg_nacl = pr_reg->pr_reg_nacl;
pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
__core_scsi3_free_registration(dev, pr_reg,
- (abort) ? &preempt_and_abort_list :
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, 0);
released_regs++;
}
@@ -3134,8 +2886,7 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* For an existing all registrants type reservation
@@ -3144,24 +2895,17 @@ static int core_scsi3_pro_preempt(
*/
if (pr_res_holder && all_reg && !(sa_res_key)) {
__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
- (abort) ? &preempt_and_abort_list : NULL,
- type, scope, abort);
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+ type, scope, preempt_type);
- if (abort)
+ if (preempt_type == PREEMPT_AND_ABORT)
core_scsi3_release_preempt_and_abort(
&preempt_and_abort_list, pr_reg_n);
}
spin_unlock(&dev->dev_reservation_lock);
- if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &pr_reg_n->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
- pr_debug("SPC-3 PR: Updated APTPL"
- " metadata for PREEMPT%s\n", (abort) ?
- "_AND_ABORT" : "");
- }
+ if (pr_tmpl->pr_aptpl_active)
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
core_scsi3_put_pr_reg(pr_reg_n);
core_scsi3_pr_generation(cmd->se_dev);
@@ -3225,7 +2969,7 @@ static int core_scsi3_pro_preempt(
pr_reg_nacl = pr_reg->pr_reg_nacl;
pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
__core_scsi3_free_registration(dev, pr_reg,
- (abort) ? &preempt_and_abort_list : NULL,
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
calling_it_nexus);
/*
* e) Establish a unit attention condition for the initiator
@@ -3242,8 +2986,8 @@ static int core_scsi3_pro_preempt(
* I_T nexus using the contents of the SCOPE and TYPE fields;
*/
__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
- (abort) ? &preempt_and_abort_list : NULL,
- type, scope, abort);
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+ type, scope, preempt_type);
/*
* d) Process tasks as defined in 5.7.1;
* e) See above..
@@ -3283,36 +3027,24 @@ static int core_scsi3_pro_preempt(
* been removed from the primary pr_reg list), except the
* new persistent reservation holder, the calling Initiator Port.
*/
- if (abort) {
+ if (preempt_type == PREEMPT_AND_ABORT) {
core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
pr_reg_n);
}
- if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &pr_reg_n->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
- pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
- "%s\n", (abort) ? "_AND_ABORT" : "");
- }
+ if (pr_tmpl->pr_aptpl_active)
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
core_scsi3_put_pr_reg(pr_reg_n);
core_scsi3_pr_generation(cmd->se_dev);
return 0;
}
-static int core_scsi3_emulate_pro_preempt(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key,
- u64 sa_res_key,
- int abort)
+static sense_reason_t
+core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
+ u64 res_key, u64 sa_res_key, enum preempt_type preempt_type)
{
- int ret = 0;
-
switch (type) {
case PR_TYPE_WRITE_EXCLUSIVE:
case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -3320,55 +3052,48 @@ static int core_scsi3_emulate_pro_preempt(
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
- ret = core_scsi3_pro_preempt(cmd, type, scope,
- res_key, sa_res_key, abort);
- break;
+ return core_scsi3_pro_preempt(cmd, type, scope, res_key,
+ sa_res_key, preempt_type);
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
- " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ " Type: 0x%02x\n", (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", type);
+ return TCM_INVALID_CDB_FIELD;
}
-
- return ret;
}
-static int core_scsi3_emulate_pro_register_and_move(
- struct se_cmd *cmd,
- u64 res_key,
- u64 sa_res_key,
- int aptpl,
- int unreg)
+static sense_reason_t
+core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
+ u64 sa_res_key, int aptpl, int unreg)
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
- struct se_dev_entry *se_deve, *dest_se_deve = NULL;
+ struct se_dev_entry *dest_se_deve = NULL;
struct se_lun *se_lun = cmd->se_lun;
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
- int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+ int new_reg = 0, type, scope, matching_iname;
+ sense_reason_t ret;
unsigned short rtpi;
unsigned char proto_ident;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
tf_ops = se_tpg->se_tpg_tfo;
- se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Follow logic from spc4r17 Section 5.7.8, Table 50 --
* Register behaviors for a REGISTER AND MOVE service action
@@ -3380,8 +3105,7 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
	 * The provided reservation key must match the existing reservation key
@@ -3391,9 +3115,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
	 * The service action reservation key needs to be non-zero
@@ -3401,9 +3124,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!sa_res_key) {
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
/*
@@ -3412,6 +3134,11 @@ static int core_scsi3_emulate_pro_register_and_move(
* information.
*/
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
+ }
+
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
@@ -3425,9 +3152,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
spin_lock(&dev->se_port_lock);
@@ -3442,18 +3168,16 @@ static int core_scsi3_emulate_pro_register_and_move(
continue;
atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock);
- ret = core_scsi3_tpg_depend_item(dest_se_tpg);
- if (ret != 0) {
+ if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ smp_mb__after_atomic();
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
}
spin_lock(&dev->se_port_lock);
@@ -3465,33 +3189,34 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
+ }
proto_ident = (buf[24] & 0x0f);
-#if 0
+
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
" 0x%02x\n", proto_ident);
-#endif
+
if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
" proto_ident: 0x%02x does not match ident: 0x%02x"
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3499,8 +3224,7 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
@@ -3529,8 +3253,7 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3538,8 +3261,7 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
after_iport_check:
@@ -3551,7 +3273,7 @@ after_iport_check:
initiator_str);
if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
spin_unlock_irq(&dest_se_tpg->acl_node_lock);
@@ -3559,26 +3281,24 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
- if (ret != 0) {
+
+ if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dest_node_acl = NULL;
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
-#if 0
+
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
dest_node_acl->initiatorname);
-#endif
+
/*
* Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
* PORT IDENTIFIER.
@@ -3587,27 +3307,24 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- ret = core_scsi3_lunacl_depend_item(dest_se_deve);
- if (ret < 0) {
+ if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dest_se_deve = NULL;
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
-#if 0
+
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
" ACL for dest_se_deve->mapped_lun: %u\n",
dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
-#endif
+
/*
	 * A persistent reservation needs to already exist in order to
* successfully complete the REGISTER_AND_MOVE service action..
@@ -3618,8 +3335,7 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
+ ret = TCM_INVALID_CDB_FIELD;
goto out;
}
/*
@@ -3632,8 +3348,7 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out;
}
/*
@@ -3651,8 +3366,7 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3684,13 +3398,11 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- ret = core_scsi3_alloc_registration(cmd->se_dev,
+ if (core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
- sa_res_key, 0, aptpl, 2, 1);
- if (ret != 0) {
+ sa_res_key, 0, aptpl, 2, 1)) {
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3712,8 +3424,7 @@ after_iport_check:
dest_pr_reg->pr_res_holder = 1;
dest_pr_reg->pr_res_type = type;
pr_reg->pr_res_scope = scope;
- prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
- PR_REG_ISID_ID_LEN);
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Increment PRGeneration for existing registrations..
*/
@@ -3729,7 +3440,7 @@ after_iport_check:
pr_debug("SPC-3 PR Successfully moved reservation from"
" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
- (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
+ i_buf, dest_tf_ops->get_fabric_name(),
dest_node_acl->initiatorname, (iport_ptr != NULL) ?
iport_ptr : "");
/*
@@ -3750,24 +3461,7 @@ after_iport_check:
} else
core_scsi3_put_pr_reg(pr_reg);
- /*
- * Clear the APTPL metadata if APTPL has been disabled, otherwise
- * write out the updated metadata to struct file for this SCSI device.
- */
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
- " REGISTER_AND_MOVE\n");
- } else {
- pr_tmpl->pr_aptpl_active = 1;
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &dest_pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
- pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
- " REGISTER_AND_MOVE\n");
- }
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
transport_kunmap_data_sg(cmd);
@@ -3781,6 +3475,8 @@ out:
if (dest_node_acl)
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_se_tpg);
+
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
return ret;
}
@@ -3798,15 +3494,15 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
/*
* See spc4r17 section 6.14 Table 170
*/
-int target_scsi3_emulate_pr_out(struct se_task *task)
+sense_reason_t
+target_scsi3_emulate_pr_out(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
unsigned char *cdb = &cmd->t_task_cdb[0];
unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
- int ret;
+ sense_reason_t ret;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -3817,31 +3513,26 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = EINVAL;
- goto out;
+ return TCM_RESERVATION_CONFLICT;
}
/*
	 * FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
+ if (!cmd->se_sess)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ return TCM_INVALID_PARAMETER_LIST;
}
+
/*
* From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
*/
@@ -3850,6 +3541,9 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
type = (cdb[2] & 0x0f);
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@@ -3873,11 +3567,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
- if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
- }
+ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ return TCM_INVALID_PARAMETER_LIST;
/*
* From spc4r17 section 6.14:
@@ -3892,10 +3583,9 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ return TCM_INVALID_PARAMETER_LIST;
}
+
/*
* (core_scsi3_emulate_pro_* function parameters
* are defined by spc4r17 Table 174:
@@ -3904,7 +3594,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
switch (sa) {
case PRO_REGISTER:
ret = core_scsi3_emulate_pro_register(cmd,
- res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+ res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER);
break;
case PRO_RESERVE:
ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
@@ -3917,15 +3607,15 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
break;
case PRO_PREEMPT:
ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
- res_key, sa_res_key, 0);
+ res_key, sa_res_key, PREEMPT);
break;
case PRO_PREEMPT_AND_ABORT:
ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
- res_key, sa_res_key, 1);
+ res_key, sa_res_key, PREEMPT_AND_ABORT);
break;
case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
ret = core_scsi3_emulate_pro_register(cmd,
- 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+ 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER_AND_IGNORE_EXISTING_KEY);
break;
case PRO_REGISTER_AND_MOVE:
ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
@@ -3934,16 +3624,11 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
- break;
+ return TCM_INVALID_CDB_FIELD;
}
-out:
- if (!ret) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
return ret;
}
@@ -3952,10 +3637,10 @@ out:
*
* See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
*/
-static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_keys(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u32 add_len = 0, off = 8;
@@ -3963,18 +3648,20 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
/*
* Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3994,7 +3681,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
add_len += 8;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
buf[4] = ((add_len >> 24) & 0xff);
buf[5] = ((add_len >> 16) & 0xff);
@@ -4011,10 +3698,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
*
* See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
*/
-static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u64 pr_res_key;
@@ -4023,19 +3710,21 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
-
- spin_lock(&se_dev->dev_reservation_lock);
- pr_reg = se_dev->dev_pr_res_holder;
- if ((pr_reg)) {
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (pr_reg) {
/*
* Set the hardcoded Additional Length
*/
@@ -4085,7 +3774,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
}
err:
- spin_unlock(&se_dev->dev_reservation_lock);
+ spin_unlock(&dev->dev_reservation_lock);
transport_kunmap_data_sg(cmd);
return 0;
@@ -4096,21 +3785,23 @@ err:
*
* See spc4r17 section 6.13.4 Table 165
*/
-static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	buf[0] = ((add_len >> 8) & 0xff);
buf[1] = (add_len & 0xff);
@@ -4152,14 +3843,14 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
*
* See spc4r17 section 6.13.5 Table 168 and 169
*/
-static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
+ struct se_device *dev = cmd->se_dev;
struct se_node_acl *se_nacl;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
@@ -4168,16 +3859,17 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4188,7 +3880,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
add_desc_len = 0;
atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock);
/*
* Determine expected length of $FABRIC_MOD specific
@@ -4202,7 +3894,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
" out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
break;
}
/*
@@ -4254,7 +3946,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
buf[off++] = (port->sep_rtpi & 0xff);
} else
- off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFER */
+ off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
/*
* Now, have the $FABRIC_MOD fill in the protocol identifier
@@ -4264,7 +3956,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
/*
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
@@ -4298,10 +3990,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
-int target_scsi3_emulate_pr_in(struct se_task *task)
+sense_reason_t
+target_scsi3_emulate_pr_in(struct se_cmd *cmd)
{
- struct se_cmd *cmd = task->task_se_cmd;
- int ret;
+ sense_reason_t ret;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -4312,12 +4004,11 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4336,68 +4027,33 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
- break;
+ return TCM_INVALID_CDB_FIELD;
}
- if (!ret) {
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- }
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
return ret;
}
-static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
-{
- return 0;
-}
-
-static int core_pt_seq_non_holder(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u32 pr_reg_type)
+sense_reason_t
+target_check_reservation(struct se_cmd *cmd)
{
- return 0;
-}
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
-int core_setup_reservations(struct se_device *dev, int force_pt)
-{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_reservation *rest = &su_dev->t10_pr;
- /*
- * If this device is from Target_Core_Mod/pSCSI, use the reservations
- * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
- * cause a problem because libata and some SATA RAID HBAs appear
- * under Linux/SCSI, but to emulate reservations themselves.
- */
- if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
- rest->res_type = SPC_PASSTHROUGH;
- rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
- rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
- pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
- " emulation\n", dev->transport->name);
+ if (!cmd->se_sess)
+ return 0;
+ if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+ return 0;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
- }
- /*
- * If SPC-3 or above is reported by real or emulated struct se_device,
- * use emulated Persistent Reservations.
- */
- if (dev->transport->get_device_rev(dev) >= SCSI_3) {
- rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
- rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
- rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
- pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
- " emulation\n", dev->transport->name);
- } else {
- rest->res_type = SPC2_RESERVATIONS;
- rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
- rest->pr_ops.t10_seq_non_holder =
- &core_scsi2_reservation_seq_non_holder;
- pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
- dev->transport->name);
- }
- return 0;
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ ret = target_scsi2_reservation_check(cmd);
+ else
+ ret = target_scsi3_pr_reservation_check(cmd);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 7a233feb7e9..2ee2936fa0b 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -43,12 +43,17 @@
#define PR_APTPL_MAX_IPORT_LEN 256
#define PR_APTPL_MAX_TPORT_LEN 256
+/*
+ * Function defined in target_core_spc.c
+ */
+void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+
extern struct kmem_cache *t10_pr_reg_cache;
-extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
+extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
-extern int target_scsi2_reservation_release(struct se_task *task);
-extern int target_scsi2_reservation_reserve(struct se_task *task);
+extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
@@ -61,8 +66,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int target_scsi3_emulate_pr_in(struct se_task *task);
-extern int target_scsi3_emulate_pr_out(struct se_task *task);
-extern int core_setup_reservations(struct se_device *, int);
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 8d4def30e9e..94d00df28f3 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -3,10 +3,7 @@
*
* This file contains the generic target mode <-> Linux SCSI subsystem plugin.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -35,8 +32,10 @@
#include <linux/spinlock.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
-#include <linux/file.h>
+#include <linux/ratelimit.h>
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -46,12 +45,19 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include "target_core_alua.h"
#include "target_core_pscsi.h"
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct pscsi_dev_virt, dev);
+}
+
static struct se_subsystem_api pscsi_template;
+static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int);
/* pscsi_attach_hba():
@@ -69,7 +75,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
return -ENOMEM;
}
phv->phv_host_id = host_id;
- phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+ phv->phv_mode = PHV_VIRTUAL_HOST_ID;
hba->hba_ptr = phv;
@@ -114,7 +120,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
return 0;
phv->phv_lld_host = NULL;
- phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+ phv->phv_mode = PHV_VIRTUAL_HOST_ID;
pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
@@ -128,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* pSCSI Host ID and enable for phba mode
*/
sh = scsi_host_lookup(phv->phv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
- return PTR_ERR(sh);
+ return -EINVAL;
}
phv->phv_lld_host = sh;
@@ -215,7 +221,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
- wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+ wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
kfree(buf);
return 0;
@@ -260,7 +266,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
" length zero!\n");
break;
}
- pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
+ pr_debug("T10 VPD Identifier Length: %d\n", ident_len);
vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
@@ -295,23 +301,13 @@ out:
kfree(buf);
}
-/* pscsi_add_device_to_list():
- *
- *
- */
-static struct se_device *pscsi_add_device_to_list(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- struct pscsi_dev_virt *pdv,
- struct scsi_device *sd,
- int dev_flags)
+static int pscsi_add_device_to_list(struct se_device *dev,
+ struct scsi_device *sd)
{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct request_queue *q;
- struct queue_limits *limits;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct request_queue *q = sd->request_queue;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ pdv->pdv_sd = sd;
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
@@ -320,54 +316,27 @@ static struct se_device *pscsi_add_device_to_list(
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = sd->request_queue;
- limits = &dev_limits.limits;
- limits->logical_block_size = sd->sector_size;
- limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
- limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
- dev_limits.hw_queue_depth = sd->queue_depth;
- dev_limits.queue_depth = sd->queue_depth;
+
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_max_sectors =
+ min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ dev->dev_attrib.hw_queue_depth = sd->queue_depth;
+
/*
* Setup our standard INQUIRY info into se_dev->t10_wwn
*/
- pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
-
- /*
- * Set the pointer pdv->pdv_sd to from passed struct scsi_device,
- * which has already been referenced with Linux SCSI code with
- * scsi_device_get() in this file's pscsi_create_virtdevice().
- *
- * The passthrough operations called by the transport_add_device_*
- * function below will require this pointer to be set for passthroug
- * ops.
- *
- * For the shutdown case in pscsi_free_device(), this struct
- * scsi_device reference is released with Linux SCSI code
- * scsi_device_put() and the pdv->pdv_sd cleared.
- */
- pdv->pdv_sd = sd;
- dev = transport_add_device_to_core_hba(hba, &pscsi_template,
- se_dev, dev_flags, pdv,
- &dev_limits, NULL, NULL);
- if (!dev) {
- pdv->pdv_sd = NULL;
- return NULL;
- }
+ pscsi_set_inquiry_info(sd, &dev->t10_wwn);
/*
* Locate VPD WWN Information used for various purposes within
* the Storage Engine.
*/
- if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+ if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
/*
* If VPD Unit Serial returned GOOD status, try
* VPD Device Identification page (0x83).
*/
- pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+ pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
}
/*
@@ -375,10 +344,11 @@ static struct se_device *pscsi_add_device_to_list(
*/
if (sd->type == TYPE_TAPE)
pscsi_tape_read_blocksize(dev, sd);
- return dev;
+ return 0;
}
-static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *pscsi_alloc_device(struct se_hba *hba,
+ const char *name)
{
struct pscsi_dev_virt *pdv;
@@ -387,139 +357,125 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
- pdv->pdv_se_hba = hba;
pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
- return pdv;
+ return &pdv->dev;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
-static struct se_device *pscsi_create_type_disk(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
- u32 dev_flags = 0;
+ int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
- return NULL;
+ return -EIO;
}
spin_unlock_irq(sh->host_lock);
/*
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK using supplied udev_path
*/
- bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+ bd = blkdev_get_by_path(dev->udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
- return NULL;
+ return PTR_ERR(bd);
}
pdv->pdv_bd = bd;
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev) {
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
- return NULL;
+ return ret;
}
+
pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
-
- return dev;
+ return 0;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
-static struct se_device *pscsi_create_type_rom(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
- u32 dev_flags = 0;
+ int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
- return NULL;
+ return -EIO;
}
spin_unlock_irq(sh->host_lock);
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev) {
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret) {
scsi_device_put(sd);
- return NULL;
+ return ret;
}
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
- return dev;
+ return 0;
}
/*
- *Called with struct Scsi_Host->host_lock called.
+ * Called with struct Scsi_Host->host_lock held.
*/
-static struct se_device *pscsi_create_type_other(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_other(struct se_device *dev,
+ struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
- u32 dev_flags = 0;
+ int ret;
spin_unlock_irq(sh->host_lock);
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev)
- return NULL;
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret)
+ return ret;
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
-
- return dev;
+ return 0;
}
-static struct se_device *pscsi_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int pscsi_configure_device(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = p;
- struct se_device *dev;
+ struct se_hba *hba = dev->se_hba;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd;
- struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
+ int ret;
- if (!pdv) {
- pr_err("Unable to locate struct pscsi_dev_virt"
- " parameter\n");
- return ERR_PTR(-EINVAL);
+ if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+ pr_err("Missing scsi_channel_id=, scsi_target_id= and"
+ " scsi_lun_id= parameters\n");
+ return -EINVAL;
}
+
/*
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
@@ -528,51 +484,48 @@ static struct se_device *pscsi_create_virtdevice(
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
/*
- * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
+ * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
* reference, we enforce that udev_path has been set
*/
- if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+ if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/*
- * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
+ * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
* use the original TCM hba ID to reference Linux/SCSI Host No
* and enable for PHV_LLD_SCSI_HOST_NO mode.
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
- spin_lock(&hba->device_lock);
- if (!list_empty(&hba->hba_dev_list)) {
+ if (hba->dev_count) {
pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
- spin_unlock(&hba->device_lock);
- return ERR_PTR(-EEXIST);
+ return -EEXIST;
}
- spin_unlock(&hba->device_lock);
if (pscsi_pmode_enable_hba(hba, 1) != 1)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
legacy_mode_enable = 1;
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
sh = scsi_host_lookup(pdv->pdv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return ERR_CAST(sh);
+ return -EINVAL;
}
}
} else {
- if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
- pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
+ if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
+ pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
" struct Scsi_Host exists\n");
- return ERR_PTR(-EEXIST);
+ return -EEXIST;
}
}
@@ -589,51 +542,47 @@ static struct se_device *pscsi_create_virtdevice(
*/
switch (sd->type) {
case TYPE_DISK:
- dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_disk(dev, sd);
break;
case TYPE_ROM:
- dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_rom(dev, sd);
break;
default:
- dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_other(dev, sd);
break;
}
- if (!dev) {
- if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ if (ret) {
+ if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
pscsi_pmode_enable_hba(hba, 0);
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
pdv->pdv_sd = NULL;
- return ERR_PTR(-ENODEV);
+ return ret;
}
- return dev;
+ return 0;
}
spin_unlock_irq(sh->host_lock);
pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
- if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
pscsi_pmode_enable_hba(hba, 0);
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-/* pscsi_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void pscsi_free_device(void *p)
+static void pscsi_free_device(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = p;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct scsi_device *sd = pdv->pdv_sd;
if (sd) {
@@ -663,37 +612,38 @@ static void pscsi_free_device(void *p)
kfree(pdv);
}
-static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
+ unsigned char *sense_buffer)
{
- return container_of(task, struct pscsi_plugin_task, pscsi_task);
-}
-
-
-/* pscsi_transport_complete():
- *
- *
- */
-static int pscsi_transport_complete(struct se_task *task)
-{
- struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
int result;
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- unsigned char *cdb = &pt->pscsi_cdb[0];
+ struct pscsi_plugin_task *pt = cmd->priv;
+ unsigned char *cdb;
+ /*
+ * Special case for REPORT_LUNs handling where pscsi_plugin_task has
+ * not been allocated because TCM is handling the emulation directly.
+ */
+ if (!pt)
+ return;
+ cdb = &pt->pscsi_cdb[0];
result = pt->pscsi_result;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
*/
+ if (!cmd->se_deve || !cmd->data_length)
+ goto after_mode_sense;
+
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
- if (!task->task_se_cmd->se_deve)
- goto after_mode_sense;
+ if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+ unsigned char *buf;
- if (task->task_se_cmd->se_deve->lun_flags &
- TRANSPORT_LUNFLAGS_READ_ONLY) {
- unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -703,12 +653,12 @@ static int pscsi_transport_complete(struct se_task *task)
buf[2] |= 0x80;
}
- transport_kunmap_data_sg(task->task_se_cmd);
+ transport_kunmap_data_sg(cmd);
}
}
after_mode_sense:
- if (sd->type != TYPE_TAPE)
+ if (sd->type != TYPE_TAPE || !cmd->data_length)
goto after_mode_select;
/*
@@ -722,7 +672,6 @@ after_mode_sense:
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
unsigned char *buf;
- struct scatterlist *sg = task->task_sg;
u16 bdl;
u32 blocksize;
@@ -751,39 +700,10 @@ after_mode_sense:
}
after_mode_select:
- if (status_byte(result) & CHECK_CONDITION)
- return 1;
-
- return 0;
-}
-
-static struct se_task *
-pscsi_alloc_task(unsigned char *cdb)
-{
- struct pscsi_plugin_task *pt;
-
- /*
- * Dynamically alloc cdb space, since it may be larger than
- * TCM_MAX_COMMAND_SIZE
- */
- pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
- if (!pt) {
- pr_err("Unable to allocate struct pscsi_plugin_task\n");
- return NULL;
+ if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) {
+ memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER);
+ cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
}
-
- return &pt->pscsi_task;
-}
-
-static void pscsi_free_task(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-
- /*
- * We do not release the bio(s) here associated with this task, as
- * this is handled by bio_put() and pscsi_bi_endio().
- */
- kfree(pt);
}
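
A note on the write-protect hack in pscsi_transport_complete() above: in the MODE SENSE parameter header, the WP flag is bit 7 of the device-specific parameter byte, which sits at byte 2 of the 6-byte header and byte 3 of the 10-byte header. A minimal standalone sketch of the same masking (names assumed):

	#include <stdint.h>

	/* Force the WP bit in a MODE SENSE reply header; mirrors the
	 * buf[2]/buf[3] masking in pscsi_transport_complete(). */
	static void set_write_protect(uint8_t *buf, int is_mode_sense_10)
	{
		if (is_mode_sense_10)
			buf[3] |= 0x80;	/* device-specific parameter, 10-byte header */
		else
			buf[2] |= 0x80;	/* device-specific parameter, 6-byte header */
	}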
enum {
@@ -799,13 +719,11 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page,
- ssize_t count)
+static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
- struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -870,35 +788,16 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t pscsi_check_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev)
-{
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
-
- if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
- !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
- !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
- pr_err("Missing scsi_channel_id=, scsi_target_id= and"
- " scsi_lun_id= parameters\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
+static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct pscsi_hba_virt *phv = hba->hba_ptr;
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
unsigned char host_id[16];
ssize_t bl;
int i;
- if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
snprintf(host_id, 16, "%d", pdv->pdv_host_id);
else
snprintf(host_id, 16, "PHBA Mode");
@@ -941,14 +840,14 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}
-static inline struct bio *pscsi_get_bio(int sg_num)
+static inline struct bio *pscsi_get_bio(int nr_vecs)
{
struct bio *bio;
/*
* Use bio_kmalloc() following the comment for bio -> struct request
* in block/blk-core.c:blk_make_request()
*/
- bio = bio_kmalloc(GFP_KERNEL, sg_num);
+ bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
if (!bio) {
pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
@@ -958,26 +857,25 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
-static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
- struct bio **hbio)
+static sense_reason_t
+pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction, struct bio **hbio)
{
- struct se_cmd *cmd = task->task_se_cmd;
- struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
- u32 task_sg_num = task->task_sg_nents;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL, *tbio = NULL;
struct page *page;
struct scatterlist *sg;
- u32 data_len = task->task_size, i, len, bytes, off;
- int nr_pages = (task->task_size + task_sg[0].offset +
+ u32 data_len = cmd->data_length, i, len, bytes, off;
+ int nr_pages = (cmd->data_length + sgl[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc;
- int rw = (task->task_data_direction == DMA_TO_DEVICE);
+ int rw = (data_direction == DMA_TO_DEVICE);
*hbio = NULL;
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
- for_each_sg(task_sg, sg, task_sg_num, i) {
+ for_each_sg(sgl, sg, sgl_nents, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
@@ -985,7 +883,14 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
- while (len > 0 && data_len > 0) {
+ /*
+ * We only have one page of data in each sg element,
+ * so we cannot cross a page boundary.
+ */
+ if (off + len > PAGE_SIZE)
+ goto fail;
+
+ if (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
@@ -1009,7 +914,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
* Set *hbio pointer to handle the case:
* nr_pages > BIO_MAX_PAGES, where additional
* bios need to be added to complete a given
- * struct se_task
+ * command.
*/
if (!*hbio)
*hbio = tbio = bio;
@@ -1042,71 +947,134 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
bio = NULL;
}
- page++;
- len -= bytes;
data_len -= bytes;
- off = 0;
}
}
- return task->task_sg_nents;
+ return 0;
fail:
while (*hbio) {
bio = *hbio;
*hbio = (*hbio)->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
-static int pscsi_do_task(struct se_task *task)
+/*
+ * Clear a LUN set in the CDB if the initiator talking to us spoke
+ * an old standards version, as we can't assume the underlying device
+ * won't choke on it.
+ */
+static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
{
- struct se_cmd *cmd = task->task_se_cmd;
- struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ switch (cdb[0]) {
+ case READ_10: /* SBC - RDProtect */
+ case READ_12: /* SBC - RDProtect */
+ case READ_16: /* SBC - RDProtect */
+ case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+ case VERIFY: /* SBC - VRProtect */
+ case VERIFY_16: /* SBC - VRProtect */
+ case WRITE_VERIFY: /* SBC - VRProtect */
+ case WRITE_VERIFY_12: /* SBC - VRProtect */
+ case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+ break;
+ default:
+ cdb[1] &= 0x1f; /* clear logical unit number */
+ break;
+ }
+}
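
In pre-SPC addressing (SCSI-2 and earlier), bits 7:5 of CDB byte 1 carried the LUN; pscsi_clear_cdb_lun() zeroes them except for opcodes that reuse those bits (RDProtect/VRProtect and friends, per the case list above). A small standalone illustration with made-up bytes:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical READ(6) CDB from an old initiator: LUN 2 encoded
		 * in byte 1 bits 7:5, LBA high bits 0x12 in bits 4:0. */
		uint8_t cdb[6] = { 0x08, 0x40 | 0x12, 0x34, 0x56, 0x08, 0x00 };

		cdb[1] &= 0x1f;		/* same masking as pscsi_clear_cdb_lun() */
		assert(cdb[1] == 0x12);	/* LUN cleared, LBA bits preserved */
		return 0;
	}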
+
+static sense_reason_t
+pscsi_parse_cdb(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task_cdb;
+
+ if (cmd->se_cmd_flags & SCF_BIDI)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ pscsi_clear_cdb_lun(cdb);
+
+ /*
+ * For REPORT LUNS we always need to emulate the response; for everything
+ * else the default for pSCSI is to pass the command through to the
+ * underlying LLD / physical hardware.
+ */
+ switch (cdb[0]) {
+ case REPORT_LUNS:
+ cmd->execute_cmd = spc_emulate_report_luns;
+ return 0;
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_VERIFY:
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ /* FALLTHROUGH */
+ default:
+ cmd->execute_cmd = pscsi_execute_cmd;
+ return 0;
+ }
+}
+
+static sense_reason_t
+pscsi_execute_cmd(struct se_cmd *cmd)
+{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
+ struct pscsi_plugin_task *pt;
struct request *req;
struct bio *hbio;
- int ret;
+ sense_reason_t ret;
+
+ /*
+ * Dynamically alloc cdb space, since it may be larger than
+ * TCM_MAX_COMMAND_SIZE
+ */
+ pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
+ if (!pt) {
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ cmd->priv = pt;
- target_get_task_cdb(task, pt->pscsi_cdb);
+ memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
+ scsi_command_size(cmd->t_task_cdb));
- if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+ if (!sgl) {
req = blk_get_request(pdv->pdv_sd->request_queue,
- (task->task_data_direction == DMA_TO_DEVICE),
+ (data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
- if (!req || IS_ERR(req)) {
- pr_err("PSCSI: blk_get_request() failed: %ld\n",
- req ? IS_ERR(req) : -ENOMEM);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENODEV;
+ if (!req) {
+ pr_err("PSCSI: blk_get_request() failed\n");
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto fail;
}
+
+ blk_rq_set_block_pc(req);
} else {
- BUG_ON(!task->task_size);
+ BUG_ON(!cmd->data_length);
- /*
- * Setup the main struct request for the task->task_sg[] payload
- */
- ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0) {
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return ret;
- }
+ ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
+ if (ret)
+ goto fail;
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("pSCSI: blk_make_request() failed\n");
- goto fail;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto fail_free_bio;
}
}
- req->cmd_type = REQ_TYPE_BLOCK_PC;
req->end_io = pscsi_req_done;
- req->end_io_data = task;
+ req->end_io_data = cmd;
req->cmd_len = scsi_command_size(pt->pscsi_cdb);
req->cmd = &pt->pscsi_cdb[0];
req->sense = &pt->pscsi_sense[0];
@@ -1118,43 +1086,21 @@ static int pscsi_do_task(struct se_task *task)
req->retries = PS_RETRY;
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
- (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
+ (cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
return 0;
-fail:
+fail_free_bio:
while (hbio) {
struct bio *bio = hbio;
hbio = hbio->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
-}
-
-/* pscsi_get_sense_buffer():
- *
- *
- */
-static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-
- return pt->pscsi_sense;
-}
-
-/* pscsi_get_device_rev():
- *
- *
- */
-static u32 pscsi_get_device_rev(struct se_device *dev)
-{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
- struct scsi_device *sd = pdv->pdv_sd;
-
- return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail:
+ kfree(pt);
+ return ret;
}
/* pscsi_get_device_type():
@@ -1163,7 +1109,7 @@ static u32 pscsi_get_device_rev(struct se_device *dev)
*/
static u32 pscsi_get_device_type(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
return sd->type;
@@ -1171,7 +1117,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
static sector_t pscsi_get_blocks(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
@@ -1180,48 +1126,35 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
return 0;
}
-/* pscsi_handle_SAM_STATUS_failures():
- *
- *
- */
-static inline void pscsi_process_SAM_status(
- struct se_task *task,
- struct pscsi_plugin_task *pt)
+static void pscsi_req_done(struct request *req, int uptodate)
{
- task->task_scsi_status = status_byte(pt->pscsi_result);
- if (task->task_scsi_status) {
- task->task_scsi_status <<= 1;
- pr_debug("PSCSI Status Byte exception at task: %p CDB:"
- " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+ struct se_cmd *cmd = req->end_io_data;
+ struct pscsi_plugin_task *pt = cmd->priv;
+
+ pt->pscsi_result = req->errors;
+ pt->pscsi_resid = req->resid_len;
+
+ cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
+ if (cmd->scsi_status) {
+ pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
+ " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
pt->pscsi_result);
}
switch (host_byte(pt->pscsi_result)) {
case DID_OK:
- transport_complete_task(task, (!task->task_scsi_status));
+ target_complete_cmd(cmd, cmd->scsi_status);
break;
default:
- pr_debug("PSCSI Host Byte exception at task: %p CDB:"
- " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+ pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
+ " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
pt->pscsi_result);
- task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_se_cmd->scsi_sense_reason =
- TCM_UNSUPPORTED_SCSI_OPCODE;
- transport_complete_task(task, 0);
+ target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
-}
-static void pscsi_req_done(struct request *req, int uptodate)
-{
- struct se_task *task = req->end_io_data;
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-
- pt->pscsi_result = req->errors;
- pt->pscsi_resid = req->resid_len;
-
- pscsi_process_SAM_status(task, pt);
__blk_put_request(req->q, req);
+ kfree(pt);
}
static struct se_subsystem_api pscsi_template = {
@@ -1231,18 +1164,13 @@ static struct se_subsystem_api pscsi_template = {
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
- .allocate_virtdevice = pscsi_allocate_virtdevice,
- .create_virtdevice = pscsi_create_virtdevice,
+ .alloc_device = pscsi_alloc_device,
+ .configure_device = pscsi_configure_device,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
- .alloc_task = pscsi_alloc_task,
- .do_task = pscsi_do_task,
- .free_task = pscsi_free_task,
- .check_configfs_dev_params = pscsi_check_configfs_dev_params,
+ .parse_cdb = pscsi_parse_cdb,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
- .get_sense_buffer = pscsi_get_sense_buffer,
- .get_device_rev = pscsi_get_device_rev,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
};
@@ -1252,7 +1180,7 @@ static int __init pscsi_module_init(void)
return transport_subsystem_register(&pscsi_template);
}
-static void pscsi_module_exit(void)
+static void __exit pscsi_module_exit(void)
{
transport_subsystem_release(&pscsi_template);
}
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index fdc17b6aefb..1bd757dff8e 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -22,7 +22,6 @@
#include <linux/kobject.h>
struct pscsi_plugin_task {
- struct se_task pscsi_task;
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
@@ -38,6 +37,7 @@ struct pscsi_plugin_task {
#define PDF_HAS_VIRT_HOST_ID 0x20
struct pscsi_dev_virt {
+ struct se_device dev;
int pdv_flags;
int pdv_host_id;
int pdv_channel_id;
@@ -45,11 +45,10 @@ struct pscsi_dev_virt {
int pdv_lun_id;
struct block_device *pdv_bd;
struct scsi_device *pdv_sd;
- struct se_hba *pdv_se_hba;
} ____cacheline_aligned;
typedef enum phv_modes {
- PHV_VIRUTAL_HOST_ID,
+ PHV_VIRTUAL_HOST_ID,
PHV_LLD_SCSI_HOST_NO
} phv_modes_t;
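
Embedding struct se_device in the backend-private struct (and dropping pdv_se_hba) is what makes the PSCSI_DEV() container_of() helper near the top of target_core_pscsi.c work; the rd_dev change below applies the same idiom. A minimal standalone illustration with stand-in types:

	#include <assert.h>
	#include <stddef.h>

	struct se_device_x { int id; };		/* stand-ins, assumed */
	struct pscsi_dev_virt_x { struct se_device_x dev; int pdv_flags; };

	#define container_of_x(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct pscsi_dev_virt_x pdv = { .dev = { .id = 1 } };
		struct se_device_x *dev = &pdv.dev;	/* what alloc_device returns */

		/* Downcast back to the containing backend struct, as PSCSI_DEV() does. */
		assert(container_of_x(dev, struct pscsi_dev_virt_x, dev) == &pdv);
		return 0;
	}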
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 8b68f7b8263..b920db3388c 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -4,10 +4,7 @@
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -30,7 +27,6 @@
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
-#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
@@ -41,7 +37,10 @@
#include "target_core_rd.h"
-static struct se_subsystem_api rd_mcp_template;
+static inline struct rd_dev *RD_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct rd_dev, dev);
+}
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -64,9 +63,6 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
- pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
- " MaxSectors: %u\n", hba->hba_id,
- rd_host->rd_host_id, RD_MAX_SECTORS);
return 0;
}
@@ -82,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
-/* rd_release_device_space():
- *
- *
- */
-static void rd_release_device_space(struct rd_dev *rd_dev)
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 sg_table_count)
{
- u32 i, j, page_count = 0, sg_per_table;
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
+ u32 i, j, page_count = 0, sg_per_table;
- if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
- return;
-
- sg_table = rd_dev->sg_table_array;
-
- for (i = 0; i < rd_dev->sg_table_count; i++) {
+ for (i = 0; i < sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
@@ -109,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
page_count++;
}
}
-
kfree(sg);
}
+ kfree(sg_table);
+ return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+ rd_dev->sg_table_count);
+
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
- kfree(sg_table);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
@@ -128,33 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
*
*
*/
-static int rd_build_device_space(struct rd_dev *rd_dev)
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 total_sg_needed, unsigned char init_payload)
{
- u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+ u32 i = 0, j, page_offset = 0, sg_per_table;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
-
- if (rd_dev->rd_page_count <= 0) {
- pr_err("Illegal page count: %u for Ramdisk device\n",
- rd_dev->rd_page_count);
- return -EINVAL;
- }
- total_sg_needed = rd_dev->rd_page_count;
-
- sg_tables = (total_sg_needed / max_sg_per_table) + 1;
-
- sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
- if (!sg_table) {
- pr_err("Unable to allocate memory for Ramdisk"
- " scatterlist tables\n");
- return -ENOMEM;
- }
-
- rd_dev->sg_table_array = sg_table;
- rd_dev->sg_table_count = sg_tables;
+ unsigned char *p;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
@@ -185,24 +166,124 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
+
+ p = kmap(pg);
+ memset(p, init_payload, PAGE_SIZE);
+ kunmap(pg);
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
+ return 0;
+}
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 sg_tables, total_sg_needed;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_page_count <= 0) {
+ pr_err("Illegal page count: %u for Ramdisk device\n",
+ rd_dev->rd_page_count);
+ return -EINVAL;
+ }
+
+ /* Don't need backing pages for NULLIO */
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+
+ total_sg_needed = rd_dev->rd_page_count;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk"
+ " scatterlist tables\n");
+ return -ENOMEM;
+ }
+
+ rd_dev->sg_table_array = sg_table;
+ rd_dev->sg_table_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+ if (rc)
+ return rc;
+
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
- " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
- rd_dev->rd_dev_id, rd_dev->rd_page_count,
- rd_dev->sg_table_count);
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count);
+
+ return 0;
+}
+
+static void rd_release_prot_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
+ rd_dev->sg_prot_count);
+
+ pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
+ " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+ rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+ rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+ rd_dev->sg_prot_array = NULL;
+ rd_dev->sg_prot_count = 0;
+}
+
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 total_sg_needed, sg_tables;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+ /*
+ * prot_length = 8 bytes of DIF data per block.
+ * Total SGs needed = rd_page_count * (PGSZ / block_size) *
+ * (prot_length / PGSZ) + pad;
+ * the PGSZ factors cancel each other out.
+ */
+ total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk protection"
+ " scatterlist tables\n");
+ return -ENOMEM;
+ }
+
+ rd_dev->sg_prot_array = sg_table;
+ rd_dev->sg_prot_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
+ if (rc)
+ return rc;
+
+ pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
return 0;
}
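
A quick sanity check of the sizing comment in rd_build_prot_space() above, with assumed example numbers (PAGE_SIZE 4096, block_size 512, prot_length 8, rd_page_count 1024): the device holds 1024 * 4096 / 512 = 8192 blocks, needing 8192 * 8 = 65536 bytes of DIF data, i.e. 1024 * 8 / 512 = 16 pages plus the +1 pad; the PAGE_SIZE terms cancel, matching total_sg_needed.

	#include <stdio.h>

	#define PAGE_SZ 4096UL	/* assumed for illustration */

	int main(void)
	{
		unsigned long rd_page_count = 1024, block_size = 512, prot_length = 8;

		/* Long form, showing the cancellation explicitly: */
		unsigned long blocks = rd_page_count * PAGE_SZ / block_size;
		unsigned long prot_bytes = blocks * prot_length;

		/* Same arithmetic as rd_build_prot_space(), PAGE_SIZE cancelled: */
		unsigned long prot_pages = rd_page_count * prot_length / block_size;

		/* Prints: 16 prot pages (+1 pad), 65536 bytes */
		printf("%lu prot pages (+1 pad), %lu bytes\n", prot_pages, prot_bytes);
		return 0;
	}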
-static void *rd_allocate_virtdevice(
- struct se_hba *hba,
- const char *name,
- int rd_direct)
+static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
@@ -214,174 +295,184 @@ static void *rd_allocate_virtdevice(
}
rd_dev->rd_host = rd_host;
- rd_dev->rd_direct = rd_direct;
- return rd_dev;
+ return &rd_dev->dev;
}
-static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
+static int rd_configure_device(struct se_device *dev)
{
- return rd_allocate_virtdevice(hba, name, 0);
-}
-
-/* rd_create_virtdevice():
- *
- *
- */
-static struct se_device *rd_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p,
- int rd_direct)
-{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct rd_dev *rd_dev = p;
- struct rd_host *rd_host = hba->hba_ptr;
- int dev_flags = 0, ret;
- char prod[16], rev[4];
+ struct rd_dev *rd_dev = RD_DEV(dev);
+ struct rd_host *rd_host = dev->se_hba->hba_ptr;
+ int ret;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+ pr_debug("Missing rd_pages= parameter\n");
+ return -EINVAL;
+ }
ret = rd_build_device_space(rd_dev);
if (ret < 0)
goto fail;
- snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
- snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
- RD_MCP_VERSION);
-
- dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
- dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
- dev_limits.limits.max_sectors = RD_MAX_SECTORS;
- dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
-
- dev = transport_add_device_to_core_hba(hba,
- &rd_mcp_template, se_dev, dev_flags, rd_dev,
- &dev_limits, prod, rev);
- if (!dev)
- goto fail;
+ dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
+ dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
- rd_dev->rd_queue_depth = dev->queue_depth;
- pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+ pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
- rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
- "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
- return dev;
+ return 0;
fail:
rd_release_device_space(rd_dev);
- return ERR_PTR(ret);
-}
-
-static struct se_device *rd_MEMCPY_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
-{
- return rd_create_virtdevice(hba, se_dev, p, 0);
+ return ret;
}
-/* rd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void rd_free_device(void *p)
+static void rd_free_device(struct se_device *dev)
{
- struct rd_dev *rd_dev = p;
+ struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
kfree(rd_dev);
}
-static inline struct rd_request *RD_REQ(struct se_task *task)
-{
- return container_of(task, struct rd_request, rd_task);
-}
-
-static struct se_task *
-rd_alloc_task(unsigned char *cdb)
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
- struct rd_request *rd_req;
+ struct rd_dev_sg_table *sg_table;
+ u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
- rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
- if (!rd_req) {
- pr_err("Unable to allocate struct rd_request\n");
- return NULL;
+ i = page / sg_per_table;
+ if (i < rd_dev->sg_table_count) {
+ sg_table = &rd_dev->sg_table_array[i];
+ if ((sg_table->page_start_offset <= page) &&
+ (sg_table->page_end_offset >= page))
+ return sg_table;
}
- return &rd_req->rd_task;
+ pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
+ page);
+
+ return NULL;
}
-/* rd_get_sg_table():
- *
- *
- */
-static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
- u32 i;
struct rd_dev_sg_table *sg_table;
+ u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
- for (i = 0; i < rd_dev->sg_table_count; i++) {
- sg_table = &rd_dev->sg_table_array[i];
+ i = page / sg_per_table;
+ if (i < rd_dev->sg_prot_count) {
+ sg_table = &rd_dev->sg_prot_array[i];
if ((sg_table->page_start_offset <= page) &&
- (sg_table->page_end_offset >= page))
+ (sg_table->page_end_offset >= page))
return sg_table;
}
- pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
+ pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
page);
return NULL;
}
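
Both lookup helpers above are now O(1): each rd_dev_sg_table covers a fixed window of sg_per_table pages, so the table index is simply page / sg_per_table, with the range check guarding the final (shorter) table. Worked example under assumed sizes:

	#include <assert.h>

	int main(void)
	{
		/* Assumed for illustration: RD_MAX_ALLOCATION_SIZE = 65536 and
		 * sizeof(struct scatterlist) = 32 (arch-dependent). */
		unsigned int sg_per_table = 65536 / 32;	/* 2048 pages per table */
		unsigned int page = 5000;
		unsigned int i = page / sg_per_table;	/* direct index, no scan */

		assert(i == 2);	/* table 2 spans pages 4096..6143 */
		return 0;
	}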
-static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
+static sense_reason_t
+rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
+ struct se_device *se_dev = cmd->se_dev;
+ struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
- u32 rd_offset = req->rd_offset;
+ u32 rd_offset;
+ u32 rd_size;
+ u32 rd_page;
u32 src_len;
+ u64 tmp;
+ sense_reason_t rc;
+
+ if (dev->rd_flags & RDF_NULLIO) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+
+ tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+ rd_offset = do_div(tmp, PAGE_SIZE);
+ rd_page = tmp;
+ rd_size = cmd->data_length;
- table = rd_get_sg_table(dev, req->rd_page);
+ table = rd_get_sg_table(dev, rd_page);
if (!table)
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[rd_page - table->page_start_offset];
pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
- dev->rd_dev_id, read_rd ? "Read" : "Write",
- task->task_lba, req->rd_size, req->rd_page,
- rd_offset);
+ dev->rd_dev_id,
+ data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
+ cmd->t_task_lba, rd_size, rd_page, rd_offset);
+
+ if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
+ struct rd_dev_sg_table *prot_table;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+ rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ if (rc)
+ return rc;
+ }
src_len = PAGE_SIZE - rd_offset;
- sg_miter_start(&m, task->task_sg, task->task_sg_nents,
- read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
- while (req->rd_size) {
+ sg_miter_start(&m, sgl, sgl_nents,
+ data_direction == DMA_FROM_DEVICE ?
+ SG_MITER_TO_SG : SG_MITER_FROM_SG);
+ while (rd_size) {
u32 len;
void *rd_addr;
sg_miter_next(&m);
+ if (!(u32)m.length) {
+ pr_debug("RD[%u]: invalid sgl %p len %zu\n",
+ dev->rd_dev_id, m.addr, m.length);
+ sg_miter_stop(&m);
+ return TCM_INCORRECT_AMOUNT_OF_DATA;
+ }
len = min((u32)m.length, src_len);
+ if (len > rd_size) {
+ pr_debug("RD[%u]: size underrun page %d offset %d "
+ "size %d\n", dev->rd_dev_id,
+ rd_page, rd_offset, rd_size);
+ len = rd_size;
+ }
m.consumed = len;
rd_addr = sg_virt(rd_sg) + rd_offset;
- if (read_rd)
+ if (data_direction == DMA_FROM_DEVICE)
memcpy(m.addr, rd_addr, len);
else
memcpy(rd_addr, m.addr, len);
- req->rd_size -= len;
- if (!req->rd_size)
+ rd_size -= len;
+ if (!rd_size)
continue;
src_len -= len;
@@ -391,77 +482,65 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
}
/* rd page completed, next one please */
- req->rd_page++;
+ rd_page++;
rd_offset = 0;
src_len = PAGE_SIZE;
- if (req->rd_page <= table->page_end_offset) {
+ if (rd_page <= table->page_end_offset) {
rd_sg++;
continue;
}
- table = rd_get_sg_table(dev, req->rd_page);
+ table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/* since we increment, the first sg entry is correct */
rd_sg = table->sg_table;
}
sg_miter_stop(&m);
- return 0;
-}
-/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int rd_MEMCPY_do_task(struct se_task *task)
-{
- struct se_device *dev = task->task_se_cmd->se_dev;
- struct rd_request *req = RD_REQ(task);
- u64 tmp;
- int ret;
+ if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
+ struct rd_dev_sg_table *prot_table;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
- tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
- req->rd_offset = do_div(tmp, PAGE_SIZE);
- req->rd_page = tmp;
- req->rd_size = task->task_size;
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
- ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
- if (ret != 0)
- return ret;
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
- return 0;
-}
+ prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
-/* rd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void rd_free_task(struct se_task *task)
-{
- kfree(RD_REQ(task));
+ rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ if (rc)
+ return rc;
+ }
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
}
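
The LBA-to-backing-page mapping at the top of rd_execute_rw() deserves a worked example (values assumed): with block_size 512 and PAGE_SIZE 4096, LBA 9 starts at byte 4608, which is page 1, offset 512.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SZ 4096ULL	/* assumed for illustration */

	int main(void)
	{
		uint64_t lba = 9, block_size = 512;
		uint64_t tmp = lba * block_size;	/* byte offset into the ramdisk */
		uint32_t rd_offset = tmp % PAGE_SZ;	/* the do_div() remainder */
		uint32_t rd_page = tmp / PAGE_SZ;	/* the do_div() quotient */

		printf("page %u offset %u\n", rd_page, rd_offset);	/* page 1 offset 512 */
		return 0;
	}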
enum {
- Opt_rd_pages, Opt_err
+ Opt_rd_pages, Opt_rd_nullio, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
+ {Opt_rd_nullio, "rd_nullio=%d"},
{Opt_err, NULL}
};
-static ssize_t rd_set_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page,
- ssize_t count)
+static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -485,6 +564,14 @@ static ssize_t rd_set_configfs_dev_params(
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
+ case Opt_rd_nullio:
+ match_int(args, &arg);
+ if (arg != 1)
+ break;
+
+ pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
+ rd_dev->rd_flags |= RDF_NULLIO;
+ break;
default:
break;
}
@@ -494,69 +581,74 @@ static ssize_t rd_set_configfs_dev_params(
return (!ret) ? count : ret;
}
-static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
- if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
- pr_debug("Missing rd_pages= parameter\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t rd_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
- ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
- rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
- "rd_direct" : "rd_mcp");
+ ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
+ rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
- " SG_table_count: %u\n", rd_dev->rd_page_count,
- PAGE_SIZE, rd_dev->sg_table_count);
+ " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+ PAGE_SIZE, rd_dev->sg_table_count,
+ !!(rd_dev->rd_flags & RDF_NULLIO));
return bl;
}
-static u32 rd_get_device_rev(struct se_device *dev)
+static sector_t rd_get_blocks(struct se_device *dev)
{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+ dev->dev_attrib.block_size) - 1;
+
+ return blocks_long;
}
-static u32 rd_get_device_type(struct se_device *dev)
+static int rd_init_prot(struct se_device *dev)
{
- return TYPE_DISK;
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ if (!dev->dev_attrib.pi_prot_type)
+ return 0;
+
+ return rd_build_prot_space(rd_dev, dev->prot_length,
+ dev->dev_attrib.block_size);
}
-static sector_t rd_get_blocks(struct se_device *dev)
+static void rd_free_prot(struct se_device *dev)
{
- struct rd_dev *rd_dev = dev->dev_ptr;
- unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
- dev->se_sub_dev->se_dev_attrib.block_size) - 1;
+ struct rd_dev *rd_dev = RD_DEV(dev);
- return blocks_long;
+ rd_release_prot_space(rd_dev);
+}
+
+static struct sbc_ops rd_sbc_ops = {
+ .execute_rw = rd_execute_rw,
+};
+
+static sense_reason_t
+rd_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
+ .inquiry_prod = "RAMDISK-MCP",
+ .inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
- .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
- .create_virtdevice = rd_MEMCPY_create_virtdevice,
+ .alloc_device = rd_alloc_device,
+ .configure_device = rd_configure_device,
.free_device = rd_free_device,
- .alloc_task = rd_alloc_task,
- .do_task = rd_MEMCPY_do_task,
- .free_task = rd_free_task,
- .check_configfs_dev_params = rd_check_configfs_dev_params,
+ .parse_cdb = rd_parse_cdb,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_device_rev = rd_get_device_rev,
- .get_device_type = rd_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = rd_get_blocks,
+ .init_prot = rd_init_prot,
+ .free_prot = rd_free_prot,
};
int __init rd_module_init(void)
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 784e56a0410..cc46a6a89b3 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -2,7 +2,6 @@
#define TARGET_CORE_RD_H
#define RD_HBA_VERSION "v4.0"
-#define RD_DR_VERSION "4.0"
#define RD_MCP_VERSION "4.0"
/* Largest piece of memory kmalloc can allocate */
@@ -10,28 +9,11 @@
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
-#define RD_MAX_SECTORS 1024
/* Used in target_core_init_configfs() for virtual LUN 0 access */
int __init rd_module_init(void);
void rd_module_exit(void);
-#define RRF_EMULATE_CDB 0x01
-#define RRF_GOT_LBA 0x02
-
-struct rd_request {
- struct se_task rd_task;
-
- /* Offset from start of page */
- u32 rd_offset;
- /* Starting page in Ramdisk for request */
- u32 rd_page;
- /* Total number of pages needed for request */
- u32 rd_page_count;
- /* Scatterlist count */
- u32 rd_size;
-} ____cacheline_aligned;
-
struct rd_dev_sg_table {
u32 page_start_offset;
u32 page_end_offset;
@@ -40,9 +22,10 @@ struct rd_dev_sg_table {
} ____cacheline_aligned;
#define RDF_HAS_PAGE_COUNT 0x01
+#define RDF_NULLIO 0x02
struct rd_dev {
- int rd_direct;
+ struct se_device dev;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;
@@ -50,9 +33,12 @@ struct rd_dev {
u32 rd_page_count;
/* Number of SG tables in sg_table_array */
u32 sg_table_count;
- u32 rd_queue_depth;
+ /* Number of SG tables in sg_prot_array */
+ u32 sg_prot_count;
/* Array of rd_dev_sg_table_t containing scatterlists */
struct rd_dev_sg_table *sg_table_array;
+ /* Array of rd_dev_sg_table containing protection scatterlists */
+ struct rd_dev_sg_table *sg_prot_array;
/* Ramdisk HBA device is connected to */
struct rd_host *rd_host;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
new file mode 100644
index 00000000000..bd78d9235ac
--- /dev/null
+++ b/drivers/target/target_core_sbc.c
@@ -0,0 +1,1336 @@
+/*
+ * SCSI Block Commands (SBC) parsing and emulation.
+ *
+ * (c) Copyright 2002-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/crc-t10dif.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_ua.h"
+#include "target_core_alua.h"
+
+static sense_reason_t
+sbc_emulate_readcapacity(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned long long blocks_long = dev->transport->get_blocks(dev);
+ unsigned char *rbuf;
+ unsigned char buf[8];
+ u32 blocks;
+
+ /*
+ * SBC-2 says:
+ * If the PMI bit is set to zero and the LOGICAL BLOCK
+ * ADDRESS field is not set to zero, the device server shall
+ * terminate the command with CHECK CONDITION status with
+ * the sense key set to ILLEGAL REQUEST and the additional
+ * sense code set to INVALID FIELD IN CDB.
+ *
+ * In SBC-3, these fields are obsolete, but some SCSI
+ * compliance tests actually check this, so we might as well
+ * follow SBC-2.
+ */
+ if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
+ return TCM_INVALID_CDB_FIELD;
+
+ if (blocks_long >= 0x00000000ffffffff)
+ blocks = 0xffffffff;
+ else
+ blocks = (u32)blocks_long;
+
+ buf[0] = (blocks >> 24) & 0xff;
+ buf[1] = (blocks >> 16) & 0xff;
+ buf[2] = (blocks >> 8) & 0xff;
+ buf[3] = blocks & 0xff;
+ buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->dev_attrib.block_size & 0xff;
+
+ rbuf = transport_kmap_data_sg(cmd);
+ if (rbuf) {
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
+
+ target_complete_cmd_with_length(cmd, GOOD, 8);
+ return 0;
+}
+
+static sense_reason_t
+sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ unsigned char *rbuf;
+ unsigned char buf[32];
+ unsigned long long blocks = dev->transport->get_blocks(dev);
+
+ memset(buf, 0, sizeof(buf));
+ buf[0] = (blocks >> 56) & 0xff;
+ buf[1] = (blocks >> 48) & 0xff;
+ buf[2] = (blocks >> 40) & 0xff;
+ buf[3] = (blocks >> 32) & 0xff;
+ buf[4] = (blocks >> 24) & 0xff;
+ buf[5] = (blocks >> 16) & 0xff;
+ buf[6] = (blocks >> 8) & 0xff;
+ buf[7] = blocks & 0xff;
+ buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->dev_attrib.block_size & 0xff;
+ /*
+ * Set P_TYPE and PROT_EN bits for DIF support
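+	 * (PROT_EN is bit 0 of byte 12, and P_TYPE occupies bits 3:1 and
+	 * encodes the protection type minus one, per the READ CAPACITY (16)
+	 * parameter data in SBC-3.)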
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type)
+ buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+ }
+
+ if (dev->transport->get_lbppbe)
+ buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+ if (dev->transport->get_alignment_offset_lbas) {
+ u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+ buf[14] = (lalba >> 8) & 0x3f;
+ buf[15] = lalba & 0xff;
+ }
+
+ /*
+ * Set Thin Provisioning Enable bit following sbc3r22 in section
+ * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+ */
+ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
+ buf[14] |= 0x80;
+
+ rbuf = transport_kmap_data_sg(cmd);
+ if (rbuf) {
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
+
+ target_complete_cmd_with_length(cmd, GOOD, 32);
+ return 0;
+}
+
+sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
+{
+ u32 num_blocks;
+
+ if (cmd->t_task_cdb[0] == WRITE_SAME)
+ num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
+ else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
+ else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+
+ /*
+	 * Use the explicit range when a non-zero value is supplied, otherwise
+	 * calculate the remaining range based on ->get_blocks() - starting LBA.
+ */
+ if (num_blocks)
+ return num_blocks;
+
+ return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
+ cmd->t_task_lba + 1;
+}
+EXPORT_SYMBOL(sbc_get_write_same_sectors);
+
+static sense_reason_t
+sbc_emulate_noop(struct se_cmd *cmd)
+{
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
+{
+ return cmd->se_dev->dev_attrib.block_size * sectors;
+}
+
+static inline u32 transport_get_sectors_6(unsigned char *cdb)
+{
+ /*
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
+ */
+ return cdb[4] ? : 256;
+}
+
+static inline u32 transport_get_sectors_10(unsigned char *cdb)
+{
+ return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(unsigned char *cdb)
+{
+ return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(unsigned char *cdb)
+{
+ return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+ (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(unsigned char *cdb)
+{
+ return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+ (cdb[30] << 8) + cdb[31];
+
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+ return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+ return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+ __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+ __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+static sense_reason_t
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+{
+ unsigned int sectors = sbc_get_write_same_sectors(cmd);
+
+ if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+ pr_err("WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+ if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
+ pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
+ sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+ if (flags[0] & 0x10) {
+ pr_warn("WRITE SAME with ANCHOR not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
+ * translated into block discard requests within backend code.
+ */
+ if (flags[0] & 0x08) {
+ if (!ops->execute_write_same_unmap)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ cmd->execute_cmd = ops->execute_write_same_unmap;
+ return 0;
+ }
+ if (!ops->execute_write_same)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ cmd->execute_cmd = ops->execute_write_same;
+ return 0;
+}
+
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
+{
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+ unsigned int offset;
+ sense_reason_t ret = TCM_NO_SENSE;
+ int i, count;
+ /*
+ * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+ *
+ * 1) read the specified logical block(s);
+ * 2) transfer logical blocks from the data-out buffer;
+ * 3) XOR the logical blocks transferred from the data-out buffer with
+ * the logical blocks read, storing the resulting XOR data in a buffer;
+ * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+ * blocks transferred from the data-out buffer; and
+ * 5) transfer the resulting XOR data to the data-in buffer.
+ */
+ buf = kmalloc(cmd->data_length, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate xor_callback buf\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+ /*
+ * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
+ * into the locally allocated *buf
+ */
+ sg_copy_to_buffer(cmd->t_data_sg,
+ cmd->t_data_nents,
+ buf,
+ cmd->data_length);
+
+ /*
+ * Now perform the XOR against the BIDI read memory located at
+	 * cmd->t_bidi_data_sg, storing the result in place
+ */
+
+ offset = 0;
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+ addr = kmap_atomic(sg_page(sg));
+ if (!addr) {
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+
+ for (i = 0; i < sg->length; i++)
+ *(addr + sg->offset + i) ^= *(buf + offset + i);
+
+ offset += sg->length;
+ kunmap_atomic(addr);
+ }
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static sense_reason_t
+sbc_execute_rw(struct se_cmd *cmd)
+{
+ return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
+ cmd->data_direction);
+}
+
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ /*
+ * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+ * within target_complete_ok_work() if the command was successfully
+ * sent to the backend driver.
+ */
+ spin_lock_irq(&cmd->t_state_lock);
+ if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+ cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ /*
+ * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
+ * before the original READ I/O submission.
+ */
+ up(&dev->caw_sem);
+
+ return TCM_NO_SENSE;
+}
+
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *write_sg = NULL, *sg;
+ unsigned char *buf = NULL, *addr;
+ struct sg_mapping_iter m;
+ unsigned int offset = 0, len;
+ unsigned int nlbas = cmd->t_task_nolb;
+ unsigned int block_size = dev->dev_attrib.block_size;
+ unsigned int compare_len = (nlbas * block_size);
+ sense_reason_t ret = TCM_NO_SENSE;
+ int rc, i;
+
+ /*
+ * Handle early failure in transport_generic_request_failure(),
+	 * which will not have taken ->caw_sem yet.
+ */
+ if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
+ return TCM_NO_SENSE;
+ /*
+ * Immediately exit + release dev->caw_sem if command has already
+ * been failed with a non-zero SCSI status.
+ */
+ if (cmd->scsi_status) {
+ pr_err("compare_and_write_callback: non zero scsi_status:"
+ " 0x%02x\n", cmd->scsi_status);
+ goto out;
+ }
+
+ buf = kzalloc(cmd->data_length, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate compare_and_write buf\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+
+ write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+ GFP_KERNEL);
+ if (!write_sg) {
+ pr_err("Unable to allocate compare_and_write sg\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+ sg_init_table(write_sg, cmd->t_data_nents);
+ /*
+	 * Set up the verify and write data payloads from the total number of LBAs.
+ */
+ rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
+ cmd->data_length);
+ if (!rc) {
+ pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+ /*
+	 * Compare the SCSI READ payload against the verify payload.
+ */
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
+ addr = (unsigned char *)kmap_atomic(sg_page(sg));
+ if (!addr) {
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+
+ len = min(sg->length, compare_len);
+
+ if (memcmp(addr, buf + offset, len)) {
+ pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
+ addr, buf + offset);
+ kunmap_atomic(addr);
+ goto miscompare;
+ }
+ kunmap_atomic(addr);
+
+ offset += len;
+ compare_len -= len;
+ if (!compare_len)
+ break;
+ }
+
+ i = 0;
+ len = cmd->t_task_nolb * block_size;
+ sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
+ /*
+	 * Currently assumes NoLB=1 and PAGE_SIZE-sized SGLs.
+ */
+ while (len) {
+ sg_miter_next(&m);
+
+ if (block_size < PAGE_SIZE) {
+ sg_set_page(&write_sg[i], m.page, block_size,
+ block_size);
+ } else {
+ sg_miter_next(&m);
+ sg_set_page(&write_sg[i], m.page, block_size,
+ 0);
+ }
+ len -= block_size;
+ i++;
+ }
+ sg_miter_stop(&m);
+ /*
+ * Save the original SGL + nents values before updating to new
+ * assignments, to be released in transport_free_pages() ->
+ * transport_reset_sgl_orig()
+ */
+ cmd->t_data_sg_orig = cmd->t_data_sg;
+ cmd->t_data_sg = write_sg;
+ cmd->t_data_nents_orig = cmd->t_data_nents;
+ cmd->t_data_nents = 1;
+
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->transport_complete_callback = compare_and_write_post;
+ /*
+ * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
+ * for submitting the adjusted SGL to write instance user-data.
+ */
+ cmd->execute_cmd = sbc_execute_rw;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ __target_execute_cmd(cmd);
+
+ kfree(buf);
+ return ret;
+
+miscompare:
+ pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
+ dev->transport->name);
+ ret = TCM_MISCOMPARE_VERIFY;
+out:
+ /*
+ * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
+ * sbc_compare_and_write() before the original READ I/O submission.
+ */
+ up(&dev->caw_sem);
+ kfree(write_sg);
+ kfree(buf);
+ return ret;
+}
+
+static sense_reason_t
+sbc_compare_and_write(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
+ int rc;
+ /*
+ * Submit the READ first for COMPARE_AND_WRITE to perform the
+	 * comparison using SGLs at cmd->t_bidi_data_sg.
+ */
+ rc = down_interruptible(&dev->caw_sem);
+ if ((rc != 0) || signal_pending(current)) {
+ cmd->transport_complete_callback = NULL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+	 * Reset cmd->data_length to the write payload size (NoLB *
+	 * block_size) so as not to confuse backend drivers that depend on
+	 * this value matching the size of the I/O being submitted.
+ */
+ cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
+
+ ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ cmd->transport_complete_callback = NULL;
+ up(&dev->caw_sem);
+ return ret;
+ }
+ /*
+ * Unlock of dev->caw_sem to occur in compare_and_write_callback()
+ * upon MISCOMPARE, or in compare_and_write_done() upon completion
+ * of WRITE instance user-data.
+ */
+ return TCM_NO_SENSE;
+}
+
+static int
+sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+ bool is_write, struct se_cmd *cmd)
+{
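+	/*
+	 * Map the RDPROTECT/WRPROTECT field from the CDB onto the set of
+	 * GUARD/REFTAG checks the backend must perform, following the
+	 * protection checking tables in SBC-3.
+	 */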
+ if (is_write) {
+ cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
+ TARGET_PROT_DOUT_INSERT;
+ switch (protect) {
+ case 0x0:
+ case 0x3:
+ cmd->prot_checks = 0;
+ break;
+ case 0x1:
+ case 0x5:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x2:
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x4:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ break;
+ default:
+ pr_err("Unsupported protect field %d\n", protect);
+ return -EINVAL;
+ }
+ } else {
+ cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
+ TARGET_PROT_DIN_STRIP;
+ switch (protect) {
+ case 0x0:
+ case 0x1:
+ case 0x5:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x2:
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x3:
+ cmd->prot_checks = 0;
+ break;
+ case 0x4:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ break;
+ default:
+ pr_err("Unsupported protect field %d\n", protect);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static bool
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+ u32 sectors, bool is_write)
+{
+ u8 protect = cdb[1] >> 5;
+
+ if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
+ return true;
+
+ switch (dev->dev_attrib.pi_prot_type) {
+ case TARGET_DIF_TYPE3_PROT:
+ cmd->reftag_seed = 0xffffffff;
+ break;
+ case TARGET_DIF_TYPE2_PROT:
+ if (protect)
+ return false;
+
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE1_PROT:
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE0_PROT:
+ default:
+ return true;
+ }
+
+ if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
+ is_write, cmd))
+ return false;
+
+ cmd->prot_type = dev->dev_attrib.pi_prot_type;
+ cmd->prot_length = dev->prot_length * sectors;
+
+	/*
+	 * If protection information is transferred with the data over the
+	 * wire, shrink cmd->data_length to describe the user data only;
+	 * the actual transfer length is the data length plus the
+	 * protection length.
+	 */
+ if (protect)
+ cmd->data_length = sectors * dev->dev_attrib.block_size;
+
+ pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
+ "prot_op=%d prot_checks=%d\n",
+ __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
+ cmd->prot_op, cmd->prot_checks);
+
+ return true;
+}
+
+sense_reason_t
+sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned int size;
+ u32 sectors = 0;
+ sense_reason_t ret;
+
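+	/*
+	 * Decode the LBA and transfer length from the CDB and wire up the
+	 * matching ->execute_cmd() handler; the size computed here is
+	 * validated against the fabric-provided payload length via
+	 * target_cmd_size_check() below.
+	 */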
+ switch (cdb[0]) {
+ case READ_6:
+ sectors = transport_get_sectors_6(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case READ_10:
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case READ_12:
+ sectors = transport_get_sectors_12(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case READ_16:
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case WRITE_6:
+ sectors = transport_get_sectors_6(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case WRITE_10:
+ case WRITE_VERIFY:
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case WRITE_12:
+ sectors = transport_get_sectors_12(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case WRITE_16:
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ break;
+ case XDWRITEREAD_10:
+ if (cmd->data_direction != DMA_TO_DEVICE ||
+ !(cmd->se_cmd_flags & SCF_BIDI))
+ return TCM_INVALID_CDB_FIELD;
+ sectors = transport_get_sectors_10(cdb);
+
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+ /*
+ * Setup BIDI XOR callback to be run after I/O completion.
+ */
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ cmd->transport_complete_callback = &xdreadwrite_callback;
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ break;
+ case VARIABLE_LENGTH_CMD:
+ {
+ u16 service_action = get_unaligned_be16(&cdb[8]);
+ switch (service_action) {
+ case XDWRITEREAD_32:
+ sectors = transport_get_sectors_32(cdb);
+
+ /*
+ * Use WRITE_32 and READ_32 opcodes for the emulated
+ * XDWRITE_READ_32 logic.
+ */
+ cmd->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+ /*
+			 * Setup BIDI XOR callback to be run after I/O
+			 * completion.
+ */
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
+ cmd->transport_complete_callback = &xdreadwrite_callback;
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ break;
+ case WRITE_SAME_32:
+ sectors = transport_get_sectors_32(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+ " supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+ ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+ if (ret)
+ return ret;
+ break;
+ default:
+ pr_err("VARIABLE_LENGTH_CMD service action"
+ " 0x%04x not supported\n", service_action);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+ break;
+ }
+ case COMPARE_AND_WRITE:
+ sectors = cdb[13];
+ /*
+ * Currently enforce COMPARE_AND_WRITE for a single sector
+ */
+ if (sectors > 1) {
+ pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
+ " than 1\n", sectors);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Double size because we have two buffers, note that
+		 * zero is not an error.
+ */
+ size = 2 * sbc_get_size(cmd, sectors);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+ cmd->t_task_nolb = sectors;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_compare_and_write;
+ cmd->transport_complete_callback = compare_and_write_callback;
+ break;
+ case READ_CAPACITY:
+ size = READ_CAP_LEN;
+ cmd->execute_cmd = sbc_emulate_readcapacity;
+ break;
+ case SERVICE_ACTION_IN:
+ switch (cmd->t_task_cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ cmd->execute_cmd = sbc_emulate_readcapacity_16;
+ break;
+ case SAI_REPORT_REFERRALS:
+ cmd->execute_cmd = target_emulate_report_referrals;
+ break;
+ default:
+ pr_err("Unsupported SA: 0x%02x\n",
+ cmd->t_task_cdb[1] & 0x1f);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ break;
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ if (cdb[0] == SYNCHRONIZE_CACHE) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
+ if (ops->execute_sync_cache) {
+ cmd->execute_cmd = ops->execute_sync_cache;
+ goto check_lba;
+ }
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_noop;
+ break;
+ case UNMAP:
+ if (!ops->execute_unmap)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ size = get_unaligned_be16(&cdb[7]);
+ cmd->execute_cmd = ops->execute_unmap;
+ break;
+ case WRITE_SAME_16:
+ sectors = transport_get_sectors_16(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+
+ ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ if (ret)
+ return ret;
+ break;
+ case WRITE_SAME:
+ sectors = transport_get_sectors_10(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
+
+ /*
+ * Follow sbcr26 with WRITE_SAME (10) and check for the existence
+ * of byte 1 bit 3 UNMAP instead of original reserved field
+ */
+ ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ if (ret)
+ return ret;
+ break;
+ case VERIFY:
+ size = 0;
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->execute_cmd = sbc_emulate_noop;
+ goto check_lba;
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ /*
+ * There are still clients out there which use these old SCSI-2
+ * commands. This mainly happens when running VMs with legacy
+ * guest systems, connected via SCSI command pass-through to
+ * iSCSI targets. Make them happy and return status GOOD.
+ */
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_noop;
+ break;
+ default:
+ ret = spc_parse_cdb(cmd, &size);
+ if (ret)
+ return ret;
+ }
+
+ /* reject any command that we don't have a handler for */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ unsigned long long end_lba;
+
+ if (sectors > dev->dev_attrib.fabric_max_sectors) {
+ printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+ " big sectors %u exceeds fabric_max_sectors:"
+ " %u\n", cdb[0], sectors,
+ dev->dev_attrib.fabric_max_sectors);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ if (sectors > dev->dev_attrib.hw_max_sectors) {
+ printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+ " big sectors %u exceeds backend hw_max_sectors:"
+ " %u\n", cdb[0], sectors,
+ dev->dev_attrib.hw_max_sectors);
+ return TCM_INVALID_CDB_FIELD;
+ }
+check_lba:
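+		/*
+		 * ->get_blocks() returns the last addressable LBA, so the
+		 * device contains get_blocks() + 1 logical blocks in total.
+		 */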
+ end_lba = dev->transport->get_blocks(dev) + 1;
+ if (cmd->t_task_lba + sectors > end_lba) {
+ pr_err("cmd exceeds last lba %llu "
+ "(lba %llu, sectors %u)\n",
+ end_lba, cmd->t_task_lba, sectors);
+ return TCM_ADDRESS_OUT_OF_RANGE;
+ }
+
+ if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
+ size = sbc_get_size(cmd, sectors);
+ }
+
+ return target_cmd_size_check(cmd, size);
+}
+EXPORT_SYMBOL(sbc_parse_cdb);
+
+u32 sbc_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+EXPORT_SYMBOL(sbc_get_device_type);
+
+sense_reason_t
+sbc_execute_unmap(struct se_cmd *cmd,
+ sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
+ sector_t, sector_t),
+ void *priv)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf, *ptr = NULL;
+ sector_t lba;
+ int size;
+ u32 range;
+ sense_reason_t ret = 0;
+ int dl, bd_dl;
+
+ /* We never set ANC_SUP */
+ if (cmd->t_task_cdb[1])
+ return TCM_INVALID_CDB_FIELD;
+
+ if (cmd->data_length == 0) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+
+ if (cmd->data_length < 8) {
+ pr_warn("UNMAP parameter list length %u too small\n",
+ cmd->data_length);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
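+	/*
+	 * Per SBC-3, the UNMAP parameter list starts with an 8-byte header:
+	 * UNMAP DATA LENGTH in bytes 0-1 and UNMAP BLOCK DESCRIPTOR DATA
+	 * LENGTH in bytes 2-3, followed by 16-byte block descriptors.
+	 */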
+ dl = get_unaligned_be16(&buf[0]);
+ bd_dl = get_unaligned_be16(&buf[2]);
+
+ size = cmd->data_length - 8;
+ if (bd_dl > size)
+ pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
+ cmd->data_length, bd_dl);
+ else
+ size = bd_dl;
+
+ if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto err;
+ }
+
+ /* First UNMAP block descriptor starts at 8 byte offset */
+ ptr = &buf[8];
+ pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+ " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+ while (size >= 16) {
+ lba = get_unaligned_be64(&ptr[0]);
+ range = get_unaligned_be32(&ptr[8]);
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+ (unsigned long long)lba, range);
+
+ if (range > dev->dev_attrib.max_unmap_lba_count) {
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto err;
+ }
+
+ if (lba + range > dev->transport->get_blocks(dev) + 1) {
+ ret = TCM_ADDRESS_OUT_OF_RANGE;
+ goto err;
+ }
+
+ ret = do_unmap_fn(cmd, priv, lba, range);
+ if (ret)
+ goto err;
+
+ ptr += 16;
+ size -= 16;
+ }
+
+err:
+ transport_kunmap_data_sg(cmd);
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+ return ret;
+}
+EXPORT_SYMBOL(sbc_execute_unmap);
+
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ sector_t sector = cmd->t_task_lba;
+ void *daddr, *paddr;
+ int i, j, offset = 0;
+
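+	/*
+	 * Walk the data SGL one logical block at a time, generating an
+	 * 8-byte T10 PI tuple (16-bit guard CRC, 16-bit app tag, 32-bit
+	 * ref tag) into the protection SGL for each block.
+	 */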
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= psg->length) {
+ kunmap_atomic(paddr);
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+ sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
+ dev->dev_attrib.block_size));
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+ sdt->app_tag = 0;
+
+ pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ sector++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+}
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+ const void *p, sector_t sector, unsigned int ei_lba)
+{
+ int block_size = dev->dev_attrib.block_size;
+ __be16 csum;
+
+ csum = cpu_to_be16(crc_t10dif(p, block_size));
+
+ if (sdt->guard_tag != csum) {
+ pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+ " csum 0x%04x\n", (unsigned long long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ }
+
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+ be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+ pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+ " sector MSB: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+ be32_to_cpu(sdt->ref_tag) != ei_lba) {
+ pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+ " ei_lba: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), ei_lba);
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ return 0;
+}
+
+static void
+sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+ struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *psg;
+ void *paddr, *addr;
+ unsigned int i, len, left;
+ unsigned int offset = sg_off;
+
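+	/*
+	 * Copy protection tuples between the command's t_prot_sg and the
+	 * caller-provided SGL: into t_prot_sg for reads, out of it for
+	 * writes.
+	 */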
+ left = sectors * dev->prot_length;
+
+ for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+ unsigned int psg_len, copied = 0;
+
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ psg_len = min(left, psg->length);
+ while (psg_len) {
+ len = min(psg_len, sg->length - offset);
+ addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
+
+ if (read)
+ memcpy(paddr + copied, addr, len);
+ else
+ memcpy(addr, paddr + copied, len);
+
+ left -= len;
+ offset += len;
+ copied += len;
+ psg_len -= len;
+
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ kunmap_atomic(addr);
+ }
+ kunmap_atomic(paddr);
+ }
+}
+
+sense_reason_t
+sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ sector_t sector = start;
+ void *daddr, *paddr;
+ int i, j, offset = 0;
+ sense_reason_t rc;
+
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= psg->length) {
+ kunmap_atomic(paddr);
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+
+ pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ ei_lba);
+ if (rc) {
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ cmd->bad_sector = sector;
+ return rc;
+ }
+
+ sector++;
+ ei_lba++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+ sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
+
+ return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_write);
+
+static sense_reason_t
+__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg, *psg = sg;
+ sector_t sector = start;
+ void *daddr, *paddr;
+ int i, j, offset = sg_off;
+ sense_reason_t rc;
+
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= psg->length) {
+ kunmap_atomic(paddr);
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+
+ pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
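+			/*
+			 * An APPLICATION TAG of 0xffff marks the tuple as
+			 * escaped: skip all verification for this block.
+			 */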
+ if (sdt->app_tag == cpu_to_be16(0xffff)) {
+ sector++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ continue;
+ }
+
+ rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ ei_lba);
+ if (rc) {
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ cmd->bad_sector = sector;
+ return rc;
+ }
+
+ sector++;
+ ei_lba++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+
+ return 0;
+}
+
+sense_reason_t
+sbc_dif_read_strip(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ u32 sectors = cmd->prot_length / dev->prot_length;
+
+ return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+ cmd->t_prot_sg, 0);
+}
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ sense_reason_t rc;
+
+ rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
+ if (rc)
+ return rc;
+
+ sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+ return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_read);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
new file mode 100644
index 00000000000..6cd7222738f
--- /dev/null
+++ b/drivers/target/target_core_spc.c
@@ -0,0 +1,1445 @@
+/*
+ * SCSI Primary Commands (SPC) parsing and emulation.
+ *
+ * (c) Copyright 2002-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+#include "target_core_xcopy.h"
+
+static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+ /*
+ * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+ */
+ buf[5] = 0x80;
+
+ /*
+ * Set TPGS field for explicit and/or implicit ALUA access type
+	 * and operation.
+ *
+ * See spc4r17 section 6.4.2 Table 135
+ */
+ if (!port)
+ return;
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ return;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (tg_pt_gp)
+ buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+}
+
+sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_lun *lun = cmd->se_lun;
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
+ /* Set RMB (removable media) for tape devices */
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE)
+ buf[1] = 0x80;
+
+ buf[2] = 0x05; /* SPC-3 */
+
+ /*
+ * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+ *
+ * SPC4 says:
+ * A RESPONSE DATA FORMAT field set to 2h indicates that the
+ * standard INQUIRY data is in the format defined in this
+ * standard. Response data format values less than 2h are
+ * obsolete. Response data format values greater than 2h are
+ * reserved.
+ */
+ buf[3] = 2;
+
+ /*
+ * Enable SCCS and TPGS fields for Emulated ALUA
+ */
+ spc_fill_alua_data(lun->lun_sep, buf);
+
+ /*
+ * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
+ */
+ if (dev->dev_attrib.emulate_3pc)
+ buf[5] |= 0x8;
+ /*
+ * Set Protection (PROTECT) bit when DIF has been enabled on the
+ * device, and the transport supports VERIFY + PASS.
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type)
+ buf[5] |= 0x1;
+ }
+
+ buf[7] = 0x2; /* CmdQue=1 */
+
+ memcpy(&buf[8], "LIO-ORG ", 8);
+ memset(&buf[16], 0x20, 16);
+ memcpy(&buf[16], dev->t10_wwn.model,
+ min_t(size_t, strlen(dev->t10_wwn.model), 16));
+ memcpy(&buf[32], dev->t10_wwn.revision,
+ min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+ buf[4] = 31; /* Set additional length to 31 */
+
+ return 0;
+}
+EXPORT_SYMBOL(spc_emulate_inquiry_std);
+
+/* unit serial number */
+static sense_reason_t
+spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+ u16 len;
+
+ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+ len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
+ len++; /* Extra Byte for NULL Terminator */
+ buf[3] = len;
+ }
+ return 0;
+}
+
+void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
+ unsigned char *buf)
+{
+ unsigned char *p = &dev->t10_wwn.unit_serial[0];
+ int cnt;
+ bool next = true;
+
+ /*
+ * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+ * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+ * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+	 * to complete the payload. These are based on the VPD=0x80 PRODUCT SERIAL
+	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
+	 * per-device uniqueness.
+ */
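+	/*
+	 * For example, a unit serial beginning "a1b2c3" packs one hex digit
+	 * per nibble: buf[0] |= 0xa, buf[1] = 0x1b, buf[2] = 0x2c, and
+	 * buf[3] = 0x30 pending the next digit.
+	 */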
+ for (cnt = 0; *p && cnt < 13; p++) {
+ int val = hex_to_bin(*p);
+
+ if (val < 0)
+ continue;
+
+ if (next) {
+ next = false;
+ buf[cnt++] |= val;
+ } else {
+ next = true;
+ buf[cnt] = val << 4;
+ }
+ }
+}
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+sense_reason_t
+spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_lun *lun = cmd->se_lun;
+ struct se_port *port = NULL;
+ struct se_portal_group *tpg = NULL;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned char *prod = &dev->t10_wwn.model[0];
+ u32 prod_len;
+ u32 unit_serial_len, off = 0;
+ u16 len = 0, id_len;
+
+ off = 4;
+
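+	/*
+	 * Build one designation descriptor per available identifier: NAA,
+	 * T10 vendor ID and, when a port is present, relative target port,
+	 * target port group, LU group, SCSI name string and target device
+	 * name descriptors.
+	 */
+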
+ /*
+ * NAA IEEE Registered Extended Assigned designator format, see
+ * spc4r17 section 7.7.3.6.5
+ *
+ * We depend upon a target_core_mod/ConfigFS provided
+ * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+ * value in order to return the NAA id.
+ */
+ if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
+ goto check_t10_vend_desc;
+
+ /* CODE SET == Binary */
+ buf[off++] = 0x1;
+
+	/* Set ASSOCIATION == addressed logical unit: 00b */
+ buf[off] = 0x00;
+
+ /* Identifier/Designator type == NAA identifier */
+ buf[off++] |= 0x3;
+ off++;
+
+ /* Identifier/Designator length */
+ buf[off++] = 0x10;
+
+ /*
+ * Start NAA IEEE Registered Extended Identifier/Designator
+ */
+ buf[off++] = (0x6 << 4);
+
+ /*
+ * Use OpenFabrics IEEE Company ID: 00 14 05
+ */
+ buf[off++] = 0x01;
+ buf[off++] = 0x40;
+ buf[off] = (0x5 << 4);
+
+ /*
+ * Return ConfigFS Unit Serial Number information for
+ * VENDOR_SPECIFIC_IDENTIFIER and
+ * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
+ */
+ spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+
+ len = 20;
+ off = (len + 4);
+
+check_t10_vend_desc:
+ /*
+ * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+ */
+ id_len = 8; /* For Vendor field */
+ prod_len = 4; /* For VPD Header */
+ prod_len += 8; /* For Vendor field */
+ prod_len += strlen(prod);
+ prod_len++; /* For : */
+
+ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+ unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
+ unit_serial_len++; /* For NULL Terminator */
+
+ id_len += sprintf(&buf[off+12], "%s:%s", prod,
+ &dev->t10_wwn.unit_serial[0]);
+ }
+ buf[off] = 0x2; /* ASCII */
+ buf[off+1] = 0x1; /* T10 Vendor ID */
+ buf[off+2] = 0x0;
+ memcpy(&buf[off+4], "LIO-ORG", 8);
+ /* Extra Byte for NULL Terminator */
+ id_len++;
+ /* Identifier Length */
+ buf[off+3] = id_len;
+ /* Header size for Designation descriptor */
+ len += (id_len + 4);
+ off += (id_len + 4);
+ /*
+ * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
+ */
+ port = lun->lun_sep;
+ if (port) {
+ struct t10_alua_lu_gp *lu_gp;
+ u32 padding, scsi_name_len, scsi_target_len;
+ u16 lu_gp_id = 0;
+ u16 tg_pt_gp_id = 0;
+ u16 tpgt;
+
+ tpg = port->sep_tpg;
+ /*
+		 * Relative target port identifier, see spc4r17
+ * section 7.7.3.7
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+ buf[off] =
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Relative target port identifier */
+ buf[off++] |= 0x4;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ /* Skip over Obsolete field in RTPI payload
+ * in Table 472 */
+ off += 2;
+ buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+ buf[off++] = (port->sep_rtpi & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * Target port group identifier, see spc4r17
+ * section 7.7.3.8
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ goto check_lu_gp;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (!tg_pt_gp) {
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ goto check_lu_gp;
+ }
+ tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ buf[off] =
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+ /* DESIGNATOR TYPE == Target port group identifier */
+ buf[off++] |= 0x5;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ off += 2; /* Skip over Reserved Field */
+ buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+ buf[off++] = (tg_pt_gp_id & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * Logical Unit Group identifier, see spc4r17
+ * section 7.7.3.8
+ */
+check_lu_gp:
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!lu_gp_mem)
+ goto check_scsi_name;
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if (!lu_gp) {
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+ goto check_scsi_name;
+ }
+ lu_gp_id = lu_gp->lu_gp_id;
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ /* DESIGNATOR TYPE == Logical Unit Group identifier */
+ buf[off++] |= 0x6;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ off += 2; /* Skip over Reserved Field */
+ buf[off++] = ((lu_gp_id >> 8) & 0xff);
+ buf[off++] = (lu_gp_id & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * SCSI name string designator, see spc4r17
+ * section 7.7.3.11
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+check_scsi_name:
+ buf[off] =
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+ /* DESIGNATOR TYPE == SCSI name string */
+ buf[off++] |= 0x8;
+ off += 2; /* Skip over Reserved and length */
+ /*
+			 * SCSI name string identifier containing $FABRIC_MOD
+			 * dependent information. For LIO-Target and iSCSI
+			 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+ * UTF-8 encoding.
+ */
+ tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
+ scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
+ scsi_name_len += 1 /* Include NULL terminator */;
+ /*
+ * The null-terminated, null-padded (see 4.4.2) SCSI
+ * NAME STRING field contains a UTF-8 format string.
+ * The number of bytes in the SCSI NAME STRING field
+ * (i.e., the value in the DESIGNATOR LENGTH field)
+ * shall be no larger than 256 and shall be a multiple
+ * of four.
+ */
+ padding = ((-scsi_name_len) & 3);
+ if (padding)
+ scsi_name_len += padding;
+ if (scsi_name_len > 256)
+ scsi_name_len = 256;
+
+ buf[off-1] = scsi_name_len;
+ off += scsi_name_len;
+ /* Header size + Designation descriptor */
+ len += (scsi_name_len + 4);
+
+ /*
+ * Target device designator
+ */
+ buf[off] =
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target device: 10b */
+ buf[off] |= 0x20;
+ /* DESIGNATOR TYPE == SCSI name string */
+ buf[off++] |= 0x8;
+ off += 2; /* Skip over Reserved and length */
+ /*
+			 * SCSI name string identifier containing $FABRIC_MOD
+ * dependent information. For LIO-Target and iSCSI
+ * Target Port, this means "<iSCSI name>" in
+ * UTF-8 encoding.
+ */
+ scsi_target_len = sprintf(&buf[off], "%s",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+ scsi_target_len += 1 /* Include NULL terminator */;
+ /*
+ * The null-terminated, null-padded (see 4.4.2) SCSI
+ * NAME STRING field contains a UTF-8 format string.
+ * The number of bytes in the SCSI NAME STRING field
+ * (i.e., the value in the DESIGNATOR LENGTH field)
+ * shall be no larger than 256 and shall be a multiple
+ * of four.
+ */
+ padding = ((-scsi_target_len) & 3);
+ if (padding)
+ scsi_target_len += padding;
+ if (scsi_target_len > 256)
+ scsi_target_len = 256;
+
+ buf[off-1] = scsi_target_len;
+ off += scsi_target_len;
+
+ /* Header size + Designation descriptor */
+ len += (scsi_target_len + 4);
+ }
+ buf[2] = ((len >> 8) & 0xff);
+ buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+ return 0;
+}
+EXPORT_SYMBOL(spc_emulate_evpd_83);
+
+static bool
+spc_check_dev_wce(struct se_device *dev)
+{
+ bool wce = false;
+
+ if (dev->transport->get_write_cache)
+ wce = dev->transport->get_write_cache(dev);
+ else if (dev->dev_attrib.emulate_write_cache > 0)
+ wce = true;
+
+ return wce;
+}
+
+/* Extended INQUIRY Data VPD Page */
+static sense_reason_t
+spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
+ buf[3] = 0x3c;
+ /*
+ * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
+ * only for TYPE3 protection.
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ buf[4] = 0x5;
+ else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+ buf[4] = 0x4;
+ }
+
+ /* Set HEADSUP, ORDSUP, SIMPSUP */
+ buf[5] = 0x07;
+
+ /* If WriteCache emulation is enabled, set V_SUP */
+ if (spc_check_dev_wce(dev))
+ buf[6] = 0x01;
+ /* If an LBA map is present set R_SUP */
+ spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ buf[8] = 0x10;
+ spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
+ return 0;
+}
+
+/* Block Limits VPD page */
+static sense_reason_t
+spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+ u32 max_sectors;
+ int have_tp = 0;
+ int opt, min;
+
+ /*
+ * Following spc3r22 section 6.5.3 Block Limits VPD page, when
+	 * emulate_tpu=1 or emulate_tpws=1 we expect a
+ * different page length for Thin Provisioning.
+ */
+ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
+ have_tp = 1;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = have_tp ? 0x3c : 0x10;
+
+	/*
+	 * Set WSNZ to 1, indicating that WRITE SAME with a NUMBER OF LOGICAL
+	 * BLOCKS of zero is not supported (matching the checks in
+	 * sbc_parse_cdb()).
+	 */
+ buf[4] = 0x01;
+ /*
+ * Set MAXIMUM COMPARE AND WRITE LENGTH
+ */
+ if (dev->dev_attrib.emulate_caw)
+ buf[5] = 0x01;
+
+ /*
+ * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+ */
+ if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+ put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+ else
+ put_unaligned_be16(1, &buf[6]);
+
+ /*
+ * Set MAXIMUM TRANSFER LENGTH
+ */
+ max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+ dev->dev_attrib.hw_max_sectors);
+ put_unaligned_be32(max_sectors, &buf[8]);
+
+ /*
+ * Set OPTIMAL TRANSFER LENGTH
+ */
+ if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+ put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+ else
+ put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+
+ /*
+ * Exit now if we don't support TP.
+ */
+ if (!have_tp)
+ goto max_write_same;
+
+ /*
+ * Set MAXIMUM UNMAP LBA COUNT
+ */
+ put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
+
+ /*
+ * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+ */
+ put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
+ &buf[24]);
+
+ /*
+ * Set OPTIMAL UNMAP GRANULARITY
+ */
+ put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
+
+ /*
+ * UNMAP GRANULARITY ALIGNMENT
+ */
+ put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
+ &buf[32]);
+ if (dev->dev_attrib.unmap_granularity_alignment != 0)
+ buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+ /*
+ * MAXIMUM WRITE SAME LENGTH
+ */
+max_write_same:
+ put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+
+ return 0;
+}
+
+/* Block Device Characteristics VPD page */
+static sense_reason_t
+spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x3c;
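+	/* MEDIUM ROTATION RATE (bytes 4-5): 0001h means non-rotating medium */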
+ buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
+
+ return 0;
+}
+
+/* Thin Provisioning VPD */
+static sense_reason_t
+spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ /*
+ * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
+ *
+ * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+ * zero, then the page length shall be set to 0004h. If the DP bit
+ * is set to one, then the page length shall be set to the value
+ * defined in table 162.
+ */
+ buf[0] = dev->transport->get_device_type(dev);
+
+ /*
+ * Set Hardcoded length mentioned above for DP=0
+ */
+ put_unaligned_be16(0x0004, &buf[2]);
+
+ /*
+ * The THRESHOLD EXPONENT field indicates the threshold set size in
+ * LBAs as a power of 2 (i.e., the threshold set size is equal to
+	 * 2^(threshold exponent)).
+ *
+ * Note that this is currently set to 0x00 as mkp says it will be
+ * changing again. We can enable this once it has settled in T10
+ * and is actually used by Linux/SCSI ML code.
+ */
+ buf[4] = 0x00;
+
+ /*
+ * A TPU bit set to one indicates that the device server supports
+ * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+ * that the device server does not support the UNMAP command.
+ */
+ if (dev->dev_attrib.emulate_tpu != 0)
+ buf[5] = 0x80;
+
+ /*
+ * A TPWS bit set to one indicates that the device server supports
+ * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+ * A TPWS bit set to zero indicates that the device server does not
+ * support the use of the WRITE SAME (16) command to unmap LBAs.
+ */
+ if (dev->dev_attrib.emulate_tpws != 0)
+ buf[5] |= 0x40;
+
+ return 0;
+}
+
+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x0c;
+ put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
+
+ return 0;
+}
+
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+
+static struct {
+ uint8_t page;
+ sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
+} evpd_handlers[] = {
+ { .page = 0x00, .emulate = spc_emulate_evpd_00 },
+ { .page = 0x80, .emulate = spc_emulate_evpd_80 },
+ { .page = 0x83, .emulate = spc_emulate_evpd_83 },
+ { .page = 0x86, .emulate = spc_emulate_evpd_86 },
+ { .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
+ { .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
+ { .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+ { .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
+};
+
+/* supported vital product data pages */
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+ int p;
+
+ /*
+ * Only report the INQUIRY EVPD=1 pages after a valid NAA
+ * Registered Extended LUN WWN has been set via ConfigFS
+ * during device creation/restart.
+ */
+ if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+ buf[3] = ARRAY_SIZE(evpd_handlers);
+ for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+ buf[p + 4] = evpd_handlers[p].page;
+ }
+
+ return 0;
+}
+
+static sense_reason_t
+spc_emulate_inquiry(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+ unsigned char *rbuf;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned char *buf;
+ sense_reason_t ret;
+ int p;
+ int len = 0;
+
+ buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate response buffer for INQUIRY\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+ buf[0] = 0x3f; /* Not connected */
+ else
+ buf[0] = dev->transport->get_device_type(dev);
+
+ if (!(cdb[1] & 0x1)) {
+ if (cdb[2]) {
+ pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
+ cdb[2]);
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out;
+ }
+
+ ret = spc_emulate_inquiry_std(cmd, buf);
+ len = buf[4] + 5;
+ goto out;
+ }
+
+ for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
+ if (cdb[2] == evpd_handlers[p].page) {
+ buf[1] = cdb[2];
+ ret = evpd_handlers[p].emulate(cmd, buf);
+ len = get_unaligned_be16(&buf[2]) + 4;
+ goto out;
+ }
+ }
+
+ pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ ret = TCM_INVALID_CDB_FIELD;
+
+out:
+ rbuf = transport_kmap_data_sg(cmd);
+ if (rbuf) {
+ memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
+ kfree(buf);
+
+ if (!ret)
+ target_complete_cmd_with_length(cmd, GOOD, len);
+ return ret;
+}
+
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
+{
+ p[0] = 0x01;
+ p[1] = 0x0a;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+out:
+ return 12;
+}
+
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
+ p[0] = 0x0a;
+ p[1] = 0x0a;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+ p[2] = 2;
+ /*
+ * From spc4r23, 7.4.7 Control mode page
+ *
+ * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
+ * restrictions on the algorithm used for reordering commands
+ * having the SIMPLE task attribute (see SAM-4).
+ *
+ * Table 368 -- QUEUE ALGORITHM MODIFIER field
+ * Code Description
+ * 0h Restricted reordering
+ * 1h Unrestricted reordering allowed
+ * 2h to 7h Reserved
+ * 8h to Fh Vendor specific
+ *
+ * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
+ * the device server shall order the processing sequence of commands
+ * having the SIMPLE task attribute such that data integrity is maintained
+ * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
+ * requests is halted at any time, the final value of all data observable
+ * on the medium shall be the same as if all the commands had been processed
+ * with the ORDERED task attribute).
+ *
+ * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
+ * device server may reorder the processing sequence of commands having the
+ * SIMPLE task attribute in any manner. Any data integrity exposures related to
+ * command sequence order shall be explicitly handled by the application client
+	 * through the selection of appropriate commands and task attributes.
+ */
+ p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+ /*
+ * From spc4r17, section 7.4.6 Control mode Page
+ *
+ * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+ *
+ * 00b: The logical unit shall clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall not establish a unit attention condition when a com-
+ * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+ * status.
+ *
+ * 10b: The logical unit shall not clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall not establish a unit attention condition when
+ * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+ * CONFLICT status.
+ *
+ * 11b: The logical unit shall not clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall establish a unit attention condition for the
+ * initiator port associated with the I_T nexus on which the BUSY,
+ * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+ * Depending on the status, the additional sense code shall be set to
+ * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+ * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+ * command, a unit attention condition shall be established only once
+ * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
+ * of the number of commands completed with one of those status codes.
+ */
+ p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ /*
+ * From spc4r17, section 7.4.6 Control mode Page
+ *
+ * Task Aborted Status (TAS) bit set to zero.
+ *
+ * A task aborted status (TAS) bit set to zero specifies that aborted
+ * tasks shall be terminated by the device server without any response
+ * to the application client. A TAS bit set to one specifies that tasks
+ * aborted by the actions of an I_T nexus other than the I_T nexus on
+ * which the command was received shall be completed with TASK ABORTED
+ * status (see SAM-4).
+ */
+ p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
+ /*
+ * From spc4r30, section 7.5.7 Control mode page
+ *
+ * Application Tag Owner (ATO) bit set to one.
+ *
+ * If the ATO bit is set to one the device server shall not modify the
+ * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
+ * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
+ * TAG field.
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type)
+ p[5] |= 0x80;
+ }
+
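+	/*
+	 * BUSY TIMEOUT PERIOD (bytes 8-9): 0xffff means unlimited.
+	 * EXTENDED SELF-TEST ROUTINE COMPLETION TIME (bytes 10-11): 30.
+	 */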
+ p[8] = 0xff;
+ p[9] = 0xff;
+ p[11] = 30;
+
+out:
+ return 12;
+}
+
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ p[0] = 0x08;
+ p[1] = 0x12;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+ if (spc_check_dev_wce(dev))
+ p[2] = 0x04; /* Write Cache Enable */
+ p[12] = 0x20; /* Disabled Read Ahead */
+
+out:
+ return 20;
+}
+
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
+{
+ p[0] = 0x1c;
+ p[1] = 0x0a;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+out:
+ return 12;
+}
+
+static struct {
+ uint8_t page;
+ uint8_t subpage;
+ int (*emulate)(struct se_cmd *, u8, unsigned char *);
+} modesense_handlers[] = {
+ { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
+ { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
+ { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
+ { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
+};
+
+static void spc_modesense_write_protect(unsigned char *buf, int type)
+{
+ /*
+ * I believe that the WP bit (bit 7) in the mode header is the same for
+ * all device types.
+ */
+ switch (type) {
+ case TYPE_DISK:
+ case TYPE_TAPE:
+ default:
+ buf[0] |= 0x80; /* WP bit */
+ break;
+ }
+}
+
+static void spc_modesense_dpofua(unsigned char *buf, int type)
+{
+ switch (type) {
+ case TYPE_DISK:
+ buf[0] |= 0x10; /* DPOFUA bit */
+ break;
+ default:
+ break;
+ }
+}
+
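+/*
+ * Emit the BLOCK DESCRIPTOR LENGTH byte (8) followed by the 8-byte short
+ * block descriptor (NUMBER OF BLOCKS and BLOCK LENGTH), 9 bytes in total,
+ * which is why the caller advances its running offset by the return value.
+ */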
+static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+ *buf++ = 8;
+ put_unaligned_be32(min(blocks, 0xffffffffull), buf);
+ buf += 4;
+ put_unaligned_be32(block_size, buf);
+ return 9;
+}
+
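+/*
+ * MODE SENSE(10) variant: if the capacity fits in 32 bits, reuse the short
+ * descriptor (buf + 3 skips the LONGLBA byte, a reserved byte and the high
+ * byte of BLOCK DESCRIPTOR LENGTH); otherwise set LONGLBA and emit the
+ * 16-byte long descriptor with an 8-byte NUMBER OF LOGICAL BLOCKS field.
+ */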
+static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+ if (blocks <= 0xffffffff)
+ return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
+
+ *buf++ = 1; /* LONGLBA */
+ buf += 2;
+ *buf++ = 16;
+ put_unaligned_be64(blocks, buf);
+ buf += 12;
+ put_unaligned_be32(block_size, buf);
+
+ return 17;
+}
+
+static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ char *cdb = cmd->t_task_cdb;
+ unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
+ int type = dev->transport->get_device_type(dev);
+ int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
+ bool dbd = !!(cdb[1] & 0x08);
+ bool llba = ten ? !!(cdb[1] & 0x10) : false;
+ u8 pc = cdb[2] >> 6;
+ u8 page = cdb[2] & 0x3f;
+ u8 subpage = cdb[3];
+ int length = 0;
+ int ret;
+ int i;
+
+ memset(buf, 0, SE_MODE_PAGE_BUF);
+
+ /*
+ * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
+ * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
+ */
+ length = ten ? 3 : 2;
+
+ /* DEVICE-SPECIFIC PARAMETER */
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+ (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ spc_modesense_write_protect(&buf[length], type);
+
+ if ((spc_check_dev_wce(dev)) &&
+ (dev->dev_attrib.emulate_fua_write > 0))
+ spc_modesense_dpofua(&buf[length], type);
+
+ ++length;
+
+ /* BLOCK DESCRIPTOR */
+
+ /*
+ * For now we only include a block descriptor for disk (SBC)
+ * devices; other command sets use a slightly different format.
+ */
+ if (!dbd && type == TYPE_DISK) {
+ u64 blocks = dev->transport->get_blocks(dev);
+ u32 block_size = dev->dev_attrib.block_size;
+
+ if (ten) {
+ if (llba) {
+ length += spc_modesense_long_blockdesc(&buf[length],
+ blocks, block_size);
+ } else {
+ length += 3;
+ length += spc_modesense_blockdesc(&buf[length],
+ blocks, block_size);
+ }
+ } else {
+ length += spc_modesense_blockdesc(&buf[length], blocks,
+ block_size);
+ }
+ } else {
+ if (ten)
+ length += 4;
+ else
+ length += 1;
+ }
+
+ if (page == 0x3f) {
+ if (subpage != 0x00 && subpage != 0xff) {
+ pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
+ /*
+ * Tricky way to say all subpage 00h for
+ * subpage==0, all subpages for subpage==0xff
+ * (and we just checked above that those are
+ * the only two possibilities).
+ */
+ if ((modesense_handlers[i].subpage & ~subpage) == 0) {
+ ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
+ if (!ten && length + ret >= 255)
+ break;
+ length += ret;
+ }
+ }
+
+ goto set_length;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+ if (modesense_handlers[i].page == page &&
+ modesense_handlers[i].subpage == subpage) {
+ length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
+ goto set_length;
+ }
+
+ /*
+ * We don't intend to implement:
+ * - obsolete page 03h "format parameters" (checked by Solaris)
+ */
+ if (page != 0x03)
+ pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+ page, subpage);
+
+ return TCM_UNKNOWN_MODE_PAGE;
+
+set_length:
+ if (ten)
+ put_unaligned_be16(length - 2, buf);
+ else
+ buf[0] = length - 1;
+
+ rbuf = transport_kmap_data_sg(cmd);
+ if (rbuf) {
+ memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
+
+ target_complete_cmd_with_length(cmd, GOOD, length);
+ return 0;
+}
+
+static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
+{
+ char *cdb = cmd->t_task_cdb;
+ bool ten = cdb[0] == MODE_SELECT_10;
+ int off = ten ? 8 : 4;
+ bool pf = !!(cdb[1] & 0x10);
+ u8 page, subpage;
+ unsigned char *buf;
+ unsigned char tbuf[SE_MODE_PAGE_BUF];
+ int length;
+ int ret = 0;
+ int i;
+
+ if (!cmd->data_length) {
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+ }
+
+ if (cmd->data_length < off + 2)
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (!pf) {
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out;
+ }
+
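+	/* Byte 0 of the page: page code in bits 5:0; the SPF bit (0x40) selects the sub_page format */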
+ page = buf[off] & 0x3f;
+ subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+ if (modesense_handlers[i].page == page &&
+ modesense_handlers[i].subpage == subpage) {
+ memset(tbuf, 0, SE_MODE_PAGE_BUF);
+ length = modesense_handlers[i].emulate(cmd, 0, tbuf);
+ goto check_contents;
+ }
+
+ ret = TCM_UNKNOWN_MODE_PAGE;
+ goto out;
+
+check_contents:
+ if (cmd->data_length < off + length) {
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
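+	/*
+	 * The emulated pages report no changeable bits, so MODE SELECT may
+	 * only restate the current values; any difference is rejected.
+	 */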
+ if (memcmp(buf + off, tbuf, length))
+ ret = TCM_INVALID_PARAMETER_LIST;
+
+out:
+ transport_kunmap_data_sg(cmd);
+
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+ return ret;
+}
+
+static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned char *rbuf;
+ u8 ua_asc = 0, ua_ascq = 0;
+ unsigned char buf[SE_SENSE_BUF];
+
+ memset(buf, 0, SE_SENSE_BUF);
+
+ if (cdb[1] & 0x01) {
+ pr_err("REQUEST_SENSE description emulation not"
+ " supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ rbuf = transport_kmap_data_sg(cmd);
+ if (!rbuf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+ /*
+ * CURRENT ERROR, UNIT ATTENTION
+ */
+ buf[0] = 0x70;
+ buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+
+ /*
+ * The Additional Sense Code (ASC) from the UNIT ATTENTION
+ */
+ buf[SPC_ASC_KEY_OFFSET] = ua_asc;
+ buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
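+		/* ADDITIONAL SENSE LENGTH (byte 7): ten more bytes of fixed-format sense follow */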
+ buf[7] = 0x0A;
+ } else {
+ /*
+ * CURRENT ERROR, NO SENSE
+ */
+ buf[0] = 0x70;
+ buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
+
+ /*
+ * NO ADDITIONAL SENSE INFORMATION
+ */
+ buf[SPC_ASC_KEY_OFFSET] = 0x00;
+ buf[7] = 0x0A;
+ }
+
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
+{
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ unsigned char *buf;
+ u32 lun_count = 0, offset = 8, i;
+
+ if (cmd->data_length < 16) {
+ pr_warn("REPORT LUNS allocation length %u too small\n",
+ cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ /*
+ * If no struct se_session pointer is present, this struct se_cmd is
+ * coming via a target_core_mod PASSTHROUGH op, and not through
+ * a $FABRIC_MOD. In that case, report LUN=0 only.
+ */
+ if (!sess) {
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
+ lun_count = 1;
+ goto done;
+ }
+
+ spin_lock_irq(&sess->se_node_acl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = sess->se_node_acl->device_list[i];
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+ /*
+	 * Keep counting so the reported LUN LIST LENGTH stays correct
+	 * even after the initial allocation length has been reached.
+ * See SPC2-R20 7.19.
+ */
+ lun_count++;
+ if ((offset + 8) > cmd->data_length)
+ continue;
+
+ int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ offset += 8;
+ }
+ spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+
+ /*
+ * See SPC3 r07, page 159.
+ */
+done:
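+	/* LUN LIST LENGTH (bytes 0-3) is in bytes, i.e. number of LUNs * 8 */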
+ lun_count *= 8;
+ buf[0] = ((lun_count >> 24) & 0xff);
+ buf[1] = ((lun_count >> 16) & 0xff);
+ buf[2] = ((lun_count >> 8) & 0xff);
+ buf[3] = (lun_count & 0xff);
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
+ return 0;
+}
+EXPORT_SYMBOL(spc_emulate_report_luns);
+
+static sense_reason_t
+spc_emulate_testunitready(struct se_cmd *cmd)
+{
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+sense_reason_t
+spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
+
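+	/* Set *size to the expected data transfer length encoded in each CDB */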
+ switch (cdb[0]) {
+ case MODE_SELECT:
+ *size = cdb[4];
+ cmd->execute_cmd = spc_emulate_modeselect;
+ break;
+ case MODE_SELECT_10:
+ *size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = spc_emulate_modeselect;
+ break;
+ case MODE_SENSE:
+ *size = cdb[4];
+ cmd->execute_cmd = spc_emulate_modesense;
+ break;
+ case MODE_SENSE_10:
+ *size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = spc_emulate_modesense;
+ break;
+ case LOG_SELECT:
+ case LOG_SENSE:
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case PERSISTENT_RESERVE_IN:
+ *size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = target_scsi3_emulate_pr_in;
+ break;
+ case PERSISTENT_RESERVE_OUT:
+ *size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = target_scsi3_emulate_pr_out;
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ if (cdb[0] == RELEASE_10)
+ *size = (cdb[7] << 8) | cdb[8];
+ else
+ *size = cmd->data_length;
+
+ cmd->execute_cmd = target_scsi2_reservation_release;
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ /*
+ * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+ * Assume the passthrough or $FABRIC_MOD will tell us about it.
+ */
+ if (cdb[0] == RESERVE_10)
+ *size = (cdb[7] << 8) | cdb[8];
+ else
+ *size = cmd->data_length;
+
+ cmd->execute_cmd = target_scsi2_reservation_reserve;
+ break;
+ case REQUEST_SENSE:
+ *size = cdb[4];
+ cmd->execute_cmd = spc_emulate_request_sense;
+ break;
+ case INQUIRY:
+ *size = (cdb[3] << 8) + cdb[4];
+
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+ * See spc4r17 section 5.3
+ */
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->execute_cmd = spc_emulate_inquiry;
+ break;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ break;
+ case EXTENDED_COPY:
+ *size = get_unaligned_be32(&cdb[10]);
+ cmd->execute_cmd = target_do_xcopy;
+ break;
+ case RECEIVE_COPY_RESULTS:
+ *size = get_unaligned_be32(&cdb[10]);
+ cmd->execute_cmd = target_do_receive_copy_results;
+ break;
+ case READ_ATTRIBUTE:
+ case WRITE_ATTRIBUTE:
+ *size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
+ *size = (cdb[3] << 8) | cdb[4];
+ break;
+ case WRITE_BUFFER:
+ *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ break;
+ case REPORT_LUNS:
+ cmd->execute_cmd = spc_emulate_report_luns;
+ *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+ * See spc4r17 section 5.3
+ */
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ break;
+ case TEST_UNIT_READY:
+ cmd->execute_cmd = spc_emulate_testunitready;
+ *size = 0;
+ break;
+ case MAINTENANCE_IN:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+ /*
+ * MAINTENANCE_IN from SCC-2
+ * Check for emulated MI_REPORT_TARGET_PGS
+ */
+ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
+ cmd->execute_cmd =
+ target_emulate_report_target_port_groups;
+ }
+ *size = get_unaligned_be32(&cdb[6]);
+ } else {
+ /*
+ * GPCMD_SEND_KEY from multi media commands
+ */
+ *size = get_unaligned_be16(&cdb[8]);
+ }
+ break;
+ case MAINTENANCE_OUT:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+ /*
+ * MAINTENANCE_OUT from SCC-2
+ * Check for emulated MO_SET_TARGET_PGS.
+ */
+ if (cdb[1] == MO_SET_TARGET_PGS) {
+ cmd->execute_cmd =
+ target_emulate_set_target_port_groups;
+ }
+ *size = get_unaligned_be32(&cdb[6]);
+ } else {
+ /*
+ * GPCMD_SEND_KEY from multi media commands
+ */
+ *size = get_unaligned_be16(&cdb[8]);
+ }
+ break;
+ default:
+ pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ " 0x%02x, sending CHECK_CONDITION.\n",
+ cmd->se_tfo->get_fabric_name(), cdb[0]);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(spc_parse_cdb);
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index f8c2d2cc343..03538994d2f 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -1,13 +1,10 @@
/*******************************************************************************
* Filename: target_core_stat.c
*
- * Copyright (c) 2011 Rising Tide Systems
- * Copyright (c) 2011 Linux-iSCSI.org
- *
* Modern ConfigFS group context specific statistics based on original
* target_core_mib.c code
*
- * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
+ * (c) Copyright 2006-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -32,11 +29,9 @@
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/string.h>
-#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/blkdev.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -81,13 +76,9 @@ static struct target_stat_scsi_dev_attribute \
static ssize_t target_stat_scsi_dev_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -96,12 +87,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(inst);
static ssize_t target_stat_scsi_dev_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -110,13 +97,6 @@ DEV_STAT_SCSI_DEV_ATTR_RO(indx);
static ssize_t target_stat_scsi_dev_show_attr_role(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "Target\n");
}
DEV_STAT_SCSI_DEV_ATTR_RO(role);
@@ -124,12 +104,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(role);
static ssize_t target_stat_scsi_dev_show_attr_ports(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
}
@@ -177,13 +153,9 @@ static struct target_stat_scsi_tgt_dev_attribute \
static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -192,12 +164,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -206,13 +174,6 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
@@ -220,60 +181,27 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
- char status[16];
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
- strcpy(status, "activated");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- strcpy(status, "deactivated");
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- strcpy(status, "shutdown");
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- strcpy(status, "offline");
- break;
- default:
- sprintf(status, "unknown(%d)", dev->dev_status);
- break;
- }
-
- return snprintf(page, PAGE_SIZE, "%s\n", status);
+ if (dev->export_count)
+ return snprintf(page, PAGE_SIZE, "activated");
+ else
+ return snprintf(page, PAGE_SIZE, "deactivated");
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int non_accessible_lus;
- if (!dev)
- return -ENODEV;
-
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
+ if (dev->export_count)
non_accessible_lus = 0;
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- case TRANSPORT_DEVICE_SHUTDOWN:
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- default:
+ else
non_accessible_lus = 1;
- break;
- }
return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
}
@@ -282,14 +210,11 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
- if (!dev)
- return -ENODEV;
-
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
@@ -336,13 +261,9 @@ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
static ssize_t target_stat_scsi_lu_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -351,12 +272,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(inst);
static ssize_t target_stat_scsi_lu_show_attr_dev(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -365,13 +282,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev);
static ssize_t target_stat_scsi_lu_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
}
DEV_STAT_SCSI_LU_ATTR_RO(indx);
@@ -379,12 +289,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(indx);
static ssize_t target_stat_scsi_lu_show_attr_lun(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
/* FIXME: scsiLuDefaultLun */
return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
}
@@ -393,35 +297,28 @@ DEV_STAT_SCSI_LU_ATTR_RO(lun);
static ssize_t target_stat_scsi_lu_show_attr_lu_name(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
- if (!dev)
- return -ENODEV;
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
- (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
- dev->se_sub_dev->t10_wwn.unit_serial : "None");
+ (strlen(dev->t10_wwn.unit_serial)) ?
+ dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
static ssize_t target_stat_scsi_lu_show_attr_vend(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.vendor)+1];
/* scsiLuVendorId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
- dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
+ dev->t10_wwn.vendor[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -430,19 +327,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(vend);
static ssize_t target_stat_scsi_lu_show_attr_prod(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.model)+1];
/* scsiLuProductId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
- dev->se_sub_dev->t10_wwn.model[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
+ dev->t10_wwn.model[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -451,19 +344,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(prod);
static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.revision)+1];
/* scsiLuRevisionId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
- dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
+ str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
+ dev->t10_wwn.revision[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -472,12 +361,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(rev);
static ssize_t target_stat_scsi_lu_show_attr_dev_type(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
@@ -488,30 +373,18 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
static ssize_t target_stat_scsi_lu_show_attr_status(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuStatus */
return snprintf(page, PAGE_SIZE, "%s\n",
- (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
- "available" : "notavailable");
+ (dev->export_count) ? "available" : "notavailable");
}
DEV_STAT_SCSI_LU_ATTR_RO(status);
static ssize_t target_stat_scsi_lu_show_attr_state_bit(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* scsiLuState */
return snprintf(page, PAGE_SIZE, "exposed\n");
}
@@ -520,74 +393,53 @@ DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)dev->num_cmds);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->num_cmds));
}
DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->read_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->write_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_resets(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuInResets */
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+ return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_LU_ATTR_RO(resets);
static ssize_t target_stat_scsi_lu_show_attr_full_stat(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* FIXME: scsiLuOutTaskSetFullStatus */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
@@ -596,13 +448,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* FIXME: scsiLuHSInCommands */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
@@ -611,12 +456,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_creation_time(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuCreationTime */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
@@ -663,20 +504,20 @@ static struct config_item_type target_stat_scsi_lu_cit = {
* Called from target_core_configfs.c:target_core_make_subdev() to setup
* the target statistics groups + configfs CITs located in target_core_stat.c
*/
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+void target_stat_setup_dev_default_groups(struct se_device *dev)
{
- struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
+ struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
- dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
- dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
+ dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
+ dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
+ dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL;
}
@@ -954,7 +795,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
struct se_port *sep;
- struct se_portal_group *tpg;
ssize_t ret;
spin_lock(&lun->lun_sep_lock);
@@ -963,7 +803,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
spin_unlock(&lun->lun_sep_lock);
return -ENODEV;
}
- tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
spin_unlock(&lun->lun_sep_lock);
@@ -976,7 +815,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
struct se_port *sep;
- struct se_portal_group *tpg;
ssize_t ret;
spin_lock(&lun->lun_sep_lock);
@@ -985,7 +823,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
spin_unlock(&lun->lun_sep_lock);
return -ENODEV;
}
- tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(sep->sep_stats.rx_data_octets >> 20));
@@ -999,7 +836,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
struct se_port *sep;
- struct se_portal_group *tpg;
ssize_t ret;
spin_lock(&lun->lun_sep_lock);
@@ -1008,7 +844,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
spin_unlock(&lun->lun_sep_lock);
return -ENODEV;
}
- tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(sep->sep_stats.tx_data_octets >> 20));
@@ -1022,7 +857,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
{
struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
struct se_port *sep;
- struct se_portal_group *tpg;
ssize_t ret;
spin_lock(&lun->lun_sep_lock);
@@ -1031,7 +865,6 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
spin_unlock(&lun->lun_sep_lock);
return -ENODEV;
}
- tpg = sep->sep_tpg;
/* FIXME: scsiTgtPortHsInCommands */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
@@ -1170,7 +1003,7 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV;
}
tpg = sep->sep_tpg;
- wwn = &dev->se_sub_dev->t10_wwn;
+ wwn = &dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
@@ -1253,7 +1086,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1275,16 +1108,14 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_lun *lun;
- struct se_portal_group *tpg;
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
}
- tpg = nacl->se_tpg;
lun = deve->se_lun;
/* scsiDeviceIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
@@ -1304,7 +1135,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1327,7 +1158,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1349,7 +1180,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1371,7 +1202,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1393,7 +1224,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1415,7 +1246,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1437,7 +1268,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1459,7 +1290,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1481,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1503,7 +1334,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1525,7 +1356,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1548,7 +1379,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1621,7 +1452,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1643,16 +1474,14 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_lun *lun;
- struct se_portal_group *tpg;
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
}
- tpg = nacl->se_tpg;
lun = deve->se_lun;
/* scsiDeviceIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
@@ -1672,7 +1501,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
@@ -1721,7 +1550,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
ssize_t ret;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[lacl->mapped_lun];
+ deve = nacl->device_list[lacl->mapped_lun];
if (!deve->se_lun || !deve->se_lun_acl) {
spin_unlock_irq(&nacl->device_list_lock);
return -ENODEV;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index a5c2e41debf..f7cd95e8111 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -3,8 +3,7 @@
*
* This file contains SPC-3 task management infrastructure
*
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -40,7 +39,7 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
-struct se_tmr_req *core_tmr_alloc_req(
+int core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
u8 function,
@@ -48,17 +47,20 @@ struct se_tmr_req *core_tmr_alloc_req(
{
struct se_tmr_req *tmr;
- tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);
+ tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
if (!tmr) {
pr_err("Unable to allocate struct se_tmr_req\n");
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
+
+ se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
+ se_cmd->se_tmr_req = tmr;
tmr->task_cmd = se_cmd;
tmr->fabric_tmr_ptr = fabric_tmr_ptr;
tmr->function = function;
INIT_LIST_HEAD(&tmr->tmr_list);
- return tmr;
+ return 0;
}
EXPORT_SYMBOL(core_tmr_alloc_req);
@@ -69,7 +71,7 @@ void core_tmr_release_req(
unsigned long flags;
if (!dev) {
- kmem_cache_free(se_tmr_req_cache, tmr);
+ kfree(tmr);
return;
}
@@ -77,27 +79,25 @@ void core_tmr_release_req(
list_del(&tmr->tmr_list);
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
- kmem_cache_free(se_tmr_req_cache, tmr);
+ kfree(tmr);
}
static void core_tmr_handle_tas_abort(
struct se_node_acl *tmr_nacl,
struct se_cmd *cmd,
- int tas,
- int fe_count)
+ int tas)
{
- if (!fe_count) {
- transport_cmd_finish_abort(cmd, 1);
- return;
- }
+ bool remove = true;
/*
* TASK ABORTED status (TAS) bit support
*/
if ((tmr_nacl &&
- (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+ (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+ remove = false;
transport_send_task_abort(cmd);
+ }
- transport_cmd_finish_abort(cmd, 0);
+ transport_cmd_finish_abort(cmd, remove);
}
static int target_check_cdb_and_preempt(struct list_head *list,
@@ -115,6 +115,66 @@ static int target_check_cdb_and_preempt(struct list_head *list,
return 1;
}
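+/*
+ * ABORT_TASK TMR: locate the referenced command on this session's command
+ * list by task tag, mark it CMD_T_ABORTED and wait for it to quiesce, then
+ * complete the TMR with TMR_FUNCTION_COMPLETE; if the tag is not found or
+ * the command has already completed, answer TMR_TASK_DOES_NOT_EXIST.
+ */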
+void core_tmr_abort_task(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+ struct se_session *se_sess)
+{
+ struct se_cmd *se_cmd, *tmp_cmd;
+ unsigned long flags;
+ int ref_tag;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_cmd_list, se_cmd_list) {
+
+ if (dev != se_cmd->se_dev)
+ continue;
+
+ /* skip se_cmd associated with tmr */
+ if (tmr->task_cmd == se_cmd)
+ continue;
+
+ ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
+ if (tmr->ref_task_tag != ref_tag)
+ continue;
+
+ printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+ se_cmd->se_tfo->get_fabric_name(), ref_tag);
+
+ spin_lock(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & CMD_T_COMPLETE) {
+ printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
+ spin_unlock(&se_cmd->t_state_lock);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ goto out;
+ }
+ se_cmd->transport_state |= CMD_T_ABORTED;
+ spin_unlock(&se_cmd->t_state_lock);
+
+ list_del_init(&se_cmd->se_cmd_list);
+ kref_get(&se_cmd->cmd_kref);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ cancel_work_sync(&se_cmd->work);
+ transport_wait_for_tasks(se_cmd);
+
+ target_put_sess_cmd(se_sess, se_cmd);
+ transport_cmd_finish_abort(se_cmd, true);
+
+ printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+ " ref_tag: %d\n", ref_tag);
+ tmr->response = TMR_FUNCTION_COMPLETE;
+ return;
+ }
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+out:
+ printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %d\n",
+ tmr->ref_task_tag);
+ tmr->response = TMR_TASK_DOES_NOT_EXIST;
+}
+
static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -177,7 +237,7 @@ static void core_tmr_drain_tmr_list(
}
}
-static void core_tmr_drain_task_list(
+static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
struct se_node_acl *tmr_nacl,
@@ -185,12 +245,12 @@ static void core_tmr_drain_task_list(
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
- struct se_cmd *cmd;
- struct se_task *task, *task_tmp;
+ struct se_cmd *cmd, *next;
unsigned long flags;
- int fe_count;
+
/*
- * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+ * Complete outstanding commands with TASK_ABORTED SAM status.
+ *
* This is following sam4r17, section 5.6 Aborting commands, Table 38
* for TMR LUN_RESET:
*
@@ -211,56 +271,40 @@ static void core_tmr_drain_task_list(
* in the Control Mode Page.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
- t_state_list) {
- if (!task->task_se_cmd) {
- pr_err("task->task_se_cmd is NULL!\n");
- continue;
- }
- cmd = task->task_se_cmd;
-
+ list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
+
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
- list_move_tail(&task->t_state_list, &drain_task_list);
- task->t_state_active = false;
- /*
- * Remove from task execute list before processing drain_task_list
- */
- if (!list_empty(&task->t_execute_list))
- __transport_remove_task_from_execute_queue(task, dev);
+ list_move_tail(&cmd->state_list, &drain_task_list);
+ cmd->state_active = false;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
while (!list_empty(&drain_task_list)) {
- task = list_entry(drain_task_list.next, struct se_task, t_state_list);
- list_del(&task->t_state_list);
- cmd = task->task_se_cmd;
+ cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+ list_del(&cmd->state_list);
- pr_debug("LUN_RESET: %s cmd: %p task: %p"
+ pr_debug("LUN_RESET: %s cmd: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
"cdb: 0x%02x\n",
- (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+ (preempt_and_abort_list) ? "Preempt" : "", cmd,
cmd->se_tfo->get_task_tag(cmd), 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->t_task_cdb[0]);
pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
- " t_task_cdbs: %d t_task_cdbs_left: %d"
- " t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d"
+ " -- CMD_T_ACTIVE: %d"
" CMD_T_STOP: %d CMD_T_SENT: %d\n",
cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
- cmd->t_task_list_num,
- atomic_read(&cmd->t_task_cdbs_left),
- atomic_read(&cmd->t_task_cdbs_sent),
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
(cmd->transport_state & CMD_T_SENT) != 0);
@@ -276,86 +320,12 @@ static void core_tmr_drain_task_list(
cancel_work_sync(&cmd->work);
spin_lock_irqsave(&cmd->t_state_lock, flags);
- target_stop_task(task, &flags);
+ target_stop_cmd(cmd, &flags);
- if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
- " t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&cmd->t_task_cdbs_ex_left));
- continue;
- }
- fe_count = atomic_read(&cmd->t_fe_count);
-
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
- " task: %p, t_fe_count: %d dev: %p\n", task,
- fe_count, dev);
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
- continue;
- }
- pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p,"
- " t_fe_count: %d dev: %p\n", task, fe_count, dev);
cmd->transport_state |= CMD_T_ABORTED;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
- }
-}
-
-static void core_tmr_drain_cmd_list(
- struct se_device *dev,
- struct se_cmd *prout_cmd,
- struct se_node_acl *tmr_nacl,
- int tas,
- struct list_head *preempt_and_abort_list)
-{
- LIST_HEAD(drain_cmd_list);
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
- struct se_cmd *cmd, *tcmd;
- unsigned long flags;
- /*
- * Release all commands remaining in the struct se_device cmd queue.
- *
- * This follows the same logic as above for the struct se_device
- * struct se_task state list, where commands are returned with
- * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
- * reference, otherwise the struct se_cmd is released.
- */
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
- /*
- * For PREEMPT_AND_ABORT usage, only process commands
- * with a matching reservation key.
- */
- if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
- continue;
- /*
- * Not aborting PROUT PREEMPT_AND_ABORT CDB..
- */
- if (prout_cmd == cmd)
- continue;
-
- cmd->transport_state &= ~CMD_T_QUEUED;
- atomic_dec(&qobj->queue_cnt);
- list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
- }
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- while (!list_empty(&drain_cmd_list)) {
- cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
- list_del_init(&cmd->se_queue_node);
-
- pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
- " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
- "Preempt" : "", cmd, cmd->t_state,
- atomic_read(&cmd->t_fe_count));
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
- atomic_read(&cmd->t_fe_count));
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
}
}
@@ -379,7 +349,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+ tas = dev->dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
@@ -399,26 +369,23 @@ int core_tmr_lun_reset(
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
- core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
- preempt_and_abort_list);
- core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+ core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list);
+
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
if (!preempt_and_abort_list &&
- (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+ (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock_irq(&dev->stats_lock);
- dev->num_resets++;
- spin_unlock_irq(&dev->stats_lock);
+ atomic_long_inc(&dev->num_resets);
pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 06336ecd872..c036595b17c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -3,10 +3,7 @@
*
* This file contains generic Target Portal Group related functions.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -60,11 +57,10 @@ static void core_clear_initiator_node_from_tpg(
int i;
struct se_dev_entry *deve;
struct se_lun *lun;
- struct se_lun_acl *acl, *acl_tmp;
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &nacl->device_list[i];
+ deve = nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
@@ -78,31 +74,10 @@ static void core_clear_initiator_node_from_tpg(
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
-
- spin_lock(&lun->lun_acl_lock);
- list_for_each_entry_safe(acl, acl_tmp,
- &lun->lun_acl_list, lacl_list) {
- if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
- (acl->mapped_lun == deve->mapped_lun))
- break;
- }
-
- if (!acl) {
- pr_err("Unable to locate struct se_lun_acl for %s,"
- " mapped_lun: %u\n", nacl->initiatorname,
- deve->mapped_lun);
- spin_unlock(&lun->lun_acl_lock);
- spin_lock_irq(&nacl->device_list_lock);
- continue;
- }
-
- list_del(&acl->lacl_list);
- spin_unlock(&lun->lun_acl_lock);
+ core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
- kfree(acl);
}
spin_unlock_irq(&nacl->device_list_lock);
}
@@ -136,17 +111,12 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
spin_lock_irq(&tpg->acl_node_lock);
- list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!strcmp(acl->initiatorname, initiatorname) &&
- !acl->dynamic_node_acl) {
- spin_unlock_irq(&tpg->acl_node_lock);
- return acl;
- }
- }
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
spin_unlock_irq(&tpg->acl_node_lock);
- return NULL;
+ return acl;
}
+EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
/* core_tpg_add_node_to_devs():
*
@@ -163,7 +133,7 @@ void core_tpg_add_node_to_devs(
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = &tpg->tpg_lun_list[i];
+ lun = tpg->tpg_lun_list[i];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
continue;
@@ -175,10 +145,7 @@ void core_tpg_add_node_to_devs(
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
- if (dev->dev_flags & DF_READ_ONLY)
- lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
- else
- lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
/*
* Allow only optical drives to issue R/W in default RO
@@ -197,8 +164,8 @@ void core_tpg_add_node_to_devs(
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
- core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
- lun_access, acl, tpg, 1);
+ core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
@@ -222,6 +189,34 @@ static int core_set_queue_depth_for_node(
return 0;
}
+void array_free(void *array, int n)
+{
+ void **a = array;
+ int i;
+
+ for (i = 0; i < n; i++)
+ kfree(a[i]);
+ kfree(a);
+}
+
+static void *array_zalloc(int n, size_t size, gfp_t flags)
+{
+ void **a;
+ int i;
+
+ a = kzalloc(n * sizeof(void*), flags);
+ if (!a)
+ return NULL;
+ for (i = 0; i < n; i++) {
+ a[i] = kzalloc(size, flags);
+ if (!a[i]) {
+ array_free(a, n);
+ return NULL;
+ }
+ }
+ return a;
+}
+
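
array_zalloc() replaces one large kzalloc() of the whole table with TRANSPORT_MAX_LUNS_PER_TPG small allocations behind a pointer array, avoiding high-order allocation failures for large element counts. A userspace sketch of the same pattern, with calloc()/free() standing in for kzalloc()/kfree():

#include <stdlib.h>

static void array_free_sketch(void **a, int n)
{
	for (int i = 0; i < n; i++)
		free(a[i]);	/* free(NULL) is a no-op on partial fill */
	free(a);
}

static void **array_zalloc_sketch(int n, size_t size)
{
	void **a = calloc(n, sizeof(*a));	/* zeroed pointer table */

	if (!a)
		return NULL;
	for (int i = 0; i < n; i++) {
		a[i] = calloc(1, size);
		if (!a[i]) {
			array_free_sketch(a, n);
			return NULL;
		}
	}
	return a;
}
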
/* core_create_device_list_for_node():
*
*
@@ -231,15 +226,15 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
struct se_dev_entry *deve;
int i;
- nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
- TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+ nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+ sizeof(struct se_dev_entry), GFP_KERNEL);
if (!nacl->device_list) {
pr_err("Unable to allocate memory for"
" struct se_node_acl->device_list\n");
return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &nacl->device_list[i];
+ deve = nacl->device_list[i];
atomic_set(&deve->ua_count, 0);
atomic_set(&deve->pr_ref_count, 0);
@@ -274,6 +269,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
+ kref_init(&acl->acl_kref);
+ init_completion(&acl->acl_free_comp);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
@@ -281,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -298,13 +294,11 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
}
/*
* Here we only create demo-mode MappedLUNs from the active
- * TPG LUNs if the fabric is not explictly asking for
+ * TPG LUNs if the fabric is not explicitly asking for
* tpg_check_demo_mode_login_only() == 1.
*/
- if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
- (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
- do { ; } while (0);
- else
+ if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
+ (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_irq(&tpg->acl_node_lock);
@@ -329,19 +323,19 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
- int i, ret;
+ int i;
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = &tpg->tpg_lun_list[i];
+ lun = tpg->tpg_lun_list[i];
if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
(lun->lun_se_dev == NULL))
continue;
spin_unlock(&tpg->tpg_lun_lock);
- ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+ core_dev_del_lun(tpg, lun->unpacked_lun);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
@@ -402,6 +396,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
+ kref_init(&acl->acl_kref);
+ init_completion(&acl->acl_free_comp);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
@@ -409,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -448,39 +443,47 @@ int core_tpg_del_initiator_node_acl(
struct se_node_acl *acl,
int force)
{
+ LIST_HEAD(sess_list);
struct se_session *sess, *sess_tmp;
- int dynamic_acl = 0;
+ unsigned long flags;
+ int rc;
spin_lock_irq(&tpg->acl_node_lock);
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
- dynamic_acl = 1;
}
list_del(&acl->acl_list);
tpg->num_node_acls--;
spin_unlock_irq(&tpg->acl_node_lock);
- spin_lock_bh(&tpg->session_lock);
- list_for_each_entry_safe(sess, sess_tmp,
- &tpg->tpg_sess_list, sess_list) {
- if (sess->se_node_acl != acl)
- continue;
- /*
- * Determine if the session needs to be closed by our context.
- */
- if (!tpg->se_tpg_tfo->shutdown_session(sess))
+ spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+ acl->acl_stop = 1;
+
+ list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+ sess_acl_list) {
+ if (sess->sess_tearing_down != 0)
continue;
- spin_unlock_bh(&tpg->session_lock);
- /*
- * If the $FABRIC_MOD session for the Initiator Node ACL exists,
- * forcefully shutdown the $FABRIC_MOD session/nexus.
- */
- tpg->se_tpg_tfo->close_session(sess);
+ target_get_session(sess);
+ list_move(&sess->sess_acl_list, &sess_list);
+ }
+ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+ list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
+ list_del(&sess->sess_acl_list);
- spin_lock_bh(&tpg->session_lock);
+ rc = tpg->se_tpg_tfo->shutdown_session(sess);
+ target_put_session(sess);
+ if (!rc)
+ continue;
+ target_put_session(sess);
}
- spin_unlock_bh(&tpg->session_lock);
+ target_put_nacl(acl);
+ /*
+	 * Wait for the last target_put_nacl() to complete in
+	 * target_complete_nacl(), covering any active fabric session
+	 * transport_deregister_session() callbacks.
+ */
+ wait_for_completion(&acl->acl_free_comp);
core_tpg_wait_for_nacl_pr_ref(acl);
core_clear_initiator_node_from_tpg(acl, tpg);
@@ -507,6 +510,7 @@ int core_tpg_set_initiator_node_queue_depth(
{
struct se_session *sess, *init_sess = NULL;
struct se_node_acl *acl;
+ unsigned long flags;
int dynamic_acl = 0;
spin_lock_irq(&tpg->acl_node_lock);
@@ -525,7 +529,7 @@ int core_tpg_set_initiator_node_queue_depth(
}
spin_unlock_irq(&tpg->acl_node_lock);
- spin_lock_bh(&tpg->session_lock);
+ spin_lock_irqsave(&tpg->session_lock, flags);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
if (sess->se_node_acl != acl)
continue;
@@ -537,7 +541,7 @@ int core_tpg_set_initiator_node_queue_depth(
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
- spin_unlock_bh(&tpg->session_lock);
+ spin_unlock_irqrestore(&tpg->session_lock, flags);
spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -567,7 +571,7 @@ int core_tpg_set_initiator_node_queue_depth(
acl->queue_depth = queue_depth;
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- spin_unlock_bh(&tpg->session_lock);
+ spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
@@ -583,7 +587,7 @@ int core_tpg_set_initiator_node_queue_depth(
spin_unlock_irq(&tpg->acl_node_lock);
return -EINVAL;
}
- spin_unlock_bh(&tpg->session_lock);
+ spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
@@ -605,6 +609,36 @@ int core_tpg_set_initiator_node_queue_depth(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+/* core_tpg_set_initiator_node_tag():
+ *
+ * Initiator nodeacl tags are not used internally, but may be used by
+ * userspace to emulate aliases or groups.
+ * Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl,
+ const char *new_tag)
+{
+ if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+ return -EINVAL;
+
+ if (!strncmp("NULL", new_tag, 4)) {
+ acl->acl_tag[0] = '\0';
+ return 0;
+ }
+
+ return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+
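
The tag rules above are compact but worth spelling out: an oversized tag is rejected, the literal string "NULL" clears the tag, and anything else is copied with snprintf() so the stored tag is always NUL-terminated. A userspace model of those rules; MAX_ACL_TAG_SIZE is assumed to be 64 here for the sketch only:

#include <stdio.h>
#include <string.h>

#define MAX_ACL_TAG_SIZE 64	/* assumed value, for the sketch only */

static int set_tag_sketch(char *tag, const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -1;			/* -EINVAL in the kernel */
	if (!strncmp("NULL", new_tag, 4)) {
		tag[0] = '\0';			/* "NULL" clears the tag */
		return 0;
	}
	return snprintf(tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
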
+static void core_tpg_lun_ref_release(struct percpu_ref *ref)
+{
+ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+ complete(&lun->lun_ref_comp);
+}
+
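
core_tpg_lun_ref_release() is the terminal half of a get/put protocol: each in-flight command holds a lun_ref, shutdown kills the ref and drops the base count, and the release callback completes lun_ref_comp once the count reaches zero. A userspace analogue of that shape, with a semaphore standing in for the completion; note the kernel's percpu_ref closes the get-vs-kill race with RCU, which this sketch deliberately does not attempt:

#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>

struct lun_ref_sketch {
	atomic_long count;	/* starts at 1: the base reference */
	atomic_bool dead;
	sem_t ref_comp;		/* stands in for lun->lun_ref_comp */
};

static void lun_ref_init(struct lun_ref_sketch *r)
{
	atomic_init(&r->count, 1);
	atomic_init(&r->dead, false);
	sem_init(&r->ref_comp, 0, 0);
}

static void lun_ref_put(struct lun_ref_sketch *r)
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		sem_post(&r->ref_comp);	/* release-callback analogue */
}

static bool lun_ref_tryget_live(struct lun_ref_sketch *r)
{
	atomic_fetch_add(&r->count, 1);
	if (atomic_load(&r->dead)) {	/* raced with kill: back out */
		lun_ref_put(r);
		return false;
	}
	return true;
}

static void lun_ref_kill_and_drain(struct lun_ref_sketch *r)
{
	atomic_store(&r->dead, true);
	lun_ref_put(r);			/* drop the base reference */
	sem_wait(&r->ref_comp);		/* in-flight I/O has drained */
}
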
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
@@ -618,12 +652,11 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
- ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+ ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
if (ret < 0)
return ret;
@@ -647,8 +680,8 @@ int core_tpg_register(
struct se_lun *lun;
u32 i;
- se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
- TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+ se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+ sizeof(struct se_lun), GFP_KERNEL);
if (!se_tpg->tpg_lun_list) {
pr_err("Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
@@ -656,16 +689,16 @@ int core_tpg_register(
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = &se_tpg->tpg_lun_list[i];
+ lun = se_tpg->tpg_lun_list[i];
lun->unpacked_lun = i;
+ lun->lun_link_magic = SE_LUN_LINK_MAGIC;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
}
se_tpg->se_tpg_type = se_tpg_type;
@@ -682,7 +715,8 @@ int core_tpg_register(
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
- kfree(se_tpg);
+ array_free(se_tpg->tpg_lun_list,
+ TRANSPORT_MAX_LUNS_PER_TPG);
return -ENOMEM;
}
}
@@ -742,12 +776,12 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
core_tpg_release_virtual_lun0(se_tpg);
se_tpg->se_tpg_fabric_ptr = NULL;
- kfree(se_tpg->tpg_lun_list);
+ array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
-struct se_lun *core_tpg_pre_addlun(
+struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
u32 unpacked_lun)
{
@@ -763,7 +797,7 @@ struct se_lun *core_tpg_pre_addlun(
}
spin_lock(&tpg->tpg_lun_lock);
- lun = &tpg->tpg_lun_list[unpacked_lun];
+ lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
pr_err("TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
@@ -777,18 +811,24 @@ struct se_lun *core_tpg_pre_addlun(
return lun;
}
-int core_tpg_post_addlun(
+int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
u32 lun_access,
- void *lun_ptr)
+ struct se_device *dev)
{
int ret;
- ret = core_dev_export(lun_ptr, tpg, lun);
+ ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
if (ret < 0)
return ret;
+ ret = core_dev_export(dev, tpg, lun);
+ if (ret < 0) {
+ percpu_ref_cancel_init(&lun->lun_ref);
+ return ret;
+ }
+
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
@@ -797,14 +837,6 @@ int core_tpg_post_addlun(
return 0;
}
-static void core_tpg_shutdown_lun(
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- core_clear_lun_from_tpg(lun, tpg);
- transport_clear_lun_from_sessions(lun);
-}
-
struct se_lun *core_tpg_pre_dellun(
struct se_portal_group *tpg,
u32 unpacked_lun)
@@ -821,7 +853,7 @@ struct se_lun *core_tpg_pre_dellun(
}
spin_lock(&tpg->tpg_lun_lock);
- lun = &tpg->tpg_lun_list[unpacked_lun];
+ lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %u, ignoring request.\n",
@@ -839,7 +871,8 @@ int core_tpg_post_dellun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
- core_tpg_shutdown_lun(tpg, lun);
+ core_clear_lun_from_tpg(lun, tpg);
+ transport_clear_lun_ref(lun);
core_dev_unexport(lun->lun_se_dev, tpg, lun);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 78ea638aa3b..7fa62fc93e0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3,10 +3,7 @@
*
* This file contains the Generic Target Engine Core.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -31,12 +28,12 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -54,49 +51,35 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-static int sub_api_initialized;
+#define CREATE_TRACE_POINTS
+#include <trace/events/target.h>
static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
-struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
-static int transport_generic_write_pending(struct se_cmd *);
-static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
-static void transport_free_dev_tasks(struct se_cmd *cmd);
-static int transport_generic_get_mem(struct se_cmd *cmd);
-static void transport_put_cmd(struct se_cmd *cmd);
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
-static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *);
+static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
- sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
- 0, NULL);
- if (!se_tmr_req_cache) {
- pr_err("kmem_cache_create() for struct se_tmr_req"
- " failed\n");
- goto out;
- }
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
0, NULL);
if (!se_sess_cache) {
pr_err("kmem_cache_create() for struct se_session"
" failed\n");
- goto out_free_tmr_req_cache;
+ goto out;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
@@ -147,14 +130,36 @@ int init_se_kmem_caches(void)
"mem_t failed\n");
goto out_free_tg_pt_gp_cache;
}
+ t10_alua_lba_map_cache = kmem_cache_create(
+ "t10_alua_lba_map_cache",
+ sizeof(struct t10_alua_lba_map),
+ __alignof__(struct t10_alua_lba_map), 0, NULL);
+ if (!t10_alua_lba_map_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_"
+ "cache failed\n");
+ goto out_free_tg_pt_gp_mem_cache;
+ }
+ t10_alua_lba_map_mem_cache = kmem_cache_create(
+ "t10_alua_lba_map_mem_cache",
+ sizeof(struct t10_alua_lba_map_member),
+ __alignof__(struct t10_alua_lba_map_member), 0, NULL);
+ if (!t10_alua_lba_map_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+ "cache failed\n");
+ goto out_free_lba_map_cache;
+ }
target_completion_wq = alloc_workqueue("target_completion",
WQ_MEM_RECLAIM, 0);
if (!target_completion_wq)
- goto out_free_tg_pt_gp_mem_cache;
+ goto out_free_lba_map_mem_cache;
return 0;
+out_free_lba_map_mem_cache:
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+ kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_mem_cache:
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
@@ -169,8 +174,6 @@ out_free_ua_cache:
kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
-out_free_tmr_req_cache:
- kmem_cache_destroy(se_tmr_req_cache);
out:
return -ENOMEM;
}
@@ -178,7 +181,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
kmem_cache_destroy(t10_pr_reg_cache);
@@ -186,6 +188,8 @@ void release_se_kmem_caches(void)
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kmem_cache_destroy(t10_alua_lba_map_cache);
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
/* This code ensures unique mib indexes are handed out. */
@@ -208,17 +212,10 @@ u32 scsi_get_new_index(scsi_index_t type)
return new_index;
}
-static void transport_init_queue_obj(struct se_queue_obj *qobj)
-{
- atomic_set(&qobj->queue_cnt, 0);
- INIT_LIST_HEAD(&qobj->qobj_list);
- init_waitqueue_head(&qobj->thread_wq);
- spin_lock_init(&qobj->cmd_queue_lock);
-}
-
void transport_subsystem_check_init(void)
{
int ret;
+ static int sub_api_initialized;
if (sub_api_initialized)
return;
@@ -235,15 +232,10 @@ void transport_subsystem_check_init(void)
if (ret != 0)
pr_err("Unable to load target_core_pscsi\n");
- ret = request_module("target_core_stgt");
- if (ret != 0)
- pr_err("Unable to load target_core_stgt\n");
-
sub_api_initialized = 1;
- return;
}
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -258,13 +250,67 @@ struct se_session *transport_init_session(void)
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
+ kref_init(&se_sess->sess_kref);
+ se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
+int transport_alloc_session_tags(struct se_session *se_sess,
+ unsigned int tag_num, unsigned int tag_size)
+{
+ int rc;
+
+ se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!se_sess->sess_cmd_map) {
+ se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
+ if (!se_sess->sess_cmd_map) {
+ pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+ return -ENOMEM;
+ }
+ }
+
+ rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
+ if (rc < 0) {
+ pr_err("Unable to init se_sess->sess_tag_pool,"
+ " tag_num: %u\n", tag_num);
+ if (is_vmalloc_addr(se_sess->sess_cmd_map))
+ vfree(se_sess->sess_cmd_map);
+ else
+ kfree(se_sess->sess_cmd_map);
+ se_sess->sess_cmd_map = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_alloc_session_tags);
+
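
The allocation dance above is a standard kernel idiom for possibly-huge buffers: try kzalloc() quietly (__GFP_NOWARN) for the fast physically-contiguous case, fall back to vzalloc(), and pick the matching free routine with is_vmalloc_addr(). Condensed into a helper pair; later kernels ship the free side as kvfree():

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *big_zalloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	return p ? p : vzalloc(size);	/* vmalloc fallback for large maps */
}

static void big_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
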
+struct se_session *transport_init_session_tags(unsigned int tag_num,
+ unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
+{
+ struct se_session *se_sess;
+ int rc;
+
+ se_sess = transport_init_session(sup_prot_ops);
+ if (IS_ERR(se_sess))
+ return se_sess;
+
+ rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
+ if (rc < 0) {
+ transport_free_session(se_sess);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session_tags);
+
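
A fabric driver sizes the pool from its own queue depth and per-command descriptor, and unwraps failure with IS_ERR()/PTR_ERR(). Hedged usage sketch; MY_QUEUE_DEPTH and struct my_fabric_cmd are invented names for illustration:

#include <linux/err.h>

static struct se_session *my_fabric_make_session(void)
{
	/* one pre-allocated descriptor per outstanding command */
	return transport_init_session_tags(MY_QUEUE_DEPTH,
					   sizeof(struct my_fabric_cmd),
					   TARGET_PROT_NORMAL);
}
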
/*
- * Called with spin_lock_bh(&struct se_portal_group->session_lock called.
+ * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
*/
void __transport_register_session(
struct se_portal_group *se_tpg,
@@ -293,6 +339,8 @@ void __transport_register_session(
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
+ kref_get(&se_nacl->acl_kref);
+
spin_lock_irq(&se_nacl->nacl_sess_lock);
/*
* The se_nacl->nacl_sess pointer will be set to the
@@ -317,12 +365,54 @@ void transport_register_session(
struct se_session *se_sess,
void *fabric_sess_ptr)
{
- spin_lock_bh(&se_tpg->session_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&se_tpg->session_lock, flags);
__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
- spin_unlock_bh(&se_tpg->session_lock);
+ spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);
+static void target_release_session(struct kref *kref)
+{
+ struct se_session *se_sess = container_of(kref,
+ struct se_session, sess_kref);
+ struct se_portal_group *se_tpg = se_sess->se_tpg;
+
+ se_tpg->se_tpg_tfo->close_session(se_sess);
+}
+
+void target_get_session(struct se_session *se_sess)
+{
+ kref_get(&se_sess->sess_kref);
+}
+EXPORT_SYMBOL(target_get_session);
+
+void target_put_session(struct se_session *se_sess)
+{
+ struct se_portal_group *tpg = se_sess->se_tpg;
+
+ if (tpg->se_tpg_tfo->put_session != NULL) {
+ tpg->se_tpg_tfo->put_session(se_sess);
+ return;
+ }
+ kref_put(&se_sess->sess_kref, target_release_session);
+}
+EXPORT_SYMBOL(target_put_session);
+
+static void target_complete_nacl(struct kref *kref)
+{
+ struct se_node_acl *nacl = container_of(kref,
+ struct se_node_acl, acl_kref);
+
+ complete(&nacl->acl_free_comp);
+}
+
+void target_put_nacl(struct se_node_acl *nacl)
+{
+ kref_put(&nacl->acl_kref, target_complete_nacl);
+}
+
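
target_put_nacl() pairs with the kref_get() taken per session in __transport_register_session(): every live session pins the NodeACL, and the last put fires acl_free_comp so core_tpg_del_initiator_node_acl() can stop waiting. Same put-then-wait shape as the lun_ref sketch earlier; userspace analogue with a semaphore as the completion:

#include <semaphore.h>
#include <stdatomic.h>

struct nacl_sketch {
	atomic_int kref;	/* live sessions + 1 base reference */
	sem_t free_comp;	/* stands in for acl->acl_free_comp */
};

static void init_nacl(struct nacl_sketch *n, int nr_sessions)
{
	atomic_init(&n->kref, nr_sessions + 1);
	sem_init(&n->free_comp, 0, 0);
}

static void put_nacl(struct nacl_sketch *n)
{
	if (atomic_fetch_sub(&n->kref, 1) == 1)
		sem_post(&n->free_comp);	/* target_complete_nacl() analogue */
}

static void del_nacl(struct nacl_sketch *n)
{
	put_nacl(n);			/* drop the base reference */
	sem_wait(&n->free_comp);	/* all session refs are gone */
}
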
void transport_deregister_session_configfs(struct se_session *se_sess)
{
struct se_node_acl *se_nacl;
@@ -333,7 +423,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl = se_sess->se_node_acl;
if (se_nacl) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
- list_del(&se_sess->sess_acl_list);
+ if (se_nacl->acl_stop == 0)
+ list_del(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
* Otherwise, set the struct se_session pointer from the tail
@@ -353,6 +444,13 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
+ if (se_sess->sess_cmd_map) {
+ percpu_ida_destroy(&se_sess->sess_tag_pool);
+ if (is_vmalloc_addr(se_sess->sess_cmd_map))
+ vfree(se_sess->sess_cmd_map);
+ else
+ kfree(se_sess->sess_cmd_map);
+ }
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
@@ -360,13 +458,16 @@ EXPORT_SYMBOL(transport_free_session);
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
+ struct target_core_fabric_ops *se_tfo;
struct se_node_acl *se_nacl;
unsigned long flags;
+ bool comp_nacl = true;
if (!se_tpg) {
transport_free_session(se_sess);
return;
}
+ se_tfo = se_tpg->se_tpg_tfo;
spin_lock_irqsave(&se_tpg->session_lock, flags);
list_del(&se_sess->sess_list);
@@ -379,94 +480,77 @@ void transport_deregister_session(struct se_session *se_sess)
* struct se_node_acl if it had been previously dynamically generated.
*/
se_nacl = se_sess->se_node_acl;
- if (se_nacl) {
- spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
- if (se_nacl->dynamic_node_acl) {
- if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
- se_tpg)) {
- list_del(&se_nacl->acl_list);
- se_tpg->num_node_acls--;
- spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
-
- core_tpg_wait_for_nacl_pr_ref(se_nacl);
- core_free_device_list_for_node(se_nacl, se_tpg);
- se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
- se_nacl);
- spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
- }
+
+ spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+ if (se_nacl && se_nacl->dynamic_node_acl) {
+ if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+ list_del(&se_nacl->acl_list);
+ se_tpg->num_node_acls--;
+ spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+ core_tpg_wait_for_nacl_pr_ref(se_nacl);
+ core_free_device_list_for_node(se_nacl, se_tpg);
+ se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
+
+ comp_nacl = false;
+ spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
}
- spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
}
-
- transport_free_session(se_sess);
+ spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
+ /*
+	 * If the last kref is dropped now for an explicit NodeACL, wake the
+	 * sleeping ->acl_free_comp waiter in the configfs
+	 * se_node_acl->acl_group removal context.
+ */
+ if (se_nacl && comp_nacl)
+ target_put_nacl(se_nacl);
+
+ transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
/*
* Called with cmd->t_state_lock held.
*/
-static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_task *task;
unsigned long flags;
if (!dev)
return;
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (task->task_flags & TF_ACTIVE)
- continue;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- if (task->t_state_active) {
- pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
- cmd->se_tfo->get_task_tag(cmd), dev, task);
+ if (cmd->transport_state & CMD_T_BUSY)
+ return;
- list_del(&task->t_state_list);
- atomic_dec(&cmd->t_task_cdbs_ex_left);
- task->t_state_active = false;
- }
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ if (cmd->state_active) {
+ list_del(&cmd->state_list);
+ cmd->state_active = false;
}
-
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-/* transport_cmd_check_stop():
- *
- * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
- * 'transport_off = 2' determines if task_dev_state should be removed.
- *
- * A non-zero u8 t_state sets cmd->t_state.
- * Returns 1 when command is stopped, else 0.
- */
-static int transport_cmd_check_stop(
- struct se_cmd *cmd,
- int transport_off,
- u8 t_state)
+static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ bool write_pending)
{
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- /*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
+ if (write_pending)
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
- cmd->transport_state &= ~CMD_T_ACTIVE;
- if (transport_off == 2)
- transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (remove_from_lists) {
+ target_remove_from_state_list(cmd);
- complete(&cmd->transport_lun_stop_comp);
- return 1;
+ /*
+ * Clear struct se_cmd->se_lun before the handoff to FE.
+ */
+ cmd->se_lun = NULL;
}
+
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
@@ -476,416 +560,186 @@ static int transport_cmd_check_stop(
__func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- if (transport_off == 2)
- transport_all_task_dev_remove_state(cmd);
-
- /*
- * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
- * to FE.
- */
- if (transport_off == 2)
- cmd->se_lun = NULL;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return 1;
}
- if (transport_off) {
- cmd->transport_state &= ~CMD_T_ACTIVE;
- if (transport_off == 2) {
- transport_all_task_dev_remove_state(cmd);
- /*
- * Clear struct se_cmd->se_lun before the transport_off == 2
- * handoff to fabric module.
- */
- cmd->se_lun = NULL;
- /*
- * Some fabric modules like tcm_loop can release
- * their internally allocated I/O reference now and
- * struct se_cmd now.
- *
- * Fabric modules are expected to return '1' here if the
- * se_cmd being passed is released at this point,
- * or zero if not being released.
- */
- if (cmd->se_tfo->check_stop_free != NULL) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- return cmd->se_tfo->check_stop_free(cmd);
- }
+
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ if (remove_from_lists) {
+ /*
+ * Some fabric modules like tcm_loop can release
+ * their internally allocated I/O reference now and
+ * struct se_cmd now.
+ *
+ * Fabric modules are expected to return '1' here if the
+ * se_cmd being passed is released at this point,
+ * or zero if not being released.
+ */
+ if (cmd->se_tfo->check_stop_free != NULL) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return cmd->se_tfo->check_stop_free(cmd);
}
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ }
- return 0;
- } else if (t_state)
- cmd->t_state = t_state;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
return 0;
}
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
- return transport_cmd_check_stop(cmd, 2, 0);
+ return transport_cmd_check_stop(cmd, true, false);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
- unsigned long flags;
if (!lun)
return;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
- cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
- transport_all_task_dev_remove_state(cmd);
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- spin_lock_irqsave(&lun->lun_cmd_lock, flags);
- if (!list_empty(&cmd->se_lun_node))
- list_del_init(&cmd->se_lun_node);
- spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+ if (cmpxchg(&cmd->lun_ref_active, true, false))
+ percpu_ref_put(&lun->lun_ref);
}
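
The cmpxchg() above is a release-once guard: abort handling and normal completion can both reach transport_lun_remove_cmd(), and only the caller that flips lun_ref_active from true to false may drop the reference. The same idiom in portable C11 form (sketch, illustrative names):

#include <stdatomic.h>
#include <stdbool.h>

static void put_ref_once(atomic_bool *active,
			 void (*put)(void *), void *ref)
{
	bool expected = true;

	/* whichever caller wins the swap performs the single put */
	if (atomic_compare_exchange_strong(active, &expected, false))
		put(ref);
}
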
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- if (!cmd->se_tmr_req)
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
+ /*
+ * Allow the fabric driver to unmap any resources before
+ * releasing the descriptor via TFO->release_cmd()
+ */
+ if (remove)
+ cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove) {
- transport_remove_cmd_from_queue(cmd);
+ if (remove)
transport_put_cmd(cmd);
- }
}
-static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
- bool at_head)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
- unsigned long flags;
-
- if (t_state) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->t_state = t_state;
- cmd->transport_state |= CMD_T_ACTIVE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- }
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-
- /* If the cmd is already on the list, remove it before we add it */
- if (!list_empty(&cmd->se_queue_node))
- list_del(&cmd->se_queue_node);
- else
- atomic_inc(&qobj->queue_cnt);
-
- if (at_head)
- list_add(&cmd->se_queue_node, &qobj->qobj_list);
- else
- list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
- cmd->transport_state |= CMD_T_QUEUED;
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- wake_up_interruptible(&qobj->thread_wq);
-}
-
-static struct se_cmd *
-transport_get_cmd_from_queue(struct se_queue_obj *qobj)
-{
- struct se_cmd *cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (list_empty(&qobj->qobj_list)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return NULL;
- }
- cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
-
- cmd->transport_state &= ~CMD_T_QUEUED;
- list_del_init(&cmd->se_queue_node);
- atomic_dec(&qobj->queue_cnt);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- return cmd;
-}
-
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
+static void target_complete_failure_work(struct work_struct *work)
{
- struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
- unsigned long flags;
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (!(cmd->transport_state & CMD_T_QUEUED)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return;
- }
- cmd->transport_state &= ~CMD_T_QUEUED;
- atomic_dec(&qobj->queue_cnt);
- list_del_init(&cmd->se_queue_node);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ transport_generic_request_failure(cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}
/*
- * Completion function used by TCM subsystem plugins (such as FILEIO)
- * for queueing up response from struct se_subsystem_api->do_task()
+ * Used when asking transport to copy Sense Data from the underlying
+ * Linux/SCSI struct scsi_cmnd
*/
-void transport_complete_sync_cache(struct se_cmd *cmd, int good)
+static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
- struct se_task *task = list_entry(cmd->t_task_list.next,
- struct se_task, t_list);
+ struct se_device *dev = cmd->se_dev;
- if (good) {
- cmd->scsi_status = SAM_STAT_GOOD;
- task->task_scsi_status = GOOD;
- } else {
- task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_se_cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ WARN_ON(!cmd->se_lun);
- }
+ if (!dev)
+ return NULL;
- transport_complete_task(task, good);
-}
-EXPORT_SYMBOL(transport_complete_sync_cache);
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+ return NULL;
-static void target_complete_failure_work(struct work_struct *work)
-{
- struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
- transport_generic_request_failure(cmd);
+ pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
+ dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
+ return cmd->sense_buffer;
}
-/* transport_complete_task():
- *
- * Called from interrupt and non interrupt context depending
- * on the transport plugin.
- */
-void transport_complete_task(struct se_task *task, int success)
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
- struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
+ int success = scsi_status == GOOD;
unsigned long flags;
+ cmd->scsi_status = scsi_status;
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
- task->task_flags &= ~TF_ACTIVE;
+ cmd->transport_state &= ~CMD_T_BUSY;
- /*
- * See if any sense data exists, if so set the TASK_SENSE flag.
- * Also check for any other post completion work that needs to be
- * done by the plugins.
- */
if (dev && dev->transport->transport_complete) {
- if (dev->transport->transport_complete(task) != 0) {
- cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
- task->task_flags |= TF_HAS_SENSE;
+ dev->transport->transport_complete(cmd,
+ cmd->t_data_sg,
+ transport_get_sense_buffer(cmd));
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
success = 1;
- }
}
/*
- * See if we are waiting for outstanding struct se_task
- * to complete for an exception condition
+ * See if we are waiting to complete for an exception condition.
*/
- if (task->task_flags & TF_REQUEST_STOP) {
+ if (cmd->transport_state & CMD_T_REQUEST_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&task->task_stop_comp);
+ complete(&cmd->task_stop_comp);
return;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
-
/*
- * Decrement the outstanding t_task_cdbs_left count. The last
- * struct se_task from struct se_cmd will complete itself into the
- * device queue depending upon int success.
+ * Check for case where an explicit ABORT_TASK has been received
+ * and transport_wait_for_tasks() will be waiting for completion..
*/
- if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+ if (cmd->transport_state & CMD_T_ABORTED &&
+ cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ complete_all(&cmd->t_transport_stop_comp);
return;
- }
-
- if (cmd->transport_state & CMD_T_FAILED) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ } else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
- cmd->transport_state |= CMD_T_COMPLETE;
INIT_WORK(&cmd->work, target_complete_ok_work);
}
cmd->t_state = TRANSPORT_COMPLETE;
- cmd->transport_state |= CMD_T_ACTIVE;
+ cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
queue_work(target_completion_wq, &cmd->work);
}
-EXPORT_SYMBOL(transport_complete_task);
-
-/*
- * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
- * struct se_task list are ready to be added to the active execution list
- * struct se_device
+EXPORT_SYMBOL(target_complete_cmd);
- * Called with se_dev_t->execute_task_lock called.
- */
-static inline int transport_add_task_check_sam_attr(
- struct se_task *task,
- struct se_task *task_prev,
- struct se_device *dev)
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- /*
- * No SAM Task attribute emulation enabled, add to tail of
- * execution queue
- */
- if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
- list_add_tail(&task->t_execute_list, &dev->execute_task_list);
- return 0;
- }
- /*
- * HEAD_OF_QUEUE attribute for received CDB, which means
- * the first task that is associated with a struct se_cmd goes to
- * head of the struct se_device->execute_task_list, and task_prev
- * after that for each subsequent task
- */
- if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
- list_add(&task->t_execute_list,
- (task_prev != NULL) ?
- &task_prev->t_execute_list :
- &dev->execute_task_list);
-
- pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
- " in execution queue\n",
- task->task_se_cmd->t_task_cdb[0]);
- return 1;
- }
- /*
- * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
- * transitioned from Dermant -> Active state, and are added to the end
- * of the struct se_device->execute_task_list
- */
- list_add_tail(&task->t_execute_list, &dev->execute_task_list);
- return 0;
-}
-
-/* __transport_add_task_to_execute_queue():
- *
- * Called with se_dev_t->execute_task_lock called.
- */
-static void __transport_add_task_to_execute_queue(
- struct se_task *task,
- struct se_task *task_prev,
- struct se_device *dev)
-{
- int head_of_queue;
-
- head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
- atomic_inc(&dev->execute_tasks);
-
- if (task->t_state_active)
- return;
- /*
- * Determine if this task needs to go to HEAD_OF_QUEUE for the
- * state list as well. Running with SAM Task Attribute emulation
- * will always return head_of_queue == 0 here
- */
- if (head_of_queue)
- list_add(&task->t_state_list, (task_prev) ?
- &task_prev->t_state_list :
- &dev->state_task_list);
- else
- list_add_tail(&task->t_state_list, &dev->state_task_list);
-
- task->t_state_active = true;
-
- pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
- task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
- task, dev);
-}
-
-static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_task *task;
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- spin_lock(&dev->execute_task_lock);
- if (!task->t_state_active) {
- list_add_tail(&task->t_state_list,
- &dev->state_task_list);
- task->t_state_active = true;
-
- pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
- task->task_se_cmd->se_tfo->get_task_tag(
- task->task_se_cmd), task, dev);
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ cmd->residual_count += cmd->data_length - length;
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = cmd->data_length - length;
}
- spin_unlock(&dev->execute_task_lock);
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_task *task, *task_prev = NULL;
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (!list_empty(&task->t_execute_list))
- continue;
- /*
- * __transport_add_task_to_execute_queue() handles the
- * SAM Task Attribute emulation if enabled
- */
- __transport_add_task_to_execute_queue(task, task_prev, dev);
- task_prev = task;
+ cmd->data_length = length;
}
-}
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
-{
- unsigned long flags;
- struct se_device *dev = cmd->se_dev;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- __transport_add_tasks_from_cmd(cmd);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ target_complete_cmd(cmd, scsi_status);
}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
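
The underflow bookkeeping above is easiest to see with numbers: a backend that builds a 36-byte standard INQUIRY payload against a 255-byte allocation length completes with data_length = 36 and residual_count = 219. Runnable restatement of that first-time underflow path (illustrative values only):

#include <assert.h>

int main(void)
{
	unsigned int data_length = 255;	/* initiator's expected transfer   */
	unsigned int length = 36;	/* bytes the backend actually built */
	unsigned int residual_count = 0;

	if (length < data_length) {	/* GOOD status + short payload */
		residual_count = data_length - length;
		data_length = length;
	}
	assert(residual_count == 219 && data_length == 36);
	return 0;
}
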
-void __transport_remove_task_from_execute_queue(struct se_task *task,
- struct se_device *dev)
-{
- list_del_init(&task->t_execute_list);
- atomic_dec(&dev->execute_tasks);
-}
-
-static void transport_remove_task_from_execute_queue(
- struct se_task *task,
- struct se_device *dev)
+static void target_add_to_state_list(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
unsigned long flags;
- if (WARN_ON(list_empty(&task->t_execute_list)))
- return;
-
spin_lock_irqsave(&dev->execute_task_lock, flags);
- __transport_remove_task_from_execute_queue(task, dev);
+ if (!cmd->state_active) {
+ list_add_tail(&cmd->state_list, &dev->state_list);
+ cmd->state_active = true;
+ }
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
/*
* Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
*/
+static void transport_write_pending_qf(struct se_cmd *cmd);
+static void transport_complete_qf(struct se_cmd *cmd);
-static void target_qf_do_work(struct work_struct *work)
+void target_qf_do_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
qf_work_queue);
@@ -899,7 +753,7 @@ static void target_qf_do_work(struct work_struct *work)
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
atomic_dec(&dev->dev_qf_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -907,7 +761,10 @@ static void target_qf_do_work(struct work_struct *work)
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
- transport_add_cmd_to_queue(cmd, cmd->t_state, true);
+ if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
+ transport_write_pending_qf(cmd);
+ else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+ transport_complete_qf(cmd);
}
}
@@ -935,29 +792,15 @@ void transport_dump_dev_state(
int *bl)
{
*bl += sprintf(b + *bl, "Status: ");
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
+ if (dev->export_count)
*bl += sprintf(b + *bl, "ACTIVATED");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
+ else
*bl += sprintf(b + *bl, "DEACTIVATED");
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- *bl += sprintf(b + *bl, "SHUTDOWN");
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- *bl += sprintf(b + *bl, "OFFLINE");
- break;
- default:
- *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
- break;
- }
- *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
- atomic_read(&dev->execute_tasks), dev->queue_depth);
- *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
- dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
+ *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
+ *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
+ dev->dev_attrib.block_size,
+ dev->dev_attrib.hw_max_sectors);
*bl += sprintf(b + *bl, " ");
}
@@ -1151,15 +994,18 @@ int transport_dump_vpd_ident(
switch (vpd->device_identifier_code_set) {
case 0x01: /* Binary */
- sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD Binary Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x02: /* ASCII */
- sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD ASCII Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x03: /* UTF-8 */
- sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD UTF-8 Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
default:
@@ -1181,7 +1027,7 @@ int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
static const char hex_str[] = "0123456789abcdef";
- int j = 0, i = 4; /* offset to start of the identifer */
+ int j = 0, i = 4; /* offset to start of the identifier */
/*
* The VPD Code Set (encoding)
@@ -1214,238 +1060,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
}
EXPORT_SYMBOL(transport_set_vpd_ident);
-static void core_setup_task_attr_emulation(struct se_device *dev)
-{
- /*
- * If this device is from Target_Core_Mod/pSCSI, disable the
- * SAM Task Attribute emulation.
- *
- * This is currently not available in upsream Linux/SCSI Target
- * mode code, and is assumed to be disabled while using TCM/pSCSI.
- */
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
- return;
- }
-
- dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
- pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
- " device\n", dev->transport->name,
- dev->transport->get_device_rev(dev));
-}
-
-static void scsi_dump_inquiry(struct se_device *dev)
-{
- struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
- char buf[17];
- int i, device_type;
- /*
- * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
- */
- for (i = 0; i < 8; i++)
- if (wwn->vendor[i] >= 0x20)
- buf[i] = wwn->vendor[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Vendor: %s\n", buf);
-
- for (i = 0; i < 16; i++)
- if (wwn->model[i] >= 0x20)
- buf[i] = wwn->model[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Model: %s\n", buf);
-
- for (i = 0; i < 4; i++)
- if (wwn->revision[i] >= 0x20)
- buf[i] = wwn->revision[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Revision: %s\n", buf);
-
- device_type = dev->transport->get_device_type(dev);
- pr_debug(" Type: %s ", scsi_device_type(device_type));
- pr_debug(" ANSI SCSI revision: %02x\n",
- dev->transport->get_device_rev(dev));
-}
-
-struct se_device *transport_add_device_to_core_hba(
- struct se_hba *hba,
- struct se_subsystem_api *transport,
- struct se_subsystem_dev *se_dev,
- u32 device_flags,
- void *transport_dev,
- struct se_dev_limits *dev_limits,
- const char *inquiry_prod,
- const char *inquiry_rev)
+sense_reason_t
+target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
- int force_pt;
- struct se_device *dev;
-
- dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
- if (!dev) {
- pr_err("Unable to allocate memory for se_dev_t\n");
- return NULL;
- }
+ struct se_device *dev = cmd->se_dev;
- transport_init_queue_obj(&dev->dev_queue_obj);
- dev->dev_flags = device_flags;
- dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_ptr = transport_dev;
- dev->se_hba = hba;
- dev->se_sub_dev = se_dev;
- dev->transport = transport;
- INIT_LIST_HEAD(&dev->dev_list);
- INIT_LIST_HEAD(&dev->dev_sep_list);
- INIT_LIST_HEAD(&dev->dev_tmr_list);
- INIT_LIST_HEAD(&dev->execute_task_list);
- INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->state_task_list);
- INIT_LIST_HEAD(&dev->qf_cmd_list);
- spin_lock_init(&dev->execute_task_lock);
- spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->dev_reservation_lock);
- spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->se_port_lock);
- spin_lock_init(&dev->se_tmr_lock);
- spin_lock_init(&dev->qf_cmd_lock);
- atomic_set(&dev->dev_ordered_id, 0);
-
- se_dev_set_default_attribs(dev, dev_limits);
-
- dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
- dev->creation_time = get_jiffies_64();
- spin_lock_init(&dev->stats_lock);
-
- spin_lock(&hba->device_lock);
- list_add_tail(&dev->dev_list, &hba->hba_dev_list);
- hba->dev_count++;
- spin_unlock(&hba->device_lock);
- /*
- * Setup the SAM Task Attribute emulation for struct se_device
- */
- core_setup_task_attr_emulation(dev);
- /*
- * Force PR and ALUA passthrough emulation with internal object use.
- */
- force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
- /*
- * Setup the Reservations infrastructure for struct se_device
- */
- core_setup_reservations(dev, force_pt);
- /*
- * Setup the Asymmetric Logical Unit Assignment for struct se_device
- */
- if (core_setup_alua(dev, force_pt) < 0)
- goto out;
+ if (cmd->unknown_data_length) {
+ cmd->data_length = size;
+ } else if (size != cmd->data_length) {
+ pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+ " %u does not match SCSI CDB Length: %u for SAM Opcode:"
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+ cmd->data_length, size, cmd->t_task_cdb[0]);
- /*
- * Startup the struct se_device processing thread
- */
- dev->process_thread = kthread_run(transport_processing_thread, dev,
- "LIO_%s", dev->transport->name);
- if (IS_ERR(dev->process_thread)) {
- pr_err("Unable to create kthread: LIO_%s\n",
- dev->transport->name);
- goto out;
- }
- /*
- * Setup work_queue for QUEUE_FULL
- */
- INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
- /*
- * Preload the initial INQUIRY const values if we are doing
- * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
- * passthrough because this is being provided by the backend LLD.
- * This is required so that transport_get_inquiry() copies these
- * originals once back into DEV_T10_WWN(dev) for the virtual device
- * setup.
- */
- if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (!inquiry_prod || !inquiry_rev) {
- pr_err("All non TCM/pSCSI plugins require"
- " INQUIRY consts\n");
- goto out;
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ pr_err("Rejecting underflow/overflow"
+ " WRITE data\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Reject READ_* or WRITE_* with overflow/underflow for
+ * type SCF_SCSI_DATA_CDB.
+ */
+ if (dev->dev_attrib.block_size != 512) {
+ pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
+ " CDB on non 512-byte sector setup subsystem"
+ " plugin: %s\n", dev->transport->name);
+ /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * For the overflow case keep the existing fabric provided
+ * ->data_length. Otherwise for the underflow case, reset
+ * ->data_length to the smaller SCSI expected data transfer
+ * length.
+ */
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
+ cmd->data_length = size;
}
-
- strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
- strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
- strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
- }
- scsi_dump_inquiry(dev);
-
- return dev;
-out:
- kthread_stop(dev->process_thread);
-
- spin_lock(&hba->device_lock);
- list_del(&dev->dev_list);
- hba->dev_count--;
- spin_unlock(&hba->device_lock);
-
- se_release_vpd_for_dev(dev);
-
- kfree(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(transport_add_device_to_core_hba);
-
-/* transport_generic_prepare_cdb():
- *
- * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
- * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
- * The point of this is since we are mapping iSCSI LUNs to
- * SCSI Target IDs having a non-zero LUN in the CDB will throw the
- * devices and HBAs for a loop.
- */
-static inline void transport_generic_prepare_cdb(
- unsigned char *cdb)
-{
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
- }
-}
-
-static struct se_task *
-transport_generic_get_task(struct se_cmd *cmd,
- enum dma_data_direction data_direction)
-{
- struct se_task *task;
- struct se_device *dev = cmd->se_dev;
-
- task = dev->transport->alloc_task(cmd->t_task_cdb);
- if (!task) {
- pr_err("Unable to allocate struct se_task\n");
- return NULL;
}
- INIT_LIST_HEAD(&task->t_list);
- INIT_LIST_HEAD(&task->t_execute_list);
- INIT_LIST_HEAD(&task->t_state_list);
- init_completion(&task->task_stop_comp);
- task->task_se_cmd = cmd;
- task->task_data_direction = data_direction;
+ return 0;
- return task;
}
-static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
-
/*
* Used by fabric modules containing a local struct se_cmd within their
* fabric dependent per I/O descriptor.
@@ -1459,17 +1122,15 @@ void transport_init_se_cmd(
int task_attr,
unsigned char *sense_buffer)
{
- INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
- INIT_LIST_HEAD(&cmd->t_task_list);
- init_completion(&cmd->transport_lun_fe_stop_comp);
- init_completion(&cmd->transport_lun_stop_comp);
+ INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
init_completion(&cmd->cmd_wait_comp);
+ init_completion(&cmd->task_stop_comp);
spin_lock_init(&cmd->t_state_lock);
+ kref_init(&cmd->cmd_kref);
cmd->transport_state = CMD_T_DEV_ACTIVE;
cmd->se_tfo = tfo;
@@ -1478,46 +1139,46 @@ void transport_init_se_cmd(
cmd->data_direction = data_direction;
cmd->sam_task_attr = task_attr;
cmd->sense_buffer = sense_buffer;
+
+ cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);
-static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+static sense_reason_t
+transport_check_alloc_task_attr(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+
/*
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
if (cmd->sam_task_attr == MSG_ACA_TAG) {
pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
- smp_mb__after_atomic_inc();
+ cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
+ smp_mb__after_atomic();
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
- cmd->se_dev->transport->name);
+ dev->transport->name);
return 0;
}
-/* transport_generic_allocate_tasks():
- *
- * Called from fabric RX Thread.
- */
-int transport_generic_allocate_tasks(
- struct se_cmd *cmd,
- unsigned char *cdb)
+sense_reason_t
+target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
- int ret;
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
- transport_generic_prepare_cdb(cdb);
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1526,9 +1187,7 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1543,10 +1202,7 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_OUT_OF_RESOURCES;
}
} else
cmd->t_task_cdb = &cmd->__t_task_cdb[0];
@@ -1554,30 +1210,43 @@ int transport_generic_allocate_tasks(
* Copy the original CDB into cmd->
*/
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
+
+ trace_target_sequencer_start(cmd);
+
/*
- * Setup the received CDB based on SCSI defined opcodes and
- * perform unit attention, persistent reservations and ALUA
- * checks for virtual device backends. The cmd->t_task_cdb
- * pointer is expected to be setup before we reach this point.
+ * Check for an existing UNIT ATTENTION condition
*/
- ret = transport_generic_cmd_sequencer(cmd, cdb);
- if (ret < 0)
+ ret = target_scsi3_ua_check(cmd);
+ if (ret)
+ return ret;
+
+ ret = target_alua_state_check(cmd);
+ if (ret)
+ return ret;
+
+ ret = target_check_reservation(cmd);
+ if (ret) {
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
return ret;
- /*
- * Check for SAM Task Attribute Emulation
- */
- if (transport_check_alloc_task_attr(cmd) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
}
+
+ ret = dev->transport->parse_cdb(cmd);
+ if (ret)
+ return ret;
+
+ ret = transport_check_alloc_task_attr(cmd);
+ if (ret)
+ return ret;
+
+ cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
spin_unlock(&cmd->se_lun->lun_sep_lock);
return 0;
}
-EXPORT_SYMBOL(transport_generic_allocate_tasks);
+EXPORT_SYMBOL(target_setup_cmd_from_cdb);
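As a rough sketch of the new convention introduced here (not part of this patch; my_backend_parse_cdb and its opcode handling are illustrative), a backend ->parse_cdb() hook called from target_setup_cmd_from_cdb() returns 0 on success and a sense_reason_t on failure:

	static sense_reason_t my_backend_parse_cdb(struct se_cmd *cmd)
	{
		switch (cmd->t_task_cdb[0]) {
		case READ_10:
			/* set up transfer length and cmd->execute_cmd here */
			return 0;
		default:
			/* reported to the initiator as a CHECK_CONDITION */
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
	}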
/*
* Used by fabric module frontends to queue tasks directly.
@@ -1586,7 +1255,7 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks);
int transport_handle_cdb_direct(
struct se_cmd *cmd)
{
- int ret;
+ sense_reason_t ret;
if (!cmd->se_lun) {
dump_stack();
@@ -1600,10 +1269,9 @@ int transport_handle_cdb_direct(
return -EINVAL;
}
/*
- * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
- * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
- * in existing usage to ensure that outstanding descriptors are handled
- * correctly during shutdown via transport_wait_for_tasks()
+ * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
+ * outstanding descriptors are handled correctly during shutdown via
+ * transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
@@ -1617,15 +1285,44 @@ int transport_handle_cdb_direct(
 * and call transport_generic_request_failure() if necessary.
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0)
- transport_generic_request_failure(cmd);
-
+ if (ret)
+ transport_generic_request_failure(cmd, ret);
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
-/**
- * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+sense_reason_t
+transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+ u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+{
+ if (!sgl || !sgl_count)
+ return 0;
+
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
+
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
+ }
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ return 0;
+}
+
+/*
+ * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
+ * se_cmd + use pre-allocated SGL memory.
*
* @se_cmd: command descriptor to submit
* @se_sess: associated se_sess for endpoint
@@ -1636,16 +1333,30 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
 * @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ *
+ * Returns non-zero to signal active I/O shutdown failure. All other
+ * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
+ * but will still return zero here.
*
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
- **/
-void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ */
+int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
- u32 data_length, int task_attr, int data_dir, int flags)
+ u32 data_length, int task_attr, int data_dir, int flags,
+ struct scatterlist *sgl, u32 sgl_count,
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
struct se_portal_group *se_tpg;
- int rc;
+ sense_reason_t rc;
+ int ret;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
@@ -1658,13 +1369,17 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
data_length, data_dir, task_attr, sense);
+ if (flags & TARGET_SCF_UNKNOWN_SIZE)
+ se_cmd->unknown_data_length = 1;
/*
* Obtain struct se_cmd->cmd_kref reference and add new cmd to
* se_sess->sess_cmd_list. A second kref_get here is necessary
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (ret)
+ return ret;
/*
* Signal bidirectional data payloads to target-core
*/
@@ -1673,180 +1388,221 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
- if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
+ rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+ if (rc) {
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
target_put_sess_cmd(se_sess, se_cmd);
- return;
+ return 0;
}
+
+ rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+ if (rc != 0) {
+ transport_generic_request_failure(se_cmd, rc);
+ return 0;
+ }
+
/*
- * Sanitize CDBs via transport_generic_cmd_sequencer() and
- * allocate the necessary tasks to complete the received CDB+data
+ * Save pointers for SGLs containing protection information,
+ * if present.
*/
- rc = transport_generic_allocate_tasks(se_cmd, cdb);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd);
- return;
+ if (sgl_prot_count) {
+ se_cmd->t_prot_sg = sgl_prot;
+ se_cmd->t_prot_nents = sgl_prot_count;
}
+
/*
- * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
- * for immediate execution of READs, otherwise wait for
- * transport_generic_handle_data() to be called for WRITEs
- * when fabric has filled the incoming buffer.
+ * When a non-zero sgl_count has been passed, perform SGL passthrough
+ * mapping for pre-allocated fabric memory instead of having target
+ * core perform an internal SGL allocation.
*/
- transport_handle_cdb_direct(se_cmd);
- return;
-}
-EXPORT_SYMBOL(target_submit_cmd);
+ if (sgl_count != 0) {
+ BUG_ON(!sgl);
-/*
- * Used by fabric module frontends defining a TFO->new_cmd_map() caller
- * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
- * complete setup in TCM process context w/ TFO->new_cmd_map().
- */
-int transport_generic_handle_cdb_map(
- struct se_cmd *cmd)
-{
- if (!cmd->se_lun) {
- dump_stack();
- pr_err("cmd->se_lun is NULL\n");
- return -EINVAL;
+ /*
+ * A work-around for tcm_loop, as some userspace code via
+ * scsi-generic does not memset its associated read buffers,
+ * so go ahead and do that here for non-data CDBs. Also
+ * note that this is currently guaranteed to be a single SGL
+ * for this case by target core in target_setup_cmd_from_cdb()
+ * -> transport_generic_cmd_sequencer().
+ */
+ if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+ se_cmd->data_direction == DMA_FROM_DEVICE) {
+ unsigned char *buf = NULL;
+
+ if (sgl)
+ buf = kmap(sg_page(sgl)) + sgl->offset;
+
+ if (buf) {
+ memset(buf, 0, sgl->length);
+ kunmap(sg_page(sgl));
+ }
+ }
+
+ rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+ sgl_bidi, sgl_bidi_count);
+ if (rc != 0) {
+ transport_generic_request_failure(se_cmd, rc);
+ return 0;
+ }
}
- transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
+ /*
+ * Check if we need to delay processing because of ALUA
+ * Active/NonOptimized primary access state.
+ */
+ core_alua_check_nonop_delay(se_cmd);
+
+ transport_handle_cdb_direct(se_cmd);
return 0;
}
-EXPORT_SYMBOL(transport_generic_handle_cdb_map);
+EXPORT_SYMBOL(target_submit_cmd_map_sgls);
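A rough caller-side sketch, assuming a hypothetical fabric descriptor (my_fabric_cmd and the my_ names are illustrative, not part of this patch) that maps one pre-allocated page instead of relying on internal target-core SGL allocation:

	struct my_fabric_cmd {			/* hypothetical fabric descriptor */
		struct se_cmd se_cmd;
		struct scatterlist sg;
		struct page *page;
		unsigned char sense[TRANSPORT_SENSE_BUFFER];
	};

	static int my_fabric_submit_read(struct se_session *se_sess,
					 struct my_fabric_cmd *fc,
					 unsigned char *cdb,
					 u32 unpacked_lun, u32 len)
	{
		sg_init_table(&fc->sg, 1);
		sg_set_page(&fc->sg, fc->page, len, 0);

		/* Non-zero only for active I/O shutdown */
		return target_submit_cmd_map_sgls(&fc->se_cmd, se_sess, cdb,
				fc->sense, unpacked_lun, len, MSG_SIMPLE_TAG,
				DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF,
				&fc->sg, 1, NULL, 0, NULL, 0);
	}

Per the comment above, all other setup failures come back to the initiator as a CHECK_CONDITION while the call itself returns zero.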
-/* transport_generic_handle_data():
+/*
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
*
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
*
+ * Returns non-zero to signal active I/O shutdown failure. All other
+ * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
+ * but will still return zero here.
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ *
+ * It also assumes internal target core SGL memory allocation.
*/
-int transport_generic_handle_data(
- struct se_cmd *cmd)
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
{
- /*
- * For the software fabric case, then we assume the nexus is being
- * failed/shutdown when signals are pending from the kthread context
- * caller, so we return a failure. For the HW target mode case running
- * in interrupt code, the signal_pending() check is skipped.
- */
- if (!in_interrupt() && signal_pending(current))
- return -EPERM;
- /*
- * If the received CDB has aleady been ABORTED by the generic
- * target engine, we now call transport_check_aborted_status()
- * to queue any delated TASK_ABORTED status for the received CDB to the
- * fabric module as we are expecting no further incoming DATA OUT
- * sequences at this point.
- */
- if (transport_check_aborted_status(cmd, 1) != 0)
- return 0;
+ return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
+ unpacked_lun, data_length, task_attr, data_dir,
+ flags, NULL, 0, NULL, 0, NULL, 0);
+}
+EXPORT_SYMBOL(target_submit_cmd);
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
- return 0;
+static void target_complete_tmr_failure(struct work_struct *work)
+{
+ struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
+
+ se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
+ transport_cmd_check_stop_to_fabric(se_cmd);
}
-EXPORT_SYMBOL(transport_generic_handle_data);
-/* transport_generic_handle_tmr():
+/**
+ * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
+ * for TMR CDBs
*
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @fabric_tmr_ptr: fabric-specific context for the TMR request
+ * @tm_type: Type of TM request
+ * @gfp: gfp type for caller
+ * @tag: referenced task tag for TMR_ABORT_TASK
+ * @flags: submit cmd flags
*
- */
-int transport_generic_handle_tmr(
- struct se_cmd *cmd)
+ * Callable from all contexts.
+ */
+
+int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *sense, u32 unpacked_lun,
+ void *fabric_tmr_ptr, unsigned char tm_type,
+ gfp_t gfp, unsigned int tag, int flags)
{
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
+ struct se_portal_group *se_tpg;
+ int ret;
+
+ se_tpg = se_sess->se_tpg;
+ BUG_ON(!se_tpg);
+
+ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+ 0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+ /*
+ * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
+ * allocation failure.
+ */
+ ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
+ if (ret < 0)
+ return -ENOMEM;
+
+ if (tm_type == TMR_ABORT_TASK)
+ se_cmd->se_tmr_req->ref_task_tag = tag;
+
+ /* See target_submit_cmd for commentary */
+ ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (ret) {
+ core_tmr_release_req(se_cmd->se_tmr_req);
+ return ret;
+ }
+
+ ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
+ if (ret) {
+ /*
+ * For callback during failure handling, push this work off
+ * to process context with TMR_LUN_DOES_NOT_EXIST status.
+ */
+ INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
+ schedule_work(&se_cmd->work);
+ return 0;
+ }
+ transport_generic_handle_tmr(se_cmd);
return 0;
}
-EXPORT_SYMBOL(transport_generic_handle_tmr);
+EXPORT_SYMBOL(target_submit_tmr);
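A hedged usage sketch for the TMR path, aborting an earlier command by tag (reusing the hypothetical my_fabric_cmd descriptor sketched above; not part of this patch):

	static int my_fabric_abort_task(struct se_session *se_sess,
					struct my_fabric_cmd *fc,
					u32 unpacked_lun,
					unsigned int aborted_tag)
	{
		/* Non-zero only when active I/O shutdown is in progress */
		return target_submit_tmr(&fc->se_cmd, se_sess, fc->sense,
					 unpacked_lun, NULL, TMR_ABORT_TASK,
					 GFP_KERNEL, aborted_tag,
					 TARGET_SCF_ACK_KREF);
	}

A failed LUN lookup is not reported here; as in the function above, it is pushed to process context and completed via queue_tm_rsp() with TMR_LUN_DOES_NOT_EXIST.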
/*
- * If the task is active, request it to be stopped and sleep until it
+ * If the cmd is active, request it to be stopped and sleep until it
* has completed.
*/
-bool target_stop_task(struct se_task *task, unsigned long *flags)
+bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
{
- struct se_cmd *cmd = task->task_se_cmd;
bool was_active = false;
- if (task->task_flags & TF_ACTIVE) {
- task->task_flags |= TF_REQUEST_STOP;
+ if (cmd->transport_state & CMD_T_BUSY) {
+ cmd->transport_state |= CMD_T_REQUEST_STOP;
spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
- pr_debug("Task %p waiting to complete\n", task);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("Task %p stopped successfully\n", task);
+ pr_debug("cmd %p waiting to complete\n", cmd);
+ wait_for_completion(&cmd->task_stop_comp);
+ pr_debug("cmd %p stopped successfully\n", cmd);
spin_lock_irqsave(&cmd->t_state_lock, *flags);
- atomic_dec(&cmd->t_task_cdbs_left);
- task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
+ cmd->transport_state &= ~CMD_T_REQUEST_STOP;
+ cmd->transport_state &= ~CMD_T_BUSY;
was_active = true;
}
return was_active;
}
-static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
-{
- struct se_task *task, *task_tmp;
- unsigned long flags;
- int ret = 0;
-
- pr_debug("ITT[0x%08x] - Stopping tasks\n",
- cmd->se_tfo->get_task_tag(cmd));
-
- /*
- * No tasks remain in the execution queue
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_for_each_entry_safe(task, task_tmp,
- &cmd->t_task_list, t_list) {
- pr_debug("Processing task %p\n", task);
- /*
- * If the struct se_task has not been sent and is not active,
- * remove the struct se_task from the execution queue.
- */
- if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- transport_remove_task_from_execute_queue(task,
- cmd->se_dev);
-
- pr_debug("Task %p removed from execute queue\n", task);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- continue;
- }
-
- if (!target_stop_task(task, &flags)) {
- pr_debug("Task %p - did nothing\n", task);
- ret++;
- }
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- return ret;
-}
-
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(struct se_cmd *cmd)
+void transport_generic_request_failure(struct se_cmd *cmd,
+ sense_reason_t sense_reason)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->scsi_sense_reason);
- pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
- " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
- " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
- cmd->t_task_list_num,
- atomic_read(&cmd->t_task_cdbs_left),
- atomic_read(&cmd->t_task_cdbs_sent),
- atomic_read(&cmd->t_task_cdbs_ex_left),
+ cmd->t_state, sense_reason);
+ pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
(cmd->transport_state & CMD_T_SENT) != 0);
@@ -1854,20 +1610,34 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
+ /*
+ * Handle special case for COMPARE_AND_WRITE failure, where the
+ * callback is expected to drop the per device ->caw_mutex.
+ */
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd);
- switch (cmd->scsi_sense_reason) {
+ switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
case TCM_INVALID_PARAMETER_LIST:
+ case TCM_PARAMETER_LIST_LENGTH_ERROR:
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
+ case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ break;
+ case TCM_OUT_OF_RESOURCES:
+ sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
break;
case TCM_RESERVATION_CONFLICT:
/*
@@ -1885,30 +1655,24 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- ret = cmd->se_tfo->queue_status(cmd);
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0], cmd->scsi_sense_reason);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ cmd->t_task_cdb[0], sense_reason);
+ sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
- /*
- * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
- * make the call to transport_send_check_condition_and_sense()
- * directly. Otherwise expect the fabric to make the call to
- * transport_send_check_condition_and_sense() after handling
- * possible unsoliticied write data payloads.
- */
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+
+ ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@@ -1922,1164 +1686,151 @@ queue_full:
cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
transport_handle_queue_full(cmd, cmd->se_dev);
}
+EXPORT_SYMBOL(transport_generic_request_failure);
-static inline u32 transport_lba_21(unsigned char *cdb)
+void __target_execute_cmd(struct se_cmd *cmd)
{
- return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
-}
+ sense_reason_t ret;
-static inline u32 transport_lba_32(unsigned char *cdb)
-{
- return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
-}
+ if (cmd->execute_cmd) {
+ ret = cmd->execute_cmd(cmd);
+ if (ret) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
-static inline unsigned long long transport_lba_64(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
- __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
-}
-
-/*
- * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
- */
-static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
- __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+ transport_generic_request_failure(cmd, ret);
+ }
+ }
}
-static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
+static bool target_handle_task_attr(struct se_cmd *cmd)
{
- unsigned long flags;
+ struct se_device *dev = cmd->se_dev;
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return false;
-/*
- * Called from Fabric Module context from transport_execute_tasks()
- *
- * The return of this function determins if the tasks from struct se_cmd
- * get added to the execution queue in transport_execute_tasks(),
- * or are added to the delayed or ordered lists here.
- */
-static inline int transport_execute_task_attr(struct se_cmd *cmd)
-{
- if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
- return 1;
/*
 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 to
 * move the passed struct se_cmd list of tasks to the front of the list.
*/
- if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- pr_debug("Added HEAD_OF_QUEUE for CDB:"
- " 0x%02x, se_ordered_id: %u\n",
- cmd->t_task_cdb[0],
- cmd->se_ordered_id);
- return 1;
- } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- atomic_inc(&cmd->se_dev->dev_ordered_sync);
- smp_mb__after_atomic_inc();
+ switch (cmd->sam_task_attr) {
+ case MSG_HEAD_TAG:
+ pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
+ "se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->se_ordered_id);
+ return false;
+ case MSG_ORDERED_TAG:
+ atomic_inc(&dev->dev_ordered_sync);
+ smp_mb__after_atomic();
- pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
- " list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0],
- cmd->se_ordered_id);
- /*
- * Add ORDERED command to tail of execution queue if
- * no other older commands exist that need to be
- * completed first.
- */
- if (!atomic_read(&cmd->se_dev->simple_cmds))
- return 1;
- } else {
- /*
- * For SIMPLE and UNTAGGED Task Attribute commands
- */
- atomic_inc(&cmd->se_dev->simple_cmds);
- smp_mb__after_atomic_inc();
- }
- /*
- * Otherwise if one or more outstanding ORDERED task attribute exist,
- * add the dormant task(s) built for the passed struct se_cmd to the
- * execution queue and become in Active state for this struct se_device.
- */
- if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
- /*
- * Otherwise, add cmd w/ tasks to delayed cmd queue that
- * will be drained upon completion of HEAD_OF_QUEUE task.
- */
- spin_lock(&cmd->se_dev->delayed_cmd_lock);
- cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
- list_add_tail(&cmd->se_delayed_node,
- &cmd->se_dev->delayed_cmd_list);
- spin_unlock(&cmd->se_dev->delayed_cmd_lock);
-
- pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
- " delayed CMD list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->sam_task_attr,
- cmd->se_ordered_id);
- /*
- * Return zero to let transport_execute_tasks() know
- * not to add the delayed tasks to the execution list.
- */
- return 0;
- }
- /*
- * Otherwise, no ORDERED task attributes exist..
- */
- return 1;
-}
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
+ " se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->se_ordered_id);
-/*
- * Called from fabric module context in transport_generic_new_cmd() and
- * transport_generic_process_write()
- */
-static int transport_execute_tasks(struct se_cmd *cmd)
-{
- int add_tasks;
- struct se_device *se_dev = cmd->se_dev;
- /*
- * Call transport_cmd_check_stop() to see if a fabric exception
- * has occurred that prevents execution.
- */
- if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
/*
- * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
- * attribute for the tasks of the received struct se_cmd CDB
+ * Execute an ORDERED command if no other older commands
+ * exist that need to be completed first.
*/
- add_tasks = transport_execute_task_attr(cmd);
- if (!add_tasks)
- goto execute_tasks;
+ if (!atomic_read(&dev->simple_cmds))
+ return false;
+ break;
+ default:
/*
- * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
- * adds associated se_tasks while holding dev->execute_task_lock
- * before I/O dispath to avoid a double spinlock access.
+ * For SIMPLE and UNTAGGED Task Attribute commands
*/
- __transport_execute_tasks(se_dev, cmd);
- return 0;
- }
-
-execute_tasks:
- __transport_execute_tasks(se_dev, NULL);
- return 0;
-}
-
-/*
- * Called to check struct se_device tcq depth window, and once open pull struct se_task
- * from struct se_device->execute_task_list and
- *
- * Called from transport_processing_thread()
- */
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
-{
- int error;
- struct se_cmd *cmd = NULL;
- struct se_task *task = NULL;
- unsigned long flags;
-
-check_depth:
- spin_lock_irq(&dev->execute_task_lock);
- if (new_cmd != NULL)
- __transport_add_tasks_from_cmd(new_cmd);
-
- if (list_empty(&dev->execute_task_list)) {
- spin_unlock_irq(&dev->execute_task_lock);
- return 0;
- }
- task = list_first_entry(&dev->execute_task_list,
- struct se_task, t_execute_list);
- __transport_remove_task_from_execute_queue(task, dev);
- spin_unlock_irq(&dev->execute_task_lock);
-
- cmd = task->task_se_cmd;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- task->task_flags |= (TF_ACTIVE | TF_SENT);
- atomic_inc(&cmd->t_task_cdbs_sent);
-
- if (atomic_read(&cmd->t_task_cdbs_sent) ==
- cmd->t_task_list_num)
- cmd->transport_state |= CMD_T_SENT;
-
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- if (cmd->execute_task)
- error = cmd->execute_task(task);
- else
- error = dev->transport->do_task(task);
- if (error != 0) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- task->task_flags &= ~TF_ACTIVE;
- cmd->transport_state &= ~CMD_T_SENT;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_stop_tasks_for_cmd(cmd);
- transport_generic_request_failure(cmd);
+ atomic_inc(&dev->simple_cmds);
+ smp_mb__after_atomic();
+ break;
}
- new_cmd = NULL;
- goto check_depth;
-
- return 0;
-}
-
-static inline u32 transport_get_sectors_6(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
-
- /*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 8-bit sector value.
- */
- if (!dev)
- goto type_disk;
+ if (atomic_read(&dev->dev_ordered_sync) == 0)
+ return false;
- /*
- * Use 24-bit allocation length for TYPE_TAPE.
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE)
- return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
+ spin_lock(&dev->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+ spin_unlock(&dev->delayed_cmd_lock);
- /*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value. SBC-3 says:
- *
- * A TRANSFER LENGTH field set to zero specifies that 256
- * logical blocks shall be written. Any other value
- * specifies the number of logical blocks that shall be
- * written.
- */
-type_disk:
- return cdb[4] ? : 256;
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ " delayed CMD list, se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->se_ordered_id);
+ return true;
}
-static inline u32 transport_get_sectors_10(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
+void target_execute_cmd(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
-
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 16-bit sector value.
+ * If the received CDB has already been aborted, stop processing it here.
*/
- if (!dev)
- goto type_disk;
-
- /*
- * XXX_10 is not defined in SSC, throw an exception
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- *ret = -EINVAL;
- return 0;
- }
-
- /*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 16-bit sector value.
- */
-type_disk:
- return (u32)(cdb[7] << 8) + cdb[8];
-}
-
-static inline u32 transport_get_sectors_12(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
+ if (transport_check_aborted_status(cmd, 1))
+ return;
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
+ * Determine if the frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
*/
- if (!dev)
- goto type_disk;
+ spin_lock_irq(&cmd->t_state_lock);
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__,
+ cmd->se_tfo->get_task_tag(cmd));
- /*
- * XXX_12 is not defined in SSC, throw an exception
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- *ret = -EINVAL;
- return 0;
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete_all(&cmd->t_transport_stop_comp);
+ return;
}
+ cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
/*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 32-bit sector value.
+ * Perform WRITE_INSERT of PI using software emulation when backend
+ * device has PI enabled, if the transport has not already generated
+ * PI using hardware WRITE_INSERT offload.
*/
-type_disk:
- return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
-}
-
-static inline u32 transport_get_sectors_16(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
-
- /*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
- */
- if (!dev)
- goto type_disk;
-
- /*
- * Use 24-bit allocation length for TYPE_TAPE.
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE)
- return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
-
-type_disk:
- return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
- (cdb[12] << 8) + cdb[13];
-}
-
-/*
- * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
- */
-static inline u32 transport_get_sectors_32(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- /*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
- */
- return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
- (cdb[30] << 8) + cdb[31];
-
-}
-
-static inline u32 transport_get_size(
- u32 sectors,
- unsigned char *cdb,
- struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
-
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- if (cdb[1] & 1) { /* sectors */
- return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
- } else /* bytes */
- return sectors;
+ if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+ sbc_dif_generate(cmd);
}
-#if 0
- pr_debug("Returning block_size: %u, sectors: %u == %u for"
- " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
- dev->se_sub_dev->se_dev_attrib.block_size * sectors,
- dev->transport->name);
-#endif
- return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
-}
-static void transport_xor_callback(struct se_cmd *cmd)
-{
- unsigned char *buf, *addr;
- struct scatterlist *sg;
- unsigned int offset;
- int i;
- int count;
- /*
- * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
- *
- * 1) read the specified logical block(s);
- * 2) transfer logical blocks from the data-out buffer;
- * 3) XOR the logical blocks transferred from the data-out buffer with
- * the logical blocks read, storing the resulting XOR data in a buffer;
- * 4) if the DISABLE WRITE bit is set to zero, then write the logical
- * blocks transferred from the data-out buffer; and
- * 5) transfer the resulting XOR data to the data-in buffer.
- */
- buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate xor_callback buf\n");
+ if (target_handle_task_attr(cmd)) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
return;
}
- /*
- * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
- * into the locally allocated *buf
- */
- sg_copy_to_buffer(cmd->t_data_sg,
- cmd->t_data_nents,
- buf,
- cmd->data_length);
- /*
- * Now perform the XOR against the BIDI read memory located at
- * cmd->t_mem_bidi_list
- */
-
- offset = 0;
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
- addr = kmap_atomic(sg_page(sg), KM_USER0);
- if (!addr)
- goto out;
-
- for (i = 0; i < sg->length; i++)
- *(addr + sg->offset + i) ^= *(buf + offset + i);
-
- offset += sg->length;
- kunmap_atomic(addr, KM_USER0);
- }
-
-out:
- kfree(buf);
+ __target_execute_cmd(cmd);
}
+EXPORT_SYMBOL(target_execute_cmd);
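One note on the CMD_T_BUSY/CMD_T_SENT clearing above: it depends on negating the OR of both masks, matching the form used in __target_execute_cmd(). A standalone illustration of why the parentheses matter (plain C, not from this patch):

	static void show_flag_clear(void)
	{
		unsigned int state = CMD_T_BUSY | CMD_T_SENT | CMD_T_ACTIVE;

		/* Clears both bits, keeps CMD_T_ACTIVE */
		state &= ~(CMD_T_BUSY | CMD_T_SENT);

		/*
		 * By contrast, "state &= ~CMD_T_BUSY | CMD_T_SENT" parses as
		 * "state &= ((~CMD_T_BUSY) | CMD_T_SENT)", which clears only
		 * CMD_T_BUSY and leaves CMD_T_SENT set.
		 */
	}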
/*
- * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
+ * Process all commands up to the last received ORDERED task attribute,
+ * which requires another blocking boundary.
*/
-static int transport_get_sense_data(struct se_cmd *cmd)
-{
- unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
- struct se_device *dev = cmd->se_dev;
- struct se_task *task = NULL, *task_tmp;
- unsigned long flags;
- u32 offset = 0;
-
- WARN_ON(!cmd->se_lun);
-
- if (!dev)
- return 0;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return 0;
- }
-
- list_for_each_entry_safe(task, task_tmp,
- &cmd->t_task_list, t_list) {
- if (!(task->task_flags & TF_HAS_SENSE))
- continue;
-
- if (!dev->transport->get_sense_buffer) {
- pr_err("dev->transport->get_sense_buffer"
- " is NULL\n");
- continue;
- }
-
- sense_buffer = dev->transport->get_sense_buffer(task);
- if (!sense_buffer) {
- pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
- " sense buffer for task with sense\n",
- cmd->se_tfo->get_task_tag(cmd), task);
- continue;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- offset = cmd->se_tfo->set_fabric_sense_len(cmd,
- TRANSPORT_SENSE_BUFFER);
-
- memcpy(&buffer[offset], sense_buffer,
- TRANSPORT_SENSE_BUFFER);
- cmd->scsi_status = task->task_scsi_status;
- /* Automatically padded */
- cmd->scsi_sense_length =
- (TRANSPORT_SENSE_BUFFER + offset);
-
- pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
- " and sense\n",
- dev->se_hba->hba_id, dev->transport->name,
- cmd->scsi_status);
- return 0;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- return -1;
-}
-
-static inline long long transport_dev_end_lba(struct se_device *dev)
-{
- return dev->transport->get_blocks(dev) + 1;
-}
-
-static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- u32 sectors;
-
- if (dev->transport->get_device_type(dev) != TYPE_DISK)
- return 0;
-
- sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
-
- if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
- pr_err("LBA: %llu Sectors: %u exceeds"
- " transport_dev_end_lba(): %llu\n",
- cmd->t_task_lba, sectors,
- transport_dev_end_lba(dev));
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
+static void target_restart_delayed_cmds(struct se_device *dev)
{
- /*
- * Determine if the received WRITE_SAME is used to for direct
- * passthrough into Linux/SCSI with struct request via TCM/pSCSI
- * or we are signaling the use of internal WRITE_SAME + UNMAP=1
- * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code.
- */
- int passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
-
- if (!passthrough) {
- if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
- pr_err("WRITE_SAME PBDATA and LBDATA"
- " bits not supported for Block Discard"
- " Emulation\n");
- return -ENOSYS;
- }
- /*
- * Currently for the emulated case we only accept
- * tpws with the UNMAP=1 bit set.
- */
- if (!(flags[0] & 0x08)) {
- pr_err("WRITE_SAME w/o UNMAP bit not"
- " supported for Block Discard Emulation\n");
- return -ENOSYS;
- }
- }
+ for (;;) {
+ struct se_cmd *cmd;
- return 0;
-}
-
-/* transport_generic_cmd_sequencer():
- *
- * Generic Command Sequencer that should work for most DAS transport
- * drivers.
- *
- * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
- * RX Thread.
- *
- * FIXME: Need to support other SCSI OPCODES where as well.
- */
-static int transport_generic_cmd_sequencer(
- struct se_cmd *cmd,
- unsigned char *cdb)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- int ret = 0, sector_ret = 0, passthrough;
- u32 sectors = 0, size = 0, pr_reg_type = 0;
- u16 service_action;
- u8 alua_ascq = 0;
- /*
- * Check for an existing UNIT ATTENTION condition
- */
- if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -EINVAL;
- }
- /*
- * Check status of Asymmetric Logical Unit Assignment port
- */
- ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
- if (ret != 0) {
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- if (ret > 0) {
-#if 0
- pr_debug("[%s]: ALUA TG Port not available,"
- " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
-#endif
- transport_set_sense_codes(cmd, 0x04, alua_ascq);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -EINVAL;
- }
- goto out_invalid_cdb_field;
- }
- /*
- * Check status for SPC-3 Persistent Reservations
- */
- if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
- if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EBUSY;
- }
- /*
- * This means the CDB is allowed for the SCSI Initiator port
- * when said port is *NOT* holding the legacy SPC-2 or
- * SPC-3 Persistent Reservation.
- */
- }
-
- /*
- * If we operate in passthrough mode we skip most CDB emulation and
- * instead hand the commands down to the physical SCSI device.
- */
- passthrough =
- (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
-
- switch (cdb[0]) {
- case READ_6:
- sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_21(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_10:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_12:
- sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_64(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_6:
- sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_21(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_10:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_12:
- sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_64(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case XDWRITEREAD_10:
- if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->se_cmd_flags & SCF_BIDI))
- goto out_invalid_cdb_field;
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
-
- /*
- * Do now allow BIDI commands for passthrough mode.
- */
- if (passthrough)
- goto out_unsupported_cdb;
-
- /*
- * Setup BIDI XOR callback to be run after I/O completion.
- */
- cmd->transport_complete_callback = &transport_xor_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- break;
- case VARIABLE_LENGTH_CMD:
- service_action = get_unaligned_be16(&cdb[8]);
- switch (service_action) {
- case XDWRITEREAD_32:
- sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- /*
- * Use WRITE_32 and READ_32 opcodes for the emulated
- * XDWRITE_READ_32 logic.
- */
- cmd->t_task_lba = transport_lba_64_ext(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
-
- /*
- * Do now allow BIDI commands for passthrough mode.
- */
- if (passthrough)
- goto out_unsupported_cdb;
-
- /*
- * Setup BIDI XOR callback to be run during after I/O
- * completion.
- */
- cmd->transport_complete_callback = &transport_xor_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- break;
- case WRITE_SAME_32:
- sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
-
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
- " supported\n");
- goto out_invalid_cdb_field;
- }
-
- cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
-
- if (target_check_write_same_discard(&cdb[10], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_task = target_emulate_write_same;
- break;
- default:
- pr_err("VARIABLE_LENGTH_CMD service action"
- " 0x%04x not supported\n", service_action);
- goto out_unsupported_cdb;
- }
- break;
- case MAINTENANCE_IN:
- if (dev->transport->get_device_type(dev) != TYPE_ROM) {
- /* MAINTENANCE_IN from SCC-2 */
- /*
- * Check for emulated MI_REPORT_TARGET_PGS.
- */
- if (cdb[1] == MI_REPORT_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
- cmd->execute_task =
- target_emulate_report_target_port_groups;
- }
- size = (cdb[6] << 24) | (cdb[7] << 16) |
- (cdb[8] << 8) | cdb[9];
- } else {
- /* GPCMD_SEND_KEY from multi media commands */
- size = (cdb[8] << 8) + cdb[9];
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SELECT:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SELECT_10:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SENSE:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_task = target_emulate_modesense;
- break;
- case MODE_SENSE_10:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_task = target_emulate_modesense;
- break;
- case GPCMD_READ_BUFFER_CAPACITY:
- case GPCMD_SEND_OPC:
- case LOG_SELECT:
- case LOG_SENSE:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_BLOCK_LIMITS:
- size = READ_BLOCK_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GPCMD_GET_CONFIGURATION:
- case GPCMD_READ_FORMAT_CAPACITIES:
- case GPCMD_READ_DISC_INFO:
- case GPCMD_READ_TRACK_RZONE_INFO:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case PERSISTENT_RESERVE_IN:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_task = target_scsi3_emulate_pr_in;
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case PERSISTENT_RESERVE_OUT:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_task = target_scsi3_emulate_pr_out;
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GPCMD_MECHANISM_STATUS:
- case GPCMD_READ_DVD_STRUCTURE:
- size = (cdb[8] << 8) + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_POSITION:
- size = READ_POSITION_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MAINTENANCE_OUT:
- if (dev->transport->get_device_type(dev) != TYPE_ROM) {
- /* MAINTENANCE_OUT from SCC-2
- *
- * Check for emulated MO_SET_TARGET_PGS.
- */
- if (cdb[1] == MO_SET_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
- cmd->execute_task =
- target_emulate_set_target_port_groups;
- }
-
- size = (cdb[6] << 24) | (cdb[7] << 16) |
- (cdb[8] << 8) | cdb[9];
- } else {
- /* GPCMD_REPORT_KEY from multi media commands */
- size = (cdb[8] << 8) + cdb[9];
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case INQUIRY:
- size = (cdb[3] << 8) + cdb[4];
- /*
- * Do implict HEAD_OF_QUEUE processing for INQUIRY.
- * See spc4r17 section 5.3
- */
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_task = target_emulate_inquiry;
- break;
- case READ_BUFFER:
- size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_CAPACITY:
- size = READ_CAP_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_task = target_emulate_readcapacity;
- break;
- case READ_MEDIA_SERIAL_NUMBER:
- case SECURITY_PROTOCOL_IN:
- case SECURITY_PROTOCOL_OUT:
- size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case SERVICE_ACTION_IN:
- switch (cmd->t_task_cdb[1] & 0x1f) {
- case SAI_READ_CAPACITY_16:
- if (!passthrough)
- cmd->execute_task =
- target_emulate_readcapacity_16;
+ spin_lock(&dev->delayed_cmd_lock);
+ if (list_empty(&dev->delayed_cmd_list)) {
+ spin_unlock(&dev->delayed_cmd_lock);
break;
- default:
- if (passthrough)
- break;
-
- pr_err("Unsupported SA: 0x%02x\n",
- cmd->t_task_cdb[1] & 0x1f);
- goto out_unsupported_cdb;
}
- /*FALLTHROUGH*/
- case ACCESS_CONTROL_IN:
- case ACCESS_CONTROL_OUT:
- case EXTENDED_COPY:
- case READ_ATTRIBUTE:
- case RECEIVE_COPY_RESULTS:
- case WRITE_ATTRIBUTE:
- size = (cdb[10] << 24) | (cdb[11] << 16) |
- (cdb[12] << 8) | cdb[13];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case RECEIVE_DIAGNOSTIC:
- case SEND_DIAGNOSTIC:
- size = (cdb[3] << 8) | cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
-/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
-#if 0
- case GPCMD_READ_CD:
- sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- size = (2336 * sectors);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
-#endif
- case READ_TOC:
- size = cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case REQUEST_SENSE:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_task = target_emulate_request_sense;
- break;
- case READ_ELEMENT_STATUS:
- size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case WRITE_BUFFER:
- size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case RESERVE:
- case RESERVE_10:
- /*
- * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
- * Assume the passthrough or $FABRIC_MOD will tell us about it.
- */
- if (cdb[0] == RESERVE_10)
- size = (cdb[7] << 8) | cdb[8];
- else
- size = cmd->data_length;
- /*
- * Setup the legacy emulated handler for SPC-2 and
- * >= SPC-3 compatible reservation handling (CRH=1)
- * Otherwise, we assume the underlying SCSI logic is
- * is running in SPC_PASSTHROUGH, and wants reservations
- * emulation disabled.
- */
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_task = target_scsi2_reservation_reserve;
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case RELEASE:
- case RELEASE_10:
- /*
- * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
- * Assume the passthrough or $FABRIC_MOD will tell us about it.
- */
- if (cdb[0] == RELEASE_10)
- size = (cdb[7] << 8) | cdb[8];
- else
- size = cmd->data_length;
-
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_task = target_scsi2_reservation_release;
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case SYNCHRONIZE_CACHE:
- case 0x91: /* SYNCHRONIZE_CACHE_16: */
- /*
- * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
- */
- if (cdb[0] == SYNCHRONIZE_CACHE) {
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- cmd->t_task_lba = transport_lba_32(cdb);
- } else {
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- cmd->t_task_lba = transport_lba_64(cdb);
- }
- if (sector_ret)
- goto out_unsupported_cdb;
+ cmd = list_entry(dev->delayed_cmd_list.next,
+ struct se_cmd, se_delayed_node);
+ list_del(&cmd->se_delayed_node);
+ spin_unlock(&dev->delayed_cmd_lock);
- size = transport_get_size(sectors, cdb, cmd);
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ __target_execute_cmd(cmd);
- if (passthrough)
+ if (cmd->sam_task_attr == MSG_ORDERED_TAG)
break;
-
- /*
- * Check to ensure that LBA + Range does not exceed past end of
- * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
- */
- if ((cmd->t_task_lba != 0) || (sectors != 0)) {
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- goto out_invalid_cdb_field;
- }
- cmd->execute_task = target_emulate_synchronize_cache;
- break;
- case UNMAP:
- size = get_unaligned_be16(&cdb[7]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_task = target_emulate_unmap;
- break;
- case WRITE_SAME_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
-
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
- }
-
- cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
-
- if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_task = target_emulate_write_same;
- break;
- case WRITE_SAME:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
-
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
- }
-
- cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- /*
- * Follow sbcr26 with WRITE_SAME (10) and check for the existence
- * of byte 1 bit 3 UNMAP instead of original reserved field
- */
- if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_task = target_emulate_write_same;
- break;
- case ALLOW_MEDIUM_REMOVAL:
- case ERASE:
- case REZERO_UNIT:
- case SEEK_10:
- case SPACE:
- case START_STOP:
- case TEST_UNIT_READY:
- case VERIFY:
- case WRITE_FILEMARKS:
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- if (!passthrough)
- cmd->execute_task = target_emulate_noop;
- break;
- case GPCMD_CLOSE_TRACK:
- case INITIALIZE_ELEMENT_STATUS:
- case GPCMD_LOAD_UNLOAD:
- case GPCMD_SET_SPEED:
- case MOVE_MEDIUM:
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case REPORT_LUNS:
- cmd->execute_task = target_report_luns;
- size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- /*
- * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
- * See spc4r17 section 5.3
- */
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- default:
- pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
- " 0x%02x, sending CHECK_CONDITION.\n",
- cmd->se_tfo->get_fabric_name(), cdb[0]);
- goto out_unsupported_cdb;
}
-
- if (size != cmd->data_length) {
- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
- " %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
- cmd->data_length, size, cdb[0]);
-
- cmd->cmd_spdtl = size;
-
- if (cmd->data_direction == DMA_TO_DEVICE) {
- pr_err("Rejecting underflow/overflow"
- " WRITE data\n");
- goto out_invalid_cdb_field;
- }
- /*
- * Reject READ_* or WRITE_* with overflow/underflow for
- * type SCF_SCSI_DATA_SG_IO_CDB.
- */
- if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
- pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
- " CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", dev->transport->name);
- /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
- goto out_invalid_cdb_field;
- }
-
- if (size > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = (size - cmd->data_length);
- } else {
- cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
- cmd->residual_count = (cmd->data_length - size);
- }
- cmd->data_length = size;
- }
-
- /* reject any command that we don't have a handler for */
- if (!(passthrough || cmd->execute_task ||
- (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
- goto out_unsupported_cdb;
-
- transport_set_supported_SAM_opcode(cmd);
- return ret;
-
-out_unsupported_cdb:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
-out_invalid_cdb_field:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
}
/*
@@ -3089,12 +1840,13 @@ out_invalid_cdb_field:
static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_cmd *cmd_p, *cmd_tmp;
- int new_active_tasks = 0;
+
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return;
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -3106,54 +1858,24 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
atomic_dec(&dev->dev_ordered_sync);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
}
- /*
- * Process all commands up to the last received
- * ORDERED task attribute which requires another blocking
- * boundary
- */
- spin_lock(&dev->delayed_cmd_lock);
- list_for_each_entry_safe(cmd_p, cmd_tmp,
- &dev->delayed_cmd_list, se_delayed_node) {
- list_del(&cmd_p->se_delayed_node);
- spin_unlock(&dev->delayed_cmd_lock);
-
- pr_debug("Calling add_tasks() for"
- " cmd_p: 0x%02x Task Attr: 0x%02x"
- " Dormant -> Active, se_ordered_id: %u\n",
- cmd_p->t_task_cdb[0],
- cmd_p->sam_task_attr, cmd_p->se_ordered_id);
-
- transport_add_tasks_from_cmd(cmd_p);
- new_active_tasks++;
-
- spin_lock(&dev->delayed_cmd_lock);
- if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
- break;
- }
- spin_unlock(&dev->delayed_cmd_lock);
- /*
- * If new tasks have become active, wake up the transport thread
- * to do the processing of the Active tasks.
- */
- if (new_active_tasks != 0)
- wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+ target_restart_delayed_cmds(dev);
}
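
target_restart_delayed_cmds() itself is not shown in this hunk; what matters for review is the drain policy it inherits from the removed inline loop above: release delayed commands in arrival order, and stop after releasing the first ORDERED command, which re-establishes the next blocking boundary. A user-space sketch of that policy (names and types are illustrative only, not the kernel API):

#include <stdio.h>

enum demo_attr { DEMO_SIMPLE, DEMO_ORDERED };

struct demo_delayed {
	enum demo_attr attr;
	const char *name;
};

static void demo_restart_delayed(struct demo_delayed *list, int n)
{
	for (int i = 0; i < n; i++) {
		printf("executing %s\n", list[i].name);
		if (list[i].attr == DEMO_ORDERED)
			break;	/* new blocking boundary */
	}
}

int main(void)
{
	struct demo_delayed q[] = {
		{ DEMO_SIMPLE,  "read-a" },
		{ DEMO_SIMPLE,  "read-b" },
		{ DEMO_ORDERED, "sync-cache" },
		{ DEMO_SIMPLE,  "read-c" },
	};

	demo_restart_delayed(q, 4);	/* read-c stays queued */
	return 0;
}
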
static void transport_complete_qf(struct se_cmd *cmd)
{
int ret = 0;
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret)
goto out;
@@ -3161,16 +1883,18 @@ static void transport_complete_qf(struct se_cmd *cmd)
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
+ trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
break;
case DMA_TO_DEVICE:
- if (cmd->t_bidi_data_sg) {
+ if (cmd->se_cmd_flags & SCF_BIDI) {
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret < 0)
break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
+ trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
break;
default:
@@ -3193,24 +1917,39 @@ static void transport_handle_queue_full(
spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc(&dev->dev_qf_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
schedule_work(&cmd->se_dev->qf_work_queue);
}
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+ sense_reason_t rc;
+
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+ rc = sbc_dif_read_strip(cmd);
+ if (rc) {
+ cmd->pi_err = rc;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void target_complete_ok_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- int reason = 0, ret;
+ int ret;
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
+
/*
* Check to schedule QUEUE_FULL work, or execute an existing
* cmd->transport_qf_callback()
@@ -3219,20 +1958,33 @@ static void target_complete_ok_work(struct work_struct *work)
schedule_work(&cmd->se_dev->qf_work_queue);
/*
- * Check if we need to retrieve a sense buffer from
+ * Check if we need to send a sense buffer from
* the struct se_cmd in question.
*/
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
- if (transport_get_sense_data(cmd) < 0)
- reason = TCM_NON_EXISTENT_LUN;
+ WARN_ON(!cmd->scsi_status);
+ ret = transport_send_check_condition_and_sense(
+ cmd, 0, 1);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
- /*
-		 * Only set when a struct se_task->task_scsi_status returned
-		 * a non-GOOD status.
- */
- if (cmd->scsi_status) {
- ret = transport_send_check_condition_and_sense(
- cmd, reason, 1);
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+ /*
+	 * Check for a callback, used amongst other things by
+	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
+ */
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
+
+ rc = cmd->transport_complete_callback(cmd);
+ if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+ return;
+ } else if (rc) {
+ ret = transport_send_check_condition_and_sense(cmd,
+ rc, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@@ -3241,12 +1993,6 @@ static void target_complete_ok_work(struct work_struct *work)
return;
}
}
- /*
-	 * Check for a callback, used amongst other things by
-	 * XDWRITE_READ_10 emulation.
- */
- if (cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd);
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
@@ -3256,7 +2002,24 @@ static void target_complete_ok_work(struct work_struct *work)
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * Perform READ_STRIP of PI using software emulation when
+ * backend had PI enabled, if the transport will not be
+ * performing hardware READ_STRIP offload.
+ */
+ if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+ target_check_read_strip(cmd)) {
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->pi_err, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+ trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@@ -3271,7 +2034,7 @@ static void target_complete_ok_work(struct work_struct *work)
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
- if (cmd->t_bidi_data_sg) {
+ if (cmd->se_cmd_flags & SCF_BIDI) {
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep) {
cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -3285,6 +2048,7 @@ static void target_complete_ok_work(struct work_struct *work)
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
+ trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@@ -3304,33 +2068,6 @@ queue_full:
transport_handle_queue_full(cmd, cmd->se_dev);
}
-static void transport_free_dev_tasks(struct se_cmd *cmd)
-{
- struct se_task *task, *task_tmp;
- unsigned long flags;
- LIST_HEAD(dispose_list);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_for_each_entry_safe(task, task_tmp,
- &cmd->t_task_list, t_list) {
- if (!(task->task_flags & TF_ACTIVE))
- list_move_tail(&task->t_list, &dispose_list);
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- while (!list_empty(&dispose_list)) {
- task = list_first_entry(&dispose_list, struct se_task, t_list);
-
- if (task->task_sg != cmd->t_data_sg &&
- task->task_sg != cmd->t_bidi_data_sg)
- kfree(task->task_sg);
-
- list_del(&task->t_list);
-
- cmd->se_dev->transport->free_task(task);
- }
-}
-
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
struct scatterlist *sg;
@@ -3342,10 +2079,29 @@ static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
kfree(sgl);
}
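
For readers of the hunk above: the body of transport_free_sgl() is elided by the diff context. A sketch of the usual teardown pattern such a helper follows, assuming every entry was page-allocated by the transport:

#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <linux/slab.h>

static inline void demo_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	if (!sgl)
		return;

	/* Release each backing page, then the scatterlist array itself. */
	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}
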
+static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+{
+ /*
+ * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
+	 * emulation, and free + reset pointers if necessary.
+ */
+ if (!cmd->t_data_sg_orig)
+ return;
+
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = cmd->t_data_sg_orig;
+ cmd->t_data_sg_orig = NULL;
+ cmd->t_data_nents = cmd->t_data_nents_orig;
+ cmd->t_data_nents_orig = 0;
+}
+
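
The restore helper above only makes sense next to its save half, which is not part of this hunk. A hypothetical sketch of how COMPARE_AND_WRITE emulation would park the caller's SGL before substituting a scratch list, so that completion can restore it here; the helper name is invented for illustration:

static void demo_save_orig_sgl(struct se_cmd *cmd,
			       struct scatterlist *scratch,
			       unsigned int scratch_nents)
{
	/* Park the fabric-provided SGL in the *_orig fields... */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;

	/* ...and point the command at the emulation's scratch list. */
	cmd->t_data_sg = scratch;
	cmd->t_data_nents = scratch_nents;
}
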
static inline void transport_free_pages(struct se_cmd *cmd)
{
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ transport_reset_sgl_orig(cmd);
return;
+ }
+ transport_reset_sgl_orig(cmd);
transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
cmd->t_data_sg = NULL;
@@ -3354,6 +2110,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
+
+ transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ cmd->t_prot_sg = NULL;
+ cmd->t_prot_nents = 0;
}
/**
@@ -3363,11 +2123,11 @@ static inline void transport_free_pages(struct se_cmd *cmd)
* This routine unconditionally frees a command, and reference counting
* or list removal must be done in the caller.
*/
-static void transport_release_cmd(struct se_cmd *cmd)
+static int transport_release_cmd(struct se_cmd *cmd)
{
BUG_ON(!cmd->se_tfo);
- if (cmd->se_tmr_req)
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
core_tmr_release_req(cmd->se_tmr_req);
if (cmd->t_task_cdb != cmd->__t_task_cdb)
kfree(cmd->t_task_cdb);
@@ -3375,11 +2135,7 @@ static void transport_release_cmd(struct se_cmd *cmd)
* If this cmd has been setup with target_get_sess_cmd(), drop
* the kref and call ->release_cmd() in kref callback.
*/
- if (cmd->check_release != 0) {
- target_put_sess_cmd(cmd->se_sess, cmd);
- return;
- }
- cmd->se_tfo->release_cmd(cmd);
+ return target_put_sess_cmd(cmd->se_sess, cmd);
}
/**
@@ -3388,89 +2144,11 @@ static void transport_release_cmd(struct se_cmd *cmd)
*
* This routine releases our reference to the command and frees it if possible.
*/
-static void transport_put_cmd(struct se_cmd *cmd)
+static int transport_put_cmd(struct se_cmd *cmd)
{
- unsigned long flags;
- int free_tasks = 0;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (atomic_read(&cmd->t_fe_count)) {
- if (!atomic_dec_and_test(&cmd->t_fe_count))
- goto out_busy;
- }
-
- if (atomic_read(&cmd->t_se_count)) {
- if (!atomic_dec_and_test(&cmd->t_se_count))
- goto out_busy;
- }
-
- if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
- cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
- transport_all_task_dev_remove_state(cmd);
- free_tasks = 1;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- if (free_tasks != 0)
- transport_free_dev_tasks(cmd);
-
transport_free_pages(cmd);
- transport_release_cmd(cmd);
- return;
-out_busy:
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-/*
- * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
- * allocating in the core.
- * @cmd: Associated se_cmd descriptor
- * @sgl: SGL style memory for TCM WRITE / READ
- * @sgl_count: Number of SGL elements
- * @sgl_bidi: SGL style memory for TCM BIDI READ
- * @sgl_bidi_count: Number of BIDI READ SGL elements
- *
- * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
- * of parameters.
- */
-int transport_generic_map_mem_to_cmd(
- struct se_cmd *cmd,
- struct scatterlist *sgl,
- u32 sgl_count,
- struct scatterlist *sgl_bidi,
- u32 sgl_bidi_count)
-{
- if (!sgl || !sgl_count)
- return 0;
-
- if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
- /*
- * Reject SCSI data overflow with map_mem_to_cmd() as incoming
- * scatterlists already have been set to follow what the fabric
- * passes for the original expected data transfer length.
- */
- if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
- pr_warn("Rejecting SCSI DATA overflow for fabric using"
- " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
-
- cmd->t_data_sg = sgl;
- cmd->t_data_nents = sgl_count;
-
- if (sgl_bidi && sgl_bidi_count) {
- cmd->t_bidi_data_sg = sgl_bidi;
- cmd->t_bidi_data_nents = sgl_bidi_count;
- }
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- }
-
- return 0;
+ return transport_release_cmd(cmd);
}
-EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
@@ -3478,7 +2156,6 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
struct page **pages;
int i;
- BUG_ON(!sg);
/*
* We need to take into account a possible offset here for fabrics like
* tcm_loop who may be using a contig buffer from the SCSI midlayer for
@@ -3486,7 +2163,9 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
*/
if (!cmd->t_data_nents)
return NULL;
- else if (cmd->t_data_nents == 1)
+
+ BUG_ON(!sg);
+ if (cmd->t_data_nents == 1)
return kmap(sg_page(sg)) + sg->offset;
/* >1 page. use vmap */
@@ -3510,34 +2189,34 @@ EXPORT_SYMBOL(transport_kmap_data_sg);
void transport_kunmap_data_sg(struct se_cmd *cmd)
{
- if (!cmd->t_data_nents)
+ if (!cmd->t_data_nents) {
return;
- else if (cmd->t_data_nents == 1)
+ } else if (cmd->t_data_nents == 1) {
kunmap(sg_page(cmd->t_data_sg));
+ return;
+ }
vunmap(cmd->t_data_vmap);
cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
-static int
-transport_generic_get_mem(struct se_cmd *cmd)
+int
+target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
+ bool zero_page)
{
- u32 length = cmd->data_length;
- unsigned int nents;
+ struct scatterlist *sg;
struct page *page;
- gfp_t zero_flag;
+ gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
+ unsigned int nent;
int i = 0;
- nents = DIV_ROUND_UP(length, PAGE_SIZE);
- cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
- if (!cmd->t_data_sg)
+ nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
+ if (!sg)
return -ENOMEM;
- cmd->t_data_nents = nents;
- sg_init_table(cmd->t_data_sg, nents);
-
- zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
+ sg_init_table(sg, nent);
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -3545,261 +2224,31 @@ transport_generic_get_mem(struct se_cmd *cmd)
if (!page)
goto out;
- sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
+ sg_set_page(&sg[i], page, page_len, 0);
length -= page_len;
i++;
}
+ *sgl = sg;
+ *nents = nent;
return 0;
out:
- while (i >= 0) {
- __free_page(sg_page(&cmd->t_data_sg[i]));
+ while (i > 0) {
i--;
+ __free_page(sg_page(&sg[i]));
}
- kfree(cmd->t_data_sg);
- cmd->t_data_sg = NULL;
+ kfree(sg);
return -ENOMEM;
}
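
Usage of the new target_alloc_sgl() helper follows the pattern visible later in transport_generic_new_cmd(); a condensed sketch of one call site, with error handling abbreviated:

static sense_reason_t demo_alloc_data(struct se_cmd *cmd, u32 length)
{
	/* true => __GFP_ZERO, as used for the protection SGL below */
	if (target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
			     length, true) < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* Pages are later released via transport_free_sgl() in
	 * transport_free_pages(). */
	return 0;
}
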
-/* Reduce the sector count if it is too large for the device */
-static inline sector_t transport_limit_task_sectors(
- struct se_device *dev,
- unsigned long long lba,
- sector_t sectors)
-{
- sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
-
- if (dev->transport->get_device_type(dev) == TYPE_DISK)
- if ((lba + sectors) > transport_dev_end_lba(dev))
- sectors = ((transport_dev_end_lba(dev) - lba) + 1);
-
- return sectors;
-}
-
-
/*
- * This function can be used by HW target mode drivers to create a linked
- * scatterlist from all contiguously allocated struct se_task->task_sg[].
- * This is intended to be called during the completion path by TCM Core
- * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
+ * Allocate any required resources to execute the command. For writes we
+ * might not have the payload yet, so notify the fabric via a call to
+ * ->write_pending instead. Otherwise place it on the execution queue.
*/
-void transport_do_task_sg_chain(struct se_cmd *cmd)
+sense_reason_t
+transport_generic_new_cmd(struct se_cmd *cmd)
{
- struct scatterlist *sg_first = NULL;
- struct scatterlist *sg_prev = NULL;
- int sg_prev_nents = 0;
- struct scatterlist *sg;
- struct se_task *task;
- u32 chained_nents = 0;
- int i;
-
- BUG_ON(!cmd->se_tfo->task_sg_chaining);
-
- /*
- * Walk the struct se_task list and setup scatterlist chains
- * for each contiguously allocated struct se_task->task_sg[].
- */
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (!task->task_sg)
- continue;
-
- if (!sg_first) {
- sg_first = task->task_sg;
- chained_nents = task->task_sg_nents;
- } else {
- sg_chain(sg_prev, sg_prev_nents, task->task_sg);
- chained_nents += task->task_sg_nents;
- }
- /*
- * For the padded tasks, use the extra SGL vector allocated
- * in transport_allocate_data_tasks() for the sg_prev_nents
- * offset into sg_chain() above.
- *
- * We do not need the padding for the last task (or a single
- * task), but in that case we will never use the sg_prev_nents
-	 * value below, which would be incorrect.
- */
- sg_prev_nents = (task->task_sg_nents + 1);
- sg_prev = task->task_sg;
- }
- /*
- * Setup the starting pointer and total t_tasks_sg_linked_no including
- * padding SGs for linking and to mark the end.
- */
- cmd->t_tasks_sg_chained = sg_first;
- cmd->t_tasks_sg_chained_no = chained_nents;
-
- pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
- cmd->t_tasks_sg_chained_no);
-
- for_each_sg(cmd->t_tasks_sg_chained, sg,
- cmd->t_tasks_sg_chained_no, i) {
-
- pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
- i, sg, sg_page(sg), sg->length, sg->offset);
- if (sg_is_chain(sg))
- pr_debug("SG: %p sg_is_chain=1\n", sg);
- if (sg_is_last(sg))
- pr_debug("SG: %p sg_is_last=1\n", sg);
- }
-}
-EXPORT_SYMBOL(transport_do_task_sg_chain);
-
-/*
- * Break up cmd into chunks transport can handle
- */
-static int
-transport_allocate_data_tasks(struct se_cmd *cmd,
- enum dma_data_direction data_direction,
- struct scatterlist *cmd_sg, unsigned int sgl_nents)
-{
- struct se_device *dev = cmd->se_dev;
- int task_count, i;
- unsigned long long lba;
- sector_t sectors, dev_max_sectors;
- u32 sector_size;
-
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- return -EINVAL;
-
- dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
- sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
-
- WARN_ON(cmd->data_length % sector_size);
-
- lba = cmd->t_task_lba;
- sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
- task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
-
- /*
- * If we need just a single task reuse the SG list in the command
- * and avoid a lot of work.
- */
- if (task_count == 1) {
- struct se_task *task;
- unsigned long flags;
-
- task = transport_generic_get_task(cmd, data_direction);
- if (!task)
- return -ENOMEM;
-
- task->task_sg = cmd_sg;
- task->task_sg_nents = sgl_nents;
-
- task->task_lba = lba;
- task->task_sectors = sectors;
- task->task_size = task->task_sectors * sector_size;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_add_tail(&task->t_list, &cmd->t_task_list);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- return task_count;
- }
-
- for (i = 0; i < task_count; i++) {
- struct se_task *task;
- unsigned int task_size, task_sg_nents_padded;
- struct scatterlist *sg;
- unsigned long flags;
- int count;
-
- task = transport_generic_get_task(cmd, data_direction);
- if (!task)
- return -ENOMEM;
-
- task->task_lba = lba;
- task->task_sectors = min(sectors, dev_max_sectors);
- task->task_size = task->task_sectors * sector_size;
-
- /*
- * This now assumes that passed sg_ents are in PAGE_SIZE chunks
- * in order to calculate the number per task SGL entries
- */
- task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
- /*
- * Check if the fabric module driver is requesting that all
- * struct se_task->task_sg[] be chained together.. If so,
- * then allocate an extra padding SG entry for linking and
- * marking the end of the chained SGL for every task except
- * the last one for (task_count > 1) operation, or skipping
- * the extra padding for the (task_count == 1) case.
- */
- if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
- task_sg_nents_padded = (task->task_sg_nents + 1);
- } else
- task_sg_nents_padded = task->task_sg_nents;
-
- task->task_sg = kmalloc(sizeof(struct scatterlist) *
- task_sg_nents_padded, GFP_KERNEL);
- if (!task->task_sg) {
- cmd->se_dev->transport->free_task(task);
- return -ENOMEM;
- }
-
- sg_init_table(task->task_sg, task_sg_nents_padded);
-
- task_size = task->task_size;
-
- /* Build new sgl, only up to task_size */
- for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
- if (cmd_sg->length > task_size)
- break;
-
- *sg = *cmd_sg;
- task_size -= cmd_sg->length;
- cmd_sg = sg_next(cmd_sg);
- }
-
- lba += task->task_sectors;
- sectors -= task->task_sectors;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_add_tail(&task->t_list, &cmd->t_task_list);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- }
-
- return task_count;
-}
-
-static int
-transport_allocate_control_task(struct se_cmd *cmd)
-{
- struct se_task *task;
- unsigned long flags;
-
- /* Workaround for handling zero-length control CDBs */
- if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
- !cmd->data_length)
- return 0;
-
- task = transport_generic_get_task(cmd, cmd->data_direction);
- if (!task)
- return -ENOMEM;
-
- task->task_sg = cmd->t_data_sg;
- task->task_size = cmd->data_length;
- task->task_sg_nents = cmd->t_data_nents;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_add_tail(&task->t_list, &cmd->t_task_list);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- /* Success! Return number of tasks allocated */
- return 1;
-}
-
-/*
- * Allocate any required resources to execute the command, and place it on
- * the execution queue if possible. For writes we might not have the payload
- * yet, so notify the fabric via a call to ->write_pending instead.
- */
-int transport_generic_new_cmd(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- int task_cdbs, task_cdbs_bidi = 0;
- int set_counts = 1;
int ret = 0;
/*
@@ -3809,101 +2258,67 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
cmd->data_length) {
- ret = transport_generic_get_mem(cmd);
- if (ret < 0)
- goto out_fail;
- }
+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
- /*
- * For BIDI command set up the read tasks first.
- */
- if (cmd->t_bidi_data_sg &&
- dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
-
- task_cdbs_bidi = transport_allocate_data_tasks(cmd,
- DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
- cmd->t_bidi_data_nents);
- if (task_cdbs_bidi <= 0)
- goto out_fail;
-
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- set_counts = 0;
- }
+ if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+ u32 bidi_length;
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- task_cdbs = transport_allocate_data_tasks(cmd,
- cmd->data_direction, cmd->t_data_sg,
- cmd->t_data_nents);
- } else {
- task_cdbs = transport_allocate_control_task(cmd);
- }
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
+ bidi_length = cmd->t_task_nolb *
+ cmd->se_dev->dev_attrib.block_size;
+ else
+ bidi_length = cmd->data_length;
- if (task_cdbs < 0)
- goto out_fail;
- else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->t_state = TRANSPORT_COMPLETE;
- cmd->transport_state |= CMD_T_ACTIVE;
- spin_unlock_irq(&cmd->t_state_lock);
-
- if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
- u8 ua_asc = 0, ua_ascq = 0;
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+ &cmd->t_bidi_data_nents,
+ bidi_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
- core_scsi3_ua_clear_for_request_sense(cmd,
- &ua_asc, &ua_ascq);
+ if (cmd->prot_op != TARGET_PROT_NORMAL) {
+ ret = target_alloc_sgl(&cmd->t_prot_sg,
+ &cmd->t_prot_nents,
+ cmd->prot_length, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- INIT_WORK(&cmd->work, target_complete_ok_work);
- queue_work(target_completion_wq, &cmd->work);
+ ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
+ cmd->data_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * If this command is not a write we can execute it right here,
+ * for write buffers we need to notify the fabric driver first
+ * and let it call back once the write buffers are ready.
+ */
+ target_add_to_state_list(cmd);
+ if (cmd->data_direction != DMA_TO_DEVICE) {
+ target_execute_cmd(cmd);
return 0;
}
+ transport_cmd_check_stop(cmd, false, true);
- if (set_counts) {
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- }
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
- cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
- atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
- atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
+ /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
+ WARN_ON(ret);
- /*
- * For WRITEs, let the fabric know its buffer is ready..
- * This WRITE struct se_cmd (and all of its associated struct se_task's)
- * will be added to the struct se_device execution queue after its WRITE
- * data has arrived. (ie: It gets handled by the transport processing
- * thread a second time)
- */
- if (cmd->data_direction == DMA_TO_DEVICE) {
- transport_add_tasks_to_state_queue(cmd);
- return transport_generic_write_pending(cmd);
- }
- /*
- * Everything else but a WRITE, add the struct se_cmd's struct se_task's
- * to the execution queue.
- */
- transport_execute_tasks(cmd);
- return 0;
+ return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-out_fail:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+queue_full:
+	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+ transport_handle_queue_full(cmd, cmd->se_dev);
+ return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
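
Note the tightened fabric contract encoded above: ->write_pending() may fail only with -EAGAIN or -ENOMEM, which route through the queue-full path; anything else trips the WARN_ON(). A sketch of a conforming fabric callback, where demo_post_r2t() is a made-up transport helper and not part of this patch:

static int demo_write_pending(struct se_cmd *se_cmd)
{
	/* Solicit the WRITE payload from the initiator. */
	if (!demo_post_r2t(se_cmd))
		return -EAGAIN;	/* requeued via TRANSPORT_COMPLETE_QF_WP */

	return 0;		/* data will arrive asynchronously */
}
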
-/* transport_generic_process_write():
- *
- *
- */
-void transport_generic_process_write(struct se_cmd *cmd)
-{
- transport_execute_tasks(cmd);
-}
-EXPORT_SYMBOL(transport_generic_process_write);
-
static void transport_write_pending_qf(struct se_cmd *cmd)
{
int ret;
@@ -3916,63 +2331,36 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
}
}
-static int transport_generic_write_pending(struct se_cmd *cmd)
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->t_state = TRANSPORT_WRITE_PENDING;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- /*
- * Clear the se_cmd for WRITE_PENDING status in order to set
- * CMD_T_ACTIVE so that transport_generic_handle_data can be called
- * from HW target mode interrupt code. This is safe to be called
- * with transport_off=1 before the cmd->se_tfo->write_pending
- * because the se_cmd->se_lun pointer is not being cleared.
- */
- transport_cmd_check_stop(cmd, 1, 0);
-
- /*
- * Call the fabric write_pending function here to let the
- * frontend know that WRITE buffers are ready.
- */
- ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- else if (ret < 0)
- return ret;
-
- return 1;
-
-queue_full:
-	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
- cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev);
- return 0;
-}
+ int ret = 0;
-void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
-{
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
- if (wait_for_tasks && cmd->se_tmr_req)
+ if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
transport_wait_for_tasks(cmd);
- transport_release_cmd(cmd);
+ ret = transport_release_cmd(cmd);
} else {
if (wait_for_tasks)
transport_wait_for_tasks(cmd);
-
- core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
+ /*
+ * Handle WRITE failure case where transport_generic_new_cmd()
+ * has already added se_cmd to state_list, but fabric has
+ * failed command before I/O submission.
+ */
+ if (cmd->state_active) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ target_remove_from_state_list(cmd);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ }
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- transport_free_dev_tasks(cmd);
-
- transport_put_cmd(cmd);
+ ret = transport_put_cmd(cmd);
}
+ return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
@@ -3981,24 +2369,31 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
- bool ack_kref)
+int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ bool ack_kref)
{
unsigned long flags;
+ int ret = 0;
- kref_init(&se_cmd->cmd_kref);
/*
* Add a second kref if the fabric caller is expecting to handle
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref == true)
+ if (ack_kref) {
kref_get(&se_cmd->cmd_kref);
+ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess->sess_tearing_down) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
- se_cmd->check_release = 1;
+out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);
@@ -4006,21 +2401,19 @@ static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
- unsigned long flags;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- WARN_ON(1);
+ spin_unlock(&se_sess->sess_cmd_lock);
+ se_cmd->se_tfo->release_cmd(se_cmd);
return;
}
if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
complete(&se_cmd->cmd_wait_comp);
return;
}
list_del(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
se_cmd->se_tfo->release_cmd(se_cmd);
}
@@ -4031,24 +2424,31 @@ static void target_release_cmd_kref(struct kref *kref)
*/
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
- return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+ if (!se_sess) {
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return 1;
+ }
+ return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+ &se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_put_sess_cmd);
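
The switch to kref_put_spinlock_irqsave() is the subtle part of this hunk: the final put must take sess_cmd_lock before the refcount can reach zero, so target_release_cmd_kref() above is entered with the lock already held and only has to drop it. A sketch of the resulting pattern (not a quote of the kernel helper):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

struct demo_obj {
	struct kref kref;
	struct list_head node;
};

static void demo_release(struct kref *kref)
{
	/* Entered with demo_lock held by kref_put_spinlock_irqsave(). */
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	list_del(&obj->node);
	spin_unlock(&demo_lock);	/* the release callback drops the lock */
	kfree(obj);
}

static void demo_put(struct demo_obj *obj)
{
	/* The lock is taken only if this put drops the last reference. */
	kref_put_spinlock_irqsave(&obj->kref, demo_release, &demo_lock);
}
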
-/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
- * @se_sess: session to split
+/* target_sess_cmd_list_set_waiting - Flag all commands in
+ * sess_cmd_list to complete cmd_wait_comp. Set
+ * sess_tearing_down so no more commands are queued.
+ * @se_sess: session to flag
*/
-void target_splice_sess_cmd_list(struct se_session *se_sess)
+void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
struct se_cmd *se_cmd;
unsigned long flags;
- WARN_ON(!list_empty(&se_sess->sess_wait_list));
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
-
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess->sess_tearing_down) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ return;
+ }
se_sess->sess_tearing_down = 1;
-
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
@@ -4056,18 +2456,15 @@ void target_splice_sess_cmd_list(struct se_session *se_sess)
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
-EXPORT_SYMBOL(target_splice_sess_cmd_list);
+EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/* target_wait_for_sess_cmds - Wait for outstanding descriptors
* @se_sess: session to wait for active I/O
- * @wait_for_tasks: Make extra transport_wait_for_tasks call
*/
-void target_wait_for_sess_cmds(
- struct se_session *se_sess,
- int wait_for_tasks)
+void target_wait_for_sess_cmds(struct se_session *se_sess)
{
struct se_cmd *se_cmd, *tmp_cmd;
- bool rc = false;
+ unsigned long flags;
list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) {
@@ -4077,189 +2474,38 @@ void target_wait_for_sess_cmds(
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
- if (wait_for_tasks) {
- pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- rc = transport_wait_for_tasks(se_cmd);
-
- pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
- }
-
- if (!rc) {
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
- }
+ wait_for_completion(&se_cmd->cmd_wait_comp);
+ pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
se_cmd->se_tfo->release_cmd(se_cmd);
}
-}
-EXPORT_SYMBOL(target_wait_for_sess_cmds);
-
-/* transport_lun_wait_for_tasks():
- *
- * Called from ConfigFS context to stop the passed struct se_cmd to allow
- * a struct se_lun to be successfully shut down.
- */
-static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
-{
- unsigned long flags;
- int ret;
- /*
- * If the frontend has already requested this struct se_cmd to
- * be stopped, we can safely ignore this struct se_cmd.
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->transport_state & CMD_T_STOP) {
- cmd->transport_state &= ~CMD_T_LUN_STOP;
-
- pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
- cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_cmd_check_stop(cmd, 1, 0);
- return -EPERM;
- }
- cmd->transport_state |= CMD_T_LUN_FE_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
- ret = transport_stop_tasks_for_cmd(cmd);
-
- pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
- " %d\n", cmd, cmd->t_task_list_num, ret);
- if (!ret) {
- pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
- wait_for_completion(&cmd->transport_lun_stop_comp);
- pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
- }
- transport_remove_cmd_from_queue(cmd);
-
- return 0;
-}
-
-static void __transport_clear_lun_from_sessions(struct se_lun *lun)
-{
- struct se_cmd *cmd = NULL;
- unsigned long lun_flags, cmd_flags;
- /*
- * Do exception processing and return CHECK_CONDITION status to the
- * Initiator Port.
- */
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- while (!list_empty(&lun->lun_cmd_list)) {
- cmd = list_first_entry(&lun->lun_cmd_list,
- struct se_cmd, se_lun_node);
- list_del_init(&cmd->se_lun_node);
- /*
- * This will notify iscsi_target_transport.c:
- * transport_cmd_check_stop() that a LUN shutdown is in
- * progress for the iscsi_cmd_t.
- */
- spin_lock(&cmd->t_state_lock);
- pr_debug("SE_LUN[%d] - Setting cmd->transport"
- "_lun_stop for ITT: 0x%08x\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
- cmd->transport_state |= CMD_T_LUN_STOP;
- spin_unlock(&cmd->t_state_lock);
-
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-
- if (!cmd->se_lun) {
- pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
- BUG();
- }
- /*
- * If the Storage engine still owns the iscsi_cmd_t, determine
- * and/or stop its context.
- */
- pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
- "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
-
- if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
-
- pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
- "_wait_for_tasks(): SUCCESS\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
-
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- goto check_cond;
- }
- cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
- transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-
- transport_free_dev_tasks(cmd);
- /*
- * The Storage engine stopped this struct se_cmd before it was
-	 * sent to the fabric frontend for delivery back to the
-	 * Initiator Node. Return this SCSI CDB back with a
-	 * CHECK_CONDITION status.
- */
-check_cond:
- transport_send_check_condition_and_sense(cmd,
- TCM_NON_EXISTENT_LUN, 0);
- /*
- * If the fabric frontend is waiting for this iscsi_cmd_t to
- * be released, notify the waiting thread now that LU has
- * finished accessing it.
- */
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
- pr_debug("SE_LUN[%d] - Detected FE stop for"
- " struct se_cmd: %p ITT: 0x%08x\n",
- lun->unpacked_lun,
- cmd, cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irqrestore(&cmd->t_state_lock,
- cmd_flags);
- transport_cmd_check_stop(cmd, 1, 0);
- complete(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
- pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
- lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ WARN_ON(!list_empty(&se_sess->sess_cmd_list));
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- }
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}
+EXPORT_SYMBOL(target_wait_for_sess_cmds);
-static int transport_clear_lun_thread(void *p)
+static int transport_clear_lun_ref_thread(void *p)
{
struct se_lun *lun = p;
- __transport_clear_lun_from_sessions(lun);
+ percpu_ref_kill(&lun->lun_ref);
+
+ wait_for_completion(&lun->lun_ref_comp);
complete(&lun->lun_shutdown_comp);
return 0;
}
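
The thread above relies on the percpu_ref pattern: percpu_ref_kill() switches the counter to atomic mode and drops the initial reference, and lun_ref_comp is completed from the ref's release callback once every in-flight I/O reference is gone. The per-command side would look roughly like this; the helper names are assumptions for illustration, not part of this patch:

static int demo_lun_ref_get(struct se_lun *lun)
{
	/* Fails once percpu_ref_kill() has run, fencing off new I/O. */
	if (!percpu_ref_tryget_live(&lun->lun_ref))
		return -ENODEV;
	return 0;
}

static void demo_lun_ref_put(struct se_lun *lun)
{
	/* The last put triggers the release that completes lun_ref_comp. */
	percpu_ref_put(&lun->lun_ref);
}
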
-int transport_clear_lun_from_sessions(struct se_lun *lun)
+int transport_clear_lun_ref(struct se_lun *lun)
{
struct task_struct *kt;
- kt = kthread_run(transport_clear_lun_thread, lun,
+ kt = kthread_run(transport_clear_lun_ref_thread, lun,
"tcm_cl_%u", lun->unpacked_lun);
if (IS_ERR(kt)) {
pr_err("Unable to start clear_lun thread\n");
@@ -4282,58 +2528,19 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
- /*
- * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
- * has been set in transport_set_supported_SAM_opcode().
- */
- if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
- /*
- * If we are already stopped due to an external event (ie: LUN shutdown)
- * sleep until the connection can have the passed struct se_cmd back.
- * The cmd->transport_lun_stopped_sem will be upped by
- * transport_clear_lun_from_sessions() once the ConfigFS context caller
- * has completed its operation on the struct se_cmd.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("wait_for_tasks: Stopping"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe"
- "_stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
- /*
- * There is a special case for WRITES where a FE exception +
- * LUN shutdown means ConfigFS context is still sleeping on
- * transport_lun_stop_comp in transport_lun_wait_for_tasks().
- * We go ahead and up transport_lun_stop_comp just to be sure
- * here.
- */
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->transport_lun_stop_comp);
- wait_for_completion(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
-
- transport_all_task_dev_remove_state(cmd);
- /*
- * At this point, the frontend who was the originator of this
- * struct se_cmd, now owns the structure and can be released through
- * normal means below.
- */
- pr_debug("wait_for_tasks: Stopped"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
- "stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
- cmd->transport_state &= ~CMD_T_LUN_STOP;
- }
-
- if (!(cmd->transport_state & CMD_T_ACTIVE) ||
- (cmd->transport_state & CMD_T_ABORTED)) {
+ if (!(cmd->transport_state & CMD_T_ACTIVE)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
@@ -4347,14 +2554,12 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
wait_for_completion(&cmd->t_transport_stop_comp);
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
- pr_debug("wait_for_tasks: Stopped wait_for_compltion("
+ pr_debug("wait_for_tasks: Stopped wait_for_completion("
"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
cmd->se_tfo->get_task_tag(cmd));
@@ -4375,25 +2580,25 @@ static int transport_get_sense_codes(
return 0;
}
-static int transport_set_sense_codes(
- struct se_cmd *cmd,
- u8 asc,
- u8 ascq)
+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
{
- cmd->scsi_asc = asc;
- cmd->scsi_ascq = ascq;
+ /* Place failed LBA in sense data information descriptor 0. */
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+ buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+ buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+ buffer[SPC_VALIDITY_OFFSET] = 0x80;
- return 0;
+ /* Descriptor Information: failing sector */
+ put_unaligned_be64(bad_sector, &buffer[12]);
}
-int transport_send_check_condition_and_sense(
- struct se_cmd *cmd,
- u8 reason,
- int from_transport)
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+ sense_reason_t reason, int from_transport)
{
unsigned char *buffer = cmd->sense_buffer;
unsigned long flags;
- int offset;
u8 asc = 0, ascq = 0;
spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -4409,157 +2614,225 @@ int transport_send_check_condition_and_sense(
if (!from_transport)
cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- /*
- * Data Segment and SenseLength of the fabric response PDU.
- *
- * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
- * from include/scsi/scsi_cmnd.h
- */
- offset = cmd->se_tfo->set_fabric_sense_len(cmd,
- TRANSPORT_SENSE_BUFFER);
+
/*
* Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
* SENSE KEY values from include/scsi/scsi.h
*/
switch (reason) {
+ case TCM_NO_SENSE:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* Not Ready */
+ buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ /* NO ADDITIONAL SENSE INFORMATION */
+ buffer[SPC_ASC_KEY_OFFSET] = 0;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0;
+ break;
case TCM_NON_EXISTENT_LUN:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT NOT SUPPORTED */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x25;
break;
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_SECTOR_COUNT_TOO_MANY:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID COMMAND OPERATION CODE */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x20;
break;
case TCM_UNKNOWN_MODE_PAGE:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x24;
break;
case TCM_CHECK_CONDITION_ABORT_CMD:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* BUS DEVICE RESET FUNCTION OCCURRED */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
- buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x29;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
break;
case TCM_INCORRECT_AMOUNT_OF_DATA:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
/* NOT ENOUGH UNSOLICITED DATA */
- buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
break;
case TCM_INVALID_CDB_FIELD:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x24;
break;
case TCM_INVALID_PARAMETER_LIST:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN PARAMETER LIST */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x26;
+ break;
+ case TCM_PARAMETER_LIST_LENGTH_ERROR:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* PARAMETER LIST LENGTH ERROR */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
break;
case TCM_UNEXPECTED_UNSOLICITED_DATA:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
/* UNEXPECTED_UNSOLICITED_DATA */
- buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
break;
case TCM_SERVICE_CRC_ERROR:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* PROTOCOL SERVICE CRC ERROR */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x47;
/* N/A */
- buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
break;
case TCM_SNACK_REJECTED:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* READ ERROR */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x11;
/* FAILED RETRANSMISSION REQUEST */
- buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
break;
case TCM_WRITE_PROTECTED:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* DATA PROTECT */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+ buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
/* WRITE PROTECTED */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x27;
+ break;
+ case TCM_ADDRESS_OUT_OF_RANGE:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x21;
break;
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* UNIT ATTENTION */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
- buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
- buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+ buffer[SPC_ASC_KEY_OFFSET] = asc;
+ buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
break;
case TCM_CHECK_CONDITION_NOT_READY:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* Not Ready */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
transport_get_sense_codes(cmd, &asc, &ascq);
- buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
- buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+ buffer[SPC_ASC_KEY_OFFSET] = asc;
+ buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
+ break;
+ case TCM_MISCOMPARE_VERIFY:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
+ /* MISCOMPARE DURING VERIFY OPERATION */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
+ break;
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK GUARD CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+ transport_err_sector_info(buffer, cmd->bad_sector);
break;
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
default:
/* CURRENT ERROR */
- buffer[offset] = 0x70;
- buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /*
+ * Returning ILLEGAL REQUEST would cause immediate IO errors on
+ * Solaris initiators. Returning NOT READY instead means the
+ * operations will be retried a finite number of times and we
+ * can survive intermittent errors.
+ */
+ buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
/* LOGICAL UNIT COMMUNICATION FAILURE */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
+ buffer[SPC_ASC_KEY_OFFSET] = 0x08;
break;
}
/*
@@ -4570,32 +2843,35 @@ int transport_send_check_condition_and_sense(
* Automatically padded, this value is encoded in the fabric's
* data_length response PDU containing the SCSI defined sense data.
*/
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
after_reason:
+ trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
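
All of the cases above build SPC-3 fixed-format sense data; the offsets behind the SPC_*_OFFSET constants are key=2, additional-length=7, ASC=12, ASCQ=13. A standalone sketch of the layout for one representative case:

#include <stdio.h>

static void demo_build_sense(unsigned char *b, unsigned char key,
			     unsigned char asc, unsigned char ascq)
{
	b[0] = 0x70;	/* current error, fixed format */
	b[2] = key;	/* sense key */
	b[7] = 10;	/* additional sense length: bytes 8..17 */
	b[12] = asc;
	b[13] = ascq;
}

int main(void)
{
	unsigned char sense[18] = { 0 };

	/* ILLEGAL REQUEST (0x05) / INVALID FIELD IN CDB (0x24/0x00) */
	demo_build_sense(sense, 0x05, 0x24, 0x00);
	printf("key=0x%02x asc=0x%02x ascq=0x%02x\n",
	       sense[2], sense[12], sense[13]);
	return 0;
}
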
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
- int ret = 0;
+ if (!(cmd->transport_state & CMD_T_ABORTED))
+ return 0;
- if (cmd->transport_state & CMD_T_ABORTED) {
- if (!send_status ||
- (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
- return 1;
-#if 0
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
- " status for CDB: 0x%02x ITT: 0x%08x\n",
- cmd->t_task_cdb[0],
- cmd->se_tfo->get_task_tag(cmd));
-#endif
- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- cmd->se_tfo->queue_status(cmd);
- ret = 1;
- }
- return ret;
+ /*
+ * If cmd has been aborted but either no status is to be sent or it has
+ * already been sent, just return
+ */
+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+ return 1;
+
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+ cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+
+ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ trace_target_cmd_complete(cmd);
+ cmd->se_tfo->queue_status(cmd);
+
+ return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);
@@ -4604,7 +2880,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
@@ -4619,27 +2895,33 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
cmd->transport_state |= CMD_T_ABORTED;
- smp_mb__after_atomic_inc();
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ smp_mb__after_atomic();
+ return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-#if 0
+
+ transport_lun_remove_cmd(cmd);
+
pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
" ITT: 0x%08x\n", cmd->t_task_cdb[0],
cmd->se_tfo->get_task_tag(cmd));
-#endif
+
+ trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
}
-static int transport_generic_do_tmr(struct se_cmd *cmd)
+static void target_tmr_work(struct work_struct *work)
{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
switch (tmr->function) {
case TMR_ABORT_TASK:
- tmr->response = TMR_FUNCTION_REJECTED;
+ core_tmr_abort_task(dev, tmr, cmd->se_sess);
break;
case TMR_ABORT_TASK_SET:
case TMR_CLEAR_ACA:
@@ -4668,80 +2950,19 @@ static int transport_generic_do_tmr(struct se_cmd *cmd)
cmd->se_tfo->queue_tm_rsp(cmd);
transport_cmd_check_stop_to_fabric(cmd);
- return 0;
}
-/* transport_processing_thread():
- *
- *
- */
-static int transport_processing_thread(void *param)
+int transport_generic_handle_tmr(
+ struct se_cmd *cmd)
{
- int ret;
- struct se_cmd *cmd;
- struct se_device *dev = param;
-
- while (!kthread_should_stop()) {
- ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
- atomic_read(&dev->dev_queue_obj.queue_cnt) ||
- kthread_should_stop());
- if (ret < 0)
- goto out;
-
-get_cmd:
- cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
- if (!cmd)
- continue;
-
- switch (cmd->t_state) {
- case TRANSPORT_NEW_CMD:
- BUG();
- break;
- case TRANSPORT_NEW_CMD_MAP:
- if (!cmd->se_tfo->new_cmd_map) {
- pr_err("cmd->se_tfo->new_cmd_map is"
- " NULL for TRANSPORT_NEW_CMD_MAP\n");
- BUG();
- }
- ret = cmd->se_tfo->new_cmd_map(cmd);
- if (ret < 0) {
- transport_generic_request_failure(cmd);
- break;
- }
- ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- transport_generic_request_failure(cmd);
- break;
- }
- break;
- case TRANSPORT_PROCESS_WRITE:
- transport_generic_process_write(cmd);
- break;
- case TRANSPORT_PROCESS_TMR:
- transport_generic_do_tmr(cmd);
- break;
- case TRANSPORT_COMPLETE_QF_WP:
- transport_write_pending_qf(cmd);
- break;
- case TRANSPORT_COMPLETE_QF_OK:
- transport_complete_qf(cmd);
- break;
- default:
- pr_err("Unknown t_state: %d for ITT: 0x%08x "
- "i_state: %d on SE LUN: %u\n",
- cmd->t_state,
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd),
- cmd->se_lun->unpacked_lun);
- BUG();
- }
+ unsigned long flags;
- goto get_cmd;
- }
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->transport_state |= CMD_T_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-out:
- WARN_ON(!list_empty(&dev->state_task_list));
- WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
- dev->process_thread = NULL;
+ INIT_WORK(&cmd->work, target_tmr_work);
+ queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
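[Editor's note] With transport_generic_handle_tmr() now deferring to the per-device tmr_wq, a fabric that has already built its se_cmd + se_tmr_req simply hands off and waits for ->queue_tm_rsp(). Sketch only; newer fabrics (see the tcm_fc conversion below) use the higher-level target_submit_tmr() instead:

/* Sketch: hand a prepared TMR to target core's workqueue path. */
static void example_fabric_dispatch_tmr(struct se_cmd *se_cmd)
{
	/* Marks CMD_T_ACTIVE and queues target_tmr_work() on tmr_wq */
	transport_generic_handle_tmr(se_cmd);
}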
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 3e12f6bcfa1..101858e245b 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -3,8 +3,7 @@
*
* This file contains logic for SPC-3 Unit Attention emulation
*
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -38,9 +37,8 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-int core_scsi3_ua_check(
- struct se_cmd *cmd,
- unsigned char *cdb)
+sense_reason_t
+target_scsi3_ua_check(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
@@ -53,7 +51,7 @@ int core_scsi3_ua_check(
if (!nacl)
return 0;
- deve = &nacl->device_list[cmd->orig_fe_lun];
+ deve = nacl->device_list[cmd->orig_fe_lun];
if (!atomic_read(&deve->ua_count))
return 0;
/*
@@ -71,16 +69,14 @@ int core_scsi3_ua_check(
* was received, then the device server shall process the command
* and either:
*/
- switch (cdb[0]) {
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case REQUEST_SENSE:
return 0;
default:
- return -EINVAL;
+ return TCM_CHECK_CONDITION_UNIT_ATTENTION;
}
-
- return -EINVAL;
}
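[Editor's note] Returning a sense_reason_t lets the UA check feed straight into the common CHECK CONDITION generation path instead of a raw errno. A sketch of the call-site shape (the wrapper name is hypothetical; TCM_NO_SENSE is the zero "proceed" value used elsewhere in this series):

/* Sketch: sense_reason_t propagation from the UA check. */
static sense_reason_t example_check_cmd(struct se_cmd *cmd)
{
	sense_reason_t ret;

	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;	/* TCM_CHECK_CONDITION_UNIT_ATTENTION */

	return TCM_NO_SENSE;	/* 0: proceed with execution */
}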
int core_scsi3_ua_allocate(
@@ -102,7 +98,6 @@ int core_scsi3_ua_allocate(
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
ua->ua_nacl = nacl;
@@ -110,7 +105,7 @@ int core_scsi3_ua_allocate(
ua->ua_ascq = ascq;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[unpacked_lun];
+ deve = nacl->device_list[unpacked_lun];
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
@@ -167,7 +162,7 @@ int core_scsi3_ua_allocate(
spin_unlock_irq(&nacl->device_list_lock);
atomic_inc(&deve->ua_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -180,7 +175,7 @@ int core_scsi3_ua_allocate(
asc, ascq);
atomic_inc(&deve->ua_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
return 0;
}
@@ -195,7 +190,7 @@ void core_scsi3_ua_release_all(
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&deve->ua_lock);
}
@@ -220,7 +215,7 @@ void core_scsi3_ua_for_check_condition(
return;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[cmd->orig_fe_lun];
+ deve = nacl->device_list[cmd->orig_fe_lun];
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return;
@@ -237,7 +232,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
+ if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -256,7 +251,7 @@ void core_scsi3_ua_for_check_condition(
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
@@ -265,8 +260,8 @@ void core_scsi3_ua_for_check_condition(
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+ (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}
@@ -289,7 +284,7 @@ int core_scsi3_ua_clear_for_request_sense(
return -EINVAL;
spin_lock_irq(&nacl->device_list_lock);
- deve = &nacl->device_list[cmd->orig_fe_lun];
+ deve = nacl->device_list[cmd->orig_fe_lun];
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return -EPERM;
@@ -315,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense(
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 6e6b03460a1..be912b36daa 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -19,14 +19,14 @@
#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
-#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED 0x08
#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
extern struct kmem_cache *se_ua_cache;
-extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
new file mode 100644
index 00000000000..e9186cdf35e
--- /dev/null
+++ b/drivers/target/target_core_xcopy.c
@@ -0,0 +1,1081 @@
+/*******************************************************************************
+ * Filename: target_core_xcopy.c
+ *
+ * This file contains support for SPC-4 Extended-Copy offload with generic
+ * TCM backends.
+ *
+ * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
+ *
+ * Author:
+ * Nicholas A. Bellinger <nab@daterainc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+#include "target_core_xcopy.h"
+
+static struct workqueue_struct *xcopy_wq = NULL;
+/*
+ * From target_core_device.c
+ */
+extern struct mutex g_device_mutex;
+extern struct list_head g_device_list;
+/*
+ * From target_core_configfs.c
+ */
+extern struct configfs_subsystem *target_core_subsystem[];
+
+static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
+{
+ int off = 0;
+
+ buf[off++] = (0x6 << 4);
+ buf[off++] = 0x01;
+ buf[off++] = 0x40;
+ buf[off] = (0x5 << 4);
+
+ spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+ return 0;
+}
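[Editor's note] The generated identifier follows the NAA type-6 (IEEE Registered Extended) layout that target core also reports via VPD page 0x83: NAA nibble, OUI bytes, then a vendor-specific field filled by spc_parse_naa_6h_vendor_specific(). A hedged user-space check of the fixed prefix, with byte values taken from the function above:

#include <stdbool.h>

/* Sketch: match the fixed prefix emitted by target_xcopy_gen_naa_ieee(). */
static bool looks_like_tcm_naa6(const unsigned char wwn[16])
{
	return (wwn[0] >> 4) == 0x6 &&	/* NAA IEEE Registered Extended */
	       wwn[1] == 0x01 && wwn[2] == 0x40 &&	/* OUI bytes */
	       (wwn[3] >> 4) == 0x5;	/* high nibble of vendor field */
}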
+
+static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+ bool src)
+{
+ struct se_device *se_dev;
+ struct configfs_subsystem *subsys = target_core_subsystem[0];
+ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+ int rc;
+
+ if (src)
+ dev_wwn = &xop->dst_tid_wwn[0];
+ else
+ dev_wwn = &xop->src_tid_wwn[0];
+
+ mutex_lock(&g_device_mutex);
+ list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+
+ if (!se_dev->dev_attrib.emulate_3pc)
+ continue;
+
+ memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+ if (rc != 0)
+ continue;
+
+ if (src) {
+ xop->dst_dev = se_dev;
+ pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
+ " se_dev\n", xop->dst_dev);
+ } else {
+ xop->src_dev = se_dev;
+ pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
+ " se_dev\n", xop->src_dev);
+ }
+
+ rc = configfs_depend_item(subsys,
+ &se_dev->dev_group.cg_item);
+ if (rc != 0) {
+ pr_err("configfs_depend_item attempt failed:"
+ " %d for se_dev: %p\n", rc, se_dev);
+ mutex_unlock(&g_device_mutex);
+ return rc;
+ }
+
+ pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
+ " se_dev->se_dev_group: %p\n", subsys, se_dev,
+ &se_dev->dev_group);
+
+ mutex_unlock(&g_device_mutex);
+ return 0;
+ }
+ mutex_unlock(&g_device_mutex);
+
+ pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
+}
+
+static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+ unsigned char *p, bool src)
+{
+ unsigned char *desc = p;
+ unsigned short ript;
+ u8 desig_len;
+ /*
+ * Extract RELATIVE INITIATOR PORT IDENTIFIER
+ */
+ ript = get_unaligned_be16(&desc[2]);
+ pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
+ /*
+ * Check for supported code set, association, and designator type
+ */
+ if ((desc[4] & 0x0f) != 0x1) {
+ pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
+ return -EINVAL;
+ }
+ if ((desc[5] & 0x30) != 0x00) {
+ pr_err("XCOPY 0xe4: association other than LUN not supported\n");
+ return -EINVAL;
+ }
+ if ((desc[5] & 0x0f) != 0x3) {
+ pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
+ (desc[5] & 0x0f));
+ return -EINVAL;
+ }
+ /*
+ * Check for matching 16 byte length for NAA IEEE Registered Extended
+ * Assigned designator
+ */
+ desig_len = desc[7];
+ if (desig_len != 16) {
+ pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
+ return -EINVAL;
+ }
+ pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
+ /*
+ * Check for NAA IEEE Registered Extended Assigned header..
+ */
+ if ((desc[8] & 0xf0) != 0x60) {
+ pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
+ (desc[8] & 0xf0));
+ return -EINVAL;
+ }
+
+ if (src) {
+ memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+ /*
+ * Determine if the source designator matches the local device
+ */
+ if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
+ XCOPY_NAA_IEEE_REGEX_LEN)) {
+ xop->op_origin = XCOL_SOURCE_RECV_OP;
+ xop->src_dev = se_cmd->se_dev;
+ pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
+ " received xop\n", xop->src_dev);
+ }
+ } else {
+ memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+ /*
+ * Determine if the destination designator matches the local device
+ */
+ if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
+ XCOPY_NAA_IEEE_REGEX_LEN)) {
+ xop->op_origin = XCOL_DEST_RECV_OP;
+ xop->dst_dev = se_cmd->se_dev;
+ pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
+ " received xop\n", xop->dst_dev);
+ }
+ }
+
+ return 0;
+}
+
+static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ struct xcopy_op *xop, unsigned char *p,
+ unsigned short tdll)
+{
+ struct se_device *local_dev = se_cmd->se_dev;
+ unsigned char *desc = p;
+ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+ unsigned short start = 0;
+ bool src = true;
+
+ if (offset != 0) {
+ pr_err("XCOPY target descriptor list length is not"
+ " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+ return -EINVAL;
+ }
+ if (tdll > 64) {
+ pr_err("XCOPY target descriptor supports a maximum"
+ " two src/dest descriptors, tdll: %hu too large..\n", tdll);
+ return -EINVAL;
+ }
+ /*
+ * Generate an IEEE Registered Extended designator based upon the
+ * se_device the XCOPY was received upon..
+ */
+ memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
+
+ while (start < tdll) {
+ /*
+ * Check for the 0xE4 identification target descriptor type,
+ * using VPD 0x83 WWPN matching..
+ */
+ switch (desc[0]) {
+ case 0xe4:
+ rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
+ &desc[0], src);
+ if (rc != 0)
+ goto out;
+ /*
+ * Assume target descriptors are in source -> destination order..
+ */
+ src = !src;
+ start += XCOPY_TARGET_DESC_LEN;
+ desc += XCOPY_TARGET_DESC_LEN;
+ ret++;
+ break;
+ default:
+ pr_err("XCOPY unsupported descriptor type code:"
+ " 0x%02x\n", desc[0]);
+ goto out;
+ }
+ }
+
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+ rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
+ else
+ rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+
+ if (rc < 0)
+ goto out;
+
+ pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
+ xop->src_dev, &xop->src_tid_wwn[0]);
+ pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
+ xop->dst_dev, &xop->dst_tid_wwn[0]);
+
+ return ret;
+
+out:
+ return -EINVAL;
+}
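[Editor's note] Each 0xE4 identification descriptor parsed above occupies XCOPY_TARGET_DESC_LEN (32) bytes and embeds a VPD 0x83-style NAA designator. A hedged user-space sketch of composing one so it passes the code set / association / designator-type checks in target_xcopy_parse_tiddesc_e4():

#include <string.h>

#define XCOPY_TARGET_DESC_LEN	32

/* Sketch: 0xE4 identification CSCD descriptor for a 16-byte NAA WWN. */
static void build_e4_desc(unsigned char desc[XCOPY_TARGET_DESC_LEN],
			  const unsigned char naa_wwn[16])
{
	memset(desc, 0, XCOPY_TARGET_DESC_LEN);
	desc[0] = 0xe4;		/* identification descriptor type code */
	desc[4] = 0x01;		/* code set: binary */
	desc[5] = 0x03;		/* association: LUN, designator type: NAA */
	desc[7] = 16;		/* designator length */
	memcpy(&desc[8], naa_wwn, 16);	/* must start 0x6x (NAA type 6) */
}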
+
+static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
+ unsigned char *p)
+{
+ unsigned char *desc = p;
+ int dc = (desc[1] & 0x02);
+ unsigned short desc_len;
+
+ desc_len = get_unaligned_be16(&desc[2]);
+ if (desc_len != 0x18) {
+ pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
+ " %hu\n", desc_len);
+ return -EINVAL;
+ }
+
+ xop->stdi = get_unaligned_be16(&desc[4]);
+ xop->dtdi = get_unaligned_be16(&desc[6]);
+ pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
+ desc_len, xop->stdi, xop->dtdi, dc);
+
+ xop->nolb = get_unaligned_be16(&desc[10]);
+ xop->src_lba = get_unaligned_be64(&desc[12]);
+ xop->dst_lba = get_unaligned_be64(&desc[20]);
+ pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
+ xop->nolb, (unsigned long long)xop->src_lba,
+ (unsigned long long)xop->dst_lba);
+
+ if (dc != 0) {
+ xop->dbl = (desc[29] & 0xff) << 16;
+ xop->dbl |= (desc[30] & 0xff) << 8;
+ xop->dbl |= desc[31] & 0xff;
+
+ pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
+ }
+ return 0;
+}
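[Editor's note] The block-to-block segment descriptor decoded above is a fixed 28-byte (XCOPY_SEGMENT_DESC_LEN) layout with DESCRIPTOR LENGTH 0x18. A hedged sketch of producing one with the same field offsets:

#include <string.h>

#define XCOPY_SEGMENT_DESC_LEN	28

/* Sketch: big-endian stores standing in for put_unaligned_be{16,64}(). */
static void put_be16(unsigned char *p, unsigned short v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

static void put_be64(unsigned char *p, unsigned long long v)
{
	int i;

	for (i = 0; i < 8; i++)
		p[i] = v >> (56 - 8 * i);
}

/* Sketch: type 0x02 block -> block segment descriptor. */
static void build_seg_desc_02(unsigned char desc[XCOPY_SEGMENT_DESC_LEN],
			      unsigned short stdi, unsigned short dtdi,
			      unsigned short nolb,
			      unsigned long long src_lba,
			      unsigned long long dst_lba)
{
	memset(desc, 0, XCOPY_SEGMENT_DESC_LEN);
	desc[0] = 0x02;			/* copy block -> block */
	put_be16(&desc[2], 0x18);	/* descriptor length */
	put_be16(&desc[4], stdi);	/* source CSCD descriptor index */
	put_be16(&desc[6], dtdi);	/* destination CSCD descriptor index */
	put_be16(&desc[10], nolb);	/* number of logical blocks */
	put_be64(&desc[12], src_lba);
	put_be64(&desc[20], dst_lba);
}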
+
+static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
+ struct xcopy_op *xop, unsigned char *p,
+ unsigned int sdll)
+{
+ unsigned char *desc = p;
+ unsigned int start = 0;
+ int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+
+ if (offset != 0) {
+ pr_err("XCOPY segment descriptor list length is not"
+ " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+ return -EINVAL;
+ }
+
+ while (start < sdll) {
+ /*
+ * Check segment descriptor type code for block -> block
+ */
+ switch (desc[0]) {
+ case 0x02:
+ rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
+ if (rc < 0)
+ goto out;
+
+ ret++;
+ start += XCOPY_SEGMENT_DESC_LEN;
+ desc += XCOPY_SEGMENT_DESC_LEN;
+ break;
+ default:
+ pr_err("XCOPY unspported segment descriptor"
+ "type: 0x%02x\n", desc[0]);
+ goto out;
+ }
+ }
+
+ return ret;
+
+out:
+ return -EINVAL;
+}
+
+/*
+ * Start xcopy_pt ops
+ */
+
+struct xcopy_pt_cmd {
+ bool remote_port;
+ struct se_cmd se_cmd;
+ struct xcopy_op *xcopy_op;
+ struct completion xpt_passthrough_sem;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+};
+
+static struct se_port xcopy_pt_port;
+static struct se_portal_group xcopy_pt_tpg;
+static struct se_session xcopy_pt_sess;
+static struct se_node_acl xcopy_pt_nacl;
+
+static char *xcopy_pt_get_fabric_name(void)
+{
+ return "xcopy-pt";
+}
+
+static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
+{
+ struct configfs_subsystem *subsys = target_core_subsystem[0];
+ struct se_device *remote_dev;
+
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+ remote_dev = xop->dst_dev;
+ else
+ remote_dev = xop->src_dev;
+
+ pr_debug("Calling configfs_undepend_item for subsys: %p"
+ " remote_dev: %p remote_dev->dev_group: %p\n",
+ subsys, remote_dev, &remote_dev->dev_group.cg_item);
+
+ configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
+}
+
+static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+{
+ struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+ struct xcopy_pt_cmd, se_cmd);
+
+ kfree(xpt_cmd);
+}
+
+static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+ struct xcopy_pt_cmd, se_cmd);
+
+ complete(&xpt_cmd->xpt_passthrough_sem);
+ return 0;
+}
+
+static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static struct target_core_fabric_ops xcopy_pt_tfo = {
+ .get_fabric_name = xcopy_pt_get_fabric_name,
+ .get_task_tag = xcopy_pt_get_tag,
+ .get_cmd_state = xcopy_pt_get_cmd_state,
+ .release_cmd = xcopy_pt_release_cmd,
+ .check_stop_free = xcopy_pt_check_stop_free,
+ .write_pending = xcopy_pt_write_pending,
+ .write_pending_status = xcopy_pt_write_pending_status,
+ .queue_data_in = xcopy_pt_queue_data_in,
+ .queue_status = xcopy_pt_queue_status,
+};
+
+/*
+ * End xcopy_pt_ops
+ */
+
+int target_xcopy_setup_pt(void)
+{
+ xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+ if (!xcopy_wq) {
+ pr_err("Unable to allocate xcopy_wq\n");
+ return -ENOMEM;
+ }
+
+ memset(&xcopy_pt_port, 0, sizeof(struct se_port));
+ INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
+ INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
+ mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);
+
+ memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
+ INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
+ INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
+ INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
+
+ xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
+ xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
+
+ memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
+ INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
+ INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
+ memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+ INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
+ INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
+
+ xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+ xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+
+ xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
+ xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+
+ return 0;
+}
+
+void target_xcopy_release_pt(void)
+{
+ if (xcopy_wq)
+ destroy_workqueue(xcopy_wq);
+}
+
+static void target_xcopy_setup_pt_port(
+ struct xcopy_pt_cmd *xpt_cmd,
+ struct xcopy_op *xop,
+ bool remote_port)
+{
+ struct se_cmd *ec_cmd = xop->xop_se_cmd;
+ struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;
+
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
+ /*
+ * Honor destination port reservations for X-COPY PUSH emulation
+ * when the CDB is received on the local source port, reading
+ * blocks to WRITE on the remote destination port.
+ */
+ if (remote_port) {
+ xpt_cmd->remote_port = remote_port;
+ pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
+ pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
+ " cmd->se_lun->lun_sep for X-COPY data PUSH\n",
+ pt_cmd->se_lun->lun_sep);
+ } else {
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pt_cmd->se_dev = ec_cmd->se_dev;
+
+ pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
+ " %p\n", pt_cmd->se_dev);
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
+ pt_cmd->se_lun);
+ }
+ } else {
+ /*
+ * Honor source port reservation for X-COPY PULL emulation
+ * when the CDB is received on the local destination port, reading
+ * blocks from the remote source port to WRITE on the local
+ * destination port.
+ */
+ if (remote_port) {
+ xpt_cmd->remote_port = remote_port;
+ pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
+ pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
+ " cmd->se_lun->lun_sep for X-COPY data PULL\n",
+ pt_cmd->se_lun->lun_sep);
+ } else {
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pt_cmd->se_dev = ec_cmd->se_dev;
+
+ pr_debug("Honoring local DST port from ec_cmd->se_dev:"
+ " %p\n", pt_cmd->se_dev);
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
+ pt_cmd->se_lun);
+ }
+ }
+}
+
+static int target_xcopy_init_pt_lun(
+ struct xcopy_pt_cmd *xpt_cmd,
+ struct xcopy_op *xop,
+ struct se_device *se_dev,
+ struct se_cmd *pt_cmd,
+ bool remote_port)
+{
+ /*
+ * Don't allocate + init a pt_cmd->se_lun if honoring the local port for
+ * reservations. The pt_cmd->se_lun pointer will be set up from within
+ * target_xcopy_setup_pt_port()
+ */
+ if (!remote_port) {
+ pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
+ return 0;
+ }
+
+ pt_cmd->se_lun = &se_dev->xcopy_lun;
+ pt_cmd->se_dev = se_dev;
+
+ pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
+ pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
+
+ pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
+ pt_cmd->se_lun->lun_se_dev);
+
+ return 0;
+}
+
+static int target_xcopy_setup_pt_cmd(
+ struct xcopy_pt_cmd *xpt_cmd,
+ struct xcopy_op *xop,
+ struct se_device *se_dev,
+ unsigned char *cdb,
+ bool remote_port,
+ bool alloc_mem)
+{
+ struct se_cmd *cmd = &xpt_cmd->se_cmd;
+ sense_reason_t sense_rc;
+ int ret = 0, rc;
+ /*
+ * Setup LUN+port to honor reservations based upon xop->op_origin for
+ * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
+ */
+ rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ xpt_cmd->xcopy_op = xop;
+ target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
+
+ sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
+ if (sense_rc) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (alloc_mem) {
+ rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
+ cmd->data_length, false);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ /*
+ * Set this bit so that transport_free_pages() allows the
+ * caller to release SGLs + physical memory allocated by
+ * transport_generic_get_mem()..
+ */
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ } else {
+ /*
+ * Here the previously allocated SGLs for the internal READ
+ * are mapped zero-copy to the internal WRITE.
+ */
+ sense_rc = transport_generic_map_mem_to_cmd(cmd,
+ xop->xop_data_sg, xop->xop_data_nents,
+ NULL, 0);
+ if (sense_rc) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
+ " %u\n", cmd->t_data_sg, cmd->t_data_nents);
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
+{
+ struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
+ sense_reason_t sense_rc;
+
+ sense_rc = transport_generic_new_cmd(se_cmd);
+ if (sense_rc)
+ return -EINVAL;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ target_execute_cmd(se_cmd);
+
+ wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
+
+ pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
+ se_cmd->scsi_status);
+
+ return (se_cmd->scsi_status) ? -EINVAL : 0;
+}
+
+static int target_xcopy_read_source(
+ struct se_cmd *ec_cmd,
+ struct xcopy_op *xop,
+ struct se_device *src_dev,
+ sector_t src_lba,
+ u32 src_sectors)
+{
+ struct xcopy_pt_cmd *xpt_cmd;
+ struct se_cmd *se_cmd;
+ u32 length = (src_sectors * src_dev->dev_attrib.block_size);
+ int rc;
+ unsigned char cdb[16];
+ bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
+
+ xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+ if (!xpt_cmd) {
+ pr_err("Unable to allocate xcopy_pt_cmd\n");
+ return -ENOMEM;
+ }
+ init_completion(&xpt_cmd->xpt_passthrough_sem);
+ se_cmd = &xpt_cmd->se_cmd;
+
+ memset(&cdb[0], 0, 16);
+ cdb[0] = READ_16;
+ put_unaligned_be64(src_lba, &cdb[2]);
+ put_unaligned_be32(src_sectors, &cdb[10]);
+ pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
+ (unsigned long long)src_lba, src_sectors, length);
+
+ transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+ DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
+ xop->src_pt_cmd = xpt_cmd;
+
+ rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
+ remote_port, true);
+ if (rc < 0) {
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+
+ xop->xop_data_sg = se_cmd->t_data_sg;
+ xop->xop_data_nents = se_cmd->t_data_nents;
+ pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
+ " memory\n", xop->xop_data_sg, xop->xop_data_nents);
+
+ rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ if (rc < 0) {
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+ /*
+ * Clear off the allocated t_data_sg that has been saved for
+ * zero-copy WRITE submission reuse in struct xcopy_op..
+ */
+ se_cmd->t_data_sg = NULL;
+ se_cmd->t_data_nents = 0;
+
+ return 0;
+}
+
+static int target_xcopy_write_destination(
+ struct se_cmd *ec_cmd,
+ struct xcopy_op *xop,
+ struct se_device *dst_dev,
+ sector_t dst_lba,
+ u32 dst_sectors)
+{
+ struct xcopy_pt_cmd *xpt_cmd;
+ struct se_cmd *se_cmd;
+ u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
+ int rc;
+ unsigned char cdb[16];
+ bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
+
+ xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+ if (!xpt_cmd) {
+ pr_err("Unable to allocate xcopy_pt_cmd\n");
+ return -ENOMEM;
+ }
+ init_completion(&xpt_cmd->xpt_passthrough_sem);
+ se_cmd = &xpt_cmd->se_cmd;
+
+ memset(&cdb[0], 0, 16);
+ cdb[0] = WRITE_16;
+ put_unaligned_be64(dst_lba, &cdb[2]);
+ put_unaligned_be32(dst_sectors, &cdb[10]);
+ pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
+ (unsigned long long)dst_lba, dst_sectors, length);
+
+ transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+ DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
+ xop->dst_pt_cmd = xpt_cmd;
+
+ rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
+ remote_port, false);
+ if (rc < 0) {
+ struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ /*
+ * If the failure happened before the t_mem_list hand-off in
+ * target_xcopy_setup_pt_cmd(), reset memory + clear the flag so that
+ * core releases this memory on error during X-COPY WRITE I/O.
+ */
+ src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ src_cmd->t_data_sg = xop->xop_data_sg;
+ src_cmd->t_data_nents = xop->xop_data_nents;
+
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+
+ rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ if (rc < 0) {
+ se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void target_xcopy_do_work(struct work_struct *work)
+{
+ struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
+ struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
+ struct se_cmd *ec_cmd = xop->xop_se_cmd;
+ sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
+ unsigned int max_sectors;
+ int rc;
+ unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;
+
+ end_lba = src_lba + nolb;
+ /*
+ * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
+ * smallest hw_max_sectors between src_dev + dst_dev.
+ */
+ max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
+ dst_dev->dev_attrib.hw_max_sectors);
+ max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
+
+ max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
+
+ pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
+ nolb, max_nolb, (unsigned long long)end_lba);
+ pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
+ (unsigned long long)src_lba, (unsigned long long)dst_lba);
+
+ while (src_lba < end_lba) {
+ cur_nolb = min(nolb, max_nolb);
+
+ pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
+ " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
+
+ rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
+ if (rc < 0)
+ goto out;
+
+ src_lba += cur_nolb;
+ pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
+ (unsigned long long)src_lba);
+
+ pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
+ " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
+
+ rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
+ dst_lba, cur_nolb);
+ if (rc < 0) {
+ transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+ goto out;
+ }
+
+ dst_lba += cur_nolb;
+ pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
+ (unsigned long long)dst_lba);
+
+ copied_nolb += cur_nolb;
+ nolb -= cur_nolb;
+
+ transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+ xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+ transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
+ }
+
+ xcopy_pt_undepend_remotedev(xop);
+ kfree(xop);
+
+ pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
+ (unsigned long long)src_lba, (unsigned long long)dst_lba);
+ pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
+ copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
+
+ pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
+ target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
+ return;
+
+out:
+ xcopy_pt_undepend_remotedev(xop);
+ kfree(xop);
+
+ pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+}
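[Editor's note] The copy loop above advances in cur_nolb slices bounded by the smaller hw_max_sectors of the two devices, capped at XCOPY_MAX_SECTORS; for instance, with hw_max_sectors of 1024 and 512 and nolb = 2000, it issues passes of 512, 512, 512, then 464 blocks. The bound, isolated as a sketch:

/* Sketch: per-pass block count used by target_xcopy_do_work(). */
static unsigned short example_cur_nolb(unsigned int src_hw_max,
				       unsigned int dst_hw_max,
				       unsigned short nolb_left)
{
	unsigned int max = src_hw_max < dst_hw_max ? src_hw_max : dst_hw_max;

	if (max > 1024)			/* XCOPY_MAX_SECTORS */
		max = 1024;
	return nolb_left < max ? nolb_left : (unsigned short)max;
}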
+
+sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+{
+ struct se_device *dev = se_cmd->se_dev;
+ struct xcopy_op *xop = NULL;
+ unsigned char *p = NULL, *seg_desc;
+ unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+ sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
+ int rc;
+ unsigned short tdll;
+
+ if (!dev->dev_attrib.emulate_3pc) {
+ pr_err("EXTENDED_COPY operation explicitly disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ sa = se_cmd->t_task_cdb[1] & 0x1f;
+ if (sa != 0x00) {
+ pr_err("EXTENDED_COPY(LID4) not supported\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+ if (!xop) {
+ pr_err("Unable to allocate xcopy_op\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+ xop->xop_se_cmd = se_cmd;
+
+ p = transport_kmap_data_sg(se_cmd);
+ if (!p) {
+ pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+ kfree(xop);
+ return TCM_OUT_OF_RESOURCES;
+ }
+
+ list_id = p[0];
+ list_id_usage = (p[1] & 0x18) >> 3;
+
+ /*
+ * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
+ */
+ tdll = get_unaligned_be16(&p[2]);
+ sdll = get_unaligned_be32(&p[8]);
+
+ inline_dl = get_unaligned_be32(&p[12]);
+ if (inline_dl != 0) {
+ pr_err("XCOPY with non zero inline data length\n");
+ goto out;
+ }
+
+ pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
+ " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
+ tdll, sdll, inline_dl);
+
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ if (rc <= 0)
+ goto out;
+
+ if (xop->src_dev->dev_attrib.block_size !=
+ xop->dst_dev->dev_attrib.block_size) {
+ pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+ " block_size: %u currently unsupported\n",
+ xop->src_dev->dev_attrib.block_size,
+ xop->dst_dev->dev_attrib.block_size);
+ xcopy_pt_undepend_remotedev(xop);
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
+ pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
+ rc * XCOPY_TARGET_DESC_LEN);
+ seg_desc = &p[16];
+ seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
+
+ rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
+ if (rc <= 0) {
+ xcopy_pt_undepend_remotedev(xop);
+ goto out;
+ }
+ transport_kunmap_data_sg(se_cmd);
+
+ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+ rc * XCOPY_SEGMENT_DESC_LEN);
+ INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+ queue_work(xcopy_wq, &xop->xop_work);
+ return TCM_NO_SENSE;
+
+out:
+ if (p)
+ transport_kunmap_data_sg(se_cmd);
+ kfree(xop);
+ return ret;
+}
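[Editor's note] target_do_xcopy() expects an LID1 parameter list: LIST IDENTIFIER in byte 0, TARGET DESCRIPTOR LIST LENGTH at bytes 2-3, SEGMENT DESCRIPTOR LIST LENGTH at bytes 8-11, INLINE DATA LENGTH (required zero) at bytes 12-15, and descriptors from byte 16 onward. A hedged user-space sketch of the header for two target descriptors plus one segment; build_e4_desc()/build_seg_desc_02() are the earlier sketches:

#include <string.h>

/* Sketch: EXTENDED COPY (LID1) parameter list header. */
static size_t build_xcopy_param_list(unsigned char *buf, size_t len)
{
	const unsigned short tdll = 2 * 32;	/* two 0xE4 descriptors */
	const unsigned int sdll = 28;		/* one 0x02 descriptor */

	if (len < 16 + tdll + sdll)
		return 0;

	memset(buf, 0, 16);			/* LIST IDENTIFIER = 0 */
	buf[2] = tdll >> 8;			/* TARGET DESC LIST LENGTH */
	buf[3] = tdll & 0xff;
	buf[8]  = sdll >> 24;			/* SEGMENT DESC LIST LENGTH */
	buf[9]  = sdll >> 16;
	buf[10] = sdll >> 8;
	buf[11] = sdll & 0xff;
	/* bytes 12-15: INLINE DATA LENGTH, must remain zero */

	/* append descriptors at buf[16]... via the earlier sketches */
	return 16 + tdll + sdll;
}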
+
+static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
+{
+ unsigned char *p;
+
+ p = transport_kmap_data_sg(se_cmd);
+ if (!p) {
+ pr_err("transport_kmap_data_sg failed in"
+ " target_rcr_operating_parameters\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+
+ if (se_cmd->data_length < 54) {
+ pr_err("Receive Copy Results Op Parameters length"
+ " too small: %u\n", se_cmd->data_length);
+ transport_kunmap_data_sg(se_cmd);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Set SNLID=1 (Supports no List ID)
+ */
+ p[4] = 0x1;
+ /*
+ * MAXIMUM TARGET DESCRIPTOR COUNT
+ */
+ put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
+ /*
+ * MAXIMUM SEGMENT DESCRIPTOR COUNT
+ */
+ put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
+ /*
+ * MAXIMUM DESCRIPTOR LIST LENGTH
+ */
+ put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
+ /*
+ * MAXIMUM SEGMENT LENGTH
+ */
+ put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
+ /*
+ * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
+ */
+ put_unaligned_be32(0x0, &p[20]);
+ /*
+ * HELD DATA LIMIT
+ */
+ put_unaligned_be32(0x0, &p[24]);
+ /*
+ * MAXIMUM STREAM DEVICE TRANSFER SIZE
+ */
+ put_unaligned_be32(0x0, &p[28]);
+ /*
+ * TOTAL CONCURRENT COPIES
+ */
+ put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
+ /*
+ * MAXIMUM CONCURRENT COPIES
+ */
+ p[36] = RCR_OP_MAX_CONCURR_COPIES;
+ /*
+ * DATA SEGMENT GRANULARITY (log 2)
+ */
+ p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
+ /*
+ * INLINE DATA GRANULARITY (log 2)
+ */
+ p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
+ /*
+ * HELD DATA GRANULARITY
+ */
+ p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
+ /*
+ * IMPLEMENTED DESCRIPTOR LIST LENGTH
+ */
+ p[43] = 0x2;
+ /*
+ * List of implemented descriptor type codes (ordered)
+ */
+ p[44] = 0x02; /* Copy Block to Block device */
+ p[45] = 0xe4; /* Identification descriptor target descriptor */
+
+ /*
+ * AVAILABLE DATA (n-3)
+ */
+ put_unaligned_be32(42, &p[0]);
+
+ transport_kunmap_data_sg(se_cmd);
+ target_complete_cmd(se_cmd, GOOD);
+
+ return TCM_NO_SENSE;
+}
+
+sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
+{
+ unsigned char *cdb = &se_cmd->t_task_cdb[0];
+ int sa = (cdb[1] & 0x1f), list_id = cdb[2];
+ sense_reason_t rc = TCM_NO_SENSE;
+
+ pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
+ " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
+
+ if (list_id != 0) {
+ pr_err("Receive Copy Results with non zero list identifier"
+ " not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ switch (sa) {
+ case RCR_SA_OPERATING_PARAMETERS:
+ rc = target_rcr_operating_parameters(se_cmd);
+ break;
+ case RCR_SA_COPY_STATUS:
+ case RCR_SA_RECEIVE_DATA:
+ case RCR_SA_FAILED_SEGMENT_DETAILS:
+ default:
+ pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ return rc;
+}
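[Editor's note] Only the OPERATING PARAMETERS service action is served above; everything else returns TCM_INVALID_CDB_FIELD. A hedged user-space probe via the Linux SG_IO ioctl (device path and buffer size are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[16] = { 0x84, 0x03 };	/* RECEIVE COPY RESULTS, SA 0x03 */
	unsigned char buf[128], sense[32];
	struct sg_io_hdr io;
	int fd = open("/dev/sdX", O_RDONLY);	/* illustrative device node */

	if (fd < 0)
		return 1;
	cdb[13] = sizeof(buf);			/* ALLOCATION LENGTH, low byte */

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxferp = buf;
	io.dxfer_len = sizeof(buf);
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.timeout = 10000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &io) == 0)
		printf("SNLID: %u max target descs: %u\n",
		       buf[4] & 0x1, (buf[8] << 8) | buf[9]);
	close(fd);
	return 0;
}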
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
new file mode 100644
index 00000000000..700a981c7b4
--- /dev/null
+++ b/drivers/target/target_core_xcopy.h
@@ -0,0 +1,62 @@
+#define XCOPY_TARGET_DESC_LEN 32
+#define XCOPY_SEGMENT_DESC_LEN 28
+#define XCOPY_NAA_IEEE_REGEX_LEN 16
+#define XCOPY_MAX_SECTORS 1024
+
+enum xcopy_origin_list {
+ XCOL_SOURCE_RECV_OP = 0x01,
+ XCOL_DEST_RECV_OP = 0x02,
+};
+
+struct xcopy_pt_cmd;
+
+struct xcopy_op {
+ int op_origin;
+
+ struct se_cmd *xop_se_cmd;
+ struct se_device *src_dev;
+ unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct se_device *dst_dev;
+ unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+
+ sector_t src_lba;
+ sector_t dst_lba;
+ unsigned short stdi;
+ unsigned short dtdi;
+ unsigned short nolb;
+ unsigned int dbl;
+
+ struct xcopy_pt_cmd *src_pt_cmd;
+ struct xcopy_pt_cmd *dst_pt_cmd;
+
+ u32 xop_data_nents;
+ struct scatterlist *xop_data_sg;
+ struct work_struct xop_work;
+};
+
+/*
+ * Receive Copy Results Service Actions
+ */
+#define RCR_SA_COPY_STATUS 0x00
+#define RCR_SA_RECEIVE_DATA 0x01
+#define RCR_SA_OPERATING_PARAMETERS 0x03
+#define RCR_SA_FAILED_SEGMENT_DETAILS 0x04
+
+/*
+ * Receive Copy Results defs for Operating Parameters
+ */
+#define RCR_OP_MAX_TARGET_DESC_COUNT 0x2
+#define RCR_OP_MAX_SG_DESC_COUNT 0x1
+#define RCR_OP_MAX_DESC_LIST_LEN 1024
+#define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */
+#define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */
+#define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */
+#define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */
+#define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
+#define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
+
+extern int target_xcopy_setup_pt(void);
+extern void target_xcopy_release_pt(void);
+extern sense_reason_t target_do_xcopy(struct se_cmd *);
+extern sense_reason_t target_do_receive_copy_results(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index e05c55100ec..a0bcfd3e7e7 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -17,11 +17,12 @@
#ifndef __TCM_FC_H__
#define __TCM_FC_H__
-#define FT_VERSION "0.3"
+#define FT_VERSION "0.4"
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
+#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */
struct ft_transport_id {
__u8 format;
@@ -93,45 +94,43 @@ struct ft_lun {
*/
struct ft_tpg {
u32 index;
- struct ft_lport_acl *lport_acl;
+ struct ft_lport_wwn *lport_wwn;
struct ft_tport *tport; /* active tport or NULL */
- struct list_head list; /* linkage in ft_lport_acl tpg_list */
struct list_head lun_list; /* head of LUNs */
struct se_portal_group se_tpg;
struct workqueue_struct *workqueue;
};
-struct ft_lport_acl {
+struct ft_lport_wwn {
u64 wwpn;
char name[FT_NAMELEN];
- struct list_head list;
- struct list_head tpg_list;
- struct se_wwn fc_lport_wwn;
+ struct list_head ft_wwn_node;
+ struct ft_tpg *tpg;
+ struct se_wwn se_wwn;
};
/*
* Commands
*/
struct ft_cmd {
- u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
struct se_cmd se_cmd; /* Local TCM I/O descriptor */
struct fc_frame *req_frame;
- unsigned char *cdb; /* pointer to CDB inside frame */
u32 write_data_len; /* data received on writes */
struct work_struct work;
/* Local sense buffer */
unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
u32 was_ddp_setup:1; /* Set only if ddp is setup */
+ u32 aborted:1; /* Set if aborted by reset or timeout */
struct scatterlist *sg; /* Set only if DDP is setup */
u32 sg_cnt; /* No. of items in scatterlist */
};
-extern struct list_head ft_lport_list;
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
extern struct target_fabric_configfs *ft_configfs;
+extern unsigned int ft_debug_logging;
/*
* Fabric methods.
@@ -143,11 +142,8 @@ extern struct target_fabric_configfs *ft_configfs;
void ft_sess_put(struct ft_sess *);
int ft_sess_shutdown(struct se_session *);
void ft_sess_close(struct se_session *);
-void ft_sess_stop(struct se_session *, int, int);
-int ft_sess_logged_in(struct se_session *);
u32 ft_sess_get_index(struct se_session *);
u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
-void ft_sess_set_erl0(struct se_session *);
void ft_lport_add(struct fc_lport *, void *);
void ft_lport_del(struct fc_lport *, void *);
@@ -164,8 +160,8 @@ int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
-int ft_queue_tm_resp(struct se_cmd *);
-int ft_is_state_remove(struct se_cmd *);
+void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
/*
* other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 9e7e26c74c7..be0c0d08c56 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -29,6 +28,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
+#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -48,7 +48,7 @@
/*
* Dump cmd state for debugging.
*/
-void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
struct fc_exch *ep;
struct fc_seq *sp;
@@ -59,9 +59,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
se_cmd = &cmd->se_cmd;
pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->sess, cmd->seq, se_cmd);
- pr_debug("%s: cmd %p cdb %p\n",
- caller, cmd, cmd->cdb);
- pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
caller, cmd, se_cmd->t_data_nents,
@@ -81,24 +78,30 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
}
- print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
- 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
+}
+
+void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+{
+ if (unlikely(ft_debug_logging))
+ _ft_dump_cmd(cmd, caller);
}
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
struct fc_lport *lport;
+ struct ft_sess *sess;
if (!cmd)
return;
+ sess = cmd->sess;
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
lport->tt.seq_release(fr_seq(fp));
fc_frame_free(fp);
- ft_sess_put(cmd->sess); /* undo get from lookup at recv */
- kfree(cmd);
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ ft_sess_put(sess); /* undo get from lookup at recv */
}
void ft_release_cmd(struct se_cmd *se_cmd)
@@ -125,16 +128,20 @@ int ft_queue_status(struct se_cmd *se_cmd)
struct fc_lport *lport;
struct fc_exch *ep;
size_t len;
+ int rc;
+ if (cmd->aborted)
+ return 0;
ft_dump_cmd(cmd, __func__);
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
len = sizeof(*fcp) + se_cmd->scsi_sense_length;
fp = fc_frame_alloc(lport, len);
if (!fp) {
- /* XXX shouldn't just drop it - requeue and retry? */
- return 0;
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ return -ENOMEM;
}
+
fcp = fc_frame_payload_get(fp, len);
memset(fcp, 0, len);
fcp->resp.fr_status = se_cmd->scsi_status;
@@ -165,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd)
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- lport->tt.seq_send(lport, cmd->seq, fp);
+ rc = lport->tt.seq_send(lport, cmd->seq, fp);
+ if (rc) {
+ pr_info_ratelimited("%s: Failed to send response frame %p, "
+ "xid <0x%x>\n", __func__, fp, ep->xid);
+ /*
+ * Generate a TASK_SET_FULL status to notify the initiator
+ * to reduce its queue_depth after the se_cmd response has
+ * been re-queued by target-core.
+ */
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ return -ENOMEM;
+ }
lport->tt.exch_done(cmd->seq);
return 0;
}
@@ -192,6 +210,8 @@ int ft_write_pending(struct se_cmd *se_cmd)
ft_dump_cmd(cmd, __func__);
+ if (cmd->aborted)
+ return 0;
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
@@ -216,20 +236,10 @@ int ft_write_pending(struct se_cmd *se_cmd)
*/
if ((ep->xid <= lport->lro_xid) &&
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
- if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- /*
- * cmd may have been broken up into multiple
- * tasks. Link their sgs together so we can
- * operate on them all at once.
- */
- transport_do_task_sg_chain(se_cmd);
- cmd->sg = se_cmd->t_tasks_sg_chained;
- cmd->sg_cnt =
- se_cmd->t_tasks_sg_chained_no;
- }
- if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
- cmd->sg,
- cmd->sg_cnt))
+ if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+ lport->tt.ddp_target(lport, ep->xid,
+ se_cmd->t_data_sg,
+ se_cmd->t_data_nents))
cmd->was_ddp_setup = 1;
}
}
@@ -241,6 +251,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+ if (cmd->aborted)
+ return ~0;
return fc_seq_exch(cmd->seq)->rxid;
}
@@ -249,11 +261,6 @@ int ft_get_cmd_state(struct se_cmd *se_cmd)
return 0;
}
-int ft_is_state_remove(struct se_cmd *se_cmd)
-{
- return 0; /* XXX TBD */
-}
-
/*
* FC sequence response handler for follow-on sequences (data) and aborts.
*/
@@ -262,10 +269,10 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
struct ft_cmd *cmd = arg;
struct fc_frame_header *fh;
- if (IS_ERR(fp)) {
+ if (unlikely(IS_ERR(fp))) {
/* XXX need to find cmd if queued */
cmd->seq = NULL;
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ cmd->aborted = true;
return;
}
@@ -325,10 +332,12 @@ static void ft_send_resp_status(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
- if (sp)
+ if (sp) {
lport->tt.seq_send(lport, sp, fp);
- else
+ lport->tt.exch_done(sp);
+ } else {
lport->tt.frame_send(lport, fp);
+ }
}
/*
@@ -358,16 +367,10 @@ static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
*/
static void ft_send_tm(struct ft_cmd *cmd)
{
- struct se_tmr_req *tmr;
struct fcp_cmnd *fcp;
- struct ft_sess *sess;
+ int rc;
u8 tm_func;
- transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
- cmd->sess->se_sess, 0, DMA_NONE, 0,
- &cmd->ft_sense_buffer[0]);
- target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
-
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
switch (fcp->fc_tm_flags) {
@@ -396,55 +399,25 @@ static void ft_send_tm(struct ft_cmd *cmd)
return;
}
- pr_debug("alloc tm cmd fn %d\n", tm_func);
- tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
- if (!tmr) {
- pr_debug("alloc failed\n");
+ /* FIXME: Add referenced task tag for ABORT_TASK */
+ rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
+ &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+ cmd, tm_func, GFP_KERNEL, 0, 0);
+ if (rc < 0)
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
- return;
- }
- cmd->se_cmd.se_tmr_req = tmr;
-
- switch (fcp->fc_tm_flags) {
- case FCP_TMF_LUN_RESET:
- cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
- if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
- /*
- * Make sure to clean up newly allocated TMR request
- * since "unable to handle TMR request because failed
- * to get to LUN"
- */
- pr_debug("Failed to get LUN for TMR func %d, "
- "se_cmd %p, unpacked_lun %d\n",
- tm_func, &cmd->se_cmd, cmd->lun);
- ft_dump_cmd(cmd, __func__);
- sess = cmd->sess;
- transport_send_check_condition_and_sense(&cmd->se_cmd,
- cmd->se_cmd.scsi_sense_reason, 0);
- ft_sess_put(sess);
- return;
- }
- break;
- case FCP_TMF_TGT_RESET:
- case FCP_TMF_CLR_TASK_SET:
- case FCP_TMF_ABT_TASK_SET:
- case FCP_TMF_CLR_ACA:
- break;
- default:
- return;
- }
- transport_generic_handle_tmr(&cmd->se_cmd);
}
/*
* Send status from completed task management request.
*/
-int ft_queue_tm_resp(struct se_cmd *se_cmd)
+void ft_queue_tm_resp(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct se_tmr_req *tmr = se_cmd->se_tmr_req;
enum fcp_resp_rsp_codes code;
+ if (cmd->aborted)
+ return;
switch (tmr->response) {
case TMR_FUNCTION_COMPLETE:
code = FCP_TMF_CMPL;
@@ -456,10 +429,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
code = FCP_TMF_REJECTED;
break;
case TMR_TASK_DOES_NOT_EXIST:
- case TMR_TASK_STILL_ALLEGIANT:
- case TMR_TASK_FAILOVER_NOT_SUPPORTED:
case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
- case TMR_FUNCTION_AUTHORIZATION_FAILED:
default:
code = FCP_TMF_FAILED;
break;
@@ -467,7 +437,11 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
- return 0;
+}
+
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
}
static void ft_send_work(struct work_struct *work);
@@ -479,14 +453,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
+ struct se_session *se_sess = sess->se_sess;
+ int tag;
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0)
goto busy;
+
+ cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct ft_cmd));
+
+ cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
cmd->seq = lport->tt.seq_assign(lport, fp);
if (!cmd->seq) {
- kfree(cmd);
+ percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
@@ -538,7 +519,6 @@ static void ft_send_work(struct work_struct *work)
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
struct fcp_cmnd *fcp;
int data_dir = 0;
- u32 data_len;
int task_attr;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -548,47 +528,6 @@ static void ft_send_work(struct work_struct *work)
if (fcp->fc_flags & FCP_CFL_LEN_MASK)
goto err; /* not handling longer CDBs yet */
- if (fcp->fc_tm_flags) {
- task_attr = FCP_PTA_SIMPLE;
- data_dir = DMA_NONE;
- data_len = 0;
- } else {
- switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
- case 0:
- data_dir = DMA_NONE;
- break;
- case FCP_CFL_RDDATA:
- data_dir = DMA_FROM_DEVICE;
- break;
- case FCP_CFL_WRDATA:
- data_dir = DMA_TO_DEVICE;
- break;
- case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
- goto err; /* TBD not supported by tcm_fc yet */
- }
- /*
- * Locate the SAM Task Attr from fc_pri_ta
- */
- switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
- case FCP_PTA_HEADQ:
- task_attr = MSG_HEAD_TAG;
- break;
- case FCP_PTA_ORDERED:
- task_attr = MSG_ORDERED_TAG;
- break;
- case FCP_PTA_ACA:
- task_attr = MSG_ACA_TAG;
- break;
- case FCP_PTA_SIMPLE: /* Fallthrough */
- default:
- task_attr = MSG_SIMPLE_TAG;
- }
-
-
- task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
- data_len = ntohl(fcp->fc_dl);
- cmd->cdb = fcp->fc_cdb;
- }
/*
* Check for FCP task management flags
*/
@@ -596,15 +535,48 @@ static void ft_send_work(struct work_struct *work)
ft_send_tm(cmd);
return;
}
+
+ switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
+ case 0:
+ data_dir = DMA_NONE;
+ break;
+ case FCP_CFL_RDDATA:
+ data_dir = DMA_FROM_DEVICE;
+ break;
+ case FCP_CFL_WRDATA:
+ data_dir = DMA_TO_DEVICE;
+ break;
+ case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
+ goto err; /* TBD not supported by tcm_fc yet */
+ }
+ /*
+ * Locate the SAM Task Attr from fc_pri_ta
+ */
+ switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_HEADQ:
+ task_attr = MSG_HEAD_TAG;
+ break;
+ case FCP_PTA_ORDERED:
+ task_attr = MSG_ORDERED_TAG;
+ break;
+ case FCP_PTA_ACA:
+ task_attr = MSG_ACA_TAG;
+ break;
+ case FCP_PTA_SIMPLE: /* Fallthrough */
+ default:
+ task_attr = MSG_SIMPLE_TAG;
+ }
+
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
- cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
- target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
- &cmd->ft_sense_buffer[0], cmd->lun, data_len,
- task_attr, data_dir, 0);
+ if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
+ &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+ ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+ goto err;
+
pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
return;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 73852fbc857..efdcb9663a1 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
struct target_fabric_configfs *ft_configfs;
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
unsigned int ft_debug_logging;
@@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
return found;
}
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
struct ft_node_acl *acl;
@@ -298,8 +298,9 @@ static struct se_portal_group *ft_add_tpg(
struct config_group *group,
const char *name)
{
- struct ft_lport_acl *lacl;
+ struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
+ struct workqueue_struct *wq;
unsigned long index;
int ret;
@@ -310,32 +311,43 @@ static struct se_portal_group *ft_add_tpg(
*/
if (strstr(name, "tpgt_") != name)
return NULL;
- if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
+
+ ret = kstrtoul(name + 5, 10, &index);
+ if (ret)
+ return NULL;
+ if (index > UINT_MAX)
return NULL;
- lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+ if (index != 1) {
+ pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg)
return NULL;
tpg->index = index;
- tpg->lport_acl = lacl;
+ tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
- ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
- tpg, TRANSPORT_TPG_TYPE_NORMAL);
- if (ret < 0) {
+ wq = alloc_workqueue("tcm_fc", 0, 1);
+ if (!wq) {
kfree(tpg);
return NULL;
}
- tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
- if (!tpg->workqueue) {
+ ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
+ tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ destroy_workqueue(wq);
kfree(tpg);
return NULL;
}
+ tpg->workqueue = wq;
mutex_lock(&ft_lport_lock);
- list_add_tail(&tpg->list, &lacl->tpg_list);
+ ft_wwn->tpg = tpg;
mutex_unlock(&ft_lport_lock);
return &tpg->se_tpg;
@@ -344,6 +356,7 @@ static struct se_portal_group *ft_add_tpg(
static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+ struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -354,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
synchronize_rcu();
mutex_lock(&ft_lport_lock);
- list_del(&tpg->list);
+ ft_wwn->tpg = NULL;
if (tpg->tport) {
tpg->tport->tpg = NULL;
tpg->tport = NULL;
@@ -373,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
*/
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
{
- struct ft_lport_acl *lacl;
- struct ft_tpg *tpg;
+ struct ft_lport_wwn *ft_wwn;
- list_for_each_entry(lacl, &ft_lport_list, list) {
- if (lacl->wwpn == lport->wwpn) {
- list_for_each_entry(tpg, &lacl->tpg_list, list)
- return tpg; /* XXX for now return first entry */
- return NULL;
- }
+ list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+ if (ft_wwn->wwpn == lport->wwpn)
+ return ft_wwn->tpg;
}
return NULL;
}
@@ -394,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
* Add lport to allowed config.
* The name is the WWPN in lower-case ASCII, colon-separated bytes.
*/
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
- struct ft_lport_acl *lacl;
- struct ft_lport_acl *old_lacl;
+ struct ft_lport_wwn *ft_wwn;
+ struct ft_lport_wwn *old_ft_wwn;
u64 wwpn;
- pr_debug("add lport %s\n", name);
+ pr_debug("add wwn %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
- lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
- if (!lacl)
+ ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+ if (!ft_wwn)
return NULL;
- lacl->wwpn = wwpn;
- INIT_LIST_HEAD(&lacl->tpg_list);
+ ft_wwn->wwpn = wwpn;
mutex_lock(&ft_lport_lock);
- list_for_each_entry(old_lacl, &ft_lport_list, list) {
- if (old_lacl->wwpn == wwpn) {
+ list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+ if (old_ft_wwn->wwpn == wwpn) {
mutex_unlock(&ft_lport_lock);
- kfree(lacl);
+ kfree(ft_wwn);
return NULL;
}
}
- list_add_tail(&lacl->list, &ft_lport_list);
- ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+ list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+ ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
mutex_unlock(&ft_lport_lock);
- return &lacl->fc_lport_wwn;
+ return &ft_wwn->se_wwn;
}
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
{
- struct ft_lport_acl *lacl = container_of(wwn,
- struct ft_lport_acl, fc_lport_wwn);
+ struct ft_lport_wwn *ft_wwn = container_of(wwn,
+ struct ft_lport_wwn, se_wwn);
- pr_debug("del lport %s\n", lacl->name);
+ pr_debug("del wwn %s\n", ft_wwn->name);
mutex_lock(&ft_lport_lock);
- list_del(&lacl->list);
+ list_del(&ft_wwn->ft_wwn_node);
mutex_unlock(&ft_lport_lock);
- kfree(lacl);
+ kfree(ft_wwn);
}
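
ft_del_wwn() recovers the driver-private wrapper from the generic se_wwn
embedded inside it. The container_of() idiom, shown standalone with
simplified stub types (names are illustrative, not the real structures):

	#include <linux/kernel.h>	/* container_of() */

	struct se_wwn_stub { int ref; };

	struct lport_wwn_stub {
		unsigned long long	wwpn;
		struct se_wwn_stub	se_wwn;	/* embedded generic object */
	};

	/* Map a pointer to the embedded member back to its container. */
	static struct lport_wwn_stub *to_lport_wwn(struct se_wwn_stub *wwn)
	{
		return container_of(wwn, struct lport_wwn_stub, se_wwn);
	}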
static ssize_t ft_wwn_show_attr_version(
@@ -464,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
- return tpg->lport_acl->name;
+ return tpg->lport_wwn->name;
}
static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -492,16 +500,6 @@ static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
{
}
-static u16 ft_get_fabric_sense_len(void)
-{
- return 0;
-}
-
-static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
-{
- return 0;
-}
-
static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -529,9 +527,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.release_cmd = ft_release_cmd,
.shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
- .stop_session = ft_sess_stop,
- .fall_back_to_erl0 = ft_sess_set_erl0,
- .sess_logged_in = ft_sess_logged_in,
.sess_get_index = ft_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = ft_write_pending,
@@ -542,15 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
- .get_fabric_sense_len = ft_get_fabric_sense_len,
- .set_fabric_sense_len = ft_set_fabric_sense_len,
- .is_state_remove = ft_is_state_remove,
+ .aborted_task = ft_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
- .fabric_make_wwn = &ft_add_lport,
- .fabric_drop_wwn = &ft_del_lport,
+ .fabric_make_wwn = &ft_add_wwn,
+ .fabric_drop_wwn = &ft_del_wwn,
.fabric_make_tpg = &ft_add_tpg,
.fabric_drop_tpg = &ft_del_tpg,
.fabric_post_link = NULL,
@@ -561,7 +554,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.fabric_drop_nodeacl = &ft_del_acl,
};
-int ft_register_configfs(void)
+static int ft_register_configfs(void)
{
struct target_fabric_configfs *fabric;
int ret;
@@ -577,22 +570,19 @@ int ft_register_configfs(void)
}
fabric->tf_ops = ft_fabric_ops;
- /* Allowing support for task_sg_chaining */
- fabric->tf_ops.task_sg_chaining = 1;
-
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
ft_nacl_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* register the fabric for use within TCM
*/
@@ -611,7 +601,7 @@ int ft_register_configfs(void)
return 0;
}
-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
{
if (!ft_configfs)
return;
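
The TF_CIT_TMPL() accessor macro was retired in favor of reaching
tf_cit_tmpl as a plain struct member; the conversion is mechanical:

	/* before */	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
	/* after  */	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;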
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index d8cabc21036..97b486c3dda 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -81,6 +80,12 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
void *from;
void *to = NULL;
+ if (cmd->aborted)
+ return 0;
+
+ if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
+ goto queue_status;
+
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);
@@ -102,6 +107,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
use_sg = !(remaining % 4);
while (remaining) {
+ struct fc_seq *seq = cmd->seq;
+
+ if (!seq) {
+ pr_debug("%s: Command aborted, xid 0x%x\n",
+ __func__, ep->xid);
+ break;
+ }
if (!mem_len) {
sg = sg_next(sg);
mem_len = min((size_t)sg->length, remaining);
@@ -146,14 +158,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
PAGE_SIZE << compound_order(page);
} else {
BUG_ON(!page);
- from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
- KM_SOFTIRQ0);
+ from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = from;
from += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
- kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ kunmap_atomic(page_addr);
to += tlen;
}
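
kmap_atomic() and kunmap_atomic() dropped their KM_* slot argument once the
stack-based atomic kmap implementation landed; the slot is now managed
implicitly per CPU. The copy idiom used above, as a self-contained sketch
(helper name hypothetical):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Copy tlen bytes out of a (possibly highmem) page at offset off. */
	static void copy_from_page(void *to, struct page *page,
				   size_t off, size_t tlen)
	{
		void *from = kmap_atomic(page);	/* no KM_SOFTIRQ0 argument */

		memcpy(to, from + off, tlen);
		kunmap_atomic(from);
	}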
@@ -169,19 +180,35 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
- error = lport->tt.seq_send(lport, cmd->seq, fp);
+ error = lport->tt.seq_send(lport, seq, fp);
if (error) {
- /* XXX For now, initiator will retry */
- pr_err_ratelimited("%s: Failed to send frame %p, "
+ pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
+ /*
+ * Go ahead and set TASK_SET_FULL status ignoring the
+ * rest of the DataIN, and immediately attempt to
+ * send the response via ft_queue_status() in order
+ * to notify the initiator that it should reduce its
+ * per-LUN queue_depth.
+ */
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ break;
}
}
+queue_status:
return ft_queue_status(se_cmd);
}
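
The send-failure path no longer leaves recovery to initiator retries: it
abandons the remaining DataIN and reports SAM_STAT_TASK_SET_FULL (SCSI
status 0x28), which asks the initiator to shrink its queue depth. Condensed,
using the names from the hunk above:

	error = lport->tt.seq_send(lport, seq, fp);
	if (error) {
		/* stop streaming DataIN; tell the initiator to back off */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		break;		/* falls through to ft_queue_status() */
	}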
+static void ft_execute_work(struct work_struct *work)
+{
+ struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
+
+ target_execute_cmd(&cmd->se_cmd);
+}
+
/*
* Receive write data frame.
*/
@@ -227,7 +254,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
"payload, Frame will be dropped if"
"'Sequence Initiative' bit in f_ctl is"
"not set\n", __func__, ep->xid, f_ctl,
- cmd->sg, cmd->sg_cnt);
+ se_cmd->t_data_sg, se_cmd->t_data_nents);
/*
* Invalidate the HW DDP context if one was set up for the respective
* command. Invalidation of the HW DDP context is required in both
@@ -291,14 +318,13 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
tlen = min(mem_len, frame_len);
- to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
- KM_SOFTIRQ0);
+ to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = to;
to += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
- kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ kunmap_atomic(page_addr);
from += tlen;
frame_len -= tlen;
@@ -307,8 +333,10 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
cmd->write_data_len += tlen;
}
last_frame:
- if (cmd->write_data_len == se_cmd->data_length)
- transport_generic_handle_data(se_cmd);
+ if (cmd->write_data_len == se_cmd->data_length) {
+ INIT_WORK(&cmd->work, ft_execute_work);
+ queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
+ }
drop:
fc_frame_free(fp);
}
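
With transport_generic_handle_data() gone from the core, arrival of the
final write-data frame now queues the command to the per-TPG workqueue
created in ft_add_tpg(), so target_execute_cmd() runs in process context
instead of the frame-receive path. The deferral pattern, sketched with
hypothetical names:

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/workqueue.h>

	struct example_cmd {
		struct work_struct	work;
		/* ... per-command state ... */
	};

	static void example_execute_work(struct work_struct *work)
	{
		struct example_cmd *cmd =
			container_of(work, struct example_cmd, work);

		/* process context: safe to sleep, take mutexes, etc. */
	}

	/* from the receive path, once all data has arrived: */
	INIT_WORK(&cmd->work, example_execute_work);
	queue_work(wq, &cmd->work);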
@@ -319,11 +347,12 @@ drop:
*/
void ft_invl_hw_context(struct ft_cmd *cmd)
{
- struct fc_seq *seq = cmd->seq;
+ struct fc_seq *seq;
struct fc_exch *ep = NULL;
struct fc_lport *lport = NULL;
BUG_ON(!cmd);
+ seq = cmd->seq;
/* Cleanup the DDP context in HW if DDP was setup */
if (cmd->was_ddp_setup && seq) {
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4c0507cf808..21ce50880c7 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -52,13 +51,14 @@ static void ft_sess_delete_all(struct ft_tport *);
* Lookup or allocate target local port.
* Caller holds ft_lport_lock.
*/
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
{
struct ft_tpg *tpg;
struct ft_tport *tport;
int i;
- tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+ tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+ lockdep_is_held(&ft_lport_lock));
if (tport && tport->tpg)
return tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
if (tport) {
tport->tpg = tpg;
+ tpg->tport = tport;
return tport;
}
@@ -86,16 +87,6 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
}
/*
- * Free tport via RCU.
- */
-static void ft_tport_rcu_free(struct rcu_head *rcu)
-{
- struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
-
- kfree(tport);
-}
-
-/*
* Delete a target local port.
* Caller holds ft_lport_lock.
*/
@@ -114,7 +105,7 @@ static void ft_tport_delete(struct ft_tport *tport)
tpg->tport = NULL;
tport->tpg = NULL;
}
- call_rcu(&tport->rcu, ft_tport_rcu_free);
+ kfree_rcu(tport, rcu);
}
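
When an RCU callback does nothing but kfree() its container, kfree_rcu()
replaces the call_rcu() + callback boilerplate; it takes the object and the
name of its embedded rcu_head. A minimal sketch with a stub type:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct tport_stub {
		/* ... */
		struct rcu_head rcu;
	};

	/* was: call_rcu(&tport->rcu, ft_tport_rcu_free); */
	kfree_rcu(tport, rcu);	/* freed only after a grace period elapses */

The ft_sess_free() conversion below does the same, but keeps
transport_deregister_session() synchronous and defers only the final kfree().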
/*
@@ -124,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
void ft_lport_add(struct fc_lport *lport, void *arg)
{
mutex_lock(&ft_lport_lock);
- ft_tport_create(lport);
+ ft_tport_get(lport);
mutex_unlock(&ft_lport_lock);
}
@@ -179,7 +170,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
{
struct ft_tport *tport;
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
rcu_read_lock();
@@ -188,7 +178,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
goto out;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
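
The hlist_for_each_entry*() iterators dropped their separate struct
hlist_node cursor; the loop variable is now derived from the entry itself,
which is why the 'pos' declarations disappear throughout this file. A
fragment with stub types, assuming an rcu_read_lock() section is held:

	#include <linux/rculist.h>

	struct sess_stub {
		struct hlist_node	hash;
		u32			port_id;
	};

	struct sess_stub *sess;

	/* no separate hlist_node cursor argument anymore */
	hlist_for_each_entry_rcu(sess, head, hash)
		if (sess->port_id == port_id)
			return sess;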
@@ -211,10 +201,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
{
struct ft_sess *sess;
struct hlist_head *head;
- struct hlist_node *pos;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash)
+ hlist_for_each_entry_rcu(sess, head, hash)
if (sess->port_id == port_id)
return sess;
@@ -222,7 +211,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
if (!sess)
return NULL;
- sess->se_sess = transport_init_session();
+ sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
+ sizeof(struct ft_cmd),
+ TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
@@ -263,11 +254,10 @@ static void ft_sess_unhash(struct ft_sess *sess)
static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
ft_sess_unhash(sess);
return sess;
@@ -283,12 +273,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_sess_delete_all(struct ft_tport *tport)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
for (head = tport->hash;
head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
ft_sess_unhash(sess);
transport_deregister_session_configfs(sess->se_sess);
ft_sess_put(sess); /* release from table */
@@ -319,11 +308,9 @@ int ft_sess_shutdown(struct se_session *se_sess)
void ft_sess_close(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
- struct fc_lport *lport;
u32 port_id;
mutex_lock(&ft_lport_lock);
- lport = sess->tport->lport;
port_id = sess->port_id;
if (port_id == -1) {
mutex_unlock(&ft_lport_lock);
@@ -338,20 +325,6 @@ void ft_sess_close(struct se_session *se_sess)
synchronize_rcu(); /* let transport deregister happen */
}
-void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
-{
- struct ft_sess *sess = se_sess->fabric_sess_ptr;
-
- pr_debug("port_id %x\n", sess->port_id);
-}
-
-int ft_sess_logged_in(struct se_session *se_sess)
-{
- struct ft_sess *sess = se_sess->fabric_sess_ptr;
-
- return sess->port_id != -1;
-}
-
u32 ft_sess_get_index(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
@@ -367,11 +340,6 @@ u32 ft_sess_get_port_name(struct se_session *se_sess,
return ft_format_wwn(buf, len, sess->port_name);
}
-void ft_sess_set_erl0(struct se_session *se_sess)
-{
- /* XXX TBD called when out of memory */
-}
-
/*
* libfc ops involving sessions.
*/
@@ -384,13 +352,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
struct ft_node_acl *acl;
u32 fcp_parm;
- tport = ft_tport_create(rdata->local_port);
+ tport = ft_tport_get(rdata->local_port);
if (!tport)
- return 0; /* not a target for this local port */
+ goto not_target; /* not a target for this local port */
acl = ft_acl_get(tport->tpg, rdata);
if (!acl)
- return 0;
+ goto not_target; /* no target for this remote */
if (!rspp)
goto fill;
@@ -427,12 +395,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
/*
* OR in our service parameters with other provider (initiator), if any.
- * TBD XXX - indicate RETRY capability?
*/
fill:
fcp_parm = ntohl(spp->spp_params);
+ fcp_parm &= ~FCP_SPPF_RETRY;
spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
return FC_SPP_RESP_ACK;
+
+not_target:
+ fcp_parm = ntohl(spp->spp_params);
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ spp->spp_params = htonl(fcp_parm);
+ return 0;
}
/**
@@ -457,19 +431,12 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
return ret;
}
-static void ft_sess_rcu_free(struct rcu_head *rcu)
-{
- struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
-
- transport_deregister_session(sess->se_sess);
- kfree(sess);
-}
-
static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
- call_rcu(&sess->rcu, ft_sess_rcu_free);
+ transport_deregister_session(sess->se_sess);
+ kfree_rcu(sess, rcu);
}
void ft_sess_put(struct ft_sess *sess)
@@ -486,7 +453,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)
struct ft_tport *tport;
mutex_lock(&ft_lport_lock);
- tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
+ tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
+ lockdep_is_held(&ft_lport_lock));
+
if (!tport) {
mutex_unlock(&ft_lport_lock);
return;
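
rcu_dereference_protected() is the update-side counterpart of
rcu_dereference(): it omits the read-side checks and barriers and may be
used outside rcu_read_lock(), provided the lockdep expression proves that
writers are excluded. Here ft_lport_lock serializes updates to
lport->prov[], so:

	/* update side: ft_lport_lock held, no RCU read-side section needed */
	tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
					  lockdep_is_held(&ft_lport_lock));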