Diffstat (limited to 'drivers/target/target_core_tpg.c')
-rw-r--r--  drivers/target/target_core_tpg.c | 243
1 file changed, 138 insertions(+), 105 deletions(-)
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 06336ecd872..c036595b17c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -3,10 +3,7 @@
*
* This file contains generic Target Portal Group related functions.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -60,11 +57,10 @@ static void core_clear_initiator_node_from_tpg(
int i;
struct se_dev_entry *deve;
struct se_lun *lun;
- struct se_lun_acl *acl, *acl_tmp;
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &nacl->device_list[i];
+ deve = nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
@@ -78,31 +74,10 @@ static void core_clear_initiator_node_from_tpg(
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
-
- spin_lock(&lun->lun_acl_lock);
- list_for_each_entry_safe(acl, acl_tmp,
- &lun->lun_acl_list, lacl_list) {
- if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
- (acl->mapped_lun == deve->mapped_lun))
- break;
- }
-
- if (!acl) {
- pr_err("Unable to locate struct se_lun_acl for %s,"
- " mapped_lun: %u\n", nacl->initiatorname,
- deve->mapped_lun);
- spin_unlock(&lun->lun_acl_lock);
- spin_lock_irq(&nacl->device_list_lock);
- continue;
- }
-
- list_del(&acl->lacl_list);
- spin_unlock(&lun->lun_acl_lock);
+ core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
- kfree(acl);
}
spin_unlock_irq(&nacl->device_list_lock);
}
@@ -136,17 +111,12 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
spin_lock_irq(&tpg->acl_node_lock);
- list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!strcmp(acl->initiatorname, initiatorname) &&
- !acl->dynamic_node_acl) {
- spin_unlock_irq(&tpg->acl_node_lock);
- return acl;
- }
- }
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
spin_unlock_irq(&tpg->acl_node_lock);
- return NULL;
+ return acl;
}
+EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
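Note: the deleted loop above is, in effect, the body of the new __core_tpg_get_initiator_node_acl() helper. Its definition is not part of this diff, so the sketch below is a reconstruction from the removed lines; whether the helper keeps the dynamic_node_acl check is an assumption, not something this hunk shows.

    /* Sketch: lockless lookup; caller holds tpg->acl_node_lock. */
    struct se_node_acl *__core_tpg_get_initiator_node_acl(
    	struct se_portal_group *tpg, const char *initiatorname)
    {
    	struct se_node_acl *acl;

    	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
    		if (!strcmp(acl->initiatorname, initiatorname))
    			return acl;
    	}
    	return NULL;
    }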
/* core_tpg_add_node_to_devs():
*
@@ -163,7 +133,7 @@ void core_tpg_add_node_to_devs(
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = &tpg->tpg_lun_list[i];
+ lun = tpg->tpg_lun_list[i];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
continue;
@@ -175,10 +145,7 @@ void core_tpg_add_node_to_devs(
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
- if (dev->dev_flags & DF_READ_ONLY)
- lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
- else
- lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
/*
* Allow only optical drives to issue R/W in default RO
@@ -197,8 +164,8 @@ void core_tpg_add_node_to_devs(
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
- core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
- lun_access, acl, tpg, 1);
+ core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
@@ -222,6 +189,34 @@ static int core_set_queue_depth_for_node(
return 0;
}
+void array_free(void *array, int n)
+{
+ void **a = array;
+ int i;
+
+ for (i = 0; i < n; i++)
+ kfree(a[i]);
+ kfree(a);
+}
+
+static void *array_zalloc(int n, size_t size, gfp_t flags)
+{
+ void **a;
+ int i;
+
+ a = kzalloc(n * sizeof(void*), flags);
+ if (!a)
+ return NULL;
+ for (i = 0; i < n; i++) {
+ a[i] = kzalloc(size, flags);
+ if (!a[i]) {
+ array_free(a, n);
+ return NULL;
+ }
+ }
+ return a;
+}
+
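Note: these two helpers replace single flat allocations with an array of individually allocated, zeroed elements, which is why lookups throughout this file change from &list[i] to list[i]. A minimal usage sketch, assuming only the helpers added above:

    struct se_dev_entry **device_list;
    struct se_dev_entry *deve;

    /* One pointer slot per LUN, each pointing at its own zeroed entry. */
    device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
    			sizeof(struct se_dev_entry), GFP_KERNEL);
    if (!device_list)
    	return -ENOMEM;

    deve = device_list[0];	/* dereference, not address-of */

    /* Teardown frees every element, then the pointer array itself. */
    array_free(device_list, TRANSPORT_MAX_LUNS_PER_TPG);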
/* core_create_device_list_for_node():
*
*
@@ -231,15 +226,15 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
struct se_dev_entry *deve;
int i;
- nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
- TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+ nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+ sizeof(struct se_dev_entry), GFP_KERNEL);
if (!nacl->device_list) {
pr_err("Unable to allocate memory for"
" struct se_node_acl->device_list\n");
return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &nacl->device_list[i];
+ deve = nacl->device_list[i];
atomic_set(&deve->ua_count, 0);
atomic_set(&deve->pr_ref_count, 0);
@@ -274,6 +269,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
+ kref_init(&acl->acl_kref);
+ init_completion(&acl->acl_free_comp);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
@@ -281,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -298,13 +294,11 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
}
/*
* Here we only create demo-mode MappedLUNs from the active
- * TPG LUNs if the fabric is not explictly asking for
+ * TPG LUNs if the fabric is not explicitly asking for
* tpg_check_demo_mode_login_only() == 1.
*/
- if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
- (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
- do { ; } while (0);
- else
+ if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
+ (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_irq(&tpg->acl_node_lock);
@@ -329,19 +323,19 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
- int i, ret;
+ int i;
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = &tpg->tpg_lun_list[i];
+ lun = tpg->tpg_lun_list[i];
if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
(lun->lun_se_dev == NULL))
continue;
spin_unlock(&tpg->tpg_lun_lock);
- ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+ core_dev_del_lun(tpg, lun->unpacked_lun);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
@@ -402,6 +396,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
+ kref_init(&acl->acl_kref);
+ init_completion(&acl->acl_free_comp);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
@@ -409,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -448,39 +443,47 @@ int core_tpg_del_initiator_node_acl(
struct se_node_acl *acl,
int force)
{
+ LIST_HEAD(sess_list);
struct se_session *sess, *sess_tmp;
- int dynamic_acl = 0;
+ unsigned long flags;
+ int rc;
spin_lock_irq(&tpg->acl_node_lock);
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
- dynamic_acl = 1;
}
list_del(&acl->acl_list);
tpg->num_node_acls--;
spin_unlock_irq(&tpg->acl_node_lock);
- spin_lock_bh(&tpg->session_lock);
- list_for_each_entry_safe(sess, sess_tmp,
- &tpg->tpg_sess_list, sess_list) {
- if (sess->se_node_acl != acl)
- continue;
- /*
- * Determine if the session needs to be closed by our context.
- */
- if (!tpg->se_tpg_tfo->shutdown_session(sess))
+ spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+ acl->acl_stop = 1;
+
+ list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+ sess_acl_list) {
+ if (sess->sess_tearing_down != 0)
continue;
- spin_unlock_bh(&tpg->session_lock);
- /*
- * If the $FABRIC_MOD session for the Initiator Node ACL exists,
- * forcefully shutdown the $FABRIC_MOD session/nexus.
- */
- tpg->se_tpg_tfo->close_session(sess);
- spin_lock_bh(&tpg->session_lock);
+ target_get_session(sess);
+ list_move(&sess->sess_acl_list, &sess_list);
+ }
+ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+ list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
+ list_del(&sess->sess_acl_list);
+ rc = tpg->se_tpg_tfo->shutdown_session(sess);
+ target_put_session(sess);
+ if (!rc)
+ continue;
+ target_put_session(sess);
}
- spin_unlock_bh(&tpg->session_lock);
+ target_put_nacl(acl);
+ /*
+ * Wait for last target_put_nacl() to complete in target_complete_nacl()
+ * for active fabric session transport_deregister_session() callbacks.
+ */
+ wait_for_completion(&acl->acl_free_comp);
core_tpg_wait_for_nacl_pr_ref(acl);
core_clear_initiator_node_from_tpg(acl, tpg);
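Note: the teardown above pairs the kref_init()/init_completion() calls added earlier in this diff with target_put_nacl() and wait_for_completion(): the final put is expected to fire the release callback that completes acl_free_comp. The diff's comment names target_complete_nacl() as that callback but does not show it, so this sketch of its shape is inferred from the fields the diff adds:

    /* Presumed release side, completing the waiter above. */
    static void target_complete_nacl(struct kref *kref)
    {
    	struct se_node_acl *nacl = container_of(kref,
    			struct se_node_acl, acl_kref);

    	complete(&nacl->acl_free_comp);
    }

    /* target_put_nacl() would then be a thin wrapper: */
    kref_put(&acl->acl_kref, target_complete_nacl);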
@@ -507,6 +510,7 @@ int core_tpg_set_initiator_node_queue_depth(
{
struct se_session *sess, *init_sess = NULL;
struct se_node_acl *acl;
+ unsigned long flags;
int dynamic_acl = 0;
spin_lock_irq(&tpg->acl_node_lock);
@@ -525,7 +529,7 @@ int core_tpg_set_initiator_node_queue_depth(
}
spin_unlock_irq(&tpg->acl_node_lock);
- spin_lock_bh(&tpg->session_lock);
+ spin_lock_irqsave(&tpg->session_lock, flags);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
if (sess->se_node_acl != acl)
continue;
@@ -537,7 +541,7 @@ int core_tpg_set_initiator_node_queue_depth(
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
- spin_unlock_bh(&tpg->session_lock);
+ spin_unlock_irqrestore(&tpg->session_lock, flags);
spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -567,7 +571,7 @@ int core_tpg_set_initiator_node_queue_depth(
acl->queue_depth = queue_depth;
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- spin_unlock_bh(&tpg->session_lock);
+ spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
@@ -583,7 +587,7 @@ int core_tpg_set_initiator_node_queue_depth(
spin_unlock_irq(&tpg->acl_node_lock);
return -EINVAL;
}
- spin_unlock_bh(&tpg->session_lock);
+ spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
@@ -605,6 +609,36 @@ int core_tpg_set_initiator_node_queue_depth(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
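Note: the _bh to _irqsave conversion throughout core_tpg_set_initiator_node_queue_depth() lets tpg->session_lock be taken from contexts where the prior interrupt state must be preserved exactly; the saved flags travel with the lock. The generic shape of the pattern used above:

    unsigned long flags;

    spin_lock_irqsave(&tpg->session_lock, flags);
    /* ... walk tpg->tpg_sess_list under the lock ... */
    spin_unlock_irqrestore(&tpg->session_lock, flags);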
+/* core_tpg_set_initiator_node_tag():
+ *
+ * Initiator nodeacl tags are not used internally, but may be used by
+ * userspace to emulate aliases or groups.
+ * Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl,
+ const char *new_tag)
+{
+ if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+ return -EINVAL;
+
+ if (!strncmp("NULL", new_tag, 4)) {
+ acl->acl_tag[0] = '\0';
+ return 0;
+ }
+
+ return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+
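Note: a short usage sketch for the new setter; the configfs attribute that would call it is not part of this diff, so the caller here is hypothetical.

    /* Hypothetical caller: set a tag, then clear it via the "NULL" sentinel. */
    int rc = core_tpg_set_initiator_node_tag(tpg, acl, "fabric-group-1");
    if (rc < 0)
    	return rc;	/* tag was >= MAX_ACL_TAG_SIZE characters */

    core_tpg_set_initiator_node_tag(tpg, acl, "NULL");	/* acl_tag[0] = '\0' */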
+static void core_tpg_lun_ref_release(struct percpu_ref *ref)
+{
+ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+ complete(&lun->lun_ref_comp);
+}
+
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
@@ -618,12 +652,11 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
- ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+ ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
if (ret < 0)
return ret;
@@ -647,8 +680,8 @@ int core_tpg_register(
struct se_lun *lun;
u32 i;
- se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
- TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+ se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+ sizeof(struct se_lun), GFP_KERNEL);
if (!se_tpg->tpg_lun_list) {
pr_err("Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
@@ -656,16 +689,16 @@ int core_tpg_register(
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = &se_tpg->tpg_lun_list[i];
+ lun = se_tpg->tpg_lun_list[i];
lun->unpacked_lun = i;
+ lun->lun_link_magic = SE_LUN_LINK_MAGIC;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
}
se_tpg->se_tpg_type = se_tpg_type;
@@ -682,7 +715,8 @@ int core_tpg_register(
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
- kfree(se_tpg);
+ array_free(se_tpg->tpg_lun_list,
+ TRANSPORT_MAX_LUNS_PER_TPG);
return -ENOMEM;
}
}
@@ -742,12 +776,12 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
core_tpg_release_virtual_lun0(se_tpg);
se_tpg->se_tpg_fabric_ptr = NULL;
- kfree(se_tpg->tpg_lun_list);
+ array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
-struct se_lun *core_tpg_pre_addlun(
+struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
u32 unpacked_lun)
{
@@ -763,7 +797,7 @@ struct se_lun *core_tpg_pre_addlun(
}
spin_lock(&tpg->tpg_lun_lock);
- lun = &tpg->tpg_lun_list[unpacked_lun];
+ lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
pr_err("TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
@@ -777,18 +811,24 @@ struct se_lun *core_tpg_pre_addlun(
return lun;
}
-int core_tpg_post_addlun(
+int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
u32 lun_access,
- void *lun_ptr)
+ struct se_device *dev)
{
int ret;
- ret = core_dev_export(lun_ptr, tpg, lun);
+ ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
if (ret < 0)
return ret;
+ ret = core_dev_export(dev, tpg, lun);
+ if (ret < 0) {
+ percpu_ref_cancel_init(&lun->lun_ref);
+ return ret;
+ }
+
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
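Note: core_tpg_add_lun() now pins each LUN behind a percpu_ref whose release callback, core_tpg_lun_ref_release() above, completes lun_ref_comp; transport_clear_lun_ref() at the end of this diff presumably kills the ref and waits on that completion. A lifecycle sketch under the 3.x-era percpu_ref API used here; the per-I/O get/put placement is an assumption:

    /* Setup: one initial reference, released via callback on the last put. */
    ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);

    /* Presumed fast path, once per command issued against the LUN: */
    percpu_ref_get(&lun->lun_ref);
    /* ... submit and complete the command ... */
    percpu_ref_put(&lun->lun_ref);

    /* Teardown: stop new references and drop the initial one, then wait
     * for core_tpg_lun_ref_release() to complete lun->lun_ref_comp. */
    percpu_ref_kill(&lun->lun_ref);
    wait_for_completion(&lun->lun_ref_comp);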
@@ -797,14 +837,6 @@ int core_tpg_post_addlun(
return 0;
}
-static void core_tpg_shutdown_lun(
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- core_clear_lun_from_tpg(lun, tpg);
- transport_clear_lun_from_sessions(lun);
-}
-
struct se_lun *core_tpg_pre_dellun(
struct se_portal_group *tpg,
u32 unpacked_lun)
@@ -821,7 +853,7 @@ struct se_lun *core_tpg_pre_dellun(
}
spin_lock(&tpg->tpg_lun_lock);
- lun = &tpg->tpg_lun_list[unpacked_lun];
+ lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %u, ignoring request.\n",
@@ -839,7 +871,8 @@ int core_tpg_post_dellun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
- core_tpg_shutdown_lun(tpg, lun);
+ core_clear_lun_from_tpg(lun, tpg);
+ transport_clear_lun_ref(lun);
core_dev_unexport(lun->lun_se_dev, tpg, lun);