Diffstat (limited to 'drivers/target')
66 files changed, 8803 insertions, 3620 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 18303686eb5..dc2d84ac5a0 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -3,6 +3,7 @@ menuconfig TARGET_CORE
 	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
 	depends on SCSI && BLOCK
 	select CONFIGFS_FS
+	select CRC_T10DIF
 	default n
 	help
 	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +14,7 @@ if TARGET_CORE
 
 config TCM_IBLOCK
 	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	select BLK_DEV_INTEGRITY
 	help
 	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
 	access to Linux/Block devices using BIO
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 9fdcb561422..85b012d2f89 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,7 +13,8 @@ target_core_mod-y := target_core_configfs.o \
 	target_core_spc.o \
 	target_core_ua.o \
 	target_core_rd.o \
-	target_core_stat.o
+	target_core_stat.o \
+	target_core_xcopy.o
 
 obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 5b9a2cf7f0a..13a92403fe3 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -15,6 +15,7 @@ iscsi_target_mod-y += iscsi_target_parameters.o \
 	iscsi_target_util.o \
 	iscsi_target.o \
 	iscsi_target_configfs.o \
-	iscsi_target_stat.o
+	iscsi_target_stat.o \
+	iscsi_target_transport.o
 
 obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
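TARGET_CORE now selects CRC_T10DIF, the library behind crc_t10dif(), which computes the 16-bit guard tag carried by T10 protection information, and TCM_IBLOCK selects BLK_DEV_INTEGRITY so PI can pass through the block layer. A minimal sketch of what the selected library provides (example_dif_guard() is an illustrative wrapper, not part of this patch):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crc-t10dif.h>

/* Compute the big-endian DIF guard tag over one logical block. */
static __be16 example_dif_guard(const void *block, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(block, len));
}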
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 339f97f7085..1f4c794f5fc 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1,9 +1,7 @@
 /*******************************************************************************
  * This file contains main functions related to the iSCSI Target Core Driver.
  *
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
  *
  * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
@@ -49,10 +47,12 @@
 #include "iscsi_target_device.h"
 #include "iscsi_target_stat.h"
 
+#include <target/iscsi/iscsi_transport.h>
+
 static LIST_HEAD(g_tiqn_list);
 static LIST_HEAD(g_np_list);
 static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
 
 static struct idr tiqn_idr;
 struct idr sess_idr;
@@ -61,15 +61,13 @@ spinlock_t sess_idr_lock;
 
 struct iscsit_global *iscsit_global;
 
-struct kmem_cache *lio_cmd_cache;
 struct kmem_cache *lio_qr_cache;
 struct kmem_cache *lio_dr_cache;
 struct kmem_cache *lio_ooo_cache;
 struct kmem_cache *lio_r2t_cache;
 
 static int iscsit_handle_immediate_data(struct iscsi_cmd *,
-			unsigned char *buf, u32);
-static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+			struct iscsi_scsi_req *, u32);
 
 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
 {
@@ -144,23 +142,24 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
 	spin_lock_init(&tiqn->login_stats.lock);
 	spin_lock_init(&tiqn->logout_stats.lock);
 
-	if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
-		pr_err("idr_pre_get() for tiqn_idr failed\n");
-		kfree(tiqn);
-		return ERR_PTR(-ENOMEM);
-	}
 	tiqn->tiqn_state = TIQN_STATE_ACTIVE;
 
+	idr_preload(GFP_KERNEL);
 	spin_lock(&tiqn_lock);
-	ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+
+	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
 	if (ret < 0) {
-		pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
 		spin_unlock(&tiqn_lock);
+		idr_preload_end();
 		kfree(tiqn);
 		return ERR_PTR(ret);
 	}
+	tiqn->tiqn_index = ret;
 	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+
 	spin_unlock(&tiqn_lock);
+	idr_preload_end();
 
 	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
@@ -218,11 +217,6 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
 		spin_unlock_bh(&np->np_thread_lock);
 		return -1;
 	}
-	if (np->np_login_tpg) {
-		pr_err("np->np_login_tpg() is not NULL!\n");
-		spin_unlock_bh(&np->np_thread_lock);
-		return -1;
-	}
 	spin_unlock_bh(&np->np_thread_lock);
 	/*
 	 * Determine if the portal group is accepting storage traffic.
 	 */
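The iscsit_add_tiqn() hunk above is the standard conversion from the removed idr_pre_get()/idr_get_new() pair to the idr_preload()/idr_alloc() API: preallocate while sleeping is still allowed, then allocate with GFP_NOWAIT under the spinlock. Distilled to its shape, with illustrative example_* names:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_index(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep: preallocate here */
	spin_lock(&example_lock);

	/* GFP_NOWAIT under the lock; the preload above backs it */
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);

	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* allocated id, or -ENOMEM/-ENOSPC on failure */
}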
@@ -237,26 +231,38 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
 	/*
 	 * Here we serialize access across the TIQN+TPG Tuple.
 	 */
-	ret = mutex_lock_interruptible(&tpg->np_login_lock);
+	ret = down_interruptible(&tpg->np_login_sem);
 	if ((ret != 0) || signal_pending(current))
 		return -1;
 
-	spin_lock_bh(&np->np_thread_lock);
-	np->np_login_tpg = tpg;
-	spin_unlock_bh(&np->np_thread_lock);
+	spin_lock_bh(&tpg->tpg_state_lock);
+	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+		spin_unlock_bh(&tpg->tpg_state_lock);
+		up(&tpg->np_login_sem);
+		return -1;
+	}
+	spin_unlock_bh(&tpg->tpg_state_lock);
 
 	return 0;
 }
 
-int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+void iscsit_login_kref_put(struct kref *kref)
+{
+	struct iscsi_tpg_np *tpg_np = container_of(kref,
+				struct iscsi_tpg_np, tpg_np_kref);
+
+	complete(&tpg_np->tpg_np_comp);
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
+		       struct iscsi_tpg_np *tpg_np)
 {
 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
 
-	spin_lock_bh(&np->np_thread_lock);
-	np->np_login_tpg = NULL;
-	spin_unlock_bh(&np->np_thread_lock);
+	up(&tpg->np_login_sem);
 
-	mutex_unlock(&tpg->np_login_lock);
+	if (tpg_np)
+		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
 
 	if (tiqn)
 		iscsit_put_tiqn_for_login(tiqn);
@@ -264,60 +270,73 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
 	return 0;
 }
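The tpg_np_kref/tpg_np_comp pair introduced above is a common teardown handshake: the final kref_put() fires a completion that the shutdown path sleeps on, which is how iscsit_reset_np_thread() further down waits for an in-flight login to drop its reference. The same pattern, self-contained with placeholder example_* names:

#include <linux/kref.h>
#include <linux/completion.h>

struct example_obj {
	struct kref		ref;
	struct completion	done;
};

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, ref);

	complete(&obj->done);
}

static void example_shutdown(struct example_obj *obj)
{
	/* drop our reference, then sleep until the last holder puts theirs */
	kref_put(&obj->ref, example_release);
	wait_for_completion(&obj->done);
}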
-static struct iscsi_np *iscsit_get_np(
+bool iscsit_check_np_match(
 	struct __kernel_sockaddr_storage *sockaddr,
+	struct iscsi_np *np,
 	int network_transport)
 {
 	struct sockaddr_in *sock_in, *sock_in_e;
 	struct sockaddr_in6 *sock_in6, *sock_in6_e;
-	struct iscsi_np *np;
-	int ip_match = 0;
+	bool ip_match = false;
 	u16 port;
 
-	spin_lock_bh(&np_lock);
-	list_for_each_entry(np, &g_np_list, np_list) {
-		spin_lock(&np->np_thread_lock);
-		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
-			spin_unlock(&np->np_thread_lock);
-			continue;
-		}
+	if (sockaddr->ss_family == AF_INET6) {
+		sock_in6 = (struct sockaddr_in6 *)sockaddr;
+		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+		if (!memcmp(&sock_in6->sin6_addr.in6_u,
+			    &sock_in6_e->sin6_addr.in6_u,
+			    sizeof(struct in6_addr)))
+			ip_match = true;
+
+		port = ntohs(sock_in6->sin6_port);
+	} else {
+		sock_in = (struct sockaddr_in *)sockaddr;
+		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
 
-		if (sockaddr->ss_family == AF_INET6) {
-			sock_in6 = (struct sockaddr_in6 *)sockaddr;
-			sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
+			ip_match = true;
 
-			if (!memcmp(&sock_in6->sin6_addr.in6_u,
-				    &sock_in6_e->sin6_addr.in6_u,
-				    sizeof(struct in6_addr)))
-				ip_match = 1;
+		port = ntohs(sock_in->sin_port);
+	}
 
-			port = ntohs(sock_in6->sin6_port);
-		} else {
-			sock_in = (struct sockaddr_in *)sockaddr;
-			sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+	if (ip_match && (np->np_port == port) &&
+	    (np->np_network_transport == network_transport))
+		return true;
 
-			if (sock_in->sin_addr.s_addr ==
-			    sock_in_e->sin_addr.s_addr)
-				ip_match = 1;
+	return false;
+}
 
-			port = ntohs(sock_in->sin_port);
+/*
+ * Called with mutex np_lock held
+ */
+static struct iscsi_np *iscsit_get_np(
+	struct __kernel_sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_np *np;
+	bool match;
+
+	list_for_each_entry(np, &g_np_list, np_list) {
+		spin_lock_bh(&np->np_thread_lock);
+		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+			spin_unlock_bh(&np->np_thread_lock);
+			continue;
 		}
 
-		if ((ip_match == 1) && (np->np_port == port) &&
-		    (np->np_network_transport == network_transport)) {
+		match = iscsit_check_np_match(sockaddr, np, network_transport);
+		if (match) {
 			/*
 			 * Increment the np_exports reference count now to
 			 * prevent iscsit_del_np() below from being called
 			 * while iscsi_tpg_add_network_portal() is called.
 			 */
 			np->np_exports++;
-			spin_unlock(&np->np_thread_lock);
-			spin_unlock_bh(&np_lock);
+			spin_unlock_bh(&np->np_thread_lock);
 			return np;
 		}
-		spin_unlock(&np->np_thread_lock);
+		spin_unlock_bh(&np->np_thread_lock);
 	}
-	spin_unlock_bh(&np_lock);
 
 	return NULL;
 }
@@ -331,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
 	struct sockaddr_in6 *sock_in6;
 	struct iscsi_np *np;
 	int ret;
+
+	mutex_lock(&np_lock);
+
 	/*
 	 * Locate the existing struct iscsi_np if already active..
 	 */
 	np = iscsit_get_np(sockaddr, network_transport);
-	if (np)
+	if (np) {
+		mutex_unlock(&np_lock);
 		return np;
+	}
 
 	np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
 	if (!np) {
 		pr_err("Unable to allocate memory for struct iscsi_np\n");
+		mutex_unlock(&np_lock);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -363,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
 	ret = iscsi_target_setup_login_socket(np, sockaddr);
 	if (ret != 0) {
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
@@ -371,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
 		pr_err("Unable to create kthread: iscsi_np\n");
 		ret = PTR_ERR(np->np_thread);
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
 	/*
@@ -381,14 +408,13 @@ struct iscsi_np *iscsit_add_np(
 	 * point because iscsi_np has not been added to g_np_list yet.
 	 */
 	np->np_exports = 1;
+	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 
-	spin_lock_bh(&np_lock);
 	list_add_tail(&np->np_list, &g_np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
-		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
-		"TCP" : "SCTP");
+		np->np_ip, np->np_port, np->np_transport->name);
 
 	return np;
 }
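Holding np_lock as a mutex across all of iscsit_add_np() closes the race the old spinlock could not: the sleeping kzalloc(), login socket setup, and kthread_run() previously had to run outside the lock, leaving a window for two identical portals to be created. The locking shape, reduced to a get-or-create sketch with illustrative example_* names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);
static LIST_HEAD(example_list);

struct example_np {
	struct list_head node;
	int		 key;
};

static struct example_np *example_get_or_add(int key)
{
	struct example_np *np;

	mutex_lock(&example_lock);

	list_for_each_entry(np, &example_list, node) {
		if (np->key == key)
			goto out;		/* reuse the existing entry */
	}

	/* a mutex, unlike a spinlock, may be held across sleeping calls */
	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (np) {
		np->key = key;
		list_add_tail(&np->node, &example_list);
	}
out:
	mutex_unlock(&example_lock);
	return np;
}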
@@ -396,20 +422,10 @@ int iscsit_reset_np_thread(
 	struct iscsi_np *np,
 	struct iscsi_tpg_np *tpg_np,
-	struct iscsi_portal_group *tpg)
+	struct iscsi_portal_group *tpg,
+	bool shutdown)
 {
 	spin_lock_bh(&np->np_thread_lock);
-	if (tpg && tpg_np) {
-		/*
-		 * The reset operation need only be performed when the
-		 * passed struct iscsi_portal_group has a login in progress
-		 * to one of the network portals.
-		 */
-		if (tpg_np->tpg_np->np_login_tpg != tpg) {
-			spin_unlock_bh(&np->np_thread_lock);
-			return 0;
-		}
-	}
 	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
 		spin_unlock_bh(&np->np_thread_lock);
 		return 0;
@@ -424,14 +440,19 @@ int iscsit_reset_np_thread(
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
+	if (tpg_np && shutdown) {
+		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+
+		wait_for_completion(&tpg_np->tpg_np_comp);
+	}
+
 	return 0;
 }
 
-static int iscsit_del_np_comm(struct iscsi_np *np)
+static void iscsit_free_np(struct iscsi_np *np)
 {
 	if (np->np_socket)
 		sock_release(np->np_socket);
-	return 0;
 }
 
 int iscsit_del_np(struct iscsi_np *np)
@@ -439,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np)
 	spin_lock_bh(&np->np_thread_lock);
 	np->np_exports--;
 	if (np->np_exports) {
+		np->enabled = true;
 		spin_unlock_bh(&np->np_thread_lock);
 		return 0;
 	}
@@ -452,21 +474,67 @@ int iscsit_del_np(struct iscsi_np *np)
 	 */
 		send_sig(SIGINT, np->np_thread, 1);
 		kthread_stop(np->np_thread);
+		np->np_thread = NULL;
 	}
-	iscsit_del_np_comm(np);
 
-	spin_lock_bh(&np_lock);
+	np->np_transport->iscsit_free_np(np);
+
+	mutex_lock(&np_lock);
 	list_del(&np->np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
-		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
-		"TCP" : "SCTP");
+		np->np_ip, np->np_port, np->np_transport->name);
 
+	iscsit_put_transport(np->np_transport);
 	kfree(np);
 	return 0;
 }
 
+static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+
+static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	return 0;
+}
+
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	__iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	return TARGET_PROT_NORMAL;
+}
+
+static struct iscsit_transport iscsi_target_transport = {
+	.name			= "iSCSI/TCP",
+	.transport_type		= ISCSI_TCP,
+	.owner			= NULL,
+	.iscsit_setup_np	= iscsit_setup_np,
+	.iscsit_accept_np	= iscsit_accept_np,
+	.iscsit_free_np		= iscsit_free_np,
+	.iscsit_get_login_rx	= iscsit_get_login_rx,
+	.iscsit_put_login_tx	= iscsit_put_login_tx,
+	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
+	.iscsit_immediate_queue	= iscsit_immediate_queue,
+	.iscsit_response_queue	= iscsit_response_queue,
+	.iscsit_queue_data_in	= iscsit_queue_rsp,
+	.iscsit_queue_status	= iscsit_queue_rsp,
+	.iscsit_aborted_task	= iscsit_aborted_task,
+	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
+};
+
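The new struct iscsit_transport above turns the TCP fast path into just one provider of these callbacks, and iscsit_register_transport() is what an offload driver such as iSER uses to plug in its own. A sketch of registration under the same API (the example_* callbacks are placeholders a real transport would implement; ISCSI_INFINIBAND is the transport type an RDMA driver would claim):

#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>

/* Placeholder callbacks; a real transport supplies working versions. */
extern int example_setup_np(struct iscsi_np *, struct __kernel_sockaddr_storage *);
extern int example_accept_np(struct iscsi_np *, struct iscsi_conn *);
extern void example_free_np(struct iscsi_np *);

static struct iscsit_transport example_transport = {
	.name			= "example",
	.transport_type		= ISCSI_INFINIBAND,
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= example_setup_np,
	.iscsit_accept_np	= example_accept_np,
	.iscsit_free_np		= example_free_np,
};

static int __init example_init(void)
{
	iscsit_register_transport(&example_transport);
	return 0;
}

static void __exit example_exit(void)
{
	iscsit_unregister_transport(&example_transport);
}

module_init(example_init);
module_exit(example_exit);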
 static int __init iscsi_target_init_module(void)
 {
 	int ret = 0;
@@ -498,22 +566,13 @@ static int __init iscsi_target_init_module(void)
 		goto ts_out1;
 	}
 
-	lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
-			sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
-			0, NULL);
-	if (!lio_cmd_cache) {
-		pr_err("Unable to kmem_cache_create() for"
-			" lio_cmd_cache\n");
-		goto ts_out2;
-	}
-
 	lio_qr_cache = kmem_cache_create("lio_qr_cache",
 			sizeof(struct iscsi_queue_req),
 			__alignof__(struct iscsi_queue_req), 0, NULL);
 	if (!lio_qr_cache) {
 		pr_err("nable to kmem_cache_create() for"
 			" lio_qr_cache\n");
-		goto cmd_out;
+		goto ts_out2;
 	}
 
 	lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -543,6 +602,8 @@ static int __init iscsi_target_init_module(void)
 		goto ooo_out;
 	}
 
+	iscsit_register_transport(&iscsi_target_transport);
+
 	if (iscsit_load_discovery_tpg() < 0)
 		goto r2t_out;
 
@@ -555,8 +616,6 @@ dr_out:
 	kmem_cache_destroy(lio_dr_cache);
 qr_out:
 	kmem_cache_destroy(lio_qr_cache);
-cmd_out:
-	kmem_cache_destroy(lio_cmd_cache);
 ts_out2:
 	iscsi_deallocate_thread_sets();
 ts_out1:
@@ -573,7 +632,7 @@ static void __exit iscsi_target_cleanup_module(void)
 	iscsi_deallocate_thread_sets();
 	iscsi_thread_set_free();
 	iscsit_release_discovery_tpg();
-	kmem_cache_destroy(lio_cmd_cache);
+	iscsit_unregister_transport(&iscsi_target_transport);
 	kmem_cache_destroy(lio_qr_cache);
 	kmem_cache_destroy(lio_dr_cache);
 	kmem_cache_destroy(lio_ooo_cache);
@@ -585,30 +644,23 @@ static void __exit iscsi_target_cleanup_module(void)
 }
 
 static int iscsit_add_reject(
+	struct iscsi_conn *conn,
 	u8 reason,
-	int fail_conn,
-	unsigned char *buf,
-	struct iscsi_conn *conn)
+	unsigned char *buf)
 {
 	struct iscsi_cmd *cmd;
-	struct iscsi_reject *hdr;
-	int ret;
 
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd)
 		return -1;
 
 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
-	if (fail_conn)
-		cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
-	hdr = (struct iscsi_reject *) cmd->pdu;
-	hdr->reason = reason;
+	cmd->reject_reason = reason;
 
 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
-		iscsit_release_cmd(cmd);
+		iscsit_free_cmd(cmd, false);
 		return -1;
 	}
@@ -619,23 +671,16 @@ static int iscsit_add_reject(
 	cmd->i_state = ISTATE_SEND_REJECT;
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 
-	ret = wait_for_completion_interruptible(&cmd->reject_comp);
-	if (ret != 0)
-		return -1;
-
-	return (!fail_conn) ? 0 : -1;
+	return -1;
 }
 
-int iscsit_add_reject_from_cmd(
+static int iscsit_add_reject_from_cmd(
+	struct iscsi_cmd *cmd,
 	u8 reason,
-	int fail_conn,
-	int add_to_conn,
-	unsigned char *buf,
-	struct iscsi_cmd *cmd)
+	bool add_to_conn,
+	unsigned char *buf)
 {
 	struct iscsi_conn *conn;
-	struct iscsi_reject *hdr;
-	int ret;
 
 	if (!cmd->conn) {
 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -645,16 +690,12 @@ int iscsit_add_reject_from_cmd(
 	conn = cmd->conn;
 
 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
-	if (fail_conn)
-		cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
-	hdr = (struct iscsi_reject *) cmd->pdu;
-	hdr->reason = reason;
+	cmd->reject_reason = reason;
 
 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
-		iscsit_release_cmd(cmd);
+		iscsit_free_cmd(cmd, false);
 		return -1;
 	}
@@ -666,12 +707,26 @@ int iscsit_add_reject_from_cmd(
 	cmd->i_state = ISTATE_SEND_REJECT;
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	/*
+	 * Perform the kref_put now if se_cmd has already been setup by
+	 * scsit_setup_scsi_cmd()
+	 */
+	if (cmd->se_cmd.se_tfo != NULL) {
+		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
+		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+	}
+	return -1;
+}
 
-	ret = wait_for_completion_interruptible(&cmd->reject_comp);
-	if (ret != 0)
-		return -1;
+static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
+				 unsigned char *buf)
+{
+	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
+}
 
-	return (!fail_conn) ? 0 : -1;
+int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
+{
+	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
 }
 
 /*
@@ -727,24 +782,32 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
 
 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
 {
-	struct iscsi_cmd *cmd;
+	LIST_HEAD(ack_list);
+	struct iscsi_cmd *cmd, *cmd_p;
 
 	conn->exp_statsn = exp_statsn;
 
+	if (conn->sess->sess_ops->RDMAExtensions)
+		return;
+
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
 		spin_lock(&cmd->istate_lock);
 		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
 		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
 			cmd->i_state = ISTATE_REMOVE;
 			spin_unlock(&cmd->istate_lock);
-			iscsit_add_cmd_to_immediate_queue(cmd, conn,
-					cmd->i_state);
+			list_move_tail(&cmd->i_conn_node, &ack_list);
 			continue;
 		}
 		spin_unlock(&cmd->istate_lock);
 	}
 	spin_unlock_bh(&conn->cmd_lock);
+
+	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+		list_del_init(&cmd->i_conn_node);
+		iscsit_free_cmd(cmd, false);
+	}
 }
 
 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
@@ -763,24 +826,15 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
 	return 0;
 }
 
-static int iscsit_handle_scsi_cmd(
-	struct iscsi_conn *conn,
-	unsigned char *buf)
+int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			  unsigned char *buf)
 {
-	int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
-	struct iscsi_cmd *cmd = NULL;
+	int data_direction, payload_length;
 	struct iscsi_scsi_req *hdr;
 	int iscsi_task_attr;
 	int sam_task_attr;
 
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->cmd_pdus++;
-	if (conn->sess->se_sess->se_node_acl) {
-		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-		conn->sess->se_sess->se_node_acl->num_cmds++;
-		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-	}
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_inc(&conn->sess->cmd_pdus);
 
 	hdr = (struct iscsi_scsi_req *) buf;
 	payload_length = ntoh24(hdr->dlength);
@@ -791,104 +845,97 @@
 		!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
 		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
			" not set. Bad iSCSI Initiator.\n");
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
-				buf, conn);
+		return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
 		/*
-		 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-		 * that adds support for RESERVE/RELEASE.  There is a bug
-		 * add with this new functionality that sets R/W bits when
-		 * neither CDB carries any READ or WRITE datapayloads.
+		 * From RFC-3720 Section 10.3.1:
+		 *
+		 * "Either or both of R and W MAY be 1 when either the
+		 *  Expected Data Transfer Length and/or Bidirectional Read
+		 *  Expected Data Transfer Length are 0"
+		 *
+		 * For this case, go ahead and clear the unnecssary bits
+		 * to avoid any confusion with ->data_direction.
 		 */
-		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-			goto done;
-		}
+		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 
-		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
-			" CDB: 0x%02x.
Bad iSCSI Initiator.\n", hdr->cdb[0]); - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, - buf, conn); + " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]); } -done: if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE" " MUST be set if Expected Data Transfer Length is not 0." " Bad iSCSI Initiator\n"); - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); } if ((hdr->flags & ISCSI_FLAG_CMD_READ) && (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { pr_err("Bidirectional operations not supported!\n"); - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); } if (hdr->opcode & ISCSI_OP_IMMEDIATE) { pr_err("Illegally set Immediate Bit in iSCSI Initiator" " Scsi Command PDU.\n"); - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); } if (payload_length && !conn->sess->sess_ops->ImmediateData) { pr_err("ImmediateData=No but DataSegmentLength=%u," " protocol error.\n", payload_length); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); } - if ((be32_to_cpu(hdr->data_length )== payload_length) && + if ((be32_to_cpu(hdr->data_length) == payload_length) && (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) { pr_err("Expected Data Transfer Length and Length of" " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" " bit is not set protocol error\n"); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); } if (payload_length > be32_to_cpu(hdr->data_length)) { pr_err("DataSegmentLength: %u is greater than" " EDTL: %u, protocol error.\n", payload_length, hdr->data_length); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); } if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { pr_err("DataSegmentLength: %u is greater than" " MaxXmitDataSegmentLength: %u, protocol error.\n", payload_length, conn->conn_ops->MaxXmitDataSegmentLength); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); } if (payload_length > conn->sess->sess_ops->FirstBurstLength) { pr_err("DataSegmentLength: %u is greater than" " FirstBurstLength: %u, protocol error.\n", payload_length, conn->sess->sess_ops->FirstBurstLength); - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); } data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : (hdr->flags & ISCSI_FLAG_CMD_READ) ? 
DMA_FROM_DEVICE : DMA_NONE; - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, - buf, conn); - cmd->data_direction = data_direction; iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; /* @@ -931,14 +978,14 @@ done: cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); cmd->first_burst_len = payload_length; - if (cmd->data_direction == DMA_FROM_DEVICE) { + if (!conn->sess->sess_ops->RDMAExtensions && + cmd->data_direction == DMA_FROM_DEVICE) { struct iscsi_datain_req *dr; dr = iscsit_allocate_datain_req(); if (!dr) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); iscsit_attach_datain_req(cmd, dr); } @@ -953,7 +1000,10 @@ done: pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, - hdr->cmdsn, hdr->data_length, payload_length, conn->cid); + hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length, + conn->cid); + + target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true); cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd, scsilun_to_int(&hdr->lun)); @@ -963,18 +1013,16 @@ done: cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb); if (cmd->sense_reason) { if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } goto attach_cmd; } if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) { - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } attach_cmd: @@ -987,12 +1035,24 @@ attach_cmd: */ core_alua_check_nonop_delay(&cmd->se_cmd); - if (iscsit_allocate_iovecs(cmd) < 0) { - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 0, buf, cmd); - } + return 0; +} +EXPORT_SYMBOL(iscsit_setup_scsi_cmd); +void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd) +{ + iscsit_set_dataout_sequence_values(cmd); + + spin_lock_bh(&cmd->dataout_timeout_lock); + iscsit_start_dataout_timer(cmd, cmd->conn); + spin_unlock_bh(&cmd->dataout_timeout_lock); +} +EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout); + +int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_scsi_req *hdr) +{ + int cmdsn_ret = 0; /* * Check the CmdSN against ExpCmdSN/MaxCmdSN here if * the Immediate Bit is not set, and no Immediate @@ -1004,13 +1064,14 @@ attach_cmd: * be acknowledged. (See below) */ if (!cmd->immediate_data) { - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); - if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); return 0; - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); + } } iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); @@ -1019,37 +1080,52 @@ attach_cmd: * If no Immediate Data is attached, it's OK to return now. 
*/ if (!cmd->immediate_data) { - if (!cmd->sense_reason && cmd->unsolicited_data) { - iscsit_set_dataout_sequence_values(cmd); - - spin_lock_bh(&cmd->dataout_timeout_lock); - iscsit_start_dataout_timer(cmd, cmd->conn); - spin_unlock_bh(&cmd->dataout_timeout_lock); - } + if (!cmd->sense_reason && cmd->unsolicited_data) + iscsit_set_unsoliticed_dataout(cmd); + if (!cmd->sense_reason) + return 0; + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); return 0; } /* - * Early CHECK_CONDITIONs never make it to the transport processing - * thread. They are processed in CmdSN order by - * iscsit_check_received_cmdsn() below. + * Early CHECK_CONDITIONs with ImmediateData never make it to command + * execution. These exceptions are processed in CmdSN order using + * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below. */ if (cmd->sense_reason) { - immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; - goto after_immediate_data; + if (cmd->reject_reason) + return 0; + + return 1; } /* * Call directly into transport_generic_new_cmd() to perform * the backend memory allocation. */ cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd); - if (cmd->sense_reason) { - immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; + if (cmd->sense_reason) + return 1; + + return 0; +} +EXPORT_SYMBOL(iscsit_process_scsi_cmd); + +static int +iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, + bool dump_payload) +{ + struct iscsi_conn *conn = cmd->conn; + int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; + /* + * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. + */ + if (dump_payload) goto after_immediate_data; - } - immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length); + immed_ret = iscsit_handle_immediate_data(cmd, hdr, + cmd->first_burst_len); after_immediate_data: if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { /* @@ -1057,26 +1133,20 @@ after_immediate_data: * DataCRC, check against ExpCmdSN/MaxCmdSN if * Immediate Bit is not set. */ - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); - /* - * Special case for Unsupported SAM WRITE Opcodes - * and ImmediateData=Yes. - */ - if (cmd->sense_reason) { - if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) - return -1; - } else if (cmd->unsolicited_data) { - iscsit_set_dataout_sequence_values(cmd); + cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; - spin_lock_bh(&cmd->dataout_timeout_lock); - iscsit_start_dataout_timer(cmd, cmd->conn); - spin_unlock_bh(&cmd->dataout_timeout_lock); - } + if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + int rc; - if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); + rc = iscsit_dump_data_payload(cmd->conn, + cmd->first_burst_len, 1); + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); + return rc; + } else if (cmd->unsolicited_data) + iscsit_set_unsoliticed_dataout(cmd); } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { /* @@ -1091,13 +1161,46 @@ after_immediate_data: * CmdSN and issue a retry to plug the sequence. 
*/ cmd->i_state = ISTATE_REMOVE; - iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); + iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state); } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ return -1; return 0; } +static int +iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + unsigned char *buf) +{ + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; + int rc, immed_data; + bool dump_payload = false; + + rc = iscsit_setup_scsi_cmd(conn, cmd, buf); + if (rc < 0) + return 0; + /* + * Allocation iovecs needed for struct socket operations for + * traditional iSCSI block I/O. + */ + if (iscsit_allocate_iovecs(cmd) < 0) { + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + immed_data = cmd->immediate_data; + + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); + if (rc < 0) + return rc; + else if (rc > 0) + dump_payload = true; + + if (!immed_data) + return 0; + + return iscsit_get_immediate_data(cmd, hdr, dump_payload); +} + static u32 iscsit_do_crypto_hash_sg( struct hash_desc *hash, struct iscsi_cmd *cmd, @@ -1140,7 +1243,7 @@ static u32 iscsit_do_crypto_hash_sg( static void iscsit_do_crypto_hash_buf( struct hash_desc *hash, - unsigned char *buf, + const void *buf, u32 payload_length, u32 padding, u8 *pad_bytes, @@ -1160,43 +1263,30 @@ static void iscsit_do_crypto_hash_buf( crypto_hash_final(hash, data_crc); } -static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) +int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, + struct iscsi_cmd **out_cmd) { - int iov_ret, ooo_cmdsn = 0, ret; - u8 data_crc_failed = 0; - u32 checksum, iov_count = 0, padding = 0, rx_got = 0; - u32 rx_size = 0, payload_length; + struct iscsi_data *hdr = (struct iscsi_data *)buf; struct iscsi_cmd *cmd = NULL; struct se_cmd *se_cmd; - struct iscsi_data *hdr; - struct kvec *iov; - unsigned long flags; - - hdr = (struct iscsi_data *) buf; - payload_length = ntoh24(hdr->dlength); + u32 payload_length = ntoh24(hdr->dlength); + int rc; if (!payload_length) { - pr_err("DataOUT payload is ZERO, protocol error.\n"); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + pr_warn("DataOUT payload is ZERO, ignoring.\n"); + return 0; } /* iSCSI write */ - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->rx_data_octets += payload_length; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->write_bytes += payload_length; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_add(payload_length, &conn->sess->rx_data_octets); if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { pr_err("DataSegmentLength: %u is greater than" " MaxXmitDataSegmentLength: %u\n", payload_length, conn->conn_ops->MaxXmitDataSegmentLength); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buf); } cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, @@ -1206,7 +1296,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", - hdr->itt, hdr->ttt, hdr->datasn, hdr->offset, + hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset), payload_length, conn->cid); if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { @@ -1219,8 +1309,7 @@ 
static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) if (cmd->data_direction != DMA_TO_DEVICE) { pr_err("Command ITT: 0x%08x received DataOUT for a" " NON-WRITE command.\n", cmd->init_task_tag); - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); + return iscsit_dump_data_payload(conn, payload_length, 1); } se_cmd = &cmd->se_cmd; iscsit_mod_dataout_timer(cmd); @@ -1229,8 +1318,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) pr_err("DataOut Offset: %u, Length %u greater than" " iSCSI Command EDTL %u, protocol error.\n", hdr->offset, payload_length, cmd->se_cmd.data_length); - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, - 1, 0, buf, cmd); + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf); } if (cmd->unsolicited_data) { @@ -1250,14 +1338,9 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) */ /* Something's amiss if we're not in WRITE_PENDING state... */ - spin_lock_irqsave(&se_cmd->t_state_lock, flags); WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING); - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); - - spin_lock_irqsave(&se_cmd->t_state_lock, flags); if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE)) dump_unsolicited_data = 1; - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); if (dump_unsolicited_data) { /* @@ -1298,12 +1381,26 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and * within-command recovery checks before receiving the payload. */ - ret = iscsit_check_pre_dataout(cmd, buf); - if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) + rc = iscsit_check_pre_dataout(cmd, buf); + if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY) return 0; - else if (ret == DATAOUT_CANNOT_RECOVER) + else if (rc == DATAOUT_CANNOT_RECOVER) return -1; + *out_cmd = cmd; + return 0; +} +EXPORT_SYMBOL(iscsit_check_dataout_hdr); + +static int +iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_data *hdr) +{ + struct kvec *iov; + u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0; + u32 payload_length = ntoh24(hdr->dlength); + int iov_ret, data_crc_failed = 0; + rx_size += payload_length; iov = &cmd->iov_data[0]; @@ -1356,17 +1453,27 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) payload_length); } } + + return data_crc_failed; +} + +int +iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr, + bool data_crc_failed) +{ + struct iscsi_conn *conn = cmd->conn; + int rc, ooo_cmdsn; /* * Increment post receive data and CRC values or perform * within-command recovery. */ - ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed); - if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)) + rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed); + if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)) return 0; - else if (ret == DATAOUT_SEND_R2T) { + else if (rc == DATAOUT_SEND_R2T) { iscsit_set_dataout_sequence_values(cmd); - iscsit_build_r2ts_for_cmd(cmd, conn, false); - } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { + conn->conn_transport->iscsit_get_dataout(conn, cmd, false); + } else if (rc == DATAOUT_SEND_TO_TRANSPORT) { /* * Handle extra special case for out of order * Unsolicited Data Out. 
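The DataOUT path is now split so that the socket-independent header checks (iscsit_check_dataout_hdr) and post-payload bookkeeping (iscsit_check_dataout_payload) can be shared with transports that receive the payload by other means than kvec reads. A minimal sketch, not from the patch itself, of how the exported halves compose around a transport-specific receive step:

#include <target/iscsi/iscsi_transport.h>

static int example_rx_dataout(struct iscsi_conn *conn, unsigned char *buf)
{
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	struct iscsi_cmd *cmd = NULL;
	int rc;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	if (!cmd)
		return 0;	/* PDU was ignored or dumped */

	/* ... transport-specific payload receive goes here ... */

	return iscsit_check_dataout_payload(cmd, hdr, false);
}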
@@ -1387,26 +1494,54 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) return 0; } +EXPORT_SYMBOL(iscsit_check_dataout_payload); -static int iscsit_handle_nop_out( - struct iscsi_conn *conn, - unsigned char *buf) +static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) { - unsigned char *ping_data = NULL; - int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size; - u32 checksum, data_crc, padding = 0, payload_length; struct iscsi_cmd *cmd = NULL; - struct kvec *iov = NULL; - struct iscsi_nopout *hdr; + struct iscsi_data *hdr = (struct iscsi_data *)buf; + int rc; + bool data_crc_failed = false; - hdr = (struct iscsi_nopout *) buf; - payload_length = ntoh24(hdr->dlength); + rc = iscsit_check_dataout_hdr(conn, buf, &cmd); + if (rc < 0) + return 0; + else if (!cmd) + return 0; + + rc = iscsit_get_dataout(conn, cmd, hdr); + if (rc < 0) + return rc; + else if (rc > 0) + data_crc_failed = true; + + return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed); +} + +int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_nopout *hdr) +{ + u32 payload_length = ntoh24(hdr->dlength); + + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { + pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n"); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + } if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { pr_err("NOPOUT ITT is reserved, but Immediate Bit is" " not set, protocol error.\n"); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); } if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { @@ -1414,11 +1549,15 @@ static int iscsit_handle_nop_out( " greater than MaxXmitDataSegmentLength: %u, protocol" " error.\n", payload_length, conn->conn_ops->MaxXmitDataSegmentLength); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); } - pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x," + pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x," " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", hdr->itt == RESERVED_ITT ? "Response" : "Request", hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, @@ -1431,12 +1570,6 @@ static int iscsit_handle_nop_out( * can contain ping data. */ if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return iscsit_add_reject( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, buf, conn); - cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT; cmd->i_state = ISTATE_SEND_NOPIN; cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? @@ -1448,8 +1581,87 @@ static int iscsit_handle_nop_out( cmd->data_direction = DMA_NONE; } + return 0; +} +EXPORT_SYMBOL(iscsit_setup_nop_out); + +int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_nopout *hdr) +{ + struct iscsi_cmd *cmd_p = NULL; + int cmdsn_ret = 0; + /* + * Initiator is expecting a NopIN ping reply.. 
+ */ + if (hdr->itt != RESERVED_ITT) { + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); + + if (hdr->opcode & ISCSI_OP_IMMEDIATE) { + iscsit_add_cmd_to_response_queue(cmd, conn, + cmd->i_state); + return 0; + } + + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + return 0; + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + + return 0; + } + /* + * This was a response to a unsolicited NOPIN ping. + */ + if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { + cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt)); + if (!cmd_p) + return -EINVAL; + + iscsit_stop_nopin_response_timer(conn); + + cmd_p->i_state = ISTATE_REMOVE; + iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state); + + iscsit_start_nopin_timer(conn); + return 0; + } + /* + * Otherwise, initiator is not expecting a NOPIN is response. + * Just ignore for now. + */ + return 0; +} +EXPORT_SYMBOL(iscsit_process_nop_out); + +static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + unsigned char *buf) +{ + unsigned char *ping_data = NULL; + struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; + struct kvec *iov = NULL; + u32 payload_length = ntoh24(hdr->dlength); + int ret; + + ret = iscsit_setup_nop_out(conn, cmd, hdr); + if (ret < 0) + return 0; + /* + * Handle NOP-OUT payload for traditional iSCSI sockets + */ if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { - rx_size = payload_length; + u32 checksum, data_crc, padding = 0; + int niov = 0, rx_got, rx_size = payload_length; + ping_data = kzalloc(payload_length + 1, GFP_KERNEL); if (!ping_data) { pr_err("Unable to allocate memory for" @@ -1528,86 +1740,24 @@ static int iscsit_handle_nop_out( pr_debug("Ping Data: \"%s\"\n", ping_data); } - if (hdr->itt != RESERVED_ITT) { - if (!cmd) { - pr_err("Checking CmdSN for NOPOUT," - " but cmd is NULL!\n"); - return -1; - } - /* - * Initiator is expecting a NopIN ping reply, - */ - spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); - spin_unlock_bh(&conn->cmd_lock); - - iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); - - if (hdr->opcode & ISCSI_OP_IMMEDIATE) { - iscsit_add_cmd_to_response_queue(cmd, conn, - cmd->i_state); - return 0; - } - - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); - if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { - ret = 0; - goto ping_out; - } - if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); - - return 0; - } - - if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { - /* - * This was a response to a unsolicited NOPIN ping. - */ - cmd = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt)); - if (!cmd) - return -1; - - iscsit_stop_nopin_response_timer(conn); - - cmd->i_state = ISTATE_REMOVE; - iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); - iscsit_start_nopin_timer(conn); - } else { - /* - * Initiator is not expecting a NOPIN is response. - * Just ignore for now. - * - * iSCSI v19-91 10.18 - * "A NOP-OUT may also be used to confirm a changed - * ExpStatSN if another PDU will not be available - * for a long time." 
- */ - ret = 0; - goto out; - } - - return 0; + return iscsit_process_nop_out(conn, cmd, hdr); out: if (cmd) - iscsit_release_cmd(cmd); -ping_out: + iscsit_free_cmd(cmd, false); + kfree(ping_data); return ret; } -static int iscsit_handle_task_mgt_cmd( - struct iscsi_conn *conn, - unsigned char *buf) +int +iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + unsigned char *buf) { - struct iscsi_cmd *cmd; struct se_tmr_req *se_tmr; struct iscsi_tmr_req *tmr_req; struct iscsi_tm *hdr; - int out_of_order_cmdsn = 0; - int ret; + int out_of_order_cmdsn = 0, ret; + bool sess_ref = false; u8 function; hdr = (struct iscsi_tm *) buf; @@ -1631,27 +1781,22 @@ static int iscsit_handle_task_mgt_cmd( pr_err("Task Management Request TASK_REASSIGN not" " issued as immediate command, bad iSCSI Initiator" "implementation\n"); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); } if ((function != ISCSI_TM_FUNC_ABORT_TASK) && be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG) hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG); - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, buf, conn); - cmd->data_direction = DMA_NONE; cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); if (!cmd->tmr_req) { pr_err("Unable to allocate memory for" " Task Management command!\n"); - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + buf); } /* @@ -1668,6 +1813,9 @@ static int iscsit_handle_task_mgt_cmd( conn->sess->se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG, cmd->sense_buffer + 2); + target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true); + sess_ref = true; + switch (function) { case ISCSI_TM_FUNC_ABORT_TASK: tcm_function = TMR_ABORT_TASK; @@ -1693,17 +1841,15 @@ static int iscsit_handle_task_mgt_cmd( default: pr_err("Unknown iSCSI TMR Function:" " 0x%02x\n", function); - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL); if (ret < 0) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; } @@ -1762,9 +1908,8 @@ static int iscsit_handle_task_mgt_cmd( break; if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_INVALID, 1, 1, - buf, cmd); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); break; default: pr_err("Unknown TMR function: 0x%02x, protocol" @@ -1782,15 +1927,13 @@ attach: spin_unlock_bh(&conn->cmd_lock); if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { - int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); + int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) out_of_order_cmdsn = 1; else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) return 0; else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); + return -1; } iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); @@ -1810,50 +1953,142 @@ attach: * For 
connection recovery, this is also the default action for * TMR TASK_REASSIGN. */ + if (sess_ref) { + pr_debug("Handle TMR, using sess_ref=true check\n"); + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); + } + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); return 0; } +EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); /* #warning FIXME: Support Text Command parameters besides SendTargets */ -static int iscsit_handle_text_cmd( - struct iscsi_conn *conn, - unsigned char *buf) +int +iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_text *hdr) { - char *text_ptr, *text_in; - int cmdsn_ret, niov = 0, rx_got, rx_size; - u32 checksum = 0, data_crc = 0, payload_length; - u32 padding = 0, pad_bytes = 0, text_length = 0; - struct iscsi_cmd *cmd; - struct kvec iov[3]; - struct iscsi_text *hdr; - - hdr = (struct iscsi_text *) buf; - payload_length = ntoh24(hdr->dlength); + u32 payload_length = ntoh24(hdr->dlength); if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { pr_err("Unable to accept text parameter length: %u" "greater than MaxXmitDataSegmentLength %u.\n", payload_length, conn->conn_ops->MaxXmitDataSegmentLength); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + } + + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) || + (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) { + pr_err("Multi sequence text commands currently not supported\n"); + return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED, + (unsigned char *)hdr); } pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, hdr->exp_statsn, payload_length); - rx_size = text_length = payload_length; - if (text_length) { - text_in = kzalloc(text_length, GFP_KERNEL); + cmd->iscsi_opcode = ISCSI_OP_TEXT; + cmd->i_state = ISTATE_SEND_TEXTRSP; + cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 
1 : 0); + conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; + cmd->targ_xfer_tag = 0xFFFFFFFF; + cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); + cmd->data_direction = DMA_NONE; + + return 0; +} +EXPORT_SYMBOL(iscsit_setup_text_cmd); + +int +iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_text *hdr) +{ + unsigned char *text_in = cmd->text_in_ptr, *text_ptr; + int cmdsn_ret; + + if (!text_in) { + pr_err("Unable to locate text_in buffer for sendtargets" + " discovery\n"); + goto reject; + } + if (strncmp("SendTargets", text_in, 11) != 0) { + pr_err("Received Text Data that is not" + " SendTargets, cannot continue.\n"); + goto reject; + } + text_ptr = strchr(text_in, '='); + if (!text_ptr) { + pr_err("No \"=\" separator found in Text Data," + " cannot continue.\n"); + goto reject; + } + if (!strncmp("=All", text_ptr, 4)) { + cmd->cmd_flags |= IFC_SENDTARGETS_ALL; + } else if (!strncmp("=iqn.", text_ptr, 5) || + !strncmp("=eui.", text_ptr, 5)) { + cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE; + } else { + pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); + goto reject; + } + + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); + + if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + + return 0; + } + + return iscsit_execute_cmd(cmd, 0); + +reject: + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); +} +EXPORT_SYMBOL(iscsit_process_text_cmd); + +static int +iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + unsigned char *buf) +{ + struct iscsi_text *hdr = (struct iscsi_text *)buf; + char *text_in = NULL; + u32 payload_length = ntoh24(hdr->dlength); + int rx_size, rc; + + rc = iscsit_setup_text_cmd(conn, cmd, hdr); + if (rc < 0) + return 0; + + rx_size = payload_length; + if (payload_length) { + u32 checksum = 0, data_crc = 0; + u32 padding = 0, pad_bytes = 0; + int niov = 0, rx_got; + struct kvec iov[3]; + + text_in = kzalloc(payload_length, GFP_KERNEL); if (!text_in) { pr_err("Unable to allocate memory for" " incoming text parameters\n"); - return -1; + goto reject; } + cmd->text_in_ptr = text_in; memset(iov, 0, 3 * sizeof(struct kvec)); iov[niov].iov_base = text_in; - iov[niov++].iov_len = text_length; + iov[niov++].iov_len = payload_length; padding = ((-payload_length) & 3); if (padding != 0) { @@ -1870,14 +2105,12 @@ static int iscsit_handle_text_cmd( } rx_got = rx_data(conn, &iov[0], niov, rx_size); - if (rx_got != rx_size) { - kfree(text_in); - return -1; - } + if (rx_got != rx_size) + goto reject; if (conn->conn_ops->DataDigest) { iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, - text_in, text_length, + text_in, payload_length, padding, (u8 *)&pad_bytes, (u8 *)&data_crc); @@ -1889,8 +2122,7 @@ static int iscsit_handle_text_cmd( pr_err("Unable to recover from" " Text Data digest failure while in" " ERL=0.\n"); - kfree(text_in); - return -1; + goto reject; } else { /* * Silently drop this PDU and let the @@ -1905,68 +2137,22 @@ static int iscsit_handle_text_cmd( } else { pr_debug("Got CRC32C DataDigest" " 0x%08x for %u bytes of text data.\n", - checksum, text_length); + checksum, payload_length); } } - text_in[text_length - 1] = '\0'; + text_in[payload_length - 1] 
= '\0'; pr_debug("Successfully read %d bytes of text" - " data.\n", text_length); - - if (strncmp("SendTargets", text_in, 11) != 0) { - pr_err("Received Text Data that is not" - " SendTargets, cannot continue.\n"); - kfree(text_in); - return -1; - } - text_ptr = strchr(text_in, '='); - if (!text_ptr) { - pr_err("No \"=\" separator found in Text Data," - " cannot continue.\n"); - kfree(text_in); - return -1; - } - if (strncmp("=All", text_ptr, 4) != 0) { - pr_err("Unable to locate All value for" - " SendTargets key, cannot continue.\n"); - kfree(text_in); - return -1; - } -/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */ - kfree(text_in); + " data.\n", payload_length); } - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, buf, conn); - - cmd->iscsi_opcode = ISCSI_OP_TEXT; - cmd->i_state = ISTATE_SEND_TEXTRSP; - cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); - conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; - cmd->targ_xfer_tag = 0xFFFFFFFF; - cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); - cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); - cmd->data_direction = DMA_NONE; - - spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); - spin_unlock_bh(&conn->cmd_lock); - - iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); - - if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); - if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); - - return 0; - } + return iscsit_process_text_cmd(conn, cmd, hdr); - return iscsit_execute_cmd(cmd, 0); +reject: + kfree(cmd->text_in_ptr); + cmd->text_in_ptr = NULL; + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); } +EXPORT_SYMBOL(iscsit_handle_text_cmd); int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) { @@ -2075,13 +2261,12 @@ int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn return 0; } -static int iscsit_handle_logout_cmd( - struct iscsi_conn *conn, - unsigned char *buf) +int +iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + unsigned char *buf) { int cmdsn_ret, logout_remove = 0; u8 reason_code = 0; - struct iscsi_cmd *cmd; struct iscsi_logout *hdr; struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); @@ -2105,14 +2290,10 @@ static int iscsit_handle_logout_cmd( if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { pr_err("Received logout request on connection that" " is not in logged in state, ignoring request.\n"); + iscsit_free_cmd(cmd, false); return 0; } - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, - buf, conn); - cmd->iscsi_opcode = ISCSI_OP_LOGOUT; cmd->i_state = ISTATE_SEND_LOGOUTRSP; cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 
1 : 0); @@ -2150,18 +2331,16 @@ static int iscsit_handle_logout_cmd( if (ret < 0) return ret; } else { - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); - if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) logout_remove = 0; - } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { - return iscsit_add_reject_from_cmd( - ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); - } + else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; } return logout_remove; } +EXPORT_SYMBOL(iscsit_handle_logout_cmd); static int iscsit_handle_snack( struct iscsi_conn *conn, @@ -2180,8 +2359,8 @@ static int iscsit_handle_snack( if (!conn->sess->sess_ops->ErrorRecoveryLevel) { pr_err("Initiator sent SNACK request while in" " ErrorRecoveryLevel=0.\n"); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buf); } /* * SNACK_DATA and SNACK_R2T are both 0, so check which function to @@ -2205,13 +2384,13 @@ static int iscsit_handle_snack( case ISCSI_FLAG_SNACK_TYPE_RDATA: /* FIXME: Support R-Data SNACK */ pr_err("R-Data SNACK Not Supported.\n"); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buf); default: pr_err("Unknown SNACK type 0x%02x, protocol" " error.\n", hdr->flags & 0x0f); - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buf, conn); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buf); } return 0; @@ -2229,7 +2408,7 @@ static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) static int iscsit_handle_immediate_data( struct iscsi_cmd *cmd, - unsigned char *buf, + struct iscsi_scsi_req *hdr, u32 length) { int iov_ret, rx_got = 0, rx_size = 0; @@ -2283,14 +2462,14 @@ static int iscsit_handle_immediate_data( pr_err("Unable to recover from" " Immediate Data digest failure while" " in ERL=0.\n"); - iscsit_add_reject_from_cmd( + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, - 1, 0, buf, cmd); + (unsigned char *)hdr); return IMMEDIATE_DATA_CANNOT_RECOVER; } else { - iscsit_add_reject_from_cmd( + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, - 0, 0, buf, cmd); + (unsigned char *)hdr); return IMMEDIATE_DATA_ERL1_CRC_FAILURE; } } else { @@ -2321,6 +2500,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) { struct iscsi_cmd *cmd; struct iscsi_conn *conn_p; + bool found = false; /* * Only send a Asynchronous Message on connections whos network @@ -2329,14 +2509,15 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { iscsit_inc_conn_usage_count(conn_p); + found = true; break; } } - if (!conn_p) + if (!found) return; - cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); if (!cmd) { iscsit_dec_conn_usage_count(conn_p); return; @@ -2381,9 +2562,8 @@ static int iscsit_send_conn_drop_async_message( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ 
@@ -2410,18 +2590,60 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) } } -static int iscsit_send_data_in( - struct iscsi_cmd *cmd, - struct iscsi_conn *conn) +static void +iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, + bool set_statsn) { - int iov_ret = 0, set_statsn = 0; - u32 iov_count = 0, tx_size = 0; + hdr->opcode = ISCSI_OP_SCSI_DATA_IN; + hdr->flags = datain->flags; + if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { + if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { + hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); + } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { + hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); + } + } + hton24(hdr->dlength, datain->length); + if (hdr->flags & ISCSI_FLAG_DATA_ACK) + int_to_scsilun(cmd->se_cmd.orig_fe_lun, + (struct scsi_lun *)&hdr->lun); + else + put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); + + hdr->itt = cmd->init_task_tag; + + if (hdr->flags & ISCSI_FLAG_DATA_ACK) + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); + else + hdr->ttt = cpu_to_be32(0xFFFFFFFF); + if (set_statsn) + hdr->statsn = cpu_to_be32(cmd->stat_sn); + else + hdr->statsn = cpu_to_be32(0xFFFFFFFF); + + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + hdr->datasn = cpu_to_be32(datain->data_sn); + hdr->offset = cpu_to_be32(datain->offset); + + pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," + " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", + cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), + ntohl(hdr->offset), datain->length, conn->cid); +} + +static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; struct iscsi_datain datain; struct iscsi_datain_req *dr; - struct iscsi_data_rsp *hdr; struct kvec *iov; - int eodr = 0; - int ret; + u32 iov_count = 0, tx_size = 0; + int eodr = 0, ret, iov_ret; + bool set_statsn = false; memset(&datain, 0, sizeof(struct iscsi_datain)); dr = iscsit_get_datain_values(cmd, &datain); @@ -2430,7 +2652,6 @@ static int iscsit_send_data_in( cmd->init_task_tag); return -1; } - /* * Be paranoid and double check the logic for now. */ @@ -2438,18 +2659,11 @@ static int iscsit_send_data_in( pr_err("Command ITT: 0x%08x, datain.offset: %u and" " datain.length: %u exceeds cmd->data_length: %u\n", cmd->init_task_tag, datain.offset, datain.length, - cmd->se_cmd.data_length); + cmd->se_cmd.data_length); return -1; } - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->tx_data_octets += datain.length; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->read_bytes += datain.length; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_add(datain.length, &conn->sess->tx_data_octets);
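The atomic_long_add() just above is part of a conversion that runs through the whole patch: per-session statistics that used to be plain integers guarded by session_stats_lock (and, for read_bytes, the per-NodeACL stats_lock) become atomic_long_t, so hot-path accounting no longer takes a spinlock. A minimal sketch of the pattern with invented names (illustration only, not code from the tree):

#include <linux/atomic.h>
#include <linux/types.h>

/* Before: u32 counter + spinlock around every increment.
 * After: lock-free accounting on the transmit path.
 */
struct example_session_stats {
	atomic_long_t tx_data_octets;
	atomic_long_t rx_data_octets;
	atomic_long_t rsp_pdus;
};

static inline void example_account_tx(struct example_session_stats *s, u32 len)
{
	atomic_long_add(len, &s->tx_data_octets);	/* no lock needed */
}

static inline long example_read_tx(struct example_session_stats *s)
{
	return atomic_long_read(&s->tx_data_octets);
}

Note that the same hunk simply drops the per-NodeACL read_bytes accounting rather than converting it.

/* * Special case for successful execution w/ both DATAIN * and Sense Data. 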
@@ -2462,47 +2676,13 @@ static int iscsit_send_data_in( (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { iscsit_increment_maxcmdsn(cmd, conn->sess); cmd->stat_sn = conn->stat_sn++; - set_statsn = 1; + set_statsn = true; } else if (dr->dr_complete == - DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) - set_statsn = 1; + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) + set_statsn = true; } - hdr = (struct iscsi_data_rsp *) cmd->pdu; - memset(hdr, 0, ISCSI_HDR_LEN); - hdr->opcode = ISCSI_OP_SCSI_DATA_IN; - hdr->flags = datain.flags; - if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { - if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { - hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; - hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); - } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { - hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; - hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); - } - } - hton24(hdr->dlength, datain.length); - if (hdr->flags & ISCSI_FLAG_DATA_ACK) - int_to_scsilun(cmd->se_cmd.orig_fe_lun, - (struct scsi_lun *)&hdr->lun); - else - put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); - - hdr->itt = cmd->init_task_tag; - - if (hdr->flags & ISCSI_FLAG_DATA_ACK) - hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); - else - hdr->ttt = cpu_to_be32(0xFFFFFFFF); - if (set_statsn) - hdr->statsn = cpu_to_be32(cmd->stat_sn); - else - hdr->statsn = cpu_to_be32(0xFFFFFFFF); - - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); - hdr->datasn = cpu_to_be32(datain.data_sn); - hdr->offset = cpu_to_be32(datain.offset); + iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); iov = &cmd->iov_data[0]; iov[iov_count].iov_base = cmd->pdu; @@ -2512,9 +2692,8 @@ static int iscsit_send_data_in( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2523,7 +2702,8 @@ static int iscsit_send_data_in( " for DataIN PDU 0x%08x\n", *header_digest); } - iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length); + iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], + datain.offset, datain.length); if (iov_ret < 0) return -1; @@ -2554,11 +2734,6 @@ static int iscsit_send_data_in( cmd->iov_data_count = iov_count; cmd->tx_size = tx_size; - pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," - " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", - cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), - ntohl(hdr->offset), datain.length, conn->cid); - /* sendpage is preferred but can't insert markers */ if (!conn->conn_ops->IFMarker) ret = iscsit_fe_sendpage_sg(cmd, conn); @@ -2581,16 +2756,13 @@ static int iscsit_send_data_in( return eodr; } -static int iscsit_send_logout_response( - struct iscsi_cmd *cmd, - struct iscsi_conn *conn) +int +iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + struct iscsi_logout_rsp *hdr) { - int niov = 0, tx_size; struct iscsi_conn *logout_conn = NULL; struct iscsi_conn_recovery *cr = NULL; struct iscsi_session *sess = conn->sess; - struct kvec *iov; - struct iscsi_logout_rsp *hdr; /* * The actual shutting down of Sessions and/or Connections * for CLOSESESSION and CLOSECONNECTION Logout Requests @@ -2659,9 +2831,6 @@ static int 
iscsit_send_logout_response( return -1; } - tx_size = ISCSI_HDR_LEN; - hdr = (struct iscsi_logout_rsp *)cmd->pdu; - memset(hdr, 0, ISCSI_HDR_LEN); hdr->opcode = ISCSI_OP_LOGOUT_RSP; hdr->flags |= ISCSI_FLAG_CMD_FINAL; hdr->response = cmd->logout_response; @@ -2673,6 +2842,27 @@ static int iscsit_send_logout_response( hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + pr_debug("Built Logout Response ITT: 0x%08x StatSN:" + " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", + cmd->init_task_tag, cmd->stat_sn, hdr->response, + cmd->logout_cid, conn->cid); + + return 0; +} +EXPORT_SYMBOL(iscsit_build_logout_rsp); + +static int +iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct kvec *iov; + int niov = 0, tx_size, rc; + + rc = iscsit_build_logout_rsp(cmd, conn, + (struct iscsi_logout_rsp *)&cmd->pdu[0]); + if (rc < 0) + return rc; + + tx_size = ISCSI_HDR_LEN; iov = &cmd->iov_misc[0]; iov[niov].iov_base = cmd->pdu; iov[niov++].iov_len = ISCSI_HDR_LEN; @@ -2680,9 +2870,8 @@ static int iscsit_send_logout_response( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0], + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2692,14 +2881,37 @@ static int iscsit_send_logout_response( cmd->iov_misc_count = niov; cmd->tx_size = tx_size; - pr_debug("Sending Logout Response ITT: 0x%08x StatSN:" - " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", - cmd->init_task_tag, cmd->stat_sn, hdr->response, - cmd->logout_cid, conn->cid); - return 0; } +void +iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + struct iscsi_nopin *hdr, bool nopout_response) +{ + hdr->opcode = ISCSI_OP_NOOP_IN; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + hton24(hdr->dlength, cmd->buf_ptr_size); + if (nopout_response) + put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); + hdr->itt = cmd->init_task_tag; + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); + cmd->stat_sn = (nopout_response) ? conn->stat_sn++ : + conn->stat_sn; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + + if (nopout_response) + iscsit_increment_maxcmdsn(cmd, conn->sess); + + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + + pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," + " StatSN: 0x%08x, Length %u\n", (nopout_response) ? + "Solicited" : "Unsolicited", cmd->init_task_tag, + cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); +} +EXPORT_SYMBOL(iscsit_build_nopin_rsp); +
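The logout and NOPIN hunks above repeat the build/send split introduced for DataIN: PDU construction moves into an exported iscsit_build_*_rsp() helper while the static iscsit_send_*() wrapper keeps the TCP-specific kvec, digest, and tx_size bookkeeping. The point of the EXPORT_SYMBOLs is that a different transport can reuse the header logic with its own transmit path; a hypothetical caller could look like the following sketch (example_queue_pdu() is an invented hook, not an API in this patch):

/* Sketch: a non-TCP transport answering a NOP-OUT. */
static int example_xmit_nopin_rsp(struct iscsi_cmd *cmd,
				  struct iscsi_conn *conn)
{
	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];

	memset(hdr, 0, ISCSI_HDR_LEN);
	/* true: solicited response, so StatSN is consumed and
	 * MaxCmdSN advances inside the builder */
	iscsit_build_nopin_rsp(cmd, conn, hdr, true);

	/* hand the finished header to the transport's own queue */
	return example_queue_pdu(conn, cmd->pdu, ISCSI_HDR_LEN);
}

Note how the nopout_response flag selects between the solicited case (StatSN++ and iscsit_increment_maxcmdsn()) and the unsolicited ping, which leaves StatSN untouched.

/* * Unsolicited NOPIN, either requesting a response or not. 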
*/ @@ -2708,27 +2920,16 @@ static int iscsit_send_unsolicited_nopin( struct iscsi_conn *conn, int want_response) { - int tx_size = ISCSI_HDR_LEN; - struct iscsi_nopin *hdr; - int ret; + struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; + int tx_size = ISCSI_HDR_LEN, ret; - hdr = (struct iscsi_nopin *) cmd->pdu; - memset(hdr, 0, ISCSI_HDR_LEN); - hdr->opcode = ISCSI_OP_NOOP_IN; - hdr->flags |= ISCSI_FLAG_CMD_FINAL; - hdr->itt = cmd->init_task_tag; - hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); - cmd->stat_sn = conn->stat_sn; - hdr->statsn = cpu_to_be32(cmd->stat_sn); - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + iscsit_build_nopin_rsp(cmd, conn, hdr, false); if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ -2757,31 +2958,17 @@ static int iscsit_send_unsolicited_nopin( return 0; } -static int iscsit_send_nopin_response( - struct iscsi_cmd *cmd, - struct iscsi_conn *conn) +static int +iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) { - int niov = 0, tx_size; - u32 padding = 0; + struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; struct kvec *iov; - struct iscsi_nopin *hdr; - - tx_size = ISCSI_HDR_LEN; - hdr = (struct iscsi_nopin *) cmd->pdu; - memset(hdr, 0, ISCSI_HDR_LEN); - hdr->opcode = ISCSI_OP_NOOP_IN; - hdr->flags |= ISCSI_FLAG_CMD_FINAL; - hton24(hdr->dlength, cmd->buf_ptr_size); - put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); - hdr->itt = cmd->init_task_tag; - hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); - cmd->stat_sn = conn->stat_sn++; - hdr->statsn = cpu_to_be32(cmd->stat_sn); + u32 padding = 0; + int niov = 0, tx_size; - iscsit_increment_maxcmdsn(cmd, conn->sess); - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + iscsit_build_nopin_rsp(cmd, conn, hdr, true); + tx_size = ISCSI_HDR_LEN; iov = &cmd->iov_misc[0]; iov[niov].iov_base = cmd->pdu; iov[niov++].iov_len = ISCSI_HDR_LEN; @@ -2789,9 +2976,8 @@ static int iscsit_send_nopin_response( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2837,10 +3023,6 @@ static int iscsit_send_nopin_response( cmd->iov_misc_count = niov; cmd->tx_size = tx_size; - pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:" - " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag, - cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); - return 0; } @@ -2884,9 +3066,8 @@ static int iscsit_send_r2t( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2925,8 +3106,8 @@ static int iscsit_send_r2t( * 
connection recovery. */ int iscsit_build_r2ts_for_cmd( - struct iscsi_cmd *cmd, struct iscsi_conn *conn, + struct iscsi_cmd *cmd, bool recovery) { int first_r2t = 1; @@ -3001,24 +3182,14 @@ int iscsit_build_r2ts_for_cmd( return 0; } -static int iscsit_send_status( - struct iscsi_cmd *cmd, - struct iscsi_conn *conn) +void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) { - u8 iov_count = 0, recovery; - u32 padding = 0, tx_size = 0; - struct iscsi_scsi_rsp *hdr; - struct kvec *iov; - - recovery = (cmd->i_state != ISTATE_SEND_STATUS); - if (!recovery) + if (inc_stat_sn) cmd->stat_sn = conn->stat_sn++; - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->rsp_pdus++; - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->rsp_pdus); - hdr = (struct iscsi_scsi_rsp *) cmd->pdu; memset(hdr, 0, ISCSI_HDR_LEN); hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; hdr->flags |= ISCSI_FLAG_CMD_FINAL; @@ -3038,6 +3209,23 @@ static int iscsit_send_status( hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x," + " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", + cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status, + cmd->se_cmd.scsi_status, conn->cid); +} +EXPORT_SYMBOL(iscsit_build_rsp_pdu); + +static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; + struct kvec *iov; + u32 padding = 0, tx_size = 0; + int iov_count = 0; + bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); + + iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); + iov = &cmd->iov_misc[0]; iov[iov_count].iov_base = cmd->pdu; iov[iov_count++].iov_len = ISCSI_HDR_LEN; @@ -3091,9 +3279,8 @@ static int iscsit_send_status( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3104,11 +3291,6 @@ static int iscsit_send_status( cmd->iov_misc_count = iov_count; cmd->tx_size = tx_size; - pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x," - " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", - (!recovery) ? 
"" : "Recovery ", cmd->init_task_tag, - cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid); - return 0; } @@ -3123,24 +3305,18 @@ static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) return ISCSI_TMF_RSP_NO_LUN; case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: return ISCSI_TMF_RSP_NOT_SUPPORTED; - case TMR_FUNCTION_AUTHORIZATION_FAILED: - return ISCSI_TMF_RSP_AUTH_FAILED; case TMR_FUNCTION_REJECTED: default: return ISCSI_TMF_RSP_REJECTED; } } -static int iscsit_send_task_mgt_rsp( - struct iscsi_cmd *cmd, - struct iscsi_conn *conn) +void +iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + struct iscsi_tm_rsp *hdr) { struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; - struct iscsi_tm_rsp *hdr; - u32 tx_size = 0; - hdr = (struct iscsi_tm_rsp *) cmd->pdu; - memset(hdr, 0, ISCSI_HDR_LEN); hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; hdr->flags = ISCSI_FLAG_CMD_FINAL; hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); @@ -3152,6 +3328,20 @@ static int iscsit_send_task_mgt_rsp( hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + pr_debug("Built Task Management Response ITT: 0x%08x," + " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", + cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); +} +EXPORT_SYMBOL(iscsit_build_task_mgt_rsp); + +static int +iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; + u32 tx_size = 0; + + iscsit_build_task_mgt_rsp(cmd, conn, hdr); + cmd->iov_misc[0].iov_base = cmd->pdu; cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; tx_size += ISCSI_HDR_LEN; @@ -3159,9 +3349,8 @@ static int iscsit_send_task_mgt_rsp( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3172,10 +3361,6 @@ static int iscsit_send_task_mgt_rsp( cmd->iov_misc_count = 1; cmd->tx_size = tx_size; - pr_debug("Built Task Management Response ITT: 0x%08x," - " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", - cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); - return 0; } @@ -3205,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) #define SENDTARGETS_BUF_LIMIT 32768U -static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) +static int +iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, + enum iscsit_transport_type network_transport) { char *payload = NULL; struct iscsi_conn *conn = cmd->conn; @@ -3213,7 +3400,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_tiqn *tiqn; struct iscsi_tpg_np *tpg_np; int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; + int target_name_printed; unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ + unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength, SENDTARGETS_BUF_LIMIT); @@ -3224,22 +3413,48 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) " response.\n"); return -ENOMEM; } + /* + * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE + * explicit case.. 
+ */ + if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) { + text_ptr = strchr(text_in, '='); + if (!text_ptr) { + pr_err("Unable to locate '=' string in text_in:" + " %s\n", text_in); + kfree(payload); + return -EINVAL; + } + /* + * Skip over '=' character.. + */ + text_ptr += 1; + } spin_lock(&tiqn_lock); list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { - len = sprintf(buf, "TargetName=%s", tiqn->tiqn); - len += 1; - - if ((len + payload_len) > buffer_len) { - end_of_buf = 1; - goto eob; + if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) && + strcmp(tiqn->tiqn, text_ptr)) { + continue; } - memcpy(payload + payload_len, buf, len); - payload_len += len; + + target_name_printed = 0; spin_lock(&tiqn->tiqn_tpg_lock); list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { + /* If demo_mode_discovery=0 and generate_node_acls=0 + * (demo mode disabled) do not return + * TargetName+TargetAddress unless a NodeACL exists. + */ + + if ((tpg->tpg_attrib.generate_node_acls == 0) && + (tpg->tpg_attrib.demo_mode_discovery == 0) && + (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg, + cmd->conn->sess->sess_ops->InitiatorName))) { + continue; + } + spin_lock(&tpg->tpg_state_lock); if ((tpg->tpg_state == TPG_STATE_FREE) || (tpg->tpg_state == TPG_STATE_INACTIVE)) { @@ -3254,14 +3469,29 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_np *np = tpg_np->tpg_np; bool inaddr_any = iscsit_check_inaddr_any(np); + if (np->np_network_transport != network_transport) + continue; + + if (!target_name_printed) { + len = sprintf(buf, "TargetName=%s", + tiqn->tiqn); + len += 1; + + if ((len + payload_len) > buffer_len) { + spin_unlock(&tpg->tpg_np_lock); + spin_unlock(&tiqn->tiqn_tpg_lock); + end_of_buf = 1; + goto eob; + } + memcpy(payload + payload_len, buf, len); + payload_len += len; + target_name_printed = 1; + } + 
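The NodeACL check added at the top of the TPG loop above implements the new demo_mode_discovery attribute: when a TPG runs with generate_node_acls=0 and demo_mode_discovery=0, SendTargets only reports it to initiators that already have an explicit NodeACL. Restated as a standalone predicate (an invented helper for illustration, not code from the patch):

/* Sketch: should this TPG appear in an initiator's SendTargets reply? */
static bool example_tpg_discoverable(struct iscsi_portal_group *tpg,
				     unsigned char *initiator_iqn)
{
	if (tpg->tpg_attrib.generate_node_acls)
		return true;	/* demo mode: dynamic ACLs, open discovery */
	if (tpg->tpg_attrib.demo_mode_discovery)
		return true;	/* discovery explicitly allowed anyway */
	/* otherwise require an explicit ACL for this initiator name */
	return core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
					       initiator_iqn) != NULL;
}

The deferred TargetName printing (target_name_printed) exists for the same reason: the name is only emitted once at least one portal survives both this filter and the transport match.

len = sprintf(buf, "TargetAddress=" - "%s%s%s:%hu,%hu", - (np->np_sockaddr.ss_family == AF_INET6) ? - "[" : "", (inaddr_any == false) ? - np->np_ip : conn->local_ip, - (np->np_sockaddr.ss_family == AF_INET6) ? - "]" : "", (inaddr_any == false) ? - np->np_port : conn->local_port, + "%s:%hu,%hu", + inaddr_any ? conn->local_ip : np->np_ip, + inaddr_any ? 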
conn->local_port : np->np_port, tpg->tpgt); len += 1; @@ -3280,6 +3510,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) eob: if (end_of_buf) break; + + if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) + break; } spin_unlock(&tiqn_lock); @@ -3288,6 +3521,38 @@ eob: return payload_len; } +int +iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + struct iscsi_text_rsp *hdr, + enum iscsit_transport_type network_transport) +{ + int text_length, padding; + + text_length = iscsit_build_sendtargets_response(cmd, network_transport); + if (text_length < 0) + return text_length; + + hdr->opcode = ISCSI_OP_TEXT_RSP; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + padding = ((-text_length) & 3); + hton24(hdr->dlength, text_length); + hdr->itt = cmd->init_task_tag; + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); + cmd->stat_sn = conn->stat_sn++; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + + iscsit_increment_maxcmdsn(cmd, conn->sess); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + + pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," + " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, + text_length, conn->cid); + + return text_length + padding; +} +EXPORT_SYMBOL(iscsit_build_text_rsp); + /* * FIXME: Add support for F_BIT and C_BIT when the length is longer than * MaxRecvDataSegmentLength. @@ -3296,51 +3561,29 @@ static int iscsit_send_text_rsp( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { - struct iscsi_text_rsp *hdr; + struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; struct kvec *iov; - u32 padding = 0, tx_size = 0; - int text_length, iov_count = 0; - - text_length = iscsit_build_sendtargets_response(cmd); - if (text_length < 0) - return text_length; - - padding = ((-text_length) & 3); - if (padding != 0) { - memset(cmd->buf_ptr + text_length, 0, padding); - pr_debug("Attaching %u additional bytes for" - " padding.\n", padding); - } - - hdr = (struct iscsi_text_rsp *) cmd->pdu; - memset(hdr, 0, ISCSI_HDR_LEN); - hdr->opcode = ISCSI_OP_TEXT_RSP; - hdr->flags |= ISCSI_FLAG_CMD_FINAL; - hton24(hdr->dlength, text_length); - hdr->itt = cmd->init_task_tag; - hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); - cmd->stat_sn = conn->stat_sn++; - hdr->statsn = cpu_to_be32(cmd->stat_sn); + u32 tx_size = 0; + int text_length, iov_count = 0, rc; - iscsit_increment_maxcmdsn(cmd, conn->sess); - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); + if (rc < 0) + return rc; + text_length = rc; iov = &cmd->iov_misc[0]; - iov[iov_count].iov_base = cmd->pdu; iov[iov_count++].iov_len = ISCSI_HDR_LEN; iov[iov_count].iov_base = cmd->buf_ptr; - iov[iov_count++].iov_len = text_length + padding; + iov[iov_count++].iov_len = text_length; - tx_size += (ISCSI_HDR_LEN + text_length + padding); + tx_size += (ISCSI_HDR_LEN + text_length); if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3350,7 +3593,7 @@ static int iscsit_send_text_rsp( if (conn->conn_ops->DataDigest) { iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - cmd->buf_ptr, (text_length + padding), + cmd->buf_ptr, 
text_length, 0, NULL, (u8 *)&cmd->data_crc); iov[iov_count].iov_base = &cmd->data_crc; @@ -3358,39 +3601,44 @@ static int iscsit_send_text_rsp( tx_size += ISCSI_CRC_LEN; pr_debug("Attaching DataDigest for %u bytes of text" - " data, CRC 0x%08x\n", (text_length + padding), + " data, CRC 0x%08x\n", text_length, cmd->data_crc); } cmd->iov_misc_count = iov_count; cmd->tx_size = tx_size; - pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," - " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, - text_length, conn->cid); return 0; } -static int iscsit_send_reject( - struct iscsi_cmd *cmd, - struct iscsi_conn *conn) +void +iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + struct iscsi_reject *hdr) { - u32 iov_count = 0, tx_size = 0; - struct iscsi_reject *hdr; - struct kvec *iov; - - hdr = (struct iscsi_reject *) cmd->pdu; hdr->opcode = ISCSI_OP_REJECT; + hdr->reason = cmd->reject_reason; hdr->flags |= ISCSI_FLAG_CMD_FINAL; hton24(hdr->dlength, ISCSI_HDR_LEN); hdr->ffffffff = cpu_to_be32(0xffffffff); cmd->stat_sn = conn->stat_sn++; hdr->statsn = cpu_to_be32(cmd->stat_sn); - hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); - hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); - iov = &cmd->iov_misc[0]; +} +EXPORT_SYMBOL(iscsit_build_reject); + +static int iscsit_send_reject( + struct iscsi_cmd *cmd, + struct iscsi_conn *conn) +{ + struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; + struct kvec *iov; + u32 iov_count = 0, tx_size; + + iscsit_build_reject(cmd, conn, hdr); + iov = &cmd->iov_misc[0]; iov[iov_count].iov_base = cmd->pdu; iov[iov_count++].iov_len = ISCSI_HDR_LEN; iov[iov_count].iov_base = cmd->buf_ptr; @@ -3401,9 +3649,8 @@ static int iscsit_send_reject( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3412,9 +3659,8 @@ static int iscsit_send_reject( } if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)&cmd->data_crc); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); iov[iov_count].iov_base = &cmd->data_crc; iov[iov_count++].iov_len = ISCSI_CRC_LEN; @@ -3487,55 +3733,41 @@ static inline void iscsit_thread_check_cpumask( set_cpus_allowed_ptr(p, conn->conn_cpumask); } -static int handle_immediate_queue(struct iscsi_conn *conn) +static int +iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) { - struct iscsi_queue_req *qr; - struct iscsi_cmd *cmd; - u8 state; int ret; - while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { - atomic_set(&conn->check_immediate_queue, 0); - cmd = qr->cmd; - state = qr->state; - kmem_cache_free(lio_qr_cache, qr); - - switch (state) { - case ISTATE_SEND_R2T: - ret = iscsit_send_r2t(cmd, conn); - if (ret < 0) - goto err; - break; - case ISTATE_REMOVE: - if (cmd->data_direction == DMA_TO_DEVICE) - iscsit_stop_dataout_timer(cmd); - - spin_lock_bh(&conn->cmd_lock); - list_del(&cmd->i_conn_node); - spin_unlock_bh(&conn->cmd_lock); + switch (state) { + case ISTATE_SEND_R2T: + ret = 
iscsit_send_r2t(cmd, conn); + if (ret < 0) + goto err; + break; + case ISTATE_REMOVE: + spin_lock_bh(&conn->cmd_lock); + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); - continue; - case ISTATE_SEND_NOPIN_WANT_RESPONSE: - iscsit_mod_nopin_response_timer(conn); - ret = iscsit_send_unsolicited_nopin(cmd, - conn, 1); - if (ret < 0) - goto err; - break; - case ISTATE_SEND_NOPIN_NO_RESPONSE: - ret = iscsit_send_unsolicited_nopin(cmd, - conn, 0); - if (ret < 0) - goto err; - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, state, - conn->cid); + iscsit_free_cmd(cmd, false); + break; + case ISTATE_SEND_NOPIN_WANT_RESPONSE: + iscsit_mod_nopin_response_timer(conn); + ret = iscsit_send_unsolicited_nopin(cmd, conn, 1); + if (ret < 0) goto err; - } + break; + case ISTATE_SEND_NOPIN_NO_RESPONSE: + ret = iscsit_send_unsolicited_nopin(cmd, conn, 0); + if (ret < 0) + goto err; + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, state, + conn->cid); + goto err; } return 0; @@ -3544,123 +3776,135 @@ err: return -1; } -static int handle_response_queue(struct iscsi_conn *conn) +static int +iscsit_handle_immediate_queue(struct iscsi_conn *conn) { + struct iscsit_transport *t = conn->conn_transport; struct iscsi_queue_req *qr; struct iscsi_cmd *cmd; u8 state; int ret; - while ((qr = iscsit_get_cmd_from_response_queue(conn))) { + while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { + atomic_set(&conn->check_immediate_queue, 0); cmd = qr->cmd; state = qr->state; kmem_cache_free(lio_qr_cache, qr); -check_rsp_state: - switch (state) { - case ISTATE_SEND_DATAIN: - ret = iscsit_send_data_in(cmd, conn); - if (ret < 0) - goto err; - else if (!ret) - /* more drs */ - goto check_rsp_state; - else if (ret == 1) { - /* all done */ - spin_lock_bh(&cmd->istate_lock); - cmd->i_state = ISTATE_SENT_STATUS; - spin_unlock_bh(&cmd->istate_lock); - continue; - } else if (ret == 2) { - /* Still must send status, - SCF_TRANSPORT_TASK_SENSE was set */ - spin_lock_bh(&cmd->istate_lock); - cmd->i_state = ISTATE_SEND_STATUS; - spin_unlock_bh(&cmd->istate_lock); - state = ISTATE_SEND_STATUS; - goto check_rsp_state; - } + ret = t->iscsit_immediate_queue(conn, cmd, state); + if (ret < 0) + return ret; + } - break; - case ISTATE_SEND_STATUS: - case ISTATE_SEND_STATUS_RECOVERY: - ret = iscsit_send_status(cmd, conn); - break; - case ISTATE_SEND_LOGOUTRSP: - ret = iscsit_send_logout_response(cmd, conn); - break; - case ISTATE_SEND_ASYNCMSG: - ret = iscsit_send_conn_drop_async_message( - cmd, conn); - break; - case ISTATE_SEND_NOPIN: - ret = iscsit_send_nopin_response(cmd, conn); - break; - case ISTATE_SEND_REJECT: - ret = iscsit_send_reject(cmd, conn); - break; - case ISTATE_SEND_TASKMGTRSP: - ret = iscsit_send_task_mgt_rsp(cmd, conn); - if (ret != 0) - break; - ret = iscsit_tmr_post_handler(cmd, conn); - if (ret != 0) - iscsit_fall_back_to_erl0(conn->sess); - break; - case ISTATE_SEND_TEXTRSP: - ret = iscsit_send_text_rsp(cmd, conn); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - state, conn->cid); - goto err; - } + return 0; +} + +static int +iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) +{ + int ret; + +check_rsp_state: + switch (state) { + case ISTATE_SEND_DATAIN: + ret = 
iscsit_send_datain(cmd, conn); if (ret < 0) goto err; + else if (!ret) + /* more drs */ + goto check_rsp_state; + else if (ret == 1) { + /* all done */ + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); - if (iscsit_send_tx_data(cmd, conn, 1) < 0) { - iscsit_tx_thread_wait_for_tcp(conn); - iscsit_unmap_iovec(cmd); - goto err; - } - iscsit_unmap_iovec(cmd); + if (atomic_read(&conn->check_immediate_queue)) + return 1; - switch (state) { - case ISTATE_SEND_LOGOUTRSP: - if (!iscsit_logout_post_handler(cmd, conn)) - goto restart; - /* fall through */ - case ISTATE_SEND_STATUS: - case ISTATE_SEND_ASYNCMSG: - case ISTATE_SEND_NOPIN: - case ISTATE_SEND_STATUS_RECOVERY: - case ISTATE_SEND_TEXTRSP: - case ISTATE_SEND_TASKMGTRSP: + return 0; + } else if (ret == 2) { + /* Still must send status, + SCF_TRANSPORT_TASK_SENSE was set */ spin_lock_bh(&cmd->istate_lock); - cmd->i_state = ISTATE_SENT_STATUS; + cmd->i_state = ISTATE_SEND_STATUS; spin_unlock_bh(&cmd->istate_lock); - break; - case ISTATE_SEND_REJECT: - if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { - cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; - complete(&cmd->reject_comp); - goto err; - } - complete(&cmd->reject_comp); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - cmd->i_state, conn->cid); - goto err; + state = ISTATE_SEND_STATUS; + goto check_rsp_state; } - if (atomic_read(&conn->check_immediate_queue)) + break; + case ISTATE_SEND_STATUS: + case ISTATE_SEND_STATUS_RECOVERY: + ret = iscsit_send_response(cmd, conn); + break; + case ISTATE_SEND_LOGOUTRSP: + ret = iscsit_send_logout(cmd, conn); + break; + case ISTATE_SEND_ASYNCMSG: + ret = iscsit_send_conn_drop_async_message( + cmd, conn); + break; + case ISTATE_SEND_NOPIN: + ret = iscsit_send_nopin(cmd, conn); + break; + case ISTATE_SEND_REJECT: + ret = iscsit_send_reject(cmd, conn); + break; + case ISTATE_SEND_TASKMGTRSP: + ret = iscsit_send_task_mgt_rsp(cmd, conn); + if (ret != 0) break; + ret = iscsit_tmr_post_handler(cmd, conn); + if (ret != 0) + iscsit_fall_back_to_erl0(conn->sess); + break; + case ISTATE_SEND_TEXTRSP: + ret = iscsit_send_text_rsp(cmd, conn); + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + state, conn->cid); + goto err; + } + if (ret < 0) + goto err; + + if (iscsit_send_tx_data(cmd, conn, 1) < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + iscsit_unmap_iovec(cmd); + goto err; } + iscsit_unmap_iovec(cmd); + + switch (state) { + case ISTATE_SEND_LOGOUTRSP: + if (!iscsit_logout_post_handler(cmd, conn)) + goto restart; + /* fall through */ + case ISTATE_SEND_STATUS: + case ISTATE_SEND_ASYNCMSG: + case ISTATE_SEND_NOPIN: + case ISTATE_SEND_STATUS_RECOVERY: + case ISTATE_SEND_TEXTRSP: + case ISTATE_SEND_TASKMGTRSP: + case ISTATE_SEND_REJECT: + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + cmd->i_state, conn->cid); + goto err; + } + + if (atomic_read(&conn->check_immediate_queue)) + return 1; return 0; @@ -3670,6 +3914,27 @@ restart: return -EAGAIN; } +static int iscsit_handle_response_queue(struct iscsi_conn *conn) +{ + struct iscsit_transport *t = conn->conn_transport; + struct iscsi_queue_req *qr; + struct iscsi_cmd *cmd; + u8 state; + int 
ret; + + while ((qr = iscsit_get_cmd_from_response_queue(conn))) { + cmd = qr->cmd; + state = qr->state; + kmem_cache_free(lio_qr_cache, qr); + + ret = t->iscsit_response_queue(conn, cmd, state); + if (ret == 1 || ret < 0) + return ret; + } + + return 0; +} + int iscsi_target_tx_thread(void *arg) { int ret = 0; @@ -3703,12 +3968,15 @@ restart: signal_pending(current)) goto transport_err; - ret = handle_immediate_queue(conn); +get_immediate: + ret = iscsit_handle_immediate_queue(conn); if (ret < 0) goto transport_err; - ret = handle_response_queue(conn); - if (ret == -EAGAIN) + ret = iscsit_handle_response_queue(conn); + if (ret == 1) + goto get_immediate; + else if (ret == -EAGAIN) goto restart; else if (ret < 0) goto transport_err; @@ -3721,6 +3989,85 @@ out: return 0; } +static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) +{ + struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf; + struct iscsi_cmd *cmd; + int ret = 0; + + switch (hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_SCSI_CMD: + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + + ret = iscsit_handle_scsi_cmd(conn, cmd, buf); + break; + case ISCSI_OP_SCSI_DATA_OUT: + ret = iscsit_handle_data_out(conn, buf); + break; + case ISCSI_OP_NOOP_OUT: + cmd = NULL; + if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + } + ret = iscsit_handle_nop_out(conn, cmd, buf); + break; + case ISCSI_OP_SCSI_TMFUNC: + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + + ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); + break; + case ISCSI_OP_TEXT: + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + + ret = iscsit_handle_text_cmd(conn, cmd, buf); + break; + case ISCSI_OP_LOGOUT: + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + + ret = iscsit_handle_logout_cmd(conn, cmd, buf); + if (ret > 0) + wait_for_completion_timeout(&conn->conn_logout_comp, + SECONDS_FOR_LOGOUT_COMP * HZ); + break; + case ISCSI_OP_SNACK: + ret = iscsit_handle_snack(conn, buf); + break; + default: + pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode); + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Cannot recover from unknown" + " opcode while ERL=0, closing iSCSI connection.\n"); + return -1; + } + if (!conn->conn_ops->OFMarker) { + pr_err("Unable to recover from unknown" + " opcode while OFMarker=No, closing iSCSI" + " connection.\n"); + return -1; + } + if (iscsit_recover_from_unknown_opcode(conn) < 0) { + pr_err("Unable to recover from unknown" + " opcode, closing iSCSI connection.\n"); + return -1; + } + break; + } + + return ret; +reject: + return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); +} + int iscsi_target_rx_thread(void *arg) { int ret; @@ -3740,6 +4087,18 @@ restart: if (!conn) goto out; + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { + struct completion comp; + int rc; + + init_completion(&comp); + rc = wait_for_completion_interruptible(&comp); + if (rc < 0) + goto transport_err; + + goto out; + } + while (!kthread_should_stop()) { /* * Ensure that both TX and RX per connection kthreads @@ -3759,11 +4118,6 @@ restart: goto transport_err; } - /* - * Set conn->bad_hdr for use with REJECT PDUs. 
- */ - memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN); - if (conn->conn_ops->HeaderDigest) { iov.iov_base = &digest; iov.iov_len = ISCSI_CRC_LEN; @@ -3787,9 +4141,7 @@ restart: * hit default in the switch below. */ memset(buffer, 0xff, ISCSI_HDR_LEN); - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->conn_digest_errors++; - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->conn_digest_errors); } else { pr_debug("Got HeaderDigest CRC32C" " 0x%08x\n", checksum); @@ -3806,67 +4158,14 @@ restart: (!(opcode & ISCSI_OP_LOGOUT)))) { pr_err("Received illegal iSCSI Opcode: 0x%02x" " while in Discovery Session, rejecting.\n", opcode); - iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, - buffer, conn); + iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buffer); goto transport_err; } - switch (opcode) { - case ISCSI_OP_SCSI_CMD: - if (iscsit_handle_scsi_cmd(conn, buffer) < 0) - goto transport_err; - break; - case ISCSI_OP_SCSI_DATA_OUT: - if (iscsit_handle_data_out(conn, buffer) < 0) - goto transport_err; - break; - case ISCSI_OP_NOOP_OUT: - if (iscsit_handle_nop_out(conn, buffer) < 0) - goto transport_err; - break; - case ISCSI_OP_SCSI_TMFUNC: - if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0) - goto transport_err; - break; - case ISCSI_OP_TEXT: - if (iscsit_handle_text_cmd(conn, buffer) < 0) - goto transport_err; - break; - case ISCSI_OP_LOGOUT: - ret = iscsit_handle_logout_cmd(conn, buffer); - if (ret > 0) { - wait_for_completion_timeout(&conn->conn_logout_comp, - SECONDS_FOR_LOGOUT_COMP * HZ); - goto transport_err; - } else if (ret < 0) - goto transport_err; - break; - case ISCSI_OP_SNACK: - if (iscsit_handle_snack(conn, buffer) < 0) - goto transport_err; - break; - default: - pr_err("Got unknown iSCSI OpCode: 0x%02x\n", - opcode); - if (!conn->sess->sess_ops->ErrorRecoveryLevel) { - pr_err("Cannot recover from unknown" - " opcode while ERL=0, closing iSCSI connection" - ".\n"); - goto transport_err; - } - if (!conn->conn_ops->OFMarker) { - pr_err("Unable to recover from unknown" - " opcode while OFMarker=No, closing iSCSI" - " connection.\n"); - goto transport_err; - } - if (iscsit_recover_from_unknown_opcode(conn) < 0) { - pr_err("Unable to recover from unknown" - " opcode, closing iSCSI connection.\n"); - goto transport_err; - } - break; - } + ret = iscsi_target_rx_opcode(conn, buffer); + if (ret < 0) + goto transport_err; } transport_err: @@ -3890,12 +4189,12 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) spin_lock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); } @@ -3935,7 +4234,9 @@ int iscsit_close_connection( iscsit_stop_timers_for_cmds(conn); iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); - iscsit_free_queue_reqs_for_conn(conn); + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); /* * During Connection recovery drop unacknowledged out of order @@ -3953,6 +4254,7 @@ int iscsit_close_connection( iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_release_commands_from_conn(conn); } + iscsit_free_queue_reqs_for_conn(conn); /* * Handle decrementing session or connection usage count if @@ -4032,6 +4334,12 @@ int iscsit_close_connection( if (conn->sock) sock_release(conn->sock); + + if 
(conn->conn_transport->iscsit_free_conn) + conn->conn_transport->iscsit_free_conn(conn); + + iscsit_put_transport(conn->conn_transport); + conn->thread_set = NULL; pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); @@ -4123,7 +4431,7 @@ int iscsit_close_connection( int iscsit_close_session(struct iscsi_session *sess) { - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; if (atomic_read(&sess->nconn)) { @@ -4263,7 +4571,7 @@ static void iscsit_logout_post_handler_diffcid( /* * Return of 0 causes the TX thread to restart. */ -static int iscsit_logout_post_handler( +int iscsit_logout_post_handler( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { @@ -4321,6 +4629,7 @@ static int iscsit_logout_post_handler( } return ret; } +EXPORT_SYMBOL(iscsit_logout_post_handler); void iscsit_fail_session(struct iscsi_session *sess) { diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index f1e4f3155ba..e936d56fb52 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -7,18 +7,23 @@ extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *); extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *); extern void iscsit_del_tiqn(struct iscsi_tiqn *); extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *); -extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *); +extern void iscsit_login_kref_put(struct kref *); +extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *, + struct iscsi_tpg_np *); +extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *, + struct iscsi_np *, int); extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *, char *, int); extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *, - struct iscsi_portal_group *); + struct iscsi_portal_group *, bool); extern int iscsit_del_np(struct iscsi_np *); -extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *); +extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *); +extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8); -extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery); +extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery); extern void iscsit_thread_get_cpumask(struct iscsi_conn *); extern int iscsi_target_tx_thread(void *); extern int iscsi_target_rx_thread(void *); @@ -34,7 +39,6 @@ extern struct target_fabric_configfs *lio_target_fabric_configfs; extern struct kmem_cache *lio_dr_cache; extern struct kmem_cache *lio_ooo_cache; -extern struct kmem_cache *lio_cmd_cache; extern struct kmem_cache *lio_qr_cache; extern struct kmem_cache *lio_r2t_cache; diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index db0cf7c8add..ab4915c0d93 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file houses the main functions for 
the iSCSI CHAP support * - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -49,32 +47,6 @@ static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len) } } -static void chap_set_random(char *data, int length) -{ - long r; - unsigned n; - - while (length > 0) { - get_random_bytes(&r, sizeof(long)); - r = r ^ (r >> 8); - r = r ^ (r >> 4); - n = r & 0x7; - - get_random_bytes(&r, sizeof(long)); - r = r ^ (r >> 8); - r = r ^ (r >> 5); - n = (n << 3) | (r & 0x7); - - get_random_bytes(&r, sizeof(long)); - r = r ^ (r >> 8); - r = r ^ (r >> 5); - n = (n << 2) | (r & 0x3); - - *data++ = n; - length--; - } -} - static void chap_gen_challenge( struct iscsi_conn *conn, int caller, @@ -86,7 +58,7 @@ static void chap_gen_challenge( memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); - chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH); + get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH); chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, CHAP_CHALLENGE_LENGTH); /* @@ -99,6 +71,40 @@ static void chap_gen_challenge( challenge_asciihex); } +static int chap_check_algorithm(const char *a_str) +{ + char *tmp, *orig, *token; + + tmp = kstrdup(a_str, GFP_KERNEL); + if (!tmp) { + pr_err("Memory allocation failed for CHAP_A temporary buffer\n"); + return CHAP_DIGEST_UNKNOWN; + } + orig = tmp; + + token = strsep(&tmp, "="); + if (!token) + goto out; + + if (strcmp(token, "CHAP_A")) { + pr_err("Unable to locate CHAP_A key\n"); + goto out; + } + while (token) { + token = strsep(&tmp, ","); + if (!token) + goto out; + + if (!strncmp(token, "5", 1)) { + pr_debug("Selected MD5 Algorithm\n"); + kfree(orig); + return CHAP_DIGEST_MD5; + } + } +out: + kfree(orig); + return CHAP_DIGEST_UNKNOWN; +} static struct iscsi_chap *chap_server_open( struct iscsi_conn *conn, @@ -107,6 +113,7 @@ static struct iscsi_chap *chap_server_open( char *aic_str, unsigned int *aic_len) { + int ret; struct iscsi_chap *chap; if (!(auth->naf_flags & NAF_USERID_SET) || @@ -121,25 +128,28 @@ static struct iscsi_chap *chap_server_open( return NULL; chap = conn->auth_protocol; - /* - * We only support MD5 MDA presently. - */ - if (strncmp(a_str, "CHAP_A=5", 8)) { - pr_err("CHAP_A is not MD5.\n"); + ret = chap_check_algorithm(a_str); + switch (ret) { + case CHAP_DIGEST_MD5: + pr_debug("[server] Got CHAP_A=5\n"); + /* + * Send back CHAP_A set to MD5. + */ + *aic_len = sprintf(aic_str, "CHAP_A=5"); + *aic_len += 1; + chap->digest_type = CHAP_DIGEST_MD5; + pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type); + break; + case CHAP_DIGEST_UNKNOWN: + default: + pr_err("Unsupported CHAP_A value\n"); return NULL; } - pr_debug("[server] Got CHAP_A=5\n"); - /* - * Send back CHAP_A set to MD5. - */ - *aic_len = sprintf(aic_str, "CHAP_A=5"); - *aic_len += 1; - chap->digest_type = CHAP_DIGEST_MD5; - pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type); + /* * Set Identifier. 
*/ - chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++; + chap->id = conn->tpg->tpg_chap_id++; *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id); *aic_len += 1; pr_debug("[server] Sending CHAP_I=%d\n", chap->id); @@ -164,8 +174,8 @@ static int chap_server_compute_md5( char *nr_out_ptr, unsigned int *nr_out_len) { - char *endptr; unsigned long id; + unsigned char id_as_uchar; unsigned char digest[MD5_SIGNATURE_SIZE]; unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; unsigned char identifier[10], *challenge = NULL; @@ -173,6 +183,7 @@ static int chap_server_compute_md5( unsigned char client_digest[MD5_SIGNATURE_SIZE]; unsigned char server_digest[MD5_SIGNATURE_SIZE]; unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; + size_t compare_len; struct iscsi_chap *chap = conn->auth_protocol; struct crypto_hash *tfm; struct hash_desc desc; @@ -211,7 +222,9 @@ static int chap_server_compute_md5( goto out; } - if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) { + /* Include the terminating NULL in the compare */ + compare_len = strlen(auth->userid) + 1; + if (strncmp(chap_n, auth->userid, compare_len) != 0) { pr_err("CHAP_N values do not match!\n"); goto out; } @@ -306,9 +319,14 @@ static int chap_server_compute_md5( } if (type == HEX) - id = simple_strtoul(&identifier[2], &endptr, 0); + ret = kstrtoul(&identifier[2], 0, &id); else - id = simple_strtoul(identifier, &endptr, 0); + ret = kstrtoul(identifier, 0, &id); + + if (ret < 0) { + pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret); + goto out; + } if (id > 255) { pr_err("chap identifier: %lu greater than 255\n", id); goto out; @@ -337,6 +355,20 @@ static int chap_server_compute_md5( pr_err("Unable to convert incoming challenge\n"); goto out; } + if (challenge_len > 1024) { + pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); + goto out; + } + /* + * During mutual authentication, the CHAP_C generated by the + * initiator must not match the original CHAP_C generated by + * the target. + */ + if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) { + pr_err("initiator CHAP_C matches target CHAP_C, failing" + " login attempt\n"); + goto out; + } /* * Generate CHAP_N and CHAP_R for mutual authentication. */ @@ -355,7 +387,9 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, &id, 1); + /* To handle both endiannesses */ + id_as_uchar = id; + sg_init_one(&sg, &id_as_uchar, 1); ret = crypto_hash_update(&desc, &sg, 1); if (ret < 0) { pr_err("crypto_hash_update() failed for id\n"); diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h index 2f463c09626..d22f7b96a06 100644 --- a/drivers/target/iscsi/iscsi_target_auth.h +++ b/drivers/target/iscsi/iscsi_target_auth.h @@ -1,6 +1,7 @@ #ifndef _ISCSI_CHAP_H_ #define _ISCSI_CHAP_H_ +#define CHAP_DIGEST_UNKNOWN 0 #define CHAP_DIGEST_MD5 5 #define CHAP_DIGEST_SHA 6 diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 78d75c8567d..ae03f3e5de1 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -2,9 +2,7 @@ * This file contains the configfs implementation for iSCSI Target mode * from the LIO-Target Project. * - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> * @@ -20,6 +18,7 @@ ****************************************************************************/ #include <linux/configfs.h> +#include <linux/ctype.h> #include <linux/export.h> #include <linux/inet.h> #include <target/target_core_base.h> @@ -27,6 +26,7 @@ #include <target/target_core_fabric_configfs.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> +#include <target/iscsi/iscsi_transport.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" @@ -77,11 +77,12 @@ static ssize_t lio_target_np_store_sctp( struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np); struct iscsi_tpg_np *tpg_np_sctp = NULL; - char *endptr; u32 op; int ret; - op = simple_strtoul(page, &endptr, 0); + ret = kstrtou32(page, 0, &op); + if (ret) + return ret; if ((op != 1) && (op != 0)) { pr_err("Illegal value for tpg_enable: %u\n", op); return -EINVAL; @@ -124,8 +125,88 @@ out: TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR); +static ssize_t lio_target_np_show_iser( + struct se_tpg_np *se_tpg_np, + char *page) +{ + struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np, + struct iscsi_tpg_np, se_tpg_np); + struct iscsi_tpg_np *tpg_np_iser; + ssize_t rb; + + tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); + if (tpg_np_iser) + rb = sprintf(page, "1\n"); + else + rb = sprintf(page, "0\n"); + + return rb; +} + +static ssize_t lio_target_np_store_iser( + struct se_tpg_np *se_tpg_np, + const char *page, + size_t count) +{ + struct iscsi_np *np; + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np, + struct iscsi_tpg_np, se_tpg_np); + struct iscsi_tpg_np *tpg_np_iser = NULL; + char *endptr; + u32 op; + int rc = 0; + + op = simple_strtoul(page, &endptr, 0); + if ((op != 1) && (op != 0)) { + pr_err("Illegal value for tpg_enable: %u\n", op); + return -EINVAL; + } + np = tpg_np->tpg_np; + if (!np) { + pr_err("Unable to locate struct iscsi_np from" + " struct iscsi_tpg_np\n"); + return -EINVAL; + } + + tpg = tpg_np->tpg; + if (iscsit_get_tpg(tpg) < 0) + return -EINVAL; + + if (op) { + rc = request_module("ib_isert"); + if (rc != 0) { + pr_warn("Unable to request_module for ib_isert\n"); + rc = 0; + } + + tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, + np->np_ip, tpg_np, ISCSI_INFINIBAND); + if (IS_ERR(tpg_np_iser)) { + rc = PTR_ERR(tpg_np_iser); + goto out; + } + } else { + tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); + if (tpg_np_iser) { + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); + if (rc < 0) + goto out; + } + } + + iscsit_put_tpg(tpg); + return count; +out: + iscsit_put_tpg(tpg); + return rc; +} + +TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); + static struct configfs_attribute *lio_target_portal_attrs[] = { &lio_target_np_sctp.attr, + &lio_target_np_iser.attr, NULL, }; @@ -182,9 +263,9 @@ static struct se_tpg_np *lio_target_call_addnptotpg( *port_str = '\0'; /* Terminate string for IP */ port_str++; /* Skip over ":" */ - ret = strict_strtoul(port_str, 0, &port); + ret = kstrtoul(port_str, 0, &port); if (ret < 0) { - pr_err("strict_strtoul() failed for port_str: %d\n", ret); + pr_err("kstrtoul() failed for port_str: %d\n", ret); return ERR_PTR(ret); } sock_in6 = (struct sockaddr_in6 *)&sockaddr; @@ -207,9 +288,9 @@ static struct se_tpg_np *lio_target_call_addnptotpg( *port_str = '\0'; /* Terminate string for IP */ port_str++; /* Skip over ":" */ - ret = strict_strtoul(port_str, 0, &port); + ret = 
kstrtoul(port_str, 0, &port); if (ret < 0) { - pr_err("strict_strtoul() failed for port_str: %d\n", ret); + pr_err("kstrtoul() failed for port_str: %d\n", ret); return ERR_PTR(ret); } sock_in = (struct sockaddr_in *)&sockaddr; @@ -291,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \ se_node_acl); \ \ - return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \ + return sprintf(page, "%u\n", nacl->node_attrib.name); \ } \ \ static ssize_t iscsi_nacl_attrib_store_##name( \ @@ -301,11 +382,12 @@ static ssize_t iscsi_nacl_attrib_store_##name( \ { \ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \ se_node_acl); \ - char *endptr; \ u32 val; \ int ret; \ \ - val = simple_strtoul(page, &endptr, 0); \ + ret = kstrtou32(page, 0, &val); \ + if (ret) \ + return ret; \ ret = iscsit_na_##name(nacl, val); \ if (ret < 0) \ return ret; \ @@ -392,8 +474,9 @@ static ssize_t __iscsi_##prefix##_store_##name( \ \ if (!capable(CAP_SYS_ADMIN)) \ return -EPERM; \ - \ - snprintf(auth->name, PAGE_SIZE, "%s", page); \ + if (count >= sizeof(auth->name)) \ + return -EINVAL; \ + snprintf(auth->name, sizeof(auth->name), "%s", page); \ if (!strncmp("NULL", auth->name, 4)) \ auth->naf_flags &= ~flags; \ else \ @@ -708,11 +791,12 @@ static ssize_t lio_target_nacl_store_cmdsn_depth( struct iscsi_portal_group *tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg); struct config_item *acl_ci, *tpg_ci, *wwn_ci; - char *endptr; u32 cmdsn_depth = 0; int ret; - cmdsn_depth = simple_strtoul(page, &endptr, 0); + ret = kstrtou32(page, 0, &cmdsn_depth); + if (ret) + return ret; if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) { pr_err("Passed cmdsn_depth: %u exceeds" " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth, @@ -814,7 +898,7 @@ static struct se_node_acl *lio_target_make_nodeacl( if (!se_nacl_new) return ERR_PTR(-ENOMEM); - cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; + cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth; /* * se_nacl_new may be released by core_tpg_add_initiator_node_acl() * when converting a NodeACL from demo mode -> explicit @@ -837,9 +921,9 @@ static struct se_node_acl *lio_target_make_nodeacl( return ERR_PTR(-ENOMEM); } - stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group; + stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group; stats_cg->default_groups[1] = NULL; - config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group, + config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group, "iscsi_sess_stats", &iscsi_stat_sess_cit); return se_nacl; @@ -884,7 +968,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \ if (iscsit_get_tpg(tpg) < 0) \ return -EINVAL; \ \ - rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \ + rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \ iscsit_put_tpg(tpg); \ return rb; \ } \ @@ -896,14 +980,15 @@ static ssize_t iscsi_tpg_attrib_store_##name( \ { \ struct iscsi_portal_group *tpg = container_of(se_tpg, \ struct iscsi_portal_group, tpg_se_tpg); \ - char *endptr; \ u32 val; \ int ret; \ \ if (iscsit_get_tpg(tpg) < 0) \ return -EINVAL; \ \ - val = simple_strtoul(page, &endptr, 0); \ + ret = kstrtou32(page, 0, &val); \ + if (ret) \ + goto out; \ ret = iscsit_ta_##name(tpg, val); \ if (ret < 0) \ goto out; \ @@ -957,6 +1042,21 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); */ 
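Because every attribute here is generated by a macro, the kstrtou32() conversion in iscsi_tpg_attrib_store_##name() above is easy to misread. Expanded by hand for one of the attributes added just below, the store path looks roughly like this (a paraphrase for readability, not literal preprocessor output):

/* Approximate expansion of DEF_TPG_ATTRIB(demo_mode_discovery). */
static ssize_t iscsi_tpg_attrib_store_demo_mode_discovery(
	struct se_portal_group *se_tpg, const char *page, size_t count)
{
	struct iscsi_portal_group *tpg = container_of(se_tpg,
			struct iscsi_portal_group, tpg_se_tpg);
	u32 val;
	int ret;

	if (iscsit_get_tpg(tpg) < 0)
		return -EINVAL;

	ret = kstrtou32(page, 0, &val);		/* was simple_strtoul() */
	if (ret)
		goto out;
	ret = iscsit_ta_demo_mode_discovery(tpg, val);
	if (ret < 0)
		goto out;

	iscsit_put_tpg(tpg);
	return count;
out:
	iscsit_put_tpg(tpg);
	return ret;
}

The practical difference is that a malformed value now fails with the parser's error code instead of being silently accepted as far as simple_strtoul() could parse it.

DEF_TPG_ATTRIB(prod_mode_write_protect); TPG_ATTR(prod_mode_write_protect, S_IRUGO | 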
S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_demo_mode_discovery, + */ +DEF_TPG_ATTRIB(demo_mode_discovery); +TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_default_erl + */ +DEF_TPG_ATTRIB(default_erl); +TPG_ATTR(default_erl, S_IRUGO | S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_t10_pi + */ +DEF_TPG_ATTRIB(t10_pi); +TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR); static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_authentication.attr, @@ -967,11 +1067,139 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_cache_dynamic_acls.attr, &iscsi_tpg_attrib_demo_mode_write_protect.attr, &iscsi_tpg_attrib_prod_mode_write_protect.attr, + &iscsi_tpg_attrib_demo_mode_discovery.attr, + &iscsi_tpg_attrib_default_erl.attr, + &iscsi_tpg_attrib_t10_pi.attr, NULL, }; /* End items for lio_target_tpg_attrib_cit */ +/* Start items for lio_target_tpg_auth_cit */ + +#define __DEF_TPG_AUTH_STR(prefix, name, flags) \ +static ssize_t __iscsi_##prefix##_show_##name( \ + struct se_portal_group *se_tpg, \ + char *page) \ +{ \ + struct iscsi_portal_group *tpg = container_of(se_tpg, \ + struct iscsi_portal_group, tpg_se_tpg); \ + struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \ +} \ + \ +static ssize_t __iscsi_##prefix##_store_##name( \ + struct se_portal_group *se_tpg, \ + const char *page, \ + size_t count) \ +{ \ + struct iscsi_portal_group *tpg = container_of(se_tpg, \ + struct iscsi_portal_group, tpg_se_tpg); \ + struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + snprintf(auth->name, sizeof(auth->name), "%s", page); \ + if (!(strncmp("NULL", auth->name, 4))) \ + auth->naf_flags &= ~flags; \ + else \ + auth->naf_flags |= flags; \ + \ + if ((auth->naf_flags & NAF_USERID_IN_SET) && \ + (auth->naf_flags & NAF_PASSWORD_IN_SET)) \ + auth->authenticate_target = 1; \ + else \ + auth->authenticate_target = 0; \ + \ + return count; \ +} + +#define __DEF_TPG_AUTH_INT(prefix, name) \ +static ssize_t __iscsi_##prefix##_show_##name( \ + struct se_portal_group *se_tpg, \ + char *page) \ +{ \ + struct iscsi_portal_group *tpg = container_of(se_tpg, \ + struct iscsi_portal_group, tpg_se_tpg); \ + struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \ +} + +#define DEF_TPG_AUTH_STR(name, flags) \ + __DEF_TPG_AUTH_STR(tpg_auth, name, flags) \ +static ssize_t iscsi_tpg_auth_show_##name( \ + struct se_portal_group *se_tpg, \ + char *page) \ +{ \ + return __iscsi_tpg_auth_show_##name(se_tpg, page); \ +} \ + \ +static ssize_t iscsi_tpg_auth_store_##name( \ + struct se_portal_group *se_tpg, \ + const char *page, \ + size_t count) \ +{ \ + return __iscsi_tpg_auth_store_##name(se_tpg, page, count); \ +} + +#define DEF_TPG_AUTH_INT(name) \ + __DEF_TPG_AUTH_INT(tpg_auth, name) \ +static ssize_t iscsi_tpg_auth_show_##name( \ + struct se_portal_group *se_tpg, \ + char *page) \ +{ \ + return __iscsi_tpg_auth_show_##name(se_tpg, page); \ +} + +#define TPG_AUTH_ATTR(_name, _mode) TF_TPG_AUTH_ATTR(iscsi, _name, _mode); +#define TPG_AUTH_ATTR_RO(_name) TF_TPG_AUTH_ATTR_RO(iscsi, _name); + +/* + * * One-way authentication userid + * */ +DEF_TPG_AUTH_STR(userid, NAF_USERID_SET); +TPG_AUTH_ATTR(userid, S_IRUGO | S_IWUSR); +/* + * * One-way authentication 
password + * */ +DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET); +TPG_AUTH_ATTR(password, S_IRUGO | S_IWUSR); +/* + * * Enforce mutual authentication + * */ +DEF_TPG_AUTH_INT(authenticate_target); +TPG_AUTH_ATTR_RO(authenticate_target); +/* + * * Mutual authentication userid + * */ +DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET); +TPG_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR); +/* + * * Mutual authentication password + * */ +DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET); +TPG_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *lio_target_tpg_auth_attrs[] = { + &iscsi_tpg_auth_userid.attr, + &iscsi_tpg_auth_password.attr, + &iscsi_tpg_auth_authenticate_target.attr, + &iscsi_tpg_auth_userid_mutual.attr, + &iscsi_tpg_auth_password_mutual.attr, + NULL, +}; + +/* End items for lio_target_tpg_auth_cit */ + /* Start items for lio_target_tpg_param_cit */ #define DEF_TPG_PARAM(name) \ @@ -1006,13 +1234,14 @@ static ssize_t iscsi_tpg_param_store_##name( \ struct iscsi_portal_group *tpg = container_of(se_tpg, \ struct iscsi_portal_group, tpg_se_tpg); \ char *buf; \ - int ret; \ + int ret, len; \ \ buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \ if (!buf) \ return -ENOMEM; \ - snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \ - buf[strlen(buf)-1] = '\0'; /* Kill newline */ \ + len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \ + if (isspace(buf[len-1])) \ + buf[len-1] = '\0'; /* Kill newline */ \ \ if (iscsit_get_tpg(tpg) < 0) { \ kfree(buf); \ @@ -1149,11 +1378,12 @@ static ssize_t lio_target_tpg_store_enable( { struct iscsi_portal_group *tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg); - char *endptr; u32 op; - int ret = 0; + int ret; - op = simple_strtoul(page, &endptr, 0); + ret = kstrtou32(page, 0, &op); + if (ret) + return ret; if ((op != 1) && (op != 0)) { pr_err("Illegal value for tpg_enable: %u\n", op); return -EINVAL; @@ -1201,15 +1431,15 @@ static struct se_portal_group *lio_target_tiqn_addtpg( { struct iscsi_portal_group *tpg; struct iscsi_tiqn *tiqn; - char *tpgt_str, *end_ptr; - int ret = 0; - unsigned short int tpgt; + char *tpgt_str; + int ret; + u16 tpgt; tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); /* * Only tpgt_# directory groups can be created below * target/iscsi/iqn.superturodiskarry/ - */ + */ tpgt_str = strstr(name, "tpgt_"); if (!tpgt_str) { pr_err("Unable to locate \"tpgt_#\" directory" @@ -1217,7 +1447,9 @@ static struct se_portal_group *lio_target_tiqn_addtpg( return NULL; } tpgt_str += 5; /* Skip ahead of "tpgt_" */ - tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); + ret = kstrtou16(tpgt_str, 0, &tpgt); + if (ret) + return NULL; tpg = iscsit_alloc_portal_group(tiqn, tpgt); if (!tpg) @@ -1266,7 +1498,7 @@ static ssize_t lio_target_wwn_show_attr_lio_version( struct target_fabric_configfs *tf, char *page) { - return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n"); + return sprintf(page, "Datera Inc. 
iSCSI Target "ISCSIT_VERSION"\n"); } TF_WWN_ATTR_RO(lio_target, lio_version); @@ -1301,21 +1533,21 @@ static struct se_wwn *lio_target_call_coreaddtiqn( return ERR_PTR(-ENOMEM); } - stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group; - stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group; - stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group; - stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group; - stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group; + stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group; + stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group; + stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group; + stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group; + stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group; stats_cg->default_groups[5] = NULL; - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, "iscsi_instance", &iscsi_stat_instance_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group, "iscsi_sess_err", &iscsi_stat_sess_err_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group, "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group, "iscsi_login_stats", &iscsi_stat_login_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, "iscsi_logout_stats", &iscsi_stat_logout_cit); pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); @@ -1425,10 +1657,12 @@ static ssize_t iscsi_disc_store_enforce_discovery_auth( { struct iscsi_param *param; struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg; - char *endptr; u32 op; + int err; - op = simple_strtoul(page, &endptr, 0); + err = kstrtou32(page, 0, &op); + if (err) + return -EINVAL; if ((op != 1) && (op != 0)) { pr_err("Illegal value for enforce_discovery_auth:" " %u\n", op); @@ -1536,16 +1770,18 @@ static int lio_queue_data_in(struct se_cmd *se_cmd) struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); cmd->i_state = ISTATE_SEND_DATAIN; - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); + return 0; } static int lio_write_pending(struct se_cmd *se_cmd) { struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + struct iscsi_conn *conn = cmd->conn; if (!cmd->immediate_data && !cmd->unsolicited_data) - return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); + return conn->conn_transport->iscsit_get_dataout(conn, cmd, false); return 0; } @@ -1567,17 +1803,29 @@ static int lio_queue_status(struct se_cmd *se_cmd) struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); cmd->i_state = ISTATE_SEND_STATUS; - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + + if (cmd->se_cmd.scsi_status || cmd->sense_reason) { + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + return 0; + } + 
cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); + return 0; } -static int lio_queue_tm_rsp(struct se_cmd *se_cmd) +static void lio_queue_tm_rsp(struct se_cmd *se_cmd) { struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); cmd->i_state = ISTATE_SEND_TASKMGTRSP; iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); - return 0; +} + +static void lio_aborted_task(struct se_cmd *se_cmd) +{ + struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + + cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); } static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) @@ -1598,21 +1846,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; + return tpg->tpg_attrib.default_cmdsn_depth; } static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls; + return tpg->tpg_attrib.generate_node_acls; } static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls; + return tpg->tpg_attrib.cache_dynamic_acls; } static int lio_tpg_check_demo_mode_write_protect( @@ -1620,7 +1868,7 @@ static int lio_tpg_check_demo_mode_write_protect( { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect; + return tpg->tpg_attrib.demo_mode_write_protect; } static int lio_tpg_check_prod_mode_write_protect( @@ -1628,7 +1876,7 @@ static int lio_tpg_check_prod_mode_write_protect( { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect; + return tpg->tpg_attrib.prod_mode_write_protect; } static void lio_tpg_release_fabric_acl( @@ -1691,15 +1939,24 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl) { struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl, se_node_acl); + struct se_portal_group *se_tpg = se_acl->se_tpg; + struct iscsi_portal_group *tpg = container_of(se_tpg, + struct iscsi_portal_group, tpg_se_tpg); - ISCSI_NODE_ATTRIB(acl)->nacl = acl; - iscsit_set_default_node_attribues(acl); + acl->node_attrib.nacl = acl; + iscsit_set_default_node_attribues(acl, tpg); +} + +static int lio_check_stop_free(struct se_cmd *se_cmd) +{ + return target_put_sess_cmd(se_cmd->se_sess, se_cmd); } static void lio_release_cmd(struct se_cmd *se_cmd) { struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd); iscsit_release_cmd(cmd); } @@ -1740,6 +1997,7 @@ int iscsi_target_register_configfs(void) fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl; fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl; fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index; + fabric->tf_ops.check_stop_free = &lio_check_stop_free, fabric->tf_ops.release_cmd = &lio_release_cmd; fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session; fabric->tf_ops.close_session = &lio_tpg_close_session; @@ -1754,6 +2012,7 @@ int iscsi_target_register_configfs(void) fabric->tf_ops.queue_data_in = &lio_queue_data_in; fabric->tf_ops.queue_status = &lio_queue_status; fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; + fabric->tf_ops.aborted_task = 
&lio_aborted_task; /* * Setup function pointers for generic logic in target_core_fabric_configfs.c */ @@ -1771,16 +2030,17 @@ int iscsi_target_register_configfs(void) * Setup default attribute lists for various fabric->tf_cit_tmpl * sturct config_item_type's */ - TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; + fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; ret = target_fabric_configfs_register(fabric); if (ret < 0) { diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 7a333d28d9a..302eb3b7871 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -9,7 +9,7 @@ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#define ISCSIT_VERSION "v4.1.0-rc2" +#define ISCSIT_VERSION "v4.1.0" #define ISCSI_MAX_DATASN_MISSING_COUNT 16 #define ISCSI_TX_THREAD_TCP_TIMEOUT 2 #define ISCSI_RX_THREAD_TCP_TIMEOUT 2 @@ -17,6 +17,9 @@ #define SECONDS_FOR_ASYNC_TEXT 10 #define SECONDS_FOR_LOGOUT_COMP 15 #define WHITE_SPACE " \t\v\f\n\r" +#define ISCSIT_MIN_TAGS 16 +#define ISCSIT_EXTRA_TAGS 8 +#define ISCSIT_TCP_BACKLOG 256 /* struct iscsi_node_attrib sanity values */ #define NA_DATAOUT_TIMEOUT 3 @@ -34,9 +37,6 @@ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0 #define NA_RANDOM_DATAIN_SEQ_OFFSETS 0 #define NA_RANDOM_R2T_OFFSETS 0 -#define NA_DEFAULT_ERL 0 -#define NA_DEFAULT_ERL_MAX 2 -#define NA_DEFAULT_ERL_MIN 0 /* struct iscsi_tpg_attrib sanity values */ #define TA_AUTHENTICATION 1 @@ -47,7 +47,7 @@ #define TA_NETIF_TIMEOUT_MAX 15 #define TA_NETIF_TIMEOUT_MIN 2 #define TA_GENERATE_NODE_ACLS 0 -#define TA_DEFAULT_CMDSN_DEPTH 16 +#define TA_DEFAULT_CMDSN_DEPTH 64 #define TA_DEFAULT_CMDSN_DEPTH_MAX 512 #define TA_DEFAULT_CMDSN_DEPTH_MIN 1 #define TA_CACHE_DYNAMIC_ACLS 0 @@ -55,12 +55,15 @@ #define TA_DEMO_MODE_WRITE_PROTECT 1 /* Disabled by default in production mode w/ explict ACLs */ #define TA_PROD_MODE_WRITE_PROTECT 0 +#define 
TA_DEMO_MODE_DISCOVERY 1 +#define TA_DEFAULT_ERL 0 #define TA_CACHE_CORE_NPS 0 - +/* T10 protection information disabled by default */ +#define TA_DEFAULT_T10_PI 0 #define ISCSI_IOV_DATA_BUFFER 5 -enum tpg_np_network_transport_table { +enum iscsit_transport_type { ISCSI_TCP = 0, ISCSI_SCTP_TCP = 1, ISCSI_SCTP_UDP = 2, @@ -132,7 +135,8 @@ enum cmd_flags_table { ICF_CONTIG_MEMORY = 0x00000020, ICF_ATTACHED_TO_RQUEUE = 0x00000040, ICF_OOO_CMDSN = 0x00000080, - ICF_REJECT_FAIL_CONN = 0x00000100, + IFC_SENDTARGETS_ALL = 0x00000100, + IFC_SENDTARGETS_SINGLE = 0x00000200, }; /* struct iscsi_cmd->i_state */ @@ -188,6 +192,7 @@ enum recover_cmdsn_ret_table { CMDSN_NORMAL_OPERATION = 0, CMDSN_LOWER_THAN_EXP = 1, CMDSN_HIGHER_THAN_EXP = 2, + CMDSN_MAXCMDSN_OVERRUN = 3, }; /* Used for iscsi_handle_immediate_data() return values */ @@ -244,6 +249,11 @@ struct iscsi_conn_ops { u8 IFMarker; /* [0,1] == [No,Yes] */ u32 OFMarkInt; /* [1..65535] */ u32 IFMarkInt; /* [1..65535] */ + /* + * iSER specific connection parameters + */ + u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */ + u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */ }; struct iscsi_sess_ops { @@ -265,6 +275,10 @@ struct iscsi_sess_ops { u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */ u8 ErrorRecoveryLevel; /* [0..2] */ u8 SessionType; /* [0,1] == [Normal,Discovery]*/ + /* + * iSER specific session parameters + */ + u8 RDMAExtensions; /* [0,1] == [No,Yes] */ }; struct iscsi_queue_req { @@ -284,6 +298,7 @@ struct iscsi_data_count { }; struct iscsi_param_list { + bool iser; struct list_head param_list; struct list_head extra_response_list; }; @@ -356,6 +371,8 @@ struct iscsi_cmd { u8 maxcmdsn_inc; /* Immediate Unsolicited Dataout */ u8 unsolicited_data; + /* Reject reason code */ + u8 reject_reason; /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */ u16 logout_cid; /* Command flags */ @@ -417,6 +434,8 @@ struct iscsi_cmd { u32 tx_size; /* Buffer used for various purposes */ void *buf_ptr; + /* Used by SendTargets=[iqn.,eui.] 
discovery */ + void *text_in_ptr; /* See include/linux/dma-mapping.h */ enum dma_data_direction data_direction; /* iSCSI PDU Header + CRC */ @@ -436,7 +455,6 @@ struct iscsi_cmd { struct list_head datain_list; /* R2T List */ struct list_head cmd_r2t_list; - struct completion reject_comp; /* Timer for DataOUT */ struct timer_list dataout_timer; /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */ @@ -503,6 +521,7 @@ struct iscsi_conn { u16 login_port; u16 local_port; int net_size; + int login_family; u32 auth_id; u32 conn_flags; /* Used for iscsi_tx_login_rsp() */ @@ -516,8 +535,6 @@ struct iscsi_conn { u32 of_marker; /* Used for calculating OFMarker offset to next PDU */ u32 of_marker_offset; - /* Complete Bad PDU for sending reject */ - unsigned char bad_hdr[ISCSI_HDR_LEN]; #define IPV6_ADDRESS_SPACE 48 unsigned char login_ip[IPV6_ADDRESS_SPACE]; unsigned char local_ip[IPV6_ADDRESS_SPACE]; @@ -540,9 +557,19 @@ struct iscsi_conn { struct completion rx_half_close_comp; /* socket used by this connection */ struct socket *sock; + void (*orig_data_ready)(struct sock *); + void (*orig_state_change)(struct sock *); +#define LOGIN_FLAGS_READ_ACTIVE 1 +#define LOGIN_FLAGS_CLOSED 2 +#define LOGIN_FLAGS_READY 4 + unsigned long login_flags; + struct delayed_work login_work; + struct delayed_work login_cleanup_work; + struct iscsi_login *login; struct timer_list nopin_timer; struct timer_list nopin_response_timer; struct timer_list transport_timer; + struct task_struct *login_kworker; /* Spinlock used for add/deleting cmd's from conn_cmd_list */ spinlock_t cmd_lock; spinlock_t conn_usage_lock; @@ -562,11 +589,15 @@ struct iscsi_conn { struct list_head immed_queue_list; struct list_head response_queue_list; struct iscsi_conn_ops *conn_ops; + struct iscsi_login *conn_login; + struct iscsit_transport *conn_transport; struct iscsi_param_list *param_list; /* Used for per connection auth state machine */ void *auth_protocol; + void *context; struct iscsi_login_thread_s *login_thread; struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np; /* Pointer to parent session */ struct iscsi_session *sess; /* Pointer to thread_set in use for this conn's threads */ @@ -620,14 +651,13 @@ struct iscsi_session { /* Used for session reference counting */ int session_usage_count; int session_waiting_on_uc; - u32 cmd_pdus; - u32 rsp_pdus; - u64 tx_data_octets; - u64 rx_data_octets; - u32 conn_digest_errors; - u32 conn_timeout_errors; + atomic_long_t cmd_pdus; + atomic_long_t rsp_pdus; + atomic_long_t tx_data_octets; + atomic_long_t rx_data_octets; + atomic_long_t conn_digest_errors; + atomic_long_t conn_timeout_errors; u64 creation_time; - spinlock_t session_stats_lock; /* Number of active connections */ atomic_t nconn; atomic_t session_continuation; @@ -663,6 +693,9 @@ struct iscsi_login { u8 first_request; u8 version_min; u8 version_max; + u8 login_complete; + u8 login_failed; + bool zero_tsih; char isid[6]; u32 cmd_sn; itt_t init_task_tag; @@ -670,10 +703,12 @@ struct iscsi_login { u32 rsp_length; u16 cid; u16 tsih; - char *req; - char *rsp; + char req[ISCSI_HDR_LEN]; + char rsp[ISCSI_HDR_LEN]; char *req_buf; char *rsp_buf; + struct iscsi_conn *conn; + struct iscsi_np *np; } ____cacheline_aligned; struct iscsi_node_attrib { @@ -720,11 +755,6 @@ struct iscsi_node_acl { struct se_node_acl se_node_acl; }; -#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps) - -#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib) -#define ISCSI_NODE_AUTH(t) (&(t)->node_auth) - struct iscsi_tpg_attrib { u32 authentication; 
u32 login_timeout; @@ -734,6 +764,9 @@ struct iscsi_tpg_attrib { u32 default_cmdsn_depth; u32 demo_mode_write_protect; u32 prod_mode_write_protect; + u32 demo_mode_discovery; + u32 default_erl; + u8 t10_pi; struct iscsi_portal_group *tpg; }; @@ -742,6 +775,7 @@ struct iscsi_np { int np_ip_proto; int np_sock_type; enum np_thread_state_table np_thread_state; + bool enabled; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; @@ -753,8 +787,10 @@ struct iscsi_np { struct __kernel_sockaddr_storage np_sockaddr; struct task_struct *np_thread; struct timer_list np_login_timer; - struct iscsi_portal_group *np_login_tpg; + void *np_context; + struct iscsit_transport *np_transport; struct list_head np_list; + struct iscsi_tpg_np *tpg_np; } ____cacheline_aligned; struct iscsi_tpg_np { @@ -766,6 +802,8 @@ struct iscsi_tpg_np { struct list_head tpg_np_parent_list; struct se_tpg_np se_tpg_np; spinlock_t tpg_np_parent_lock; + struct completion tpg_np_comp; + struct kref tpg_np_kref; }; struct iscsi_portal_group { @@ -787,8 +825,9 @@ struct iscsi_portal_group { spinlock_t tpg_state_lock; struct se_portal_group tpg_se_tpg; struct mutex tpg_access_lock; - struct mutex np_login_lock; + struct semaphore np_login_sem; struct iscsi_tpg_attrib tpg_attrib; + struct iscsi_node_auth tpg_demo_auth; /* Pointer to default list of iSCSI parameters for TPG */ struct iscsi_param_list *param_list; struct iscsi_tiqn *tpg_tiqn; @@ -796,12 +835,6 @@ struct iscsi_portal_group { struct list_head tpg_list; } ____cacheline_aligned; -#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg) -#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l]) -#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg) -#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib) -#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg) - struct iscsi_wwn_stat_grps { struct config_group iscsi_stat_group; struct config_group iscsi_instance_group; @@ -832,8 +865,6 @@ struct iscsi_tiqn { struct iscsi_logout_stats logout_stats; } ____cacheline_aligned; -#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps) - struct iscsit_global { /* In core shutdown */ u32 in_shutdown; diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c index 848fee76894..e93d5a7a3f8 100644 --- a/drivers/target/iscsi/iscsi_target_datain_values.c +++ b/drivers/target/iscsi/iscsi_target_datain_values.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains the iSCSI Target DataIN value generation functions. * - © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c index bcc409853a6..7087c736daa 100644 --- a/drivers/target/iscsi/iscsi_target_device.c +++ b/drivers/target/iscsi/iscsi_target_device.c @@ -2,9 +2,7 @@ * This file contains the iSCSI Virtual Device and Disk Transport * agnostic related functions. * - © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> * @@ -65,3 +63,4 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); mutex_unlock(&sess->cmdsn_mutex); } +EXPORT_SYMBOL(iscsit_increment_maxcmdsn); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 8e6298cc883..0d1e6ee3e99 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -2,9 +2,7 @@ * This file contains error recovery level zero functions used by * the iSCSI Target driver. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -746,13 +744,12 @@ int iscsit_check_post_dataout( if (!conn->sess->sess_ops->ErrorRecoveryLevel) { pr_err("Unable to recover from DataOUT CRC" " failure while ERL=0, closing session.\n"); - iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR, - 1, 0, buf, cmd); + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, + buf); return DATAOUT_CANNOT_RECOVER; } - iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR, - 0, 0, buf, cmd); + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf); return iscsit_dataout_post_crc_failed(cmd, buf); } } @@ -760,7 +757,7 @@ int iscsit_check_post_dataout( static void iscsit_handle_time2retain_timeout(unsigned long data) { struct iscsi_session *sess = (struct iscsi_session *) data; - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; spin_lock_bh(&se_tpg->session_lock); @@ -788,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data) tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; - sess->conn_timeout_errors++; + atomic_long_inc(&sess->conn_timeout_errors); spin_unlock(&tiqn->sess_err_stats.lock); } } @@ -804,9 +801,9 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess) * Only start Time2Retain timer when the associated TPG is still in * an ACTIVE (eg: not disabled or shutdown) state. 
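A note on the statistics rework visible in the hunk above and in the struct iscsi_session changes earlier in this patch: the per-session counters move from u32 fields serialized by session_stats_lock to atomic_long_t, so hot paths such as this timeout handler can account events without taking a lock. A self-contained sketch of the pattern (illustrative only, not part of the patch):

#include <linux/atomic.h>

struct sess_stats_example {
	atomic_long_t conn_timeout_errors;
	atomic_long_t cmd_pdus;
};

/* hot path: a lockless increment replaces spin_lock() + counter++ */
static void account_timeout(struct sess_stats_example *s)
{
	atomic_long_inc(&s->conn_timeout_errors);
}

/* stat readers take a plain snapshot, which is fine for counters */
static long snapshot_timeouts(struct sess_stats_example *s)
{
	return atomic_long_read(&s->conn_timeout_errors);
}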
*/ - spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock); - tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE); - spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock); + spin_lock(&sess->tpg->tpg_state_lock); + tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE); + spin_unlock(&sess->tpg->tpg_state_lock); if (!tpg_active) return; @@ -832,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess) */ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) { - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) @@ -842,11 +839,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) return 0; sess->time2retain_timer_flags |= ISCSI_TF_STOP; - spin_unlock_bh(&se_tpg->session_lock); + spin_unlock(&se_tpg->session_lock); del_timer_sync(&sess->time2retain_timer); - spin_lock_bh(&se_tpg->session_lock); + spin_lock(&se_tpg->session_lock); sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; pr_debug("Stopped Time2Retain Timer for SID: %u\n", sess->sid); @@ -909,6 +906,7 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep) wait_for_completion(&conn->conn_wait_comp); complete(&conn->conn_post_wait_comp); } +EXPORT_SYMBOL(iscsit_cause_connection_reinstatement); void iscsit_fall_back_to_erl0(struct iscsi_session *sess) { diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 0b52a237130..cda4d80cfae 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains error recovery level one used by the iSCSI Target driver. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -22,6 +20,7 @@ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> #include "iscsi_target_core.h" #include "iscsi_target_seq_pdu_list.h" @@ -53,6 +52,9 @@ int iscsit_dump_data_payload( u32 length, padding, offset = 0, size; struct kvec iov; + if (conn->sess->sess_ops->RDMAExtensions) + return 0; + length = (buf_len > OFFLOAD_BUF_SIZE) ? 
OFFLOAD_BUF_SIZE : buf_len; buf = kzalloc(length, GFP_ATOMIC); @@ -158,9 +160,8 @@ static int iscsit_handle_r2t_snack( " protocol error.\n", cmd->init_task_tag, begrun, (begrun + runlength), cmd->acked_data_sn); - return iscsit_add_reject_from_cmd( - ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); + return iscsit_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); } if (runlength) { @@ -169,8 +170,8 @@ static int iscsit_handle_r2t_snack( " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds" " current R2TSN: 0x%08x, protocol error.\n", cmd->init_task_tag, begrun, runlength, cmd->r2t_sn); - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd); + return iscsit_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); } last_r2tsn = (begrun + runlength); } else @@ -429,8 +430,7 @@ static int iscsit_handle_recovery_datain( " protocol error.\n", cmd->init_task_tag, begrun, (begrun + runlength), cmd->acked_data_sn); - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, - 1, 0, buf, cmd); + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); } /* @@ -441,14 +441,14 @@ static int iscsit_handle_recovery_datain( pr_err("Initiator requesting BegRun: 0x%08x, RunLength" ": 0x%08x greater than maximum DataSN: 0x%08x.\n", begrun, runlength, (cmd->data_sn - 1)); - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, - 1, 0, buf, cmd); + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, + buf); } dr = iscsit_allocate_datain_req(); if (!dr) - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 0, buf, cmd); + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, + buf); dr->data_sn = dr->begrun = begrun; dr->runlength = runlength; @@ -507,7 +507,9 @@ int iscsit_handle_status_snack( u32 last_statsn; int found_cmd; - if (conn->exp_statsn > begrun) { + if (!begrun) { + begrun = conn->exp_statsn; + } else if (conn->exp_statsn > begrun) { pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" " 0x%08x but already got ExpStatSN: 0x%08x on CID:" " %hu.\n", begrun, runlength, conn->exp_statsn, @@ -819,7 +821,7 @@ static int iscsit_attach_ooo_cmdsn( /* * CmdSN is greater than the tail of the list. 
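The ordered-insert hunk just below swaps plain '<' comparisons of CmdSN values for iscsi_sna_lt(), because CmdSN is a 32-bit serial number that wraps around. One common wrap-safe formulation, shown as a standalone sketch (the driver's actual helpers live in its own headers and differ in detail):

#include <linux/types.h>

/* true if serial number a precedes b, modulo 2^32 */
static inline bool example_sna_lt(u32 a, u32 b)
{
	return a != b && (s32)(a - b) < 0;
}

/* example_sna_lt(0xfffffffeU, 0x00000001U) is true across the wrap,
 * where a plain '<' would report the opposite. */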
*/ - if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) + if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn)) list_add_tail(&ooo_cmdsn->ooo_list, &sess->sess_ooo_cmdsn_list); else { @@ -829,11 +831,12 @@ static int iscsit_attach_ooo_cmdsn( */ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, ooo_list) { - if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) + if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn)) continue; + /* Insert before this entry */ list_add(&ooo_cmdsn->ooo_list, - &ooo_tmp->ooo_list); + ooo_tmp->ooo_list.prev); break; } } @@ -919,6 +922,7 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess) int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) { struct se_cmd *se_cmd = &cmd->se_cmd; + struct iscsi_conn *conn = cmd->conn; int lr = 0; spin_lock_bh(&cmd->istate_lock); @@ -981,7 +985,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) return 0; iscsit_set_dataout_sequence_values(cmd); - iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); + conn->conn_transport->iscsit_get_dataout(conn, cmd, false); } return 0; } @@ -999,10 +1003,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) if (transport_check_aborted_status(se_cmd, 1) != 0) return 0; - iscsit_set_dataout_sequence_values(cmd); - spin_lock_bh(&cmd->dataout_timeout_lock); - iscsit_start_dataout_timer(cmd, cmd->conn); - spin_unlock_bh(&cmd->dataout_timeout_lock); + iscsit_set_unsoliticed_dataout(cmd); } return transport_handle_cdb_direct(&cmd->se_cmd); @@ -1087,7 +1088,7 @@ int iscsit_handle_ooo_cmdsn( ooo_cmdsn = iscsit_allocate_ooo_cmdsn(); if (!ooo_cmdsn) - return CMDSN_ERROR_CANNOT_RECOVER; + return -ENOMEM; ooo_cmdsn->cmd = cmd; ooo_cmdsn->batch_count = (batch) ? @@ -1098,10 +1099,10 @@ if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) { kmem_cache_free(lio_ooo_cache, ooo_cmdsn); - return CMDSN_ERROR_CANNOT_RECOVER; + return -ENOMEM; } - return CMDSN_HIGHER_THAN_EXP; + return 0; } static int iscsit_set_dataout_timeout_values( @@ -1290,3 +1291,4 @@ void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd) cmd->init_task_tag); spin_unlock_bh(&cmd->dataout_timeout_lock); } +EXPORT_SYMBOL(iscsit_stop_dataout_timer); diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index ba6091bf93f..4ca8fd2a70d 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -2,9 +2,7 @@ * This file contains error recovery level two functions used by * the iSCSI Target driver. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> * @@ -140,10 +138,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -162,10 +160,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -218,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery( } cr = cmd->cr; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); return --cr->cmd_count; } @@ -248,7 +246,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( iscsit_remove_cmd_from_connection_recovery(cmd, sess); spin_unlock(&cr->conn_recovery_cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -299,10 +297,10 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) continue; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -337,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) /* * Only perform connection recovery on ISCSI_OP_SCSI_CMD or * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call - * list_del(&cmd->i_conn_node); to release the command to the + * list_del_init(&cmd->i_conn_node); to release the command to the * session pool and remove it from the connection's list. 
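Throughout these connection-recovery hunks, list_del() becomes list_del_init(). The distinction matters once commands can move between a connection's list and the recovery lists: list_del_init() leaves the entry as a valid empty list head, so a later list_empty() check or a repeated unlink attempt is safe instead of dereferencing poisoned pointers. A minimal sketch with a hypothetical struct:

#include <linux/list.h>

struct example_cmd {
	struct list_head i_conn_node;
};

static void example_detach(struct example_cmd *cmd)
{
	list_del_init(&cmd->i_conn_node);	/* node stays a valid empty head */
}

static bool example_is_detached(struct example_cmd *cmd)
{
	/* only well-defined because detach used list_del_init() */
	return list_empty(&cmd->i_conn_node);
}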
* * Also stop the DataOUT timer, which will be restarted after @@ -353,9 +351,9 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); continue; } @@ -373,9 +371,9 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) */ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - iscsit_free_cmd(cmd); + iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); continue; } @@ -395,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd->sess = conn->sess; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_all_datain_reqs(cmd); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index fdb632f0ab8..5e71ac60941 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains the login functions used by the iSCSI Target driver. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -39,8 +37,40 @@ #include "iscsi_target.h" #include "iscsi_target_parameters.h" -static int iscsi_login_init_conn(struct iscsi_conn *conn) +#include <target/iscsi/iscsi_transport.h> + +static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn) { + struct iscsi_login *login; + + login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL); + if (!login) { + pr_err("Unable to allocate memory for struct iscsi_login.\n"); + return NULL; + } + conn->login = login; + login->conn = conn; + login->first_request = 1; + + login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); + if (!login->req_buf) { + pr_err("Unable to allocate memory for response buffer.\n"); + goto out_login; + } + + login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); + if (!login->rsp_buf) { + pr_err("Unable to allocate memory for request buffer.\n"); + goto out_req_buf; + } + + conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); + if (!conn->conn_ops) { + pr_err("Unable to allocate memory for" + " struct iscsi_conn_ops.\n"); + goto out_rsp_buf; + } + init_waitqueue_head(&conn->queues_wq); INIT_LIST_HEAD(&conn->conn_list); INIT_LIST_HEAD(&conn->conn_cmd_list); @@ -62,10 +92,21 @@ static int iscsi_login_init_conn(struct iscsi_conn *conn) if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { pr_err("Unable to allocate conn->conn_cpumask\n"); - return -ENOMEM; + goto out_conn_ops; } + conn->conn_login = login; - return 0; + return login; + +out_conn_ops: + kfree(conn->conn_ops); +out_rsp_buf: + kfree(login->rsp_buf); +out_req_buf: + kfree(login->req_buf); +out_login: + kfree(login); + return NULL; } /* @@ -208,6 +249,28 @@ static void iscsi_login_set_conn_values( mutex_unlock(&auth_id_lock); } +static __printf(2, 3) int iscsi_change_param_sprintf( + struct iscsi_conn *conn, + const char *fmt, ...) 
+{ + va_list args; + unsigned char buf[64]; + + memset(buf, 0, sizeof buf); + + va_start(args, fmt); + vsnprintf(buf, sizeof buf, fmt, args); + va_end(args); + + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + + return 0; +} + /* * This is the leading connection of a new session, * or session reinstatement. @@ -218,6 +281,7 @@ static int iscsi_login_zero_tsih_s1( { struct iscsi_session *sess = NULL; struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; + enum target_prot_op sup_pro_ops; int ret; sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); @@ -247,19 +311,16 @@ static int iscsi_login_zero_tsih_s1( spin_lock_init(&sess->session_usage_lock); spin_lock_init(&sess->ttt_lock); - if (!idr_pre_get(&sess_idr, GFP_KERNEL)) { - pr_err("idr_pre_get() for sess_idr failed\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); - kfree(sess); - return -ENOMEM; - } + idr_preload(GFP_KERNEL); spin_lock_bh(&sess_idr_lock); - ret = idr_get_new(&sess_idr, NULL, &sess->session_index); + ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT); + if (ret >= 0) + sess->session_index = ret; spin_unlock_bh(&sess_idr_lock); + idr_preload_end(); if (ret < 0) { - pr_err("idr_get_new() for sess_idr failed\n"); + pr_err("idr_alloc() for sess_idr failed\n"); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); kfree(sess); @@ -267,7 +328,6 @@ static int iscsi_login_zero_tsih_s1( } sess->creation_time = get_jiffies_64(); - spin_lock_init(&sess->session_stats_lock); /* * The FFP CmdSN window values will be allocated from the TPG's * Initiator Node's ACL once the login has been successfully completed. @@ -283,8 +343,9 @@ static int iscsi_login_zero_tsih_s1( kfree(sess); return -ENOMEM; } + sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn); - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session(sup_pro_ops); if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); @@ -300,7 +361,7 @@ static int iscsi_login_zero_tsih_s2( { struct iscsi_node_attrib *na; struct iscsi_session *sess = conn->sess; - unsigned char buf[32]; + bool iser = false; sess->tpg = conn->tpg; @@ -308,21 +369,24 @@ static int iscsi_login_zero_tsih_s2( * Assign a new TPG Session Handle. Note this is protected with * struct iscsi_portal_group->np_login_sem from iscsit_access_np(). */ - sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; + sess->tsih = ++sess->tpg->ntsih; if (!sess->tsih) - sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; + sess->tsih = ++sess->tpg->ntsih; /* * Create the default params from user defined values.. */ if (iscsi_copy_param_list(&conn->param_list, - ISCSI_TPG_C(conn)->param_list, 1) < 0) { + conn->tpg->param_list, 1) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; } - iscsi_set_keys_to_negotiate(0, conn->param_list); + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) + iser = true; + + iscsi_set_keys_to_negotiate(conn->param_list, iser); if (sess->sess_ops->SessionType) return iscsi_set_keys_irrelevant_for_discovery( @@ -337,29 +401,80 @@ static int iscsi_login_zero_tsih_s2( * * In our case, we have already located the struct iscsi_tiqn at this point. 
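The sess_idr hunk above is the standard conversion from idr_pre_get()/idr_get_new() to the idr_preload()/idr_alloc() API: preallocate with GFP_KERNEL outside the lock, then allocate with GFP_NOWAIT while holding it. The generic shape of that pattern, as a sketch:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_index(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep, fills per-cpu cache */
	spin_lock_bh(&example_lock);
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock_bh(&example_lock);
	idr_preload_end();

	return id;	/* new id >= 0, or -ENOMEM/-ENOSPC on failure */
}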
*/ - memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) return -1; - } /* * Workaround for Initiators that have broken connection recovery logic. * * "We would really like to get rid of this." Linux-iSCSI.org team */ - memset(buf, 0, 32); - sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl)) return -1; - } if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) return -1; + /* + * Set RDMAExtensions=Yes by default for iSER enabled network portals + */ + if (iser) { + struct iscsi_param *param; + unsigned long mrdsl, off; + int rc; + + if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes")) + return -1; + + /* + * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for + * Immediate Data + Unsolicitied Data-OUT if necessary.. + */ + param = iscsi_find_param_from_key("MaxRecvDataSegmentLength", + conn->param_list); + if (!param) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + rc = kstrtoul(param->value, 0, &mrdsl); + if (rc < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + off = mrdsl % PAGE_SIZE; + if (!off) + goto check_prot; + + if (mrdsl < PAGE_SIZE) + mrdsl = PAGE_SIZE; + else + mrdsl -= off; + + pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down" + " to PAGE_SIZE\n", mrdsl); + + if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl)) + return -1; + /* + * ISER currently requires that ImmediateData + Unsolicited + * Data be disabled when protection / signature MRs are enabled. + */ +check_prot: + if (sess->se_sess->sup_prot_ops & + (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | + TARGET_PROT_DOUT_INSERT)) { + + if (iscsi_change_param_sprintf(conn, "ImmediateData=No")) + return -1; + + if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes")) + return -1; + + pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" + " T10-PI enabled ISER session\n"); + } + } return 0; } @@ -439,6 +554,7 @@ static int iscsi_login_non_zero_tsih_s2( struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; struct se_session *se_sess, *se_sess_tmp; struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; + bool iser = false; spin_lock_bh(&se_tpg->session_lock); list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, @@ -482,13 +598,16 @@ static int iscsi_login_non_zero_tsih_s2( iscsi_login_set_conn_values(sess, conn, pdu->cid); if (iscsi_copy_param_list(&conn->param_list, - ISCSI_TPG_C(conn)->param_list, 0) < 0) { + conn->tpg->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; } - iscsi_set_keys_to_negotiate(0, conn->param_list); + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) + iser = true; + + iscsi_set_keys_to_negotiate(conn->param_list, iser); /* * Need to send TargetPortalGroupTag back in first login response * on any iSCSI connection where the Initiator provides TargetName. 
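The iSER branch above rounds a negotiated MaxRecvDataSegmentLength down to a PAGE_SIZE multiple (never below one page) so Immediate Data and unsolicited Data-OUT stay page-aligned. The arithmetic in isolation, with worked values in the comments assuming 4 KiB pages:

#include <linux/mm.h>

static unsigned long example_align_mrdsl(unsigned long mrdsl)
{
	unsigned long off = mrdsl % PAGE_SIZE;

	if (!off)
		return mrdsl;		/* already aligned: 262144 stays 262144 */
	if (mrdsl < PAGE_SIZE)
		return PAGE_SIZE;	/* 512 is raised to 4096 */
	return mrdsl - off;		/* 8704 is cut down to 8192 */
}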
@@ -496,13 +615,8 @@ static int iscsi_login_non_zero_tsih_s2( * * In our case, we have already located the struct iscsi_tiqn at this point. */ - memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) return -1; - } return iscsi_login_disable_FIM_keys(conn->param_list, conn); } @@ -577,12 +691,17 @@ int iscsi_login_post_auth_non_zero_tsih( static void iscsi_post_login_start_timers(struct iscsi_conn *conn) { struct iscsi_session *sess = conn->sess; + /* + * FIXME: Unsolicitied NopIN support for ISER + */ + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) + return; if (!sess->sess_ops->SessionType) iscsit_start_nopin_timer(conn); } -static int iscsi_post_login_handler( +int iscsi_post_login_handler( struct iscsi_np *np, struct iscsi_conn *conn, u8 zero_tsih) @@ -590,7 +709,7 @@ static int iscsi_post_login_handler( int stop_timer = 0; struct iscsi_session *sess = conn->sess; struct se_session *se_sess = sess->se_sess; - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; struct iscsi_thread_set *ts; @@ -635,6 +754,7 @@ static int iscsi_post_login_handler( spin_unlock_bh(&sess->conn_lock); iscsi_post_login_start_timers(conn); + iscsi_activate_thread_set(conn, ts); /* * Determine CPU mask to ensure connection's RX and TX kthreads @@ -764,12 +884,12 @@ static void iscsi_stop_login_thread_timer(struct iscsi_np *np) spin_unlock_bh(&np->np_thread_lock); } -int iscsi_target_setup_login_socket( +int iscsit_setup_np( struct iscsi_np *np, struct __kernel_sockaddr_storage *sockaddr) { - struct socket *sock; - int backlog = 5, ret, opt = 0, len; + struct socket *sock = NULL; + int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len; switch (np->np_network_transport) { case ISCSI_TCP: @@ -784,15 +904,15 @@ int iscsi_target_setup_login_socket( np->np_ip_proto = IPPROTO_SCTP; np->np_sock_type = SOCK_SEQPACKET; break; - case ISCSI_IWARP_TCP: - case ISCSI_IWARP_SCTP: - case ISCSI_INFINIBAND: default: pr_err("Unsupported network_transport: %d\n", np->np_network_transport); return -EINVAL; } + np->np_ip_proto = IPPROTO_TCP; + np->np_sock_type = SOCK_STREAM; + ret = sock_create(sockaddr->ss_family, np->np_sock_type, np->np_ip_proto, &sock); if (ret < 0) { @@ -856,7 +976,6 @@ int iscsi_target_setup_login_socket( } return 0; - fail: np->np_socket = NULL; if (sock) @@ -864,100 +983,330 @@ fail: return ret; } +int iscsi_target_setup_login_socket( + struct iscsi_np *np, + struct __kernel_sockaddr_storage *sockaddr) +{ + struct iscsit_transport *t; + int rc; + + t = iscsit_get_transport(np->np_network_transport); + if (!t) + return -EINVAL; + + rc = t->iscsit_setup_np(np, sockaddr); + if (rc < 0) { + iscsit_put_transport(t); + return rc; + } + + np->np_transport = t; + np->enabled = true; + return 0; +} + +int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) +{ + struct socket *new_sock, *sock = np->np_socket; + struct sockaddr_in sock_in; + struct sockaddr_in6 sock_in6; + int rc, err; + + rc = kernel_accept(sock, &new_sock, 0); + if (rc < 0) + return rc; + + conn->sock = new_sock; + conn->login_family = np->np_sockaddr.ss_family; + + if (np->np_sockaddr.ss_family == AF_INET6) { + memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); 
+ + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in6, &err, 1); + if (!rc) { + if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) + snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]", + &sock_in6.sin6_addr.in6_u); + else + snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4", + &sock_in6.sin6_addr.s6_addr32[3]); + conn->login_port = ntohs(sock_in6.sin6_port); + } + + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in6, &err, 0); + if (!rc) { + if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) + snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]", + &sock_in6.sin6_addr.in6_u); + else + snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4", + &sock_in6.sin6_addr.s6_addr32[3]); + conn->local_port = ntohs(sock_in6.sin6_port); + } + } else { + memset(&sock_in, 0, sizeof(struct sockaddr_in)); + + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in, &err, 1); + if (!rc) { + sprintf(conn->login_ip, "%pI4", + &sock_in.sin_addr.s_addr); + conn->login_port = ntohs(sock_in.sin_port); + } + + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in, &err, 0); + if (!rc) { + sprintf(conn->local_ip, "%pI4", + &sock_in.sin_addr.s_addr); + conn->local_port = ntohs(sock_in.sin_port); + } + } + + return 0; +} + +int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) +{ + struct iscsi_login_req *login_req; + u32 padding = 0, payload_length; + + if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0) + return -1; + + login_req = (struct iscsi_login_req *)login->req; + payload_length = ntoh24(login_req->dlength); + padding = ((-payload_length) & 3); + + pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," + " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", + login_req->flags, login_req->itt, login_req->cmdsn, + login_req->exp_statsn, login_req->cid, payload_length); + /* + * Setup the initial iscsi_login values from the leading + * login request PDU. + */ + if (login->first_request) { + login_req = (struct iscsi_login_req *)login->req; + login->leading_connection = (!login_req->tsih) ? 
1 : 0; + login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags); + login->version_min = login_req->min_version; + login->version_max = login_req->max_version; + memcpy(login->isid, login_req->isid, 6); + login->cmd_sn = be32_to_cpu(login_req->cmdsn); + login->init_task_tag = login_req->itt; + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); + login->cid = be16_to_cpu(login_req->cid); + login->tsih = be16_to_cpu(login_req->tsih); + } + + if (iscsi_target_check_login_request(conn, login) < 0) + return -1; + + memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); + if (iscsi_login_rx_data(conn, login->req_buf, + payload_length + padding) < 0) + return -1; + + return 0; +} + +int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, + u32 length) +{ + if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0) + return -1; + + return 0; +} + +static int +iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) +{ + int rc; + + if (!t->owner) { + conn->conn_transport = t; + return 0; + } + + rc = try_module_get(t->owner); + if (!rc) { + pr_err("try_module_get() failed for %s\n", t->name); + return -EINVAL; + } + + conn->conn_transport = t; + return 0; +} + +void iscsi_target_login_sess_out(struct iscsi_conn *conn, + struct iscsi_np *np, bool zero_tsih, bool new_sess) +{ + if (!new_sess) + goto old_sess_out; + + pr_err("iSCSI Login negotiation failed.\n"); + iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + if (!zero_tsih || !conn->sess) + goto old_sess_out; + if (conn->sess->se_sess) + transport_free_session(conn->sess->se_sess); + if (conn->sess->session_index != 0) { + spin_lock_bh(&sess_idr_lock); + idr_remove(&sess_idr, conn->sess->session_index); + spin_unlock_bh(&sess_idr_lock); + } + kfree(conn->sess->sess_ops); + kfree(conn->sess); + +old_sess_out: + iscsi_stop_login_thread_timer(np); + /* + * If login negotiation fails check if the Time2Retain timer + * needs to be restarted. 
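One detail of iscsit_get_login_rx() above worth calling out: iSCSI data segments are padded to a 4-byte word boundary, and padding = ((-payload_length) & 3) computes that branch-free. As a sketch:

#include <linux/types.h>

/* pad bytes needed to reach the next 4-byte boundary */
static inline u32 example_iscsi_padding(u32 payload_length)
{
	return (-payload_length) & 3;	/* 0->0, 1->3, 2->2, 3->1, 4->0 */
}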
+ */ + if (!zero_tsih && conn->sess) { + spin_lock_bh(&conn->sess->conn_lock); + if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { + struct se_portal_group *se_tpg = + &conn->tpg->tpg_se_tpg; + + atomic_set(&conn->sess->session_continuation, 0); + spin_unlock_bh(&conn->sess->conn_lock); + spin_lock_bh(&se_tpg->session_lock); + iscsit_start_time2retain_handler(conn->sess); + spin_unlock_bh(&se_tpg->session_lock); + } else + spin_unlock_bh(&conn->sess->conn_lock); + iscsit_dec_session_usage_count(conn->sess); + } + + if (!IS_ERR(conn->conn_rx_hash.tfm)) + crypto_free_hash(conn->conn_rx_hash.tfm); + if (!IS_ERR(conn->conn_tx_hash.tfm)) + crypto_free_hash(conn->conn_tx_hash.tfm); + + if (conn->conn_cpumask) + free_cpumask_var(conn->conn_cpumask); + + kfree(conn->conn_ops); + + if (conn->param_list) { + iscsi_release_param_list(conn->param_list); + conn->param_list = NULL; + } + iscsi_target_nego_release(conn); + + if (conn->sock) { + sock_release(conn->sock); + conn->sock = NULL; + } + + if (conn->conn_transport->iscsit_free_conn) + conn->conn_transport->iscsit_free_conn(conn); + + iscsit_put_transport(conn->conn_transport); + kfree(conn); +} + static int __iscsi_target_login_thread(struct iscsi_np *np) { - u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0; - int err, ret = 0, stop; + u8 *buffer, zero_tsih = 0; + int ret = 0, rc; struct iscsi_conn *conn = NULL; struct iscsi_login *login; struct iscsi_portal_group *tpg = NULL; - struct socket *new_sock, *sock; - struct kvec iov; struct iscsi_login_req *pdu; - struct sockaddr_in sock_in; - struct sockaddr_in6 sock_in6; + struct iscsi_tpg_np *tpg_np; + bool new_sess = false; flush_signals(current); - sock = np->np_socket; spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; complete(&np->np_restart_comp); + } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { + spin_unlock_bh(&np->np_thread_lock); + goto exit; } else { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; } spin_unlock_bh(&np->np_thread_lock); - if (kernel_accept(sock, &new_sock, 0) < 0) { - spin_lock_bh(&np->np_thread_lock); - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { - spin_unlock_bh(&np->np_thread_lock); - complete(&np->np_restart_comp); - /* Get another socket */ - return 1; - } - spin_unlock_bh(&np->np_thread_lock); - goto out; - } - iscsi_start_login_thread_timer(np); - conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); if (!conn) { pr_err("Could not allocate memory for" " new connection\n"); - sock_release(new_sock); /* Get another socket */ return 1; } - pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); conn->conn_state = TARG_CONN_STATE_FREE; - conn->sock = new_sock; - pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); - conn->conn_state = TARG_CONN_STATE_XPT_UP; + if (iscsit_conn_set_transport(conn, np->np_transport) < 0) { + kfree(conn); + return 1; + } - /* - * Allocate conn->conn_ops early as a failure calling - * iscsit_tx_login_rsp() below will call tx_data(). 
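The iscsit_conn_set_transport() helper above is the usual module-pinning idiom: a built-in transport has a NULL owner and needs no reference, while a loadable one such as ib_isert must be pinned with try_module_get() for the connection's lifetime and released afterwards (here via iscsit_put_transport() in the teardown path). The bare pattern, sketched:

#include <linux/module.h>

static int example_pin_transport(struct module *owner)
{
	if (!owner)
		return 0;		/* built-in: can never be unloaded */
	if (!try_module_get(owner))
		return -EINVAL;		/* module is mid-unload */
	return 0;			/* pair with module_put(owner) later */
}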
- */ - conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); - if (!conn->conn_ops) { - pr_err("Unable to allocate memory for" - " struct iscsi_conn_ops.\n"); - goto new_sess_out; + rc = np->np_transport->iscsit_accept_np(np, conn); + if (rc == -ENOSYS) { + complete(&np->np_restart_comp); + iscsit_put_transport(conn->conn_transport); + kfree(conn); + conn = NULL; + goto exit; + } else if (rc < 0) { + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { + spin_unlock_bh(&np->np_thread_lock); + complete(&np->np_restart_comp); + iscsit_put_transport(conn->conn_transport); + kfree(conn); + conn = NULL; + if (ret == -ENODEV) + goto out; + /* Get another socket */ + return 1; + } + spin_unlock_bh(&np->np_thread_lock); + iscsit_put_transport(conn->conn_transport); + kfree(conn); + conn = NULL; + goto out; } /* * Perform the remaining iSCSI connection initialization items.. */ - if (iscsi_login_init_conn(conn) < 0) - goto new_sess_out; - - memset(buffer, 0, ISCSI_HDR_LEN); - memset(&iov, 0, sizeof(struct kvec)); - iov.iov_base = buffer; - iov.iov_len = ISCSI_HDR_LEN; - - if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) { - pr_err("rx_data() returned an error.\n"); + login = iscsi_login_init_conn(conn); + if (!login) { goto new_sess_out; } - iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK); - if (!(iscsi_opcode & ISCSI_OP_LOGIN)) { - pr_err("First opcode is not login request," - " failing login request.\n"); - goto new_sess_out; - } + iscsi_start_login_thread_timer(np); - pdu = (struct iscsi_login_req *) buffer; + pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); + conn->conn_state = TARG_CONN_STATE_XPT_UP; + /* + * This will process the first login request + payload.. + */ + rc = np->np_transport->iscsit_get_login_rx(conn, login); + if (rc == 1) + return 1; + else if (rc < 0) + goto new_sess_out; + buffer = &login->req[0]; + pdu = (struct iscsi_login_req *)buffer; /* * Used by iscsit_tx_login_rsp() for Login Responses PDUs * when Status-Class != 0.
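The first Login Request is no longer read with raw rx_data() plus an opcode check; the thread above dispatches through np->np_transport->iscsit_accept_np() and ->iscsit_get_login_rx(). A sketch of that callback-table indirection under hypothetical names (the real table is struct iscsit_transport, declared in target/iscsi/iscsi_transport.h):

    #include <stdio.h>

    struct np;    /* opaque here */
    struct conn;
    struct login { unsigned char req[48]; };

    /* Hypothetical mirror of the iscsit_transport callback table:
     * each transport (TCP, iSER, ...) supplies its own login I/O. */
    struct transport_ops {
            const char *name;
            int (*accept_np)(struct np *np, struct conn *c);
            int (*get_login_rx)(struct conn *c, struct login *l);
            int (*put_login_tx)(struct conn *c, struct login *l,
                                unsigned int len);
    };

    static int tcp_accept(struct np *np, struct conn *c)
    {
            (void)np; (void)c;
            return 0;  /* would accept a socket and record addresses */
    }

    static int tcp_login_rx(struct conn *c, struct login *l)
    {
            (void)c; (void)l;
            return 0;  /* would read the 48-byte BHS plus key=value payload */
    }

    static int tcp_login_tx(struct conn *c, struct login *l, unsigned int len)
    {
            (void)c; (void)l; (void)len;
            return 0;  /* would send the response header plus payload */
    }

    static const struct transport_ops tcp_ops = {
            .name = "tcp",
            .accept_np = tcp_accept,
            .get_login_rx = tcp_login_rx,
            .put_login_tx = tcp_login_tx,
    };

    int main(void)
    {
            struct login l = { { 0 } };

            /* The login thread only ever dispatches through the table. */
            if (tcp_ops.get_login_rx(NULL, &l) == 0)
                    printf("first Login PDU read via %s transport\n",
                           tcp_ops.name);
            return 0;
    }

With the table in place, iscsi-target and iser-target can share the whole negotiation state machine and differ only in how the bytes move.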
*/ - conn->login_itt = pdu->itt; + conn->login_itt = pdu->itt; spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { @@ -970,61 +1319,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) } spin_unlock_bh(&np->np_thread_lock); - if (np->np_sockaddr.ss_family == AF_INET6) { - memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); - - if (conn->sock->ops->getname(conn->sock, - (struct sockaddr *)&sock_in6, &err, 1) < 0) { - pr_err("sock_ops->getname() failed.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_TARGET_ERROR); - goto new_sess_out; - } - snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", - &sock_in6.sin6_addr.in6_u); - conn->login_port = ntohs(sock_in6.sin6_port); - - if (conn->sock->ops->getname(conn->sock, - (struct sockaddr *)&sock_in6, &err, 0) < 0) { - pr_err("sock_ops->getname() failed.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_TARGET_ERROR); - goto new_sess_out; - } - snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", - &sock_in6.sin6_addr.in6_u); - conn->local_port = ntohs(sock_in6.sin6_port); - - } else { - memset(&sock_in, 0, sizeof(struct sockaddr_in)); - - if (conn->sock->ops->getname(conn->sock, - (struct sockaddr *)&sock_in, &err, 1) < 0) { - pr_err("sock_ops->getname() failed.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_TARGET_ERROR); - goto new_sess_out; - } - sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr); - conn->login_port = ntohs(sock_in.sin_port); - - if (conn->sock->ops->getname(conn->sock, - (struct sockaddr *)&sock_in, &err, 0) < 0) { - pr_err("sock_ops->getname() failed.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_TARGET_ERROR); - goto new_sess_out; - } - sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr); - conn->local_port = ntohs(sock_in.sin_port); - } - conn->network_transport = np->np_network_transport; pr_debug("Received iSCSI login request from %s on %s Network" - " Portal %s:%hu\n", conn->login_ip, - (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP", - conn->local_ip, conn->local_port); + " Portal %s:%hu\n", conn->login_ip, np->np_transport->name, + conn->local_ip, conn->local_port); pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); conn->conn_state = TARG_CONN_STATE_IN_LOGIN; @@ -1053,16 +1352,21 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0) goto new_sess_out; } - /* - * This will process the first login request, and call - * iscsi_target_locate_portal(), and return a valid struct iscsi_login. 
+ * SessionType: Discovery + * + * Locates Default Portal + * + * SessionType: Normal + * + * Locates Target Portal from NP -> Target IQN */ - login = iscsi_target_init_negotiation(np, conn, buffer); - if (!login) { + rc = iscsi_target_locate_portal(np, conn, login); + if (rc < 0) { tpg = conn->tpg; goto new_sess_out; } + login->zero_tsih = zero_tsih; tpg = conn->tpg; if (!tpg) { @@ -1071,18 +1375,15 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) } if (zero_tsih) { - if (iscsi_login_zero_tsih_s2(conn) < 0) { - iscsi_target_nego_release(login, conn); + if (iscsi_login_zero_tsih_s2(conn) < 0) goto new_sess_out; - } } else { - if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) { - iscsi_target_nego_release(login, conn); + if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) goto old_sess_out; - } } - if (iscsi_target_start_negotiation(login, conn) < 0) + ret = iscsi_target_start_negotiation(login, conn); + if (ret < 0) goto new_sess_out; if (!conn->sess) { @@ -1095,91 +1396,43 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (signal_pending(current)) goto new_sess_out; - ret = iscsi_post_login_handler(np, conn, zero_tsih); + if (ret == 1) { + tpg_np = conn->tpg_np; - if (ret < 0) - goto new_sess_out; + ret = iscsi_post_login_handler(np, conn, zero_tsih); + if (ret < 0) + goto new_sess_out; + + iscsit_deaccess_np(np, tpg, tpg_np); + } - iscsit_deaccess_np(np, tpg); tpg = NULL; + tpg_np = NULL; /* Get another socket */ return 1; new_sess_out: - pr_err("iSCSI Login negotiation failed.\n"); - iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, - ISCSI_LOGIN_STATUS_INIT_ERR); - if (!zero_tsih || !conn->sess) - goto old_sess_out; - if (conn->sess->se_sess) - transport_free_session(conn->sess->se_sess); - if (conn->sess->session_index != 0) { - spin_lock_bh(&sess_idr_lock); - idr_remove(&sess_idr, conn->sess->session_index); - spin_unlock_bh(&sess_idr_lock); - } - kfree(conn->sess->sess_ops); - kfree(conn->sess); + new_sess = true; old_sess_out: - iscsi_stop_login_thread_timer(np); - /* - * If login negotiation fails check if the Time2Retain timer - * needs to be restarted. 
- */ - if (!zero_tsih && conn->sess) { - spin_lock_bh(&conn->sess->conn_lock); - if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { - struct se_portal_group *se_tpg = - &ISCSI_TPG_C(conn)->tpg_se_tpg; - - atomic_set(&conn->sess->session_continuation, 0); - spin_unlock_bh(&conn->sess->conn_lock); - spin_lock_bh(&se_tpg->session_lock); - iscsit_start_time2retain_handler(conn->sess); - spin_unlock_bh(&se_tpg->session_lock); - } else - spin_unlock_bh(&conn->sess->conn_lock); - iscsit_dec_session_usage_count(conn->sess); - } - - if (!IS_ERR(conn->conn_rx_hash.tfm)) - crypto_free_hash(conn->conn_rx_hash.tfm); - if (!IS_ERR(conn->conn_tx_hash.tfm)) - crypto_free_hash(conn->conn_tx_hash.tfm); - - if (conn->conn_cpumask) - free_cpumask_var(conn->conn_cpumask); - - kfree(conn->conn_ops); - - if (conn->param_list) { - iscsi_release_param_list(conn->param_list); - conn->param_list = NULL; - } - if (conn->sock) - sock_release(conn->sock); - kfree(conn); + tpg_np = conn->tpg_np; + iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); + new_sess = false; if (tpg) { - iscsit_deaccess_np(np, tpg); + iscsit_deaccess_np(np, tpg, tpg_np); tpg = NULL; + tpg_np = NULL; } out: - stop = kthread_should_stop(); - if (!stop && signal_pending(current)) { - spin_lock_bh(&np->np_thread_lock); - stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN); - spin_unlock_bh(&np->np_thread_lock); - } - /* Wait for another socket.. */ - if (!stop) - return 1; + return 1; +exit: iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); np->np_thread_state = ISCSI_NP_THREAD_EXIT; spin_unlock_bh(&np->np_thread_lock); + return 0; } @@ -1190,7 +1443,7 @@ int iscsi_target_login_thread(void *arg) allow_signal(SIGINT); - while (!kthread_should_stop()) { + while (1) { ret = __iscsi_target_login_thread(np); /* * We break and exit here unless another sock_accept() call diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 091dcae2532..29d098324b7 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -4,8 +4,17 @@ extern int iscsi_login_setup_crypto(struct iscsi_conn *); extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *); extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32); +extern int iscsit_setup_np(struct iscsi_np *, + struct __kernel_sockaddr_storage *); extern int iscsi_target_setup_login_socket(struct iscsi_np *, struct __kernel_sockaddr_storage *); +extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); +extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); +extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); +extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); +extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); +extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, + bool, bool); extern int iscsi_target_login_thread(void *); extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 9d902aefe01..62a095f36bf 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains main functions related to iSCSI Parameter negotiation. 
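In the iscsi_target_nego.c hunk that follows, extract_param() tightens its bounds test from len > max_length to len >= max_length: the extracted value still needs a NUL terminator, so len == max_length previously overflowed the destination by one byte. A standalone illustration of the corrected check (buffer and helper names hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Copy len payload bytes plus a terminating NUL into out[max_length].
     * The old test 'len > max_length' let len == max_length through, but
     * len bytes + NUL need max_length + 1 slots: a one-byte overflow. */
    static int copy_param_value(char *out, unsigned int max_length,
                                const char *val, unsigned int len)
    {
            if (len >= max_length)  /* the corrected bound */
                    return -1;
            memcpy(out, val, len);
            out[len] = '\0';
            return 0;
    }

    int main(void)
    {
            char buf[8];

            printf("len 8 -> %d (rejected, would overflow)\n",
                   copy_param_value(buf, sizeof(buf), "ABCDEFGH", 8));
            printf("len 7 -> %d (fits: 7 bytes + NUL)\n",
                   copy_param_value(buf, sizeof(buf), "ABCDEFG", 7));
            return 0;
    }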
* - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -22,6 +20,7 @@ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" @@ -89,7 +88,7 @@ int extract_param( if (len < 0) return -1; - if (len > max_length) { + if (len >= max_length) { pr_err("Length of input: %d exceeds max_length:" " %d\n", len, max_length); return -1; @@ -111,6 +110,7 @@ static u32 iscsi_handle_authentication( struct iscsi_session *sess = conn->sess; struct iscsi_node_auth *auth; struct iscsi_node_acl *iscsi_nacl; + struct iscsi_portal_group *iscsi_tpg; struct se_node_acl *se_nacl; if (!sess->sess_ops->SessionType) { @@ -131,7 +131,17 @@ static u32 iscsi_handle_authentication( return -1; } - auth = ISCSI_NODE_AUTH(iscsi_nacl); + if (se_nacl->dynamic_node_acl) { + iscsi_tpg = container_of(se_nacl->se_tpg, + struct iscsi_portal_group, tpg_se_tpg); + + auth = &iscsi_tpg->tpg_demo_auth; + } else { + iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl, + se_node_acl); + + auth = &iscsi_nacl->node_auth; + } } else { /* * For SessionType=Discovery @@ -169,7 +179,7 @@ static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn) kfree(conn->auth_protocol); } -static int iscsi_target_check_login_request( +int iscsi_target_check_login_request( struct iscsi_conn *conn, struct iscsi_login *login) { @@ -200,8 +210,8 @@ static int iscsi_target_check_login_request( return -1; } - req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; - req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK); + req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags); + req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags); if (req_csg != login->current_stage) { pr_err("Initiator unexpectedly changed login stage" @@ -352,11 +362,8 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log padding = ((-login->rsp_length) & 3); - if (iscsi_login_tx_data( - conn, - login->rsp, - login->rsp_buf, - login->rsp_length + padding) < 0) + if (conn->conn_transport->iscsit_put_login_tx(conn, login, + login->rsp_length + padding) < 0) return -1; login->rsp_length = 0; @@ -368,75 +375,284 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log return 0; } -static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login) +static void iscsi_target_sk_data_ready(struct sock *sk) { - u32 padding = 0, payload_length; - struct iscsi_login_req *login_req; + struct iscsi_conn *conn = sk->sk_user_data; + bool rc; - if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0) - return -1; + pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn); - login_req = (struct iscsi_login_req *) login->req; - payload_length = ntoh24(login_req->dlength); + write_lock_bh(&sk->sk_callback_lock); + if (!sk->sk_user_data) { + write_unlock_bh(&sk->sk_callback_lock); + return; + } + if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) { + write_unlock_bh(&sk->sk_callback_lock); + pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn); + return; + } + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + write_unlock_bh(&sk->sk_callback_lock); + pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn); +
return; + } + if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { + write_unlock_bh(&sk->sk_callback_lock); + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); + return; + } - pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," - " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", - login_req->flags, login_req->itt, login_req->cmdsn, - login_req->exp_statsn, login_req->cid, payload_length); + rc = schedule_delayed_work(&conn->login_work, 0); + if (!rc) { + pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work" + " got false\n"); + } + write_unlock_bh(&sk->sk_callback_lock); +} - if (iscsi_target_check_login_request(conn, login) < 0) - return -1; +static void iscsi_target_sk_state_change(struct sock *); - padding = ((-payload_length) & 3); - memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); +static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn) +{ + struct sock *sk; - if (iscsi_login_rx_data( - conn, - login->req_buf, - payload_length + padding) < 0) - return -1; + if (!conn->sock) + return; - return 0; + sk = conn->sock->sk; + pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn); + + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = conn; + conn->orig_data_ready = sk->sk_data_ready; + conn->orig_state_change = sk->sk_state_change; + sk->sk_data_ready = iscsi_target_sk_data_ready; + sk->sk_state_change = iscsi_target_sk_state_change; + write_unlock_bh(&sk->sk_callback_lock); + + sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ; + sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ; } -static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login) +static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) { - if (iscsi_target_do_tx_login_io(conn, login) < 0) - return -1; + struct sock *sk; - if (iscsi_target_do_rx_login_io(conn, login) < 0) - return -1; + if (!conn->sock) + return; - return 0; + sk = conn->sock->sk; + pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn); + + write_lock_bh(&sk->sk_callback_lock); + if (!sk->sk_user_data) { + write_unlock_bh(&sk->sk_callback_lock); + return; + } + sk->sk_user_data = NULL; + sk->sk_data_ready = conn->orig_data_ready; + sk->sk_state_change = conn->orig_state_change; + write_unlock_bh(&sk->sk_callback_lock); + + sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; + sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; } -static int iscsi_target_get_initial_payload( - struct iscsi_conn *conn, - struct iscsi_login *login) +static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); + +static bool iscsi_target_sk_state_check(struct sock *sk) { - u32 padding = 0, payload_length; - struct iscsi_login_req *login_req; + if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { + pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," + "returning FALSE\n"); + return false; + } + return true; +} - login_req = (struct iscsi_login_req *) login->req; - payload_length = ntoh24(login_req->dlength); +static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) +{ + struct iscsi_np *np = login->np; + bool zero_tsih = login->zero_tsih; - pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," - " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", - login_req->flags, login_req->itt, login_req->cmdsn, - login_req->exp_statsn, payload_length); + iscsi_remove_failed_auth_entry(conn); + iscsi_target_nego_release(conn); + iscsi_target_login_sess_out(conn, np, zero_tsih, true); +} - if 
(iscsi_target_check_login_request(conn, login) < 0) - return -1; +static void iscsi_target_login_timeout(unsigned long data) +{ + struct iscsi_conn *conn = (struct iscsi_conn *)data; - padding = ((-payload_length) & 3); + pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); - if (iscsi_login_rx_data( - conn, - login->req_buf, - payload_length + padding) < 0) - return -1; + if (conn->login_kworker) { + pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", + conn->login_kworker->comm, conn->login_kworker->pid); + send_sig(SIGINT, conn->login_kworker, 1); + } +} - return 0; +static void iscsi_target_do_login_rx(struct work_struct *work) +{ + struct iscsi_conn *conn = container_of(work, + struct iscsi_conn, login_work.work); + struct iscsi_login *login = conn->login; + struct iscsi_np *np = login->np; + struct iscsi_portal_group *tpg = conn->tpg; + struct iscsi_tpg_np *tpg_np = conn->tpg_np; + struct timer_list login_timer; + int rc, zero_tsih = login->zero_tsih; + bool state; + + pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", + conn, current->comm, current->pid); + + spin_lock(&tpg->tpg_state_lock); + state = (tpg->tpg_state == TPG_STATE_ACTIVE); + spin_unlock(&tpg->tpg_state_lock); + + if (!state) { + pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); + return; + } + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = iscsi_target_sk_state_check(sk); + read_unlock_bh(&sk->sk_callback_lock); + + if (!state) { + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); + return; + } + } + + conn->login_kworker = current; + allow_signal(SIGINT); + + init_timer(&login_timer); + login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ); + login_timer.data = (unsigned long)conn; + login_timer.function = iscsi_target_login_timeout; + add_timer(&login_timer); + pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid); + + rc = conn->conn_transport->iscsit_get_login_rx(conn, login); + del_timer_sync(&login_timer); + flush_signals(current); + conn->login_kworker = NULL; + + if (rc < 0) { + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); + return; + } + + pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", + conn, current->comm, current->pid); + + rc = iscsi_target_do_login(conn, login); + if (rc < 0) { + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); + } else if (!rc) { + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + } else if (rc == 1) { + iscsi_target_nego_release(conn); + iscsi_post_login_handler(np, conn, zero_tsih); + iscsit_deaccess_np(np, tpg, tpg_np); + } +} + +static void iscsi_target_do_cleanup(struct work_struct *work) +{ + struct iscsi_conn *conn = container_of(work, + struct iscsi_conn, login_cleanup_work.work); + struct sock *sk = conn->sock->sk; + struct iscsi_login *login = conn->login; + struct iscsi_np *np = login->np; + struct iscsi_portal_group *tpg = conn->tpg; + struct iscsi_tpg_np 
*tpg_np = conn->tpg_np; + + pr_debug("Entering iscsi_target_do_cleanup\n"); + + cancel_delayed_work_sync(&conn->login_work); + conn->orig_state_change(sk); + + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); + + pr_debug("iscsi_target_do_cleanup done()\n"); +} + +static void iscsi_target_sk_state_change(struct sock *sk) +{ + struct iscsi_conn *conn; + void (*orig_state_change)(struct sock *); + bool state; + + pr_debug("Entering iscsi_target_sk_state_change\n"); + + write_lock_bh(&sk->sk_callback_lock); + conn = sk->sk_user_data; + if (!conn) { + write_unlock_bh(&sk->sk_callback_lock); + return; + } + orig_state_change = conn->orig_state_change; + + if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n", + conn); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" + " conn: %p\n", conn); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } + if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", + conn); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } + + state = iscsi_target_sk_state_check(sk); + write_unlock_bh(&sk->sk_callback_lock); + + pr_debug("iscsi_target_sk_state_change: state: %d\n", state); + + if (!state) { + pr_debug("iscsi_target_sk_state_change got failed state\n"); + schedule_delayed_work(&conn->login_cleanup_work, 0); + return; + } + orig_state_change(sk); } /* @@ -557,6 +773,12 @@ static int iscsi_target_handle_csg_zero( } goto do_auth; + } else if (!payload_length) { + pr_err("Initiator sent zero length security payload," + " login failed\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_AUTH_FAILED); + return -1; } if (login->first_request) @@ -573,7 +795,7 @@ static int iscsi_target_handle_csg_zero( return -1; if (!iscsi_check_negotiated_keys(conn->param_list)) { - if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && + if (conn->tpg->tpg_attrib.authentication && !strncmp(param->value, NONE, 4)) { pr_err("Initiator sent AuthMethod=None but" " Target is enforcing iSCSI Authentication," @@ -583,7 +805,7 @@ static int iscsi_target_handle_csg_zero( return -1; } - if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && + if (conn->tpg->tpg_attrib.authentication && !login->auth_complete) return 0; @@ -646,7 +868,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log } if (!login->auth_complete && - ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) { + conn->tpg->tpg_attrib.authentication) { pr_err("Initiator is requesting CSG: 1, has not been" " successfully authenticated, and the Target is" " enforcing iSCSI Authentication, login failed.\n"); @@ -681,9 +903,9 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo return -1; } - switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) { + switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) { case 0: - login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK); + login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK; if (iscsi_target_handle_csg_zero(conn, login) < 0) return -1; break; @@ -693,27 +915,44 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login 
*lo return -1; if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { login->tsih = conn->sess->tsih; + login->login_complete = 1; + iscsi_target_restore_sock_callbacks(conn); if (iscsi_target_do_tx_login_io(conn, login) < 0) return -1; - return 0; + return 1; } break; default: pr_err("Illegal CSG: %d received from" " Initiator, protocol error.\n", - (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) - >> 2); + ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)); break; } - if (iscsi_target_do_login_io(conn, login) < 0) + if (iscsi_target_do_tx_login_io(conn, login) < 0) return -1; if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT; login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; } + break; + } + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + bool state; + + read_lock_bh(&sk->sk_callback_lock); + state = iscsi_target_sk_state_check(sk); + read_unlock_bh(&sk->sk_callback_lock); + + if (!state) { + pr_debug("iscsi_target_do_login() failed state for" + " conn: %p\n", conn); + return -1; + } } return 0; @@ -737,7 +976,7 @@ static void iscsi_initiatorname_tolower( /* * Processes the first Login Request.. */ -static int iscsi_target_locate_portal( +int iscsi_target_locate_portal( struct iscsi_np *np, struct iscsi_conn *conn, struct iscsi_login *login) @@ -746,29 +985,21 @@ static int iscsi_target_locate_portal( char *tmpbuf, *start = NULL, *end = NULL, *key, *value; struct iscsi_session *sess = conn->sess; struct iscsi_tiqn *tiqn; + struct iscsi_tpg_np *tpg_np = NULL; struct iscsi_login_req *login_req; - u32 payload_length; - int sessiontype = 0, ret = 0; + struct se_node_acl *se_nacl; + u32 payload_length, queue_depth = 0; + int sessiontype = 0, ret = 0, tag_num, tag_size; + + INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx); + INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup); + iscsi_target_set_sock_callbacks(conn); + + login->np = np; login_req = (struct iscsi_login_req *) login->req; payload_length = ntoh24(login_req->dlength); - login->first_request = 1; - login->leading_connection = (!login_req->tsih) ? 1 : 0; - login->current_stage = - (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; - login->version_min = login_req->min_version; - login->version_max = login_req->max_version; - memcpy(login->isid, login_req->isid, 6); - login->cmd_sn = be32_to_cpu(login_req->cmdsn); - login->init_task_tag = login_req->itt; - login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); - login->cid = be16_to_cpu(login_req->cid); - login->tsih = be16_to_cpu(login_req->tsih); - - if (iscsi_target_get_initial_payload(conn, login) < 0) - return -1; - tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL); if (!tmpbuf) { pr_err("Unable to allocate memory for tmpbuf.\n"); @@ -799,7 +1030,6 @@ static int iscsi_target_locate_portal( start += strlen(key) + strlen(value) + 2; } - /* * See 5.3. Login Phase. */ @@ -859,7 +1089,7 @@ static int iscsi_target_locate_portal( goto out; } ret = 0; - goto out; + goto alloc_tags; } get_target: @@ -890,7 +1120,7 @@ get_target: /* * Locate Target Portal Group from Storage Node. 
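The stage handling above switches on ISCSI_LOGIN_CURRENT_STAGE() instead of open-coded mask-and-shift. In the RFC 3720 Login Request flags byte the Transit bit is 0x80, the Continue bit 0x40, CSG sits in bits 3-2 and NSG in bits 1-0; a standalone decoding sketch (macro names local to this example):

    #include <stdio.h>

    /* Local mirrors of the RFC 3720 Login Request flag layout. */
    #define LOGIN_FLAG_TRANSIT  0x80  /* T: request stage transition */
    #define LOGIN_FLAG_CONTINUE 0x40  /* C: text spans multiple PDUs */
    #define LOGIN_CSG_MASK      0x0C  /* current stage, bits 3-2 */
    #define LOGIN_NSG_MASK      0x03  /* next stage, bits 1-0 */

    static unsigned int csg(unsigned char flags)
    {
            return (flags & LOGIN_CSG_MASK) >> 2;
    }

    static unsigned int nsg(unsigned char flags)
    {
            return flags & LOGIN_NSG_MASK;
    }

    int main(void)
    {
            /* T=1, CSG=1 (operational negotiation), NSG=3 (full feature) */
            unsigned char flags = LOGIN_FLAG_TRANSIT | (1 << 2) | 3;

            printf("T=%d CSG=%u NSG=%u\n",
                   !!(flags & LOGIN_FLAG_TRANSIT), csg(flags), nsg(flags));
            return 0;
    }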
 */ - conn->tpg = iscsit_get_tpg_from_np(tiqn, np); + conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np); if (!conn->tpg) { pr_err("Unable to locate Target Portal Group" " on %s\n", tiqn->tiqn); @@ -900,12 +1130,16 @@ get_target: ret = -1; goto out; } + conn->tpg_np = tpg_np; pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt); /* * Setup crc32c modules from libcrypto */ if (iscsi_login_setup_crypto(conn) < 0) { pr_err("iscsi_login_setup_crypto() failed\n"); + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); + iscsit_put_tiqn_for_login(tiqn); + conn->tpg = NULL; ret = -1; goto out; } @@ -914,11 +1148,12 @@ get_target: * process login attempt. */ if (iscsit_access_np(np, conn->tpg) < 0) { + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); iscsit_put_tiqn_for_login(tiqn); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); - ret = -1; conn->tpg = NULL; + ret = -1; goto out; } @@ -951,107 +1186,69 @@ get_target: ret = -1; goto out; } - - ret = 0; -out: - kfree(tmpbuf); - return ret; -} - -struct iscsi_login *iscsi_target_init_negotiation( - struct iscsi_np *np, - struct iscsi_conn *conn, - char *login_pdu) -{ - struct iscsi_login *login; - - login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL); - if (!login) { - pr_err("Unable to allocate memory for struct iscsi_login.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); - return NULL; - } - - login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL); - if (!login->req) { - pr_err("Unable to allocate memory for Login Request.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); - goto out; - } - - login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); - if (!login->req_buf) { - pr_err("Unable to allocate memory for response buffer.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); - goto out; - } + se_nacl = sess->se_sess->se_node_acl; + queue_depth = se_nacl->queue_depth; /* - * SessionType: Discovery + * Setup pre-allocated tags based upon allowed per NodeACL CmdSN + * depth for non-immediate commands, plus extra tags for immediate + * commands. * - * Locates Default Portal - * - * SessionType: Normal - * - * Locates Target Portal from NP -> Target IQN + * Also enforce an ISCSIT_MIN_TAGS to prevent unnecessary contention + * in per-cpu-ida tag allocation logic + small queue_depth.
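A worked instance of the sizing rule in the comment above, assuming the in-tree constants ISCSIT_MIN_TAGS=16 and ISCSIT_EXTRA_TAGS=8 (treat the exact values as illustrative): the pool is twice the effective queue depth, floored at the minimum, plus slack for immediate commands.

    #include <stdio.h>

    /* Illustrative values; the kernel defines its own constants. */
    #define ISCSIT_MIN_TAGS   16
    #define ISCSIT_EXTRA_TAGS 8

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
            return a > b ? a : b;
    }

    int main(void)
    {
            unsigned int queue_depth = 4;  /* small per-ACL CmdSN depth */
            unsigned int tag_num = max_u(ISCSIT_MIN_TAGS, queue_depth);

            /* Twice the depth for outstanding commands, plus slack for
             * immediate commands, matching the alloc_tags: path. */
            tag_num = tag_num * 2 + ISCSIT_EXTRA_TAGS;
            printf("queue_depth=%u -> tag_num=%u\n", queue_depth, tag_num);
            return 0;
    }

The floor keeps a small per-ACL queue depth from producing a tiny tag pool that would make the per-cpu-ida allocator contend.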
*/ - if (iscsi_target_locate_portal(np, conn, login) < 0) { - goto out; - } +alloc_tags: + tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; + tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; - return login; + ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); + if (ret < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + ret = -1; + } out: - kfree(login->req); - kfree(login->req_buf); - kfree(login); - - return NULL; + kfree(tmpbuf); + return ret; } int iscsi_target_start_negotiation( struct iscsi_login *login, struct iscsi_conn *conn) { - int ret = -1; + int ret; - login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); - if (!login->rsp) { - pr_err("Unable to allocate memory for" - " Login Response.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); - ret = -1; - goto out; - } + ret = iscsi_target_do_login(conn, login); + if (!ret) { + if (conn->sock) { + struct sock *sk = conn->sock->sk; - login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); - if (!login->rsp_buf) { - pr_err("Unable to allocate memory for" - " request buffer.\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); - ret = -1; - goto out; + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + } else if (ret < 0) { + cancel_delayed_work_sync(&conn->login_work); + cancel_delayed_work_sync(&conn->login_cleanup_work); + iscsi_target_restore_sock_callbacks(conn); + iscsi_remove_failed_auth_entry(conn); } - - ret = iscsi_target_do_login(conn, login); -out: if (ret != 0) - iscsi_remove_failed_auth_entry(conn); + iscsi_target_nego_release(conn); - iscsi_target_nego_release(login, conn); return ret; } -void iscsi_target_nego_release( - struct iscsi_login *login, - struct iscsi_conn *conn) +void iscsi_target_nego_release(struct iscsi_conn *conn) { - kfree(login->req); - kfree(login->rsp); + struct iscsi_login *login = conn->conn_login; + + if (!login) + return; + kfree(login->req_buf); kfree(login->rsp_buf); kfree(login); + + conn->conn_login = NULL; } diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h index 92e133a5158..f021cbd330e 100644 --- a/drivers/target/iscsi/iscsi_target_nego.h +++ b/drivers/target/iscsi/iscsi_target_nego.h @@ -7,11 +7,14 @@ extern void convert_null_to_semi(char *, int); extern int extract_param(const char *, const char *, unsigned int, char *, unsigned char *); -extern struct iscsi_login *iscsi_target_init_negotiation( - struct iscsi_np *, struct iscsi_conn *, char *); +extern int iscsi_target_check_login_request(struct iscsi_conn *, + struct iscsi_login *); +extern int iscsi_target_get_initial_payload(struct iscsi_conn *, + struct iscsi_login *); +extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *, + struct iscsi_login *); extern int iscsi_target_start_negotiation( struct iscsi_login *, struct iscsi_conn *); -extern void iscsi_target_nego_release( - struct iscsi_login *, struct iscsi_conn *); +extern void iscsi_target_nego_release(struct iscsi_conn *); #endif /* ISCSI_TARGET_NEGO_H */ diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c index 11dc2936af7..16454a922e2 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.c +++ 
b/drivers/target/iscsi/iscsi_target_nodeattrib.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains the main functions related to Initiator Node Attributes. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -35,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname( } void iscsit_set_default_node_attribues( - struct iscsi_node_acl *acl) + struct iscsi_node_acl *acl, + struct iscsi_portal_group *tpg) { struct iscsi_node_attrib *a = &acl->node_attrib; @@ -46,7 +45,7 @@ void iscsit_set_default_node_attribues( a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS; a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS; a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS; - a->default_erl = NA_DEFAULT_ERL; + a->default_erl = tpg->tpg_attrib.default_erl; } int iscsit_na_dataout_timeout( diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h index c970b326ef2..0c69a46a62e 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.h +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h @@ -1,7 +1,8 @@ #ifndef ISCSI_TARGET_NODEATTRIB_H #define ISCSI_TARGET_NODEATTRIB_H -extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *); +extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *, + struct iscsi_portal_group *); extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32); extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32); extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index d89164287d0..02f9de26f38 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains main functions related to iSCSI Parameter negotiation. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -59,7 +57,7 @@ int iscsi_login_tx_data( char *text_buf, int text_length) { - int length, tx_sent; + int length, tx_sent, iov_cnt = 1; struct kvec iov[2]; length = (ISCSI_HDR_LEN + text_length); @@ -67,8 +65,12 @@ int iscsi_login_tx_data( memset(&iov[0], 0, 2 * sizeof(struct kvec)); iov[0].iov_len = ISCSI_HDR_LEN; iov[0].iov_base = pdu_buf; - iov[1].iov_len = text_length; - iov[1].iov_base = text_buf; + + if (text_buf && text_length) { + iov[1].iov_len = text_length; + iov[1].iov_base = text_buf; + iov_cnt++; + } /* * Initial Marker-less Interval.
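The iscsi_login_tx_data() change above fills the second kvec only when a text payload is present, so a header-only Login Response goes out as a single iovec rather than a pair with a zero-length tail. A userspace analogue using writev() (the helper name and the 48-byte HDR_LEN stand-in are hypothetical):

    #include <string.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    #define HDR_LEN 48  /* ISCSI_HDR_LEN stand-in */

    /* Fill the payload iovec only when there is data, mirroring the
     * iov_cnt logic added to iscsi_login_tx_data(). */
    static ssize_t login_tx(int fd, void *hdr, char *text, size_t text_len)
    {
            struct iovec iov[2];
            int iov_cnt = 1;

            memset(iov, 0, sizeof(iov));
            iov[0].iov_base = hdr;
            iov[0].iov_len = HDR_LEN;

            if (text && text_len) {
                    iov[1].iov_base = text;
                    iov[1].iov_len = text_len;
                    iov_cnt++;
            }
            return writev(fd, iov, iov_cnt);
    }

    int main(void)
    {
            char hdr[HDR_LEN] = { 0 };

            /* Header-only case: one iovec, no zero-length second entry. */
            return login_tx(1, hdr, NULL, 0) == HDR_LEN ? 0 : 1;
    }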
@@ -77,7 +79,7 @@ int iscsi_login_tx_data( */ conn->if_marker += length; - tx_sent = tx_data(conn, &iov[0], 2, length); + tx_sent = tx_data(conn, &iov[0], iov_cnt, length); if (tx_sent != length) { pr_err("tx_data returned %d, expecting %d.\n", tx_sent, length); @@ -429,6 +431,28 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) TYPERANGE_MARKINT, USE_INITIAL_ONLY); if (!param) goto out; + /* + * Extra parameters for ISER from RFC-5046 + */ + param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_BOOL_AND, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH, + INITIAL_INITIATORRECVDATASEGMENTLENGTH, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_512_TO_16777215, USE_ALL); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH, + INITIAL_TARGETRECVDATASEGMENTLENGTH, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_512_TO_16777215, USE_ALL); + if (!param) + goto out; *param_list_ptr = pl; return 0; @@ -438,19 +462,23 @@ out: } int iscsi_set_keys_to_negotiate( - int sessiontype, - struct iscsi_param_list *param_list) + struct iscsi_param_list *param_list, + bool iser) { struct iscsi_param *param; + param_list->iser = iser; + list_for_each_entry(param, ¶m_list->param_list, p_list) { param->state = 0; if (!strcmp(param->name, AUTHMETHOD)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, HEADERDIGEST)) { - SET_PSTATE_NEGOTIATE(param); + if (!iser) + SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, DATADIGEST)) { - SET_PSTATE_NEGOTIATE(param); + if (!iser) + SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXCONNECTIONS)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, TARGETNAME)) { @@ -469,7 +497,8 @@ int iscsi_set_keys_to_negotiate( } else if (!strcmp(param->name, IMMEDIATEDATA)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { - SET_PSTATE_NEGOTIATE(param); + if (!iser) + SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { continue; } else if (!strcmp(param->name, MAXBURSTLENGTH)) { @@ -498,6 +527,15 @@ int iscsi_set_keys_to_negotiate( SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, OFMARKINT)) { SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { + if (iser) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { + if (iser) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { + if (iser) + SET_PSTATE_NEGOTIATE(param); } } @@ -540,6 +578,12 @@ int iscsi_set_keys_irrelevant_for_discovery( param->state &= ~PSTATE_NEGOTIATE; else if (!strcmp(param->name, OFMARKINT)) param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, RDMAEXTENSIONS)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) + param->state &= ~PSTATE_NEGOTIATE; } return 0; @@ -712,9 +756,9 @@ static int iscsi_add_notunderstood_response( } INIT_LIST_HEAD(&extra_response->er_list); - strncpy(extra_response->key, key, strlen(key) + 1); - strncpy(extra_response->value, NOTUNDERSTOOD, - strlen(NOTUNDERSTOOD) + 1); + strlcpy(extra_response->key, key, 
sizeof(extra_response->key)); + strlcpy(extra_response->value, NOTUNDERSTOOD, + sizeof(extra_response->value)); list_add_tail(&extra_response->er_list, ¶m_list->extra_response_list); @@ -1095,11 +1139,11 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value, SET_PSTATE_REPLY_OPTIONAL(param); } } else if (IS_TYPE_NUMBER(param)) { - char *tmpptr, buf[10]; + char *tmpptr, buf[11]; u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0); u32 proposer_value = simple_strtoul(value, &tmpptr, 0); - memset(buf, 0, 10); + memset(buf, 0, sizeof(buf)); if (!strcmp(param->name, MAXCONNECTIONS) || !strcmp(param->name, MAXBURSTLENGTH) || @@ -1136,7 +1180,7 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value, unsigned long long tmp; int rc; - rc = strict_strtoull(param->value, 0, &tmp); + rc = kstrtoull(param->value, 0, &tmp); if (rc < 0) return -1; @@ -1503,8 +1547,8 @@ static int iscsi_enforce_integrity_rules( FirstBurstLength = simple_strtoul(param->value, &tmpptr, 0); if (FirstBurstLength > MaxBurstLength) { - char tmpbuf[10]; - memset(tmpbuf, 0, 10); + char tmpbuf[11]; + memset(tmpbuf, 0, sizeof(tmpbuf)); sprintf(tmpbuf, "%u", MaxBurstLength); if (iscsi_update_param_value(param, tmpbuf)) return -1; @@ -1561,7 +1605,7 @@ int iscsi_decode_text_input( tmpbuf = kzalloc(length + 1, GFP_KERNEL); if (!tmpbuf) { - pr_err("Unable to allocate memory for tmpbuf.\n"); + pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); return -1; } @@ -1583,8 +1627,6 @@ int iscsi_decode_text_input( if (phase & PHASE_SECURITY) { if (iscsi_check_for_auth_key(key) > 0) { - char *tmpptr = key + strlen(key); - *tmpptr = '='; kfree(tmpbuf); return 1; } @@ -1800,6 +1842,22 @@ void iscsi_set_connection_parameters( simple_strtoul(param->value, &tmpptr, 0); pr_debug("IFMarkInt: %s\n", param->value); + } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { + ops->InitiatorRecvDataSegmentLength = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("InitiatorRecvDataSegmentLength: %s\n", + param->value); + ops->MaxRecvDataSegmentLength = + ops->InitiatorRecvDataSegmentLength; + pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n"); + } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { + ops->TargetRecvDataSegmentLength = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("TargetRecvDataSegmentLength: %s\n", + param->value); + ops->MaxXmitDataSegmentLength = + ops->TargetRecvDataSegmentLength; + pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n"); } } pr_debug("----------------------------------------------------" @@ -1912,6 +1970,10 @@ void iscsi_set_session_parameters( ops->SessionType = !strcmp(param->value, DISCOVERY); pr_debug("SessionType: %s\n", param->value); + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { + ops->RDMAExtensions = !strcmp(param->value, YES); + pr_debug("RDMAExtensions: %s\n", + param->value); } } pr_debug("----------------------------------------------------" diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index 1e1b7504a76..a47046a752a 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h @@ -1,8 +1,10 @@ #ifndef ISCSI_PARAMETERS_H #define ISCSI_PARAMETERS_H +#include <scsi/iscsi_proto.h> + struct iscsi_extra_response { - char key[64]; + char key[KEY_MAXLEN]; char value[32]; struct list_head er_list; } ____cacheline_aligned; @@ -27,7 +29,7 @@ extern void iscsi_dump_conn_ops(struct 
iscsi_conn_ops *); extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *); extern void iscsi_print_params(struct iscsi_param_list *); extern int iscsi_create_default_params(struct iscsi_param_list **); -extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *); +extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool); extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *); extern int iscsi_copy_param_list(struct iscsi_param_list **, struct iscsi_param_list *, int); @@ -89,6 +91,13 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, #define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft" /* + * Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046 + */ +#define RDMAEXTENSIONS "RDMAExtensions" +#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" +#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" + +/* * For AuthMethod. */ #define KRB5 "KRB5" @@ -133,6 +142,13 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, #define INITIAL_OFMARKINT "2048~65535" /* + * Initial values for iSER parameters following RFC-5046 Section 6 + */ +#define INITIAL_RDMAEXTENSIONS NO +#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" +#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" + +/* * For [Header,Data]Digests. */ #define CRC32C "CRC32C" diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c index edb592a368e..ca41b583f2f 100644 --- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c @@ -2,9 +2,7 @@ * This file contains main functions related to iSCSI DataSequenceInOrder=No * and DataPDUInOrder=No. * - © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c index 421d6947dc6..10339551030 100644 --- a/drivers/target/iscsi/iscsi_target_stat.c +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -2,9 +2,7 @@ * Modern ConfigFS group context specific iSCSI statistics based on original * iscsi_target_mib.c code * - * Copyright (c) 2011 Rising Tide Systems - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * Copyright (c) 2011-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -177,7 +175,7 @@ ISCSI_STAT_INSTANCE_ATTR_RO(description); static ssize_t iscsi_stat_instance_show_attr_vendor( struct iscsi_wwn_stat_grps *igrps, char *page) { - return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n"); + return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n"); } ISCSI_STAT_INSTANCE_ATTR_RO(vendor); @@ -410,14 +408,16 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type( struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; - unsigned char buf[8]; + int ret; spin_lock(&lstat->lock); - snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
- "ipv6" : "ipv4"); + if (lstat->last_intr_fail_ip_family == AF_INET6) + ret = snprintf(page, PAGE_SIZE, "ipv6\n"); + else + ret = snprintf(page, PAGE_SIZE, "ipv4\n"); spin_unlock(&lstat->lock); - return snprintf(page, PAGE_SIZE, "%s\n", buf); + return ret; } ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type); @@ -427,16 +427,13 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr( struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; - unsigned char buf[32]; + int ret; spin_lock(&lstat->lock); - if (lstat->last_intr_fail_ip_family == AF_INET6) - snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr); - else - snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr); + ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr); spin_unlock(&lstat->lock); - return snprintf(page, PAGE_SIZE, "%s\n", buf); + return ret; } ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr); @@ -795,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->cmd_pdus)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -818,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rsp_pdus)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -841,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)sess->tx_data_octets); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->tx_data_octets)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -865,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)sess->rx_data_octets); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rx_data_octets)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -889,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", - sess->conn_digest_errors); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_digest_errors)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -913,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", - sess->conn_timeout_errors); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_timeout_errors)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index 9d4417aae92..78404b1cc0b 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains the iSCSI Target specific Task Management functions. * - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 
+ * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -23,6 +21,7 @@ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> #include "iscsi_target_core.h" #include "iscsi_target_seq_pdu_list.h" @@ -301,7 +300,7 @@ static int iscsit_task_reassign_complete_write( /* * iscsit_build_r2ts_for_cmd() can handle the rest from here. */ - return iscsit_build_r2ts_for_cmd(cmd, conn, true); + return conn->conn_transport->iscsit_get_dataout(conn, cmd, true); } static int iscsit_task_reassign_complete_read( @@ -471,6 +470,7 @@ int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn) return 0; } +EXPORT_SYMBOL(iscsit_tmr_post_handler); /* * Nothing to do here, but leave it for good measure. :-) diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index de9ea32b610..c3cb5c15efd 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains iSCSI Target Portal Group related functions. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -31,6 +29,8 @@ #include "iscsi_target.h" #include "iscsi_target_parameters.h" +#include <target/iscsi/iscsi_transport.h> + struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt) { struct iscsi_portal_group *tpg; @@ -47,7 +47,7 @@ struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u1 INIT_LIST_HEAD(&tpg->tpg_gnp_list); INIT_LIST_HEAD(&tpg->tpg_list); mutex_init(&tpg->tpg_access_lock); - mutex_init(&tpg->np_login_lock); + sema_init(&tpg->np_login_sem, 1); spin_lock_init(&tpg->tpg_state_lock); spin_lock_init(&tpg->tpg_np_lock); @@ -127,7 +127,8 @@ void iscsit_release_discovery_tpg(void) struct iscsi_portal_group *iscsit_get_tpg_from_np( struct iscsi_tiqn *tiqn, - struct iscsi_np *np) + struct iscsi_np *np, + struct iscsi_tpg_np **tpg_np_out) { struct iscsi_portal_group *tpg = NULL; struct iscsi_tpg_np *tpg_np; @@ -136,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { spin_lock(&tpg->tpg_state_lock); - if (tpg->tpg_state == TPG_STATE_FREE) { + if (tpg->tpg_state != TPG_STATE_ACTIVE) { spin_unlock(&tpg->tpg_state_lock); continue; } @@ -145,6 +146,8 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( spin_lock(&tpg->tpg_np_lock); list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { if (tpg_np->tpg_np == np) { + *tpg_np_out = tpg_np; + kref_get(&tpg_np->tpg_np_kref); spin_unlock(&tpg->tpg_np_lock); spin_unlock(&tiqn->tiqn_tpg_lock); return tpg; @@ -173,18 +176,22 @@ void iscsit_put_tpg(struct iscsi_portal_group *tpg) static void iscsit_clear_tpg_np_login_thread( struct iscsi_tpg_np *tpg_np, - struct iscsi_portal_group *tpg) + struct iscsi_portal_group *tpg, + bool shutdown) { if (!tpg_np->tpg_np) { pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); return; } - iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg); + if (shutdown) + tpg_np->tpg_np->enabled = false; + iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); } -void iscsit_clear_tpg_np_login_threads( - struct iscsi_portal_group
*tpg) +static void iscsit_clear_tpg_np_login_threads( + struct iscsi_portal_group *tpg, + bool shutdown) { struct iscsi_tpg_np *tpg_np; @@ -195,7 +202,7 @@ void iscsit_clear_tpg_np_login_threads( continue; } spin_unlock(&tpg->tpg_np_lock); - iscsit_clear_tpg_np_login_thread(tpg_np, tpg); + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown); spin_lock(&tpg->tpg_np_lock); } spin_unlock(&tpg->tpg_np_lock); @@ -218,6 +225,9 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS; a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT; a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; + a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; + a->default_erl = TA_DEFAULT_ERL; + a->t10_pi = TA_DEFAULT_T10_PI; } int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) @@ -232,7 +242,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro if (iscsi_create_default_params(&tpg->param_list) < 0) goto err_out; - ISCSI_TPG_ATTRIB(tpg)->tpg = tpg; + tpg->tpg_attrib.tpg = tpg; spin_lock(&tpg->tpg_state_lock); tpg->tpg_state = TPG_STATE_INACTIVE; @@ -323,7 +333,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) return -EINVAL; } - if (ISCSI_TPG_ATTRIB(tpg)->authentication) { + if (tpg->tpg_attrib.authentication) { if (!strcmp(param->value, NONE)) { ret = iscsi_update_param_value(param, CHAP); if (ret) @@ -366,7 +376,7 @@ int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force) tpg->tpg_state = TPG_STATE_INACTIVE; spin_unlock(&tpg->tpg_state_lock); - iscsit_clear_tpg_np_login_threads(tpg); + iscsit_clear_tpg_np_login_threads(tpg, false); if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { spin_lock(&tpg->tpg_state_lock); @@ -422,6 +432,35 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np( return NULL; } +static bool iscsit_tpg_check_network_portal( + struct iscsi_tiqn *tiqn, + struct __kernel_sockaddr_storage *sockaddr, + int network_transport) +{ + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np; + struct iscsi_np *np; + bool match = false; + + spin_lock(&tiqn->tiqn_tpg_lock); + list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { + + spin_lock(&tpg->tpg_np_lock); + list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { + np = tpg_np->tpg_np; + + match = iscsit_check_np_match(sockaddr, np, + network_transport); + if (match) + break; + } + spin_unlock(&tpg->tpg_np_lock); + } + spin_unlock(&tiqn->tiqn_tpg_lock); + + return match; +} + struct iscsi_tpg_np *iscsit_tpg_add_network_portal( struct iscsi_portal_group *tpg, struct __kernel_sockaddr_storage *sockaddr, @@ -432,6 +471,16 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( struct iscsi_np *np; struct iscsi_tpg_np *tpg_np; + if (!tpg_np_parent) { + if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr, + network_transport)) { + pr_err("Network Portal: %s already exists on a" + " different TPG on %s\n", ip_str, + tpg->tpg_tiqn->tiqn); + return ERR_PTR(-EEXIST); + } + } + tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL); if (!tpg_np) { pr_err("Unable to allocate memory for" @@ -449,7 +498,10 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( INIT_LIST_HEAD(&tpg_np->tpg_np_child_list); INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list); spin_lock_init(&tpg_np->tpg_np_parent_lock); + init_completion(&tpg_np->tpg_np_comp); + kref_init(&tpg_np->tpg_np_kref); tpg_np->tpg_np = np; + np->tpg_np = tpg_np; tpg_np->tpg = tpg; 
spin_lock(&tpg->tpg_np_lock); @@ -469,7 +521,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n", tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, - (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); + np->np_transport->name); return tpg_np; } @@ -479,11 +531,11 @@ static int iscsit_tpg_release_np( struct iscsi_portal_group *tpg, struct iscsi_np *np) { - iscsit_clear_tpg_np_login_thread(tpg_np, tpg); + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true); pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, - (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); + np->np_transport->name); tpg_np->tpg_np = NULL; tpg_np->tpg = NULL; @@ -772,3 +824,58 @@ int iscsit_ta_prod_mode_write_protect( return 0; } + +int iscsit_ta_demo_mode_discovery( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->demo_mode_discovery = flag; + pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:" + " %s\n", tpg->tpgt, (a->demo_mode_discovery) ? + "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_default_erl( + struct iscsi_portal_group *tpg, + u32 default_erl) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) { + pr_err("Illegal value for default_erl: %u\n", default_erl); + return -EINVAL; + } + + a->default_erl = default_erl; + pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl); + + return 0; +} + +int iscsit_ta_t10_pi( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->t10_pi = flag; + pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:" + " %s\n", tpg->tpgt, (a->t10_pi) ? 
+ "ON" : "OFF"); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index dda48c141a8..e7265337bc4 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -5,10 +5,9 @@ extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, extern int iscsit_load_discovery_tpg(void); extern void iscsit_release_discovery_tpg(void); extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, - struct iscsi_np *); + struct iscsi_np *, struct iscsi_tpg_np **); extern int iscsit_get_tpg(struct iscsi_portal_group *); extern void iscsit_put_tpg(struct iscsi_portal_group *); -extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *); extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, @@ -37,5 +36,8 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32); extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32); extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); +extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); +extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); +extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); #endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c index 81289520f96..601e9cc61e9 100644 --- a/drivers/target/iscsi/iscsi_target_tq.c +++ b/drivers/target/iscsi/iscsi_target_tq.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains the iSCSI Login Thread and Thread Queue functions. * - * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> * @@ -105,12 +103,11 @@ int iscsi_allocate_thread_sets(u32 thread_pair_count) ts->status = ISCSI_THREAD_SET_FREE; INIT_LIST_HEAD(&ts->ts_list); spin_lock_init(&ts->ts_state_lock); - init_completion(&ts->rx_post_start_comp); - init_completion(&ts->tx_post_start_comp); init_completion(&ts->rx_restart_comp); init_completion(&ts->tx_restart_comp); init_completion(&ts->rx_start_comp); init_completion(&ts->tx_start_comp); + sema_init(&ts->ts_activate_sem, 0); ts->create_threads = 1; ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s", @@ -139,35 +136,44 @@ int iscsi_allocate_thread_sets(u32 thread_pair_count) return allocated_thread_pair_count; } -void iscsi_deallocate_thread_sets(void) +static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts) { - u32 released_count = 0; - struct iscsi_thread_set *ts = NULL; - - while ((ts = iscsi_get_ts_from_inactive_list())) { + spin_lock_bh(&ts->ts_state_lock); + ts->status = ISCSI_THREAD_SET_DIE; + if (ts->rx_thread) { + complete(&ts->rx_start_comp); + spin_unlock_bh(&ts->ts_state_lock); + kthread_stop(ts->rx_thread); spin_lock_bh(&ts->ts_state_lock); - ts->status = ISCSI_THREAD_SET_DIE; + } + if (ts->tx_thread) { + complete(&ts->tx_start_comp); spin_unlock_bh(&ts->ts_state_lock); + kthread_stop(ts->tx_thread); + spin_lock_bh(&ts->ts_state_lock); + } + spin_unlock_bh(&ts->ts_state_lock); + /* + * Release this thread_id in the thread_set_bitmap + */ + spin_lock(&ts_bitmap_lock); + bitmap_release_region(iscsit_global->ts_bitmap, + ts->thread_id, get_order(1)); + spin_unlock(&ts_bitmap_lock); - if (ts->rx_thread) { - send_sig(SIGINT, ts->rx_thread, 1); - kthread_stop(ts->rx_thread); - } - if (ts->tx_thread) { - send_sig(SIGINT, ts->tx_thread, 1); - kthread_stop(ts->tx_thread); - } - /* - * Release this thread_id in the thread_set_bitmap - */ - spin_lock(&ts_bitmap_lock); - bitmap_release_region(iscsit_global->ts_bitmap, - ts->thread_id, get_order(1)); - spin_unlock(&ts_bitmap_lock); + kfree(ts); +} +void iscsi_deallocate_thread_sets(void) +{ + struct iscsi_thread_set *ts = NULL; + u32 released_count = 0; + + while ((ts = iscsi_get_ts_from_inactive_list())) { + + iscsi_deallocate_thread_one(ts); released_count++; - kfree(ts); } if (released_count) @@ -187,34 +193,13 @@ static void iscsi_deallocate_extra_thread_sets(void) if (!ts) break; - spin_lock_bh(&ts->ts_state_lock); - ts->status = ISCSI_THREAD_SET_DIE; - spin_unlock_bh(&ts->ts_state_lock); - - if (ts->rx_thread) { - send_sig(SIGINT, ts->rx_thread, 1); - kthread_stop(ts->rx_thread); - } - if (ts->tx_thread) { - send_sig(SIGINT, ts->tx_thread, 1); - kthread_stop(ts->tx_thread); - } - /* - * Release this thread_id in the thread_set_bitmap - */ - spin_lock(&ts_bitmap_lock); - bitmap_release_region(iscsit_global->ts_bitmap, - ts->thread_id, get_order(1)); - spin_unlock(&ts_bitmap_lock); - + iscsi_deallocate_thread_one(ts); released_count++; - kfree(ts); } - if (released_count) { + if (released_count) pr_debug("Stopped %d thread set(s) (%d total threads)." "\n", released_count, released_count * 2); - } } void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) @@ -224,37 +209,23 @@ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set spin_lock_bh(&ts->ts_state_lock); conn->thread_set = ts; ts->conn = conn; + ts->status = ISCSI_THREAD_SET_ACTIVE; spin_unlock_bh(&ts->ts_state_lock); - /* - * Start up the RX thread and wait on rx_post_start_comp. 
The RX - * Thread will then do the same for the TX Thread in - * iscsi_rx_thread_pre_handler(). - */ + complete(&ts->rx_start_comp); - wait_for_completion(&ts->rx_post_start_comp); + complete(&ts->tx_start_comp); + + down(&ts->ts_activate_sem); } struct iscsi_thread_set *iscsi_get_thread_set(void) { - int allocate_ts = 0; - struct completion comp; - struct iscsi_thread_set *ts = NULL; - /* - * If no inactive thread set is available on the first call to - * iscsi_get_ts_from_inactive_list(), sleep for a second and - * try again. If still none are available after two attempts, - * allocate a set ourselves. - */ + struct iscsi_thread_set *ts; + get_set: ts = iscsi_get_ts_from_inactive_list(); if (!ts) { - if (allocate_ts == 2) - iscsi_allocate_thread_sets(1); - - init_completion(&comp); - wait_for_completion_timeout(&comp, 1 * HZ); - - allocate_ts++; + iscsi_allocate_thread_sets(1); goto get_set; } @@ -263,6 +234,7 @@ get_set: ts->thread_count = 2; init_completion(&ts->rx_restart_comp); init_completion(&ts->tx_restart_comp); + sema_init(&ts->ts_activate_sem, 0); return ts; } @@ -400,7 +372,8 @@ static void iscsi_check_to_add_additional_sets(void) static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts) { spin_lock_bh(&ts->ts_state_lock); - if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) { + if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() || + signal_pending(current)) { spin_unlock_bh(&ts->ts_state_lock); return -1; } @@ -419,7 +392,8 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts) goto sleep; } - flush_signals(current); + if (ts->status != ISCSI_THREAD_SET_DIE) + flush_signals(current); if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); @@ -446,18 +420,19 @@ sleep: if (iscsi_signal_thread_pre_handler(ts) < 0) return NULL; + iscsi_check_to_add_additional_sets(); + + spin_lock_bh(&ts->ts_state_lock); if (!ts->conn) { pr_err("struct iscsi_thread_set->conn is NULL for" - " thread_id: %d, going back to sleep\n", ts->thread_id); - goto sleep; + " RX thread_id: %s/%d\n", current->comm, current->pid); + spin_unlock_bh(&ts->ts_state_lock); + return NULL; } - iscsi_check_to_add_additional_sets(); - /* - * The RX Thread starts up the TX Thread and sleeps. - */ ts->thread_clear |= ISCSI_CLEAR_RX_THREAD; - complete(&ts->tx_start_comp); - wait_for_completion(&ts->tx_post_start_comp); + spin_unlock_bh(&ts->ts_state_lock); + + up(&ts->ts_activate_sem); return ts->conn; } @@ -472,7 +447,8 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts) goto sleep; } - flush_signals(current); + if (ts->status != ISCSI_THREAD_SET_DIE) + flush_signals(current); if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); @@ -498,27 +474,20 @@ sleep: if (iscsi_signal_thread_pre_handler(ts) < 0) return NULL; - if (!ts->conn) { - pr_err("struct iscsi_thread_set->conn is NULL for " - " thread_id: %d, going back to sleep\n", - ts->thread_id); - goto sleep; - } - iscsi_check_to_add_additional_sets(); - /* - * From the TX thread, up the tx_post_start_comp that the RX Thread is - * sleeping on in iscsi_rx_thread_pre_handler(), then up the - * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on. 
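The removed comment above described the old three-way startup dance: the activator waited on rx_post_start_comp while the RX thread in turn woke the TX thread via tx_post_start_comp. The rework collapses this into one semaphore: the activator publishes ts->conn, kicks both threads, and blocks once on ts_activate_sem, which a side thread up()s after latching the connection. A condensed sketch of the new flow (types from iscsi_target_tq.h; ts_activate_sem is assumed sema_init()ed to 0):

#include <linux/spinlock.h>
#include <linux/semaphore.h>
#include <linux/completion.h>

static void activate_thread_set_sketch(struct iscsi_conn *conn,
				       struct iscsi_thread_set *ts)
{
	spin_lock_bh(&ts->ts_state_lock);
	conn->thread_set = ts;
	ts->conn = conn;
	ts->status = ISCSI_THREAD_SET_ACTIVE;	/* now set by the activator */
	spin_unlock_bh(&ts->ts_state_lock);

	complete(&ts->rx_start_comp);	/* wake the RX thread */
	complete(&ts->tx_start_comp);	/* wake the TX thread */
	down(&ts->ts_activate_sem);	/* wait for a pre-handler's up() */
}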
- */ - ts->thread_clear |= ISCSI_CLEAR_TX_THREAD; - complete(&ts->tx_post_start_comp); - complete(&ts->rx_post_start_comp); spin_lock_bh(&ts->ts_state_lock); - ts->status = ISCSI_THREAD_SET_ACTIVE; + if (!ts->conn) { + pr_err("struct iscsi_thread_set->conn is NULL for" + " TX thread_id: %s/%d\n", current->comm, current->pid); + spin_unlock_bh(&ts->ts_state_lock); + return NULL; + } + ts->thread_clear |= ISCSI_CLEAR_TX_THREAD; spin_unlock_bh(&ts->ts_state_lock); + up(&ts->ts_activate_sem); + return ts->conn; } diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h index 547d1183128..cc1eede5ab3 100644 --- a/drivers/target/iscsi/iscsi_target_tq.h +++ b/drivers/target/iscsi/iscsi_target_tq.h @@ -64,10 +64,6 @@ struct iscsi_thread_set { struct iscsi_conn *conn; /* used for controlling ts state accesses */ spinlock_t ts_state_lock; - /* Used for rx side post startup */ - struct completion rx_post_start_comp; - /* Used for tx side post startup */ - struct completion tx_post_start_comp; /* used for restarting thread queue */ struct completion rx_restart_comp; /* used for restarting thread queue */ @@ -82,6 +78,7 @@ struct iscsi_thread_set { struct task_struct *tx_thread; /* struct iscsi_thread_set in list list head*/ struct list_head ts_list; + struct semaphore ts_activate_sem; }; #endif /*** ISCSI_THREAD_QUEUE_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c new file mode 100644 index 00000000000..882728fac30 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_transport.c @@ -0,0 +1,55 @@ +#include <linux/spinlock.h> +#include <linux/list.h> +#include <target/iscsi/iscsi_transport.h> + +static LIST_HEAD(g_transport_list); +static DEFINE_MUTEX(transport_mutex); + +struct iscsit_transport *iscsit_get_transport(int type) +{ + struct iscsit_transport *t; + + mutex_lock(&transport_mutex); + list_for_each_entry(t, &g_transport_list, t_node) { + if (t->transport_type == type) { + if (t->owner && !try_module_get(t->owner)) { + t = NULL; + } + mutex_unlock(&transport_mutex); + return t; + } + } + mutex_unlock(&transport_mutex); + + return NULL; +} + +void iscsit_put_transport(struct iscsit_transport *t) +{ + if (t->owner) + module_put(t->owner); +} + +int iscsit_register_transport(struct iscsit_transport *t) +{ + INIT_LIST_HEAD(&t->t_node); + + mutex_lock(&transport_mutex); + list_add_tail(&t->t_node, &g_transport_list); + mutex_unlock(&transport_mutex); + + pr_debug("Registered iSCSI transport: %s\n", t->name); + + return 0; +} +EXPORT_SYMBOL(iscsit_register_transport); + +void iscsit_unregister_transport(struct iscsit_transport *t) +{ + mutex_lock(&transport_mutex); + list_del(&t->t_node); + mutex_unlock(&transport_mutex); + + pr_debug("Unregistered iSCSI transport: %s\n", t->name); } +EXPORT_SYMBOL(iscsit_unregister_transport); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 7ce350578c8..fd90b28f1d9 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1,9 +1,7 @@ /******************************************************************************* * This file contains the iSCSI Target specific utility functions. * - * © Copyright 2007-2011 RisingTide Systems LLC. - * - * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A.
Bellinger <nab@linux-iscsi.org> * @@ -19,11 +17,13 @@ ******************************************************************************/ #include <linux/list.h> +#include <linux/percpu_ida.h> #include <scsi/scsi_tcq.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_configfs.h> +#include <target/iscsi/iscsi_transport.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" @@ -152,21 +152,25 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd) * May be called from software interrupt (timer) context for allocating * iSCSI NopINs. */ -struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) +struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) { struct iscsi_cmd *cmd; + struct se_session *se_sess = conn->sess->se_sess; + int size, tag; - cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask); - if (!cmd) { - pr_err("Unable to allocate memory for struct iscsi_cmd.\n"); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); + if (tag < 0) return NULL; - } - cmd->conn = conn; + size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; + cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); + memset(cmd, 0, size); + + cmd->se_cmd.map_tag = tag; + cmd->conn = conn; INIT_LIST_HEAD(&cmd->i_conn_node); INIT_LIST_HEAD(&cmd->datain_list); INIT_LIST_HEAD(&cmd->cmd_r2t_list); - init_completion(&cmd->reject_comp); spin_lock_init(&cmd->datain_lock); spin_lock_init(&cmd->dataout_timeout_lock); spin_lock_init(&cmd->istate_lock); @@ -175,6 +179,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) return cmd; } +EXPORT_SYMBOL(iscsit_allocate_cmd); struct iscsi_seq *iscsit_get_seq_holder_for_datain( struct iscsi_cmd *cmd, @@ -240,9 +245,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm */ if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { pr_err("Received CmdSN: 0x%08x is greater than" - " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, + " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, sess->max_cmd_sn); - ret = CMDSN_ERROR_CANNOT_RECOVER; + ret = CMDSN_MAXCMDSN_OVERRUN; } else if (cmdsn == sess->exp_cmd_sn) { sess->exp_cmd_sn++; @@ -271,13 +276,12 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm * Commands may be received out of order if MC/S is in use. * Ensure they are executed in CmdSN order. 
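Both the window check in iscsit_check_received_cmdsn() above and the sequencing that follows lean on serial-number arithmetic, since CmdSN wraps at 2^32. An illustrative comparison in the style the driver's iscsi_sna_gt() helper implements:

#include <linux/types.h>

/* "a after b" modulo 2^32 is decided by signed distance, not raw
 * magnitude (RFC 1982-style serial arithmetic). Illustrative only. */
static inline bool sna_gt_sketch(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

Note the behavioral change above: a CmdSN beyond MaxCmdSN now yields CMDSN_MAXCMDSN_OVERRUN and is quietly ignored, where it previously escalated to the unrecoverable CMDSN_ERROR_CANNOT_RECOVER.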
*/ -int iscsit_sequence_cmd( - struct iscsi_conn *conn, - struct iscsi_cmd *cmd, - __be32 cmdsn) +int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + unsigned char *buf, __be32 cmdsn) { - int ret; - int cmdsn_ret; + int ret, cmdsn_ret; + bool reject = false; + u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES; mutex_lock(&conn->sess->cmdsn_mutex); @@ -287,23 +291,41 @@ int iscsit_sequence_cmd( ret = iscsit_execute_cmd(cmd, 0); if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list)) iscsit_execute_ooo_cmdsns(conn->sess); + else if (ret < 0) { + reject = true; + ret = CMDSN_ERROR_CANNOT_RECOVER; + } break; case CMDSN_HIGHER_THAN_EXP: ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn)); + if (ret < 0) { + reject = true; + ret = CMDSN_ERROR_CANNOT_RECOVER; + break; + } + ret = CMDSN_HIGHER_THAN_EXP; break; case CMDSN_LOWER_THAN_EXP: + case CMDSN_MAXCMDSN_OVERRUN: + default: cmd->i_state = ISTATE_REMOVE; iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); - ret = cmdsn_ret; - break; - default: - ret = cmdsn_ret; + /* + * Existing callers for iscsit_sequence_cmd() will silently + * ignore commands with CMDSN_LOWER_THAN_EXP, so force this + * return for CMDSN_MAXCMDSN_OVERRUN as well.. + */ + ret = CMDSN_LOWER_THAN_EXP; break; } mutex_unlock(&conn->sess->cmdsn_mutex); + if (reject) + iscsit_reject_cmd(cmd, reason, buf); + return ret; } +EXPORT_SYMBOL(iscsit_sequence_cmd); int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf) { @@ -662,35 +684,68 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) void iscsit_release_cmd(struct iscsi_cmd *cmd) { - struct iscsi_conn *conn = cmd->conn; + struct iscsi_session *sess; + struct se_cmd *se_cmd = &cmd->se_cmd; - iscsit_free_r2ts_from_list(cmd); - iscsit_free_all_datain_reqs(cmd); + if (cmd->conn) + sess = cmd->conn->sess; + else + sess = cmd->sess; + + BUG_ON(!sess || !sess->se_sess); kfree(cmd->buf_ptr); kfree(cmd->pdu_list); kfree(cmd->seq_list); kfree(cmd->tmr_req); kfree(cmd->iov_data); + kfree(cmd->text_in_ptr); + + percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag); +} +EXPORT_SYMBOL(iscsit_release_cmd); + +void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, + bool check_queues) +{ + struct iscsi_conn *conn = cmd->conn; - if (conn) { + if (scsi_cmd) { + if (cmd->data_direction == DMA_TO_DEVICE) { + iscsit_stop_dataout_timer(cmd); + iscsit_free_r2ts_from_list(cmd); + } + if (cmd->data_direction == DMA_FROM_DEVICE) + iscsit_free_all_datain_reqs(cmd); + } + + if (conn && check_queues) { iscsit_remove_cmd_from_immediate_queue(cmd, conn); iscsit_remove_cmd_from_response_queue(cmd, conn); } - - kmem_cache_free(lio_cmd_cache, cmd); } -void iscsit_free_cmd(struct iscsi_cmd *cmd) +void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) { + struct se_cmd *se_cmd = NULL; + int rc; /* * Determine if a struct se_cmd is associated with * this struct iscsi_cmd. */ switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: + se_cmd = &cmd->se_cmd; + __iscsit_free_cmd(cmd, true, shutdown); + /* + * Fallthrough + */ case ISCSI_OP_SCSI_TMFUNC: - transport_generic_free_cmd(&cmd->se_cmd, 1); + rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); + if (!rc && shutdown && se_cmd && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, true, shutdown); + target_put_sess_cmd(se_cmd->se_sess, se_cmd); + } break; case ISCSI_OP_REJECT: /* @@ -699,11 +754,19 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd) * associated cmd->se_cmd needs to be released. 
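iscsit_release_cmd() above now simply returns the command's slot to the per-session tag pool, the counterpart of the percpu_ida-based allocator earlier in this file. The full lifecycle, condensed (assuming sess_cmd_map was sized at session setup for tag-count many per-command slots, each sizeof(struct iscsi_cmd) plus the transport's priv_size):

#include <linux/string.h>
#include <linux/percpu_ida.h>

static struct iscsi_cmd *alloc_cmd_sketch(struct iscsi_conn *conn, int state)
{
	struct se_session *se_sess = conn->sess->se_sess;
	int size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
	int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
	struct iscsi_cmd *cmd;

	if (tag < 0)
		return NULL;	/* pool exhausted or sleep interrupted */

	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + tag * size);
	memset(cmd, 0, size);
	cmd->se_cmd.map_tag = tag;	/* remembered for the free side */
	return cmd;
}

/* Free side, as in iscsit_release_cmd() above:
 *	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 */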
*/ if (cmd->se_cmd.se_tfo != NULL) { - transport_generic_free_cmd(&cmd->se_cmd, 1); + se_cmd = &cmd->se_cmd; + __iscsit_free_cmd(cmd, true, shutdown); + + rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); + if (!rc && shutdown && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, true, shutdown); + target_put_sess_cmd(se_cmd->se_sess, se_cmd); + } break; } /* Fall-through */ default: + __iscsit_free_cmd(cmd, false, shutdown); iscsit_release_cmd(cmd); break; } @@ -866,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response) u8 state; struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn, TASK_RUNNING); if (!cmd) return -1; @@ -922,7 +985,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data) tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; - conn->sess->conn_timeout_errors++; + atomic_long_inc(&conn->sess->conn_timeout_errors); spin_unlock_bh(&tiqn->sess_err_stats.lock); } } @@ -1226,34 +1289,21 @@ send_datacrc: */ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail) { - u8 iscsi_hdr[ISCSI_HDR_LEN]; - int err; - struct kvec iov; struct iscsi_login_rsp *hdr; + struct iscsi_login *login = conn->conn_login; + login->login_failed = 1; iscsit_collect_login_stats(conn, status_class, status_detail); - memset(&iov, 0, sizeof(struct kvec)); - memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN); + memset(&login->rsp[0], 0, ISCSI_HDR_LEN); - hdr = (struct iscsi_login_rsp *)&iscsi_hdr; + hdr = (struct iscsi_login_rsp *)&login->rsp[0]; hdr->opcode = ISCSI_OP_LOGIN_RSP; hdr->status_class = status_class; hdr->status_detail = status_detail; hdr->itt = conn->login_itt; - iov.iov_base = &iscsi_hdr; - iov.iov_len = ISCSI_HDR_LEN; - - PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN); - - err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN); - if (err != ISCSI_HDR_LEN) { - pr_err("tx_data returned less than expected\n"); - return -1; - } - - return 0; + return conn->conn_transport->iscsit_put_login_tx(conn, login, 0); } void iscsit_print_session_params(struct iscsi_session *sess) @@ -1432,7 +1482,8 @@ void iscsit_collect_login_stats( strcpy(ls->last_intr_fail_name, (intrname ? 
intrname->value : "Unknown")); - ls->last_intr_fail_ip_family = conn->sock->sk->sk_family; + ls->last_intr_fail_ip_family = conn->login_family; + snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE, "%s", conn->login_ip); ls->last_fail_time = get_jiffies_64(); diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 894d0f83792..a68508c4fec 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -8,11 +8,13 @@ extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32); extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); -extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); +extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); +extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); -int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, __be32 cmdsn); +extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + unsigned char * ,__be32 cmdsn); extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *); extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t); extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *, @@ -28,7 +30,8 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); -extern void iscsit_free_cmd(struct iscsi_cmd *); +extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); +extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); extern void iscsit_inc_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 2d444b1ccd3..8c64b8776a9 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -3,7 +3,7 @@ * This file contains the Linux/SCSI LLD virtual SCSI initiator driver * for emulated SAS initiator ports * - * © Copyright 2011 RisingTide Systems LLC. + * © Copyright 2011-2013 Datera, Inc. * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 
* @@ -79,11 +79,10 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd) kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); } -static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer, - char **start, off_t offset, - int length, int inout) +static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) { - return sprintf(buffer, "tcm_loop_proc_info()\n"); + seq_printf(m, "tcm_loop_proc_info()\n"); + return 0; } static int tcm_loop_driver_probe(struct device *); @@ -136,6 +135,21 @@ static int tcm_loop_change_queue_depth( return sdev->queue_depth; } +static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag); + + if (tag) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag = 0; + + return tag; +} + /* * Locate the SAM Task Attr from struct scsi_cmnd * */ @@ -165,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work) struct tcm_loop_hba *tl_hba; struct tcm_loop_tpg *tl_tpg; struct scatterlist *sgl_bidi = NULL; - u32 sgl_bidi_count = 0; + u32 sgl_bidi_count = 0, transfer_length; int rc; tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); @@ -179,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work) set_host_byte(sc, DID_NO_CONNECT); goto out_done; } - + if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) { + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + goto out_done; + } tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) { scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" @@ -195,12 +212,26 @@ static void tcm_loop_submission_work(struct work_struct *work) se_cmd->se_cmd_flags |= SCF_BIDI; } + + transfer_length = scsi_transfer_length(sc); + if (!scsi_prot_sg_count(sc) && + scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) { + se_cmd->prot_pto = true; + /* + * loopback transport doesn't support + * WRITE_GENERATE, READ_STRIP protection + * information operations, go ahead unprotected. 
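Put another way, the loopback submission path now sizes the data transfer as in the sketch below, assuming scsi_transfer_length() accounts for protection-interval bytes when DIF is in effect:

#include <scsi/scsi_cmnd.h>

static u32 loop_transfer_length_sketch(struct scsi_cmnd *sc,
				       struct se_cmd *se_cmd)
{
	u32 length = scsi_transfer_length(sc);

	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		/* No separate PI scatterlist from the midlayer: ask the
		 * backend to generate/strip PI itself (prot_pto) and
		 * size the transfer by the plain data buffer. */
		se_cmd->prot_pto = true;
		length = scsi_bufflen(sc);
	}
	return length;
}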
+ */ + transfer_length = scsi_bufflen(sc); + } + rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, - scsi_bufflen(sc), tcm_loop_sam_attr(sc), + transfer_length, tcm_loop_sam_attr(sc), sc->sc_data_direction, 0, scsi_sglist(sc), scsi_sg_count(sc), - sgl_bidi, sgl_bidi_count); + sgl_bidi, sgl_bidi_count, + scsi_prot_sglist(sc), scsi_prot_sg_count(sc)); if (rc < 0) { set_host_byte(sc, DID_NO_CONNECT); goto out_done; @@ -208,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work) return; out_done: + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); sc->scsi_done(sc); return; } @@ -234,6 +266,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) } tl_cmd->sc = sc; + tl_cmd->sc_cmd_tag = sc->tag; INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); queue_work(tcm_loop_workqueue, &tl_cmd->work); return 0; @@ -243,41 +276,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) * Called from SCSI EH process context to issue a LUN_RESET TMR * to struct scsi_device */ -static int tcm_loop_device_reset(struct scsi_cmnd *sc) +static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, + struct tcm_loop_nexus *tl_nexus, + int lun, int task, enum tcm_tmreq_table tmr) { struct se_cmd *se_cmd = NULL; - struct se_portal_group *se_tpg; struct se_session *se_sess; + struct se_portal_group *se_tpg; struct tcm_loop_cmd *tl_cmd = NULL; - struct tcm_loop_hba *tl_hba; - struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tmr *tl_tmr = NULL; - struct tcm_loop_tpg *tl_tpg; - int ret = FAILED, rc; - /* - * Locate the tcm_loop_hba_t pointer - */ - tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); - /* - * Locate the tl_nexus and se_sess pointers - */ - tl_nexus = tl_hba->tl_nexus; - if (!tl_nexus) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); - return FAILED; - } - se_sess = tl_nexus->se_sess; - /* - * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id - */ - tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; - se_tpg = &tl_tpg->tl_se_tpg; + int ret = TMR_FUNCTION_FAILED, rc; tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); if (!tl_cmd) { pr_err("Unable to allocate memory for tl_cmd\n"); - return FAILED; + return ret; } tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); @@ -288,6 +301,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) init_waitqueue_head(&tl_tmr->tl_tmr_wait); se_cmd = &tl_cmd->tl_se_cmd; + se_tpg = &tl_tpg->tl_se_tpg; + se_sess = tl_nexus->se_sess; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ @@ -295,17 +310,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) DMA_NONE, MSG_SIMPLE_TAG, &tl_cmd->tl_sense_buf[0]); - rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL); + rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); if (rc < 0) goto release; + + if (tmr == TMR_ABORT_TASK) + se_cmd->se_tmr_req->ref_task_tag = task; + /* - * Locate the underlying TCM struct se_lun from sc->device->lun + * Locate the underlying TCM struct se_lun */ - if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) + if (transport_lookup_tmr_lun(se_cmd, lun) < 0) { + ret = TMR_LUN_DOES_NOT_EXIST; goto release; + } /* - * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() - * to wake us up. + * Queue the TMR to TCM Core and sleep waiting for + * tcm_loop_queue_tm_rsp() to wake us up. 
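With the TMR issue path factored out into tcm_loop_issue_tmr(), the error-handler entry points that follow reduce to thin wrappers: resolve the nexus and TPG, fire one TMR, and map the raw response onto the SCSI midlayer's SUCCESS/FAILED. A sketch of that shape (enum tcm_tmreq_table and the TMR_* codes come from target_core_base.h):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int loop_eh_sketch(struct tcm_loop_tpg *tl_tpg,
			  struct tcm_loop_nexus *tl_nexus,
			  struct scsi_cmnd *sc, enum tcm_tmreq_table tmr)
{
	/* ABORT TASK needs the tag of the command to abort; LUN RESET
	 * takes no task reference. */
	int task = (tmr == TMR_ABORT_TASK) ? sc->tag : 0;
	int ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
				     task, tmr);

	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}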
*/ transport_generic_handle_tmr(se_cmd); wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); @@ -313,8 +334,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) * The TMR LUN_RESET has completed, check the response status and * then release allocations. */ - ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? - SUCCESS : FAILED; + ret = se_cmd->se_tmr_req->response; release: if (se_cmd) transport_generic_free_cmd(se_cmd, 1); @@ -324,6 +344,94 @@ release: return ret; } +static int tcm_loop_abort_task(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_hba->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + sc->tag, TMR_ABORT_TASK); + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; +} + +/* + * Called from SCSI EH process context to issue a LUN_RESET TMR + * to struct scsi_device + */ +static int tcm_loop_device_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_hba->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + 0, TMR_LUN_RESET); + return (ret == TMR_FUNCTION_COMPLETE) ? 
SUCCESS : FAILED; +} + +static int tcm_loop_target_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + if (!tl_hba) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + if (tl_tpg) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return SUCCESS; + } + return FAILED; +} + static int tcm_loop_slave_alloc(struct scsi_device *sd) { set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); @@ -332,16 +440,28 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd) static int tcm_loop_slave_configure(struct scsi_device *sd) { + if (sd->tagged_supported) { + scsi_activate_tcq(sd, sd->queue_depth); + scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG, + sd->host->cmd_per_lun); + } else { + scsi_adjust_queue_depth(sd, 0, + sd->host->cmd_per_lun); + } + return 0; } static struct scsi_host_template tcm_loop_driver_template = { - .proc_info = tcm_loop_proc_info, + .show_info = tcm_loop_show_info, .proc_name = "tcm_loopback", .name = "TCM_Loopback", .queuecommand = tcm_loop_queuecommand, .change_queue_depth = tcm_loop_change_queue_depth, + .change_queue_type = tcm_loop_change_queue_type, + .eh_abort_handler = tcm_loop_abort_task, .eh_device_reset_handler = tcm_loop_device_reset, + .eh_target_reset_handler = tcm_loop_target_reset, .can_queue = 1024, .this_id = -1, .sg_tablesize = 256, @@ -357,7 +477,7 @@ static int tcm_loop_driver_probe(struct device *dev) { struct tcm_loop_hba *tl_hba; struct Scsi_Host *sh; - int error; + int error, host_prot; tl_hba = to_tcm_loop_hba(dev); @@ -381,6 +501,13 @@ static int tcm_loop_driver_probe(struct device *dev) sh->max_channel = 0; sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(sh, host_prot); + scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); + error = scsi_add_host(sh, &tl_hba->dev); if (error) { pr_err("%s: scsi_add_host failed\n", __func__); @@ -700,7 +827,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) { - return 1; + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + + return tl_cmd->sc_cmd_tag; } static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) @@ -787,7 +917,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) return 0; } -static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) +static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) { struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; @@ -797,7 +927,11 @@ static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) */ atomic_set(&tl_tmr->tmr_complete, 1); wake_up(&tl_tmr->tl_tmr_wait); - return 0; +} + +static void tcm_loop_aborted_task(struct se_cmd *se_cmd) +{ + return; } static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) @@ -827,7 +961,7 @@ static int tcm_loop_port_link( struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; atomic_inc(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); /* * Add Linux/SCSI struct scsi_device by HCTL */ @@ -862,7 +996,7 @@ 
static void tcm_loop_port_unlink( scsi_device_put(sd); atomic_dec(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); } @@ -894,7 +1028,7 @@ static int tcm_loop_make_nexus( /* * Initialize the struct se_session pointer */ - tl_nexus->se_sess = transport_init_session(); + tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL); if (IS_ERR(tl_nexus->se_sess)) { ret = PTR_ERR(tl_nexus->se_sess); goto out; @@ -934,7 +1068,10 @@ static int tcm_loop_drop_nexus( struct tcm_loop_nexus *tl_nexus; struct tcm_loop_hba *tl_hba = tpg->tl_hba; - tl_nexus = tpg->tl_hba->tl_nexus; + if (!tl_hba) + return -ENODEV; + + tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) return -ENODEV; @@ -1063,14 +1200,62 @@ check_newline: TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); +static ssize_t tcm_loop_tpg_show_transport_status( + struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + const char *status = NULL; + ssize_t ret = -EINVAL; + + switch (tl_tpg->tl_transport_status) { + case TCM_TRANSPORT_ONLINE: + status = "online"; + break; + case TCM_TRANSPORT_OFFLINE: + status = "offline"; + break; + default: + break; + } + + if (status) + ret = snprintf(page, PAGE_SIZE, "%s\n", status); + + return ret; +} + +static ssize_t tcm_loop_tpg_store_transport_status( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + + if (!strncmp(page, "online", 6)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return count; + } + if (!strncmp(page, "offline", 7)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE; + return count; + } + return -EINVAL; +} + +TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR); + static struct configfs_attribute *tcm_loop_tpg_attrs[] = { &tcm_loop_tpg_nexus.attr, + &tcm_loop_tpg_transport_status.attr, NULL, }; /* Start items for tcm_loop_naa_cit */ -struct se_portal_group *tcm_loop_make_naa_tpg( +static struct se_portal_group *tcm_loop_make_naa_tpg( struct se_wwn *wwn, struct config_group *group, const char *name) @@ -1115,7 +1300,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( return &tl_tpg->tl_se_tpg; } -void tcm_loop_drop_naa_tpg( +static void tcm_loop_drop_naa_tpg( struct se_portal_group *se_tpg) { struct se_wwn *wwn = se_tpg->se_tpg_wwn; @@ -1147,7 +1332,7 @@ void tcm_loop_drop_naa_tpg( /* Start items for tcm_loop_cit */ -struct se_wwn *tcm_loop_make_scsi_hba( +static struct se_wwn *tcm_loop_make_scsi_hba( struct target_fabric_configfs *tf, struct config_group *group, const char *name) @@ -1217,7 +1402,7 @@ out: return ERR_PTR(ret); } -void tcm_loop_drop_scsi_hba( +static void tcm_loop_drop_scsi_hba( struct se_wwn *wwn) { struct tcm_loop_hba *tl_hba = container_of(wwn, @@ -1317,6 +1502,7 @@ static int tcm_loop_register_configfs(void) fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; fabric->tf_ops.queue_status = &tcm_loop_queue_status; fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; + fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; /* * Setup function pointers for generic logic in target_core_fabric_configfs.c @@ -1336,11 +1522,11 @@ static int tcm_loop_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = 
tcm_loop_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; /* * Once fabric->tf_ops has been setup, now register the fabric for * use within TCM diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index dd7a84ee78e..54c59d0b660 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -10,6 +10,8 @@ struct tcm_loop_cmd { /* State of Linux/SCSI CDB+Data descriptor */ u32 sc_cmd_state; + /* Tagged command queueing */ + u32 sc_cmd_tag; /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ struct scsi_cmnd *sc; /* The TCM I/O descriptor that is accessed via container_of() */ @@ -40,8 +42,12 @@ struct tcm_loop_nacl { struct se_node_acl se_node_acl; }; +#define TCM_TRANSPORT_ONLINE 0 +#define TCM_TRANSPORT_OFFLINE 1 + struct tcm_loop_tpg { unsigned short tl_tpgt; + unsigned short tl_transport_status; atomic_t tl_tpg_port_count; struct se_portal_group tl_se_tpg; struct tcm_loop_hba *tl_hba; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 2e8d06f198a..e7e93727553 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create( return ERR_PTR(-ENOMEM); } - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { pr_err("failed to init se_session\n"); @@ -1719,7 +1719,7 @@ static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg) nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL); if (!nacl) { - pr_err("Unable to alocate struct sbp_nacl\n"); + pr_err("Unable to allocate struct sbp_nacl\n"); return NULL; } @@ -1842,9 +1842,13 @@ static int sbp_queue_status(struct se_cmd *se_cmd) return sbp_send_sense(req); } -static int sbp_queue_tm_rsp(struct se_cmd *se_cmd) +static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) { - return 0; +} + +static void sbp_aborted_task(struct se_cmd *se_cmd) +{ + return; } static int sbp_check_stop_free(struct se_cmd *se_cmd) @@ -2527,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = { .queue_data_in = sbp_queue_data_in, .queue_status = sbp_queue_status, .queue_tm_rsp = sbp_queue_tm_rsp, + .aborted_task = sbp_aborted_task, .check_stop_free = sbp_check_stop_free, .fabric_make_wwn = sbp_make_tport, @@ -2557,15 +2562,15 @@ static int sbp_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs 
= sbp_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; ret = target_fabric_configfs_register(fabric); if (ret < 0) { @@ -2598,7 +2603,7 @@ static int __init sbp_init(void) return 0; }; -static void sbp_exit(void) +static void __exit sbp_exit(void) { sbp_deregister_configfs(); }; diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 7d4ec02e29a..fbc5ebb5f76 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -3,7 +3,7 @@ * * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA) * - * (c) Copyright 2009-2012 RisingTide Systems LLC. + * (c) Copyright 2009-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * @@ -27,6 +27,7 @@ #include <linux/spinlock.h> #include <linux/configfs.h> #include <linux/export.h> +#include <linux/file.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <asm/unaligned.h> @@ -40,10 +41,13 @@ #include "target_core_alua.h" #include "target_core_ua.h" -static sense_reason_t core_alua_check_transition(int state, int *primary); +static sense_reason_t core_alua_check_transition(int state, int valid, + int *primary); static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, - struct se_port *port, int explict, int offline); + struct se_port *port, int explicit, int offline); + +static char *core_alua_dump_state(int state); static u16 alua_lu_gps_counter; static u32 alua_lu_gps_count; @@ -54,6 +58,86 @@ static LIST_HEAD(lu_gps_list); struct t10_alua_lu_gp *default_lu_gp; /* + * REPORT REFERRALS + * + * See sbc3r35 section 5.23 + */ +sense_reason_t +target_emulate_report_referrals(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct t10_alua_lba_map *map; + struct t10_alua_lba_map_member *map_mem; + unsigned char *buf; + u32 rd_len = 0, off; + + if (cmd->data_length < 4) { + pr_warn("REPORT REFERRALS allocation length %u too" + " small\n", cmd->data_length); + return TCM_INVALID_CDB_FIELD; + } + + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + off = 4; + spin_lock(&dev->t10_alua.lba_map_lock); + if (list_empty(&dev->t10_alua.lba_map_list)) { + spin_unlock(&dev->t10_alua.lba_map_lock); + transport_kunmap_data_sg(cmd); + + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + list_for_each_entry(map, &dev->t10_alua.lba_map_list, + lba_map_list) { + int desc_num = off + 3; + int pg_num; + + off += 4; + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_first_lba, &buf[off]); + off += 8; + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_last_lba, &buf[off]); + off += 8; + rd_len += 20; + pg_num = 0; + list_for_each_entry(map_mem, &map->lba_map_mem_list, + lba_map_mem_list) { + int alua_state = map_mem->lba_map_mem_alua_state; + int alua_pg_id = map_mem->lba_map_mem_alua_pg_id; + + if (cmd->data_length > off) + buf[off] = alua_state & 0x0f; + off += 2; + if (cmd->data_length > off) + buf[off] = (alua_pg_id >> 8) & 0xff; + off++; + if (cmd->data_length > off) + buf[off] = (alua_pg_id & 
0xff); + off++; + rd_len += 4; + pg_num++; + } + if (cmd->data_length > desc_num) + buf[desc_num] = pg_num; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + + /* + * Set the RETURN DATA LENGTH set in the header of the DataIN Payload + */ + put_unaligned_be16(rd_len, &buf[2]); + + transport_kunmap_data_sg(cmd); + + target_complete_cmd(cmd, GOOD); + return 0; +} + +/* * REPORT_TARGET_PORT_GROUPS * * See spc4r17 section 6.27 @@ -116,12 +200,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set supported ASYMMETRIC ACCESS State bits */ - buf[off] = 0x80; /* T_SUP */ - buf[off] |= 0x40; /* O_SUP */ - buf[off] |= 0x8; /* U_SUP */ - buf[off] |= 0x4; /* S_SUP */ - buf[off] |= 0x2; /* AN_SUP */ - buf[off++] |= 0x1; /* AO_SUP */ + buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states; /* * TARGET PORT GROUP */ @@ -174,7 +253,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) if (ext_hdr != 0) { buf[4] = 0x10; /* - * Set the implict transition time (in seconds) for the application + * Set the implicit transition time (in seconds) for the application * client to use as a base for it's transition timeout value. * * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN @@ -187,7 +266,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (tg_pt_gp) - buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; + buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); } } @@ -198,7 +277,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) } /* - * SET_TARGET_PORT_GROUPS for explict ALUA operation. + * SET_TARGET_PORT_GROUPS for explicit ALUA operation. * * See spc4r17 section 6.35 */ @@ -214,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) unsigned char *ptr; sense_reason_t rc = TCM_NO_SENSE; u32 len = 4; /* Skip over RESERVED area in header */ - int alua_access_state, primary = 0; + int alua_access_state, primary = 0, valid_states; u16 tg_pt_id, rtpi; if (!l_port) @@ -231,7 +310,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; /* - * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed + * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed * for the local tg_pt_gp. */ l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; @@ -250,12 +329,13 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) } spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); - if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) { + if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { pr_debug("Unable to process SET_TARGET_PORT_GROUPS" - " while TPGS_EXPLICT_ALUA is disabled\n"); + " while TPGS_EXPLICIT_ALUA is disabled\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; ptr = &buf[4]; /* Skip over RESERVED area in header */ @@ -267,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) * the state is a primary or secondary target port asymmetric * access state. 
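core_alua_check_transition() now also receives the target port group's supported-states mask (the valid_states fetched above), so a requested transition is first gated on what the group actually advertises. Conceptually, using the ALUA_*_SUP flag names this patch introduces:

#include <linux/types.h>

/* Sketch of the gate; TRANSITION is intentionally absent because it
 * is set internally and is never user-selectable. */
static bool alua_state_supported_sketch(int state, int valid)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		return valid & ALUA_AO_SUP;
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return valid & ALUA_AN_SUP;
	case ALUA_ACCESS_STATE_STANDBY:
		return valid & ALUA_S_SUP;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return valid & ALUA_U_SUP;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		return valid & ALUA_LBD_SUP;
	case ALUA_ACCESS_STATE_OFFLINE:
		return valid & ALUA_O_SUP;
	default:
		return false;
	}
}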
*/ - rc = core_alua_check_transition(alua_access_state, &primary); + rc = core_alua_check_transition(alua_access_state, + valid_states, &primary); if (rc) { /* * If the SET TARGET PORT GROUPS attempts to establish @@ -312,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) continue; atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -323,13 +404,13 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); } else { /* - * Extact the RELATIVE TARGET PORT IDENTIFIER to identify + * Extract the RELATIVE TARGET PORT IDENTIFIER to identify * the Target Port in question for the the incoming * SET_TARGET_PORT_GROUPS op. */ @@ -374,11 +455,26 @@ out: return rc; } -static inline int core_alua_state_nonoptimized( +static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq) +{ + /* + * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; + * The ALUA additional sense code qualifier (ASCQ) is determined + * by the ALUA primary or secondary access state.. + */ + pr_debug("[%s]: ALUA TG Port not available, " + "SenseKey: NOT_READY, ASC/ASCQ: " + "0x04/0x%02x\n", + cmd->se_tfo->get_fabric_name(), alua_ascq); + + cmd->scsi_asc = 0x04; + cmd->scsi_ascq = alua_ascq; +} + +static inline void core_alua_state_nonoptimized( struct se_cmd *cmd, unsigned char *cdb, - int nonop_delay_msecs, - u8 *alua_ascq) + int nonop_delay_msecs) { /* * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked @@ -387,13 +483,85 @@ static inline int core_alua_state_nonoptimized( */ cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; cmd->alua_nonop_delay = nonop_delay_msecs; +} + +static inline int core_alua_state_lba_dependent( + struct se_cmd *cmd, + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_device *dev = cmd->se_dev; + u64 segment_size, segment_mult, sectors, lba; + + /* Only need to check for cdb actually containing LBAs */ + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)) + return 0; + + spin_lock(&dev->t10_alua.lba_map_lock); + segment_size = dev->t10_alua.lba_map_segment_size; + segment_mult = dev->t10_alua.lba_map_segment_multiplier; + sectors = cmd->data_length / dev->dev_attrib.block_size; + + lba = cmd->t_task_lba; + while (lba < cmd->t_task_lba + sectors) { + struct t10_alua_lba_map *cur_map = NULL, *map; + struct t10_alua_lba_map_member *map_mem; + + list_for_each_entry(map, &dev->t10_alua.lba_map_list, + lba_map_list) { + u64 start_lba, last_lba; + u64 first_lba = map->lba_map_first_lba; + + if (segment_mult) { + u64 tmp = lba; + start_lba = do_div(tmp, segment_size * segment_mult); + + last_lba = first_lba + segment_size - 1; + if (start_lba >= first_lba && + start_lba <= last_lba) { + lba += segment_size; + cur_map = map; + break; + } + } else { + last_lba = map->lba_map_last_lba; + if (lba >= first_lba && lba <= last_lba) { + lba = last_lba + 1; + cur_map = map; + break; + } + } + } + if (!cur_map) { + spin_unlock(&dev->t10_alua.lba_map_lock); + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); + return 1; + } + list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, + lba_map_mem_list) { + if (map_mem->lba_map_mem_alua_pg_id != + tg_pt_gp->tg_pt_gp_id) + continue; + switch(map_mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_STANDBY: + spin_unlock(&dev->t10_alua.lba_map_lock); + set_ascq(cmd, 
ASCQ_04H_ALUA_TG_PT_STANDBY); + return 1; + case ALUA_ACCESS_STATE_UNAVAILABLE: + spin_unlock(&dev->t10_alua.lba_map_lock); + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); + return 1; + default: + break; + } + } + } + spin_unlock(&dev->t10_alua.lba_map_lock); return 0; } static inline int core_alua_state_standby( struct se_cmd *cmd, - unsigned char *cdb, - u8 *alua_ascq) + unsigned char *cdb) { /* * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by @@ -408,12 +576,22 @@ static inline int core_alua_state_standby( case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: + case READ_CAPACITY: + return 0; + case SERVICE_ACTION_IN: + switch (cdb[1] & 0x1f) { + case SAI_READ_CAPACITY_16: + return 0; + default: + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); + return 1; + } case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); return 1; } case MAINTENANCE_OUT: @@ -421,7 +599,7 @@ static inline int core_alua_state_standby( case MO_SET_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); return 1; } case REQUEST_SENSE: @@ -431,7 +609,7 @@ static inline int core_alua_state_standby( case WRITE_BUFFER: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); return 1; } @@ -440,8 +618,7 @@ static inline int core_alua_state_standby( static inline int core_alua_state_unavailable( struct se_cmd *cmd, - unsigned char *cdb, - u8 *alua_ascq) + unsigned char *cdb) { /* * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by @@ -450,12 +627,13 @@ static inline int core_alua_state_unavailable( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); return 1; } case MAINTENANCE_OUT: @@ -463,7 +641,7 @@ static inline int core_alua_state_unavailable( case MO_SET_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); return 1; } case REQUEST_SENSE: @@ -471,7 +649,7 @@ static inline int core_alua_state_unavailable( case WRITE_BUFFER: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); return 1; } @@ -480,22 +658,22 @@ static inline int core_alua_state_unavailable( static inline int core_alua_state_transition( struct se_cmd *cmd, - unsigned char *cdb, - u8 *alua_ascq) + unsigned char *cdb) { /* - * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by + * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by * spc4r17 section 5.9.2.5 */ switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; + set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION); return 1; } case REQUEST_SENSE: @@ -503,7 +681,7 @@ static inline int core_alua_state_transition( case WRITE_BUFFER: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; + set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION); return 1; } @@ -511,9 +689,9 @@ static inline int core_alua_state_transition( } /* - * return 1: Is used to signal LUN not accecsable, and check condition/not ready + * return 1: Is used to signal LUN not 
accessible, and check condition/not ready * return 0: Used to signal success - * reutrn -1: Used to signal failure, and invalid cdb field + * return -1: Used to signal failure, and invalid cdb field */ sense_reason_t target_alua_state_check(struct se_cmd *cmd) @@ -525,8 +703,6 @@ target_alua_state_check(struct se_cmd *cmd) struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; int out_alua_state, nonop_delay_msecs; - u8 alua_ascq; - int ret; if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; @@ -542,9 +718,8 @@ target_alua_state_check(struct se_cmd *cmd) if (atomic_read(&port->sep_tg_pt_secondary_offline)) { pr_debug("ALUA: Got secondary offline status for local" " target port\n"); - alua_ascq = ASCQ_04H_ALUA_OFFLINE; - ret = 1; - goto out; + set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE); + return TCM_CHECK_CONDITION_NOT_READY; } /* * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the @@ -553,33 +728,42 @@ target_alua_state_check(struct se_cmd *cmd) * a ALUA logical unit group. */ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + if (!tg_pt_gp_mem) + return 0; + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); /* - * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional + * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional * statement so the compiler knows explicitly to check this case first. * For the Optimized ALUA access state case, we want to process the * incoming fabric cmd ASAP.. */ - if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) + if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED) return 0; switch (out_alua_state) { case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: - ret = core_alua_state_nonoptimized(cmd, cdb, - nonop_delay_msecs, &alua_ascq); + core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs); break; case ALUA_ACCESS_STATE_STANDBY: - ret = core_alua_state_standby(cmd, cdb, &alua_ascq); + if (core_alua_state_standby(cmd, cdb)) + return TCM_CHECK_CONDITION_NOT_READY; break; case ALUA_ACCESS_STATE_UNAVAILABLE: - ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq); + if (core_alua_state_unavailable(cmd, cdb)) + return TCM_CHECK_CONDITION_NOT_READY; break; case ALUA_ACCESS_STATE_TRANSITION: - ret = core_alua_state_transition(cmd, cdb, &alua_ascq); + if (core_alua_state_transition(cmd, cdb)) + return TCM_CHECK_CONDITION_NOT_READY; + break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + if (core_alua_state_lba_dependent(cmd, tg_pt_gp)) + return TCM_CHECK_CONDITION_NOT_READY; break; /* * OFFLINE is a secondary ALUA target port group access state, that is @@ -592,41 +776,43 @@ target_alua_state_check(struct se_cmd *cmd) return TCM_INVALID_CDB_FIELD; } -out: - if (ret > 0) { - /* - * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; - * The ALUA additional sense code qualifier (ASCQ) is determined - * by the ALUA primary or secondary access state.. - */ - pr_debug("[%s]: ALUA TG Port not available, " - "SenseKey: NOT_READY, ASC/ASCQ: " - "0x04/0x%02x\n", - cmd->se_tfo->get_fabric_name(), alua_ascq); - - cmd->scsi_asc = 0x04; - cmd->scsi_ascq = alua_ascq; - return TCM_CHECK_CONDITION_NOT_READY; - } - return 0; } /* - * Check implict and explict ALUA state change request. + * Check implicit and explicit ALUA state change request. 
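+ * The requested primary state is also validated against the caller's
+ * mask of supported states (tg_pt_gp_alua_supported_states); a state
+ * whose *_SUP bit is clear fails with TCM_INVALID_PARAMETER_LIST.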
*/ static sense_reason_t -core_alua_check_transition(int state, int *primary) +core_alua_check_transition(int state, int valid, int *primary) { + /* + * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are + * defined as primary target port asymmetric access states. + */ switch (state) { - case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + if (!(valid & ALUA_AO_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + if (!(valid & ALUA_AN_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_STANDBY: + if (!(valid & ALUA_S_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_UNAVAILABLE: - /* - * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are - * defined as primary target port asymmetric access states. - */ + if (!(valid & ALUA_U_SUP)) + goto not_supported; + *primary = 1; + break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + if (!(valid & ALUA_LBD_SUP)) + goto not_supported; *primary = 1; break; case ALUA_ACCESS_STATE_OFFLINE: @@ -634,29 +820,46 @@ core_alua_check_transition(int state, int *primary) * OFFLINE state is defined as a secondary target port * asymmetric access state. */ + if (!(valid & ALUA_O_SUP)) + goto not_supported; *primary = 0; break; + case ALUA_ACCESS_STATE_TRANSITION: + /* + * Transitioning is set internally, and + * cannot be selected manually. + */ + goto not_supported; default: pr_err("Unknown ALUA access state: 0x%02x\n", state); return TCM_INVALID_PARAMETER_LIST; } return 0; + +not_supported: + pr_err("ALUA access state %s not supported", + core_alua_dump_state(state)); + return TCM_INVALID_PARAMETER_LIST; } static char *core_alua_dump_state(int state) { switch (state) { - case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: return "Active/Optimized"; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: return "Active/NonOptimized"; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + return "LBA Dependent"; case ALUA_ACCESS_STATE_STANDBY: return "Standby"; case ALUA_ACCESS_STATE_UNAVAILABLE: return "Unavailable"; case ALUA_ACCESS_STATE_OFFLINE: return "Offline"; + case ALUA_ACCESS_STATE_TRANSITION: + return "Transitioning"; default: return "Unknown"; } @@ -669,10 +872,10 @@ char *core_alua_dump_status(int status) switch (status) { case ALUA_STATUS_NONE: return "None"; - case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: - return "Altered by Explict STPG"; - case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: - return "Altered by Implict ALUA"; + case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG: + return "Altered by Explicit STPG"; + case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA: + return "Altered by Implicit ALUA"; default: return "Unknown"; } @@ -715,94 +918,67 @@ static int core_alua_write_tpg_metadata( unsigned char *md_buf, u32 md_buf_len) { - mm_segment_t old_fs; - struct file *file; - struct iovec iov[1]; - int flags = O_RDWR | O_CREAT | O_TRUNC, ret; - - memset(iov, 0, sizeof(struct iovec)); + struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); + int ret; - file = filp_open(path, flags, 0600); - if (IS_ERR(file) || !file || !file->f_dentry) { - pr_err("filp_open(%s) for ALUA metadata failed\n", - path); + if (IS_ERR(file)) { + pr_err("filp_open(%s) for ALUA metadata failed\n", path); return -ENODEV; } - - iov[0].iov_base = &md_buf[0]; - iov[0].iov_len = md_buf_len; - - old_fs = get_fs(); - set_fs(get_ds()); - ret = vfs_writev(file, &iov[0], 1, &file->f_pos); - set_fs(old_fs); - - if (ret < 0) { + ret = kernel_write(file, md_buf, 
md_buf_len, 0); + if (ret < 0) pr_err("Error writing ALUA metadata file: %s\n", path); - filp_close(file, NULL); - return -EIO; - } - filp_close(file, NULL); - - return 0; + fput(file); + return (ret < 0) ? -EIO : 0; } /* * Called with tg_pt_gp->tg_pt_gp_md_mutex held */ static int core_alua_update_tpg_primary_metadata( - struct t10_alua_tg_pt_gp *tg_pt_gp, - int primary_state, - unsigned char *md_buf) + struct t10_alua_tg_pt_gp *tg_pt_gp) { + unsigned char *md_buf; struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; char path[ALUA_METADATA_PATH_LEN]; - int len; + int len, rc; + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } memset(path, 0, ALUA_METADATA_PATH_LEN); - len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "tg_pt_gp_id=%hu\n" "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", - tg_pt_gp->tg_pt_gp_id, primary_state, + tg_pt_gp->tg_pt_gp_id, + tg_pt_gp->tg_pt_gp_alua_pending_state, tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); - return core_alua_write_tpg_metadata(path, md_buf, len); + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(md_buf); + return rc; } -static int core_alua_do_transition_tg_pt( - struct t10_alua_tg_pt_gp *tg_pt_gp, - struct se_port *l_port, - struct se_node_acl *nacl, - unsigned char *md_buf, - int new_state, - int explict) +static void core_alua_do_transition_tg_pt_work(struct work_struct *work) { + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; struct se_dev_entry *se_deve; struct se_lun_acl *lacl; struct se_port *port; struct t10_alua_tg_pt_gp_member *mem; - int old_state = 0; - /* - * Save the old primary ALUA access state, and set the current state - * to ALUA_ACCESS_STATE_TRANSITION. - */ - old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_TRANSITION); - tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? - ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; - /* - * Check for the optional ALUA primary state transition delay - */ - if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) - msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, @@ -813,7 +989,7 @@ static int core_alua_do_transition_tg_pt( * change, a device server shall establish a unit attention * condition for the initiator port associated with every I_T * nexus with the additional sense code set to ASYMMETRIC - * ACCESS STATE CHAGED. + * ACCESS STATE CHANGED. 
* * After an explicit target port asymmetric access state * change, a device server shall establish a unit attention @@ -823,7 +999,7 @@ static int core_alua_do_transition_tg_pt( * TARGET PORT GROUPS command */ atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_lock_bh(&port->sep_alua_lock); @@ -832,14 +1008,17 @@ static int core_alua_do_transition_tg_pt( lacl = se_deve->se_lun_acl; /* * se_deve->se_lun_acl pointer may be NULL for a - * entry created without explict Node+MappedLUN ACLs + * entry created without explicit Node+MappedLUN ACLs */ if (!lacl) continue; - if (explict && - (nacl != NULL) && (nacl == lacl->se_lun_nacl) && - (l_port != NULL) && (l_port == port)) + if ((tg_pt_gp->tg_pt_gp_alua_access_status == + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && + (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && + (tg_pt_gp->tg_pt_gp_alua_port != NULL) && + (tg_pt_gp->tg_pt_gp_alua_port == port)) continue; core_scsi3_ua_allocate(lacl->se_lun_nacl, @@ -850,7 +1029,7 @@ static int core_alua_do_transition_tg_pt( spin_lock(&tg_pt_gp->tg_pt_gp_lock); atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); /* @@ -867,20 +1046,102 @@ static int core_alua_do_transition_tg_pt( */ if (tg_pt_gp->tg_pt_gp_write_metadata) { mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); - core_alua_update_tpg_primary_metadata(tg_pt_gp, - new_state, md_buf); + core_alua_update_tpg_primary_metadata(tg_pt_gp); mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); } /* * Set the current primary ALUA access state to the requested new state */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + tg_pt_gp->tg_pt_gp_alua_pending_state); pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " from primary access state %s to %s\n", (explict) ? "explict" : - "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), - tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), - core_alua_dump_state(new_state)); + " from primary access state %s to %s\n", (explicit) ? 
"explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic(); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + if (tg_pt_gp->tg_pt_gp_transition_complete) + complete(tg_pt_gp->tg_pt_gp_transition_complete); +} + +static int core_alua_do_transition_tg_pt( + struct t10_alua_tg_pt_gp *tg_pt_gp, + int new_state, + int explicit) +{ + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + DECLARE_COMPLETION_ONSTACK(wait); + + /* Nothing to be done here */ + if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) + return 0; + + if (new_state == ALUA_ACCESS_STATE_TRANSITION) + return -EAGAIN; + + /* + * Flush any pending transitions + */ + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == + ALUA_ACCESS_STATE_TRANSITION) { + /* Just in case */ + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + tg_pt_gp->tg_pt_gp_transition_complete = &wait; + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + wait_for_completion(&wait); + tg_pt_gp->tg_pt_gp_transition_complete = NULL; + return 0; + } + + /* + * Save the old primary ALUA access state, and set the current state + * to ALUA_ACCESS_STATE_TRANSITION. + */ + tg_pt_gp->tg_pt_gp_alua_previous_state = + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + ALUA_ACCESS_STATE_TRANSITION); + tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; + + /* + * Check for the optional ALUA primary state transition delay + */ + if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) + msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + + /* + * Take a reference for workqueue item + */ + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic(); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { + unsigned long transition_tmo; + + transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, + &tg_pt_gp->tg_pt_gp_transition_work, + transition_tmo); + } else { + tg_pt_gp->tg_pt_gp_transition_complete = &wait; + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, + &tg_pt_gp->tg_pt_gp_transition_work, 0); + wait_for_completion(&wait); + tg_pt_gp->tg_pt_gp_transition_complete = NULL; + } return 0; } @@ -891,31 +1152,23 @@ int core_alua_do_port_transition( struct se_port *l_port, struct se_node_acl *l_nacl, int new_state, - int explict) + int explicit) { struct se_device *dev; - struct se_port *port; - struct se_node_acl *nacl; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; - unsigned char *md_buf; - int primary; + int primary, valid_states, rc = 0; - if (core_alua_check_transition(new_state, &primary) != 0) + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; + if (core_alua_check_transition(new_state, valid_states, &primary) != 0) return -EINVAL; - md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); - if (!md_buf) { - pr_err("Unable to allocate buf for ALUA 
metadata\n"); - return -ENOMEM; - } - local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); lu_gp = local_lu_gp_mem->lu_gp; atomic_inc(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); /* * For storage objects that are members of the 'default_lu_gp', @@ -927,12 +1180,13 @@ int core_alua_do_port_transition( * core_alua_do_transition_tg_pt() will always return * success. */ - core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, - md_buf, new_state, explict); + l_tg_pt_gp->tg_pt_gp_alua_port = l_port; + l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; + rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, + new_state, explicit); atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_dec(); - kfree(md_buf); - return 0; + smp_mb__after_atomic(); + return rc; } /* * For all other LU groups aside from 'default_lu_gp', walk all of @@ -945,7 +1199,7 @@ int core_alua_do_port_transition( dev = lu_gp_mem->lu_gp_mem_dev; atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&lu_gp->lu_gp_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -957,7 +1211,7 @@ int core_alua_do_port_transition( continue; /* * If the target behavior port asymmetric access state - * is changed for any target port group accessiable via + * is changed for any target port group accessible via * a logical unit within a LU group, the target port * behavior group asymmetric access states for the same * target port group accessible via other logical units @@ -967,44 +1221,48 @@ int core_alua_do_port_transition( continue; if (l_tg_pt_gp == tg_pt_gp) { - port = l_port; - nacl = l_nacl; + tg_pt_gp->tg_pt_gp_alua_port = l_port; + tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; } else { - port = NULL; - nacl = NULL; + tg_pt_gp->tg_pt_gp_alua_port = NULL; + tg_pt_gp->tg_pt_gp_alua_nacl = NULL; } atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); /* * core_alua_do_transition_tg_pt() will always return * success. */ - core_alua_do_transition_tg_pt(tg_pt_gp, port, - nacl, md_buf, new_state, explict); + rc = core_alua_do_transition_tg_pt(tg_pt_gp, + new_state, explicit); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); + if (rc) + break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&lu_gp->lu_gp_lock); atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&lu_gp->lu_gp_lock); - pr_debug("Successfully processed LU Group: %s all ALUA TG PT" - " Group IDs: %hu %s transition to primary state: %s\n", - config_item_name(&lu_gp->lu_gp_group.cg_item), - l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", - core_alua_dump_state(new_state)); + if (!rc) { + pr_debug("Successfully processed LU Group: %s all ALUA TG PT" + " Group IDs: %hu %s transition to primary state: %s\n", + config_item_name(&lu_gp->lu_gp_group.cg_item), + l_tg_pt_gp->tg_pt_gp_id, + (explicit) ? 
"explicit" : "implicit", + core_alua_dump_state(new_state)); + } atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_dec(); - kfree(md_buf); - return 0; + smp_mb__after_atomic(); + return rc; } /* @@ -1012,13 +1270,18 @@ int core_alua_do_port_transition( */ static int core_alua_update_tpg_secondary_metadata( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, - struct se_port *port, - unsigned char *md_buf, - u32 md_buf_len) + struct se_port *port) { + unsigned char *md_buf; struct se_portal_group *se_tpg = port->sep_tpg; char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; - int len; + int len, rc; + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } memset(path, 0, ALUA_METADATA_PATH_LEN); memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); @@ -1030,7 +1293,7 @@ static int core_alua_update_tpg_secondary_metadata( snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); - len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" "alua_tg_pt_status=0x%02x\n", atomic_read(&port->sep_tg_pt_secondary_offline), port->sep_tg_pt_secondary_stat); @@ -1039,18 +1302,19 @@ static int core_alua_update_tpg_secondary_metadata( se_tpg->se_tpg_tfo->get_fabric_name(), wwn, port->sep_lun->unpacked_lun); - return core_alua_write_tpg_metadata(path, md_buf, len); + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(md_buf); + + return rc; } static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, - int explict, + int explicit, int offline) { struct t10_alua_tg_pt_gp *tg_pt_gp; - unsigned char *md_buf; - u32 md_buf_len; int trans_delay_msecs; spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1071,14 +1335,13 @@ static int core_alua_set_tg_pt_secondary_state( else atomic_set(&port->sep_tg_pt_secondary_offline, 0); - md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; - port->sep_tg_pt_secondary_stat = (explict) ? - ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; + port->sep_tg_pt_secondary_stat = (explicit) ? + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " to secondary access state: %s\n", (explict) ? "explict" : - "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + " to secondary access state: %s\n", (explicit) ? "explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, (offline) ? 
"OFFLINE" : "ONLINE"); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1093,23 +1356,115 @@ static int core_alua_set_tg_pt_secondary_state( * secondary state and status */ if (port->sep_tg_pt_secondary_write_md) { - md_buf = kzalloc(md_buf_len, GFP_KERNEL); - if (!md_buf) { - pr_err("Unable to allocate md_buf for" - " secondary ALUA access metadata\n"); - return -ENOMEM; - } mutex_lock(&port->sep_tg_pt_md_mutex); - core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, - md_buf, md_buf_len); + core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port); mutex_unlock(&port->sep_tg_pt_md_mutex); + } + + return 0; +} + +struct t10_alua_lba_map * +core_alua_allocate_lba_map(struct list_head *list, + u64 first_lba, u64 last_lba) +{ + struct t10_alua_lba_map *lba_map; + + lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); + if (!lba_map) { + pr_err("Unable to allocate struct t10_alua_lba_map\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&lba_map->lba_map_mem_list); + lba_map->lba_map_first_lba = first_lba; + lba_map->lba_map_last_lba = last_lba; + + list_add_tail(&lba_map->lba_map_list, list); + return lba_map; +} + +int +core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, + int pg_id, int state) +{ + struct t10_alua_lba_map_member *lba_map_mem; - kfree(md_buf); + list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, + lba_map_mem_list) { + if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { + pr_err("Duplicate pg_id %d in lba_map\n", pg_id); + return -EINVAL; + } } + lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); + if (!lba_map_mem) { + pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); + return -ENOMEM; + } + lba_map_mem->lba_map_mem_alua_state = state; + lba_map_mem->lba_map_mem_alua_pg_id = pg_id; + + list_add_tail(&lba_map_mem->lba_map_mem_list, + &lba_map->lba_map_mem_list); return 0; } +void +core_alua_free_lba_map(struct list_head *lba_list) +{ + struct t10_alua_lba_map *lba_map, *lba_map_tmp; + struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; + + list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, + lba_map_list) { + list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp, + &lba_map->lba_map_mem_list, + lba_map_mem_list) { + list_del(&lba_map_mem->lba_map_mem_list); + kmem_cache_free(t10_alua_lba_map_mem_cache, + lba_map_mem); + } + list_del(&lba_map->lba_map_list); + kmem_cache_free(t10_alua_lba_map_cache, lba_map); + } +} + +void +core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, + int segment_size, int segment_mult) +{ + struct list_head old_lba_map_list; + struct t10_alua_tg_pt_gp *tg_pt_gp; + int activate = 0, supported; + + INIT_LIST_HEAD(&old_lba_map_list); + spin_lock(&dev->t10_alua.lba_map_lock); + dev->t10_alua.lba_map_segment_size = segment_size; + dev->t10_alua.lba_map_segment_multiplier = segment_mult; + list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); + if (lba_map_list) { + list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); + activate = 1; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + + if (!tg_pt_gp->tg_pt_gp_valid_id) + continue; + supported = tg_pt_gp->tg_pt_gp_alua_supported_states; + if (activate) + supported |= ALUA_LBD_SUP; + else + supported &= ~ALUA_LBD_SUP; + tg_pt_gp->tg_pt_gp_alua_supported_states = supported; + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + 
core_alua_free_lba_map(&old_lba_map_list); +} + struct t10_alua_lu_gp * core_alua_allocate_lu_gp(const char *name, int def_group) { @@ -1243,7 +1598,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) * struct se_device is released via core_alua_free_lu_gp_mem(). * * If the passed lu_gp does NOT match the default_lu_gp, assume - * we want to re-assocate a given lu_gp_mem with default_lu_gp. + * we want to re-associate a given lu_gp_mem with default_lu_gp. */ spin_lock(&lu_gp_mem->lu_gp_mem_lock); if (lu_gp != default_lu_gp) @@ -1362,21 +1717,29 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); + INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, + core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; - tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); /* - * Enable both explict and implict ALUA support by default + * Enable both explicit and implicit ALUA support by default */ tg_pt_gp->tg_pt_gp_alua_access_type = - TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; + TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA; /* * Set the default Active/NonOptimized Delay in milliseconds */ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; - tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; + tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS; + + /* + * Enable all supported states + */ + tg_pt_gp->tg_pt_gp_alua_supported_states = + ALUA_T_SUP | ALUA_O_SUP | + ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP; if (def_group) { spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1476,7 +1839,7 @@ void core_alua_free_tg_pt_gp( * been called from target_core_alua_drop_tg_pt_gp(). * * Here we remove *tg_pt_gp from the global list so that - * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS + * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS * can be made while we are releasing struct t10_alua_tg_pt_gp. */ spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1484,6 +1847,8 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in @@ -1512,7 +1877,7 @@ void core_alua_free_tg_pt_gp( * core_alua_free_tg_pt_gp_mem(). * * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, - * assume we want to re-assocate a given tg_pt_gp_mem with + * assume we want to re-associate a given tg_pt_gp_mem with * default_tg_pt_gp. 
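 * (Re-associating with the device's default_tg_pt_gp keeps each bound
 * port with a well-defined primary group while this one is torn down.)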
*/ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1751,13 +2116,13 @@ ssize_t core_alua_show_access_type( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && - (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) - return sprintf(page, "Implict and Explict\n"); - else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) - return sprintf(page, "Implict\n"); - else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) - return sprintf(page, "Explict\n"); + if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) && + (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) + return sprintf(page, "Implicit and Explicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA) + return sprintf(page, "Implicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) + return sprintf(page, "Explicit\n"); else return sprintf(page, "None\n"); } @@ -1770,10 +2135,10 @@ ssize_t core_alua_store_access_type( unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_access_type\n"); - return -EINVAL; + return ret; } if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { pr_err("Illegal value for alua_access_type:" @@ -1782,11 +2147,11 @@ ssize_t core_alua_store_access_type( } if (tmp == 3) tg_pt_gp->tg_pt_gp_alua_access_type = - TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; + TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA; else if (tmp == 2) - tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA; else if (tmp == 1) - tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA; else tg_pt_gp->tg_pt_gp_alua_access_type = 0; @@ -1808,10 +2173,10 @@ ssize_t core_alua_store_nonop_delay_msecs( unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract nonop_delay_msecs\n"); - return -EINVAL; + return ret; } if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { pr_err("Passed nonop_delay_msecs: %lu, exceeds" @@ -1839,10 +2204,10 @@ ssize_t core_alua_store_trans_delay_msecs( unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract trans_delay_msecs\n"); - return -EINVAL; + return ret; } if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { pr_err("Passed trans_delay_msecs: %lu, exceeds" @@ -1855,14 +2220,14 @@ ssize_t core_alua_store_trans_delay_msecs( return count; } -ssize_t core_alua_show_implict_trans_secs( +ssize_t core_alua_show_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs); } -ssize_t core_alua_store_implict_trans_secs( +ssize_t core_alua_store_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) @@ -1870,18 +2235,18 @@ ssize_t core_alua_store_implict_trans_secs( unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { - pr_err("Unable to extract implict_trans_secs\n"); - return -EINVAL; + pr_err("Unable to extract implicit_trans_secs\n"); + return ret; } - if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { - pr_err("Passed implict_trans_secs: %lu, exceeds" - " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", 
tmp,
-			ALUA_MAX_IMPLICT_TRANS_SECS);
+	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
+		pr_err("Passed implicit_trans_secs: %lu, exceeds"
+			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
+			ALUA_MAX_IMPLICIT_TRANS_SECS);
 		return -EINVAL;
 	}
-	tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
+	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;

 	return count;
 }
@@ -1901,10 +2266,10 @@ ssize_t core_alua_store_preferred_bit(
 	unsigned long tmp;
 	int ret;

-	ret = strict_strtoul(page, 0, &tmp);
+	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
 		pr_err("Unable to extract preferred ALUA value\n");
-		return -EINVAL;
+		return ret;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
 		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
@@ -1936,10 +2301,10 @@ ssize_t core_alua_store_offline_bit(
 	if (!lun->lun_sep)
 		return -ENODEV;

-	ret = strict_strtoul(page, 0, &tmp);
+	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
 		pr_err("Unable to extract alua_tg_pt_offline value\n");
-		return -EINVAL;
+		return ret;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
 		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
@@ -1975,14 +2340,14 @@ ssize_t core_alua_store_secondary_status(
 	unsigned long tmp;
 	int ret;

-	ret = strict_strtoul(page, 0, &tmp);
+	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
 		pr_err("Unable to extract alua_tg_pt_status\n");
-		return -EINVAL;
+		return ret;
 	}
 	if ((tmp != ALUA_STATUS_NONE) &&
-	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
-	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
 		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
 			tmp);
 		return -EINVAL;
@@ -2008,10 +2373,10 @@ ssize_t core_alua_store_secondary_write_metadata(
 	unsigned long tmp;
 	int ret;

-	ret = strict_strtoul(page, 0, &tmp);
+	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
 		pr_err("Unable to extract alua_tg_pt_write_md\n");
-		return -EINVAL;
+		return ret;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
 		pr_err("Illegal value for alua_tg_pt_write_md:"
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index e539c3e7f4a..0a7d65e8040 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -7,29 +7,41 @@
  * from spc4r17 section 6.4.2 Table 135
  */
 #define TPGS_NO_ALUA				0x00
-#define TPGS_IMPLICT_ALUA			0x10
-#define TPGS_EXPLICT_ALUA			0x20
+#define TPGS_IMPLICIT_ALUA			0x10
+#define TPGS_EXPLICIT_ALUA			0x20

 /*
  * ASYMMETRIC ACCESS STATE field
  *
- * from spc4r17 section 6.27 Table 245
+ * from spc4r36j section 6.37 Table 307
  */
-#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED	0x0
+#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED	0x0
 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
 #define ALUA_ACCESS_STATE_STANDBY		0x2
 #define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT		0x4
 #define ALUA_ACCESS_STATE_OFFLINE		0xe
 #define ALUA_ACCESS_STATE_TRANSITION		0xf

 /*
+ * from spc4r36j section 6.37 Table 306
+ */
+#define ALUA_T_SUP		0x80
+#define ALUA_O_SUP		0x40
+#define ALUA_LBD_SUP		0x10
+#define ALUA_U_SUP		0x08
+#define ALUA_S_SUP		0x04
+#define ALUA_AN_SUP		0x02
+#define ALUA_AO_SUP		0x01
+
+/*
  * REPORT_TARGET_PORT_GROUP STATUS CODE
  *
  * from spc4r17 section 6.27 Table 246
  */
 #define ALUA_STATUS_NONE			0x00
-#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG	0x01
-#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA	0x02
+#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG	0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA	0x02

 /*
  * From spc4r17, Table D.1: ASC and ASCQ Assignment
@@ -46,17 +58,17 @@
 #define ALUA_DEFAULT_NONOP_DELAY_MSECS		100
 #define 
ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ /* - * Used for implict and explict ALUA transitional delay, that is disabled + * Used for implicit and explicit ALUA transitional delay, that is disabled * by default, and is intended to be used for debugging client side ALUA code. */ #define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 #define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ /* - * Used for the recommended application client implict transition timeout + * Used for the recommended application client implicit transition timeout * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header. */ -#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0 -#define ALUA_MAX_IMPLICT_TRANS_SECS 255 +#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0 +#define ALUA_MAX_IMPLICIT_TRANS_SECS 255 /* * Used by core_alua_update_tpg_primary_metadata() and * core_alua_update_tpg_secondary_metadata() @@ -67,18 +79,30 @@ */ #define ALUA_SECONDARY_METADATA_WWN_LEN 256 +/* Used by core_alua_update_tpg_(primary,secondary)_metadata */ +#define ALUA_MD_BUF_LEN 1024 + extern struct kmem_cache *t10_alua_lu_gp_cache; extern struct kmem_cache *t10_alua_lu_gp_mem_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +extern struct kmem_cache *t10_alua_lba_map_cache; +extern struct kmem_cache *t10_alua_lba_map_mem_cache; extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); +extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); extern int core_alua_check_nonop_delay(struct se_cmd *); extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, struct se_device *, struct se_port *, struct se_node_acl *, int, int); extern char *core_alua_dump_status(int); +extern struct t10_alua_lba_map *core_alua_allocate_lba_map( + struct list_head *, u64, u64); +extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int); +extern void core_alua_free_lba_map(struct list_head *); +extern void core_alua_set_lba_map(struct se_device *, struct list_head *, + int, int); extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); @@ -113,9 +137,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, char *); extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, const char *, size_t); -extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *, char *); -extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *, const char *, size_t); extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, char *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 4efb61b8d00..bf55c5a04cf 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -3,7 +3,7 @@ * * This file contains ConfigFS logic for the Generic Target Engine project. * - * (c) Copyright 2008-2012 RisingTide Systems LLC. + * (c) Copyright 2008-2013 Datera, Inc. * * Nicholas A. 
Bellinger <nab@kernel.org> * @@ -48,6 +48,7 @@ #include "target_core_alua.h" #include "target_core_pr.h" #include "target_core_rd.h" +#include "target_core_xcopy.h" extern struct t10_alua_lu_gp *default_lu_gp; @@ -176,16 +177,16 @@ static struct config_group *target_core_register_fabric( * struct target_fabric_configfs *tf will contain a usage reference. */ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + &tf->tf_cit_tmpl.tfc_wwn_cit); tf->tf_group.default_groups = tf->tf_default_groups; tf->tf_group.default_groups[0] = &tf->tf_disc_group; tf->tf_group.default_groups[1] = NULL; config_group_init_type_name(&tf->tf_group, name, - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + &tf->tf_cit_tmpl.tfc_wwn_cit); config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", - &TF_CIT_TMPL(tf)->tfc_discovery_cit); + &tf->tf_cit_tmpl.tfc_discovery_cit); pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); @@ -268,7 +269,7 @@ static struct configfs_subsystem target_core_fabrics = { }, }; -static struct configfs_subsystem *target_core_subsystem[] = { +struct configfs_subsystem *target_core_subsystem[] = { &target_core_fabrics, NULL, }; @@ -456,6 +457,10 @@ static int target_fabric_tf_ops_check( pr_err("Missing tfo->queue_tm_rsp()\n"); return -EINVAL; } + if (!tfo->aborted_task) { + pr_err("Missing tfo->aborted_task()\n"); + return -EINVAL; + } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in @@ -577,9 +582,9 @@ static ssize_t target_core_dev_store_attr_##_name( \ unsigned long val; \ int ret; \ \ - ret = strict_strtoul(page, 0, &val); \ + ret = kstrtoul(page, 0, &val); \ if (ret < 0) { \ - pr_err("strict_strtoul() failed with" \ + pr_err("kstrtoul() failed with" \ " ret: %d\n", ret); \ return -EINVAL; \ } \ @@ -609,6 +614,9 @@ static struct target_core_dev_attrib_attribute \ __CONFIGFS_EATTR_RO(_name, \ target_core_dev_show_attr_##_name); +DEF_DEV_ATTRIB(emulate_model_alias); +SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB(emulate_dpo); SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR); @@ -633,6 +641,21 @@ SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_tpws); SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(emulate_caw); +SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_3pc); +SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(pi_prot_type); +SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB_RO(hw_pi_prot_type); +SE_DEV_ATTR_RO(hw_pi_prot_type); + +DEF_DEV_ATTRIB(pi_prot_format); +SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB(enforce_pr_isids); SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); @@ -681,6 +704,7 @@ SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); static struct configfs_attribute *target_core_dev_attrib_attrs[] = { + &target_core_dev_attrib_emulate_model_alias.attr, &target_core_dev_attrib_emulate_dpo.attr, &target_core_dev_attrib_emulate_fua_write.attr, &target_core_dev_attrib_emulate_fua_read.attr, @@ -689,6 +713,11 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_emulate_tas.attr, &target_core_dev_attrib_emulate_tpu.attr, &target_core_dev_attrib_emulate_tpws.attr, + &target_core_dev_attrib_emulate_caw.attr, + &target_core_dev_attrib_emulate_3pc.attr, + 
&target_core_dev_attrib_pi_prot_type.attr, + &target_core_dev_attrib_hw_pi_prot_type.attr, + &target_core_dev_attrib_pi_prot_format.attr, &target_core_dev_attrib_enforce_pr_isids.attr, &target_core_dev_attrib_is_nonrot.attr, &target_core_dev_attrib_emulate_rest_reord.attr, @@ -979,7 +1008,6 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, struct se_node_acl *se_nacl; struct t10_pr_registration *pr_reg; char i_buf[PR_REG_ISID_ID_LEN]; - int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); @@ -988,12 +1016,11 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, return sprintf(page, "No SPC-3 Reservation holder\n"); se_nacl = pr_reg->pr_reg_nacl; - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), - se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); + se_nacl->initiatorname, i_buf); } static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev, @@ -1112,7 +1139,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( unsigned char buf[384]; char i_buf[PR_REG_ISID_ID_LEN]; ssize_t len = 0; - int reg_count = 0, prf_isid; + int reg_count = 0; len += sprintf(page+len, "SPC-3 PR Registrations:\n"); @@ -1123,12 +1150,11 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( memset(buf, 0, 384); memset(i_buf, 0, PR_REG_ISID_ID_LEN); tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", tfo->get_fabric_name(), - pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? 
- &i_buf[0] : "", pr_reg->pr_res_key, + pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key, pr_reg->pr_res_generation); if (len + strlen(buf) >= PAGE_SIZE) @@ -1309,9 +1335,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( ret = -ENOMEM; goto out; } - ret = strict_strtoull(arg_p, 0, &tmp_ll); + ret = kstrtoull(arg_p, 0, &tmp_ll); if (ret < 0) { - pr_err("strict_strtoull() failed for" + pr_err("kstrtoull() failed for" " sa_res_key=\n"); goto out; } @@ -1580,6 +1606,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { .store = target_core_store_dev_udev_path, }; +static ssize_t target_core_show_dev_enable(void *p, char *page) +{ + struct se_device *dev = p; + + return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); +} + static ssize_t target_core_store_dev_enable( void *p, const char *page, @@ -1605,8 +1638,8 @@ static ssize_t target_core_store_dev_enable( static struct target_core_configfs_attribute target_core_attr_dev_enable = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "enable", - .ca_mode = S_IWUSR }, - .show = NULL, + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_enable, .store = target_core_store_dev_enable, }; @@ -1724,6 +1757,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { .store = target_core_store_alua_lu_gp, }; +static ssize_t target_core_show_dev_lba_map(void *p, char *page) +{ + struct se_device *dev = p; + struct t10_alua_lba_map *map; + struct t10_alua_lba_map_member *mem; + char *b = page; + int bl = 0; + char state; + + spin_lock(&dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + bl += sprintf(b + bl, "%u %u\n", + dev->t10_alua.lba_map_segment_size, + dev->t10_alua.lba_map_segment_multiplier); + list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { + bl += sprintf(b + bl, "%llu %llu", + map->lba_map_first_lba, map->lba_map_last_lba); + list_for_each_entry(mem, &map->lba_map_mem_list, + lba_map_mem_list) { + switch (mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + state = 'O'; + break; + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + state = 'A'; + break; + case ALUA_ACCESS_STATE_STANDBY: + state = 'S'; + break; + case ALUA_ACCESS_STATE_UNAVAILABLE: + state = 'U'; + break; + default: + state = '.'; + break; + } + bl += sprintf(b + bl, " %d:%c", + mem->lba_map_mem_alua_pg_id, state); + } + bl += sprintf(b + bl, "\n"); + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return bl; +} + +static ssize_t target_core_store_dev_lba_map( + void *p, + const char *page, + size_t count) +{ + struct se_device *dev = p; + struct t10_alua_lba_map *lba_map = NULL; + struct list_head lba_list; + char *map_entries, *ptr; + char state; + int pg_num = -1, pg; + int ret = 0, num = 0, pg_id, alua_state; + unsigned long start_lba = -1, end_lba = -1; + unsigned long segment_size = -1, segment_mult = -1; + + map_entries = kstrdup(page, GFP_KERNEL); + if (!map_entries) + return -ENOMEM; + + INIT_LIST_HEAD(&lba_list); + while ((ptr = strsep(&map_entries, "\n")) != NULL) { + if (!*ptr) + continue; + + if (num == 0) { + if (sscanf(ptr, "%lu %lu\n", + &segment_size, &segment_mult) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + num++; + continue; + } + if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing end lba\n", 
num); + ret = -EINVAL; + break; + } + ptr++; + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing state definitions\n", + num); + ret = -EINVAL; + break; + } + ptr++; + lba_map = core_alua_allocate_lba_map(&lba_list, + start_lba, end_lba); + if (IS_ERR(lba_map)) { + ret = PTR_ERR(lba_map); + break; + } + pg = 0; + while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { + switch (state) { + case 'O': + alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; + break; + case 'A': + alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; + break; + case 'S': + alua_state = ALUA_ACCESS_STATE_STANDBY; + break; + case 'U': + alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; + break; + default: + pr_err("Invalid ALUA state '%c'\n", state); + ret = -EINVAL; + goto out; + } + + ret = core_alua_allocate_lba_map_mem(lba_map, + pg_id, alua_state); + if (ret) { + pr_err("Invalid target descriptor %d:%c " + "at line %d\n", + pg_id, state, num); + break; + } + pg++; + ptr = strchr(ptr, ' '); + if (ptr) + ptr++; + else + break; + } + if (pg_num == -1) + pg_num = pg; + else if (pg != pg_num) { + pr_err("Only %d from %d port groups definitions " + "at line %d\n", pg, pg_num, num); + ret = -EINVAL; + break; + } + num++; + } +out: + if (ret) { + core_alua_free_lba_map(&lba_list); + count = ret; + } else + core_alua_set_lba_map(dev, &lba_list, + segment_size, segment_mult); + kfree(map_entries); + return count; +} + +static struct target_core_configfs_attribute target_core_attr_dev_lba_map = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "lba_map", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_lba_map, + .store = target_core_store_dev_lba_map, +}; + static struct configfs_attribute *lio_core_dev_attrs[] = { &target_core_attr_dev_info.attr, &target_core_attr_dev_control.attr, @@ -1731,6 +1934,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = { &target_core_attr_dev_udev_path.attr, &target_core_attr_dev_enable.attr, &target_core_attr_dev_alua_lu_gp.attr, + &target_core_attr_dev_lba_map.attr, NULL, }; @@ -1828,11 +2032,11 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( unsigned long lu_gp_id; int ret; - ret = strict_strtoul(page, 0, &lu_gp_id); + ret = kstrtoul(page, 0, &lu_gp_id); if (ret < 0) { - pr_err("strict_strtoul() returned %d for" + pr_err("kstrtoul() returned %d for" " lu_gp_id\n", ret); - return -EINVAL; + return ret; } if (lu_gp_id > 0x0000ffff) { pr_err("ALUA lu_gp_id: %lu exceeds maximum:" @@ -2019,22 +2223,34 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( int new_state, ret; if (!tg_pt_gp->tg_pt_gp_valid_id) { - pr_err("Unable to do implict ALUA on non valid" + pr_err("Unable to do implicit ALUA on non valid" " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("Unable to set alua_access_state while device is" + " not configured\n"); + return -ENODEV; + } - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract new ALUA access state from" " %s\n", page); - return -EINVAL; + return ret; } new_state = (int)tmp; - if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { - pr_err("Unable to process implict configfs ALUA" - " transition while TPGS_IMPLICT_ALUA is disabled\n"); + if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { + pr_err("Unable to process implicit configfs ALUA" + " transition while TPGS_IMPLICIT_ALUA is disabled\n"); + return -EINVAL; + } + if 
(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+		/* LBA DEPENDENT is only allowed with implicit ALUA */
+		pr_err("Unable to process implicit configfs ALUA transition"
+		       " while explicit ALUA management is enabled\n");
 		return -EINVAL;
 	}
@@ -2071,17 +2287,17 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
 		return -EINVAL;
 	}
-	ret = strict_strtoul(page, 0, &tmp);
+	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
 		pr_err("Unable to extract new ALUA access status"
 				" from %s\n", page);
-		return -EINVAL;
+		return ret;
 	}
 	new_status = (int)tmp;

 	if ((new_status != ALUA_STATUS_NONE) &&
-	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
-	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
 		pr_err("Illegal ALUA access status: 0x%02x\n",
 				new_status);
 		return -EINVAL;
@@ -2114,6 +2330,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
 SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);

 /*
+ * alua_supported_states
+ */
+
+#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit)		\
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
+	struct t10_alua_tg_pt_gp *t, char *p)				\
+{									\
+	return sprintf(p, "%d\n", !!(t->_var & _bit));			\
+}
+
+#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit)		\
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
+	struct t10_alua_tg_pt_gp *t, const char *p, size_t c)		\
+{									\
+	unsigned long tmp;						\
+	int ret;							\
+									\
+	if (!t->tg_pt_gp_valid_id) {					\
+		pr_err("Unable to set " #_name " ALUA state on non"	\
+		       " valid tg_pt_gp ID: %hu\n",			\
+		       t->tg_pt_gp_valid_id);				\
+		return -EINVAL;						\
+	}								\
+									\
+	ret = kstrtoul(p, 0, &tmp);					\
+	if (ret < 0) {							\
+		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
+		return -EINVAL;						\
+	}								\
+	if (tmp > 1) {							\
+		pr_err("Invalid value '%lu', must be '0' or '1'\n", tmp); \
+		return -EINVAL;						\
+	}								\
+	if (tmp)	/* '1' enables the state, '0' disables it */	\
+		t->_var |= _bit;					\
+	else								\
+		t->_var &= ~_bit;					\
+									\
+	return c;							\
+}
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
+	tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
+	tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
+	tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
+	tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
+	tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
+	tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
+	tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
+	tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
+	tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
+	tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
+	tg_pt_gp_alua_supported_states, ALUA_AO_SUP); 
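+/*
+ * Usage sketch (configfs path assumed, not part of this patch): each
+ * SHOW/STORE pair above exposes one *_SUP bit from spc4r36j Table 306
+ * as a boolean attribute, e.g.
+ *
+ *	echo 0 > /sys/kernel/config/target/core/$HBA/$DEV/alua/default_tg_pt_gp/alua_support_standby
+ *
+ * clears ALUA_S_SUP, after which core_alua_check_transition() rejects
+ * a SET TARGET PORT GROUPS request for the standby state with
+ * TCM_INVALID_PARAMETER_LIST.
+ */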
+SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized, + tg_pt_gp_alua_supported_states, ALUA_AO_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized, + tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized, + tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR); + +/* * alua_write_metadata */ static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( @@ -2131,10 +2431,10 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_write_metadata\n"); - return -EINVAL; + return ret; } if ((tmp != 0) && (tmp != 1)) { @@ -2193,24 +2493,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); /* - * implict_trans_secs + * implicit_trans_secs */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - return core_alua_show_implict_trans_secs(tg_pt_gp, page); + return core_alua_show_implicit_trans_secs(tg_pt_gp, page); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { - return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); + return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count); } -SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); +SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR); /* * preferred @@ -2255,11 +2555,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( unsigned long tg_pt_gp_id; int ret; - ret = strict_strtoul(page, 0, &tg_pt_gp_id); + ret = kstrtoul(page, 0, &tg_pt_gp_id); if (ret < 0) { - pr_err("strict_strtoul() returned %d for" + pr_err("kstrtoul() returned %d for" " tg_pt_gp_id\n", ret); - return -EINVAL; + return ret; } if (tg_pt_gp_id > 0x0000ffff) { pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" @@ -2333,10 +2633,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { &target_core_alua_tg_pt_gp_alua_access_state.attr, &target_core_alua_tg_pt_gp_alua_access_status.attr, &target_core_alua_tg_pt_gp_alua_access_type.attr, + &target_core_alua_tg_pt_gp_alua_support_transitioning.attr, + &target_core_alua_tg_pt_gp_alua_support_offline.attr, + &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr, + &target_core_alua_tg_pt_gp_alua_support_unavailable.attr, + &target_core_alua_tg_pt_gp_alua_support_standby.attr, + &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr, + &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr, &target_core_alua_tg_pt_gp_alua_write_metadata.attr, &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, - &target_core_alua_tg_pt_gp_implict_trans_secs.attr, + &target_core_alua_tg_pt_gp_implicit_trans_secs.attr, &target_core_alua_tg_pt_gp_preferred.attr, &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, &target_core_alua_tg_pt_gp_members.attr, @@ -2668,10 +2975,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, if 
(transport->pmode_enable_hba == NULL) return -EINVAL; - ret = strict_strtoul(page, 0, &mode_flag); + ret = kstrtoul(page, 0, &mode_flag); if (ret < 0) { pr_err("Unable to extract hba mode flag: %d\n", ret); - return -EINVAL; + return ret; } if (hba->dev_count) { @@ -2759,11 +3066,11 @@ static struct config_group *target_core_call_addhbatotarget( str++; /* Skip to start of plugin dependent ID */ } - ret = strict_strtoul(str, 0, &plugin_dep_id); + ret = kstrtoul(str, 0, &plugin_dep_id); if (ret < 0) { - pr_err("strict_strtoul() returned %d for" + pr_err("kstrtoul() returned %d for" " plugin_dep_id\n", ret); - return ERR_PTR(-EINVAL); + return ERR_PTR(ret); } /* * Load up TCM subsystem plugins if they have not already been loaded. @@ -2829,7 +3136,7 @@ static int __init target_core_init_configfs(void) * and ALUA Logical Unit Group and Target Port Group infrastructure. */ target_cg = &subsys->su_group; - target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, + target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!target_cg->default_groups) { pr_err("Unable to allocate target_cg->default_groups\n"); @@ -2919,6 +3226,10 @@ static int __init target_core_init_configfs(void) if (ret < 0) goto out; + ret = target_xcopy_setup_pt(); + if (ret < 0) + goto out; + return 0; out: @@ -2991,6 +3302,7 @@ static void __exit target_core_exit_configfs(void) core_dev_release_virtual_lun0(); rd_module_exit(); + target_xcopy_release_pt(); release_se_kmem_caches(); } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e2695101bb9..98da9016715 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -4,7 +4,7 @@ * This file contains the TCM Virtual Device and Disk Transport * agnostic related functions. * - * (c) Copyright 2003-2012 RisingTide Systems LLC. + * (c) Copyright 2003-2013 Datera, Inc. * * Nicholas A. 
Bellinger <nab@kernel.org> * @@ -47,6 +47,9 @@ #include "target_core_pr.h" #include "target_core_ua.h" +DEFINE_MUTEX(g_device_mutex); +LIST_HEAD(g_device_list); + static struct se_hba *lun0_hba; /* not static, needed by tpg.c */ struct se_device *g_lun0_dev; @@ -68,7 +71,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) struct se_dev_entry *deve = se_cmd->se_deve; deve->total_cmds++; - deve->total_bytes += se_cmd->data_length; if ((se_cmd->data_direction == DMA_TO_DEVICE) && (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { @@ -85,13 +87,14 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) else if (se_cmd->data_direction == DMA_FROM_DEVICE) deve->read_bytes += se_cmd->data_length; - deve->deve_cmds++; - se_lun = deve->se_lun; se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + + percpu_ref_get(&se_lun->lun_ref); + se_cmd->lun_ref_active = true; } spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + + percpu_ref_get(&se_lun->lun_ref); + se_cmd->lun_ref_active = true; } /* Directly associate cmd with se_dev */ se_cmd->se_dev = se_lun->lun_se_dev; - /* TODO: get rid of this and use atomics for stats */ dev = se_lun->lun_se_dev; - spin_lock_irqsave(&dev->stats_lock, flags); - dev->num_cmds++; + atomic_long_inc(&dev->num_cmds); if (se_cmd->data_direction == DMA_TO_DEVICE) - dev->write_bytes += se_cmd->data_length; + atomic_long_add(se_cmd->data_length, &dev->write_bytes); else if (se_cmd->data_direction == DMA_FROM_DEVICE) - dev->read_bytes += se_cmd->data_length; - spin_unlock_irqrestore(&dev->stats_lock, flags); - - spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); - list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); - spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); + atomic_long_add(se_cmd->data_length, &dev->read_bytes); return 0; } @@ -226,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( continue; atomic_inc(&deve->pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_irq(&nacl->device_list_lock); return deve; @@ -275,17 +274,6 @@ int core_free_device_list_for_node( return 0; } -void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) -{ - struct se_dev_entry *deve; - unsigned long flags; - - spin_lock_irqsave(&se_nacl->device_list_lock, flags); - deve = se_nacl->device_list[se_cmd->orig_fe_lun]; - deve->deve_cmds--; - spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); -} - void core_update_device_list_access( u32 mapped_lun, u32 lun_access, @@ -325,14 +313,14 @@ int core_enable_device_list_for_node( deve = nacl->device_list[mapped_lun]; /* - * Check if the call is handling demo mode -> explict LUN ACL + * Check if the call is handling demo mode -> explicit LUN ACL * transition. This transition must be for the same struct se_lun * + mapped_lun that was setup in demo mode.. 
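The lookup path above trades the old stats_lock-protected counters for lock-free ones and pins the LUN with a percpu_ref for the life of the command. The counter half of that pattern, sketched with C11 atomics standing in for the kernel's atomic_long_t (the demo_ names are invented for illustration):

```c
#include <stdatomic.h>
#include <stdio.h>

struct demo_dev_stats {
	atomic_long num_cmds;
	atomic_long read_bytes;
	atomic_long write_bytes;
};

/* one relaxed add per command; no spinlock around the stats block */
static void demo_account_io(struct demo_dev_stats *s, int is_write, long len)
{
	atomic_fetch_add_explicit(&s->num_cmds, 1, memory_order_relaxed);
	if (is_write)
		atomic_fetch_add_explicit(&s->write_bytes, len,
					  memory_order_relaxed);
	else
		atomic_fetch_add_explicit(&s->read_bytes, len,
					  memory_order_relaxed);
}

int main(void)
{
	struct demo_dev_stats s = {0};

	demo_account_io(&s, 1, 4096);
	demo_account_io(&s, 0, 512);
	printf("cmds=%ld wr=%ld rd=%ld\n", atomic_load(&s.num_cmds),
	       atomic_load(&s.write_bytes), atomic_load(&s.read_bytes));
	return 0;
}
```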
*/ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (deve->se_lun_acl != NULL) { pr_err("struct se_dev_entry->se_lun_acl" - " already set for demo mode -> explict" + " already set for demo mode -> explicit" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -EINVAL; } if (deve->se_lun != lun) { pr_err("struct se_dev_entry->se_lun does" " not match passed struct se_lun for demo mode" - " -> explict LUN ACL transition\n"); + " -> explicit LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -EINVAL; } @@ -628,6 +616,7 @@ void core_dev_unexport( dev->export_count--; spin_unlock(&hba->device_lock); + lun->lun_sep = NULL; lun->lun_se_dev = NULL; } @@ -713,6 +702,44 @@ int se_dev_set_max_write_same_len( return 0; } +static void dev_set_t10_wwn_model_alias(struct se_device *dev) +{ + const char *configname; + + configname = config_item_name(&dev->dev_group.cg_item); + if (strlen(configname) >= 16) { + pr_warn("dev[%p]: Backstore name '%s' is too long for " + "INQUIRY_MODEL, truncating to 16 bytes\n", dev, + configname); + } + snprintf(&dev->t10_wwn.model[0], 16, "%s", configname); +} + +int se_dev_set_emulate_model_alias(struct se_device *dev, int flag) +{ + if (dev->export_count) { + pr_err("dev[%p]: Unable to change model alias" + " while export_count is %d\n", + dev, dev->export_count); + return -EINVAL; + } + + if (flag != 0 && flag != 1) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + if (flag) { + dev_set_t10_wwn_model_alias(dev); + } else { + strncpy(&dev->t10_wwn.model[0], + dev->transport->inquiry_prod, 16); + } + dev->dev_attrib.emulate_model_alias = flag; + + return 0; +} + int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { if (flag != 0 && flag != 1) { @@ -772,6 +799,12 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("emulate_write_cache not supported for pSCSI\n"); return -EINVAL; } + if (flag && + dev->transport->get_write_cache) { + pr_err("emulate_write_cache not supported for this device\n"); + return -EINVAL; + } + dev->dev_attrib.emulate_write_cache = flag; pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", dev, dev->dev_attrib.emulate_write_cache); @@ -860,6 +893,120 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) return 0; } +int se_dev_set_emulate_caw(struct se_device *dev, int flag) +{ + if (flag != 0 && flag != 1) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + dev->dev_attrib.emulate_caw = flag; + pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n", + dev, flag); + + return 0; +} + +int se_dev_set_emulate_3pc(struct se_device *dev, int flag) +{ + if (flag != 0 && flag != 1) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + dev->dev_attrib.emulate_3pc = flag; + pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n", + dev, flag); + + return 0; +} + +int se_dev_set_pi_prot_type(struct se_device *dev, int flag) +{ + int rc, old_prot = dev->dev_attrib.pi_prot_type; + + if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { + pr_err("Illegal value %d for pi_prot_type\n", flag); + return -EINVAL; + } + if (flag == 2) { + pr_err("DIF TYPE2 protection currently not supported\n"); + return -ENOSYS; + } + if (dev->dev_attrib.hw_pi_prot_type) { + pr_warn("DIF protection enabled on underlying hardware," + " ignoring\n"); + return 0; + } + if (!dev->transport->init_prot || !dev->transport->free_prot) { + /* 0 is only allowed
value for non-supporting backends */ + if (flag == 0) + return 0; + + pr_err("DIF protection not supported by backend: %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + dev->dev_attrib.pi_prot_type = flag; + + if (flag && !old_prot) { + rc = dev->transport->init_prot(dev); + if (rc) { + dev->dev_attrib.pi_prot_type = old_prot; + return rc; + } + + } else if (!flag && old_prot) { + dev->transport->free_prot(dev); + } + pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); + + return 0; +} + +int se_dev_set_pi_prot_format(struct se_device *dev, int flag) +{ + int rc; + + if (!flag) + return 0; + + if (flag != 1) { + pr_err("Illegal value %d for pi_prot_format\n", flag); + return -EINVAL; + } + if (!dev->transport->format_prot) { + pr_err("DIF protection format not supported by backend %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection format requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to format SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + rc = dev->transport->format_prot(dev); + if (rc) + return rc; + + pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); + + return 0; +} + int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { @@ -941,6 +1088,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) { + int block_size = dev->dev_attrib.block_size; + if (dev->export_count) { pr_err("dev[%p]: Unable to change SE Device" " fabric_max_sectors while export_count is %d\n", @@ -978,8 +1127,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) /* * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() */ + if (!block_size) { + block_size = 512; + pr_warn("Defaulting to 512 for zero block_size\n"); + } fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, - dev->dev_attrib.block_size); + block_size); dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", @@ -1042,29 +1195,34 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) dev->dev_attrib.block_size = block_size; pr_debug("dev[%p]: SE Device block_size changed to %u\n", dev, block_size); + + if (dev->dev_attrib.max_bytes_per_io) + dev->dev_attrib.hw_max_sectors = + dev->dev_attrib.max_bytes_per_io / block_size; + return 0; } struct se_lun *core_dev_add_lun( struct se_portal_group *tpg, struct se_device *dev, - u32 lun) + u32 unpacked_lun) { - struct se_lun *lun_p; + struct se_lun *lun; int rc; - lun_p = core_tpg_pre_addlun(tpg, lun); - if (IS_ERR(lun_p)) - return lun_p; + lun = core_tpg_alloc_lun(tpg, unpacked_lun); + if (IS_ERR(lun)) + return lun; - rc = core_tpg_post_addlun(tpg, lun_p, + rc = core_tpg_add_lun(tpg, lun, TRANSPORT_LUNFLAGS_READ_WRITE, dev); if (rc < 0) return ERR_PTR(rc); pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), - tpg->se_tpg_tfo->tpg_get_tag(tpg), 
lun_p->unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); /* * Update LUN maps for dynamically added initiators when @@ -1085,7 +1243,7 @@ struct se_lun *core_dev_add_lun( spin_unlock_irq(&tpg->acl_node_lock); } - return lun_p; + return lun; } /* core_dev_del_lun(): @@ -1176,24 +1334,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked struct se_lun_acl *core_dev_init_initiator_node_lun_acl( struct se_portal_group *tpg, + struct se_node_acl *nacl, u32 mapped_lun, - char *initiatorname, int *ret) { struct se_lun_acl *lacl; - struct se_node_acl *nacl; - if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { + if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) { pr_err("%s InitiatorName exceeds maximum size.\n", tpg->se_tpg_tfo->get_fabric_name()); *ret = -EOVERFLOW; return NULL; } - nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); - if (!nacl) { - *ret = -EINVAL; - return NULL; - } lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); if (!lacl) { pr_err("Unable to allocate memory for struct se_lun_acl.\n"); @@ -1204,7 +1356,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( INIT_LIST_HEAD(&lacl->lacl_list); lacl->mapped_lun = mapped_lun; lacl->se_lun_nacl = nacl; - snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); + snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", + nacl->initiatorname); return lacl; } @@ -1244,7 +1397,7 @@ int core_dev_add_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); atomic_inc(&lun->lun_acl_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&lun->lun_acl_lock); pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " @@ -1278,7 +1431,7 @@ int core_dev_del_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_del(&lacl->lacl_list); atomic_dec(&lun->lun_acl_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); spin_unlock(&lun->lun_acl_lock); core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, @@ -1347,6 +1500,7 @@ static void scsi_dump_inquiry(struct se_device *dev) struct se_device *target_alloc_device(struct se_hba *hba, const char *name) { struct se_device *dev; + struct se_lun *xcopy_lun; dev = hba->transport->alloc_device(hba, name); if (!dev) @@ -1355,6 +1509,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_link_magic = SE_DEV_LINK_MAGIC; dev->se_hba = hba; dev->transport = hba->transport; + dev->prot_length = sizeof(struct se_dif_v1_tuple); INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); @@ -1362,13 +1517,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&dev->delayed_cmd_list); INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); - spin_lock_init(&dev->stats_lock); + INIT_LIST_HEAD(&dev->g_dev_node); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->dev_reservation_lock); spin_lock_init(&dev->se_port_lock); spin_lock_init(&dev->se_tmr_lock); spin_lock_init(&dev->qf_cmd_lock); + sema_init(&dev->caw_sem, 1); atomic_set(&dev->dev_ordered_id, 0); INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); spin_lock_init(&dev->t10_wwn.t10_vpd_lock); @@ -1378,12 +1534,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) spin_lock_init(&dev->t10_pr.aptpl_reg_lock); INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 
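dev->prot_length is set above to sizeof(struct se_dif_v1_tuple), i.e. 8 bytes of protection information per logical block. A userspace mock of that tuple layout, assuming the conventional guard/app/ref split (the kernel's fields are big-endian __be16/__be32; plain integers are used here for brevity):

```c
#include <stdint.h>
#include <stdio.h>

/* 2 + 2 + 4 bytes: why prot_length works out to 8 bytes per block */
struct demo_dif_v1_tuple {
	uint16_t guard_tag;	/* CRC of the data block */
	uint16_t app_tag;	/* application-defined */
	uint32_t ref_tag;	/* lower 32 LBA bits for Type1 checking */
} __attribute__((packed));

int main(void)
{
	printf("protection bytes per block: %zu\n",
	       sizeof(struct demo_dif_v1_tuple));
	return 0;
}
```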
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); + INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); + spin_lock_init(&dev->t10_alua.lba_map_lock); - dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; dev->t10_wwn.t10_dev = dev; dev->t10_alua.t10_dev = dev; dev->dev_attrib.da_dev = dev; + dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO; dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; @@ -1392,6 +1550,9 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; + dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; + dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; dev->dev_attrib.is_nonrot = DA_IS_NONROT; dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; @@ -1405,6 +1566,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; + xcopy_lun = &dev->xcopy_lun; + xcopy_lun->lun_se_dev = dev; + init_completion(&xcopy_lun->lun_shutdown_comp); + INIT_LIST_HEAD(&xcopy_lun->lun_acl_list); + spin_lock_init(&xcopy_lun->lun_acl_lock); + spin_lock_init(&xcopy_lun->lun_sep_lock); + init_completion(&xcopy_lun->lun_ref_comp); + return dev; } @@ -1479,6 +1648,11 @@ int target_configure_device(struct se_device *dev) spin_lock(&hba->device_lock); hba->dev_count++; spin_unlock(&hba->device_lock); + + mutex_lock(&g_device_mutex); + list_add_tail(&dev->g_dev_node, &g_device_list); + mutex_unlock(&g_device_mutex); + return 0; out_free_alua: @@ -1497,15 +1671,23 @@ void target_free_device(struct se_device *dev) if (dev->dev_flags & DF_CONFIGURED) { destroy_workqueue(dev->tmr_wq); + mutex_lock(&g_device_mutex); + list_del(&dev->g_dev_node); + mutex_unlock(&g_device_mutex); + spin_lock(&hba->device_lock); hba->dev_count--; spin_unlock(&hba->device_lock); } core_alua_free_lu_gp_mem(dev); + core_alua_set_lba_map(dev, NULL, 0, 0); core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); + if (dev->transport->free_prot) + dev->transport->free_prot(dev); + dev->transport->free_device(dev); } @@ -1513,7 +1695,7 @@ int core_dev_setup_virtual_lun0(void) { struct se_hba *hba; struct se_device *dev; - char buf[16]; + char buf[] = "rd_pages=8,rd_nullio=1"; int ret; hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); @@ -1526,8 +1708,6 @@ int core_dev_setup_virtual_lun0(void) goto out_free_hba; } - memset(buf, 0, 16); - sprintf(buf, "rd_pages=8"); hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf)); ret = target_configure_device(dev); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 810263dfa4a..7de9f0475d0 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -4,7 +4,7 @@ * This file contains generic fabric module configfs infrastructure for * TCM v4.x code * - * (c) Copyright 2010-2012 RisingTide Systems LLC. + * (c) Copyright 2010-2013 Datera, Inc. * * Nicholas A. 
Bellinger <nab@linux-iscsi.org> * @@ -189,9 +189,11 @@ static ssize_t target_fabric_mappedlun_store_write_protect( struct se_node_acl *se_nacl = lacl->se_lun_nacl; struct se_portal_group *se_tpg = se_nacl->se_tpg; unsigned long op; + int ret; - if (strict_strtoul(page, 0, &op)) - return -EINVAL; + ret = kstrtoul(page, 0, &op); + if (ret) + return ret; if ((op != 1) && (op != 0)) return -EINVAL; @@ -350,13 +352,24 @@ static struct config_group *target_fabric_make_mappedlun( * Determine the Mapped LUN value. This is what the SCSI Initiator * Port will actually see. */ - if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) { + ret = kstrtoul(buf + 4, 0, &mapped_lun); + if (ret) + goto out; + if (mapped_lun > UINT_MAX) { + ret = -EINVAL; + goto out; + } + if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { + pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG" + "-1: %u for Target Portal Group: %u\n", mapped_lun, + TRANSPORT_MAX_LUNS_PER_TPG-1, + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); ret = -EINVAL; goto out; } - lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, - config_item_name(acl_ci), &ret); + lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, + mapped_lun, &ret); if (!lacl) { ret = -EINVAL; goto out; @@ -372,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun( } config_group_init_type_name(&lacl->se_lun_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); + &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit); config_group_init_type_name(&lacl->ml_stat_grps.stat_group, - "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); + "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit); lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; lacl_cg->default_groups[1] = NULL; @@ -491,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl( nacl_cg->default_groups[4] = NULL; config_group_init_type_name(&se_nacl->acl_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit); config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit); config_group_init_type_name(&se_nacl->acl_auth_group, "auth", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit); config_group_init_type_name(&se_nacl->acl_param_group, "param", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit); config_group_init_type_name(&se_nacl->acl_fabric_stat_group, "fabric_statistics", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit); return &se_nacl->acl_group; } @@ -582,7 +595,7 @@ static struct config_group *target_fabric_make_np( se_tpg_np->tpg_np_parent = se_tpg; config_group_init_type_name(&se_tpg_np->tpg_np_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_np_base_cit); return &se_tpg_np->tpg_np_group; } @@ -754,6 +767,11 @@ static int target_fabric_port_link( return -EFAULT; } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("se_device not configured yet, cannot port link\n"); + return -ENODEV; + } + tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; se_tpg = container_of(to_config_group(tpg_ci), struct se_portal_group, tpg_group); @@ -862,7 +880,10 @@ static struct config_group *target_fabric_make_lun( " \"lun_$LUN_NUMBER\"\n"); return ERR_PTR(-EINVAL); } - if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX) + errno = kstrtoul(name + 4, 0, 
&unpacked_lun); + if (errno) + return ERR_PTR(errno); + if (unpacked_lun > UINT_MAX) return ERR_PTR(-EINVAL); lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); @@ -878,14 +899,14 @@ static struct config_group *target_fabric_make_lun( } config_group_init_type_name(&lun->lun_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); + &tf->tf_cit_tmpl.tfc_tpg_port_cit); config_group_init_type_name(&lun->port_stat_grps.stat_group, - "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); + "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit); lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; lun_cg->default_groups[1] = NULL; port_stat_grp = &lun->port_stat_grps.stat_group; - port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, + port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4, GFP_KERNEL); if (!port_stat_grp->default_groups) { pr_err("Unable to allocate port_stat_grp->default_groups\n"); @@ -952,6 +973,19 @@ TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); /* End of tfc_tpg_attrib_cit */ +/* Start of tfc_tpg_auth_cit */ + +CONFIGFS_EATTR_OPS(target_fabric_tpg_auth, se_portal_group, tpg_auth_group); + +static struct configfs_item_operations target_fabric_tpg_auth_item_ops = { + .show_attribute = target_fabric_tpg_auth_attr_show, + .store_attribute = target_fabric_tpg_auth_attr_store, +}; + +TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL); + +/* End of tfc_tpg_auth_cit */ + /* Start of tfc_tpg_param_cit */ CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group); @@ -1017,21 +1051,24 @@ static struct config_group *target_fabric_make_tpg( se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group; se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group; se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group; - se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group; - se_tpg->tpg_group.default_groups[5] = NULL; + se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group; + se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group; + se_tpg->tpg_group.default_groups[6] = NULL; config_group_init_type_name(&se_tpg->tpg_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_base_cit); config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", - &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); + &tf->tf_cit_tmpl.tfc_tpg_lun_cit); config_group_init_type_name(&se_tpg->tpg_np_group, "np", - &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); + &tf->tf_cit_tmpl.tfc_tpg_np_cit); config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_cit); config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", - &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); + &tf->tf_cit_tmpl.tfc_tpg_attrib_cit); + config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", + &tf->tf_cit_tmpl.tfc_tpg_auth_cit); config_group_init_type_name(&se_tpg->tpg_param_group, "param", - &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); + &tf->tf_cit_tmpl.tfc_tpg_param_cit); return &se_tpg->tpg_group; } @@ -1118,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn( wwn->wwn_group.default_groups[1] = NULL; config_group_init_type_name(&wwn->wwn_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_cit); + &tf->tf_cit_tmpl.tfc_tpg_cit); config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", - &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); + &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit); return &wwn->wwn_group; } @@ -1189,6 +1226,7 @@
int target_fabric_setup_cits(struct target_fabric_configfs *tf) target_fabric_setup_tpg_np_cit(tf); target_fabric_setup_tpg_np_base_cit(tf); target_fabric_setup_tpg_attrib_cit(tf); + target_fabric_setup_tpg_auth_cit(tf); target_fabric_setup_tpg_param_cit(tf); target_fabric_setup_tpg_nacl_cit(tf); target_fabric_setup_tpg_nacl_base_cit(tf); diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 687b0b0a4aa..0d1cf8b4f49 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -4,7 +4,7 @@ * This file contains generic high level protocol identifier and PR * handlers for TCM fabric modules * - * (c) Copyright 2010-2012 RisingTide Systems LLC. + * (c) Copyright 2010-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@linux-iscsi.org> * diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index b9c88497e8f..7d6cddaec52 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -3,7 +3,7 @@ * * This file contains the Storage Engine <-> FILEIO transport specific functions * - * (c) Copyright 2005-2012 RisingTide Systems LLC. + * (c) Copyright 2005-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * @@ -30,8 +30,10 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/module.h> +#include <linux/falloc.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> +#include <asm/unaligned.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> @@ -64,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" " Target Core Stack %s\n", hba->hba_id, FD_VERSION, TARGET_CORE_MOD_VERSION); - pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" - " MaxSectors: %u\n", - hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", + hba->hba_id, fd_host->fd_host_id); return 0; } @@ -151,10 +152,7 @@ static int fd_configure_device(struct se_device *dev) struct request_queue *q = bdev_get_queue(inode->i_bdev); unsigned long long dev_size; - dev->dev_attrib.hw_block_size = - bdev_logical_block_size(inode->i_bdev); - dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); - + fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); /* * Determine the number of bytes from i_size_read() minus * one (1) logical sector from underlying struct block_device @@ -166,6 +164,33 @@ static int fd_configure_device(struct se_device *dev) " block_device blocks: %llu logical_block_size: %d\n", dev_size, div_u64(dev_size, fd_dev->fd_block_size), fd_dev->fd_block_size); + /* + * Check if the underlying struct block_device request_queue supports + * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM + * in ATA and we need to set TPE=1 + */ + if (blk_queue_discard(q)) { + dev->dev_attrib.max_unmap_lba_count = + q->limits.max_discard_sectors; + /* + * Currently hardcoded to 1 in Linux/SCSI code.. + */ + dev->dev_attrib.max_unmap_block_desc_count = 1; + dev->dev_attrib.unmap_granularity = + q->limits.discard_granularity >> 9; + dev->dev_attrib.unmap_granularity_alignment = + q->limits.discard_alignment; + pr_debug("FILEIO: BLOCK Discard support available," + " disabled by default\n"); + } + /* + * Enable write same emulation for FILEIO and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count.
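The blk_queue_discard() branch above maps the queue's byte-based discard limits onto the LBA-based UNMAP attributes that later surface in the Block Limits VPD; the >> 9 converts discard_granularity from bytes to 512-byte sectors. The same translation with made-up input values:

```c
#include <stdio.h>

int main(void)
{
	/* made-up values as a block queue might expose them */
	unsigned long long max_discard_sectors = 8388607; /* 512-byte sectors */
	unsigned int discard_granularity = 4096;	  /* bytes */
	unsigned int discard_alignment = 0;		  /* bytes */

	/* bytes -> 512-byte sectors, as in unmap_granularity above */
	unsigned int unmap_granularity = discard_granularity >> 9;

	printf("max_unmap_lba_count=%llu unmap_granularity=%u alignment=%u\n",
	       max_discard_sectors, unmap_granularity, discard_alignment);
	return 0;
}
```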
+ */ + dev->dev_attrib.max_write_same_len = 0xFFFF; + + if (blk_queue_nonrot(q)) + dev->dev_attrib.is_nonrot = 1; } else { if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { pr_err("FILEIO: Missing fd_dev_size=" @@ -174,12 +199,28 @@ goto fail; } - dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; - } + fd_dev->fd_block_size = FD_BLOCKSIZE; + /* + * Limit UNMAP emulation to 8k Number of LBAs (NoLB) + */ + dev->dev_attrib.max_unmap_lba_count = 0x2000; + /* + * Currently hardcoded to 1 in Linux/SCSI code.. + */ + dev->dev_attrib.max_unmap_block_desc_count = 1; + dev->dev_attrib.unmap_granularity = 1; + dev->dev_attrib.unmap_granularity_alignment = 0; - fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; + /* + * Limit WRITE_SAME w/ UNMAP=0 emulation to 4k Number of LBAs (NoLB) + * based upon struct iovec limit for vfs_writev() + */ + dev->dev_attrib.max_write_same_len = 0x1000; + } + dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; + dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; + dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { @@ -216,6 +257,72 @@ static void fd_free_device(struct se_device *dev) kfree(fd_dev); } +static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, + int is_write) +{ + struct se_device *se_dev = cmd->se_dev; + struct fd_dev *dev = FD_DEV(se_dev); + struct file *prot_fd = dev->fd_prot_file; + struct scatterlist *sg; + loff_t pos = (cmd->t_task_lba * se_dev->prot_length); + unsigned char *buf; + u32 prot_size, len, size; + int rc, ret = 1, i; + + prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * + se_dev->prot_length; + + if (!is_write) { + fd_prot->prot_buf = vzalloc(prot_size); + if (!fd_prot->prot_buf) { + pr_err("Unable to allocate fd_prot->prot_buf\n"); + return -ENOMEM; + } + buf = fd_prot->prot_buf; + + fd_prot->prot_sg_nents = cmd->t_prot_nents; + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * + fd_prot->prot_sg_nents, GFP_KERNEL); + if (!fd_prot->prot_sg) { + pr_err("Unable to allocate fd_prot->prot_sg\n"); + vfree(fd_prot->prot_buf); + return -ENOMEM; + } + size = prot_size; + + for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { + + len = min_t(u32, PAGE_SIZE, size); + sg_set_buf(sg, buf, len); + size -= len; + buf += len; + } + } + + if (is_write) { + rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos); + if (rc < 0 || prot_size != rc) { + pr_err("kernel_write() for fd_do_prot_rw failed:" + " %d\n", rc); + ret = -EINVAL; + } + } else { + rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size); + if (rc < 0) { + pr_err("kernel_read() for fd_do_prot_rw failed:" + " %d\n", rc); + ret = -EINVAL; + } + } + + if (is_write || ret < 0) { + kfree(fd_prot->prot_sg); + vfree(fd_prot->prot_buf); + } + + return ret; +} + static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, int is_write) { @@ -265,7 +372,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, * the expected virt_size for struct file w/o a backing struct * block_device.
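fd_do_prot_rw() above carves the flat prot_buf into scatterlist entries of at most PAGE_SIZE each before issuing kernel_read()/kernel_write() on the .protection file. The chunking loop in plain C, with a demo_seg struct standing in for struct scatterlist:

```c
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

struct demo_seg {
	unsigned char *buf;
	unsigned int len;
};

static unsigned int demo_fill_segs(unsigned char *buf, unsigned int size,
				   struct demo_seg *segs, unsigned int max_segs)
{
	unsigned int i = 0;

	/* mirror the for_each_sg() walk: min(PAGE_SIZE, remaining) per entry */
	while (size && i < max_segs) {
		unsigned int len = size < DEMO_PAGE_SIZE ? size : DEMO_PAGE_SIZE;

		segs[i].buf = buf;
		segs[i].len = len;
		buf += len;
		size -= len;
		i++;
	}
	return i;	/* number of segments used */
}

int main(void)
{
	static unsigned char prot_buf[10000];
	struct demo_seg segs[8];
	unsigned int n = demo_fill_segs(prot_buf, sizeof(prot_buf), segs, 8);

	printf("segments: %u (last len %u)\n", n, segs[n - 1].len);
	return 0;
}
```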
*/ - if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { + if (S_ISBLK(file_inode(fd)->i_mode)) { if (ret < 0 || ret != cmd->data_length) { pr_err("%s() returned %d, expecting %u for " "S_ISBLK\n", __func__, ret, @@ -328,13 +435,190 @@ fd_execute_sync_cache(struct se_cmd *cmd) return 0; } +static unsigned char * +fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg, + unsigned int len) +{ + struct se_device *se_dev = cmd->se_dev; + unsigned int block_size = se_dev->dev_attrib.block_size; + unsigned int i = 0, end; + unsigned char *buf, *p, *kmap_buf; + + buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate fd_execute_write_same buf\n"); + return NULL; + } + + kmap_buf = kmap(sg_page(sg)) + sg->offset; + if (!kmap_buf) { + pr_err("kmap() failed in fd_setup_write_same\n"); + kfree(buf); + return NULL; + } + /* + * Fill local *buf to contain multiple WRITE_SAME blocks up to + * min(len, PAGE_SIZE) + */ + p = buf; + end = min_t(unsigned int, len, PAGE_SIZE); + + while (i < end) { + memcpy(p, kmap_buf, block_size); + + i += block_size; + p += block_size; + } + kunmap(sg_page(sg)); + + return buf; +} + static sense_reason_t -fd_execute_rw(struct se_cmd *cmd) +fd_execute_write_same(struct se_cmd *cmd) +{ + struct se_device *se_dev = cmd->se_dev; + struct fd_dev *fd_dev = FD_DEV(se_dev); + struct file *f = fd_dev->fd_file; + struct scatterlist *sg; + struct iovec *iov; + mm_segment_t old_fs; + sector_t nolb = sbc_get_write_same_sectors(cmd); + loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; + unsigned int len, len_tmp, iov_num; + int i, rc; + unsigned char *buf; + + if (!nolb) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + sg = &cmd->t_data_sg[0]; + + if (cmd->t_data_nents > 1 || + sg->length != cmd->se_dev->dev_attrib.block_size) { + pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" + " block_size: %u\n", cmd->t_data_nents, sg->length, + cmd->se_dev->dev_attrib.block_size); + return TCM_INVALID_CDB_FIELD; + } + + len = len_tmp = nolb * se_dev->dev_attrib.block_size; + iov_num = DIV_ROUND_UP(len, PAGE_SIZE); + + buf = fd_setup_write_same_buf(cmd, sg, len); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + iov = vzalloc(sizeof(struct iovec) * iov_num); + if (!iov) { + pr_err("Unable to allocate fd_execute_write_same iovecs\n"); + kfree(buf); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * Map the single fabric received scatterlist block now populated + * in *buf into each iovec for I/O submission. 
+ */ + for (i = 0; i < iov_num; i++) { + iov[i].iov_base = buf; + iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE); + len_tmp -= iov[i].iov_len; + } + + old_fs = get_fs(); + set_fs(get_ds()); + rc = vfs_writev(f, &iov[0], iov_num, &pos); + set_fs(old_fs); + + vfree(iov); + kfree(buf); + + if (rc < 0 || rc != len) { + pr_err("vfs_writev() returned %d for write same\n", rc); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +static sense_reason_t +fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) +{ + struct file *file = priv; + struct inode *inode = file->f_mapping->host; + int ret; + + if (S_ISBLK(inode->i_mode)) { + /* The backend is block device, use discard */ + struct block_device *bdev = inode->i_bdev; + + ret = blkdev_issue_discard(bdev, lba, + nolb, GFP_KERNEL, 0); + if (ret < 0) { + pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", + ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + } else { + /* The backend is normal file, use fallocate */ + struct se_device *se_dev = cmd->se_dev; + loff_t pos = lba * se_dev->dev_attrib.block_size; + unsigned int len = nolb * se_dev->dev_attrib.block_size; + int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; + + if (!file->f_op->fallocate) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + ret = file->f_op->fallocate(file, mode, pos, len); + if (ret < 0) { + pr_warn("FILEIO: fallocate() failed: %d\n", ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + } + + return 0; +} + +static sense_reason_t +fd_execute_write_same_unmap(struct se_cmd *cmd) +{ + struct se_device *se_dev = cmd->se_dev; + struct fd_dev *fd_dev = FD_DEV(se_dev); + struct file *file = fd_dev->fd_file; + sector_t lba = cmd->t_task_lba; + sector_t nolb = sbc_get_write_same_sectors(cmd); + int ret; + + if (!nolb) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + + ret = fd_do_unmap(cmd, file, lba, nolb); + if (ret) + return ret; + + target_complete_cmd(cmd, GOOD); + return 0; +} + +static sense_reason_t +fd_execute_unmap(struct se_cmd *cmd) +{ + struct file *file = FD_DEV(cmd->se_dev)->fd_file; + + return sbc_execute_unmap(cmd, fd_do_unmap, file); +} + +static sense_reason_t +fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) { - struct scatterlist *sgl = cmd->t_data_sg; - u32 sgl_nents = cmd->t_data_nents; - enum dma_data_direction data_direction = cmd->data_direction; struct se_device *dev = cmd->se_dev; + struct fd_prot fd_prot; + sense_reason_t rc; int ret = 0; /* @@ -342,11 +626,51 @@ fd_execute_rw(struct se_cmd *cmd) * physical memory addresses to struct iovec virtual memory. 
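The loop continuing just after this comment points every iovec at the same single-block buffer, so one vectored write replays the WRITE_SAME pattern across the whole range. A userspace sketch of that fan-out (buffer size and iovec count are illustrative):

```c
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

int main(void)
{
	static char pattern[4096];	/* stands in for the kmap'd block buf */
	struct iovec iov[4];
	size_t remaining = sizeof(pattern) * 4;
	size_t i;

	memset(pattern, 0xab, sizeof(pattern));
	for (i = 0; i < 4; i++) {
		iov[i].iov_base = pattern;	/* every iovec aliases buf */
		iov[i].iov_len = remaining < sizeof(pattern) ?
				 remaining : sizeof(pattern);
		remaining -= iov[i].iov_len;
	}
	/* a writev(fd, iov, 4) here would emit the pattern four times over */
	printf("prepared %zu iovecs of %zu bytes each\n", i, sizeof(pattern));
	return 0;
}
```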
*/ if (data_direction == DMA_FROM_DEVICE) { + memset(&fd_prot, 0, sizeof(struct fd_prot)); + + if (cmd->prot_type) { + ret = fd_do_prot_rw(cmd, &fd_prot, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + ret = fd_do_rw(cmd, sgl, sgl_nents, 0); + + if (ret > 0 && cmd->prot_type) { + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, + 0, fd_prot.prot_sg, 0); + if (rc) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + return rc; + } + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + } } else { + memset(&fd_prot, 0, sizeof(struct fd_prot)); + + if (cmd->prot_type) { + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + + ret = fd_do_prot_rw(cmd, &fd_prot, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, + 0, fd_prot.prot_sg, 0); + if (rc) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + return rc; + } + } + ret = fd_do_rw(cmd, sgl, sgl_nents, 1); /* - * Perform implict vfs_fsync_range() for fd_do_writev() ops + * Perform implicit vfs_fsync_range() for fd_do_writev() ops * for SCSI WRITEs with Forced Unit Access (FUA) set. * Allow this to happen independent of WCE=0 setting. */ @@ -360,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd) vfs_fsync_range(fd_dev->fd_file, start, end, 1); } + + if (ret > 0 && cmd->prot_type) { + ret = fd_do_prot_rw(cmd, &fd_prot, true); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } } - if (ret < 0) + if (ret < 0) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } if (ret) target_complete_cmd(cmd, SAM_STAT_GOOD); @@ -417,10 +750,10 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev, ret = -ENOMEM; break; } - ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); + ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size); kfree(arg_p); if (ret < 0) { - pr_err("strict_strtoull() failed for" + pr_err("kstrtoull() failed for" " fd_dev_size=\n"); goto out; } @@ -476,16 +809,116 @@ static sector_t fd_get_blocks(struct se_device *dev) * to handle underlying block_device resize operations. 
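fd_get_blocks(), completed just below, reports the last addressable LBA rather than a raw block count, which is why one logical block is subtracted before dividing. The arithmetic in isolation, with made-up sizes:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long dev_size = 1ULL << 30;	/* 1 GiB backing file */
	unsigned long long block_size = 512;

	/* (dev_size - one block) / block_size == blocks - 1 == last LBA */
	unsigned long long last_lba = (dev_size - block_size) / block_size;

	printf("last LBA: %llu (total blocks: %llu)\n",
	       last_lba, dev_size / block_size);
	return 0;
}
```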
*/ if (S_ISBLK(i->i_mode)) - dev_size = (i_size_read(i) - fd_dev->fd_block_size); + dev_size = i_size_read(i); else dev_size = fd_dev->fd_dev_size; - return div_u64(dev_size, dev->dev_attrib.block_size); + return div_u64(dev_size - dev->dev_attrib.block_size, + dev->dev_attrib.block_size); +} + +static int fd_init_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *prot_file, *file = fd_dev->fd_file; + struct inode *inode; + int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; + char buf[FD_MAX_DEV_PROT_NAME]; + + if (!file) { + pr_err("Unable to locate fd_dev->fd_file\n"); + return -ENODEV; + } + + inode = file->f_mapping->host; + if (S_ISBLK(inode->i_mode)) { + pr_err("FILEIO Protection emulation only supported on" + " !S_ISBLK\n"); + return -ENOSYS; + } + + if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) + flags &= ~O_DSYNC; + + snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection", + fd_dev->fd_dev_name); + + prot_file = filp_open(buf, flags, 0600); + if (IS_ERR(prot_file)) { + pr_err("filp_open(%s) failed\n", buf); + ret = PTR_ERR(prot_file); + return ret; + } + fd_dev->fd_prot_file = prot_file; + + return 0; +} + +static int fd_format_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *prot_fd = fd_dev->fd_prot_file; + sector_t prot_length, prot; + unsigned char *buf; + loff_t pos = 0; + int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; + int rc, ret = 0, size, len; + + if (!dev->dev_attrib.pi_prot_type) { + pr_err("Unable to format_prot while pi_prot_type == 0\n"); + return -ENODEV; + } + if (!prot_fd) { + pr_err("Unable to locate fd_dev->fd_prot_file\n"); + return -ENODEV; + } + + buf = vzalloc(unit_size); + if (!buf) { + pr_err("Unable to allocate FILEIO prot buf\n"); + return -ENOMEM; + } + prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; + size = prot_length; + + pr_debug("Using FILEIO prot_length: %llu\n", + (unsigned long long)prot_length); + + memset(buf, 0xff, unit_size); + for (prot = 0; prot < prot_length; prot += unit_size) { + len = min(unit_size, size); + rc = kernel_write(prot_fd, buf, len, pos); + if (rc != len) { + pr_err("vfs_write to prot file failed: %d\n", rc); + ret = -ENODEV; + goto out; + } + pos += len; + size -= len; + } + +out: + vfree(buf); + return ret; +} + +static void fd_free_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + + if (!fd_dev->fd_prot_file) + return; + + filp_close(fd_dev->fd_prot_file, NULL); + fd_dev->fd_prot_file = NULL; } static struct sbc_ops fd_sbc_ops = { .execute_rw = fd_execute_rw, .execute_sync_cache = fd_execute_sync_cache, + .execute_write_same = fd_execute_write_same, + .execute_write_same_unmap = fd_execute_write_same_unmap, + .execute_unmap = fd_execute_unmap, }; static sense_reason_t @@ -510,6 +943,9 @@ static struct se_subsystem_api fileio_template = { .show_configfs_dev_params = fd_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = fd_get_blocks, + .init_prot = fd_init_prot, + .format_prot = fd_format_prot, + .free_prot = fd_free_prot, }; static int __init fileio_module_init(void) @@ -517,7 +953,7 @@ static int __init fileio_module_init(void) return transport_subsystem_register(&fileio_template); } -static void fileio_module_exit(void) +static void __exit fileio_module_exit(void) { transport_subsystem_release(&fileio_template); } diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index bc02b018ae4..182cbb29503 100644 --- 
a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -4,10 +4,14 @@ #define FD_VERSION "4.0" #define FD_MAX_DEV_NAME 256 +#define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16 #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 -#define FD_MAX_SECTORS 1024 +/* + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call + */ +#define FD_MAX_BYTES 8388608 #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 @@ -15,6 +19,13 @@ #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 #define FDBD_HAS_BUFFERED_IO_WCE 0x04 +#define FDBD_FORMAT_UNIT_SIZE 2048 + +struct fd_prot { + unsigned char *prot_buf; + struct scatterlist *prot_sg; + u32 prot_sg_nents; +}; struct fd_dev { struct se_device dev; @@ -29,6 +40,7 @@ struct fd_dev { u32 fd_block_size; unsigned long long fd_dev_size; struct file *fd_file; + struct file *fd_prot_file; /* FILEIO HBA device is connected to */ struct fd_host *fd_host; } ____cacheline_aligned; diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index d2616cd48f1..a25051a37dd 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -3,7 +3,7 @@ * * This file contains the TCM HBA Transport related functions. * - * (c) Copyright 2003-2012 RisingTide Systems LLC. + * (c) Copyright 2003-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index b526d23dcd4..7e6b857c6b3 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -4,7 +4,7 @@ * This file contains the Storage Engine <-> Linux BlockIO transport * specific functions. * - * (c) Copyright 2003-2012 RisingTide Systems LLC. + * (c) Copyright 2003-2013 Datera, Inc. * * Nicholas A. 
Bellinger <nab@kernel.org> * @@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev) struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct request_queue *q; struct block_device *bd = NULL; + struct blk_integrity *bi; fmode_t mode; int ret = -ENOMEM; @@ -154,8 +155,41 @@ static int iblock_configure_device(struct se_device *dev) if (blk_queue_nonrot(q)) dev->dev_attrib.is_nonrot = 1; + + bi = bdev_get_integrity(bd); + if (bi) { + struct bio_set *bs = ib_dev->ibd_bio_set; + + if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") || + !strcmp(bi->name, "T10-DIF-TYPE1-IP")) { + pr_err("IBLOCK export of blk_integrity: %s not" + " supported\n", bi->name); + ret = -ENOSYS; + goto out_blkdev_put; + } + + if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; + } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; + } + + if (dev->dev_attrib.pi_prot_type) { + if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { + pr_err("Unable to allocate bioset for PI\n"); + ret = -ENOMEM; + goto out_blkdev_put; + } + pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", + bs->bio_integrity_pool); + } + dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; + } + return 0; +out_blkdev_put: + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); out_free_bioset: bioset_free(ib_dev->ibd_bio_set); ib_dev->ibd_bio_set = NULL; @@ -171,6 +205,7 @@ static void iblock_free_device(struct se_device *dev) blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); if (ib_dev->ibd_bio_set != NULL) bioset_free(ib_dev->ibd_bio_set); + kfree(ib_dev); } @@ -288,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err) * Bump the ib_bio_err_cnt and release bio. 
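The integrity probing earlier in this hunk matches blk_integrity profile names to pick a DIF type, rejecting the IP-checksum variants outright. That string matching reduced to a standalone function; the demo_ enum values are placeholders rather than the kernel's TARGET_DIF_TYPE* constants:

```c
#include <stdio.h>
#include <string.h>

enum demo_dif_type { DEMO_DIF_TYPE0, DEMO_DIF_TYPE1, DEMO_DIF_TYPE3 };

static int demo_match_integrity(const char *name, enum demo_dif_type *out)
{
	/* IP-checksum profiles are rejected, as in the hunk above */
	if (!strcmp(name, "T10-DIF-TYPE3-IP") ||
	    !strcmp(name, "T10-DIF-TYPE1-IP"))
		return -1;
	if (!strcmp(name, "T10-DIF-TYPE3-CRC"))
		*out = DEMO_DIF_TYPE3;
	else if (!strcmp(name, "T10-DIF-TYPE1-CRC"))
		*out = DEMO_DIF_TYPE1;
	else
		*out = DEMO_DIF_TYPE0;	/* no recognized protection */
	return 0;
}

int main(void)
{
	enum demo_dif_type t;

	if (!demo_match_integrity("T10-DIF-TYPE1-CRC", &t))
		printf("pi_prot_type=%d\n", t);
	return 0;
}
```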
*/ atomic_inc(&ibr->ib_bio_err_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } bio_put(bio); @@ -318,7 +353,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) bio->bi_bdev = ib_dev->ibd_bd; bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; - bio->bi_sector = lba; + bio->bi_iter.bi_sector = lba; return bio; } @@ -379,95 +414,40 @@ iblock_execute_sync_cache(struct se_cmd *cmd) } static sense_reason_t -iblock_execute_unmap(struct se_cmd *cmd) +iblock_do_unmap(struct se_cmd *cmd, void *priv, + sector_t lba, sector_t nolb) { - struct se_device *dev = cmd->se_dev; - struct iblock_dev *ib_dev = IBLOCK_DEV(dev); - unsigned char *buf, *ptr = NULL; - sector_t lba; - int size; - u32 range; - sense_reason_t ret = 0; - int dl, bd_dl, err; - - if (cmd->data_length < 8) { - pr_warn("UNMAP parameter list length %u too small\n", - cmd->data_length); - return TCM_INVALID_PARAMETER_LIST; - } + struct block_device *bdev = priv; + int ret; - buf = transport_kmap_data_sg(cmd); - if (!buf) + ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); + if (ret < 0) { + pr_err("blkdev_issue_discard() failed: %d\n", ret); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - dl = get_unaligned_be16(&buf[0]); - bd_dl = get_unaligned_be16(&buf[2]); - - size = cmd->data_length - 8; - if (bd_dl > size) - pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", - cmd->data_length, bd_dl); - else - size = bd_dl; - - if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { - ret = TCM_INVALID_PARAMETER_LIST; - goto err; } - /* First UNMAP block descriptor starts at 8 byte offset */ - ptr = &buf[8]; - pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" - " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); - - while (size >= 16) { - lba = get_unaligned_be64(&ptr[0]); - range = get_unaligned_be32(&ptr[8]); - pr_debug("UNMAP: Using lba: %llu and range: %u\n", - (unsigned long long)lba, range); - - if (range > dev->dev_attrib.max_unmap_lba_count) { - ret = TCM_INVALID_PARAMETER_LIST; - goto err; - } - - if (lba + range > dev->transport->get_blocks(dev) + 1) { - ret = TCM_ADDRESS_OUT_OF_RANGE; - goto err; - } - - err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range, - GFP_KERNEL, 0); - if (err < 0) { - pr_err("blkdev_issue_discard() failed: %d\n", - err); - ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - goto err; - } + return 0; +} - ptr += 16; - size -= 16; - } +static sense_reason_t +iblock_execute_unmap(struct se_cmd *cmd) +{ + struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; -err: - transport_kunmap_data_sg(cmd); - if (!ret) - target_complete_cmd(cmd, GOOD); - return ret; + return sbc_execute_unmap(cmd, iblock_do_unmap, bdev); } static sense_reason_t iblock_execute_write_same_unmap(struct se_cmd *cmd) { - struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); - int rc; + struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; + sector_t lba = cmd->t_task_lba; + sector_t nolb = sbc_get_write_same_sectors(cmd); + int ret; - rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba, - spc_get_write_same_sectors(cmd), GFP_KERNEL, 0); - if (rc < 0) { - pr_warn("blkdev_issue_discard() failed: %d\n", rc); - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } + ret = iblock_do_unmap(cmd, bdev, lba, nolb); + if (ret) + return ret; target_complete_cmd(cmd, GOOD); return 0; @@ -481,7 +461,7 @@ iblock_execute_write_same(struct se_cmd *cmd) struct bio *bio; struct bio_list list; sector_t block_lba = cmd->t_task_lba; - sector_t sectors = 
spc_get_write_same_sectors(cmd); + sector_t sectors = sbc_get_write_same_sectors(cmd); sg = &cmd->t_data_sg[0]; @@ -590,10 +570,10 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, ret = -ENOMEM; break; } - ret = strict_strtoul(arg_p, 0, &tmp_readonly); + ret = kstrtoul(arg_p, 0, &tmp_readonly); kfree(arg_p); if (ret < 0) { - pr_err("strict_strtoul() failed for" + pr_err("kstrtoul() failed for" " readonly=\n"); goto out; } @@ -640,34 +620,83 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) return bl; } +static int +iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) +{ + struct se_device *dev = cmd->se_dev; + struct blk_integrity *bi; + struct bio_integrity_payload *bip; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct scatterlist *sg; + int i, rc; + + bi = bdev_get_integrity(ib_dev->ibd_bd); + if (!bi) { + pr_err("Unable to locate bio_integrity\n"); + return -ENODEV; + } + + bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); + if (!bip) { + pr_err("Unable to allocate bio_integrity_payload\n"); + return -ENOMEM; + } + + bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) * + dev->prot_length; + bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; + + pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, + (unsigned long long)bip->bip_iter.bi_sector); + + for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { + + rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, + sg->offset); + if (rc != sg->length) { + pr_err("bio_integrity_add_page() failed; %d\n", rc); + return -ENOMEM; + } + + pr_debug("Added bio integrity page: %p length: %d offset; %d\n", + sg_page(sg), sg->length, sg->offset); + } + + return 0; +} + static sense_reason_t -iblock_execute_rw(struct se_cmd *cmd) +iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) { - struct scatterlist *sgl = cmd->t_data_sg; - u32 sgl_nents = cmd->t_data_nents; - enum dma_data_direction data_direction = cmd->data_direction; struct se_device *dev = cmd->se_dev; struct iblock_req *ibr; - struct bio *bio; + struct bio *bio, *bio_start; struct bio_list list; struct scatterlist *sg; u32 sg_num = sgl_nents; sector_t block_lba; unsigned bio_cnt; - int rw; + int rw = 0; int i; if (data_direction == DMA_TO_DEVICE) { + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); /* - * Force data to disk if we pretend to not have a volatile - * write cache, or the initiator set the Force Unit Access bit. + * Force writethrough using WRITE_FUA if a volatile write cache + * is not enabled, or if initiator set the Force Unit Access bit. 
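The write-cache comment above is implemented just below as a small decision over q->flush_flags and SCF_FUA. Extracted as a pure function for clarity; the DEMO_REQ_* flag values are placeholders for the kernel's REQ_FLUSH/REQ_FUA bits:

```c
#include <stdio.h>

#define DEMO_REQ_FLUSH (1 << 0)	/* device advertises a volatile write cache */
#define DEMO_REQ_FUA   (1 << 1)	/* device honors per-request FUA */

enum demo_rw { DEMO_WRITE, DEMO_WRITE_FUA };

static enum demo_rw demo_pick_rw(unsigned int flush_flags, int scf_fua)
{
	if (flush_flags & DEMO_REQ_FUA) {
		if (scf_fua)
			return DEMO_WRITE_FUA;	/* initiator requested FUA */
		if (!(flush_flags & DEMO_REQ_FLUSH))
			return DEMO_WRITE_FUA;	/* no cache: force writethrough */
	}
	return DEMO_WRITE;
}

int main(void)
{
	printf("fua+scf=%d fua-only=%d fua+flush=%d\n",
	       demo_pick_rw(DEMO_REQ_FUA, 1),
	       demo_pick_rw(DEMO_REQ_FUA, 0),
	       demo_pick_rw(DEMO_REQ_FUA | DEMO_REQ_FLUSH, 0));
	return 0;
}
```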
*/ - if (dev->dev_attrib.emulate_write_cache == 0 || - (dev->dev_attrib.emulate_fua_write > 0 && - (cmd->se_cmd_flags & SCF_FUA))) - rw = WRITE_FUA; - else + if (q->flush_flags & REQ_FUA) { + if (cmd->se_cmd_flags & SCF_FUA) + rw = WRITE_FUA; + else if (!(q->flush_flags & REQ_FLUSH)) + rw = WRITE_FUA; + else + rw = WRITE; + } else { rw = WRITE; + } } else { rw = READ; } @@ -705,6 +734,7 @@ iblock_execute_rw(struct se_cmd *cmd) if (!bio) goto fail_free_ibr; + bio_start = bio; bio_list_init(&list); bio_list_add(&list, bio); @@ -738,6 +768,12 @@ iblock_execute_rw(struct se_cmd *cmd) sg_num--; } + if (cmd->prot_type) { + int rc = iblock_alloc_bip(cmd, bio_start); + if (rc) + goto fail_put_bios; + } + iblock_submit_bios(&list, rw); iblock_complete_cmd(cmd); return 0; @@ -760,6 +796,45 @@ static sector_t iblock_get_blocks(struct se_device *dev) return iblock_emulate_read_cap_with_block_size(dev, bd, q); } +static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + int ret; + + ret = bdev_alignment_offset(bd); + if (ret == -1) + return 0; + + /* convert offset-bytes to offset-lbas */ + return ret / bdev_logical_block_size(bd); +} + +static unsigned int iblock_get_lbppbe(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd); + + return ilog2(logs_per_phys); +} + +static unsigned int iblock_get_io_min(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_min(bd); +} + +static unsigned int iblock_get_io_opt(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_opt(bd); +} + static struct sbc_ops iblock_sbc_ops = { .execute_rw = iblock_execute_rw, .execute_sync_cache = iblock_execute_sync_cache, @@ -774,6 +849,15 @@ iblock_parse_cdb(struct se_cmd *cmd) return sbc_parse_cdb(cmd, &iblock_sbc_ops); } +static bool iblock_get_write_cache(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + struct request_queue *q = bdev_get_queue(bd); + + return q->flush_flags & REQ_FLUSH; +} + static struct se_subsystem_api iblock_template = { .name = "iblock", .inquiry_prod = "IBLOCK", @@ -790,6 +874,11 @@ static struct se_subsystem_api iblock_template = { .show_configfs_dev_params = iblock_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = iblock_get_blocks, + .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas, + .get_lbppbe = iblock_get_lbppbe, + .get_io_min = iblock_get_io_min, + .get_io_opt = iblock_get_io_opt, + .get_write_cache = iblock_get_write_cache, }; static int __init iblock_module_init(void) @@ -797,7 +886,7 @@ static int __init iblock_module_init(void) return transport_subsystem_register(&iblock_template); } -static void iblock_module_exit(void) +static void __exit iblock_module_exit(void) { transport_subsystem_release(&iblock_template); } diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 93e9c1f580b..de9cab708f4 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp; struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); int 
core_free_device_list_for_node(struct se_node_acl *, struct se_portal_group *); -void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); void core_update_device_list_access(u32, u32, struct se_node_acl *); int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32, u32, struct se_node_acl *, struct se_portal_group *); @@ -25,6 +24,7 @@ int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); int se_dev_set_unmap_granularity(struct se_device *, u32); int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); int se_dev_set_max_write_same_len(struct se_device *, u32); +int se_dev_set_emulate_model_alias(struct se_device *, int); int se_dev_set_emulate_dpo(struct se_device *, int); int se_dev_set_emulate_fua_write(struct se_device *, int); int se_dev_set_emulate_fua_read(struct se_device *, int); @@ -33,6 +33,10 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); int se_dev_set_emulate_tas(struct se_device *, int); int se_dev_set_emulate_tpu(struct se_device *, int); int se_dev_set_emulate_tpws(struct se_device *, int); +int se_dev_set_emulate_caw(struct se_device *, int); +int se_dev_set_emulate_3pc(struct se_device *, int); +int se_dev_set_pi_prot_type(struct se_device *, int); +int se_dev_set_pi_prot_format(struct se_device *, int); int se_dev_set_enforce_pr_isids(struct se_device *, int); int se_dev_set_is_nonrot(struct se_device *, int); int se_dev_set_emulate_rest_reord(struct se_device *dev, int); @@ -45,7 +49,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3 int core_dev_del_lun(struct se_portal_group *, u32); struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, - u32, char *, int *); + struct se_node_acl *, u32, int *); int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, struct se_lun_acl *, u32, u32); int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, @@ -73,13 +77,11 @@ extern struct se_device *g_lun0_dev; struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, const char *); -struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, - unsigned char *); void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); -struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); -int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, - u32, void *); +struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); +int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, + u32, struct se_device *); struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); @@ -100,7 +102,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); -int transport_clear_lun_from_sessions(struct se_lun *); +int transport_clear_lun_ref(struct se_lun *); void transport_send_task_abort(struct se_cmd *); sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); void target_qf_do_work(struct work_struct *work); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 
8e0290b38e4..df357862286 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -4,7 +4,7 @@ * This file contains SPC-3 compliant persistent reservations and * legacy SPC-2 reservations with compatible reservation handling (CRH=1) * - * (c) Copyright 2009-2012 RisingTide Systems LLC. + * (c) Copyright 2009-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/list.h> +#include <linux/file.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <asm/unaligned.h> @@ -52,18 +53,28 @@ struct pr_transport_id_holder { struct list_head dest_list; }; -int core_pr_dump_initiator_port( +void core_pr_dump_initiator_port( struct t10_pr_registration *pr_reg, char *buf, u32 size) { if (!pr_reg->isid_present_at_reg) - return 0; + buf[0] = '\0'; - snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); - return 1; + snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid); } +enum register_type { + REGISTER, + REGISTER_AND_IGNORE_EXISTING_KEY, + REGISTER_AND_MOVE, +}; + +enum preempt_type { + PREEMPT, + PREEMPT_AND_ABORT, +}; + static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, struct t10_pr_registration *, int); @@ -463,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder( * statement. */ if (!ret && !other_cdb) { - pr_debug("Allowing explict CDB: 0x%02x for %s" + pr_debug("Allowing explicit CDB: 0x%02x for %s" " reservation holder\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -496,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder( */ if (!registered_nexus) { - pr_debug("Allowing implict CDB: 0x%02x" + pr_debug("Allowing implicit CDB: 0x%02x" " for %s reservation on unregistered" " nexus\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -511,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder( * allow commands from registered nexuses. */ - pr_debug("Allowing implict CDB: 0x%02x for %s" + pr_debug("Allowing implicit CDB: 0x%02x for %s" " reservation\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -595,14 +606,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( return NULL; } - pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len, - GFP_ATOMIC); - if (!pr_reg->pr_aptpl_buf) { - pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); - kmem_cache_free(t10_pr_reg_cache, pr_reg); - return NULL; - } - INIT_LIST_HEAD(&pr_reg->pr_reg_list); INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); @@ -672,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( spin_lock(&dev->se_port_lock); list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { atomic_inc(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); spin_lock_bh(&port->sep_alua_lock); @@ -680,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( alua_port_list) { /* * This pointer will be NULL for demo mode MappedLUNs - * that have not been make explict via a ConfigFS + * that have not been make explicit via a ConfigFS * MappedLUN group for the SCSI Initiator Node ACL. 
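The register_type and preempt_type enums defined above replace the bare 0/1/2 ints that were threaded through the REGISTER and PREEMPT paths. A small self-contained sketch of how the debug suffix is derived from the enum in the __core_scsi3_dump_registration() hunk below (userspace, illustrative only):

#include <stdio.h>

enum register_type {
	REGISTER,
	REGISTER_AND_IGNORE_EXISTING_KEY,
	REGISTER_AND_MOVE,
};

/* Mirrors the suffix selection in __core_scsi3_dump_registration()
 * after the enum conversion. */
static const char *register_suffix(enum register_type t)
{
	switch (t) {
	case REGISTER_AND_MOVE:
		return "_AND_MOVE";
	case REGISTER_AND_IGNORE_EXISTING_KEY:
		return "_AND_IGNORE_EXISTING_KEY";
	default:
		return "";
	}
}

int main(void)
{
	printf("REGISTER%s\n", register_suffix(REGISTER_AND_MOVE));
	printf("REGISTER%s\n", register_suffix(REGISTER));
	return 0;
}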
*/ if (!deve_tmp->se_lun_acl) @@ -707,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( continue; atomic_inc(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_bh(&port->sep_alua_lock); /* * Grab a configfs group dependency that is released @@ -720,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( pr_err("core_scsi3_lunacl_depend" "_item() failed\n"); atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); goto out; } /* @@ -737,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( sa_res_key, all_tg_pt, aptpl); if (!pr_reg_atp) { atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_lunacl_undepend_item(deve_tmp); goto out; } @@ -752,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( spin_lock(&dev->se_port_lock); atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&dev->se_port_lock); @@ -793,7 +796,6 @@ int core_scsi3_alloc_aptpl_registration( pr_err("Unable to allocate struct t10_pr_registration\n"); return -ENOMEM; } - pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); INIT_LIST_HEAD(&pr_reg->pr_reg_list); INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); @@ -847,11 +849,9 @@ static void core_scsi3_aptpl_reserve( struct t10_pr_registration *pr_reg) { char i_buf[PR_REG_ISID_ID_LEN]; - int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); spin_lock(&dev->dev_reservation_lock); dev->dev_pr_res_holder = pr_reg; @@ -864,11 +864,11 @@ static void core_scsi3_aptpl_reserve( (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, - (prf_isid) ? &i_buf[0] : ""); + i_buf); } static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *, - struct t10_pr_registration *, int, int); + struct t10_pr_registration *, enum register_type, int); static int __core_scsi3_check_aptpl_registration( struct se_device *dev, @@ -961,21 +961,19 @@ static void __core_scsi3_dump_registration( struct se_device *dev, struct se_node_acl *nacl, struct t10_pr_registration *pr_reg, - int register_type) + enum register_type register_type) { struct se_portal_group *se_tpg = nacl->se_tpg; char i_buf[PR_REG_ISID_ID_LEN]; - int prf_isid; memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN); - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" - " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? - "_AND_MOVE" : (register_type == 1) ? + " Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ? + "_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, - (prf_isid) ? 
i_buf : ""); + i_buf); pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); @@ -997,7 +995,7 @@ static void __core_scsi3_add_registration( struct se_device *dev, struct se_node_acl *nacl, struct t10_pr_registration *pr_reg, - int register_type, + enum register_type register_type, int register_move) { struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; @@ -1063,7 +1061,7 @@ static int core_scsi3_alloc_registration( u64 sa_res_key, int all_tg_pt, int aptpl, - int register_type, + enum register_type register_type, int register_move) { struct t10_pr_registration *pr_reg; @@ -1112,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( continue; } atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1127,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( continue; atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1157,10 +1155,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) { atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } -static int core_scsi3_check_implict_release( +static int core_scsi3_check_implicit_release( struct se_device *dev, struct t10_pr_registration *pr_reg) { @@ -1176,7 +1174,7 @@ static int core_scsi3_check_implict_release( } if (pr_res_holder == pr_reg) { /* - * Perform an implict RELEASE if the registration that + * Perform an implicit RELEASE if the registration that * is being released is holding the reservation. * * From spc4r17, section 5.7.11.1: @@ -1194,7 +1192,7 @@ static int core_scsi3_check_implict_release( * For 'All Registrants' reservation types, all existing * registrations are still processed as reservation holders * in core_scsi3_pr_seq_non_holder() after the initial - * reservation holder is implictly released here. + * reservation holder is implicitly released here. */ } else if (pr_reg->pr_reg_all_tg_pt && (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, @@ -1224,11 +1222,9 @@ static void __core_scsi3_free_registration( pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; struct t10_reservation *pr_tmpl = &dev->t10_pr; char i_buf[PR_REG_ISID_ID_LEN]; - int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); pr_reg->pr_reg_deve->def_pr_registered = 0; pr_reg->pr_reg_deve->pr_res_key = 0; @@ -1256,7 +1252,7 @@ static void __core_scsi3_free_registration( pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" " Node: %s%s\n", tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, - (prf_isid) ? &i_buf[0] : ""); + i_buf); pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" " Port(s)\n", tfo->get_fabric_name(), (pr_reg->pr_reg_all_tg_pt) ? 
"ALL" : "SINGLE", @@ -1268,7 +1264,6 @@ static void __core_scsi3_free_registration( if (!preempt_and_abort_list) { pr_reg->pr_reg_deve = NULL; pr_reg->pr_reg_nacl = NULL; - kfree(pr_reg->pr_aptpl_buf); kmem_cache_free(t10_pr_reg_cache, pr_reg); return; } @@ -1337,7 +1332,6 @@ void core_scsi3_free_all_registrations( list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, pr_reg_aptpl_list) { list_del(&pr_reg->pr_reg_aptpl_list); - kfree(pr_reg->pr_aptpl_buf); kmem_cache_free(t10_pr_reg_cache, pr_reg); } spin_unlock(&pr_tmpl->aptpl_reg_lock); @@ -1355,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) &tpg->tpg_group.cg_item); atomic_dec(&tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) @@ -1375,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) if (nacl->dynamic_node_acl) { atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return; } @@ -1383,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) &nacl->acl_group.cg_item); atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) @@ -1414,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) */ if (!lun_acl) { atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return; } nacl = lun_acl->se_lun_nacl; @@ -1424,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) &lun_acl->se_lun_group.cg_item); atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static sense_reason_t @@ -1452,7 +1446,7 @@ core_scsi3_decode_spec_i_port( char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; sense_reason_t ret; u32 tpdl, tid_len = 0; - int dest_local_nexus, prf_isid; + int dest_local_nexus; u32 dest_rtpi = 0; memset(dest_iport, 0, 64); @@ -1558,14 +1552,14 @@ core_scsi3_decode_spec_i_port( continue; atomic_inc(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(tmp_tpg)) { pr_err(" core_scsi3_tpg_depend_item()" " for tmp_tpg\n"); atomic_dec(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; } @@ -1579,7 +1573,7 @@ core_scsi3_decode_spec_i_port( tmp_tpg, i_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } spin_unlock_irq(&tmp_tpg->acl_node_lock); @@ -1593,7 +1587,7 @@ core_scsi3_decode_spec_i_port( pr_err("configfs_depend_item() failed" " for dest_node_acl->acl_group\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_tpg_undepend_item(tmp_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; @@ -1653,7 +1647,7 @@ core_scsi3_decode_spec_i_port( pr_err("core_scsi3_lunacl_depend_item()" " failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1763,8 +1757,7 @@ core_scsi3_decode_spec_i_port( kfree(tidh); memset(i_buf, 
0, PR_REG_ISID_ID_LEN); - prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(dest_pr_reg, i_buf, PR_REG_ISID_ID_LEN); __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, dest_pr_reg, 0, 0); @@ -1772,8 +1765,7 @@ core_scsi3_decode_spec_i_port( pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" " registered Transport ID for Node: %s%s Mapped LUN:" " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), - dest_node_acl->initiatorname, (prf_isid) ? - &i_buf[0] : "", dest_se_deve->mapped_lun); + dest_node_acl->initiatorname, i_buf, dest_se_deve->mapped_lun); if (dest_local_nexus) continue; @@ -1812,7 +1804,6 @@ out: kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); } - kfree(dest_pr_reg->pr_aptpl_buf); kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); if (dest_local_nexus) @@ -1825,14 +1816,10 @@ out: return ret; } -/* - * Called with struct se_device->dev_reservation_lock held - */ -static int __core_scsi3_update_aptpl_buf( +static int core_scsi3_update_aptpl_buf( struct se_device *dev, unsigned char *buf, - u32 pr_aptpl_buf_len, - int clear_aptpl_metadata) + u32 pr_aptpl_buf_len) { struct se_lun *lun; struct se_portal_group *tpg; @@ -1840,20 +1827,13 @@ static int __core_scsi3_update_aptpl_buf( unsigned char tmp[512], isid_buf[32]; ssize_t len = 0; int reg_count = 0; + int ret = 0; - memset(buf, 0, pr_aptpl_buf_len); - /* - * Called to clear metadata once APTPL has been deactivated. - */ - if (clear_aptpl_metadata) { - snprintf(buf, pr_aptpl_buf_len, - "No Registrations or Reservations\n"); - return 0; - } + spin_lock(&dev->dev_reservation_lock); + spin_lock(&dev->t10_pr.registration_lock); /* * Walk the registration list.. */ - spin_lock(&dev->t10_pr.registration_lock); list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, pr_reg_list) { @@ -1899,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf( if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { pr_err("Unable to update renaming" " APTPL metadata\n"); - spin_unlock(&dev->t10_pr.registration_lock); - return -EMSGSIZE; + ret = -EMSGSIZE; + goto out; } len += sprintf(buf+len, "%s", tmp); @@ -1917,53 +1897,34 @@ static int __core_scsi3_update_aptpl_buf( if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { pr_err("Unable to update renaming" " APTPL metadata\n"); - spin_unlock(&dev->t10_pr.registration_lock); - return -EMSGSIZE; + ret = -EMSGSIZE; + goto out; } len += sprintf(buf+len, "%s", tmp); reg_count++; } - spin_unlock(&dev->t10_pr.registration_lock); if (!reg_count) len += sprintf(buf+len, "No Registrations or Reservations"); - return 0; -} - -static int core_scsi3_update_aptpl_buf( - struct se_device *dev, - unsigned char *buf, - u32 pr_aptpl_buf_len, - int clear_aptpl_metadata) -{ - int ret; - - spin_lock(&dev->dev_reservation_lock); - ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, - clear_aptpl_metadata); +out: + spin_unlock(&dev->t10_pr.registration_lock); spin_unlock(&dev->dev_reservation_lock); return ret; } -/* - * Called with struct se_device->aptpl_file_mutex held - */ static int __core_scsi3_write_aptpl_to_file( struct se_device *dev, - unsigned char *buf, - u32 pr_aptpl_buf_len) + unsigned char *buf) { struct t10_wwn *wwn = &dev->t10_wwn; struct file *file; - struct iovec iov[1]; - mm_segment_t old_fs; int flags = O_RDWR | O_CREAT | O_TRUNC; char path[512]; + u32 pr_aptpl_buf_len; int ret; - memset(iov, 0, sizeof(struct iovec)); memset(path, 0, 512); if (strlen(&wwn->unit_serial[0]) >= 512) { @@ -1974,84 +1935,78 @@ static int 
__core_scsi3_write_aptpl_to_file( snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); file = filp_open(path, flags, 0600); - if (IS_ERR(file) || !file || !file->f_dentry) { + if (IS_ERR(file)) { pr_err("filp_open(%s) for APTPL metadata" " failed\n", path); - return IS_ERR(file) ? PTR_ERR(file) : -ENOENT; + return PTR_ERR(file); } - iov[0].iov_base = &buf[0]; - if (!pr_aptpl_buf_len) - iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ - else - iov[0].iov_len = pr_aptpl_buf_len; + pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */ - old_fs = get_fs(); - set_fs(get_ds()); - ret = vfs_writev(file, &iov[0], 1, &file->f_pos); - set_fs(old_fs); + ret = kernel_write(file, buf, pr_aptpl_buf_len, 0); - if (ret < 0) { + if (ret < 0) pr_debug("Error writing APTPL metadata file: %s\n", path); - filp_close(file, NULL); - return -EIO; - } - filp_close(file, NULL); + fput(file); - return 0; + return (ret < 0) ? -EIO : 0; } -static int -core_scsi3_update_and_write_aptpl(struct se_device *dev, unsigned char *in_buf, - u32 in_pr_aptpl_buf_len) +/* + * Clear the APTPL metadata if APTPL has been disabled, otherwise + * write out the updated metadata to struct file for this SCSI device. + */ +static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl) { - unsigned char null_buf[64], *buf; - u32 pr_aptpl_buf_len; - int clear_aptpl_metadata = 0; - int ret; + unsigned char *buf; + int rc; - /* - * Can be called with a NULL pointer from PROUT service action CLEAR - */ - if (!in_buf) { - memset(null_buf, 0, 64); - buf = &null_buf[0]; - /* - * This will clear the APTPL metadata to: - * "No Registrations or Reservations" status - */ - pr_aptpl_buf_len = 64; - clear_aptpl_metadata = 1; - } else { - buf = in_buf; - pr_aptpl_buf_len = in_pr_aptpl_buf_len; + if (!aptpl) { + char *null_buf = "No Registrations or Reservations\n"; + + rc = __core_scsi3_write_aptpl_to_file(dev, null_buf); + dev->t10_pr.pr_aptpl_active = 0; + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated\n"); + + if (rc) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + return 0; } - ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, - clear_aptpl_metadata); - if (ret != 0) - return ret; + buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL); + if (!buf) + return TCM_OUT_OF_RESOURCES; - /* - * __core_scsi3_write_aptpl_to_file() will call strlen() - * on the passed buf to determine pr_aptpl_buf_len. 
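core_scsi3_update_aptpl_buf() above now takes dev_reservation_lock and registration_lock itself and funnels every overflow through a single -EMSGSIZE exit path instead of unlocking at each failure site. A hedged userspace sketch of the bounded-append pattern it relies on (the record strings here are made-up examples):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Append tmp to buf only if it still fits, mirroring the
 * "len + strlen(tmp) >= pr_aptpl_buf_len" checks in the patch. */
static int aptpl_append(char *buf, size_t buf_len, size_t *len,
			const char *tmp)
{
	if (*len + strlen(tmp) >= buf_len)
		return -EMSGSIZE;
	*len += sprintf(buf + *len, "%s", tmp);
	return 0;
}

int main(void)
{
	char buf[128];
	size_t len = 0;

	if (aptpl_append(buf, sizeof(buf), &len, "PR_REG_START: 1\n") ||
	    aptpl_append(buf, sizeof(buf), &len, "initiator_fabric=iSCSI\n"))
		return 1;
	fputs(buf, stdout);
	return 0;
}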
- */ - return __core_scsi3_write_aptpl_to_file(dev, buf, 0); + rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN); + if (rc < 0) { + kfree(buf); + return TCM_OUT_OF_RESOURCES; + } + + rc = __core_scsi3_write_aptpl_to_file(dev, buf); + if (rc != 0) { + pr_err("SPC-3 PR: Could not update APTPL\n"); + kfree(buf); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + dev->t10_pr.pr_aptpl_active = 1; + kfree(buf); + pr_debug("SPC-3 PR: Set APTPL Bit Activated\n"); + return 0; } static sense_reason_t core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, - int aptpl, int all_tg_pt, int spec_i_pt, int ignore_key) + bool aptpl, bool all_tg_pt, bool spec_i_pt, enum register_type register_type) { struct se_session *se_sess = cmd->se_sess; struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve; struct se_lun *se_lun = cmd->se_lun; struct se_portal_group *se_tpg; - struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; + struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp; struct t10_reservation *pr_tmpl = &dev->t10_pr; - /* Used for APTPL metadata w/ UNREGISTER */ - unsigned char *pr_aptpl_buf = NULL; unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; sense_reason_t ret = TCM_NO_SENSE; int pr_holder = 0, type; @@ -2072,8 +2027,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, /* * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 */ - pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); - if (!pr_reg_e) { + pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); + if (!pr_reg) { if (res_key) { pr_warn("SPC-3 PR: Reservation Key non-zero" " for SA REGISTER, returning CONFLICT\n"); @@ -2094,7 +2049,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, if (core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, se_deve, isid_ptr, sa_res_key, all_tg_pt, aptpl, - ignore_key, 0)) { + register_type, 0)) { pr_err("Unable to allocate" " struct t10_pr_registration\n"); return TCM_INVALID_PARAMETER_LIST; @@ -2113,98 +2068,70 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, if (ret != 0) return ret; } - /* - * Nothing left to do for the APTPL=0 case. - */ - if (!aptpl) { - pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); - pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" - " REGISTER\n"); - return 0; - } - /* - * Locate the newly allocated local I_T Nexus *pr_reg, and - * update the APTPL metadata information using its - * preallocated *pr_reg->pr_aptpl_buf. 
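__core_scsi3_write_aptpl_to_file() above swaps the vfs_writev() plus set_fs() dance for a single kernel_write(), writes strlen() + 1 bytes so the NUL terminator lands in the metadata file, and releases the file with fput() rather than filp_close(). A rough userspace analogue of that write path (the path below is a stand-in for the driver's /var/target/pr/aptpl_<unit_serial> layout):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/aptpl_example"; /* stand-in path */
	const char *buf = "No Registrations or Reservations\n";
	ssize_t ret;

	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	ret = write(fd, buf, strlen(buf) + 1); /* + 1: include the NUL */
	close(fd);
	return ret < 0 ? 1 : 0;
}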
- */ - pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, - se_sess->se_node_acl, se_sess); - if (core_scsi3_update_and_write_aptpl(cmd->se_dev, - &pr_reg->pr_aptpl_buf[0], - pr_tmpl->pr_aptpl_buf_len)) { - pr_tmpl->pr_aptpl_active = 1; - pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); - } - - goto out_put_pr_reg; + return core_scsi3_update_and_write_aptpl(dev, aptpl); } - /* - * Locate the existing *pr_reg via struct se_node_acl pointers - */ - pr_reg = pr_reg_e; - type = pr_reg->pr_res_type; - - if (!ignore_key) { - if (res_key != pr_reg->pr_res_key) { - pr_err("SPC-3 PR REGISTER: Received" - " res_key: 0x%016Lx does not match" - " existing SA REGISTER res_key:" - " 0x%016Lx\n", res_key, - pr_reg->pr_res_key); - ret = TCM_RESERVATION_CONFLICT; - goto out_put_pr_reg; - } + /* ok, existing registration */ + + if ((register_type == REGISTER) && (res_key != pr_reg->pr_res_key)) { + pr_err("SPC-3 PR REGISTER: Received" + " res_key: 0x%016Lx does not match" + " existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, + pr_reg->pr_res_key); + ret = TCM_RESERVATION_CONFLICT; + goto out; } if (spec_i_pt) { - pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" - " set while sa_res_key=0\n"); + pr_err("SPC-3 PR REGISTER: SPEC_I_PT" + " set on a registered nexus\n"); ret = TCM_INVALID_PARAMETER_LIST; - goto out_put_pr_reg; + goto out; } /* * An existing ALL_TG_PT=1 registration being released * must also set ALL_TG_PT=1 in the incoming PROUT. */ - if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { - pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1" + if (pr_reg->pr_reg_all_tg_pt && !all_tg_pt) { + pr_err("SPC-3 PR REGISTER: ALL_TG_PT=1" " registration exists, but ALL_TG_PT=1 bit not" " present in received PROUT\n"); ret = TCM_INVALID_CDB_FIELD; - goto out_put_pr_reg; + goto out; } /* - * Allocate APTPL metadata buffer used for UNREGISTER ops + * sa_res_key=1 Change Reservation Key for registered I_T Nexus. */ - if (aptpl) { - pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, - GFP_KERNEL); - if (!pr_aptpl_buf) { - pr_err("Unable to allocate" - " pr_aptpl_buf\n"); - ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - goto out_put_pr_reg; - } - } + if (sa_res_key) { + /* + * Increment PRgeneration counter for struct se_device" + * upon a successful REGISTER, see spc4r17 section 6.3.2 + * READ_KEYS service action. + */ + pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev); + pr_reg->pr_res_key = sa_res_key; + pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" + " Key for %s to: 0x%016Lx PRgeneration:" + " 0x%08x\n", cmd->se_tfo->get_fabric_name(), + (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "", + pr_reg->pr_reg_nacl->initiatorname, + pr_reg->pr_res_key, pr_reg->pr_res_generation); - /* - * sa_res_key=0 Unregister Reservation Key for registered I_T - * Nexus sa_res_key=1 Change Reservation Key for registered I_T - * Nexus. - */ - if (!sa_res_key) { - pr_holder = core_scsi3_check_implict_release( + } else { + /* + * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. + */ + pr_holder = core_scsi3_check_implicit_release( cmd->se_dev, pr_reg); if (pr_holder < 0) { - kfree(pr_aptpl_buf); ret = TCM_RESERVATION_CONFLICT; - goto out_put_pr_reg; + goto out; } + type = pr_reg->pr_res_type; spin_lock(&pr_tmpl->registration_lock); /* @@ -2235,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, * Release the calling I_T Nexus registration now.. 
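With the enum in place, the existing-registration path above collapses the old if (!ignore_key) block into one REGISTER-only key comparison, and the sa_res_key != 0 case (change the reservation key) is now handled before the unregister branch. A minimal sketch of the conflict check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum register_type { REGISTER, REGISTER_AND_IGNORE_EXISTING_KEY };

/* A plain REGISTER must present the currently registered key;
 * REGISTER_AND_IGNORE_EXISTING_KEY skips the comparison entirely. */
static bool register_conflicts(enum register_type t, uint64_t res_key,
			       uint64_t existing_key)
{
	return t == REGISTER && res_key != existing_key;
}

int main(void)
{
	printf("%d\n", register_conflicts(REGISTER, 0x1234, 0x5678)); /* 1 */
	printf("%d\n", register_conflicts(REGISTER_AND_IGNORE_EXISTING_KEY,
					  0x1234, 0x5678));           /* 0 */
	return 0;
}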
*/ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); + pr_reg = NULL; /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2248,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, * RESERVATIONS RELEASED. */ if (pr_holder && - (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || - type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { + (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || + type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list, pr_reg_list) { @@ -2261,61 +2189,15 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, ASCQ_2AH_RESERVATIONS_RELEASED); } } - spin_unlock(&pr_tmpl->registration_lock); - - if (!aptpl) { - pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(dev, NULL, 0); - pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" - " for UNREGISTER\n"); - return 0; - } - - if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0], - pr_tmpl->pr_aptpl_buf_len)) { - pr_tmpl->pr_aptpl_active = 1; - pr_debug("SPC-3 PR: Set APTPL Bit Activated" - " for UNREGISTER\n"); - } - - goto out_free_aptpl_buf; - } - - /* - * Increment PRgeneration counter for struct se_device" - * upon a successful REGISTER, see spc4r17 section 6.3.2 - * READ_KEYS service action. - */ - pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev); - pr_reg->pr_res_key = sa_res_key; - pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" - " Key for %s to: 0x%016Lx PRgeneration:" - " 0x%08x\n", cmd->se_tfo->get_fabric_name(), - (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", - pr_reg->pr_reg_nacl->initiatorname, - pr_reg->pr_res_key, pr_reg->pr_res_generation); - if (!aptpl) { - pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(dev, NULL, 0); - pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" - " for REGISTER\n"); - ret = 0; - goto out_put_pr_reg; + spin_unlock(&pr_tmpl->registration_lock); } - if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0], - pr_tmpl->pr_aptpl_buf_len)) { - pr_tmpl->pr_aptpl_active = 1; - pr_debug("SPC-3 PR: Set APTPL Bit Activated" - " for REGISTER\n"); - } + ret = core_scsi3_update_and_write_aptpl(dev, aptpl); -out_free_aptpl_buf: - kfree(pr_aptpl_buf); - ret = 0; -out_put_pr_reg: - core_scsi3_put_pr_reg(pr_reg); +out: + if (pr_reg) + core_scsi3_put_pr_reg(pr_reg); return ret; } @@ -2351,7 +2233,6 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) struct t10_reservation *pr_tmpl = &dev->t10_pr; char i_buf[PR_REG_ISID_ID_LEN]; sense_reason_t ret; - int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); @@ -2477,8 +2358,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) pr_reg->pr_res_type = type; pr_reg->pr_res_holder = 1; dev->dev_pr_res_holder = pr_reg; - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new" " reservation holder TYPE: %s ALL_TG_PT: %d\n", @@ -2487,17 +2367,11 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, - (prf_isid) ? 
&i_buf[0] : ""); + i_buf); spin_unlock(&dev->dev_reservation_lock); - if (pr_tmpl->pr_aptpl_active) { - if (!core_scsi3_update_and_write_aptpl(cmd->se_dev, - &pr_reg->pr_aptpl_buf[0], - pr_tmpl->pr_aptpl_buf_len)) { - pr_debug("SPC-3 PR: Updated APTPL metadata" - " for RESERVE\n"); - } - } + if (pr_tmpl->pr_aptpl_active) + core_scsi3_update_and_write_aptpl(cmd->se_dev, true); ret = 0; out_put_pr_reg: @@ -2531,15 +2405,13 @@ static void __core_scsi3_complete_pro_release( struct se_device *dev, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, - int explict) + int explicit) { struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; char i_buf[PR_REG_ISID_ID_LEN]; - int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* * Go ahead and release the current PR reservation holder. */ @@ -2547,12 +2419,12 @@ static void __core_scsi3_complete_pro_release( pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" " reservation holder TYPE: %s ALL_TG_PT: %d\n", - tfo->get_fabric_name(), (explict) ? "explict" : "implict", + tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", tfo->get_fabric_name(), se_nacl->initiatorname, - (prf_isid) ? &i_buf[0] : ""); + i_buf); /* * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE */ @@ -2713,12 +2585,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, spin_unlock(&pr_tmpl->registration_lock); write_aptpl: - if (pr_tmpl->pr_aptpl_active) { - if (!core_scsi3_update_and_write_aptpl(cmd->se_dev, - &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len)) { - pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); - } - } + if (pr_tmpl->pr_aptpl_active) + core_scsi3_update_and_write_aptpl(cmd->se_dev, true); + out_put_pr_reg: core_scsi3_put_pr_reg(pr_reg); return ret; @@ -2802,11 +2671,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key) pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", cmd->se_tfo->get_fabric_name()); - if (pr_tmpl->pr_aptpl_active) { - core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); - pr_debug("SPC-3 PR: Updated APTPL metadata" - " for CLEAR\n"); - } + core_scsi3_update_and_write_aptpl(cmd->se_dev, false); core_scsi3_pr_generation(dev); return 0; @@ -2821,18 +2686,16 @@ static void __core_scsi3_complete_pro_preempt( struct list_head *preempt_and_abort_list, int type, int scope, - int abort) + enum preempt_type preempt_type) { struct se_node_acl *nacl = pr_reg->pr_reg_nacl; struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; char i_buf[PR_REG_ISID_ID_LEN]; - int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* - * Do an implict RELEASE of the existing reservation. + * Do an implicit RELEASE of the existing reservation. */ if (dev->dev_pr_res_holder) __core_scsi3_complete_pro_release(dev, nacl, @@ -2845,12 +2708,12 @@ static void __core_scsi3_complete_pro_preempt( pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" " reservation holder TYPE: %s ALL_TG_PT: %d\n", - tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", + tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? 
"_AND_ABORT" : "", core_scsi3_pr_dump_type(type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", - tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", - nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); + tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", + nacl->initiatorname, i_buf); /* * For PREEMPT_AND_ABORT, add the preempting reservation's * struct t10_pr_registration to the list that will be compared @@ -2880,14 +2743,13 @@ static void core_scsi3_release_preempt_and_abort( pr_reg->pr_reg_deve = NULL; pr_reg->pr_reg_nacl = NULL; - kfree(pr_reg->pr_aptpl_buf); kmem_cache_free(t10_pr_reg_cache, pr_reg); } } static sense_reason_t core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, - u64 sa_res_key, int abort) + u64 sa_res_key, enum preempt_type preempt_type) { struct se_device *dev = cmd->se_dev; struct se_node_acl *pr_reg_nacl; @@ -2907,7 +2769,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if (!pr_reg_n) { pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for PREEMPT%s\n", - (abort) ? "_AND_ABORT" : ""); + (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : ""); return TCM_RESERVATION_CONFLICT; } if (pr_reg_n->pr_res_key != res_key) { @@ -2976,7 +2838,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, pr_reg_nacl = pr_reg->pr_reg_nacl; pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; __core_scsi3_free_registration(dev, pr_reg, - (abort) ? &preempt_and_abort_list : + (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, calling_it_nexus); released_regs++; } else { @@ -2986,7 +2848,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * 5.7.11.4 Preempting, Table 52 and Figure 7. * * For a ZERO SA Reservation key, release - * all other registrations and do an implict + * all other registrations and do an implicit * release of active persistent reservation. * * For a non-ZERO SA Reservation key, only @@ -3004,7 +2866,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, pr_reg_nacl = pr_reg->pr_reg_nacl; pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; __core_scsi3_free_registration(dev, pr_reg, - (abort) ? &preempt_and_abort_list : + (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, 0); released_regs++; } @@ -3033,24 +2895,17 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, */ if (pr_res_holder && all_reg && !(sa_res_key)) { __core_scsi3_complete_pro_preempt(dev, pr_reg_n, - (abort) ? &preempt_and_abort_list : NULL, - type, scope, abort); + (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, + type, scope, preempt_type); - if (abort) + if (preempt_type == PREEMPT_AND_ABORT) core_scsi3_release_preempt_and_abort( &preempt_and_abort_list, pr_reg_n); } spin_unlock(&dev->dev_reservation_lock); - if (pr_tmpl->pr_aptpl_active) { - if (!core_scsi3_update_and_write_aptpl(cmd->se_dev, - &pr_reg_n->pr_aptpl_buf[0], - pr_tmpl->pr_aptpl_buf_len)) { - pr_debug("SPC-3 PR: Updated APTPL" - " metadata for PREEMPT%s\n", (abort) ? 
- "_AND_ABORT" : ""); - } - } + if (pr_tmpl->pr_aptpl_active) + core_scsi3_update_and_write_aptpl(cmd->se_dev, true); core_scsi3_put_pr_reg(pr_reg_n); core_scsi3_pr_generation(cmd->se_dev); @@ -3114,7 +2969,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, pr_reg_nacl = pr_reg->pr_reg_nacl; pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; __core_scsi3_free_registration(dev, pr_reg, - (abort) ? &preempt_and_abort_list : NULL, + (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, calling_it_nexus); /* * e) Establish a unit attention condition for the initiator @@ -3131,8 +2986,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * I_T nexus using the contents of the SCOPE and TYPE fields; */ __core_scsi3_complete_pro_preempt(dev, pr_reg_n, - (abort) ? &preempt_and_abort_list : NULL, - type, scope, abort); + (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, + type, scope, preempt_type); /* * d) Process tasks as defined in 5.7.1; * e) See above.. @@ -3172,20 +3027,14 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * been removed from the primary pr_reg list), except the * new persistent reservation holder, the calling Initiator Port. */ - if (abort) { + if (preempt_type == PREEMPT_AND_ABORT) { core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd); core_scsi3_release_preempt_and_abort(&preempt_and_abort_list, pr_reg_n); } - if (pr_tmpl->pr_aptpl_active) { - if (!core_scsi3_update_and_write_aptpl(cmd->se_dev, - &pr_reg_n->pr_aptpl_buf[0], - pr_tmpl->pr_aptpl_buf_len)) { - pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT" - "%s\n", abort ? "_AND_ABORT" : ""); - } - } + if (pr_tmpl->pr_aptpl_active) + core_scsi3_update_and_write_aptpl(cmd->se_dev, true); core_scsi3_put_pr_reg(pr_reg_n); core_scsi3_pr_generation(cmd->se_dev); @@ -3194,7 +3043,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, static sense_reason_t core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope, - u64 res_key, u64 sa_res_key, int abort) + u64 res_key, u64 sa_res_key, enum preempt_type preempt_type) { switch (type) { case PR_TYPE_WRITE_EXCLUSIVE: @@ -3204,10 +3053,10 @@ core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope, case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: return core_scsi3_pro_preempt(cmd, type, scope, res_key, - sa_res_key, abort); + sa_res_key, preempt_type); default: pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" - " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); + " Type: 0x%02x\n", (preempt_type == PREEMPT_AND_ABORT) ? 
"_AND_ABORT" : "", type); return TCM_INVALID_CDB_FIELD; } } @@ -3231,7 +3080,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, unsigned char *initiator_str; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; - int new_reg = 0, type, scope, matching_iname, prf_isid; + int new_reg = 0, type, scope, matching_iname; sense_reason_t ret; unsigned short rtpi; unsigned char proto_ident; @@ -3319,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, continue; atomic_inc(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(dest_se_tpg)) { pr_err("core_scsi3_tpg_depend_item() failed" " for dest_se_tpg\n"); atomic_dec(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_put_pr_reg; } @@ -3424,7 +3273,7 @@ after_iport_check: initiator_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } spin_unlock_irq(&dest_se_tpg->acl_node_lock); @@ -3440,7 +3289,7 @@ after_iport_check: pr_err("core_scsi3_nodeacl_depend_item() for" " dest_node_acl\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dest_node_acl = NULL; ret = TCM_INVALID_PARAMETER_LIST; goto out; @@ -3465,7 +3314,7 @@ after_iport_check: if (core_scsi3_lunacl_depend_item(dest_se_deve)) { pr_err("core_scsi3_lunacl_depend_item() failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dest_se_deve = NULL; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out; @@ -3575,8 +3424,7 @@ after_iport_check: dest_pr_reg->pr_res_holder = 1; dest_pr_reg->pr_res_type = type; pr_reg->pr_res_scope = scope; - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* * Increment PRGeneration for existing registrations.. */ @@ -3592,7 +3440,7 @@ after_iport_check: pr_debug("SPC-3 PR Successfully moved reservation from" " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, - (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), + i_buf, dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, (iport_ptr != NULL) ? iport_ptr : ""); /* @@ -3613,24 +3461,7 @@ after_iport_check: } else core_scsi3_put_pr_reg(pr_reg); - /* - * Clear the APTPL metadata if APTPL has been disabled, otherwise - * write out the updated metadata to struct file for this SCSI device. 
- */ - if (!aptpl) { - pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); - pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" - " REGISTER_AND_MOVE\n"); - } else { - pr_tmpl->pr_aptpl_active = 1; - if (!core_scsi3_update_and_write_aptpl(cmd->se_dev, - &dest_pr_reg->pr_aptpl_buf[0], - pr_tmpl->pr_aptpl_buf_len)) { - pr_debug("SPC-3 PR: Set APTPL Bit Activated for" - " REGISTER_AND_MOVE\n"); - } - } + core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl); transport_kunmap_data_sg(cmd); @@ -3763,7 +3594,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) switch (sa) { case PRO_REGISTER: ret = core_scsi3_emulate_pro_register(cmd, - res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0); + res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER); break; case PRO_RESERVE: ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key); @@ -3776,15 +3607,15 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) break; case PRO_PREEMPT: ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, - res_key, sa_res_key, 0); + res_key, sa_res_key, PREEMPT); break; case PRO_PREEMPT_AND_ABORT: ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, - res_key, sa_res_key, 1); + res_key, sa_res_key, PREEMPT_AND_ABORT); break; case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: ret = core_scsi3_emulate_pro_register(cmd, - 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1); + 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER_AND_IGNORE_EXISTING_KEY); break; case PRO_REGISTER_AND_MOVE: ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key, @@ -4049,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) add_desc_len = 0; atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); /* * Determine expected length of $FABRIC_MOD specific @@ -4063,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) " out of buffer: %d\n", cmd->data_length); spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); break; } /* @@ -4125,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); /* * Set the ADDITIONAL DESCRIPTOR LENGTH */ diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index b4a004247ab..2ee2936fa0b 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -43,9 +43,14 @@ #define PR_APTPL_MAX_IPORT_LEN 256 #define PR_APTPL_MAX_TPORT_LEN 256 +/* + * Function defined in target_core_spc.c + */ +void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); + extern struct kmem_cache *t10_pr_reg_cache; -extern int core_pr_dump_initiator_port(struct t10_pr_registration *, +extern void core_pr_dump_initiator_port(struct t10_pr_registration *, char *, u32); extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *); extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 2bcfd79cf59..94d00df28f3 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -3,7 +3,7 @@ * * This file contains the generic target mode <-> Linux SCSI subsystem plugin. * - * (c) Copyright 2003-2012 RisingTide Systems LLC. + * (c) Copyright 2003-2013 Datera, Inc. * * Nicholas A. 
Bellinger <nab@kernel.org> * @@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) * pSCSI Host ID and enable for phba mode */ sh = scsi_host_lookup(phv->phv_host_id); - if (IS_ERR(sh)) { + if (!sh) { pr_err("pSCSI: Unable to locate SCSI Host for" " phv_host_id: %d\n", phv->phv_host_id); - return PTR_ERR(sh); + return -EINVAL; } phv->phv_lld_host = sh; @@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev) sh = phv->phv_lld_host; } else { sh = scsi_host_lookup(pdv->pdv_host_id); - if (IS_ERR(sh)) { + if (!sh) { pr_err("pSCSI: Unable to locate" " pdv_host_id: %d\n", pdv->pdv_host_id); - return PTR_ERR(sh); + return -EINVAL; } } } else { @@ -840,14 +840,14 @@ static void pscsi_bi_endio(struct bio *bio, int error) bio_put(bio); } -static inline struct bio *pscsi_get_bio(int sg_num) +static inline struct bio *pscsi_get_bio(int nr_vecs) { struct bio *bio; /* * Use bio_malloc() following the comment in for bio -> struct request * in block/blk-core.c:blk_make_request() */ - bio = bio_kmalloc(GFP_KERNEL, sg_num); + bio = bio_kmalloc(GFP_KERNEL, nr_vecs); if (!bio) { pr_err("PSCSI: bio_kmalloc() failed\n"); return NULL; @@ -883,7 +883,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, page, len, off); - while (len > 0 && data_len > 0) { + /* + * We only have one page of data in each sg element, + * we can not cross a page boundary. + */ + if (off + len > PAGE_SIZE) + goto fail; + + if (len > 0 && data_len > 0) { bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); @@ -940,10 +947,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, bio = NULL; } - page++; - len -= bytes; data_len -= bytes; - off = 0; } } @@ -952,7 +956,6 @@ fail: while (*hbio) { bio = *hbio; *hbio = (*hbio)->bi_next; - bio->bi_next = NULL; bio_endio(bio, 0); /* XXX: should be error */ } return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1047,12 +1050,13 @@ pscsi_execute_cmd(struct se_cmd *cmd) req = blk_get_request(pdv->pdv_sd->request_queue, (data_direction == DMA_TO_DEVICE), GFP_KERNEL); - if (!req || IS_ERR(req)) { - pr_err("PSCSI: blk_get_request() failed: %ld\n", - req ? IS_ERR(req) : -ENOMEM); + if (!req) { + pr_err("PSCSI: blk_get_request() failed\n"); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto fail; } + + blk_rq_set_block_pc(req); } else { BUG_ON(!cmd->data_length); @@ -1069,7 +1073,6 @@ pscsi_execute_cmd(struct se_cmd *cmd) } } - req->cmd_type = REQ_TYPE_BLOCK_PC; req->end_io = pscsi_req_done; req->end_io_data = cmd; req->cmd_len = scsi_command_size(pt->pscsi_cdb); @@ -1092,7 +1095,6 @@ fail_free_bio: while (hbio) { struct bio *bio = hbio; hbio = hbio->bi_next; - bio->bi_next = NULL; bio_endio(bio, 0); /* XXX: should be error */ } ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1178,7 +1180,7 @@ static int __init pscsi_module_init(void) return transport_subsystem_register(&pscsi_template); } -static void pscsi_module_exit(void) +static void __exit pscsi_module_exit(void) { transport_subsystem_release(&pscsi_template); } diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 0457de362e6..b920db3388c 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -4,7 +4,7 @@ * This file contains the Storage Engine <-> Ramdisk transport * specific functions. * - * (c) Copyright 2003-2012 RisingTide Systems LLC. + * (c) Copyright 2003-2013 Datera, Inc. 
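The pscsi_map_sg() change above replaces the page-walking while loop with a hard precondition that a scatterlist element never cross a page boundary, failing the command instead of splitting the element across pages. A sketch of the new check:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u /* assumed 4K pages */

/* Mirrors the new "if (off + len > PAGE_SIZE) goto fail;" test:
 * each sg element must fit entirely within one page. */
static bool sg_fits_in_page(unsigned int off, unsigned int len)
{
	return off + len <= PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", sg_fits_in_page(0, 4096));   /* 1: exactly one page */
	printf("%d\n", sg_fits_in_page(512, 4096)); /* 0: crosses the page */
	return 0;
}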
* * Nicholas A. Bellinger <nab@kernel.org> * @@ -27,7 +27,6 @@ #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> -#include <linux/blkdev.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <scsi/scsi.h> @@ -79,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba) hba->hba_ptr = NULL; } -/* rd_release_device_space(): - * - * - */ -static void rd_release_device_space(struct rd_dev *rd_dev) +static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 sg_table_count) { - u32 i, j, page_count = 0, sg_per_table; - struct rd_dev_sg_table *sg_table; struct page *pg; struct scatterlist *sg; + u32 i, j, page_count = 0, sg_per_table; - if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) - return; - - sg_table = rd_dev->sg_table_array; - - for (i = 0; i < rd_dev->sg_table_count; i++) { + for (i = 0; i < sg_table_count; i++) { sg = sg_table[i].sg_table; sg_per_table = sg_table[i].rd_sg_count; @@ -106,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev) page_count++; } } - kfree(sg); } + kfree(sg_table); + return page_count; +} + +static void rd_release_device_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, + rd_dev->sg_table_count); + pr_debug("CORE_RD[%u] - Released device space for Ramdisk" " Device ID: %u, pages %u in %u tables total bytes %lu\n", rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); - kfree(sg_table); rd_dev->sg_table_array = NULL; rd_dev->sg_table_count = 0; } @@ -125,33 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev) * * */ -static int rd_build_device_space(struct rd_dev *rd_dev) +static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 total_sg_needed, unsigned char init_payload) { - u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed; + u32 i = 0, j, page_offset = 0, sg_per_table; u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist)); - struct rd_dev_sg_table *sg_table; struct page *pg; struct scatterlist *sg; - - if (rd_dev->rd_page_count <= 0) { - pr_err("Illegal page count: %u for Ramdisk device\n", - rd_dev->rd_page_count); - return -EINVAL; - } - total_sg_needed = rd_dev->rd_page_count; - - sg_tables = (total_sg_needed / max_sg_per_table) + 1; - - sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); - if (!sg_table) { - pr_err("Unable to allocate memory for Ramdisk" - " scatterlist tables\n"); - return -ENOMEM; - } - - rd_dev->sg_table_array = sg_table; - rd_dev->sg_table_count = sg_tables; + unsigned char *p; while (total_sg_needed) { sg_per_table = (total_sg_needed > max_sg_per_table) ? 
@@ -182,16 +166,119 @@ static int rd_build_device_space(struct rd_dev *rd_dev) } sg_assign_page(&sg[j], pg); sg[j].length = PAGE_SIZE; + + p = kmap(pg); + memset(p, init_payload, PAGE_SIZE); + kunmap(pg); } page_offset += sg_per_table; total_sg_needed -= sg_per_table; } + return 0; +} + +static int rd_build_device_space(struct rd_dev *rd_dev) +{ + struct rd_dev_sg_table *sg_table; + u32 sg_tables, total_sg_needed; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_page_count <= 0) { + pr_err("Illegal page count: %u for Ramdisk device\n", + rd_dev->rd_page_count); + return -EINVAL; + } + + /* Don't need backing pages for NULLIO */ + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + + total_sg_needed = rd_dev->rd_page_count; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk" + " scatterlist tables\n"); + return -ENOMEM; + } + + rd_dev->sg_table_array = sg_table; + rd_dev->sg_table_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00); + if (rc) + return rc; + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" - " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, - rd_dev->rd_dev_id, rd_dev->rd_page_count, - rd_dev->sg_table_count); + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_dev->sg_table_count); + + return 0; +} + +static void rd_release_prot_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, + rd_dev->sg_prot_count); + + pr_debug("CORE_RD[%u] - Released protection space for Ramdisk" + " Device ID: %u, pages %u in %u tables total bytes %lu\n", + rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, + rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); + + rd_dev->sg_prot_array = NULL; + rd_dev->sg_prot_count = 0; +} + +static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size) +{ + struct rd_dev_sg_table *sg_table; + u32 total_sg_needed, sg_tables; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + /* + * prot_length=8byte dif data + * tot sg needed = rd_page_count * (PGSZ/block_size) * + * (prot_length/block_size) + pad + * PGSZ canceled each other. 
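The terse sizing comment above unpacks as follows: the ramdisk holds rd_page_count pages, i.e. rd_page_count * PAGE_SIZE / block_size logical blocks, each needing prot_length bytes of DIF; dividing those protection bytes back into PAGE_SIZE pages cancels the PAGE_SIZE factor, leaving rd_page_count * prot_length / block_size protection pages plus one page of padding. A runnable restatement with assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned int rd_page_count = 1024; /* example rd_pages= value */
	unsigned int block_size = 512;     /* logical block size */
	unsigned int prot_length = 8;      /* DIF bytes per block */

	/* mirrors: total_sg_needed =
	 *   (rd_dev->rd_page_count * prot_length / block_size) + 1 */
	unsigned int total_sg_needed =
		rd_page_count * prot_length / block_size + 1;
	printf("protection pages needed: %u\n", total_sg_needed); /* 17 */
	return 0;
}

The companion rd_get_sg_table()/rd_get_prot_table() hunks below drop the linear scan over every table and index the owning table directly as page / sg_per_table.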
+ */ + total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk protection" + " scatterlist tables\n"); + return -ENOMEM; + } + + rd_dev->sg_prot_array = sg_table; + rd_dev->sg_prot_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff); + if (rc) + return rc; + + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of" + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count); return 0; } @@ -256,10 +343,12 @@ static void rd_free_device(struct se_device *dev) static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) { - u32 i; struct rd_dev_sg_table *sg_table; + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); - for (i = 0; i < rd_dev->sg_table_count; i++) { + i = page / sg_per_table; + if (i < rd_dev->sg_table_count) { sg_table = &rd_dev->sg_table_array[i]; if ((sg_table->page_start_offset <= page) && (sg_table->page_end_offset >= page)) @@ -272,12 +361,30 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } +static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page) +{ + struct rd_dev_sg_table *sg_table; + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + + i = page / sg_per_table; + if (i < rd_dev->sg_prot_count) { + sg_table = &rd_dev->sg_prot_array[i]; + if ((sg_table->page_start_offset <= page) && + (sg_table->page_end_offset >= page)) + return sg_table; + } + + pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n", + page); + + return NULL; +} + static sense_reason_t -rd_execute_rw(struct se_cmd *cmd) +rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) { - struct scatterlist *sgl = cmd->t_data_sg; - u32 sgl_nents = cmd->t_data_nents; - enum dma_data_direction data_direction = cmd->data_direction; struct se_device *se_dev = cmd->se_dev; struct rd_dev *dev = RD_DEV(se_dev); struct rd_dev_sg_table *table; @@ -288,6 +395,12 @@ rd_execute_rw(struct se_cmd *cmd) u32 rd_page; u32 src_len; u64 tmp; + sense_reason_t rc; + + if (dev->rd_flags & RDF_NULLIO) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; rd_offset = do_div(tmp, PAGE_SIZE); @@ -305,6 +418,28 @@ rd_execute_rw(struct se_cmd *cmd) data_direction == DMA_FROM_DEVICE ? "Read" : "Write", cmd->t_task_lba, rd_size, rd_page, rd_offset); + if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { + struct rd_dev_sg_table *prot_table; + struct scatterlist *prot_sg; + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; + u32 prot_offset, prot_page; + + tmp = cmd->t_task_lba * se_dev->prot_length; + prot_offset = do_div(tmp, PAGE_SIZE); + prot_page = tmp; + + prot_table = rd_get_prot_table(dev, prot_page); + if (!prot_table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + if (rc) + return rc; + } + src_len = PAGE_SIZE - rd_offset; sg_miter_start(&m, sgl, sgl_nents, data_direction == DMA_FROM_DEVICE ? 
@@ -314,7 +449,19 @@ rd_execute_rw(struct se_cmd *cmd) void *rd_addr; sg_miter_next(&m); + if (!(u32)m.length) { + pr_debug("RD[%u]: invalid sgl %p len %zu\n", + dev->rd_dev_id, m.addr, m.length); + sg_miter_stop(&m); + return TCM_INCORRECT_AMOUNT_OF_DATA; + } len = min((u32)m.length, src_len); + if (len > rd_size) { + pr_debug("RD[%u]: size underrun page %d offset %d " + "size %d\n", dev->rd_dev_id, + rd_page, rd_offset, rd_size); + len = rd_size; + } m.consumed = len; rd_addr = sg_virt(rd_sg) + rd_offset; @@ -354,16 +501,39 @@ rd_execute_rw(struct se_cmd *cmd) } sg_miter_stop(&m); + if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { + struct rd_dev_sg_table *prot_table; + struct scatterlist *prot_sg; + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; + u32 prot_offset, prot_page; + + tmp = cmd->t_task_lba * se_dev->prot_length; + prot_offset = do_div(tmp, PAGE_SIZE); + prot_page = tmp; + + prot_table = rd_get_prot_table(dev, prot_page); + if (!prot_table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + if (rc) + return rc; + } + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } enum { - Opt_rd_pages, Opt_err + Opt_rd_pages, Opt_rd_nullio, Opt_err }; static match_table_t tokens = { {Opt_rd_pages, "rd_pages=%d"}, + {Opt_rd_nullio, "rd_nullio=%d"}, {Opt_err, NULL} }; @@ -394,6 +564,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev, " Count: %u\n", rd_dev->rd_page_count); rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; break; + case Opt_rd_nullio: + match_int(args, &arg); + if (arg != 1) + break; + + pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg); + rd_dev->rd_flags |= RDF_NULLIO; + break; default: break; } @@ -410,8 +588,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", rd_dev->rd_dev_id); bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" - " SG_table_count: %u\n", rd_dev->rd_page_count, - PAGE_SIZE, rd_dev->sg_table_count); + " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count, + PAGE_SIZE, rd_dev->sg_table_count, + !!(rd_dev->rd_flags & RDF_NULLIO)); return bl; } @@ -425,6 +604,24 @@ static sector_t rd_get_blocks(struct se_device *dev) return blocks_long; } +static int rd_init_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + if (!dev->dev_attrib.pi_prot_type) + return 0; + + return rd_build_prot_space(rd_dev, dev->prot_length, + dev->dev_attrib.block_size); +} + +static void rd_free_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + rd_release_prot_space(rd_dev); +} + static struct sbc_ops rd_sbc_ops = { .execute_rw = rd_execute_rw, }; @@ -450,6 +647,8 @@ static struct se_subsystem_api rd_mcp_template = { .show_configfs_dev_params = rd_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = rd_get_blocks, + .init_prot = rd_init_prot, + .free_prot = rd_free_prot, }; int __init rd_module_init(void) diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 933b38b6e56..cc46a6a89b3 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -22,6 +22,7 @@ struct rd_dev_sg_table { } ____cacheline_aligned; #define RDF_HAS_PAGE_COUNT 0x01 +#define RDF_NULLIO 0x02 struct rd_dev { struct se_device dev; @@ -32,8 +33,12 @@ struct rd_dev { u32 rd_page_count; /* 
Number of SG tables in sg_table_array */ u32 sg_table_count; + /* Number of SG tables in sg_prot_array */ + u32 sg_prot_count; /* Array of rd_dev_sg_table_t containing scatterlists */ struct rd_dev_sg_table *sg_table_array; + /* Array of rd_dev_sg_table containing protection scatterlists */ + struct rd_dev_sg_table *sg_prot_array; /* Ramdisk HBA device is connected to */ struct rd_host *rd_host; } ____cacheline_aligned; diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 26a6d183ccb..bd78d9235ac 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1,7 +1,7 @@ /* * SCSI Block Commands (SBC) parsing and emulation. * - * (c) Copyright 2002-2012 RisingTide Systems LLC. + * (c) Copyright 2002-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * @@ -23,8 +23,10 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/ratelimit.h> +#include <linux/crc-t10dif.h> #include <asm/unaligned.h> #include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> @@ -32,17 +34,33 @@ #include "target_core_internal.h" #include "target_core_ua.h" - +#include "target_core_alua.h" static sense_reason_t sbc_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; + unsigned char *cdb = cmd->t_task_cdb; unsigned long long blocks_long = dev->transport->get_blocks(dev); unsigned char *rbuf; unsigned char buf[8]; u32 blocks; + /* + * SBC-2 says: + * If the PMI bit is set to zero and the LOGICAL BLOCK + * ADDRESS field is not set to zero, the device server shall + * terminate the command with CHECK CONDITION status with + * the sense key set to ILLEGAL REQUEST and the additional + * sense code set to INVALID FIELD IN CDB. + * + * In SBC-3, these fields are obsolete, but some SCSI + * compliance tests actually check this, so we might as well + * follow SBC-2. 
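A minimal user-space helper mirroring the SBC-2 rule quoted above (the function name is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    /* READ CAPACITY(10): with PMI (byte 8, bit 0) clear, the LOGICAL
     * BLOCK ADDRESS field in CDB bytes 2..5 must be zero; anything else
     * is ILLEGAL REQUEST / INVALID FIELD IN CDB. */
    static bool read_capacity_10_cdb_valid(const uint8_t *cdb)
    {
            bool pmi = cdb[8] & 1;
            uint32_t lba = (uint32_t)cdb[2] << 24 | (uint32_t)cdb[3] << 16 |
                           (uint32_t)cdb[4] << 8 | cdb[5];

            return pmi || lba == 0;
    }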
+ */ + if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) + return TCM_INVALID_CDB_FIELD; + if (blocks_long >= 0x00000000ffffffff) blocks = 0xffffffff; else @@ -58,13 +76,12 @@ sbc_emulate_readcapacity(struct se_cmd *cmd) buf[7] = dev->dev_attrib.block_size & 0xff; rbuf = transport_kmap_data_sg(cmd); - if (!rbuf) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); - transport_kunmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8); return 0; } @@ -72,6 +89,7 @@ static sense_reason_t sbc_emulate_readcapacity_16(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; unsigned char *rbuf; unsigned char buf[32]; unsigned long long blocks = dev->transport->get_blocks(dev); @@ -90,24 +108,40 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; buf[11] = dev->dev_attrib.block_size & 0xff; /* + * Set P_TYPE and PROT_EN bits for DIF support + */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type) + buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; + } + + if (dev->transport->get_lbppbe) + buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; + + if (dev->transport->get_alignment_offset_lbas) { + u16 lalba = dev->transport->get_alignment_offset_lbas(dev); + buf[14] = (lalba >> 8) & 0x3f; + buf[15] = lalba & 0xff; + } + + /* * Set Thin Provisioning Enable bit following sbc3r22 in section * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. */ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) - buf[14] = 0x80; + buf[14] |= 0x80; rbuf = transport_kmap_data_sg(cmd); - if (!rbuf) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); - transport_kunmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 32); return 0; } -sector_t spc_get_write_same_sectors(struct se_cmd *cmd) +sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) { u32 num_blocks; @@ -128,7 +162,7 @@ sector_t spc_get_write_same_sectors(struct se_cmd *cmd) return cmd->se_dev->transport->get_blocks(cmd->se_dev) - cmd->t_task_lba + 1; } -EXPORT_SYMBOL(spc_get_write_same_sectors); +EXPORT_SYMBOL(sbc_get_write_same_sectors); static sense_reason_t sbc_emulate_noop(struct se_cmd *cmd) @@ -142,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) return cmd->se_dev->dev_attrib.block_size * sectors; } -static int sbc_check_valid_sectors(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - unsigned long long end_lba; - u32 sectors; - - sectors = cmd->data_length / dev->dev_attrib.block_size; - end_lba = dev->transport->get_blocks(dev) + 1; - - if (cmd->t_task_lba + sectors > end_lba) { - pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", - cmd->t_task_lba, sectors, end_lba); - return -EINVAL; - } - - return 0; -} - static inline u32 transport_get_sectors_6(unsigned char *cdb) { /* @@ -235,7 +251,7 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) static sense_reason_t sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops 
*ops) { - unsigned int sectors = spc_get_write_same_sectors(cmd); + unsigned int sectors = sbc_get_write_same_sectors(cmd); if ((flags[0] & 0x04) || (flags[0] & 0x02)) { pr_err("WRITE_SAME PBDATA and LBDATA" @@ -248,6 +264,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o sectors, cmd->se_dev->dev_attrib.max_write_same_len); return TCM_INVALID_CDB_FIELD; } + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ + if (flags[0] & 0x10) { + pr_warn("WRITE SAME with ANCHOR not supported\n"); + return TCM_INVALID_CDB_FIELD; + } /* * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting * translated into block discard requests within backend code. @@ -266,13 +287,13 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o return 0; } -static void xdreadwrite_callback(struct se_cmd *cmd) +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) { unsigned char *buf, *addr; struct scatterlist *sg; unsigned int offset; - int i; - int count; + sense_reason_t ret = TCM_NO_SENSE; + int i, count; /* * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command * @@ -287,7 +308,7 @@ static void xdreadwrite_callback(struct se_cmd *cmd) buf = kmalloc(cmd->data_length, GFP_KERNEL); if (!buf) { pr_err("Unable to allocate xor_callback buf\n"); - return; + return TCM_OUT_OF_RESOURCES; } /* * Copy the scatterlist WRITE buffer located at cmd->t_data_sg @@ -306,8 +327,10 @@ static void xdreadwrite_callback(struct se_cmd *cmd) offset = 0; for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { addr = kmap_atomic(sg_page(sg)); - if (!addr) + if (!addr) { + ret = TCM_OUT_OF_RESOURCES; goto out; + } for (i = 0; i < sg->length; i++) *(addr + sg->offset + i) ^= *(buf + offset + i); @@ -318,6 +341,328 @@ static void xdreadwrite_callback(struct se_cmd *cmd) out: kfree(buf); + return ret; +} + +static sense_reason_t +sbc_execute_rw(struct se_cmd *cmd) +{ + return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, + cmd->data_direction); +} + +static sense_reason_t compare_and_write_post(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + /* + * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through + * within target_complete_ok_work() if the command was successfully + * sent to the backend driver. + */ + spin_lock_irq(&cmd->t_state_lock); + if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) + cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; + spin_unlock_irq(&cmd->t_state_lock); + + /* + * Unlock ->caw_sem originally obtained during sbc_compare_and_write() + * before the original READ I/O submission. + */ + up(&dev->caw_sem); + + return TCM_NO_SENSE; +} + +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct scatterlist *write_sg = NULL, *sg; + unsigned char *buf = NULL, *addr; + struct sg_mapping_iter m; + unsigned int offset = 0, len; + unsigned int nlbas = cmd->t_task_nolb; + unsigned int block_size = dev->dev_attrib.block_size; + unsigned int compare_len = (nlbas * block_size); + sense_reason_t ret = TCM_NO_SENSE; + int rc, i; + + /* + * Handle early failure in transport_generic_request_failure(), + * which will not have taken ->caw_sem yet.. + */ + if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) + return TCM_NO_SENSE; + /* + * Immediately exit + release dev->caw_sem if command has already + * been failed with a non-zero SCSI status. 
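For the xdreadwrite_callback() above, the XDWRITEREAD(10) data flow reduces to an XOR of the transferred WRITE payload into the data read back from media; a user-space model, with flat buffers standing in for the two scatterlists:

    #include <stddef.h>
    #include <stdint.h>

    /* bidi_read_buf initially holds the data read back from media
     * (cmd->t_bidi_data_sg); after the loop it holds the XOR result
     * that is returned to the initiator. */
    static void xdwriteread_xor(const uint8_t *write_buf,
                                uint8_t *bidi_read_buf, size_t len)
    {
            for (size_t i = 0; i < len; i++)
                    bidi_read_buf[i] ^= write_buf[i];
    }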
+ */ + if (cmd->scsi_status) { + pr_err("compare_and_write_callback: non zero scsi_status:" + " 0x%02x\n", cmd->scsi_status); + goto out; + } + + buf = kzalloc(cmd->data_length, GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate compare_and_write buf\n"); + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + + write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, + GFP_KERNEL); + if (!write_sg) { + pr_err("Unable to allocate compare_and_write sg\n"); + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + sg_init_table(write_sg, cmd->t_data_nents); + /* + * Setup verify and write data payloads from total NumberLBAs. + */ + rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, + cmd->data_length); + if (!rc) { + pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + /* + * Compare the SCSI READ payload against the verify payload + */ + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { + addr = (unsigned char *)kmap_atomic(sg_page(sg)); + if (!addr) { + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + + len = min(sg->length, compare_len); + + if (memcmp(addr, buf + offset, len)) { + pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", + addr, buf + offset); + kunmap_atomic(addr); + goto miscompare; + } + kunmap_atomic(addr); + + offset += len; + compare_len -= len; + if (!compare_len) + break; + } + + i = 0; + len = cmd->t_task_nolb * block_size; + sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); + /* + * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. + */ + while (len) { + sg_miter_next(&m); + + if (block_size < PAGE_SIZE) { + sg_set_page(&write_sg[i], m.page, block_size, + block_size); + } else { + sg_miter_next(&m); + sg_set_page(&write_sg[i], m.page, block_size, + 0); + } + len -= block_size; + i++; + } + sg_miter_stop(&m); + /* + * Save the original SGL + nents values before updating to new + * assignments, to be released in transport_free_pages() -> + * transport_reset_sgl_orig() + */ + cmd->t_data_sg_orig = cmd->t_data_sg; + cmd->t_data_sg = write_sg; + cmd->t_data_nents_orig = cmd->t_data_nents; + cmd->t_data_nents = 1; + + cmd->sam_task_attr = MSG_HEAD_TAG; + cmd->transport_complete_callback = compare_and_write_post; + /* + * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler + * for submitting the adjusted SGL to write instance user-data. + */ + cmd->execute_cmd = sbc_execute_rw; + + spin_lock_irq(&cmd->t_state_lock); + cmd->t_state = TRANSPORT_PROCESSING; + cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + + __target_execute_cmd(cmd); + + kfree(buf); + return ret; + +miscompare: + pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", + dev->transport->name); + ret = TCM_MISCOMPARE_VERIFY; +out: + /* + * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in + * sbc_compare_and_write() before the original READ I/O submission. + */ + up(&dev->caw_sem); + kfree(write_sg); + kfree(buf); + return ret; +} + +static sense_reason_t +sbc_compare_and_write(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + sense_reason_t ret; + int rc; + /* + * Submit the READ first for COMPARE_AND_WRITE to perform the + * comparison using SGLs at cmd->t_bidi_data_sg.. 
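The overall shape of the emulation, read then compare then conditionally write, all serialized by dev->caw_sem so the LBA range cannot change between phases, can be modeled in user space as follows (single 512-byte block, names hypothetical):

    #include <stdbool.h>
    #include <string.h>

    struct caw_blk {
            unsigned char media[512];       /* backing store contents */
            unsigned char verify[512];      /* first half of the payload */
            unsigned char write[512];       /* second half of the payload */
    };

    static bool compare_and_write_model(struct caw_blk *b)
    {
            unsigned char cur[512];

            /* Phase 1: READ, as submitted via execute_rw(DMA_FROM_DEVICE) */
            memcpy(cur, b->media, sizeof(cur));

            /* Phase 2: compare against the verify payload */
            if (memcmp(cur, b->verify, sizeof(cur)))
                    return false;           /* TCM_MISCOMPARE_VERIFY */

            /* Phase 3: write the new payload only on a full match */
            memcpy(b->media, b->write, sizeof(b->media));
            return true;
    }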
+ */ + rc = down_interruptible(&dev->caw_sem); + if ((rc != 0) || signal_pending(current)) { + cmd->transport_complete_callback = NULL; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * Reset cmd->data_length to individual block_size in order to not + * confuse backend drivers that depend on this value matching the + * size of the I/O being submitted. + */ + cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; + + ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, + DMA_FROM_DEVICE); + if (ret) { + cmd->transport_complete_callback = NULL; + up(&dev->caw_sem); + return ret; + } + /* + * Unlock of dev->caw_sem to occur in compare_and_write_callback() + * upon MISCOMPARE, or in compare_and_write_done() upon completion + * of WRITE instance user-data. + */ + return TCM_NO_SENSE; +} + +static int +sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, + bool is_write, struct se_cmd *cmd) +{ + if (is_write) { + cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS : + TARGET_PROT_DOUT_INSERT; + switch (protect) { + case 0x0: + case 0x3: + cmd->prot_checks = 0; + break; + case 0x1: + case 0x5: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; + break; + case 0x2: + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; + break; + case 0x4: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + break; + default: + pr_err("Unsupported protect field %d\n", protect); + return -EINVAL; + } + } else { + cmd->prot_op = protect ? TARGET_PROT_DIN_PASS : + TARGET_PROT_DIN_STRIP; + switch (protect) { + case 0x0: + case 0x1: + case 0x5: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; + break; + case 0x2: + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; + break; + case 0x3: + cmd->prot_checks = 0; + break; + case 0x4: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + break; + default: + pr_err("Unsupported protect field %d\n", protect); + return -EINVAL; + } + } + + return 0; +} + +static bool +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, + u32 sectors, bool is_write) +{ + u8 protect = cdb[1] >> 5; + + if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto) + return true; + + switch (dev->dev_attrib.pi_prot_type) { + case TARGET_DIF_TYPE3_PROT: + cmd->reftag_seed = 0xffffffff; + break; + case TARGET_DIF_TYPE2_PROT: + if (protect) + return false; + + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE1_PROT: + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE0_PROT: + default: + return true; + } + + if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, + is_write, cmd)) + return false; + + cmd->prot_type = dev->dev_attrib.pi_prot_type; + cmd->prot_length = dev->prot_length * sectors; + + /** + * In case protection information exists over the wire + * we modify command data length to describe pure data. 
+ * The actual transfer length is data length + protection + * length + **/ + if (protect) + cmd->data_length = sectors * dev->dev_attrib.block_size; + + pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " + "prot_op=%d prot_checks=%d\n", + __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, + cmd->prot_op, cmd->prot_checks); + + return true; } sense_reason_t @@ -334,56 +679,88 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_6(cdb); cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case READ_10: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case READ_12: sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case READ_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case WRITE_6: sectors = transport_get_sectors_6(cdb); cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case WRITE_10: case WRITE_VERIFY: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case WRITE_12: sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case WRITE_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; break; case XDWRITEREAD_10: if (cmd->data_direction != DMA_TO_DEVICE || @@ -397,7 +774,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) /* * Setup BIDI XOR callback to be run after I/O completion. 
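Stepping back to sbc_check_prot() and sbc_set_prot_op_checks() above: the protect value is the RDPROTECT/WRPROTECT field, the top three bits of CDB byte 1. A condensed user-space restatement of the write-side mapping (the flag names are local stand-ins for the TARGET_DIF_CHECK_* values):

    #include <stdint.h>

    #define CHECK_GUARD  0x1
    #define CHECK_REFTAG 0x2

    static inline uint8_t cdb_protect_field(const uint8_t *cdb)
    {
            return cdb[1] >> 5;             /* WRPROTECT / RDPROTECT */
    }

    /* type1 selects the extra reference-tag check (TYPE1 protection) */
    static int wrprotect_checks(uint8_t protect, int type1, uint8_t *checks)
    {
            switch (protect) {
            case 0x0: case 0x3:
                    *checks = 0;
                    break;
            case 0x1: case 0x5:
                    *checks = CHECK_GUARD | (type1 ? CHECK_REFTAG : 0);
                    break;
            case 0x2:
                    *checks = type1 ? CHECK_REFTAG : 0;
                    break;
            case 0x4:
                    *checks = CHECK_GUARD;
                    break;
            default:
                    return -1;              /* unsupported protect value */
            }
            return 0;
    }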
*/ - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; cmd->transport_complete_callback = &xdreadwrite_callback; if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; @@ -420,7 +798,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) * Setup BIDI XOR callback to be run during after I/O * completion. */ - cmd->execute_cmd = ops->execute_rw; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_execute_rw; cmd->transport_complete_callback = &xdreadwrite_callback; if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; @@ -447,6 +826,28 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) } break; } + case COMPARE_AND_WRITE: + sectors = cdb[13]; + /* + * Currently enforce COMPARE_AND_WRITE for a single sector + */ + if (sectors > 1) { + pr_err("COMPARE_AND_WRITE contains NoLB: %u greater" + " than 1\n", sectors); + return TCM_INVALID_CDB_FIELD; + } + /* + * Double size because we have two buffers, note that + * zero is not an error.. + */ + size = 2 * sbc_get_size(cmd, sectors); + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); + cmd->t_task_nolb = sectors; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; + cmd->execute_rw = ops->execute_rw; + cmd->execute_cmd = sbc_compare_and_write; + cmd->transport_complete_callback = compare_and_write_callback; + break; case READ_CAPACITY: size = READ_CAP_LEN; cmd->execute_cmd = sbc_emulate_readcapacity; @@ -456,6 +857,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case SAI_READ_CAPACITY_16: cmd->execute_cmd = sbc_emulate_readcapacity_16; break; + case SAI_REPORT_REFERRALS: + cmd->execute_cmd = target_emulate_report_referrals; + break; default: pr_err("Unsupported SA: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); @@ -466,12 +870,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: - if (!ops->execute_sync_cache) - return TCM_UNSUPPORTED_SCSI_OPCODE; - - /* - * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE - */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); @@ -479,18 +877,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); } - - size = sbc_get_size(cmd, sectors); - - /* - * Check to ensure that LBA + Range does not exceed past end of - * device for IBLOCK and FILEIO ->do_sync_cache() backend calls - */ - if (cmd->t_task_lba || sectors) { - if (sbc_check_valid_sectors(cmd) < 0) - return TCM_INVALID_CDB_FIELD; + if (ops->execute_sync_cache) { + cmd->execute_cmd = ops->execute_sync_cache; + goto check_lba; } - cmd->execute_cmd = ops->execute_sync_cache; + size = 0; + cmd->execute_cmd = sbc_emulate_noop; break; case UNMAP: if (!ops->execute_unmap) @@ -533,8 +925,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; case VERIFY: size = 0; + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->execute_cmd = sbc_emulate_noop; - break; + goto check_lba; case REZERO_UNIT: case SEEK_6: case SEEK_10: @@ -574,16 +968,17 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) dev->dev_attrib.hw_max_sectors); return TCM_INVALID_CDB_FIELD; } - +check_lba: end_lba = dev->transport->get_blocks(dev) + 1; if (cmd->t_task_lba + sectors > end_lba) { pr_err("cmd exceeds last lba %llu " "(lba %llu, sectors %u)\n", end_lba, cmd->t_task_lba, sectors); - return TCM_INVALID_CDB_FIELD; + return 
TCM_ADDRESS_OUT_OF_RANGE; } - size = sbc_get_size(cmd, sectors); + if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) + size = sbc_get_size(cmd, sectors); } return target_cmd_size_check(cmd, size); @@ -595,3 +990,347 @@ u32 sbc_get_device_type(struct se_device *dev) return TYPE_DISK; } EXPORT_SYMBOL(sbc_get_device_type); + +sense_reason_t +sbc_execute_unmap(struct se_cmd *cmd, + sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *, + sector_t, sector_t), + void *priv) +{ + struct se_device *dev = cmd->se_dev; + unsigned char *buf, *ptr = NULL; + sector_t lba; + int size; + u32 range; + sense_reason_t ret = 0; + int dl, bd_dl; + + /* We never set ANC_SUP */ + if (cmd->t_task_cdb[1]) + return TCM_INVALID_CDB_FIELD; + + if (cmd->data_length == 0) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + + if (cmd->data_length < 8) { + pr_warn("UNMAP parameter list length %u too small\n", + cmd->data_length); + return TCM_PARAMETER_LIST_LENGTH_ERROR; + } + + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + dl = get_unaligned_be16(&buf[0]); + bd_dl = get_unaligned_be16(&buf[2]); + + size = cmd->data_length - 8; + if (bd_dl > size) + pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", + cmd->data_length, bd_dl); + else + size = bd_dl; + + if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { + ret = TCM_INVALID_PARAMETER_LIST; + goto err; + } + + /* First UNMAP block descriptor starts at 8 byte offset */ + ptr = &buf[8]; + pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" + " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); + + while (size >= 16) { + lba = get_unaligned_be64(&ptr[0]); + range = get_unaligned_be32(&ptr[8]); + pr_debug("UNMAP: Using lba: %llu and range: %u\n", + (unsigned long long)lba, range); + + if (range > dev->dev_attrib.max_unmap_lba_count) { + ret = TCM_INVALID_PARAMETER_LIST; + goto err; + } + + if (lba + range > dev->transport->get_blocks(dev) + 1) { + ret = TCM_ADDRESS_OUT_OF_RANGE; + goto err; + } + + ret = do_unmap_fn(cmd, priv, lba, range); + if (ret) + goto err; + + ptr += 16; + size -= 16; + } + +err: + transport_kunmap_data_sg(cmd); + if (!ret) + target_complete_cmd(cmd, GOOD); + return ret; +} +EXPORT_SYMBOL(sbc_execute_unmap); + +void +sbc_dif_generate(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg, *psg = cmd->t_prot_sg; + sector_t sector = cmd->t_task_lba; + void *daddr, *paddr; + int i, j, offset = 0; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= psg->length) { + kunmap_atomic(paddr); + psg = sg_next(psg); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + offset = 0; + } + + sdt = paddr + offset; + sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, + dev->dev_attrib.block_size)); + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) + sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); + sdt->app_tag = 0; + + pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + sector++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } +} + +static sense_reason_t +sbc_dif_v1_verify(struct se_device *dev, struct 
se_dif_v1_tuple *sdt, + const void *p, sector_t sector, unsigned int ei_lba) +{ + int block_size = dev->dev_attrib.block_size; + __be16 csum; + + csum = cpu_to_be16(crc_t10dif(p, block_size)); + + if (sdt->guard_tag != csum) { + pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" + " csum 0x%04x\n", (unsigned long long)sector, + be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); + return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + } + + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && + be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { + pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" + " sector MSB: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && + be32_to_cpu(sdt->ref_tag) != ei_lba) { + pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" + " ei_lba: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), ei_lba); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + return 0; +} + +static void +sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, + struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct scatterlist *psg; + void *paddr, *addr; + unsigned int i, len, left; + unsigned int offset = sg_off; + + left = sectors * dev->prot_length; + + for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { + unsigned int psg_len, copied = 0; + + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + psg_len = min(left, psg->length); + while (psg_len) { + len = min(psg_len, sg->length - offset); + addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; + + if (read) + memcpy(paddr + copied, addr, len); + else + memcpy(addr, paddr + copied, len); + + left -= len; + offset += len; + copied += len; + psg_len -= len; + + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + } + kunmap_atomic(addr); + } + kunmap_atomic(paddr); + } +} + +sense_reason_t +sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg, *psg = cmd->t_prot_sg; + sector_t sector = start; + void *daddr, *paddr; + int i, j, offset = 0; + sense_reason_t rc; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= psg->length) { + kunmap_atomic(paddr); + psg = sg_next(psg); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + offset = 0; + } + + sdt = paddr + offset; + + pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, + ei_lba); + if (rc) { + kunmap_atomic(paddr); + kunmap_atomic(daddr); + cmd->bad_sector = sector; + return rc; + } + + sector++; + ei_lba++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } + sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); + + return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_write); + +static sense_reason_t +__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct 
scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg, *psg = sg; + sector_t sector = start; + void *daddr, *paddr; + int i, j, offset = sg_off; + sense_reason_t rc; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(psg)) + sg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= psg->length) { + kunmap_atomic(paddr); + psg = sg_next(psg); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + offset = 0; + } + + sdt = paddr + offset; + + pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + if (sdt->app_tag == cpu_to_be16(0xffff)) { + sector++; + offset += sizeof(struct se_dif_v1_tuple); + continue; + } + + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, + ei_lba); + if (rc) { + kunmap_atomic(paddr); + kunmap_atomic(daddr); + cmd->bad_sector = sector; + return rc; + } + + sector++; + ei_lba++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } + + return 0; +} + +sense_reason_t +sbc_dif_read_strip(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + u32 sectors = cmd->prot_length / dev->prot_length; + + return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, + cmd->t_prot_sg, 0); +} + +sense_reason_t +sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ + sense_reason_t rc; + + rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); + if (rc) + return rc; + + sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); + return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_read); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 84f9e96e8ac..6cd7222738f 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -1,7 +1,7 @@ /* * SCSI Primary Commands (SPC) parsing and emulation. * - * (c) Copyright 2002-2012 RisingTide Systems LLC. + * (c) Copyright 2002-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * @@ -35,7 +35,7 @@ #include "target_core_alua.h" #include "target_core_pr.h" #include "target_core_ua.h" - +#include "target_core_xcopy.h" static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) { @@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) buf[5] = 0x80; /* - * Set TPGS field for explict and/or implict ALUA access type + * Set TPGS field for explicit and/or implicit ALUA access type * and opteration. 
* * See spc4r17 section 6.4.2 Table 135 @@ -66,11 +66,12 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); } -static sense_reason_t -spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf) +sense_reason_t +spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) { struct se_lun *lun = cmd->se_lun; struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; /* Set RMB (removable media) for tape devices */ if (dev->transport->get_device_type(dev) == TYPE_TAPE) @@ -95,38 +96,51 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf) */ spc_fill_alua_data(lun->lun_sep, buf); + /* + * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY + */ + if (dev->dev_attrib.emulate_3pc) + buf[5] |= 0x8; + /* + * Set Protection (PROTECT) bit when DIF has been enabled on the + * device, and the transport supports VERIFY + PASS. + */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type) + buf[5] |= 0x1; + } + buf[7] = 0x2; /* CmdQue=1 */ - snprintf(&buf[8], 8, "LIO-ORG"); - snprintf(&buf[16], 16, "%s", dev->t10_wwn.model); - snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision); + memcpy(&buf[8], "LIO-ORG ", 8); + memset(&buf[16], 0x20, 16); + memcpy(&buf[16], dev->t10_wwn.model, + min_t(size_t, strlen(dev->t10_wwn.model), 16)); + memcpy(&buf[32], dev->t10_wwn.revision, + min_t(size_t, strlen(dev->t10_wwn.revision), 4)); buf[4] = 31; /* Set additional length to 31 */ return 0; } +EXPORT_SYMBOL(spc_emulate_inquiry_std); /* unit serial number */ static sense_reason_t spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; - u16 len = 0; + u16 len; if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { - u32 unit_serial_len; - - unit_serial_len = strlen(dev->t10_wwn.unit_serial); - unit_serial_len++; /* For NULL Terminator */ - - len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); + len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); len++; /* Extra Byte for NULL Terminator */ buf[3] = len; } return 0; } -static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, - unsigned char *buf) +void spc_parse_naa_6h_vendor_specific(struct se_device *dev, + unsigned char *buf) { unsigned char *p = &dev->t10_wwn.unit_serial[0]; int cnt; @@ -160,7 +174,7 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, * Device identification VPD, for a complete list of * DESIGNATOR TYPEs see spc4r17 Table 459. */ -static sense_reason_t +sense_reason_t spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; @@ -257,7 +271,7 @@ check_t10_vend_desc: port = lun->lun_sep; if (port) { struct t10_alua_lu_gp *lu_gp; - u32 padding, scsi_name_len; + u32 padding, scsi_name_len, scsi_target_len; u16 lu_gp_id = 0; u16 tg_pt_gp_id = 0; u16 tpgt; @@ -355,16 +369,6 @@ check_lu_gp: * section 7.5.1 Table 362 */ check_scsi_name: - scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); - /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ - scsi_name_len += 10; - /* Check for 4-byte padding */ - padding = ((-scsi_name_len) & 3); - if (padding != 0) - scsi_name_len += padding; - /* Header size + Designation descriptor */ - scsi_name_len += 4; - buf[off] = (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x3; /* CODE SET == UTF-8 */ @@ -392,30 +396,107 @@ check_scsi_name: * shall be no larger than 256 and shall be a multiple * of four. 
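The ((-len) & 3) idiom used to enforce that rule computes the number of bytes needed to round len up to a multiple of four; a quick self-check:

    #include <assert.h>

    int main(void)
    {
            /* e.g. len = 21: (-21) & 3 == 3, so 21 + 3 == 24 */
            for (unsigned int len = 1; len <= 256; len++) {
                    unsigned int padding = (-len) & 3;

                    assert(padding < 4);
                    assert((len + padding) % 4 == 0);
            }
            return 0;
    }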
*/ + padding = ((-scsi_name_len) & 3); if (padding) scsi_name_len += padding; + if (scsi_name_len > 256) + scsi_name_len = 256; buf[off-1] = scsi_name_len; off += scsi_name_len; /* Header size + Designation descriptor */ len += (scsi_name_len + 4); + + /* + * Target device designator + */ + buf[off] = + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); + buf[off++] |= 0x3; /* CODE SET == UTF-8 */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOCIATION == target device: 10b */ + buf[off] |= 0x20; + /* DESIGNATOR TYPE == SCSI name string */ + buf[off++] |= 0x8; + off += 2; /* Skip over Reserved and length */ + /* + * SCSI name string identifier containing $FABRIC_MOD + * dependent information. For LIO-Target and iSCSI + * Target Port, this means "<iSCSI name>" in + * UTF-8 encoding. + */ + scsi_target_len = sprintf(&buf[off], "%s", + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); + scsi_target_len += 1 /* Include NULL terminator */; + /* + * The null-terminated, null-padded (see 4.4.2) SCSI + * NAME STRING field contains a UTF-8 format string. + * The number of bytes in the SCSI NAME STRING field + * (i.e., the value in the DESIGNATOR LENGTH field) + * shall be no larger than 256 and shall be a multiple + * of four. + */ + padding = ((-scsi_target_len) & 3); + if (padding) + scsi_target_len += padding; + if (scsi_target_len > 256) + scsi_target_len = 256; + + buf[off-1] = scsi_target_len; + off += scsi_target_len; + + /* Header size + Designation descriptor */ + len += (scsi_target_len + 4); } buf[2] = ((len >> 8) & 0xff); buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ return 0; } +EXPORT_SYMBOL(spc_emulate_evpd_83); + +static bool +spc_check_dev_wce(struct se_device *dev) +{ + bool wce = false; + + if (dev->transport->get_write_cache) + wce = dev->transport->get_write_cache(dev); + else if (dev->dev_attrib.emulate_write_cache > 0) + wce = true; + + return wce; +} /* Extended INQUIRY Data VPD Page */ static sense_reason_t spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) { + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + buf[3] = 0x3c; + /* + * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK + * only for TYPE3 protection. 
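A sketch of the four-byte designation descriptor header that the target device designator above emits (SPC-4 table 459 layout; assumes buf is zeroed and large enough, and the helper name is hypothetical):

    #include <stdint.h>
    #include <string.h>

    static int build_scsi_name_desc(uint8_t *buf, uint8_t proto_ident,
                                    uint8_t association, const char *name)
    {
            size_t len = strlen(name) + 1;          /* NULL terminator */

            len += (-len) & 3;                      /* pad to multiple of 4 */
            /* SPC-4 caps the designator at 256 bytes; short names are
             * assumed here so the one-byte length field cannot wrap. */

            buf[0] = (uint8_t)(proto_ident << 4) | 0x3; /* CODE SET == UTF-8 */
            buf[1] = 0x80 |                         /* PIV=1 */
                     (uint8_t)(association << 4) |  /* 0x2 => target device */
                     0x8;                           /* SCSI name string type */
            buf[3] = (uint8_t)len;                  /* buf[2] stays reserved */
            memcpy(&buf[4], name, strlen(name) + 1);

            return 4 + (int)len;    /* header + padded designator length */
    }

For an iSCSI target this produces the header in front of the UTF-8 IQN string, matching the bytes assembled above.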
+ */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) + buf[4] = 0x5; + else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) + buf[4] = 0x4; + } + /* Set HEADSUP, ORDSUP, SIMPSUP */ buf[5] = 0x07; /* If WriteCache emulation is enabled, set V_SUP */ - if (cmd->se_dev->dev_attrib.emulate_write_cache > 0) + if (spc_check_dev_wce(dev)) buf[6] = 0x01; + /* If an LBA map is present set R_SUP */ + spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + buf[8] = 0x10; + spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock); return 0; } @@ -426,6 +507,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) struct se_device *dev = cmd->se_dev; u32 max_sectors; int have_tp = 0; + int opt, min; /* * Following spc3r22 section 6.5.3 Block Limits VPD page, when @@ -440,11 +522,19 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* Set WSNZ to 1 */ buf[4] = 0x01; + /* + * Set MAXIMUM COMPARE AND WRITE LENGTH + */ + if (dev->dev_attrib.emulate_caw) + buf[5] = 0x01; /* * Set OPTIMAL TRANSFER LENGTH GRANULARITY */ - put_unaligned_be16(1, &buf[6]); + if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev))) + put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]); + else + put_unaligned_be16(1, &buf[6]); /* * Set MAXIMUM TRANSFER LENGTH @@ -456,7 +546,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set OPTIMAL TRANSFER LENGTH */ - put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); + if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev))) + put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]); + else + put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); /* * Exit now if we don't support TP. 
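Since the OPTIMAL TRANSFER LENGTH GRANULARITY and OPTIMAL TRANSFER LENGTH fields above are expressed in logical blocks, the byte-granular io_min/io_opt hints from the backend are divided by block_size. A worked example (the sample values, a RAID chunk and a full stripe, are assumptions):

    #include <stdio.h>

    int main(void)
    {
            unsigned int block_size = 512;
            unsigned int io_min = 4096;             /* backend minimum I/O */
            unsigned int io_opt = 1048576;          /* backend optimal I/O */

            printf("OTLG (buf[6..7]):   %u blocks\n", io_min / block_size);
            printf("OTL  (buf[12..15]): %u blocks\n", io_opt / block_size);
            return 0;       /* prints 8 and 2048 */
    }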
@@ -562,6 +655,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) return 0; } +/* Referrals VPD page */ +static sense_reason_t +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = 0x0c; + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]); + + return 0; +} + static sense_reason_t spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); @@ -576,6 +683,7 @@ static struct { { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, + { .page = 0xb3, .emulate = spc_emulate_evpd_b3 }, }; /* supported vital product data pages */ @@ -605,11 +713,16 @@ spc_emulate_inquiry(struct se_cmd *cmd) struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; unsigned char *rbuf; unsigned char *cdb = cmd->t_task_cdb; - unsigned char buf[SE_INQUIRY_BUF]; + unsigned char *buf; sense_reason_t ret; int p; + int len = 0; - memset(buf, 0, SE_INQUIRY_BUF); + buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate response buffer for INQUIRY\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } if (dev == tpg->tpg_virt_lun0.lun_se_dev) buf[0] = 0x3f; /* Not connected */ @@ -625,6 +738,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) } ret = spc_emulate_inquiry_std(cmd, buf); + len = buf[4] + 5; goto out; } @@ -632,6 +746,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) if (cdb[2] == evpd_handlers[p].page) { buf[1] = cdb[2]; ret = evpd_handlers[p].emulate(cmd, buf); + len = get_unaligned_be16(&buf[2]) + 4; goto out; } } @@ -641,18 +756,18 @@ spc_emulate_inquiry(struct se_cmd *cmd) out: rbuf = transport_kmap_data_sg(cmd); - if (!rbuf) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); - transport_kunmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length)); + transport_kunmap_data_sg(cmd); + } + kfree(buf); if (!ret) - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, len); return ret; } -static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p) { p[0] = 0x01; p[1] = 0x0a; @@ -665,8 +780,11 @@ out: return 12; } -static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p) { + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + p[0] = 0x0a; p[1] = 0x0a; @@ -748,6 +866,21 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p) * status (see SAM-4). */ p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; + /* + * From spc4r30, section 7.5.7 Control mode page + * + * Application Tag Owner (ATO) bit set to one. + * + * If the ATO bit is set to one the device server shall not modify the + * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection + * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE + * TAG field. 
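The two len computations above feed target_complete_cmd_with_length(), which needs the real response size so a residual can be reported against the initiator's allocation length; the arithmetic in isolation:

    #include <stdint.h>

    /* Standard INQUIRY data: ADDITIONAL LENGTH (byte 4) + 5 header bytes.
     * EVPD pages: big-endian PAGE LENGTH (bytes 2..3) + 4 header bytes. */
    static uint32_t inquiry_response_len(const uint8_t *buf, int evpd)
    {
            if (evpd)
                    return (((uint32_t)buf[2] << 8) | buf[3]) + 4;
            return (uint32_t)buf[4] + 5;
    }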
+ */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type) + p[5] |= 0x80; + } + p[8] = 0xff; p[9] = 0xff; p[11] = 30; @@ -756,8 +889,10 @@ out: return 12; } -static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) { + struct se_device *dev = cmd->se_dev; + p[0] = 0x08; p[1] = 0x12; @@ -765,7 +900,7 @@ static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p) if (pc == 1) goto out; - if (dev->dev_attrib.emulate_write_cache > 0) + if (spc_check_dev_wce(dev)) p[2] = 0x04; /* Write Cache Enable */ p[12] = 0x20; /* Disabled Read Ahead */ @@ -773,7 +908,7 @@ out: return 20; } -static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p) +static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p) { p[0] = 0x1c; p[1] = 0x0a; @@ -789,7 +924,7 @@ out: static struct { uint8_t page; uint8_t subpage; - int (*emulate)(struct se_device *, u8, unsigned char *); + int (*emulate)(struct se_cmd *, u8, unsigned char *); } modesense_handlers[] = { { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery }, { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching }, @@ -851,7 +986,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; - unsigned char *buf, *map_buf; + unsigned char buf[SE_MODE_PAGE_BUF], *rbuf; int type = dev->transport->get_device_type(dev); int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); bool dbd = !!(cdb[1] & 0x08); @@ -863,26 +998,8 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) int ret; int i; - map_buf = transport_kmap_data_sg(cmd); - if (!map_buf) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - /* - * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we - * know we actually allocated a full page. Otherwise, if the - * data buffer is too small, allocate a temporary buffer so we - * don't have to worry about overruns in all our INQUIRY - * emulation handling. - */ - if (cmd->data_length < SE_MODE_PAGE_BUF && - (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { - buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL); - if (!buf) { - transport_kunmap_data_sg(cmd); - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } - } else { - buf = map_buf; - } + memset(buf, 0, SE_MODE_PAGE_BUF); + /* * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). @@ -895,7 +1012,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) spc_modesense_write_protect(&buf[length], type); - if ((dev->dev_attrib.emulate_write_cache > 0) && + if ((spc_check_dev_wce(dev)) && (dev->dev_attrib.emulate_fua_write > 0)) spc_modesense_dpofua(&buf[length], type); @@ -934,8 +1051,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) if (page == 0x3f) { if (subpage != 0x00 && subpage != 0xff) { pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); - kfree(buf); - transport_kunmap_data_sg(cmd); return TCM_INVALID_CDB_FIELD; } @@ -947,7 +1062,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) * the only two possibilities). 
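That test, (modesense_handlers[i].subpage & ~subpage) == 0, works because of exactly those two possibilities; a small self-check:

    #include <assert.h>

    /* A requested subpage of 0xff (all subpages) matches every handler;
     * 0x00 matches only handlers whose subpage is 0x00. */
    static int subpage_matches(unsigned char handler, unsigned char requested)
    {
            return (handler & ~requested) == 0;
    }

    int main(void)
    {
            assert(subpage_matches(0x00, 0x00));
            assert(!subpage_matches(0x01, 0x00));
            assert(subpage_matches(0x00, 0xff));
            assert(subpage_matches(0x01, 0xff));
            return 0;
    }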
*/ if ((modesense_handlers[i].subpage & ~subpage) == 0) { - ret = modesense_handlers[i].emulate(dev, pc, &buf[length]); + ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]); if (!ten && length + ret >= 255) break; length += ret; @@ -960,7 +1075,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) if (modesense_handlers[i].page == page && modesense_handlers[i].subpage == subpage) { - length += modesense_handlers[i].emulate(dev, pc, &buf[length]); + length += modesense_handlers[i].emulate(cmd, pc, &buf[length]); goto set_length; } @@ -972,7 +1087,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", page, subpage); - transport_kunmap_data_sg(cmd); return TCM_UNKNOWN_MODE_PAGE; set_length: @@ -981,19 +1095,18 @@ set_length: else buf[0] = length - 1; - if (buf != map_buf) { - memcpy(map_buf, buf, cmd->data_length); - kfree(buf); + rbuf = transport_kmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); + transport_kunmap_data_sg(cmd); } - transport_kunmap_data_sg(cmd); - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, length); return 0; } static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; bool ten = cdb[0] == MODE_SELECT_10; int off = ten ? 8 : 4; @@ -1005,6 +1118,14 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) int ret = 0; int i; + if (!cmd->data_length) { + target_complete_cmd(cmd, GOOD); + return 0; + } + + if (cmd->data_length < off + 2) + return TCM_PARAMETER_LIST_LENGTH_ERROR; + buf = transport_kmap_data_sg(cmd); if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1021,7 +1142,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) if (modesense_handlers[i].page == page && modesense_handlers[i].subpage == subpage) { memset(tbuf, 0, SE_MODE_PAGE_BUF); - length = modesense_handlers[i].emulate(dev, 0, tbuf); + length = modesense_handlers[i].emulate(cmd, 0, tbuf); goto check_contents; } @@ -1029,6 +1150,11 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) goto out; check_contents: + if (cmd->data_length < off + length) { + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } + if (memcmp(buf + off, tbuf, length)) ret = TCM_INVALID_PARAMETER_LIST; @@ -1151,7 +1277,7 @@ done: buf[3] = (lun_count & 0xff); transport_kunmap_data_sg(cmd); - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); return 0; } EXPORT_SYMBOL(spc_emulate_report_luns); @@ -1228,7 +1354,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) *size = (cdb[3] << 8) + cdb[4]; /* - * Do implict HEAD_OF_QUEUE processing for INQUIRY. + * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 
* See spc4r17 section 5.3 */ cmd->sam_task_attr = MSG_HEAD_TAG; @@ -1239,8 +1365,14 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; break; case EXTENDED_COPY: - case READ_ATTRIBUTE: + *size = get_unaligned_be32(&cdb[10]); + cmd->execute_cmd = target_do_xcopy; + break; case RECEIVE_COPY_RESULTS: + *size = get_unaligned_be32(&cdb[10]); + cmd->execute_cmd = target_do_receive_copy_results; + break; + case READ_ATTRIBUTE: case WRITE_ATTRIBUTE: *size = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13]; @@ -1256,7 +1388,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) cmd->execute_cmd = spc_emulate_report_luns; *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* - * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS + * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 */ cmd->sam_task_attr = MSG_HEAD_TAG; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index d154ce79718..03538994d2f 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -4,7 +4,7 @@ * Modern ConfigFS group context specific statistics based on original * target_core_mib.c code * - * (c) Copyright 2006-2012 RisingTide Systems LLC. + * (c) Copyright 2006-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@linux-iscsi.org> * @@ -32,7 +32,6 @@ #include <linux/utsname.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/blkdev.h> #include <linux/configfs.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); - return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_resets)); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); @@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuNumCommands */ - return snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)dev->num_cmds); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_cmds)); } DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); @@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuReadMegaBytes */ - return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->read_bytes) >> 20); } DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); @@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuWrittenMegaBytes */ - return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->write_bytes) >> 20); } DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); @@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuInResets */ - return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); + return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets)); } DEV_STAT_SCSI_LU_ATTR_RO(resets); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index c6e0293ffdb..f7cd95e8111 100644 --- a/drivers/target/target_core_tmr.c +++ 
b/drivers/target/target_core_tmr.c @@ -3,7 +3,7 @@ * * This file contains SPC-3 task management infrastructure * - * (c) Copyright 2009-2012 RisingTide Systems LLC. + * (c) Copyright 2009-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * @@ -85,21 +85,19 @@ void core_tmr_release_req( static void core_tmr_handle_tas_abort( struct se_node_acl *tmr_nacl, struct se_cmd *cmd, - int tas, - int fe_count) + int tas) { - if (!fe_count) { - transport_cmd_finish_abort(cmd, 1); - return; - } + bool remove = true; /* * TASK ABORTED status (TAS) bit support */ if ((tmr_nacl && - (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) + (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { + remove = false; transport_send_task_abort(cmd); + } - transport_cmd_finish_abort(cmd, 0); + transport_cmd_finish_abort(cmd, remove); } static int target_check_cdb_and_preempt(struct list_head *list, @@ -132,6 +130,11 @@ void core_tmr_abort_task( if (dev != se_cmd->se_dev) continue; + + /* skip se_cmd associated with tmr */ + if (tmr->task_cmd == se_cmd) + continue; + ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); if (tmr->ref_task_tag != ref_tag) continue; @@ -155,18 +158,9 @@ void core_tmr_abort_task( cancel_work_sync(&se_cmd->work); transport_wait_for_tasks(se_cmd); - /* - * Now send SAM_STAT_TASK_ABORTED status for the referenced - * se_cmd descriptor.. - */ - transport_send_task_abort(se_cmd); - /* - * Also deal with possible extra acknowledge reference.. - */ - if (se_cmd->se_cmd_flags & SCF_ACK_KREF) - target_put_sess_cmd(se_sess, se_cmd); target_put_sess_cmd(se_sess, se_cmd); + transport_cmd_finish_abort(se_cmd, true); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %d\n", ref_tag); @@ -253,7 +247,6 @@ static void core_tmr_drain_state_list( LIST_HEAD(drain_task_list); struct se_cmd *cmd, *next; unsigned long flags; - int fe_count; /* * Complete outstanding commands with TASK_ABORTED SAM status. @@ -329,24 +322,10 @@ static void core_tmr_drain_state_list( spin_lock_irqsave(&cmd->t_state_lock, flags); target_stop_cmd(cmd, &flags); - fe_count = atomic_read(&cmd->t_fe_count); - - if (!(cmd->transport_state & CMD_T_ACTIVE)) { - pr_debug("LUN_RESET: got CMD_T_ACTIVE for" - " cdb: %p, t_fe_count: %d dev: %p\n", cmd, - fe_count, dev); - cmd->transport_state |= CMD_T_ABORTED; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); - continue; - } - pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p," - " t_fe_count: %d dev: %p\n", cmd, fe_count, dev); cmd->transport_state |= CMD_T_ABORTED; spin_unlock_irqrestore(&cmd->t_state_lock, flags); - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); } } @@ -406,9 +385,7 @@ int core_tmr_lun_reset( pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); } - spin_lock_irq(&dev->stats_lock); - dev->num_resets++; - spin_unlock_irq(&dev->stats_lock); + atomic_long_inc(&dev->num_resets); pr_debug("LUN_RESET: %s for [%s] Complete\n", (preempt_and_abort_list) ? "Preempt" : "TMR", diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 5192ac0337f..c036595b17c 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -3,7 +3,7 @@ * * This file contains generic Target Portal Group related functions. * - * (c) Copyright 2002-2012 RisingTide Systems LLC. + * (c) Copyright 2002-2013 Datera, Inc. * * Nicholas A. 
Bellinger <nab@kernel.org> * @@ -111,17 +111,12 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( struct se_node_acl *acl; spin_lock_irq(&tpg->acl_node_lock); - list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { - if (!strcmp(acl->initiatorname, initiatorname) && - !acl->dynamic_node_acl) { - spin_unlock_irq(&tpg->acl_node_lock); - return acl; - } - } + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); spin_unlock_irq(&tpg->acl_node_lock); - return NULL; + return acl; } +EXPORT_SYMBOL(core_tpg_get_initiator_node_acl); /* core_tpg_add_node_to_devs(): * @@ -283,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); - spin_lock_init(&acl->stats_lock); acl->dynamic_node_acl = 1; tpg->se_tpg_tfo->set_default_node_attributes(acl); @@ -411,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); - spin_lock_init(&acl->stats_lock); tpg->se_tpg_tfo->set_default_node_attributes(acl); @@ -639,6 +632,13 @@ int core_tpg_set_initiator_node_tag( } EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); +static void core_tpg_lun_ref_release(struct percpu_ref *ref) +{ + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + + complete(&lun->lun_ref_comp); +} + static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) { /* Set in core_dev_setup_virtual_lun0() */ @@ -652,12 +652,11 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_acl_list); - INIT_LIST_HEAD(&lun->lun_cmd_list); spin_lock_init(&lun->lun_acl_lock); - spin_lock_init(&lun->lun_cmd_lock); spin_lock_init(&lun->lun_sep_lock); + init_completion(&lun->lun_ref_comp); - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev); if (ret < 0) return ret; @@ -697,10 +696,9 @@ int core_tpg_register( atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_acl_list); - INIT_LIST_HEAD(&lun->lun_cmd_list); spin_lock_init(&lun->lun_acl_lock); - spin_lock_init(&lun->lun_cmd_lock); spin_lock_init(&lun->lun_sep_lock); + init_completion(&lun->lun_ref_comp); } se_tpg->se_tpg_type = se_tpg_type; @@ -717,7 +715,8 @@ int core_tpg_register( if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { - kfree(se_tpg); + array_free(se_tpg->tpg_lun_list, + TRANSPORT_MAX_LUNS_PER_TPG); return -ENOMEM; } } @@ -782,7 +781,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) } EXPORT_SYMBOL(core_tpg_deregister); -struct se_lun *core_tpg_pre_addlun( +struct se_lun *core_tpg_alloc_lun( struct se_portal_group *tpg, u32 unpacked_lun) { @@ -812,18 +811,24 @@ struct se_lun *core_tpg_pre_addlun( return lun; } -int core_tpg_post_addlun( +int core_tpg_add_lun( struct se_portal_group *tpg, struct se_lun *lun, u32 lun_access, - void *lun_ptr) + struct se_device *dev) { int ret; - ret = core_dev_export(lun_ptr, tpg, lun); + ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); if (ret < 0) return ret; + ret = core_dev_export(dev, tpg, lun); + if (ret < 0) { + percpu_ref_cancel_init(&lun->lun_ref); + return ret; + } + spin_lock(&tpg->tpg_lun_lock); 
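The new lun_ref plumbing in core_tpg_add_lun() pairs percpu_ref_init() with percpu_ref_cancel_init() on the export-failure path, since a ref that was never killed must be torn down in place; normal shutdown instead kills the ref and waits for core_tpg_lun_ref_release() to fire at zero. A compact userspace model of that quiesce handshake, with a plain atomic counter standing in for the per-CPU counter (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long lun_ref = 1;		/* the base reference percpu_ref_init leaves */
static atomic_bool released;

static void lun_ref_release(void)	/* models core_tpg_lun_ref_release() */
{
	atomic_store(&released, 1);	/* models complete(&lun->lun_ref_comp) */
}

static void ref_put(void)
{
	if (atomic_fetch_sub(&lun_ref, 1) == 1)
		lun_ref_release();
}

int main(void)
{
	atomic_fetch_add(&lun_ref, 1);	/* an in-flight command took a ref */
	ref_put();			/* percpu_ref_kill: drop the base ref */
	ref_put();			/* last I/O completes, release fires */
	printf("released=%d\n", (int)atomic_load(&released));
	return 0;
}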
lun->lun_access = lun_access; lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; @@ -832,14 +837,6 @@ int core_tpg_post_addlun( return 0; } -static void core_tpg_shutdown_lun( - struct se_portal_group *tpg, - struct se_lun *lun) -{ - core_clear_lun_from_tpg(lun, tpg); - transport_clear_lun_from_sessions(lun); -} - struct se_lun *core_tpg_pre_dellun( struct se_portal_group *tpg, u32 unpacked_lun) @@ -874,7 +871,8 @@ int core_tpg_post_dellun( struct se_portal_group *tpg, struct se_lun *lun) { - core_tpg_shutdown_lun(tpg, lun); + core_clear_lun_from_tpg(lun, tpg); + transport_clear_lun_ref(lun); core_dev_unexport(lun->lun_se_dev, tpg, lun); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index bd587b70661..7fa62fc93e0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3,7 +3,7 @@ * * This file contains the Generic Target Engine Core. * - * (c) Copyright 2002-2012 RisingTide Systems LLC. + * (c) Copyright 2002-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * @@ -28,7 +28,6 @@ #include <linux/string.h> #include <linux/timer.h> #include <linux/slab.h> -#include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/in.h> @@ -52,6 +51,9 @@ #include "target_core_pr.h" #include "target_core_ua.h" +#define CREATE_TRACE_POINTS +#include <trace/events/target.h> + static struct workqueue_struct *target_completion_wq; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_ua_cache; @@ -60,13 +62,13 @@ struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +struct kmem_cache *t10_alua_lba_map_cache; +struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); -static int transport_generic_get_mem(struct se_cmd *cmd); -static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); -static void transport_put_cmd(struct se_cmd *cmd); +static int transport_put_cmd(struct se_cmd *cmd); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) @@ -128,14 +130,36 @@ int init_se_kmem_caches(void) "mem_t failed\n"); goto out_free_tg_pt_gp_cache; } + t10_alua_lba_map_cache = kmem_cache_create( + "t10_alua_lba_map_cache", + sizeof(struct t10_alua_lba_map), + __alignof__(struct t10_alua_lba_map), 0, NULL); + if (!t10_alua_lba_map_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_" + "cache failed\n"); + goto out_free_tg_pt_gp_mem_cache; + } + t10_alua_lba_map_mem_cache = kmem_cache_create( + "t10_alua_lba_map_mem_cache", + sizeof(struct t10_alua_lba_map_member), + __alignof__(struct t10_alua_lba_map_member), 0, NULL); + if (!t10_alua_lba_map_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" + "cache failed\n"); + goto out_free_lba_map_cache; + } target_completion_wq = alloc_workqueue("target_completion", WQ_MEM_RECLAIM, 0); if (!target_completion_wq) - goto out_free_tg_pt_gp_mem_cache; + goto out_free_lba_map_mem_cache; return 0; +out_free_lba_map_mem_cache: + kmem_cache_destroy(t10_alua_lba_map_mem_cache); +out_free_lba_map_cache: + kmem_cache_destroy(t10_alua_lba_map_cache); out_free_tg_pt_gp_mem_cache: kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); out_free_tg_pt_gp_cache: @@ -164,6 +188,8 @@ void release_se_kmem_caches(void) 
kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); + kmem_cache_destroy(t10_alua_lba_map_cache); + kmem_cache_destroy(t10_alua_lba_map_mem_cache); } /* This code ensures unique mib indexes are handed out. */ @@ -209,7 +235,7 @@ void transport_subsystem_check_init(void) sub_api_initialized = 1; } -struct se_session *transport_init_session(void) +struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) { struct se_session *se_sess; @@ -222,13 +248,67 @@ struct se_session *transport_init_session(void) INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); INIT_LIST_HEAD(&se_sess->sess_cmd_list); + INIT_LIST_HEAD(&se_sess->sess_wait_list); spin_lock_init(&se_sess->sess_cmd_lock); kref_init(&se_sess->sess_kref); + se_sess->sup_prot_ops = sup_prot_ops; return se_sess; } EXPORT_SYMBOL(transport_init_session); +int transport_alloc_session_tags(struct se_session *se_sess, + unsigned int tag_num, unsigned int tag_size) +{ + int rc; + + se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, + GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + if (!se_sess->sess_cmd_map) { + se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); + if (!se_sess->sess_cmd_map) { + pr_err("Unable to allocate se_sess->sess_cmd_map\n"); + return -ENOMEM; + } + } + + rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); + if (rc < 0) { + pr_err("Unable to init se_sess->sess_tag_pool," + " tag_num: %u\n", tag_num); + if (is_vmalloc_addr(se_sess->sess_cmd_map)) + vfree(se_sess->sess_cmd_map); + else + kfree(se_sess->sess_cmd_map); + se_sess->sess_cmd_map = NULL; + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(transport_alloc_session_tags); + +struct se_session *transport_init_session_tags(unsigned int tag_num, + unsigned int tag_size, + enum target_prot_op sup_prot_ops) +{ + struct se_session *se_sess; + int rc; + + se_sess = transport_init_session(sup_prot_ops); + if (IS_ERR(se_sess)) + return se_sess; + + rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); + if (rc < 0) { + transport_free_session(se_sess); + return ERR_PTR(-ENOMEM); + } + + return se_sess; +} +EXPORT_SYMBOL(transport_init_session_tags); + /* * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. */ @@ -364,6 +444,13 @@ EXPORT_SYMBOL(transport_deregister_session_configfs); void transport_free_session(struct se_session *se_sess) { + if (se_sess->sess_cmd_map) { + percpu_ida_destroy(&se_sess->sess_tag_pool); + if (is_vmalloc_addr(se_sess->sess_cmd_map)) + vfree(se_sess->sess_cmd_map); + else + kfree(se_sess->sess_cmd_map); + } kmem_cache_free(se_sess_cache, se_sess); } EXPORT_SYMBOL(transport_free_session); @@ -413,11 +500,11 @@ void transport_deregister_session(struct se_session *se_sess) pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", se_tpg->se_tpg_tfo->get_fabric_name()); /* - * If last kref is dropping now for an explict NodeACL, awake sleeping + * If last kref is dropping now for an explicit NodeACL, awake sleeping * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group * removal context. 
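transport_init_session_tags() above exists so a fabric driver can pre-allocate every per-command descriptor at login time and hand out slots by tag, instead of allocating per I/O. A kernel-context sketch of the intended consumer pattern; struct my_fabric_cmd is hypothetical, and the gfp_t-taking percpu_ida_alloc() signature of this era is assumed:

struct my_fabric_cmd {			/* hypothetical per-command context */
	struct se_cmd se_cmd;
	int tag;
};

static struct my_fabric_cmd *my_fabric_get_cmd(struct se_session *se_sess)
{
	struct my_fabric_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
	if (tag < 0)
		return NULL;

	/* sess_cmd_map was sized as tag_num * sizeof(struct my_fabric_cmd) */
	cmd = &((struct my_fabric_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->tag = tag;
	return cmd;
}

static void my_fabric_put_cmd(struct se_session *se_sess, struct my_fabric_cmd *cmd)
{
	percpu_ida_free(&se_sess->sess_tag_pool, cmd->tag);
}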
*/ - if (se_nacl && comp_nacl == true) + if (se_nacl && comp_nacl) target_put_nacl(se_nacl); transport_free_session(se_sess); @@ -446,27 +533,14 @@ static void target_remove_from_state_list(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) +static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + bool write_pending) { unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); - /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - complete(&cmd->transport_lun_stop_comp); - return 1; - } + if (write_pending) + cmd->t_state = TRANSPORT_WRITE_PENDING; if (remove_from_lists) { target_remove_from_state_list(cmd); @@ -488,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return 1; } @@ -515,32 +589,31 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) { - return transport_cmd_check_stop(cmd, true); + return transport_cmd_check_stop(cmd, true, false); } static void transport_lun_remove_cmd(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - unsigned long flags; if (!lun) return; - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->transport_state & CMD_T_DEV_ACTIVE) { - cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - target_remove_from_state_list(cmd); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (!list_empty(&cmd->se_lun_node)) - list_del_init(&cmd->se_lun_node); - spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); } void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) + transport_lun_remove_cmd(cmd); + /* + * Allow the fabric driver to unmap any resources before + * releasing the descriptor via TFO->release_cmd() + */ + if (remove) + cmd->se_tfo->aborted_task(cmd); + if (transport_cmd_check_stop_to_fabric(cmd)) return; if (remove) @@ -607,19 +680,16 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) return; } - if (!success) - cmd->transport_state |= CMD_T_FAILED; - /* - * Check for case where an explict ABORT_TASK has been received + * Check for case where an explicit ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion.. 
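transport_lun_remove_cmd() above can now be reached from both the abort path (via transport_cmd_finish_abort()) and normal completion, so the per-command LUN reference has to be dropped exactly once; the cmpxchg on lun_ref_active is what guarantees that. The same exactly-once pattern as a standalone userspace model:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool lun_ref_active = true;
static atomic_int puts_done;

/* Whichever caller flips the flag first performs the single put;
 * a racing second caller sees false and does nothing. */
static void lun_remove_cmd(void)
{
	bool expected = true;

	if (atomic_compare_exchange_strong(&lun_ref_active, &expected, false))
		atomic_fetch_add(&puts_done, 1);	/* models percpu_ref_put() */
}

int main(void)
{
	lun_remove_cmd();	/* e.g. the abort path */
	lun_remove_cmd();	/* e.g. normal completion racing in */
	printf("puts=%d\n", (int)atomic_load(&puts_done));	/* always 1 */
	return 0;
}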
*/ if (cmd->transport_state & CMD_T_ABORTED && cmd->transport_state & CMD_T_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; - } else if (cmd->transport_state & CMD_T_FAILED) { + } else if (!success) { INIT_WORK(&cmd->work, target_complete_failure_work); } else { INIT_WORK(&cmd->work, target_complete_ok_work); @@ -633,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) } EXPORT_SYMBOL(target_complete_cmd); +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + cmd->residual_count += cmd->data_length - length; + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = cmd->data_length - length; + } + + cmd->data_length = length; + } + + target_complete_cmd(cmd, scsi_status); +} +EXPORT_SYMBOL(target_complete_cmd_with_length); + static void target_add_to_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -666,7 +753,7 @@ void target_qf_do_work(struct work_struct *work) list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); atomic_dec(&dev->dev_qf_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -907,15 +994,18 @@ int transport_dump_vpd_ident( switch (vpd->device_identifier_code_set) { case 0x01: /* Binary */ - sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", + snprintf(buf, sizeof(buf), + "T10 VPD Binary Device Identifier: %s\n", &vpd->device_identifier[0]); break; case 0x02: /* ASCII */ - sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", + snprintf(buf, sizeof(buf), + "T10 VPD ASCII Device Identifier: %s\n", &vpd->device_identifier[0]); break; case 0x03: /* UTF-8 */ - sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", + snprintf(buf, sizeof(buf), + "T10 VPD UTF-8 Device Identifier: %s\n", &vpd->device_identifier[0]); break; default: @@ -1032,17 +1122,15 @@ void transport_init_se_cmd( int task_attr, unsigned char *sense_buffer) { - INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_cmd_list); INIT_LIST_HEAD(&cmd->state_list); - init_completion(&cmd->transport_lun_fe_stop_comp); - init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->t_transport_stop_comp); init_completion(&cmd->cmd_wait_comp); init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); + kref_init(&cmd->cmd_kref); cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; @@ -1078,7 +1166,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) * Dormant to Active status. 
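target_complete_cmd_with_length() above folds a shorter-than-allocated response into standard residual handling: it grows an existing underflow residual or starts a new one, then truncates data_length before completing. The bookkeeping in isolation, as a standalone model:

#include <stdio.h>

struct cmd {
	unsigned int data_length;	/* allocation length from the CDB */
	unsigned int residual_count;
	int underflow;			/* models SCF_UNDERFLOW_BIT */
};

static void complete_with_length(struct cmd *c, unsigned int length)
{
	if (length < c->data_length) {
		if (c->underflow) {
			c->residual_count += c->data_length - length;
		} else {
			c->underflow = 1;
			c->residual_count = c->data_length - length;
		}
		c->data_length = length;
	}
	/* ...then hand off to the usual completion path */
}

int main(void)
{
	struct cmd c = { .data_length = 255 };

	complete_with_length(&c, 32);	/* e.g. a short MODE SENSE payload */
	printf("residual=%u data_length=%u\n", c.residual_count, c.data_length);
	return 0;
}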
*/ cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, dev->transport->name); @@ -1089,7 +1177,6 @@ sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) { struct se_device *dev = cmd->se_dev; - unsigned long flags; sense_reason_t ret; /* @@ -1124,6 +1211,8 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) */ memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); + trace_target_sequencer_start(cmd); + /* * Check for an existing UNIT ATTENTION condition */ @@ -1136,8 +1225,10 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) return ret; ret = target_check_reservation(cmd); - if (ret) + if (ret) { + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; return ret; + } ret = dev->transport->parse_cdb(cmd); if (ret) @@ -1147,9 +1238,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) if (ret) return ret; - spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) @@ -1202,7 +1291,7 @@ int transport_handle_cdb_direct( } EXPORT_SYMBOL(transport_handle_cdb_direct); -static sense_reason_t +sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) { @@ -1248,6 +1337,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, * @sgl_count: scatterlist count for unidirectional mapping * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping * @sgl_bidi_count: scatterlist count for bidirectional READ mapping + * @sgl_prot: struct scatterlist memory protection information + * @sgl_prot_count: scatterlist count for protection information * * Returns non zero to signal active I/O shutdown failure. All other * setup exceptions will be returned as a SCSI CHECK_CONDITION response, @@ -1260,7 +1351,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags, struct scatterlist *sgl, u32 sgl_count, - struct scatterlist *sgl_bidi, u32 sgl_bidi_count) + struct scatterlist *sgl_bidi, u32 sgl_bidi_count, + struct scatterlist *sgl_prot, u32 sgl_prot_count) { struct se_portal_group *se_tpg; sense_reason_t rc; @@ -1308,6 +1400,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess transport_generic_request_failure(se_cmd, rc); return 0; } + + /* + * Save pointers for SGLs containing protection information, + * if present. + */ + if (sgl_prot_count) { + se_cmd->t_prot_sg = sgl_prot; + se_cmd->t_prot_nents = sgl_prot_count; + } + /* * When a non zero sgl_count has been passed perform SGL passthrough * mapping for pre-allocated fabric memory instead of having target @@ -1344,6 +1446,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess return 0; } } + /* * Check if we need to delay processing because of ALUA * Active/NonOptimized primary access state.. 
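With the two extra arguments on target_submit_cmd_map_sgls(), callers that carry no protection information pass NULL/0, exactly as target_submit_cmd() does in the next hunk. A kernel-context sketch of a fabric caller that does hand in PI scatterlists (the wrapper function itself is hypothetical):

static int my_fabric_submit_write(struct se_cmd *se_cmd, struct se_session *se_sess,
				  unsigned char *cdb, unsigned char *sense_buf,
				  u32 unpacked_lun, u32 data_length,
				  struct scatterlist *data_sgl, u32 data_nents,
				  struct scatterlist *prot_sgl, u32 prot_nents)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense_buf,
					  unpacked_lun, data_length,
					  MSG_SIMPLE_TAG, DMA_TO_DEVICE,
					  TARGET_SCF_ACK_KREF,
					  data_sgl, data_nents,
					  NULL, 0,		/* no BIDI read payload */
					  prot_sgl, prot_nents);	/* PI for DIF */
}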
@@ -1383,7 +1486,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, { return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, unpacked_lun, data_length, task_attr, data_dir, - flags, NULL, 0, NULL, 0); + flags, NULL, 0, NULL, 0, NULL, 0); } EXPORT_SYMBOL(target_submit_cmd); @@ -1508,12 +1611,20 @@ void transport_generic_request_failure(struct se_cmd *cmd, * For SAM Task Attribute emulation for failed struct se_cmd */ transport_complete_task_attr(cmd); + /* + * Handle special case for COMPARE_AND_WRITE failure, where the + * callback is expected to drop the per device ->caw_mutex. + */ + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && + cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd); switch (sense_reason) { case TCM_NON_EXISTENT_LUN: case TCM_UNSUPPORTED_SCSI_OPCODE: case TCM_INVALID_CDB_FIELD: case TCM_INVALID_PARAMETER_LIST: + case TCM_PARAMETER_LIST_LENGTH_ERROR: case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: case TCM_UNKNOWN_MODE_PAGE: case TCM_WRITE_PROTECTED: @@ -1521,6 +1632,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_CHECK_CONDITION_NOT_READY: + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: break; case TCM_OUT_OF_RESOURCES: sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1546,7 +1660,8 @@ void transport_generic_request_failure(struct se_cmd *cmd, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - ret = cmd->se_tfo->queue_status(cmd); + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo-> queue_status(cmd); if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; goto check_stop; @@ -1573,14 +1688,10 @@ queue_full: } EXPORT_SYMBOL(transport_generic_request_failure); -static void __target_execute_cmd(struct se_cmd *cmd) +void __target_execute_cmd(struct se_cmd *cmd) { sense_reason_t ret; - spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT); - spin_unlock_irq(&cmd->t_state_lock); - if (cmd->execute_cmd) { ret = cmd->execute_cmd(cmd); if (ret) { @@ -1612,7 +1723,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) return false; case MSG_ORDERED_TAG: atomic_inc(&dev->dev_ordered_sync); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " " se_ordered_id: %u\n", @@ -1630,7 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) * For SIMPLE and UNTAGGED Task Attribute commands */ atomic_inc(&dev->simple_cmds); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); break; } @@ -1653,45 +1764,45 @@ void target_execute_cmd(struct se_cmd *cmd) /* * If the received CDB has aleady been aborted stop processing it here. */ - if (transport_check_aborted_status(cmd, 1)) { - complete(&cmd->transport_lun_stop_comp); + if (transport_check_aborted_status(cmd, 1)) return; - } - - /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. 
- */ - spin_lock_irq(&cmd->t_state_lock); - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - cmd->transport_state &= ~CMD_T_ACTIVE; - spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->transport_lun_stop_comp); - return; - } /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. */ + spin_lock_irq(&cmd->t_state_lock); if (cmd->transport_state & CMD_T_STOP) { pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; } cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE; + cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); + /* + * Perform WRITE_INSERT of PI using software emulation when backend + * device has PI enabled, if the transport has not already generated + * PI using hardware WRITE_INSERT offload. + */ + if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) { + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) + sbc_dif_generate(cmd); + } - if (!target_handle_task_attr(cmd)) - __target_execute_cmd(cmd); + if (target_handle_task_attr(cmd)) { + spin_lock_irq(&cmd->t_state_lock); + cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + return; + } + + __target_execute_cmd(cmd); } EXPORT_SYMBOL(target_execute_cmd); @@ -1735,7 +1846,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { atomic_dec(&dev->simple_cmds); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev->dev_cur_ordered_id: %u for" " SIMPLE: %u\n", dev->dev_cur_ordered_id, @@ -1747,7 +1858,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { atomic_dec(&dev->dev_ordered_sync); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" @@ -1764,6 +1875,7 @@ static void transport_complete_qf(struct se_cmd *cmd) transport_complete_task_attr(cmd); if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { + trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); if (ret) goto out; @@ -1771,16 +1883,18 @@ static void transport_complete_qf(struct se_cmd *cmd) switch (cmd->data_direction) { case DMA_FROM_DEVICE: + trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); break; case DMA_TO_DEVICE: - if (cmd->t_bidi_data_sg) { + if (cmd->se_cmd_flags & SCF_BIDI) { ret = cmd->se_tfo->queue_data_in(cmd); if (ret < 0) break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: + trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); break; default: @@ -1803,12 +1917,27 @@ static void transport_handle_queue_full( spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc(&dev->dev_qf_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); schedule_work(&cmd->se_dev->qf_work_queue); } +static bool target_check_read_strip(struct se_cmd *cmd) +{ + sense_reason_t rc; + + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { + rc = sbc_dif_read_strip(cmd); + if (rc) { + cmd->pi_err = rc; 
+ return true; + } + } + + return false; +} + static void target_complete_ok_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -1845,10 +1974,25 @@ static void target_complete_ok_work(struct work_struct *work) } /* * Check for a callback, used by amongst other things - * XDWRITE_READ_10 emulation. + * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. */ - if (cmd->transport_complete_callback) - cmd->transport_complete_callback(cmd); + if (cmd->transport_complete_callback) { + sense_reason_t rc; + + rc = cmd->transport_complete_callback(cmd); + if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { + return; + } else if (rc) { + ret = transport_send_check_condition_and_sense(cmd, + rc, 0); + if (ret == -EAGAIN || ret == -ENOMEM) + goto queue_full; + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } + } switch (cmd->data_direction) { case DMA_FROM_DEVICE: @@ -1858,7 +2002,24 @@ static void target_complete_ok_work(struct work_struct *work) cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); + /* + * Perform READ_STRIP of PI using software emulation when + * backend had PI enabled, if the transport will not be + * performing hardware READ_STRIP offload. + */ + if (cmd->prot_op == TARGET_PROT_DIN_STRIP && + target_check_read_strip(cmd)) { + ret = transport_send_check_condition_and_sense(cmd, + cmd->pi_err, 0); + if (ret == -EAGAIN || ret == -ENOMEM) + goto queue_full; + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } + + trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; @@ -1873,7 +2034,7 @@ static void target_complete_ok_work(struct work_struct *work) /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (cmd->t_bidi_data_sg) { + if (cmd->se_cmd_flags & SCF_BIDI) { spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) { cmd->se_lun->lun_sep->sep_stats.tx_data_octets += @@ -1887,6 +2048,7 @@ static void target_complete_ok_work(struct work_struct *work) } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: + trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; @@ -1917,10 +2079,29 @@ static inline void transport_free_sgl(struct scatterlist *sgl, int nents) kfree(sgl); } +static inline void transport_reset_sgl_orig(struct se_cmd *cmd) +{ + /* + * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE + * emulation, and free + reset pointers if necessary.. 
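target_check_read_strip() just above only falls back to sbc_dif_read_strip() when the fabric did not advertise TARGET_PROT_DIN_STRIP at session setup, mirroring the WRITE-side sbc_dif_generate() fallback in target_execute_cmd(): hardware PI offload wins whenever the transport claims it. The decision reduces to flag tests like these (flag values illustrative; the kernel's enum target_prot_op carries the real ones):

#include <stdio.h>

enum prot_op { PROT_DIN_STRIP = 1 << 0, PROT_DOUT_INSERT = 1 << 1 };

/* READ path: strip PI in software only when hardware will not. */
static int need_sw_read_strip(unsigned int sup_prot_ops)
{
	return !(sup_prot_ops & PROT_DIN_STRIP);
}

/* WRITE path: generate PI in software only when hardware will not insert it. */
static int need_sw_write_insert(unsigned int sup_prot_ops)
{
	return !(sup_prot_ops & PROT_DOUT_INSERT);
}

int main(void)
{
	printf("no offload: strip=%d insert=%d\n",
	       need_sw_read_strip(0), need_sw_write_insert(0));
	printf("full offload: strip=%d insert=%d\n",
	       need_sw_read_strip(PROT_DIN_STRIP),
	       need_sw_write_insert(PROT_DOUT_INSERT));
	return 0;
}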
+ */ + if (!cmd->t_data_sg_orig) + return; + + kfree(cmd->t_data_sg); + cmd->t_data_sg = cmd->t_data_sg_orig; + cmd->t_data_sg_orig = NULL; + cmd->t_data_nents = cmd->t_data_nents_orig; + cmd->t_data_nents_orig = 0; +} + static inline void transport_free_pages(struct se_cmd *cmd) { - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { + transport_reset_sgl_orig(cmd); return; + } + transport_reset_sgl_orig(cmd); transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); cmd->t_data_sg = NULL; @@ -1929,6 +2110,10 @@ static inline void transport_free_pages(struct se_cmd *cmd) transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = NULL; cmd->t_bidi_data_nents = 0; + + transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); + cmd->t_prot_sg = NULL; + cmd->t_prot_nents = 0; } /** @@ -1938,7 +2123,7 @@ static inline void transport_free_pages(struct se_cmd *cmd) * This routine unconditionally frees a command, and reference counting * or list removal must be done in the caller. */ -static void transport_release_cmd(struct se_cmd *cmd) +static int transport_release_cmd(struct se_cmd *cmd) { BUG_ON(!cmd->se_tfo); @@ -1950,11 +2135,7 @@ static void transport_release_cmd(struct se_cmd *cmd) * If this cmd has been setup with target_get_sess_cmd(), drop * the kref and call ->release_cmd() in kref callback. */ - if (cmd->check_release != 0) { - target_put_sess_cmd(cmd->se_sess, cmd); - return; - } - cmd->se_tfo->release_cmd(cmd); + return target_put_sess_cmd(cmd->se_sess, cmd); } /** @@ -1963,26 +2144,10 @@ static void transport_release_cmd(struct se_cmd *cmd) * * This routine releases our reference to the command and frees it if possible. */ -static void transport_put_cmd(struct se_cmd *cmd) +static int transport_put_cmd(struct se_cmd *cmd) { - unsigned long flags; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (atomic_read(&cmd->t_fe_count) && - !atomic_dec_and_test(&cmd->t_fe_count)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - - if (cmd->transport_state & CMD_T_DEV_ACTIVE) { - cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - target_remove_from_state_list(cmd); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_free_pages(cmd); - transport_release_cmd(cmd); - return; + return transport_release_cmd(cmd); } void *transport_kmap_data_sg(struct se_cmd *cmd) @@ -2036,24 +2201,22 @@ void transport_kunmap_data_sg(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_kunmap_data_sg); -static int -transport_generic_get_mem(struct se_cmd *cmd) +int +target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, + bool zero_page) { - u32 length = cmd->data_length; - unsigned int nents; + struct scatterlist *sg; struct page *page; - gfp_t zero_flag; + gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; + unsigned int nent; int i = 0; - nents = DIV_ROUND_UP(length, PAGE_SIZE); - cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); - if (!cmd->t_data_sg) + nent = DIV_ROUND_UP(length, PAGE_SIZE); + sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); + if (!sg) return -ENOMEM; - cmd->t_data_nents = nents; - sg_init_table(cmd->t_data_sg, nents); - - zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 
0 : __GFP_ZERO; + sg_init_table(sg, nent); while (length) { u32 page_len = min_t(u32, length, PAGE_SIZE); @@ -2061,19 +2224,20 @@ transport_generic_get_mem(struct se_cmd *cmd) if (!page) goto out; - sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); + sg_set_page(&sg[i], page, page_len, 0); length -= page_len; i++; } + *sgl = sg; + *nents = nent; return 0; out: while (i > 0) { i--; - __free_page(sg_page(&cmd->t_data_sg[i])); + __free_page(sg_page(&sg[i])); } - kfree(cmd->t_data_sg); - cmd->t_data_sg = NULL; + kfree(sg); return -ENOMEM; } @@ -2094,13 +2258,38 @@ transport_generic_new_cmd(struct se_cmd *cmd) */ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && cmd->data_length) { - ret = transport_generic_get_mem(cmd); + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); + + if ((cmd->se_cmd_flags & SCF_BIDI) || + (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { + u32 bidi_length; + + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) + bidi_length = cmd->t_task_nolb * + cmd->se_dev->dev_attrib.block_size; + else + bidi_length = cmd->data_length; + + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, + &cmd->t_bidi_data_nents, + bidi_length, zero_flag); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + if (cmd->prot_op != TARGET_PROT_NORMAL) { + ret = target_alloc_sgl(&cmd->t_prot_sg, + &cmd->t_prot_nents, + cmd->prot_length, true); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, + cmd->data_length, zero_flag); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } - - atomic_inc(&cmd->t_fe_count); - /* * If this command is not a write we can execute it right here, * for write buffers we need to notify the fabric driver first @@ -2111,12 +2300,7 @@ transport_generic_new_cmd(struct se_cmd *cmd) target_execute_cmd(cmd); return 0; } - - spin_lock_irq(&cmd->t_state_lock); - cmd->t_state = TRANSPORT_WRITE_PENDING; - spin_unlock_irq(&cmd->t_state_lock); - - transport_cmd_check_stop(cmd, false); + transport_cmd_check_stop(cmd, false, true); ret = cmd->se_tfo->write_pending(cmd); if (ret == -EAGAIN || ret == -ENOMEM) @@ -2147,24 +2331,36 @@ static void transport_write_pending_qf(struct se_cmd *cmd) } } -void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) +int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { + unsigned long flags; + int ret = 0; + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) transport_wait_for_tasks(cmd); - transport_release_cmd(cmd); + ret = transport_release_cmd(cmd); } else { if (wait_for_tasks) transport_wait_for_tasks(cmd); - - core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); + /* + * Handle WRITE failure case where transport_generic_new_cmd() + * has already added se_cmd to state_list, but fabric has + * failed command before I/O submission. 
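The factored-out target_alloc_sgl() walks the requested length one page at a time and, on failure, frees exactly the pages it already grabbed before returning. The shape of that rollback loop in a userspace model, with malloc standing in for alloc_page() (the PAGE_SIZE value is an assumption):

#include <stdlib.h>

#define PAGE_SIZE 4096u

/* Allocate ceil(length / PAGE_SIZE) page-sized chunks, all or nothing. */
static void **alloc_chunks(unsigned int length, unsigned int *nents)
{
	unsigned int nent = (length + PAGE_SIZE - 1) / PAGE_SIZE, i;
	void **sg = malloc(nent * sizeof(*sg));

	if (!sg)
		return NULL;
	for (i = 0; i < nent; i++) {
		sg[i] = malloc(PAGE_SIZE);
		if (!sg[i])
			goto out;
	}
	*nents = nent;
	return sg;
out:
	while (i > 0)		/* roll back only the pages we got */
		free(sg[--i]);
	free(sg);
	return NULL;
}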
+ */ + if (cmd->state_active) { + spin_lock_irqsave(&cmd->t_state_lock, flags); + target_remove_from_state_list(cmd); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + } if (cmd->se_lun) transport_lun_remove_cmd(cmd); - transport_put_cmd(cmd); + ret = transport_put_cmd(cmd); } + return ret; } EXPORT_SYMBOL(transport_generic_free_cmd); @@ -2173,19 +2369,18 @@ EXPORT_SYMBOL(transport_generic_free_cmd); * @se_cmd: command descriptor to add * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() */ -static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, +int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, bool ack_kref) { unsigned long flags; int ret = 0; - kref_init(&se_cmd->cmd_kref); /* * Add a second kref if the fabric caller is expecting to handle * fabric acknowledgement that requires two target_put_sess_cmd() * invocations before se_cmd descriptor release. */ - if (ack_kref == true) { + if (ack_kref) { kref_get(&se_cmd->cmd_kref); se_cmd->se_cmd_flags |= SCF_ACK_KREF; } @@ -2196,32 +2391,29 @@ static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd goto out; } list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); - se_cmd->check_release = 1; - out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); return ret; } +EXPORT_SYMBOL(target_get_sess_cmd); static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; - unsigned long flags; - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); se_cmd->se_tfo->release_cmd(se_cmd); return; } if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); complete(&se_cmd->cmd_wait_comp); return; } list_del(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2232,7 +2424,12 @@ static void target_release_cmd_kref(struct kref *kref) */ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) { - return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); + if (!se_sess) { + se_cmd->se_tfo->release_cmd(se_cmd); + return 1; + } + return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, + &se_sess->sess_cmd_lock); } EXPORT_SYMBOL(target_put_sess_cmd); @@ -2247,11 +2444,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) unsigned long flags; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - - WARN_ON(se_sess->sess_tearing_down); + if (se_sess->sess_tearing_down) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + return; + } se_sess->sess_tearing_down = 1; + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); - list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) se_cmd->cmd_wait_set = 1; spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); @@ -2260,205 +2460,52 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); /* target_wait_for_sess_cmds - Wait for outstanding descriptors * @se_sess: session to wait for active I/O - * @wait_for_tasks: Make extra transport_wait_for_tasks call */ -void target_wait_for_sess_cmds( - struct 
se_session *se_sess, - int wait_for_tasks) +void target_wait_for_sess_cmds(struct se_session *se_sess) { struct se_cmd *se_cmd, *tmp_cmd; - bool rc = false; + unsigned long flags; list_for_each_entry_safe(se_cmd, tmp_cmd, - &se_sess->sess_cmd_list, se_cmd_list) { + &se_sess->sess_wait_list, se_cmd_list) { list_del(&se_cmd->se_cmd_list); pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" " %d\n", se_cmd, se_cmd->t_state, se_cmd->se_tfo->get_cmd_state(se_cmd)); - if (wait_for_tasks) { - pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - - rc = transport_wait_for_tasks(se_cmd); - - pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - } - - if (!rc) { - wait_for_completion(&se_cmd->cmd_wait_comp); - pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - } + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); se_cmd->se_tfo->release_cmd(se_cmd); } -} -EXPORT_SYMBOL(target_wait_for_sess_cmds); - -/* transport_lun_wait_for_tasks(): - * - * Called from ConfigFS context to stop the passed struct se_cmd to allow - * an struct se_lun to be successfully shutdown. - */ -static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) -{ - unsigned long flags; - int ret = 0; - - /* - * If the frontend has already requested this struct se_cmd to - * be stopped, we can safely ignore this struct se_cmd. - */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->transport_state & CMD_T_STOP) { - cmd->transport_state &= ~CMD_T_LUN_STOP; - - pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", - cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_cmd_check_stop(cmd, false); - return -EPERM; - } - cmd->transport_state |= CMD_T_LUN_FE_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - // XXX: audit task_flags checks. - spin_lock_irqsave(&cmd->t_state_lock, flags); - if ((cmd->transport_state & CMD_T_BUSY) && - (cmd->transport_state & CMD_T_SENT)) { - if (!target_stop_cmd(cmd, &flags)) - ret++; - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - pr_debug("ConfigFS: cmd: %p stop tasks ret:" - " %d\n", cmd, ret); - if (!ret) { - pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); - wait_for_completion(&cmd->transport_lun_stop_comp); - pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); - } - - return 0; -} - -static void __transport_clear_lun_from_sessions(struct se_lun *lun) -{ - struct se_cmd *cmd = NULL; - unsigned long lun_flags, cmd_flags; - /* - * Do exception processing and return CHECK_CONDITION status to the - * Initiator Port. 
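target_put_sess_cmd() above switches to kref_put_spinlock_irqsave(), which acquires sess_cmd_lock only for the transition to zero and invokes target_release_cmd_kref() with the lock already held, so the unlink from sess_cmd_list cannot race with a concurrent list walk. A simplified userspace model of that dec-and-lock contract (the real helper folds the decrement and the lock acquisition together, and its release callback drops the lock itself):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int refs = 2;

/* Only the final put takes the lock; the release step runs under it,
 * so unlink-and-free is atomic w.r.t. anyone scanning under list_lock. */
static int put(void (*release_locked)(void))
{
	if (atomic_fetch_sub(&refs, 1) != 1)
		return 0;
	pthread_mutex_lock(&list_lock);
	release_locked();		/* e.g. list_del() of the command */
	pthread_mutex_unlock(&list_lock);
	return 1;
}

static void unlink_cmd(void)
{
	printf("released under lock\n");
}

int main(void)
{
	put(unlink_cmd);	/* fabric ack reference */
	put(unlink_cmd);	/* final put: prints once */
	return 0;
}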
- */ - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - while (!list_empty(&lun->lun_cmd_list)) { - cmd = list_first_entry(&lun->lun_cmd_list, - struct se_cmd, se_lun_node); - list_del_init(&cmd->se_lun_node); - - spin_lock(&cmd->t_state_lock); - pr_debug("SE_LUN[%d] - Setting cmd->transport" - "_lun_stop for ITT: 0x%08x\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - cmd->transport_state |= CMD_T_LUN_STOP; - spin_unlock(&cmd->t_state_lock); - - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - - if (!cmd->se_lun) { - pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); - BUG(); - } - /* - * If the Storage engine still owns the iscsi_cmd_t, determine - * and/or stop its context. - */ - pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" - "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - - if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } - - pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" - "_wait_for_tasks(): SUCCESS\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - goto check_cond; - } - cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - - /* - * The Storage engine stopped this struct se_cmd before it was - * send to the fabric frontend for delivery back to the - * Initiator Node. Return this SCSI CDB back with an - * CHECK_CONDITION status. - */ -check_cond: - transport_send_check_condition_and_sense(cmd, - TCM_NON_EXISTENT_LUN, 0); - /* - * If the fabric frontend is waiting for this iscsi_cmd_t to - * be released, notify the waiting thread now that LU has - * finished accessing it. 
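All of the per-command LUN-stop machinery being deleted above is superseded by two much simpler contracts: session teardown becomes target_sess_cmd_list_set_waiting() followed by target_wait_for_sess_cmds(), and LUN shutdown becomes a percpu_ref kill-and-wait, both visible in the following hunks. A sketch of the fabric-side session shutdown sequence (the callback name is hypothetical):

static void my_fabric_close_session(struct se_session *se_sess)
{
	/* Phase 1: splice outstanding commands onto sess_wait_list and
	 * mark them, so target_release_cmd_kref() will complete them. */
	target_sess_cmd_list_set_waiting(se_sess);

	/* Phase 2: block on each command's cmd_wait_comp until the last
	 * reference has been dropped, then tear the session down. */
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session(se_sess);
}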
- */ - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (cmd->transport_state & CMD_T_LUN_FE_STOP) { - pr_debug("SE_LUN[%d] - Detected FE stop for" - " struct se_cmd: %p ITT: 0x%08x\n", - lun->unpacked_lun, - cmd, cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irqrestore(&cmd->t_state_lock, - cmd_flags); - transport_cmd_check_stop(cmd, false); - complete(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } - pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", - lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + WARN_ON(!list_empty(&se_sess->sess_cmd_list)); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - } - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); } +EXPORT_SYMBOL(target_wait_for_sess_cmds); -static int transport_clear_lun_thread(void *p) +static int transport_clear_lun_ref_thread(void *p) { struct se_lun *lun = p; - __transport_clear_lun_from_sessions(lun); + percpu_ref_kill(&lun->lun_ref); + + wait_for_completion(&lun->lun_ref_comp); complete(&lun->lun_shutdown_comp); return 0; } -int transport_clear_lun_from_sessions(struct se_lun *lun) +int transport_clear_lun_ref(struct se_lun *lun) { struct task_struct *kt; - kt = kthread_run(transport_clear_lun_thread, lun, + kt = kthread_run(transport_clear_lun_ref_thread, lun, "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { pr_err("Unable to start clear_lun thread\n"); @@ -2492,43 +2539,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); return false; } - /* - * If we are already stopped due to an external event (ie: LUN shutdown) - * sleep until the connection can have the passed struct se_cmd back. - * The cmd->transport_lun_stopped_sem will be upped by - * transport_clear_lun_from_sessions() once the ConfigFS context caller - * has completed its operation on the struct se_cmd. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("wait_for_tasks: Stopping" - " wait_for_completion(&cmd->t_tasktransport_lun_fe" - "_stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - /* - * There is a special case for WRITES where a FE exception + - * LUN shutdown means ConfigFS context is still sleeping on - * transport_lun_stop_comp in transport_lun_wait_for_tasks(). - * We go ahead and up transport_lun_stop_comp just to be sure - * here. - */ - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->transport_lun_stop_comp); - wait_for_completion(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&cmd->t_state_lock, flags); - - target_remove_from_state_list(cmd); - /* - * At this point, the frontend who was the originator of this - * struct se_cmd, now owns the structure and can be released through - * normal means below. - */ - pr_debug("wait_for_tasks: Stopped" - " wait_for_completion(&cmd->t_tasktransport_lun_fe_" - "stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_LUN_STOP; - } if (!(cmd->transport_state & CMD_T_ACTIVE)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2570,6 +2580,19 @@ static int transport_get_sense_codes( return 0; } +static +void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector) +{ + /* Place failed LBA in sense data information descriptor 0. 
*/ + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc; + buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */ + buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa; + buffer[SPC_VALIDITY_OFFSET] = 0x80; + + /* Descriptor Information: failing sector */ + put_unaligned_be64(bad_sector, &buffer[12]); +} + int transport_send_check_condition_and_sense(struct se_cmd *cmd, sense_reason_t reason, int from_transport) @@ -2674,6 +2697,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, /* INVALID FIELD IN PARAMETER LIST */ buffer[SPC_ASC_KEY_OFFSET] = 0x26; break; + case TCM_PARAMETER_LIST_LENGTH_ERROR: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* PARAMETER LIST LENGTH ERROR */ + buffer[SPC_ASC_KEY_OFFSET] = 0x1a; + break; case TCM_UNEXPECTED_UNSOLICITED_DATA: /* CURRENT ERROR */ buffer[0] = 0x70; @@ -2745,13 +2777,60 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, buffer[SPC_ASC_KEY_OFFSET] = asc; buffer[SPC_ASCQ_KEY_OFFSET] = ascq; break; - case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: - default: + case TCM_MISCOMPARE_VERIFY: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE; + /* MISCOMPARE DURING VERIFY OPERATION */ + buffer[SPC_ASC_KEY_OFFSET] = 0x1d; + buffer[SPC_ASCQ_KEY_OFFSET] = 0x00; + break; + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL BLOCK GUARD CHECK FAILED */ + buffer[SPC_ASC_KEY_OFFSET] = 0x10; + buffer[SPC_ASCQ_KEY_OFFSET] = 0x01; + transport_err_sector_info(buffer, cmd->bad_sector); + break; + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ + buffer[SPC_ASC_KEY_OFFSET] = 0x10; + buffer[SPC_ASCQ_KEY_OFFSET] = 0x02; + transport_err_sector_info(buffer, cmd->bad_sector); + break; + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: /* CURRENT ERROR */ buffer[0] = 0x70; buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ + buffer[SPC_ASC_KEY_OFFSET] = 0x10; + buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; + transport_err_sector_info(buffer, cmd->bad_sector); + break; + case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: + default: + /* CURRENT ERROR */ + buffer[0] = 0x70; + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* + * Returning ILLEGAL REQUEST would cause immediate IO errors on + * Solaris initiators. Returning NOT READY instead means the + * operations will be retried a finite number of times and we + * can survive intermittent errors. 
+ */ + buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; /* LOGICAL UNIT COMMUNICATION FAILURE */ buffer[SPC_ASC_KEY_OFFSET] = 0x08; break; @@ -2767,6 +2846,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; after_reason: + trace_target_cmd_complete(cmd); return cmd->se_tfo->queue_status(cmd); } EXPORT_SYMBOL(transport_send_check_condition_and_sense); @@ -2776,13 +2856,19 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) if (!(cmd->transport_state & CMD_T_ABORTED)) return 0; - if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) + /* + * If cmd has been aborted but either no status is to be sent or it has + * already been sent, just return + */ + if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) return 1; pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); - cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; + cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + trace_target_cmd_complete(cmd); cmd->se_tfo->queue_status(cmd); return 1; @@ -2794,7 +2880,7 @@ void transport_send_task_abort(struct se_cmd *cmd) unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { + if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } @@ -2809,7 +2895,9 @@ void transport_send_task_abort(struct se_cmd *cmd) if (cmd->data_direction == DMA_TO_DEVICE) { if (cmd->se_tfo->write_pending_status(cmd) != 0) { cmd->transport_state |= CMD_T_ABORTED; - smp_mb__after_atomic_inc(); + cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; + smp_mb__after_atomic(); + return; } } cmd->scsi_status = SAM_STAT_TASK_ABORTED; @@ -2820,6 +2908,7 @@ void transport_send_task_abort(struct se_cmd *cmd) " ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); + trace_target_cmd_complete(cmd); cmd->se_tfo->queue_status(cmd); } @@ -2866,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work) int transport_generic_handle_tmr( struct se_cmd *cmd) { + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->transport_state |= CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + INIT_WORK(&cmd->work, target_tmr_work); queue_work(cmd->se_dev->tmr_wq, &cmd->work); return 0; diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index bf0e390ce2d..101858e245b 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -3,7 +3,7 @@ * * This file contains logic for SPC-3 Unit Attention emulation * - * (c) Copyright 2009-2012 RisingTide Systems LLC. + * (c) Copyright 2009-2013 Datera, Inc. * * Nicholas A. 
Bellinger <nab@kernel.org> * @@ -98,7 +98,6 @@ int core_scsi3_ua_allocate( pr_err("Unable to allocate struct se_ua\n"); return -ENOMEM; } - INIT_LIST_HEAD(&ua->ua_dev_list); INIT_LIST_HEAD(&ua->ua_nacl_list); ua->ua_nacl = nacl; @@ -163,7 +162,7 @@ int core_scsi3_ua_allocate( spin_unlock_irq(&nacl->device_list_lock); atomic_inc(&deve->ua_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); return 0; } list_add_tail(&ua->ua_nacl_list, &deve->ua_list); @@ -176,7 +175,7 @@ int core_scsi3_ua_allocate( asc, ascq); atomic_inc(&deve->ua_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); return 0; } @@ -191,7 +190,7 @@ void core_scsi3_ua_release_all( kmem_cache_free(se_ua_cache, ua); atomic_dec(&deve->ua_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&deve->ua_lock); } @@ -252,7 +251,7 @@ void core_scsi3_ua_for_check_condition( kmem_cache_free(se_ua_cache, ua); atomic_dec(&deve->ua_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); @@ -311,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense( kmem_cache_free(se_ua_cache, ua); atomic_dec(&deve->ua_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h index 0204952fe4d..be912b36daa 100644 --- a/drivers/target/target_core_ua.h +++ b/drivers/target/target_core_ua.h @@ -19,7 +19,7 @@ #define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 #define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 #define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 -#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 +#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 #define ASCQ_2AH_PRIORITY_CHANGED 0x08 #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c new file mode 100644 index 00000000000..e9186cdf35e --- /dev/null +++ b/drivers/target/target_core_xcopy.c @@ -0,0 +1,1081 @@ +/******************************************************************************* + * Filename: target_core_xcopy.c + * + * This file contains support for SPC-4 Extended-Copy offload with generic + * TCM backends. + * + * Copyright (c) 2011-2013 Datera, Inc. All rights reserved. + * + * Author: + * Nicholas A. Bellinger <nab@daterainc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + ******************************************************************************/ + +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/configfs.h> +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <asm/unaligned.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> +#include <target/target_core_configfs.h> + +#include "target_core_pr.h" +#include "target_core_ua.h" +#include "target_core_xcopy.h" + +static struct workqueue_struct *xcopy_wq = NULL; +/* + * From target_core_device.c + */ +extern struct mutex g_device_mutex; +extern struct list_head g_device_list; +/* + * From target_core_configfs.c + */ +extern struct configfs_subsystem *target_core_subsystem[]; + +static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) +{ + int off = 0; + + buf[off++] = (0x6 << 4); + buf[off++] = 0x01; + buf[off++] = 0x40; + buf[off] = (0x5 << 4); + + spc_parse_naa_6h_vendor_specific(dev, &buf[off]); + return 0; +} + +static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, + bool src) +{ + struct se_device *se_dev; + struct configfs_subsystem *subsys = target_core_subsystem[0]; + unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; + int rc; + + if (src) + dev_wwn = &xop->dst_tid_wwn[0]; + else + dev_wwn = &xop->src_tid_wwn[0]; + + mutex_lock(&g_device_mutex); + list_for_each_entry(se_dev, &g_device_list, g_dev_node) { + + if (!se_dev->dev_attrib.emulate_3pc) + continue; + + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); + target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); + + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); + if (rc != 0) + continue; + + if (src) { + xop->dst_dev = se_dev; + pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" + " se_dev\n", xop->dst_dev); + } else { + xop->src_dev = se_dev; + pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" + " se_dev\n", xop->src_dev); + } + + rc = configfs_depend_item(subsys, + &se_dev->dev_group.cg_item); + if (rc != 0) { + pr_err("configfs_depend_item attempt failed:" + " %d for se_dev: %p\n", rc, se_dev); + mutex_unlock(&g_device_mutex); + return rc; + } + + pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" + " se_dev->se_dev_group: %p\n", subsys, se_dev, + &se_dev->dev_group); + + mutex_unlock(&g_device_mutex); + return 0; + } + mutex_unlock(&g_device_mutex); + + pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + return -EINVAL; +} + +static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, + unsigned char *p, bool src) +{ + unsigned char *desc = p; + unsigned short ript; + u8 desig_len; + /* + * Extract RELATIVE INITIATOR PORT IDENTIFIER + */ + ript = get_unaligned_be16(&desc[2]); + pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript); + /* + * Check for supported code set, association, and designator type + */ + if ((desc[4] & 0x0f) != 0x1) { + pr_err("XCOPY 0xe4: code set of non binary type not supported\n"); + return -EINVAL; + } + if ((desc[5] & 0x30) != 0x00) { + pr_err("XCOPY 0xe4: association other than LUN not supported\n"); + return -EINVAL; + } + if ((desc[5] & 0x0f) != 0x3) { + pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n", + (desc[5] & 0x0f)); + return -EINVAL; + } + /* + * Check for matching 16 byte length for NAA IEEE Registered Extended + * Assigned designator + */ + desig_len = desc[7]; + if (desig_len != 
16) { + pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len); + return -EINVAL; + } + pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len); + /* + * Check for NAA IEEE Registered Extended Assigned header.. + */ + if ((desc[8] & 0xf0) != 0x60) { + pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n", + (desc[8] & 0xf0)); + return -EINVAL; + } + + if (src) { + memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* + * Determine if the source designator matches the local device + */ + if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0], + XCOPY_NAA_IEEE_REGEX_LEN)) { + xop->op_origin = XCOL_SOURCE_RECV_OP; + xop->src_dev = se_cmd->se_dev; + pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" + " received xop\n", xop->src_dev); + } + } else { + memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* + * Determine if the destination designator matches the local device + */ + if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], + XCOPY_NAA_IEEE_REGEX_LEN)) { + xop->op_origin = XCOL_DEST_RECV_OP; + xop->dst_dev = se_cmd->se_dev; + pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination" + " received xop\n", xop->dst_dev); + } + } + + return 0; +} + +static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + struct xcopy_op *xop, unsigned char *p, + unsigned short tdll) +{ + struct se_device *local_dev = se_cmd->se_dev; + unsigned char *desc = p; + int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; + unsigned short start = 0; + bool src = true; + + if (offset != 0) { + pr_err("XCOPY target descriptor list length is not a" + " multiple of %d\n", XCOPY_TARGET_DESC_LEN); + return -EINVAL; + } + if (tdll > 64) { + pr_err("XCOPY target descriptor supports a maximum of" + " two src/dest descriptors, tdll: %hu too large..\n", tdll); + return -EINVAL; + } + /* + * Generate an IEEE Registered Extended designator based upon the + * se_device the XCOPY was received upon.. + */ + memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); + target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]); + + while (start < tdll) { + /* + * Check for target descriptor identification of 0xE4 type, and + * use VPD 0x83 WWPN matching .. + */ + switch (desc[0]) { + case 0xe4: + rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, + &desc[0], src); + if (rc != 0) + goto out; + /* + * Assume target descriptors are in source -> destination order..
+ */ + if (src) + src = false; + else + src = true; + start += XCOPY_TARGET_DESC_LEN; + desc += XCOPY_TARGET_DESC_LEN; + ret++; + break; + default: + pr_err("XCOPY unsupported descriptor type code:" + " 0x%02x\n", desc[0]); + goto out; + } + } + + if (xop->op_origin == XCOL_SOURCE_RECV_OP) + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); + else + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); + + if (rc < 0) + goto out; + + pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", + xop->src_dev, &xop->src_tid_wwn[0]); + pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", + xop->dst_dev, &xop->dst_tid_wwn[0]); + + return ret; + +out: + return -EINVAL; +} + +static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop, + unsigned char *p) +{ + unsigned char *desc = p; + int dc = (desc[1] & 0x02); + unsigned short desc_len; + + desc_len = get_unaligned_be16(&desc[2]); + if (desc_len != 0x18) { + pr_err("XCOPY segment desc 0x02: Illegal desc_len:" + " %hu\n", desc_len); + return -EINVAL; + } + + xop->stdi = get_unaligned_be16(&desc[4]); + xop->dtdi = get_unaligned_be16(&desc[6]); + pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n", + desc_len, xop->stdi, xop->dtdi, dc); + + xop->nolb = get_unaligned_be16(&desc[10]); + xop->src_lba = get_unaligned_be64(&desc[12]); + xop->dst_lba = get_unaligned_be64(&desc[20]); + pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n", + xop->nolb, (unsigned long long)xop->src_lba, + (unsigned long long)xop->dst_lba); + + if (dc != 0) { + xop->dbl = (desc[29] & 0xff) << 16; + xop->dbl |= (desc[30] & 0xff) << 8; + xop->dbl |= desc[31] & 0xff; + + pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); + } + return 0; +} + +static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, + struct xcopy_op *xop, unsigned char *p, + unsigned int sdll) +{ + unsigned char *desc = p; + unsigned int start = 0; + int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; + + if (offset != 0) { + pr_err("XCOPY segment descriptor list length is not a" + " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); + return -EINVAL; + } + + while (start < sdll) { + /* + * Check segment descriptor type code for block -> block + */ + switch (desc[0]) { + case 0x02: + rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc); + if (rc < 0) + goto out; + + ret++; + start += XCOPY_SEGMENT_DESC_LEN; + desc += XCOPY_SEGMENT_DESC_LEN; + break; + default: + pr_err("XCOPY unsupported segment descriptor" + " type: 0x%02x\n", desc[0]); + goto out; + } + } + + return ret; + +out: + return -EINVAL; +} + +/* + * Start xcopy_pt ops + */ + +struct xcopy_pt_cmd { + bool remote_port; + struct se_cmd se_cmd; + struct xcopy_op *xcopy_op; + struct completion xpt_passthrough_sem; + unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; +}; + +static struct se_port xcopy_pt_port; +static struct se_portal_group xcopy_pt_tpg; +static struct se_session xcopy_pt_sess; +static struct se_node_acl xcopy_pt_nacl; + +static char *xcopy_pt_get_fabric_name(void) +{ + return "xcopy-pt"; +} + +static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd) +{ + return 0; +} + +static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) +{ + struct configfs_subsystem *subsys = target_core_subsystem[0]; + struct se_device *remote_dev; + + if (xop->op_origin == XCOL_SOURCE_RECV_OP) + remote_dev = xop->dst_dev; + else + remote_dev = xop->src_dev; + +
pr_debug("Calling configfs_undepend_item for subsys: %p" + " remote_dev: %p remote_dev->dev_group: %p\n", + subsys, remote_dev, &remote_dev->dev_group.cg_item); + + configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); +} + +static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) +{ + struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, + struct xcopy_pt_cmd, se_cmd); + + kfree(xpt_cmd); +} + +static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd) +{ + struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, + struct xcopy_pt_cmd, se_cmd); + + complete(&xpt_cmd->xpt_passthrough_sem); + return 0; +} + +static int xcopy_pt_write_pending(struct se_cmd *se_cmd) +{ + return 0; +} + +static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd) +{ + return 0; +} + +static int xcopy_pt_queue_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static struct target_core_fabric_ops xcopy_pt_tfo = { + .get_fabric_name = xcopy_pt_get_fabric_name, + .get_task_tag = xcopy_pt_get_tag, + .get_cmd_state = xcopy_pt_get_cmd_state, + .release_cmd = xcopy_pt_release_cmd, + .check_stop_free = xcopy_pt_check_stop_free, + .write_pending = xcopy_pt_write_pending, + .write_pending_status = xcopy_pt_write_pending_status, + .queue_data_in = xcopy_pt_queue_data_in, + .queue_status = xcopy_pt_queue_status, +}; + +/* + * End xcopy_pt_ops + */ + +int target_xcopy_setup_pt(void) +{ + xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); + if (!xcopy_wq) { + pr_err("Unable to allocate xcopy_wq\n"); + return -ENOMEM; + } + + memset(&xcopy_pt_port, 0, sizeof(struct se_port)); + INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list); + INIT_LIST_HEAD(&xcopy_pt_port.sep_list); + mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex); + + memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group)); + INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node); + INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list); + INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list); + + xcopy_pt_port.sep_tpg = &xcopy_pt_tpg; + xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo; + + memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl)); + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); + memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); + INIT_LIST_HEAD(&xcopy_pt_sess.sess_list); + INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list); + + xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; + xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; + + xcopy_pt_sess.se_tpg = &xcopy_pt_tpg; + xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl; + + return 0; +} + +void target_xcopy_release_pt(void) +{ + if (xcopy_wq) + destroy_workqueue(xcopy_wq); +} + +static void target_xcopy_setup_pt_port( + struct xcopy_pt_cmd *xpt_cmd, + struct xcopy_op *xop, + bool remote_port) +{ + struct se_cmd *ec_cmd = xop->xop_se_cmd; + struct se_cmd *pt_cmd = &xpt_cmd->se_cmd; + + if (xop->op_origin == XCOL_SOURCE_RECV_OP) { + /* + * Honor destination port reservations for X-COPY PUSH emulation + * when CDB is received on local source port, and READs blocks to + * WRITE on remote destination port. 
+ */ + if (remote_port) { + xpt_cmd->remote_port = remote_port; + pt_cmd->se_lun->lun_sep = &xcopy_pt_port; + pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to" + " cmd->se_lun->lun_sep for X-COPY data PUSH\n", + pt_cmd->se_lun->lun_sep); + } else { + pt_cmd->se_lun = ec_cmd->se_lun; + pt_cmd->se_dev = ec_cmd->se_dev; + + pr_debug("Honoring local SRC port from ec_cmd->se_dev:" + " %p\n", pt_cmd->se_dev); + pt_cmd->se_lun = ec_cmd->se_lun; + pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n", + pt_cmd->se_lun); + } + } else { + /* + * Honor source port reservation for X-COPY PULL emulation + * when CDB is received on local destination port, and READs + * blocks from the remote source port to WRITE on local + * destination port. + */ + if (remote_port) { + xpt_cmd->remote_port = remote_port; + pt_cmd->se_lun->lun_sep = &xcopy_pt_port; + pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to" + " cmd->se_lun->lun_sep for X-COPY data PULL\n", + pt_cmd->se_lun->lun_sep); + } else { + pt_cmd->se_lun = ec_cmd->se_lun; + pt_cmd->se_dev = ec_cmd->se_dev; + + pr_debug("Honoring local DST port from ec_cmd->se_dev:" + " %p\n", pt_cmd->se_dev); + pt_cmd->se_lun = ec_cmd->se_lun; + pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n", + pt_cmd->se_lun); + } + } +} + +static int target_xcopy_init_pt_lun( + struct xcopy_pt_cmd *xpt_cmd, + struct xcopy_op *xop, + struct se_device *se_dev, + struct se_cmd *pt_cmd, + bool remote_port) +{ + /* + * Don't allocate + init a pt_cmd->se_lun if honoring local port for + * reservations. The pt_cmd->se_lun pointer will be set up from within + * target_xcopy_setup_pt_port() + */ + if (!remote_port) { + pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; + return 0; + } + + pt_cmd->se_lun = &se_dev->xcopy_lun; + pt_cmd->se_dev = se_dev; + + pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); + pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; + + pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", + pt_cmd->se_lun->lun_se_dev); + + return 0; +} + +static int target_xcopy_setup_pt_cmd( + struct xcopy_pt_cmd *xpt_cmd, + struct xcopy_op *xop, + struct se_device *se_dev, + unsigned char *cdb, + bool remote_port, + bool alloc_mem) +{ + struct se_cmd *cmd = &xpt_cmd->se_cmd; + sense_reason_t sense_rc; + int ret = 0, rc; + /* + * Setup LUN+port to honor reservations based upon xop->op_origin for + * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. + */ + rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port); + if (rc < 0) { + ret = rc; + goto out; + } + xpt_cmd->xcopy_op = xop; + target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); + + sense_rc = target_setup_cmd_from_cdb(cmd, cdb); + if (sense_rc) { + ret = -EINVAL; + goto out; + } + + if (alloc_mem) { + rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, + cmd->data_length, false); + if (rc < 0) { + ret = rc; + goto out; + } + /* + * Set this bit so that transport_free_pages() allows the + * caller to release SGLs + physical memory allocated by + * transport_generic_get_mem().. + */ + cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + } else { + /* + * Here the previously allocated SGLs for the internal READ + * are mapped zero-copy to the internal WRITE.
+ */ + sense_rc = transport_generic_map_mem_to_cmd(cmd, + xop->xop_data_sg, xop->xop_data_nents, + NULL, 0); + if (sense_rc) { + ret = -EINVAL; + goto out; + } + + pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:" + " %u\n", cmd->t_data_sg, cmd->t_data_nents); + } + + return 0; + +out: + return ret; +} + +static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd) +{ + struct se_cmd *se_cmd = &xpt_cmd->se_cmd; + sense_reason_t sense_rc; + + sense_rc = transport_generic_new_cmd(se_cmd); + if (sense_rc) + return -EINVAL; + + if (se_cmd->data_direction == DMA_TO_DEVICE) + target_execute_cmd(se_cmd); + + wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem); + + pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", + se_cmd->scsi_status); + + return (se_cmd->scsi_status) ? -EINVAL : 0; +} + +static int target_xcopy_read_source( + struct se_cmd *ec_cmd, + struct xcopy_op *xop, + struct se_device *src_dev, + sector_t src_lba, + u32 src_sectors) +{ + struct xcopy_pt_cmd *xpt_cmd; + struct se_cmd *se_cmd; + u32 length = (src_sectors * src_dev->dev_attrib.block_size); + int rc; + unsigned char cdb[16]; + bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP); + + xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL); + if (!xpt_cmd) { + pr_err("Unable to allocate xcopy_pt_cmd\n"); + return -ENOMEM; + } + init_completion(&xpt_cmd->xpt_passthrough_sem); + se_cmd = &xpt_cmd->se_cmd; + + memset(&cdb[0], 0, 16); + cdb[0] = READ_16; + put_unaligned_be64(src_lba, &cdb[2]); + put_unaligned_be32(src_sectors, &cdb[10]); + pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n", + (unsigned long long)src_lba, src_sectors, length); + + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, + DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]); + xop->src_pt_cmd = xpt_cmd; + + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], + remote_port, true); + if (rc < 0) { + transport_generic_free_cmd(se_cmd, 0); + return rc; + } + + xop->xop_data_sg = se_cmd->t_data_sg; + xop->xop_data_nents = se_cmd->t_data_nents; + pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ" + " memory\n", xop->xop_data_sg, xop->xop_data_nents); + + rc = target_xcopy_issue_pt_cmd(xpt_cmd); + if (rc < 0) { + transport_generic_free_cmd(se_cmd, 0); + return rc; + } + /* + * Clear off the allocated t_data_sg, that has been saved for + * zero-copy WRITE submission reuse in struct xcopy_op.. 
+ */ + se_cmd->t_data_sg = NULL; + se_cmd->t_data_nents = 0; + + return 0; +} + +static int target_xcopy_write_destination( + struct se_cmd *ec_cmd, + struct xcopy_op *xop, + struct se_device *dst_dev, + sector_t dst_lba, + u32 dst_sectors) +{ + struct xcopy_pt_cmd *xpt_cmd; + struct se_cmd *se_cmd; + u32 length = (dst_sectors * dst_dev->dev_attrib.block_size); + int rc; + unsigned char cdb[16]; + bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP); + + xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL); + if (!xpt_cmd) { + pr_err("Unable to allocate xcopy_pt_cmd\n"); + return -ENOMEM; + } + init_completion(&xpt_cmd->xpt_passthrough_sem); + se_cmd = &xpt_cmd->se_cmd; + + memset(&cdb[0], 0, 16); + cdb[0] = WRITE_16; + put_unaligned_be64(dst_lba, &cdb[2]); + put_unaligned_be32(dst_sectors, &cdb[10]); + pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n", + (unsigned long long)dst_lba, dst_sectors, length); + + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, + DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]); + xop->dst_pt_cmd = xpt_cmd; + + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], + remote_port, false); + if (rc < 0) { + struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd; + /* + * If the failure happened before the t_mem_list hand-off in + * target_xcopy_setup_pt_cmd(), reset memory + clear flag so that + * core releases this memory on error during X-COPY WRITE I/O. + */ + src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + src_cmd->t_data_sg = xop->xop_data_sg; + src_cmd->t_data_nents = xop->xop_data_nents; + + transport_generic_free_cmd(se_cmd, 0); + return rc; + } + + rc = target_xcopy_issue_pt_cmd(xpt_cmd); + if (rc < 0) { + se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + transport_generic_free_cmd(se_cmd, 0); + return rc; + } + + return 0; +} + +static void target_xcopy_do_work(struct work_struct *work) +{ + struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work); + struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev; + struct se_cmd *ec_cmd = xop->xop_se_cmd; + sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba; + unsigned int max_sectors; + int rc; + unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0; + + end_lba = src_lba + nolb; + /* + * Break up XCOPY I/O into hw_max_sectors sized I/O based on the + * smallest hw_max_sectors between src_dev + dst_dev. + */ + max_sectors = min(src_dev->dev_attrib.hw_max_sectors, + dst_dev->dev_attrib.hw_max_sectors); + max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS); + + max_nolb = min_t(u16, max_sectors, ((u16)(~0U))); + + pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n", + nolb, max_nolb, (unsigned long long)end_lba); + pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n", + (unsigned long long)src_lba, (unsigned long long)dst_lba); + + while (src_lba < end_lba) { + cur_nolb = min(nolb, max_nolb); + + pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu," + " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb); + + rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb); + if (rc < 0) + goto out; + + src_lba += cur_nolb; + pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n", + (unsigned long long)src_lba); + + pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu," + " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb); + + rc =
target_xcopy_write_destination(ec_cmd, xop, dst_dev, + dst_lba, cur_nolb); + if (rc < 0) { + transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0); + goto out; + } + + dst_lba += cur_nolb; + pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n", + (unsigned long long)dst_lba); + + copied_nolb += cur_nolb; + nolb -= cur_nolb; + + transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0); + xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + + transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0); + } + + xcopy_pt_undepend_remotedev(xop); + kfree(xop); + + pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n", + (unsigned long long)src_lba, (unsigned long long)dst_lba); + pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n", + copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size); + + pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n"); + target_complete_cmd(ec_cmd, SAM_STAT_GOOD); + return; + +out: + xcopy_pt_undepend_remotedev(xop); + kfree(xop); + + pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n"); + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); +} + +sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) +{ + struct se_device *dev = se_cmd->se_dev; + struct xcopy_op *xop = NULL; + unsigned char *p = NULL, *seg_desc; + unsigned int list_id, list_id_usage, sdll, inline_dl, sa; + sense_reason_t ret = TCM_INVALID_PARAMETER_LIST; + int rc; + unsigned short tdll; + + if (!dev->dev_attrib.emulate_3pc) { + pr_err("EXTENDED_COPY operation explicitly disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + sa = se_cmd->t_task_cdb[1] & 0x1f; + if (sa != 0x00) { + pr_err("EXTENDED_COPY(LID4) not supported\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); + if (!xop) { + pr_err("Unable to allocate xcopy_op\n"); + return TCM_OUT_OF_RESOURCES; + } + xop->xop_se_cmd = se_cmd; + + p = transport_kmap_data_sg(se_cmd); + if (!p) { + pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); + kfree(xop); + return TCM_OUT_OF_RESOURCES; + } + + list_id = p[0]; + list_id_usage = (p[1] & 0x18) >> 3; + + /* + * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH + */ + tdll = get_unaligned_be16(&p[2]); + sdll = get_unaligned_be32(&p[8]); + + inline_dl = get_unaligned_be32(&p[12]); + if (inline_dl != 0) { + pr_err("XCOPY with non zero inline data length\n"); + goto out; + } + + pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" + " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, + tdll, sdll, inline_dl); + + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll); + if (rc <= 0) + goto out; + + if (xop->src_dev->dev_attrib.block_size != + xop->dst_dev->dev_attrib.block_size) { + pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev" + " block_size: %u currently unsupported\n", + xop->src_dev->dev_attrib.block_size, + xop->dst_dev->dev_attrib.block_size); + xcopy_pt_undepend_remotedev(xop); + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + + pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, + rc * XCOPY_TARGET_DESC_LEN); + seg_desc = &p[16]; + seg_desc += (rc * XCOPY_TARGET_DESC_LEN); + + rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); + if (rc <= 0) { + xcopy_pt_undepend_remotedev(xop); + goto out; + } + 
transport_kunmap_data_sg(se_cmd); + + pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, + rc * XCOPY_SEGMENT_DESC_LEN); + INIT_WORK(&xop->xop_work, target_xcopy_do_work); + queue_work(xcopy_wq, &xop->xop_work); + return TCM_NO_SENSE; + +out: + if (p) + transport_kunmap_data_sg(se_cmd); + kfree(xop); + return ret; +} + +static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) +{ + unsigned char *p; + + p = transport_kmap_data_sg(se_cmd); + if (!p) { + pr_err("transport_kmap_data_sg failed in" + " target_rcr_operating_parameters\n"); + return TCM_OUT_OF_RESOURCES; + } + + if (se_cmd->data_length < 54) { + pr_err("Receive Copy Results Op Parameters length" + " too small: %u\n", se_cmd->data_length); + transport_kunmap_data_sg(se_cmd); + return TCM_INVALID_CDB_FIELD; + } + /* + * Set SNLID=1 (Supports no List ID) + */ + p[4] = 0x1; + /* + * MAXIMUM TARGET DESCRIPTOR COUNT + */ + put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]); + /* + * MAXIMUM SEGMENT DESCRIPTOR COUNT + */ + put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]); + /* + * MAXIMUM DESCRIPTOR LIST LENGTH + */ + put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]); + /* + * MAXIMUM SEGMENT LENGTH + */ + put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]); + /* + * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED) + */ + put_unaligned_be32(0x0, &p[20]); + /* + * HELD DATA LIMIT + */ + put_unaligned_be32(0x0, &p[24]); + /* + * MAXIMUM STREAM DEVICE TRANSFER SIZE + */ + put_unaligned_be32(0x0, &p[28]); + /* + * TOTAL CONCURRENT COPIES + */ + put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]); + /* + * MAXIMUM CONCURRENT COPIES + */ + p[36] = RCR_OP_MAX_CONCURR_COPIES; + /* + * DATA SEGMENT GRANULARITY (log 2) + */ + p[37] = RCR_OP_DATA_SEG_GRAN_LOG2; + /* + * INLINE DATA GRANULARITY (log 2) + */ + p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2; + /* + * HELD DATA GRANULARITY + */ + p[39] = RCR_OP_HELD_DATA_GRAN_LOG2; + /* + * IMPLEMENTED DESCRIPTOR LIST LENGTH + */ + p[43] = 0x2; + /* + * List of implemented descriptor type codes (ordered) + */ + p[44] = 0x02; /* Copy Block to Block device */ + p[45] = 0xe4; /* Identification descriptor target descriptor */ + + /* + * AVAILABLE DATA (n-3) + */ + put_unaligned_be32(42, &p[0]); + + transport_kunmap_data_sg(se_cmd); + target_complete_cmd(se_cmd, GOOD); + + return TCM_NO_SENSE; +} + +sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd) +{ + unsigned char *cdb = &se_cmd->t_task_cdb[0]; + int sa = (cdb[1] & 0x1f), list_id = cdb[2]; + sense_reason_t rc = TCM_NO_SENSE; + + pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:" + " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length); + + if (list_id != 0) { + pr_err("Receive Copy Results with non zero list identifier" + " not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + switch (sa) { + case RCR_SA_OPERATING_PARAMETERS: + rc = target_rcr_operating_parameters(se_cmd); + break; + case RCR_SA_COPY_STATUS: + case RCR_SA_RECEIVE_DATA: + case RCR_SA_FAILED_SEGMENT_DETAILS: + default: + pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa); + return TCM_INVALID_CDB_FIELD; + } + + return rc; +} diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h new file mode 100644 index 00000000000..700a981c7b4 --- /dev/null +++ b/drivers/target/target_core_xcopy.h @@ -0,0 +1,62 @@ +#define XCOPY_TARGET_DESC_LEN 32 +#define XCOPY_SEGMENT_DESC_LEN 28 +#define XCOPY_NAA_IEEE_REGEX_LEN 16 +#define XCOPY_MAX_SECTORS 1024 + +enum
xcopy_origin_list { + XCOL_SOURCE_RECV_OP = 0x01, + XCOL_DEST_RECV_OP = 0x02, +}; + +struct xcopy_pt_cmd; + +struct xcopy_op { + int op_origin; + + struct se_cmd *xop_se_cmd; + struct se_device *src_dev; + unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + struct se_device *dst_dev; + unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + + sector_t src_lba; + sector_t dst_lba; + unsigned short stdi; + unsigned short dtdi; + unsigned short nolb; + unsigned int dbl; + + struct xcopy_pt_cmd *src_pt_cmd; + struct xcopy_pt_cmd *dst_pt_cmd; + + u32 xop_data_nents; + struct scatterlist *xop_data_sg; + struct work_struct xop_work; +}; + +/* + * Receive Copy Results Service Actions + */ +#define RCR_SA_COPY_STATUS 0x00 +#define RCR_SA_RECEIVE_DATA 0x01 +#define RCR_SA_OPERATING_PARAMETERS 0x03 +#define RCR_SA_FAILED_SEGMENT_DETAILS 0x04 + +/* + * Receive Copy Results defs for Operating Parameters + */ +#define RCR_OP_MAX_TARGET_DESC_COUNT 0x2 +#define RCR_OP_MAX_SG_DESC_COUNT 0x1 +#define RCR_OP_MAX_DESC_LIST_LEN 1024 +#define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */ +#define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */ +#define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */ +#define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */ +#define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */ +#define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */ + +extern int target_xcopy_setup_pt(void); +extern void target_xcopy_release_pt(void); +extern sense_reason_t target_do_xcopy(struct se_cmd *); +extern sense_reason_t target_do_receive_copy_results(struct se_cmd *); diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index eea69358ced..a0bcfd3e7e7 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -22,6 +22,7 @@ #define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ #define FT_TPG_NAMELEN 32 /* max length of TPG name */ #define FT_LUN_NAMELEN 32 /* max length of LUN name */ +#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */ struct ft_transport_id { __u8 format; @@ -93,20 +94,19 @@ struct ft_lun { */ struct ft_tpg { u32 index; - struct ft_lport_acl *lport_acl; + struct ft_lport_wwn *lport_wwn; struct ft_tport *tport; /* active tport or NULL */ - struct list_head list; /* linkage in ft_lport_acl tpg_list */ struct list_head lun_list; /* head of LUNs */ struct se_portal_group se_tpg; struct workqueue_struct *workqueue; }; -struct ft_lport_acl { +struct ft_lport_wwn { u64 wwpn; char name[FT_NAMELEN]; - struct list_head list; - struct list_head tpg_list; - struct se_wwn fc_lport_wwn; + struct list_head ft_wwn_node; + struct ft_tpg *tpg; + struct se_wwn se_wwn; }; /* @@ -127,7 +127,6 @@ struct ft_cmd { u32 sg_cnt; /* No. of item in scatterlist */ }; -extern struct list_head ft_lport_list; extern struct mutex ft_lport_lock; extern struct fc4_prov ft_prov; extern struct target_fabric_configfs *ft_configfs; @@ -161,7 +160,8 @@ int ft_write_pending(struct se_cmd *); int ft_write_pending_status(struct se_cmd *); u32 ft_get_task_tag(struct se_cmd *); int ft_get_cmd_state(struct se_cmd *); -int ft_queue_tm_resp(struct se_cmd *); +void ft_queue_tm_resp(struct se_cmd *); +void ft_aborted_task(struct se_cmd *); /* * other internal functions.
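
The new target_core_xcopy.c consumes an EXTENDED_COPY(LID1) parameter list with a fixed layout: a 16-byte header carrying the target descriptor list length at bytes 2-3, the segment descriptor list length at bytes 8-11, and the inline data length at bytes 12-15 (which target_do_xcopy() requires to be zero), followed by 32-byte 0xe4 target descriptors and then 28-byte 0x02 segment descriptors. As a rough sketch of what an initiator-side test tool might put on the wire for the two-descriptor, single-segment case this code supports, the user-space C below builds such a list; the helper names are hypothetical, and each 16-byte NAA buffer must carry 0x6 in the high nibble of byte 0 (NAA IEEE Registered Extended) or the desc[8] check in target_xcopy_parse_tiddesc_e4() rejects it.

/*
 * User-space sketch (not kernel code): build an EXTENDED_COPY(LID1)
 * parameter list using the offsets the parser above reads. Helper
 * names and WWN contents are illustrative assumptions.
 */
#include <stdint.h>
#include <string.h>

static void put_be16(uint8_t *p, uint16_t v) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint8_t *p, uint32_t v)
{
    p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}
static void put_be64(uint8_t *p, uint64_t v)
{
    put_be32(p, v >> 32); put_be32(p + 4, (uint32_t)v);
}

/*
 * buf needs 16 + 2 * 32 + 28 = 108 bytes: header, source + destination
 * 0xe4 identification descriptors, one 0x02 block -> block segment.
 */
static size_t build_xcopy_param_list(uint8_t *buf,
                                     const uint8_t *src_naa,
                                     const uint8_t *dst_naa,
                                     uint64_t src_lba, uint64_t dst_lba,
                                     uint16_t nolb)
{
    uint8_t *td = &buf[16], *sd;
    int i;

    memset(buf, 0, 108);
    put_be16(&buf[2], 2 * 32);  /* TARGET DESCRIPTOR LIST LENGTH */
    put_be32(&buf[8], 28);      /* SEGMENT DESCRIPTOR LIST LENGTH */
    /* bytes 12-15 stay zero: inline data is rejected by the target */

    for (i = 0; i < 2; i++, td += 32) {
        td[0] = 0xe4;           /* identification descriptor */
        td[4] = 0x01;           /* code set: binary */
        td[5] = 0x03;           /* association: LUN, type: NAA */
        td[7] = 16;             /* designator length */
        memcpy(&td[8], i ? dst_naa : src_naa, 16);
    }

    sd = td;                    /* segment descriptor at offset 80 */
    sd[0] = 0x02;               /* block -> block copy */
    put_be16(&sd[2], 0x18);     /* descriptor length */
    put_be16(&sd[4], 0);        /* stdi: first target descriptor */
    put_be16(&sd[6], 1);        /* dtdi: second target descriptor */
    put_be16(&sd[10], nolb);    /* number of logical blocks */
    put_be64(&sd[12], src_lba);
    put_be64(&sd[20], dst_lba);

    return 108;
}

int main(void)
{
    /* placeholder NAA 6h WWNs; only the 0x6 high nibble is mandatory */
    uint8_t list[108];
    uint8_t src_naa[16] = { 0x60, 0x01, 0x40 };
    uint8_t dst_naa[16] = { 0x60, 0x01, 0x40, 0x01 };

    return build_xcopy_param_list(list, src_naa, dst_naa, 0, 2048, 8) != 108;
}

Although target_xcopy_parse_segdesc_02() records stdi/dtdi, the target descriptor parser above assumes plain source -> destination ordering, which is why the sketch emits the source descriptor first.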
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index b406f178ff3..be0c0d08c56 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -28,6 +28,7 @@ #include <linux/configfs.h> #include <linux/ctype.h> #include <linux/hash.h> +#include <linux/percpu_ida.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; struct fc_lport *lport; + struct ft_sess *sess; if (!cmd) return; + sess = cmd->sess; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) lport->tt.seq_release(fr_seq(fp)); fc_frame_free(fp); - ft_sess_put(cmd->sess); /* undo get from lookup at recv */ - kfree(cmd); + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); + ft_sess_put(sess); /* undo get from lookup at recv */ } void ft_release_cmd(struct se_cmd *se_cmd) @@ -125,6 +128,7 @@ int ft_queue_status(struct se_cmd *se_cmd) struct fc_lport *lport; struct fc_exch *ep; size_t len; + int rc; if (cmd->aborted) return 0; @@ -134,9 +138,10 @@ int ft_queue_status(struct se_cmd *se_cmd) len = sizeof(*fcp) + se_cmd->scsi_sense_length; fp = fc_frame_alloc(lport, len); if (!fp) { - /* XXX shouldn't just drop it - requeue and retry? */ - return 0; + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + return -ENOMEM; } + fcp = fc_frame_payload_get(fp, len); memset(fcp, 0, len); fcp->resp.fr_status = se_cmd->scsi_status; @@ -167,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd) fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); - lport->tt.seq_send(lport, cmd->seq, fp); + rc = lport->tt.seq_send(lport, cmd->seq, fp); + if (rc) { + pr_info_ratelimited("%s: Failed to send response frame %p, " + "xid <0x%x>\n", __func__, fp, ep->xid); + /* + * Generate a TASK_SET_FULL status to notify the initiator + * to reduce its queue_depth after the se_cmd response has + * been re-queued by target-core. + */ + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + return -ENOMEM; + } lport->tt.exch_done(cmd->seq); return 0; } @@ -394,14 +410,14 @@ static void ft_send_tm(struct ft_cmd *cmd) /* * Send status from completed task management request.
*/ -int ft_queue_tm_resp(struct se_cmd *se_cmd) +void ft_queue_tm_resp(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct se_tmr_req *tmr = se_cmd->se_tmr_req; enum fcp_resp_rsp_codes code; if (cmd->aborted) - return 0; + return; switch (tmr->response) { case TMR_FUNCTION_COMPLETE: code = FCP_TMF_CMPL; @@ -413,10 +429,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) code = FCP_TMF_REJECTED; break; case TMR_TASK_DOES_NOT_EXIST: - case TMR_TASK_STILL_ALLEGIANT: - case TMR_TASK_FAILOVER_NOT_SUPPORTED: case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: - case TMR_FUNCTION_AUTHORIZATION_FAILED: default: code = FCP_TMF_FAILED; break; @@ -424,7 +437,11 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) pr_debug("tmr fn %d resp %d fcp code %d\n", tmr->function, tmr->response, code); ft_send_resp_code(cmd, code); - return 0; +} + +void ft_aborted_task(struct se_cmd *se_cmd) +{ + return; } static void ft_send_work(struct work_struct *work); @@ -436,14 +453,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) { struct ft_cmd *cmd; struct fc_lport *lport = sess->tport->lport; + struct se_session *se_sess = sess->se_sess; + int tag; - cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); - if (!cmd) + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); + if (tag < 0) goto busy; + + cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct ft_cmd)); + + cmd->se_cmd.map_tag = tag; cmd->sess = sess; cmd->seq = lport->tt.seq_assign(lport, fp); if (!cmd->seq) { - kfree(cmd); + percpu_ida_free(&se_sess->sess_tag_pool, tag); goto busy; } cmd->req_frame = fp; /* hold frame during cmd */ diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index b74feb0d513..efdcb9663a1 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -50,7 +50,7 @@ struct target_fabric_configfs *ft_configfs; -LIST_HEAD(ft_lport_list); +static LIST_HEAD(ft_wwn_list); DEFINE_MUTEX(ft_lport_lock); unsigned int ft_debug_logging; @@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) return found; } -struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) +static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) { struct ft_node_acl *acl; @@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg( struct config_group *group, const char *name) { - struct ft_lport_acl *lacl; + struct ft_lport_wwn *ft_wwn; struct ft_tpg *tpg; struct workqueue_struct *wq; unsigned long index; @@ -311,15 +311,24 @@ static struct se_portal_group *ft_add_tpg( */ if (strstr(name, "tpgt_") != name) return NULL; - if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX) + + ret = kstrtoul(name + 5, 10, &index); + if (ret) + return NULL; + if (index > UINT_MAX) return NULL; - lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); + if ((index != 1)) { + pr_err("Error, a single TPG=1 is used for HW port mappings\n"); + return ERR_PTR(-ENOSYS); + } + + ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn); tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); if (!tpg) return NULL; tpg->index = index; - tpg->lport_acl = lacl; + tpg->lport_wwn = ft_wwn; INIT_LIST_HEAD(&tpg->lun_list); wq = alloc_workqueue("tcm_fc", 0, 1); @@ -338,7 +347,7 @@ static struct se_portal_group *ft_add_tpg( tpg->workqueue = wq; mutex_lock(&ft_lport_lock); - list_add_tail(&tpg->list, &lacl->tpg_list); + ft_wwn->tpg = tpg; mutex_unlock(&ft_lport_lock); return 
&tpg->se_tpg; @@ -347,6 +356,7 @@ static struct se_portal_group *ft_add_tpg( static void ft_del_tpg(struct se_portal_group *se_tpg) { struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); + struct ft_lport_wwn *ft_wwn = tpg->lport_wwn; pr_debug("del tpg %s\n", config_item_name(&tpg->se_tpg.tpg_group.cg_item)); @@ -357,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) synchronize_rcu(); mutex_lock(&ft_lport_lock); - list_del(&tpg->list); + ft_wwn->tpg = NULL; if (tpg->tport) { tpg->tport->tpg = NULL; tpg->tport = NULL; @@ -376,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) */ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport) { - struct ft_lport_acl *lacl; - struct ft_tpg *tpg; + struct ft_lport_wwn *ft_wwn; - list_for_each_entry(lacl, &ft_lport_list, list) { - if (lacl->wwpn == lport->wwpn) { - list_for_each_entry(tpg, &lacl->tpg_list, list) - return tpg; /* XXX for now return first entry */ - return NULL; - } + list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) { + if (ft_wwn->wwpn == lport->wwpn) + return ft_wwn->tpg; } return NULL; } @@ -397,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport) * Add lport to allowed config. * The name is the WWPN in lower-case ASCII, colon-separated bytes. */ -static struct se_wwn *ft_add_lport( +static struct se_wwn *ft_add_wwn( struct target_fabric_configfs *tf, struct config_group *group, const char *name) { - struct ft_lport_acl *lacl; - struct ft_lport_acl *old_lacl; + struct ft_lport_wwn *ft_wwn; + struct ft_lport_wwn *old_ft_wwn; u64 wwpn; - pr_debug("add lport %s\n", name); + pr_debug("add wwn %s\n", name); if (ft_parse_wwn(name, &wwpn, 1) < 0) return NULL; - lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); - if (!lacl) + ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL); + if (!ft_wwn) return NULL; - lacl->wwpn = wwpn; - INIT_LIST_HEAD(&lacl->tpg_list); + ft_wwn->wwpn = wwpn; mutex_lock(&ft_lport_lock); - list_for_each_entry(old_lacl, &ft_lport_list, list) { - if (old_lacl->wwpn == wwpn) { + list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) { + if (old_ft_wwn->wwpn == wwpn) { mutex_unlock(&ft_lport_lock); - kfree(lacl); + kfree(ft_wwn); return NULL; } } - list_add_tail(&lacl->list, &ft_lport_list); - ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn); + list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list); + ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn); mutex_unlock(&ft_lport_lock); - return &lacl->fc_lport_wwn; + return &ft_wwn->se_wwn; } -static void ft_del_lport(struct se_wwn *wwn) +static void ft_del_wwn(struct se_wwn *wwn) { - struct ft_lport_acl *lacl = container_of(wwn, - struct ft_lport_acl, fc_lport_wwn); + struct ft_lport_wwn *ft_wwn = container_of(wwn, + struct ft_lport_wwn, se_wwn); - pr_debug("del lport %s\n", lacl->name); + pr_debug("del wwn %s\n", ft_wwn->name); mutex_lock(&ft_lport_lock); - list_del(&lacl->list); + list_del(&ft_wwn->ft_wwn_node); mutex_unlock(&ft_lport_lock); - kfree(lacl); + kfree(ft_wwn); } static ssize_t ft_wwn_show_attr_version( @@ -467,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) { struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; - return tpg->lport_acl->name; + return tpg->lport_wwn->name; } static u16 ft_get_tag(struct se_portal_group *se_tpg) @@ -532,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = { .queue_data_in = ft_queue_data_in, .queue_status = ft_queue_status, .queue_tm_rsp = ft_queue_tm_resp, + .aborted_task = ft_aborted_task, /* * Setup function pointers for 
generic logic in * target_core_fabric_configfs.c */ - .fabric_make_wwn = &ft_add_lport, - .fabric_drop_wwn = &ft_del_lport, + .fabric_make_wwn = &ft_add_wwn, + .fabric_drop_wwn = &ft_del_wwn, .fabric_make_tpg = &ft_add_tpg, .fabric_drop_tpg = &ft_del_tpg, .fabric_post_link = NULL, @@ -548,7 +554,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { .fabric_drop_nodeacl = &ft_del_acl, }; -int ft_register_configfs(void) +static int ft_register_configfs(void) { struct target_fabric_configfs *fabric; int ret; @@ -567,16 +573,16 @@ int ft_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = ft_nacl_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * register the fabric for use within TCM */ @@ -595,7 +601,7 @@ int ft_register_configfs(void) return 0; } -void ft_deregister_configfs(void) +static void ft_deregister_configfs(void) { if (!ft_configfs) return; diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index b6fd4cf4284..97b486c3dda 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -82,6 +82,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd) if (cmd->aborted) return 0; + + if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL) + goto queue_status; + ep = fc_seq_exch(cmd->seq); lport = ep->lp; cmd->seq = lport->tt.seq_start_next(cmd->seq); @@ -103,6 +107,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd) use_sg = !(remaining % 4); while (remaining) { + struct fc_seq *seq = cmd->seq; + + if (!seq) { + pr_debug("%s: Command aborted, xid 0x%x\n", + __func__, ep->xid); + break; + } if (!mem_len) { sg = sg_next(sg); mem_len = min((size_t)sg->length, remaining); @@ -169,16 +180,25 @@ int ft_queue_data_in(struct se_cmd *se_cmd) f_ctl |= FC_FC_END_SEQ; fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, FC_TYPE_FCP, f_ctl, fh_off); - error = lport->tt.seq_send(lport, cmd->seq, fp); + error = lport->tt.seq_send(lport, seq, fp); if (error) { - /* XXX For now, initiator will retry */ - pr_err_ratelimited("%s: Failed to send frame %p, " + pr_info_ratelimited("%s: Failed to send frame %p, " "xid <0x%x>, remaining %zu, " "lso_max <0x%x>\n", __func__, fp, ep->xid, remaining, lport->lso_max); + /* + * Go ahead and set TASK_SET_FULL status ignoring the + * rest of the DataIN, and immediately attempt to + * send the response via ft_queue_status() in order + * to notify the initiator that it should reduce its + * per LUN queue_depth.
+ */ + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + break; } } +queue_status: return ft_queue_status(se_cmd); } diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 6659dd36e80..21ce50880c7 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *); * Lookup or allocate target local port. * Caller holds ft_lport_lock. */ -static struct ft_tport *ft_tport_create(struct fc_lport *lport) +static struct ft_tport *ft_tport_get(struct fc_lport *lport) { struct ft_tpg *tpg; struct ft_tport *tport; @@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport) if (tport) { tport->tpg = tpg; + tpg->tport = tport; return tport; } @@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport) void ft_lport_add(struct fc_lport *lport, void *arg) { mutex_lock(&ft_lport_lock); - ft_tport_create(lport); + ft_tport_get(lport); mutex_unlock(&ft_lport_lock); } @@ -169,7 +170,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) { struct ft_tport *tport; struct hlist_head *head; - struct hlist_node *pos; struct ft_sess *sess; rcu_read_lock(); @@ -178,7 +178,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) goto out; head = &tport->hash[ft_sess_hash(port_id)]; - hlist_for_each_entry_rcu(sess, pos, head, hash) { + hlist_for_each_entry_rcu(sess, head, hash) { if (sess->port_id == port_id) { kref_get(&sess->kref); rcu_read_unlock(); @@ -201,10 +201,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, { struct ft_sess *sess; struct hlist_head *head; - struct hlist_node *pos; head = &tport->hash[ft_sess_hash(port_id)]; - hlist_for_each_entry_rcu(sess, pos, head, hash) + hlist_for_each_entry_rcu(sess, head, hash) if (sess->port_id == port_id) return sess; @@ -212,7 +211,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, if (!sess) return NULL; - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS, + sizeof(struct ft_cmd), + TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { kfree(sess); return NULL; @@ -253,11 +254,10 @@ static void ft_sess_unhash(struct ft_sess *sess) static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) { struct hlist_head *head; - struct hlist_node *pos; struct ft_sess *sess; head = &tport->hash[ft_sess_hash(port_id)]; - hlist_for_each_entry_rcu(sess, pos, head, hash) { + hlist_for_each_entry_rcu(sess, head, hash) { if (sess->port_id == port_id) { ft_sess_unhash(sess); return sess; @@ -273,12 +273,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) static void ft_sess_delete_all(struct ft_tport *tport) { struct hlist_head *head; - struct hlist_node *pos; struct ft_sess *sess; for (head = tport->hash; head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { - hlist_for_each_entry_rcu(sess, pos, head, hash) { + hlist_for_each_entry_rcu(sess, head, hash) { ft_sess_unhash(sess); transport_deregister_session_configfs(sess->se_sess); ft_sess_put(sess); /* release from table */ @@ -353,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, struct ft_node_acl *acl; u32 fcp_parm; - tport = ft_tport_create(rdata->local_port); + tport = ft_tport_get(rdata->local_port); if (!tport) goto not_target; /* not a target for this local port */ @@ -432,19 +431,12 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len, return 
ret; } -static void ft_sess_rcu_free(struct rcu_head *rcu) -{ - struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu); - - kfree(sess); -} - static void ft_sess_free(struct kref *kref) { struct ft_sess *sess = container_of(kref, struct ft_sess, kref); transport_deregister_session(sess->se_sess); - call_rcu(&sess->rcu, ft_sess_rcu_free); + kfree_rcu(sess, rcu); } void ft_sess_put(struct ft_sess *sess)
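
The tfc_cmd.c and tfc_sess.c hunks above move tcm_fc from per-command kzalloc()/kfree() to per-session preallocation: transport_init_session_tags() sizes se_sess->sess_cmd_map for TCM_FC_DEFAULT_TAGS entries of sizeof(struct ft_cmd), ft_recv_cmd() indexes that map with a tag drawn from sess_tag_pool, and ft_free_cmd() returns the tag instead of freeing memory. A minimal user-space analogue of the pattern, with a plain free-list standing in for the kernel's per-CPU, blocking-capable percpu_ida (all names here are illustrative, not kernel API):

/*
 * User-space analogue of the per-session tag pool: commands live in a
 * preallocated array and a free-list of tags replaces kzalloc()/kfree().
 * The kernel's percpu_ida additionally provides per-CPU tag caching and
 * the option to sleep until a tag becomes free.
 */
#include <stdlib.h>
#include <string.h>

struct ft_cmd_stub {
    int map_tag;                /* mirrors se_cmd.map_tag above */
    /* ... per-command state ... */
};

struct tag_pool {
    int *free_tags;
    int nr_free;
    struct ft_cmd_stub *cmd_map;    /* sess_cmd_map analogue */
};

static int tag_pool_init(struct tag_pool *p, int depth)
{
    int i;

    p->cmd_map = calloc(depth, sizeof(*p->cmd_map));
    p->free_tags = malloc(depth * sizeof(int));
    if (!p->cmd_map || !p->free_tags)
        return -1;
    for (i = 0; i < depth; i++)
        p->free_tags[i] = i;
    p->nr_free = depth;
    return 0;
}

/* ft_recv_cmd() analogue: fail fast ("busy") when no tag is available */
static struct ft_cmd_stub *cmd_alloc(struct tag_pool *p)
{
    struct ft_cmd_stub *cmd;
    int tag;

    if (!p->nr_free)
        return NULL;
    tag = p->free_tags[--p->nr_free];
    cmd = &p->cmd_map[tag];
    memset(cmd, 0, sizeof(*cmd));
    cmd->map_tag = tag;
    return cmd;
}

/* ft_free_cmd() analogue: return the tag instead of kfree() */
static void cmd_free(struct tag_pool *p, struct ft_cmd_stub *cmd)
{
    p->free_tags[p->nr_free++] = cmd->map_tag;
}

int main(void)
{
    struct tag_pool pool;
    struct ft_cmd_stub *cmd;

    if (tag_pool_init(&pool, 512))  /* TCM_FC_DEFAULT_TAGS */
        return 1;
    cmd = cmd_alloc(&pool);
    if (cmd)
        cmd_free(&pool, cmd);
    free(pool.cmd_map);
    free(pool.free_tags);
    return 0;
}

The practical effect shows up in ft_recv_cmd(): under load, command arrival now hits a bounded "busy" condition once the 512 preallocated tags are exhausted, instead of depending on a GFP_ATOMIC allocation that can fail unpredictably under memory pressure.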
