Diffstat (limited to 'drivers/target')
43 files changed, 2666 insertions(+), 591 deletions(-)
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 18303686eb5..dc2d84ac5a0 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig @@ -3,6 +3,7 @@ menuconfig TARGET_CORE tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure" depends on SCSI && BLOCK select CONFIGFS_FS + select CRC_T10DIF default n help Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled @@ -13,6 +14,7 @@ if TARGET_CORE config TCM_IBLOCK tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK" + select BLK_DEV_INTEGRITY help Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered access to Linux/Block devices using BIO diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 00867190413..1f4c794f5fc 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -52,7 +52,7 @@ static LIST_HEAD(g_tiqn_list); static LIST_HEAD(g_np_list); static DEFINE_SPINLOCK(tiqn_lock); -static DEFINE_SPINLOCK(np_lock); +static DEFINE_MUTEX(np_lock); static struct idr tiqn_idr; struct idr sess_idr; @@ -300,13 +300,16 @@ bool iscsit_check_np_match( port = ntohs(sock_in->sin_port); } - if ((ip_match == true) && (np->np_port == port) && + if (ip_match && (np->np_port == port) && (np->np_network_transport == network_transport)) return true; return false; } +/* + * Called with mutex np_lock held + */ static struct iscsi_np *iscsit_get_np( struct __kernel_sockaddr_storage *sockaddr, int network_transport) @@ -314,29 +317,26 @@ static struct iscsi_np *iscsit_get_np( struct iscsi_np *np; bool match; - spin_lock_bh(&np_lock); list_for_each_entry(np, &g_np_list, np_list) { - spin_lock(&np->np_thread_lock); + spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { - spin_unlock(&np->np_thread_lock); + spin_unlock_bh(&np->np_thread_lock); continue; } match = iscsit_check_np_match(sockaddr, np, network_transport); - if (match == true) { + if (match) { /* * Increment the np_exports reference count now to * prevent iscsit_del_np() below from being called * while iscsi_tpg_add_network_portal() is called. */ np->np_exports++; - spin_unlock(&np->np_thread_lock); - spin_unlock_bh(&np_lock); + spin_unlock_bh(&np->np_thread_lock); return np; } - spin_unlock(&np->np_thread_lock); + spin_unlock_bh(&np->np_thread_lock); } - spin_unlock_bh(&np_lock); return NULL; } @@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np( struct sockaddr_in6 *sock_in6; struct iscsi_np *np; int ret; + + mutex_lock(&np_lock); + /* * Locate the existing struct iscsi_np if already active.. */ np = iscsit_get_np(sockaddr, network_transport); - if (np) + if (np) { + mutex_unlock(&np_lock); return np; + } np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); if (!np) { pr_err("Unable to allocate memory for struct iscsi_np\n"); + mutex_unlock(&np_lock); return ERR_PTR(-ENOMEM); } @@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np( ret = iscsi_target_setup_login_socket(np, sockaddr); if (ret != 0) { kfree(np); + mutex_unlock(&np_lock); return ERR_PTR(ret); } @@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np( pr_err("Unable to create kthread: iscsi_np\n"); ret = PTR_ERR(np->np_thread); kfree(np); + mutex_unlock(&np_lock); return ERR_PTR(ret); } /* @@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np( * point because iscsi_np has not been added to g_np_list yet. 
*/ np->np_exports = 1; + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; - spin_lock_bh(&np_lock); list_add_tail(&np->np_list, &g_np_list); - spin_unlock_bh(&np_lock); + mutex_unlock(&np_lock); pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", np->np_ip, np->np_port, np->np_transport->name); @@ -452,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np) spin_lock_bh(&np->np_thread_lock); np->np_exports--; if (np->np_exports) { + np->enabled = true; spin_unlock_bh(&np->np_thread_lock); return 0; } @@ -470,9 +479,9 @@ int iscsit_del_np(struct iscsi_np *np) np->np_transport->iscsit_free_np(np); - spin_lock_bh(&np_lock); + mutex_lock(&np_lock); list_del(&np->np_list); - spin_unlock_bh(&np_lock); + mutex_unlock(&np_lock); pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", np->np_ip, np->np_port, np->np_transport->name); @@ -491,6 +500,23 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) return 0; } +static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); + + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + __iscsit_free_cmd(cmd, scsi_cmd, true); +} + +static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) +{ + return TARGET_PROT_NORMAL; +} + static struct iscsit_transport iscsi_target_transport = { .name = "iSCSI/TCP", .transport_type = ISCSI_TCP, @@ -505,6 +531,8 @@ static struct iscsit_transport iscsi_target_transport = { .iscsit_response_queue = iscsit_response_queue, .iscsit_queue_data_in = iscsit_queue_rsp, .iscsit_queue_status = iscsit_queue_rsp, + .iscsit_aborted_task = iscsit_aborted_task, + .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops, }; static int __init iscsi_target_init_module(void) @@ -622,7 +650,7 @@ static int iscsit_add_reject( { struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) return -1; @@ -777,7 +805,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) spin_unlock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); iscsit_free_cmd(cmd, false); } } @@ -1093,7 +1121,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, /* * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. 
*/ - if (dump_payload == true) + if (dump_payload) goto after_immediate_data; immed_ret = iscsit_handle_immediate_data(cmd, hdr, @@ -1281,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, if (cmd->data_direction != DMA_TO_DEVICE) { pr_err("Command ITT: 0x%08x received DataOUT for a" " NON-WRITE command.\n", cmd->init_task_tag); - return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); + return iscsit_dump_data_payload(conn, payload_length, 1); } se_cmd = &cmd->se_cmd; iscsit_mod_dataout_timer(cmd); @@ -1495,6 +1523,16 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, { u32 payload_length = ntoh24(hdr->dlength); + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { + pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n"); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + } + if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { pr_err("NOPOUT ITT is reserved, but Immediate Bit is" " not set, protocol error.\n"); @@ -1556,7 +1594,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * Initiator is expecting a NopIN ping reply.. */ if (hdr->itt != RESERVED_ITT) { - BUG_ON(!cmd); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); spin_lock_bh(&conn->cmd_lock); list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); @@ -2460,6 +2500,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) { struct iscsi_cmd *cmd; struct iscsi_conn *conn_p; + bool found = false; /* * Only send a Asynchronous Message on connections whos network @@ -2468,14 +2509,15 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { iscsit_inc_conn_usage_count(conn_p); + found = true; break; } } - if (!conn_p) + if (!found) return; - cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); if (!cmd) { iscsit_dec_conn_usage_count(conn_p); return; @@ -3348,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) #define SENDTARGETS_BUF_LIMIT 32768U -static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) +static int +iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, + enum iscsit_transport_type network_transport) { char *payload = NULL; struct iscsi_conn *conn = cmd->conn; @@ -3425,6 +3469,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_np *np = tpg_np->tpg_np; bool inaddr_any = iscsit_check_inaddr_any(np); + if (np->np_network_transport != network_transport) + continue; + if (!target_name_printed) { len = sprintf(buf, "TargetName=%s", tiqn->tiqn); @@ -3443,10 +3490,8 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) len = sprintf(buf, "TargetAddress=" "%s:%hu,%hu", - (inaddr_any == false) ? - np->np_ip : conn->local_ip, - (inaddr_any == false) ? - np->np_port : conn->local_port, + inaddr_any ? conn->local_ip : np->np_ip, + inaddr_any ? 
conn->local_port : np->np_port, tpg->tpgt); len += 1; @@ -3478,11 +3523,12 @@ eob: int iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, - struct iscsi_text_rsp *hdr) + struct iscsi_text_rsp *hdr, + enum iscsit_transport_type network_transport) { int text_length, padding; - text_length = iscsit_build_sendtargets_response(cmd); + text_length = iscsit_build_sendtargets_response(cmd, network_transport); if (text_length < 0) return text_length; @@ -3520,7 +3566,7 @@ static int iscsit_send_text_rsp( u32 tx_size = 0; int text_length, iov_count = 0, rc; - rc = iscsit_build_text_rsp(cmd, conn, hdr); + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); if (rc < 0) return rc; @@ -3700,7 +3746,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state break; case ISTATE_REMOVE: spin_lock_bh(&conn->cmd_lock); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, false); @@ -3951,7 +3997,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) switch (hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_SCSI_CMD: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; @@ -3963,28 +4009,28 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) case ISCSI_OP_NOOP_OUT: cmd = NULL; if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; } ret = iscsit_handle_nop_out(conn, cmd, buf); break; case ISCSI_OP_SCSI_TMFUNC: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); break; case ISCSI_OP_TEXT: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; ret = iscsit_handle_text_cmd(conn, cmd, buf); break; case ISCSI_OP_LOGOUT: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; @@ -4143,7 +4189,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) spin_lock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); @@ -4188,7 +4234,9 @@ int iscsit_close_connection( iscsit_stop_timers_for_cmds(conn); iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); - iscsit_free_queue_reqs_for_conn(conn); + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); /* * During Connection recovery drop unacknowledged out of order @@ -4206,6 +4254,7 @@ int iscsit_close_connection( iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_release_commands_from_conn(conn); } + iscsit_free_queue_reqs_for_conn(conn); /* * Handle decrementing session or connection usage count if diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index de77d9aa22c..ab4915c0d93 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -71,6 +71,40 @@ static void chap_gen_challenge( challenge_asciihex); } +static int chap_check_algorithm(const char *a_str) +{ + char *tmp, *orig, *token; + + tmp = kstrdup(a_str, GFP_KERNEL); 
+ if (!tmp) { + pr_err("Memory allocation failed for CHAP_A temporary buffer\n"); + return CHAP_DIGEST_UNKNOWN; + } + orig = tmp; + + token = strsep(&tmp, "="); + if (!token) + goto out; + + if (strcmp(token, "CHAP_A")) { + pr_err("Unable to locate CHAP_A key\n"); + goto out; + } + while (token) { + token = strsep(&tmp, ","); + if (!token) + goto out; + + if (!strncmp(token, "5", 1)) { + pr_debug("Selected MD5 Algorithm\n"); + kfree(orig); + return CHAP_DIGEST_MD5; + } + } +out: + kfree(orig); + return CHAP_DIGEST_UNKNOWN; +} static struct iscsi_chap *chap_server_open( struct iscsi_conn *conn, @@ -79,6 +113,7 @@ static struct iscsi_chap *chap_server_open( char *aic_str, unsigned int *aic_len) { + int ret; struct iscsi_chap *chap; if (!(auth->naf_flags & NAF_USERID_SET) || @@ -93,21 +128,24 @@ static struct iscsi_chap *chap_server_open( return NULL; chap = conn->auth_protocol; - /* - * We only support MD5 MDA presently. - */ - if (strncmp(a_str, "CHAP_A=5", 8)) { - pr_err("CHAP_A is not MD5.\n"); + ret = chap_check_algorithm(a_str); + switch (ret) { + case CHAP_DIGEST_MD5: + pr_debug("[server] Got CHAP_A=5\n"); + /* + * Send back CHAP_A set to MD5. + */ + *aic_len = sprintf(aic_str, "CHAP_A=5"); + *aic_len += 1; + chap->digest_type = CHAP_DIGEST_MD5; + pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type); + break; + case CHAP_DIGEST_UNKNOWN: + default: + pr_err("Unsupported CHAP_A value\n"); return NULL; } - pr_debug("[server] Got CHAP_A=5\n"); - /* - * Send back CHAP_A set to MD5. - */ - *aic_len = sprintf(aic_str, "CHAP_A=5"); - *aic_len += 1; - chap->digest_type = CHAP_DIGEST_MD5; - pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type); + /* * Set Identifier. */ @@ -136,7 +174,6 @@ static int chap_server_compute_md5( char *nr_out_ptr, unsigned int *nr_out_len) { - char *endptr; unsigned long id; unsigned char id_as_uchar; unsigned char digest[MD5_SIGNATURE_SIZE]; @@ -282,9 +319,14 @@ static int chap_server_compute_md5( } if (type == HEX) - id = simple_strtoul(&identifier[2], &endptr, 0); + ret = kstrtoul(&identifier[2], 0, &id); else - id = simple_strtoul(identifier, &endptr, 0); + ret = kstrtoul(identifier, 0, &id); + + if (ret < 0) { + pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret); + goto out; + } if (id > 255) { pr_err("chap identifier: %lu greater than 255\n", id); goto out; @@ -313,6 +355,20 @@ static int chap_server_compute_md5( pr_err("Unable to convert incoming challenge\n"); goto out; } + if (challenge_len > 1024) { + pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); + goto out; + } + /* + * During mutual authentication, the CHAP_C generated by the + * initiator must not match the original CHAP_C generated by + * the target. + */ + if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) { + pr_err("initiator CHAP_C matches target CHAP_C, failing" + " login attempt\n"); + goto out; + } /* * Generate CHAP_N and CHAP_R for mutual authentication. 
*/ diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h index 2f463c09626..d22f7b96a06 100644 --- a/drivers/target/iscsi/iscsi_target_auth.h +++ b/drivers/target/iscsi/iscsi_target_auth.h @@ -1,6 +1,7 @@ #ifndef _ISCSI_CHAP_H_ #define _ISCSI_CHAP_H_ +#define CHAP_DIGEST_UNKNOWN 0 #define CHAP_DIGEST_MD5 5 #define CHAP_DIGEST_SHA 6 diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 1c0088fe9e9..ae03f3e5de1 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1052,6 +1052,11 @@ TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR); */ DEF_TPG_ATTRIB(default_erl); TPG_ATTR(default_erl, S_IRUGO | S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_t10_pi + */ +DEF_TPG_ATTRIB(t10_pi); +TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR); static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_authentication.attr, @@ -1064,6 +1069,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_prod_mode_write_protect.attr, &iscsi_tpg_attrib_demo_mode_discovery.attr, &iscsi_tpg_attrib_default_erl.attr, + &iscsi_tpg_attrib_t10_pi.attr, NULL, }; @@ -1815,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd) iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); } +static void lio_aborted_task(struct se_cmd *se_cmd) +{ + struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + + cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); +} + static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; @@ -1999,6 +2012,7 @@ int iscsi_target_register_configfs(void) fabric->tf_ops.queue_data_in = &lio_queue_data_in; fabric->tf_ops.queue_status = &lio_queue_status; fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; + fabric->tf_ops.aborted_task = &lio_aborted_task; /* * Setup function pointers for generic logic in target_core_fabric_configfs.c */ diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 48f7b3bf4e8..302eb3b7871 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -58,7 +58,8 @@ #define TA_DEMO_MODE_DISCOVERY 1 #define TA_DEFAULT_ERL 0 #define TA_CACHE_CORE_NPS 0 - +/* T10 protection information disabled by default */ +#define TA_DEFAULT_T10_PI 0 #define ISCSI_IOV_DATA_BUFFER 5 @@ -556,7 +557,7 @@ struct iscsi_conn { struct completion rx_half_close_comp; /* socket used by this connection */ struct socket *sock; - void (*orig_data_ready)(struct sock *, int); + void (*orig_data_ready)(struct sock *); void (*orig_state_change)(struct sock *); #define LOGIN_FLAGS_READ_ACTIVE 1 #define LOGIN_FLAGS_CLOSED 2 @@ -765,6 +766,7 @@ struct iscsi_tpg_attrib { u32 prod_mode_write_protect; u32 demo_mode_discovery; u32 default_erl; + u8 t10_pi; struct iscsi_portal_group *tpg; }; @@ -773,6 +775,7 @@ struct iscsi_np { int np_ip_proto; int np_sock_type; enum np_thread_state_table np_thread_state; + bool enabled; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; @@ -787,6 +790,7 @@ struct iscsi_np { void *np_context; struct iscsit_transport *np_transport; struct list_head np_list; + struct iscsi_tpg_np *tpg_np; } ____cacheline_aligned; struct iscsi_tpg_np { diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 
e048d6439f4..cda4d80cfae 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -507,7 +507,9 @@ int iscsit_handle_status_snack( u32 last_statsn; int found_cmd; - if (conn->exp_statsn > begrun) { + if (!begrun) { + begrun = conn->exp_statsn; + } else if (conn->exp_statsn > begrun) { pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" " 0x%08x but already got ExpStatSN: 0x%08x on CID:" " %hu.\n", begrun, runlength, conn->exp_statsn, diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 33be1fb1df3..4ca8fd2a70d 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery( } cr = cmd->cr; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); return --cr->cmd_count; } @@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) continue; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); @@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) /* * Only perform connection recovery on ISCSI_OP_SCSI_CMD or * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call - * list_del(&cmd->i_conn_node); to release the command to the + * list_del_init(&cmd->i_conn_node); to release the command to the * session pool and remove it from the connection's list. 
* * Also stop the DataOUT timer, which will be restarted after @@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) */ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd->sess = conn->sess; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_all_datain_reqs(cmd); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index e29279e6b57..5e71ac60941 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -249,6 +249,28 @@ static void iscsi_login_set_conn_values( mutex_unlock(&auth_id_lock); } +static __printf(2, 3) int iscsi_change_param_sprintf( + struct iscsi_conn *conn, + const char *fmt, ...) +{ + va_list args; + unsigned char buf[64]; + + memset(buf, 0, sizeof buf); + + va_start(args, fmt); + vsnprintf(buf, sizeof buf, fmt, args); + va_end(args); + + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + + return 0; +} + /* * This is the leading connection of a new session, * or session reinstatement. @@ -259,6 +281,7 @@ static int iscsi_login_zero_tsih_s1( { struct iscsi_session *sess = NULL; struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; + enum target_prot_op sup_pro_ops; int ret; sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); @@ -320,8 +343,9 @@ static int iscsi_login_zero_tsih_s1( kfree(sess); return -ENOMEM; } + sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn); - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session(sup_pro_ops); if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); @@ -337,7 +361,6 @@ static int iscsi_login_zero_tsih_s2( { struct iscsi_node_attrib *na; struct iscsi_session *sess = conn->sess; - unsigned char buf[32]; bool iser = false; sess->tpg = conn->tpg; @@ -378,26 +401,16 @@ static int iscsi_login_zero_tsih_s2( * * In our case, we have already located the struct iscsi_tiqn at this point. */ - memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) return -1; - } /* * Workaround for Initiators that have broken connection recovery logic. * * "We would really like to get rid of this." 
Linux-iSCSI.org team */ - memset(buf, 0, 32); - sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl)) return -1; - } if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) return -1; @@ -409,12 +422,9 @@ static int iscsi_login_zero_tsih_s2( unsigned long mrdsl, off; int rc; - sprintf(buf, "RDMAExtensions=Yes"); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes")) return -1; - } + /* * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for * Immediate Data + Unsolicitied Data-OUT if necessary.. @@ -434,7 +444,7 @@ static int iscsi_login_zero_tsih_s2( } off = mrdsl % PAGE_SIZE; if (!off) - return 0; + goto check_prot; if (mrdsl < PAGE_SIZE) mrdsl = PAGE_SIZE; @@ -444,11 +454,25 @@ static int iscsi_login_zero_tsih_s2( pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down" " to PAGE_SIZE\n", mrdsl); - sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl)) return -1; + /* + * ISER currently requires that ImmediateData + Unsolicited + * Data be disabled when protection / signature MRs are enabled. + */ +check_prot: + if (sess->se_sess->sup_prot_ops & + (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | + TARGET_PROT_DOUT_INSERT)) { + + if (iscsi_change_param_sprintf(conn, "ImmediateData=No")) + return -1; + + if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes")) + return -1; + + pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" + " T10-PI enabled ISER session\n"); } } @@ -591,13 +615,8 @@ static int iscsi_login_non_zero_tsih_s2( * * In our case, we have already located the struct iscsi_tiqn at this point. 
*/ - memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) return -1; - } return iscsi_login_disable_FIM_keys(conn->param_list, conn); } @@ -982,6 +1001,7 @@ int iscsi_target_setup_login_socket( } np->np_transport = t; + np->enabled = true; return 0; } @@ -1125,7 +1145,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) void iscsi_target_login_sess_out(struct iscsi_conn *conn, struct iscsi_np *np, bool zero_tsih, bool new_sess) { - if (new_sess == false) + if (!new_sess) goto old_sess_out; pr_err("iSCSI Login negotiation failed.\n"); @@ -1196,7 +1216,7 @@ old_sess_out: static int __iscsi_target_login_thread(struct iscsi_np *np) { u8 *buffer, zero_tsih = 0; - int ret = 0, rc, stop; + int ret = 0, rc; struct iscsi_conn *conn = NULL; struct iscsi_login *login; struct iscsi_portal_group *tpg = NULL; @@ -1210,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; complete(&np->np_restart_comp); + } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { + spin_unlock_bh(&np->np_thread_lock); + goto exit; } else { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; } @@ -1402,10 +1425,8 @@ old_sess_out: } out: - stop = kthread_should_stop(); - /* Wait for another socket.. */ - if (!stop) - return 1; + return 1; + exit: iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); @@ -1422,7 +1443,7 @@ int iscsi_target_login_thread(void *arg) allow_signal(SIGINT); - while (!kthread_should_stop()) { + while (1) { ret = __iscsi_target_login_thread(np); /* * We break and exit here unless another sock_accept() call diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 83c965c6538..62a095f36bf 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -375,7 +375,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log return 0; } -static void iscsi_target_sk_data_ready(struct sock *sk, int count) +static void iscsi_target_sk_data_ready(struct sock *sk) { struct iscsi_conn *conn = sk->sk_user_data; bool rc; @@ -404,7 +404,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk, int count) } rc = schedule_delayed_work(&conn->login_work, 0); - if (rc == false) { + if (!rc) { pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work" " got false\n"); } @@ -513,7 +513,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) state = (tpg->tpg_state == TPG_STATE_ACTIVE); spin_unlock(&tpg->tpg_state_lock); - if (state == false) { + if (!state) { pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); iscsi_target_restore_sock_callbacks(conn); iscsi_target_login_drop(conn, login); @@ -528,7 +528,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) state = iscsi_target_sk_state_check(sk); read_unlock_bh(&sk->sk_callback_lock); - if (state == false) { + if (!state) { pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); iscsi_target_restore_sock_callbacks(conn); iscsi_target_login_drop(conn, login); @@ -773,6 +773,12 @@ static int iscsi_target_handle_csg_zero( } goto do_auth; + } else if (!payload_length) { + pr_err("Initiator 
sent zero length security payload," + " login failed\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_AUTH_FAILED); + return -1; } if (login->first_request) @@ -1192,7 +1198,7 @@ get_target: */ alloc_tags: tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); - tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS; + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 4d2e23fc76f..02f9de26f38 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -474,10 +474,10 @@ int iscsi_set_keys_to_negotiate( if (!strcmp(param->name, AUTHMETHOD)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, HEADERDIGEST)) { - if (iser == false) + if (!iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, DATADIGEST)) { - if (iser == false) + if (!iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXCONNECTIONS)) { SET_PSTATE_NEGOTIATE(param); @@ -497,7 +497,7 @@ int iscsi_set_keys_to_negotiate( } else if (!strcmp(param->name, IMMEDIATEDATA)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { - if (iser == false) + if (!iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { continue; @@ -528,13 +528,13 @@ int iscsi_set_keys_to_negotiate( } else if (!strcmp(param->name, OFMARKINT)) { SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, RDMAEXTENSIONS)) { - if (iser == true) + if (iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { - if (iser == true) + if (iser) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { - if (iser == true) + if (iser) SET_PSTATE_NEGOTIATE(param); } } @@ -1605,7 +1605,7 @@ int iscsi_decode_text_input( tmpbuf = kzalloc(length + 1, GFP_KERNEL); if (!tmpbuf) { - pr_err("Unable to allocate memory for tmpbuf.\n"); + pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); return -1; } diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 39761837608..c3cb5c15efd 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { spin_lock(&tpg->tpg_state_lock); - if (tpg->tpg_state == TPG_STATE_FREE) { + if (tpg->tpg_state != TPG_STATE_ACTIVE) { spin_unlock(&tpg->tpg_state_lock); continue; } @@ -184,10 +184,12 @@ static void iscsit_clear_tpg_np_login_thread( return; } + if (shutdown) + tpg_np->tpg_np->enabled = false; iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); } -void iscsit_clear_tpg_np_login_threads( +static void iscsit_clear_tpg_np_login_threads( struct iscsi_portal_group *tpg, bool shutdown) { @@ -225,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; a->default_erl = TA_DEFAULT_ERL; + a->t10_pi = TA_DEFAULT_T10_PI; } int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) @@ -273,8 +276,6 @@ int iscsit_tpg_del_portal_group( 
tpg->tpg_state = TPG_STATE_INACTIVE; spin_unlock(&tpg->tpg_state_lock); - iscsit_clear_tpg_np_login_threads(tpg, true); - if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { pr_err("Unable to delete iSCSI Target Portal Group:" " %hu while active sessions exist, and force=0\n", @@ -450,7 +451,7 @@ static bool iscsit_tpg_check_network_portal( match = iscsit_check_np_match(sockaddr, np, network_transport); - if (match == true) + if (match) break; } spin_unlock(&tpg->tpg_np_lock); @@ -472,7 +473,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( if (!tpg_np_parent) { if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr, - network_transport) == true) { + network_transport)) { pr_err("Network Portal: %s already exists on a" " different TPG on %s\n", ip_str, tpg->tpg_tiqn->tiqn); @@ -500,6 +501,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( init_completion(&tpg_np->tpg_np_comp); kref_init(&tpg_np->tpg_np_kref); tpg_np->tpg_np = np; + np->tpg_np = tpg_np; tpg_np->tpg = tpg; spin_lock(&tpg->tpg_np_lock); @@ -858,3 +860,22 @@ int iscsit_ta_default_erl( return 0; } + +int iscsit_ta_t10_pi( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->t10_pi = flag; + pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:" + " %s\n", tpg->tpgt, (a->t10_pi) ? + "ON" : "OFF"); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index 213c0fc7fdc..e7265337bc4 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -8,7 +8,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, struct iscsi_np *, struct iscsi_tpg_np **); extern int iscsit_get_tpg(struct iscsi_portal_group *); extern void iscsit_put_tpg(struct iscsi_portal_group *); -extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool); extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, @@ -39,5 +38,6 @@ extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); +extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); #endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 0819e688a39..fd90b28f1d9 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -152,13 +152,16 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd) * May be called from software interrupt (timer) context for allocating * iSCSI NopINs. 
*/ -struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) +struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) { struct iscsi_cmd *cmd; struct se_session *se_sess = conn->sess->se_sess; int size, tag; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); + if (tag < 0) + return NULL; + size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); memset(cmd, 0, size); @@ -702,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) } EXPORT_SYMBOL(iscsit_release_cmd); -static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, - bool check_queues) +void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, + bool check_queues) { struct iscsi_conn *conn = cmd->conn; @@ -926,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response) u8 state; struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn, TASK_RUNNING); if (!cmd) return -1; @@ -1292,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta login->login_failed = 1; iscsit_collect_login_stats(conn, status_class, status_detail); + memset(&login->rsp[0], 0, ISCSI_HDR_LEN); + hdr = (struct iscsi_login_rsp *)&login->rsp[0]; hdr->opcode = ISCSI_OP_LOGIN_RSP; hdr->status_class = status_class; diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index e4fc34a02f5..a68508c4fec 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -9,7 +9,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); -extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); +extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); @@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 1b41e677615..8c64b8776a9 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work) struct tcm_loop_hba *tl_hba; struct tcm_loop_tpg *tl_tpg; struct scatterlist *sgl_bidi = NULL; - u32 sgl_bidi_count = 0; + u32 sgl_bidi_count = 0, transfer_length; int rc; tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); @@ -212,12 +212,26 @@ static void tcm_loop_submission_work(struct work_struct *work) se_cmd->se_cmd_flags |= SCF_BIDI; } + + 
transfer_length = scsi_transfer_length(sc); + if (!scsi_prot_sg_count(sc) && + scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) { + se_cmd->prot_pto = true; + /* + * loopback transport doesn't support + * WRITE_GENERATE, READ_STRIP protection + * information operations, go ahead unprotected. + */ + transfer_length = scsi_bufflen(sc); + } + rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, - scsi_bufflen(sc), tcm_loop_sam_attr(sc), + transfer_length, tcm_loop_sam_attr(sc), sc->sc_data_direction, 0, scsi_sglist(sc), scsi_sg_count(sc), - sgl_bidi, sgl_bidi_count); + sgl_bidi, sgl_bidi_count, + scsi_prot_sglist(sc), scsi_prot_sg_count(sc)); if (rc < 0) { set_host_byte(sc, DID_NO_CONNECT); goto out_done; @@ -225,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work) return; out_done: + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); sc->scsi_done(sc); return; } @@ -462,7 +477,7 @@ static int tcm_loop_driver_probe(struct device *dev) { struct tcm_loop_hba *tl_hba; struct Scsi_Host *sh; - int error; + int error, host_prot; tl_hba = to_tcm_loop_hba(dev); @@ -486,6 +501,13 @@ static int tcm_loop_driver_probe(struct device *dev) sh->max_channel = 0; sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(sh, host_prot); + scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); + error = scsi_add_host(sh, &tl_hba->dev); if (error) { pr_err("%s: scsi_add_host failed\n", __func__); @@ -907,6 +929,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) wake_up(&tl_tmr->tl_tmr_wait); } +static void tcm_loop_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) { switch (tl_hba->tl_proto_id) { @@ -934,7 +961,7 @@ static int tcm_loop_port_link( struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; atomic_inc(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); /* * Add Linux/SCSI struct scsi_device by HCTL */ @@ -969,7 +996,7 @@ static void tcm_loop_port_unlink( scsi_device_put(sd); atomic_dec(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); } @@ -1001,7 +1028,7 @@ static int tcm_loop_make_nexus( /* * Initialize the struct se_session pointer */ - tl_nexus->se_sess = transport_init_session(); + tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL); if (IS_ERR(tl_nexus->se_sess)) { ret = PTR_ERR(tl_nexus->se_sess); goto out; @@ -1228,7 +1255,7 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = { /* Start items for tcm_loop_naa_cit */ -struct se_portal_group *tcm_loop_make_naa_tpg( +static struct se_portal_group *tcm_loop_make_naa_tpg( struct se_wwn *wwn, struct config_group *group, const char *name) @@ -1273,7 +1300,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( return &tl_tpg->tl_se_tpg; } -void tcm_loop_drop_naa_tpg( +static void tcm_loop_drop_naa_tpg( struct se_portal_group *se_tpg) { struct se_wwn *wwn = se_tpg->se_tpg_wwn; @@ -1305,7 +1332,7 @@ void tcm_loop_drop_naa_tpg( /* Start items for tcm_loop_cit */ -struct se_wwn *tcm_loop_make_scsi_hba( +static struct se_wwn *tcm_loop_make_scsi_hba( struct target_fabric_configfs *tf, struct config_group *group, const char *name) @@ -1375,7 +1402,7 @@ out: return ERR_PTR(ret); } -void 
tcm_loop_drop_scsi_hba( +static void tcm_loop_drop_scsi_hba( struct se_wwn *wwn) { struct tcm_loop_hba *tl_hba = container_of(wwn, @@ -1475,6 +1502,7 @@ static int tcm_loop_register_configfs(void) fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; fabric->tf_ops.queue_status = &tcm_loop_queue_status; fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; + fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; /* * Setup function pointers for generic logic in target_core_fabric_configfs.c diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 24884cac19c..e7e93727553 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create( return ERR_PTR(-ENOMEM); } - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { pr_err("failed to init se_session\n"); @@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) { } +static void sbp_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + static int sbp_check_stop_free(struct se_cmd *se_cmd) { struct sbp_target_request *req = container_of(se_cmd, @@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = { .queue_data_in = sbp_queue_data_in, .queue_status = sbp_queue_status, .queue_tm_rsp = sbp_queue_tm_rsp, + .aborted_task = sbp_aborted_task, .check_stop_free = sbp_check_stop_free, .fabric_make_wwn = sbp_make_tport, diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fdcee326bfb..fbc5ebb5f76 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -41,11 +41,14 @@ #include "target_core_alua.h" #include "target_core_ua.h" -static sense_reason_t core_alua_check_transition(int state, int *primary); +static sense_reason_t core_alua_check_transition(int state, int valid, + int *primary); static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, int explicit, int offline); +static char *core_alua_dump_state(int state); + static u16 alua_lu_gps_counter; static u32 alua_lu_gps_count; @@ -55,6 +58,86 @@ static LIST_HEAD(lu_gps_list); struct t10_alua_lu_gp *default_lu_gp; /* + * REPORT REFERRALS + * + * See sbc3r35 section 5.23 + */ +sense_reason_t +target_emulate_report_referrals(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct t10_alua_lba_map *map; + struct t10_alua_lba_map_member *map_mem; + unsigned char *buf; + u32 rd_len = 0, off; + + if (cmd->data_length < 4) { + pr_warn("REPORT REFERRALS allocation length %u too" + " small\n", cmd->data_length); + return TCM_INVALID_CDB_FIELD; + } + + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + off = 4; + spin_lock(&dev->t10_alua.lba_map_lock); + if (list_empty(&dev->t10_alua.lba_map_list)) { + spin_unlock(&dev->t10_alua.lba_map_lock); + transport_kunmap_data_sg(cmd); + + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + list_for_each_entry(map, &dev->t10_alua.lba_map_list, + lba_map_list) { + int desc_num = off + 3; + int pg_num; + + off += 4; + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_first_lba, &buf[off]); + off += 8; + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_last_lba, &buf[off]); + off += 8; + rd_len += 20; + pg_num = 0; + list_for_each_entry(map_mem, &map->lba_map_mem_list, + lba_map_mem_list) { + int alua_state = map_mem->lba_map_mem_alua_state; + 
int alua_pg_id = map_mem->lba_map_mem_alua_pg_id; + + if (cmd->data_length > off) + buf[off] = alua_state & 0x0f; + off += 2; + if (cmd->data_length > off) + buf[off] = (alua_pg_id >> 8) & 0xff; + off++; + if (cmd->data_length > off) + buf[off] = (alua_pg_id & 0xff); + off++; + rd_len += 4; + pg_num++; + } + if (cmd->data_length > desc_num) + buf[desc_num] = pg_num; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + + /* + * Set the RETURN DATA LENGTH set in the header of the DataIN Payload + */ + put_unaligned_be16(rd_len, &buf[2]); + + transport_kunmap_data_sg(cmd); + + target_complete_cmd(cmd, GOOD); + return 0; +} + +/* * REPORT_TARGET_PORT_GROUPS * * See spc4r17 section 6.27 @@ -210,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) unsigned char *ptr; sense_reason_t rc = TCM_NO_SENSE; u32 len = 4; /* Skip over RESERVED area in header */ - int alua_access_state, primary = 0; + int alua_access_state, primary = 0, valid_states; u16 tg_pt_id, rtpi; if (!l_port) @@ -252,6 +335,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; ptr = &buf[4]; /* Skip over RESERVED area in header */ @@ -263,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) * the state is a primary or secondary target port asymmetric * access state. */ - rc = core_alua_check_transition(alua_access_state, &primary); + rc = core_alua_check_transition(alua_access_state, + valid_states, &primary); if (rc) { /* * If the SET TARGET PORT GROUPS attempts to establish @@ -308,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) continue; atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -319,7 +404,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -370,11 +455,26 @@ out: return rc; } -static inline int core_alua_state_nonoptimized( +static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq) +{ + /* + * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; + * The ALUA additional sense code qualifier (ASCQ) is determined + * by the ALUA primary or secondary access state.. 
+ */ + pr_debug("[%s]: ALUA TG Port not available, " + "SenseKey: NOT_READY, ASC/ASCQ: " + "0x04/0x%02x\n", + cmd->se_tfo->get_fabric_name(), alua_ascq); + + cmd->scsi_asc = 0x04; + cmd->scsi_ascq = alua_ascq; +} + +static inline void core_alua_state_nonoptimized( struct se_cmd *cmd, unsigned char *cdb, - int nonop_delay_msecs, - u8 *alua_ascq) + int nonop_delay_msecs) { /* * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked @@ -383,13 +483,85 @@ static inline int core_alua_state_nonoptimized( */ cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; cmd->alua_nonop_delay = nonop_delay_msecs; +} + +static inline int core_alua_state_lba_dependent( + struct se_cmd *cmd, + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_device *dev = cmd->se_dev; + u64 segment_size, segment_mult, sectors, lba; + + /* Only need to check for cdb actually containing LBAs */ + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)) + return 0; + + spin_lock(&dev->t10_alua.lba_map_lock); + segment_size = dev->t10_alua.lba_map_segment_size; + segment_mult = dev->t10_alua.lba_map_segment_multiplier; + sectors = cmd->data_length / dev->dev_attrib.block_size; + + lba = cmd->t_task_lba; + while (lba < cmd->t_task_lba + sectors) { + struct t10_alua_lba_map *cur_map = NULL, *map; + struct t10_alua_lba_map_member *map_mem; + + list_for_each_entry(map, &dev->t10_alua.lba_map_list, + lba_map_list) { + u64 start_lba, last_lba; + u64 first_lba = map->lba_map_first_lba; + + if (segment_mult) { + u64 tmp = lba; + start_lba = do_div(tmp, segment_size * segment_mult); + + last_lba = first_lba + segment_size - 1; + if (start_lba >= first_lba && + start_lba <= last_lba) { + lba += segment_size; + cur_map = map; + break; + } + } else { + last_lba = map->lba_map_last_lba; + if (lba >= first_lba && lba <= last_lba) { + lba = last_lba + 1; + cur_map = map; + break; + } + } + } + if (!cur_map) { + spin_unlock(&dev->t10_alua.lba_map_lock); + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); + return 1; + } + list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, + lba_map_mem_list) { + if (map_mem->lba_map_mem_alua_pg_id != + tg_pt_gp->tg_pt_gp_id) + continue; + switch(map_mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_STANDBY: + spin_unlock(&dev->t10_alua.lba_map_lock); + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); + return 1; + case ALUA_ACCESS_STATE_UNAVAILABLE: + spin_unlock(&dev->t10_alua.lba_map_lock); + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); + return 1; + default: + break; + } + } + } + spin_unlock(&dev->t10_alua.lba_map_lock); return 0; } static inline int core_alua_state_standby( struct se_cmd *cmd, - unsigned char *cdb, - u8 *alua_ascq) + unsigned char *cdb) { /* * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by @@ -404,13 +576,22 @@ static inline int core_alua_state_standby( case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: + case READ_CAPACITY: return 0; + case SERVICE_ACTION_IN: + switch (cdb[1] & 0x1f) { + case SAI_READ_CAPACITY_16: + return 0; + default: + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); + return 1; + } case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); return 1; } case MAINTENANCE_OUT: @@ -418,7 +599,7 @@ static inline int core_alua_state_standby( case MO_SET_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); return 1; } case REQUEST_SENSE: @@ -428,7 +609,7 @@ static inline int 
core_alua_state_standby( case WRITE_BUFFER: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); return 1; } @@ -437,8 +618,7 @@ static inline int core_alua_state_standby( static inline int core_alua_state_unavailable( struct se_cmd *cmd, - unsigned char *cdb, - u8 *alua_ascq) + unsigned char *cdb) { /* * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by @@ -453,7 +633,7 @@ static inline int core_alua_state_unavailable( case MI_REPORT_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); return 1; } case MAINTENANCE_OUT: @@ -461,7 +641,7 @@ static inline int core_alua_state_unavailable( case MO_SET_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); return 1; } case REQUEST_SENSE: @@ -469,7 +649,7 @@ static inline int core_alua_state_unavailable( case WRITE_BUFFER: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; + set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); return 1; } @@ -478,8 +658,7 @@ static inline int core_alua_state_unavailable( static inline int core_alua_state_transition( struct se_cmd *cmd, - unsigned char *cdb, - u8 *alua_ascq) + unsigned char *cdb) { /* * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by @@ -494,7 +673,7 @@ static inline int core_alua_state_transition( case MI_REPORT_TARGET_PGS: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; + set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION); return 1; } case REQUEST_SENSE: @@ -502,7 +681,7 @@ static inline int core_alua_state_transition( case WRITE_BUFFER: return 0; default: - *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; + set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION); return 1; } @@ -524,8 +703,6 @@ target_alua_state_check(struct se_cmd *cmd) struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; int out_alua_state, nonop_delay_msecs; - u8 alua_ascq; - int ret; if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; @@ -541,9 +718,8 @@ target_alua_state_check(struct se_cmd *cmd) if (atomic_read(&port->sep_tg_pt_secondary_offline)) { pr_debug("ALUA: Got secondary offline status for local" " target port\n"); - alua_ascq = ASCQ_04H_ALUA_OFFLINE; - ret = 1; - goto out; + set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE); + return TCM_CHECK_CONDITION_NOT_READY; } /* * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the @@ -571,17 +747,23 @@ target_alua_state_check(struct se_cmd *cmd) switch (out_alua_state) { case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: - ret = core_alua_state_nonoptimized(cmd, cdb, - nonop_delay_msecs, &alua_ascq); + core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs); break; case ALUA_ACCESS_STATE_STANDBY: - ret = core_alua_state_standby(cmd, cdb, &alua_ascq); + if (core_alua_state_standby(cmd, cdb)) + return TCM_CHECK_CONDITION_NOT_READY; break; case ALUA_ACCESS_STATE_UNAVAILABLE: - ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq); + if (core_alua_state_unavailable(cmd, cdb)) + return TCM_CHECK_CONDITION_NOT_READY; break; case ALUA_ACCESS_STATE_TRANSITION: - ret = core_alua_state_transition(cmd, cdb, &alua_ascq); + if (core_alua_state_transition(cmd, cdb)) + return TCM_CHECK_CONDITION_NOT_READY; + break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + if (core_alua_state_lba_dependent(cmd, tg_pt_gp)) + return TCM_CHECK_CONDITION_NOT_READY; break; /* * OFFLINE is a secondary ALUA target port group access 
state, that is @@ -594,23 +776,6 @@ target_alua_state_check(struct se_cmd *cmd) return TCM_INVALID_CDB_FIELD; } -out: - if (ret > 0) { - /* - * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; - * The ALUA additional sense code qualifier (ASCQ) is determined - * by the ALUA primary or secondary access state.. - */ - pr_debug("[%s]: ALUA TG Port not available, " - "SenseKey: NOT_READY, ASC/ASCQ: " - "0x04/0x%02x\n", - cmd->se_tfo->get_fabric_name(), alua_ascq); - - cmd->scsi_asc = 0x04; - cmd->scsi_ascq = alua_ascq; - return TCM_CHECK_CONDITION_NOT_READY; - } - return 0; } @@ -618,17 +783,36 @@ out: * Check implicit and explicit ALUA state change request. */ static sense_reason_t -core_alua_check_transition(int state, int *primary) +core_alua_check_transition(int state, int valid, int *primary) { + /* + * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are + * defined as primary target port asymmetric access states. + */ switch (state) { case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + if (!(valid & ALUA_AO_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + if (!(valid & ALUA_AN_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_STANDBY: + if (!(valid & ALUA_S_SUP)) + goto not_supported; + *primary = 1; + break; case ALUA_ACCESS_STATE_UNAVAILABLE: - /* - * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are - * defined as primary target port asymmetric access states. - */ + if (!(valid & ALUA_U_SUP)) + goto not_supported; + *primary = 1; + break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + if (!(valid & ALUA_LBD_SUP)) + goto not_supported; *primary = 1; break; case ALUA_ACCESS_STATE_OFFLINE: @@ -636,14 +820,27 @@ core_alua_check_transition(int state, int *primary) * OFFLINE state is defined as a secondary target port * asymmetric access state. */ + if (!(valid & ALUA_O_SUP)) + goto not_supported; *primary = 0; break; + case ALUA_ACCESS_STATE_TRANSITION: + /* + * Transitioning is set internally, and + * cannot be selected manually. 
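
The core_alua_check_transition() rework above gates each requested ALUA state on a supported-states bitmask before allowing a transition. A minimal userspace sketch of that check follows; the ALUA_*_SUP bit values mirror what target_core_alua.h defines for the REPORT TARGET PORT GROUPS supported-states byte, and the 'valid' mask chosen here is an arbitrary example, not a value taken from this patch.

#include <stdio.h>

#define ALUA_AO_SUP  0x01  /* active/optimized */
#define ALUA_AN_SUP  0x02  /* active/non-optimized */
#define ALUA_S_SUP   0x04  /* standby */
#define ALUA_U_SUP   0x08  /* unavailable */
#define ALUA_LBD_SUP 0x10  /* LBA dependent */
#define ALUA_O_SUP   0x40  /* offline */

static int alua_state_supported(int valid_mask, int wanted_bit)
{
	/* same shape as the "if (!(valid & ALUA_*_SUP)) goto not_supported"
	 * checks introduced in core_alua_check_transition() */
	return (valid_mask & wanted_bit) != 0;
}

int main(void)
{
	int valid = ALUA_AO_SUP | ALUA_AN_SUP | ALUA_S_SUP;

	printf("standby allowed: %d\n",
	       alua_state_supported(valid, ALUA_S_SUP));   /* prints 1 */
	printf("lba-dependent allowed: %d\n",
	       alua_state_supported(valid, ALUA_LBD_SUP)); /* prints 0 */
	return 0;
}
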
+ */ + goto not_supported; default: pr_err("Unknown ALUA access state: 0x%02x\n", state); return TCM_INVALID_PARAMETER_LIST; } return 0; + +not_supported: + pr_err("ALUA access state %s not supported", + core_alua_dump_state(state)); + return TCM_INVALID_PARAMETER_LIST; } static char *core_alua_dump_state(int state) @@ -653,12 +850,16 @@ static char *core_alua_dump_state(int state) return "Active/Optimized"; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: return "Active/NonOptimized"; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + return "LBA Dependent"; case ALUA_ACCESS_STATE_STANDBY: return "Standby"; case ALUA_ACCESS_STATE_UNAVAILABLE: return "Unavailable"; case ALUA_ACCESS_STATE_OFFLINE: return "Offline"; + case ALUA_ACCESS_STATE_TRANSITION: + return "Transitioning"; default: return "Unknown"; } @@ -735,58 +936,49 @@ static int core_alua_write_tpg_metadata( * Called with tg_pt_gp->tg_pt_gp_md_mutex held */ static int core_alua_update_tpg_primary_metadata( - struct t10_alua_tg_pt_gp *tg_pt_gp, - int primary_state, - unsigned char *md_buf) + struct t10_alua_tg_pt_gp *tg_pt_gp) { + unsigned char *md_buf; struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; char path[ALUA_METADATA_PATH_LEN]; - int len; + int len, rc; + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } memset(path, 0, ALUA_METADATA_PATH_LEN); - len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "tg_pt_gp_id=%hu\n" "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", - tg_pt_gp->tg_pt_gp_id, primary_state, + tg_pt_gp->tg_pt_gp_id, + tg_pt_gp->tg_pt_gp_alua_pending_state, tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); - return core_alua_write_tpg_metadata(path, md_buf, len); + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(md_buf); + return rc; } -static int core_alua_do_transition_tg_pt( - struct t10_alua_tg_pt_gp *tg_pt_gp, - struct se_port *l_port, - struct se_node_acl *nacl, - unsigned char *md_buf, - int new_state, - int explicit) +static void core_alua_do_transition_tg_pt_work(struct work_struct *work) { + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; struct se_dev_entry *se_deve; struct se_lun_acl *lacl; struct se_port *port; struct t10_alua_tg_pt_gp_member *mem; - int old_state = 0; - /* - * Save the old primary ALUA access state, and set the current state - * to ALUA_ACCESS_STATE_TRANSITION. - */ - old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_TRANSITION); - tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
- ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; - /* - * Check for the optional ALUA primary state transition delay - */ - if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) - msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, @@ -807,7 +999,7 @@ static int core_alua_do_transition_tg_pt( * TARGET PORT GROUPS command */ atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_lock_bh(&port->sep_alua_lock); @@ -821,9 +1013,12 @@ static int core_alua_do_transition_tg_pt( if (!lacl) continue; - if (explicit && - (nacl != NULL) && (nacl == lacl->se_lun_nacl) && - (l_port != NULL) && (l_port == port)) + if ((tg_pt_gp->tg_pt_gp_alua_access_status == + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && + (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && + (tg_pt_gp->tg_pt_gp_alua_port != NULL) && + (tg_pt_gp->tg_pt_gp_alua_port == port)) continue; core_scsi3_ua_allocate(lacl->se_lun_nacl, @@ -834,7 +1029,7 @@ static int core_alua_do_transition_tg_pt( spin_lock(&tg_pt_gp->tg_pt_gp_lock); atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); /* @@ -851,20 +1046,102 @@ static int core_alua_do_transition_tg_pt( */ if (tg_pt_gp->tg_pt_gp_write_metadata) { mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); - core_alua_update_tpg_primary_metadata(tg_pt_gp, - new_state, md_buf); + core_alua_update_tpg_primary_metadata(tg_pt_gp); mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); } /* * Set the current primary ALUA access state to the requested new state */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + tg_pt_gp->tg_pt_gp_alua_pending_state); pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" " from primary access state %s to %s\n", (explicit) ? 
"explicit" : "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), - tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), - core_alua_dump_state(new_state)); + tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), + core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic(); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + if (tg_pt_gp->tg_pt_gp_transition_complete) + complete(tg_pt_gp->tg_pt_gp_transition_complete); +} + +static int core_alua_do_transition_tg_pt( + struct t10_alua_tg_pt_gp *tg_pt_gp, + int new_state, + int explicit) +{ + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + DECLARE_COMPLETION_ONSTACK(wait); + + /* Nothing to be done here */ + if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) + return 0; + + if (new_state == ALUA_ACCESS_STATE_TRANSITION) + return -EAGAIN; + + /* + * Flush any pending transitions + */ + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == + ALUA_ACCESS_STATE_TRANSITION) { + /* Just in case */ + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + tg_pt_gp->tg_pt_gp_transition_complete = &wait; + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + wait_for_completion(&wait); + tg_pt_gp->tg_pt_gp_transition_complete = NULL; + return 0; + } + + /* + * Save the old primary ALUA access state, and set the current state + * to ALUA_ACCESS_STATE_TRANSITION. + */ + tg_pt_gp->tg_pt_gp_alua_previous_state = + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + ALUA_ACCESS_STATE_TRANSITION); + tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; + + /* + * Check for the optional ALUA primary state transition delay + */ + if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) + msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + + /* + * Take a reference for workqueue item + */ + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + smp_mb__after_atomic(); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { + unsigned long transition_tmo; + + transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, + &tg_pt_gp->tg_pt_gp_transition_work, + transition_tmo); + } else { + tg_pt_gp->tg_pt_gp_transition_complete = &wait; + queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, + &tg_pt_gp->tg_pt_gp_transition_work, 0); + wait_for_completion(&wait); + tg_pt_gp->tg_pt_gp_transition_complete = NULL; + } return 0; } @@ -878,28 +1155,20 @@ int core_alua_do_port_transition( int explicit) { struct se_device *dev; - struct se_port *port; - struct se_node_acl *nacl; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; - unsigned char *md_buf; - int primary; + int primary, valid_states, rc = 0; - if (core_alua_check_transition(new_state, &primary) != 0) + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; + if (core_alua_check_transition(new_state, valid_states, &primary) != 0) return -EINVAL; - md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); - if (!md_buf) { - pr_err("Unable to allocate buf for ALUA metadata\n"); - return -ENOMEM; - } - local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); lu_gp = local_lu_gp_mem->lu_gp; atomic_inc(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); /* * For storage objects that are members of the 'default_lu_gp', @@ -911,12 +1180,13 @@ int core_alua_do_port_transition( * core_alua_do_transition_tg_pt() will always return * success. */ - core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, - md_buf, new_state, explicit); + l_tg_pt_gp->tg_pt_gp_alua_port = l_port; + l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; + rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, + new_state, explicit); atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_dec(); - kfree(md_buf); - return 0; + smp_mb__after_atomic(); + return rc; } /* * For all other LU groups aside from 'default_lu_gp', walk all of @@ -929,7 +1199,7 @@ int core_alua_do_port_transition( dev = lu_gp_mem->lu_gp_mem_dev; atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&lu_gp->lu_gp_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -951,44 +1221,48 @@ int core_alua_do_port_transition( continue; if (l_tg_pt_gp == tg_pt_gp) { - port = l_port; - nacl = l_nacl; + tg_pt_gp->tg_pt_gp_alua_port = l_port; + tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; } else { - port = NULL; - nacl = NULL; + tg_pt_gp->tg_pt_gp_alua_port = NULL; + tg_pt_gp->tg_pt_gp_alua_nacl = NULL; } atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); /* * core_alua_do_transition_tg_pt() will always return * success. 
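
The reworked core_alua_do_transition_tg_pt() above defers the actual state change to a delayed work item: implicit transitions are queued with an implicit_trans_secs timeout, while explicit STPG callers queue the work immediately and sleep on a completion that core_alua_do_transition_tg_pt_work() signals when done. A rough userspace analog of that handshake, using pthreads in place of the kernel's workqueue and completion APIs (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion wait_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void *transition_worker(void *arg)
{
	/* ... perform the state change, send UAs, write metadata ... */
	complete(&wait_done);      /* kernel side: complete(tg_pt_gp->...) */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, transition_worker, NULL);
	wait_for_completion(&wait_done); /* explicit STPG blocks here */
	pthread_join(worker, NULL);
	printf("transition complete\n");
	return 0;
}
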
*/ - core_alua_do_transition_tg_pt(tg_pt_gp, port, - nacl, md_buf, new_state, explicit); + rc = core_alua_do_transition_tg_pt(tg_pt_gp, + new_state, explicit); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); + if (rc) + break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&lu_gp->lu_gp_lock); atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&lu_gp->lu_gp_lock); - pr_debug("Successfully processed LU Group: %s all ALUA TG PT" - " Group IDs: %hu %s transition to primary state: %s\n", - config_item_name(&lu_gp->lu_gp_group.cg_item), - l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit", - core_alua_dump_state(new_state)); + if (!rc) { + pr_debug("Successfully processed LU Group: %s all ALUA TG PT" + " Group IDs: %hu %s transition to primary state: %s\n", + config_item_name(&lu_gp->lu_gp_group.cg_item), + l_tg_pt_gp->tg_pt_gp_id, + (explicit) ? "explicit" : "implicit", + core_alua_dump_state(new_state)); + } atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic_dec(); - kfree(md_buf); - return 0; + smp_mb__after_atomic(); + return rc; } /* @@ -996,13 +1270,18 @@ int core_alua_do_port_transition( */ static int core_alua_update_tpg_secondary_metadata( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, - struct se_port *port, - unsigned char *md_buf, - u32 md_buf_len) + struct se_port *port) { + unsigned char *md_buf; struct se_portal_group *se_tpg = port->sep_tpg; char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; - int len; + int len, rc; + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } memset(path, 0, ALUA_METADATA_PATH_LEN); memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); @@ -1014,7 +1293,7 @@ static int core_alua_update_tpg_secondary_metadata( snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); - len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" "alua_tg_pt_status=0x%02x\n", atomic_read(&port->sep_tg_pt_secondary_offline), port->sep_tg_pt_secondary_stat); @@ -1023,7 +1302,10 @@ static int core_alua_update_tpg_secondary_metadata( se_tpg->se_tpg_tfo->get_fabric_name(), wwn, port->sep_lun->unpacked_lun); - return core_alua_write_tpg_metadata(path, md_buf, len); + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(md_buf); + + return rc; } static int core_alua_set_tg_pt_secondary_state( @@ -1033,8 +1315,6 @@ static int core_alua_set_tg_pt_secondary_state( int offline) { struct t10_alua_tg_pt_gp *tg_pt_gp; - unsigned char *md_buf; - u32 md_buf_len; int trans_delay_msecs; spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1055,7 +1335,6 @@ static int core_alua_set_tg_pt_secondary_state( else atomic_set(&port->sep_tg_pt_secondary_offline, 0); - md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; port->sep_tg_pt_secondary_stat = (explicit) ? 
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; @@ -1077,23 +1356,115 @@ static int core_alua_set_tg_pt_secondary_state( * secondary state and status */ if (port->sep_tg_pt_secondary_write_md) { - md_buf = kzalloc(md_buf_len, GFP_KERNEL); - if (!md_buf) { - pr_err("Unable to allocate md_buf for" - " secondary ALUA access metadata\n"); - return -ENOMEM; - } mutex_lock(&port->sep_tg_pt_md_mutex); - core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, - md_buf, md_buf_len); + core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port); mutex_unlock(&port->sep_tg_pt_md_mutex); + } + + return 0; +} + +struct t10_alua_lba_map * +core_alua_allocate_lba_map(struct list_head *list, + u64 first_lba, u64 last_lba) +{ + struct t10_alua_lba_map *lba_map; + + lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); + if (!lba_map) { + pr_err("Unable to allocate struct t10_alua_lba_map\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&lba_map->lba_map_mem_list); + lba_map->lba_map_first_lba = first_lba; + lba_map->lba_map_last_lba = last_lba; + + list_add_tail(&lba_map->lba_map_list, list); + return lba_map; +} - kfree(md_buf); +int +core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, + int pg_id, int state) +{ + struct t10_alua_lba_map_member *lba_map_mem; + + list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, + lba_map_mem_list) { + if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { + pr_err("Duplicate pg_id %d in lba_map\n", pg_id); + return -EINVAL; + } } + lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); + if (!lba_map_mem) { + pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); + return -ENOMEM; + } + lba_map_mem->lba_map_mem_alua_state = state; + lba_map_mem->lba_map_mem_alua_pg_id = pg_id; + + list_add_tail(&lba_map_mem->lba_map_mem_list, + &lba_map->lba_map_mem_list); return 0; } +void +core_alua_free_lba_map(struct list_head *lba_list) +{ + struct t10_alua_lba_map *lba_map, *lba_map_tmp; + struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; + + list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, + lba_map_list) { + list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp, + &lba_map->lba_map_mem_list, + lba_map_mem_list) { + list_del(&lba_map_mem->lba_map_mem_list); + kmem_cache_free(t10_alua_lba_map_mem_cache, + lba_map_mem); + } + list_del(&lba_map->lba_map_list); + kmem_cache_free(t10_alua_lba_map_cache, lba_map); + } +} + +void +core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, + int segment_size, int segment_mult) +{ + struct list_head old_lba_map_list; + struct t10_alua_tg_pt_gp *tg_pt_gp; + int activate = 0, supported; + + INIT_LIST_HEAD(&old_lba_map_list); + spin_lock(&dev->t10_alua.lba_map_lock); + dev->t10_alua.lba_map_segment_size = segment_size; + dev->t10_alua.lba_map_segment_multiplier = segment_mult; + list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); + if (lba_map_list) { + list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); + activate = 1; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + + if (!tg_pt_gp->tg_pt_gp_valid_id) + continue; + supported = tg_pt_gp->tg_pt_gp_alua_supported_states; + if (activate) + supported |= ALUA_LBD_SUP; + else + supported &= ~ALUA_LBD_SUP; + tg_pt_gp->tg_pt_gp_alua_supported_states = supported; + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + 
core_alua_free_lba_map(&old_lba_map_list); +} + struct t10_alua_lu_gp * core_alua_allocate_lu_gp(const char *name, int def_group) { @@ -1346,8 +1717,9 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); + INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, + core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; - tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); /* @@ -1475,6 +1847,8 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index 88e2e835f14..0a7d65e8040 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -13,12 +13,13 @@ /* * ASYMMETRIC ACCESS STATE field * - * from spc4r17 section 6.27 Table 245 + * from spc4r36j section 6.37 Table 307 */ #define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 #define ALUA_ACCESS_STATE_STANDBY 0x2 #define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 +#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4 #define ALUA_ACCESS_STATE_OFFLINE 0xe #define ALUA_ACCESS_STATE_TRANSITION 0xf @@ -78,18 +79,30 @@ */ #define ALUA_SECONDARY_METADATA_WWN_LEN 256 +/* Used by core_alua_update_tpg_(primary,secondary)_metadata */ +#define ALUA_MD_BUF_LEN 1024 + extern struct kmem_cache *t10_alua_lu_gp_cache; extern struct kmem_cache *t10_alua_lu_gp_mem_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +extern struct kmem_cache *t10_alua_lba_map_cache; +extern struct kmem_cache *t10_alua_lba_map_mem_cache; extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); +extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); extern int core_alua_check_nonop_delay(struct se_cmd *); extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, struct se_device *, struct se_port *, struct se_node_acl *, int, int); extern char *core_alua_dump_status(int); +extern struct t10_alua_lba_map *core_alua_allocate_lba_map( + struct list_head *, u64, u64); +extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int); +extern void core_alua_free_lba_map(struct list_head *); +extern void core_alua_set_lba_map(struct se_device *, struct list_head *, + int, int); extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 272755d03e5..bf55c5a04cf 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check( pr_err("Missing tfo->queue_tm_rsp()\n"); return -EINVAL; } + if (!tfo->aborted_task) { + pr_err("Missing tfo->aborted_task()\n"); + return -EINVAL; + } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * 
tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in @@ -643,6 +647,15 @@ SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_3pc); SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(pi_prot_type); +SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB_RO(hw_pi_prot_type); +SE_DEV_ATTR_RO(hw_pi_prot_type); + +DEF_DEV_ATTRIB(pi_prot_format); +SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB(enforce_pr_isids); SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); @@ -702,6 +715,9 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_emulate_tpws.attr, &target_core_dev_attrib_emulate_caw.attr, &target_core_dev_attrib_emulate_3pc.attr, + &target_core_dev_attrib_pi_prot_type.attr, + &target_core_dev_attrib_hw_pi_prot_type.attr, + &target_core_dev_attrib_pi_prot_format.attr, &target_core_dev_attrib_enforce_pr_isids.attr, &target_core_dev_attrib_is_nonrot.attr, &target_core_dev_attrib_emulate_rest_reord.attr, @@ -1741,6 +1757,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { .store = target_core_store_alua_lu_gp, }; +static ssize_t target_core_show_dev_lba_map(void *p, char *page) +{ + struct se_device *dev = p; + struct t10_alua_lba_map *map; + struct t10_alua_lba_map_member *mem; + char *b = page; + int bl = 0; + char state; + + spin_lock(&dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + bl += sprintf(b + bl, "%u %u\n", + dev->t10_alua.lba_map_segment_size, + dev->t10_alua.lba_map_segment_multiplier); + list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { + bl += sprintf(b + bl, "%llu %llu", + map->lba_map_first_lba, map->lba_map_last_lba); + list_for_each_entry(mem, &map->lba_map_mem_list, + lba_map_mem_list) { + switch (mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + state = 'O'; + break; + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + state = 'A'; + break; + case ALUA_ACCESS_STATE_STANDBY: + state = 'S'; + break; + case ALUA_ACCESS_STATE_UNAVAILABLE: + state = 'U'; + break; + default: + state = '.'; + break; + } + bl += sprintf(b + bl, " %d:%c", + mem->lba_map_mem_alua_pg_id, state); + } + bl += sprintf(b + bl, "\n"); + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return bl; +} + +static ssize_t target_core_store_dev_lba_map( + void *p, + const char *page, + size_t count) +{ + struct se_device *dev = p; + struct t10_alua_lba_map *lba_map = NULL; + struct list_head lba_list; + char *map_entries, *ptr; + char state; + int pg_num = -1, pg; + int ret = 0, num = 0, pg_id, alua_state; + unsigned long start_lba = -1, end_lba = -1; + unsigned long segment_size = -1, segment_mult = -1; + + map_entries = kstrdup(page, GFP_KERNEL); + if (!map_entries) + return -ENOMEM; + + INIT_LIST_HEAD(&lba_list); + while ((ptr = strsep(&map_entries, "\n")) != NULL) { + if (!*ptr) + continue; + + if (num == 0) { + if (sscanf(ptr, "%lu %lu\n", + &segment_size, &segment_mult) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + num++; + continue; + } + if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing end lba\n", num); + ret = -EINVAL; + break; + } + ptr++; + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing state definitions\n", + num); + ret = -EINVAL; + break; + } + ptr++; + lba_map = core_alua_allocate_lba_map(&lba_list, 
+ start_lba, end_lba); + if (IS_ERR(lba_map)) { + ret = PTR_ERR(lba_map); + break; + } + pg = 0; + while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { + switch (state) { + case 'O': + alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; + break; + case 'A': + alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; + break; + case 'S': + alua_state = ALUA_ACCESS_STATE_STANDBY; + break; + case 'U': + alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; + break; + default: + pr_err("Invalid ALUA state '%c'\n", state); + ret = -EINVAL; + goto out; + } + + ret = core_alua_allocate_lba_map_mem(lba_map, + pg_id, alua_state); + if (ret) { + pr_err("Invalid target descriptor %d:%c " + "at line %d\n", + pg_id, state, num); + break; + } + pg++; + ptr = strchr(ptr, ' '); + if (ptr) + ptr++; + else + break; + } + if (pg_num == -1) + pg_num = pg; + else if (pg != pg_num) { + pr_err("Only %d from %d port groups definitions " + "at line %d\n", pg, pg_num, num); + ret = -EINVAL; + break; + } + num++; + } +out: + if (ret) { + core_alua_free_lba_map(&lba_list); + count = ret; + } else + core_alua_set_lba_map(dev, &lba_list, + segment_size, segment_mult); + kfree(map_entries); + return count; +} + +static struct target_core_configfs_attribute target_core_attr_dev_lba_map = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "lba_map", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_lba_map, + .store = target_core_store_dev_lba_map, +}; + static struct configfs_attribute *lio_core_dev_attrs[] = { &target_core_attr_dev_info.attr, &target_core_attr_dev_control.attr, @@ -1748,6 +1934,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = { &target_core_attr_dev_udev_path.attr, &target_core_attr_dev_enable.attr, &target_core_attr_dev_alua_lu_gp.attr, + &target_core_attr_dev_lba_map.attr, NULL, }; @@ -2040,6 +2227,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("Unable to set alua_access_state while device is" + " not configured\n"); + return -ENODEV; + } ret = kstrtoul(page, 0, &tmp); if (ret < 0) { @@ -2054,6 +2246,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( " transition while TPGS_IMPLICIT_ALUA is disabled\n"); return -EINVAL; } + if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && + new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { + /* LBA DEPENDENT is only allowed with implicit ALUA */ + pr_err("Unable to process implicit configfs ALUA transition" + " while explicit ALUA management is enabled\n"); + return -EINVAL; + } ret = core_alua_do_port_transition(tg_pt_gp, dev, NULL, NULL, new_state, 0); @@ -2188,7 +2387,7 @@ SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent, tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); -SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO); SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, tg_pt_gp_alua_supported_states, ALUA_U_SUP); @@ -2937,7 +3136,7 @@ static int __init target_core_init_configfs(void) * and ALUA Logical Unit Group and Target Port Group infrastructure. 
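
For reference, the lba_map configfs store handler shown earlier expects a first line of '<segment_size> <segment_multiplier>' followed by one line per LBA range, each carrying '<pg_id>:<state>' pairs using the O/A/S/U letters from the show routine; the segment size and multiplier feed the do_div() segment lookup in core_alua_state_lba_dependent(). The map below is invented for illustration, and only the first-line parse is demonstrated, mirroring the handler's sscanf():

#include <stdio.h>

static const char example_map[] =
	"64 1024\n"
	"0 65535 1:O 2:A\n"
	"65536 131071 1:A 2:O\n";

int main(void)
{
	unsigned long seg_size, seg_mult;

	/* same pattern as the first-line sscanf() in
	 * target_core_store_dev_lba_map() */
	if (sscanf(example_map, "%lu %lu", &seg_size, &seg_mult) == 2)
		printf("segment_size=%lu multiplier=%lu\n",
		       seg_size, seg_mult);
	return 0;
}
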
*/ target_cg = &subsys->su_group; - target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, + target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!target_cg->default_groups) { pr_err("Unable to allocate target_cg->default_groups\n"); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index d06de84b069..98da9016715 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -225,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( continue; atomic_inc(&deve->pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_irq(&nacl->device_list_lock); return deve; @@ -616,6 +616,7 @@ void core_dev_unexport( dev->export_count--; spin_unlock(&hba->device_lock); + lun->lun_sep = NULL; lun->lun_se_dev = NULL; } @@ -798,10 +799,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("emulate_write_cache not supported for pSCSI\n"); return -EINVAL; } - if (dev->transport->get_write_cache) { - pr_warn("emulate_write_cache cannot be changed when underlying" - " HW reports WriteCacheEnabled, ignoring request\n"); - return 0; + if (flag && + dev->transport->get_write_cache) { + pr_err("emulate_write_cache not supported for this device\n"); + return -EINVAL; } dev->dev_attrib.emulate_write_cache = flag; @@ -918,6 +919,94 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag) return 0; } +int se_dev_set_pi_prot_type(struct se_device *dev, int flag) +{ + int rc, old_prot = dev->dev_attrib.pi_prot_type; + + if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { + pr_err("Illegal value %d for pi_prot_type\n", flag); + return -EINVAL; + } + if (flag == 2) { + pr_err("DIF TYPE2 protection currently not supported\n"); + return -ENOSYS; + } + if (dev->dev_attrib.hw_pi_prot_type) { + pr_warn("DIF protection enabled on underlying hardware," + " ignoring\n"); + return 0; + } + if (!dev->transport->init_prot || !dev->transport->free_prot) { + /* 0 is only allowed value for non-supporting backends */ + if (flag == 0) + return 0; + + pr_err("DIF protection not supported by backend: %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + dev->dev_attrib.pi_prot_type = flag; + + if (flag && !old_prot) { + rc = dev->transport->init_prot(dev); + if (rc) { + dev->dev_attrib.pi_prot_type = old_prot; + return rc; + } + + } else if (!flag && old_prot) { + dev->transport->free_prot(dev); + } + pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); + + return 0; +} + +int se_dev_set_pi_prot_format(struct se_device *dev, int flag) +{ + int rc; + + if (!flag) + return 0; + + if (flag != 1) { + pr_err("Illegal value %d for pi_prot_format\n", flag); + return -EINVAL; + } + if (!dev->transport->format_prot) { + pr_err("DIF protection format not supported by backend %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection format requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to format SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + rc = dev->transport->format_prot(dev); + 
if (rc) + return rc; + + pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); + + return 0; +} + int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { @@ -1117,23 +1206,23 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) struct se_lun *core_dev_add_lun( struct se_portal_group *tpg, struct se_device *dev, - u32 lun) + u32 unpacked_lun) { - struct se_lun *lun_p; + struct se_lun *lun; int rc; - lun_p = core_tpg_pre_addlun(tpg, lun); - if (IS_ERR(lun_p)) - return lun_p; + lun = core_tpg_alloc_lun(tpg, unpacked_lun); + if (IS_ERR(lun)) + return lun; - rc = core_tpg_post_addlun(tpg, lun_p, + rc = core_tpg_add_lun(tpg, lun, TRANSPORT_LUNFLAGS_READ_WRITE, dev); if (rc < 0) return ERR_PTR(rc); pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), - tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); /* * Update LUN maps for dynamically added initiators when @@ -1154,7 +1243,7 @@ struct se_lun *core_dev_add_lun( spin_unlock_irq(&tpg->acl_node_lock); } - return lun_p; + return lun; } /* core_dev_del_lun(): @@ -1308,7 +1397,7 @@ int core_dev_add_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); atomic_inc(&lun->lun_acl_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&lun->lun_acl_lock); pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " @@ -1342,7 +1431,7 @@ int core_dev_del_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_del(&lacl->lacl_list); atomic_dec(&lun->lun_acl_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); spin_unlock(&lun->lun_acl_lock); core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, @@ -1420,6 +1509,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_link_magic = SE_DEV_LINK_MAGIC; dev->se_hba = hba; dev->transport = hba->transport; + dev->prot_length = sizeof(struct se_dif_v1_tuple); INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); @@ -1444,6 +1534,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) spin_lock_init(&dev->t10_pr.aptpl_reg_lock); INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); + INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); + spin_lock_init(&dev->t10_alua.lba_map_lock); dev->t10_wwn.t10_dev = dev; dev->t10_alua.t10_dev = dev; @@ -1460,6 +1552,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; dev->dev_attrib.is_nonrot = DA_IS_NONROT; dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; @@ -1588,9 +1681,13 @@ void target_free_device(struct se_device *dev) } core_alua_free_lu_gp_mem(dev); + core_alua_set_lba_map(dev, NULL, 0, 0); core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); + if (dev->transport->free_prot) + dev->transport->free_prot(dev); + dev->transport->free_device(dev); } diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index dae2ad6a669..7de9f0475d0 100644 --- 
a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -906,7 +906,7 @@ static struct config_group *target_fabric_make_lun( lun_cg->default_groups[1] = NULL; port_stat_grp = &lun->port_stat_grps.stat_group; - port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, + port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4, GFP_KERNEL); if (!port_stat_grp->default_groups) { pr_err("Unable to allocate port_stat_grp->default_groups\n"); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 78241a53b55..7d6cddaec52 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -257,6 +257,72 @@ static void fd_free_device(struct se_device *dev) kfree(fd_dev); } +static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, + int is_write) +{ + struct se_device *se_dev = cmd->se_dev; + struct fd_dev *dev = FD_DEV(se_dev); + struct file *prot_fd = dev->fd_prot_file; + struct scatterlist *sg; + loff_t pos = (cmd->t_task_lba * se_dev->prot_length); + unsigned char *buf; + u32 prot_size, len, size; + int rc, ret = 1, i; + + prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * + se_dev->prot_length; + + if (!is_write) { + fd_prot->prot_buf = vzalloc(prot_size); + if (!fd_prot->prot_buf) { + pr_err("Unable to allocate fd_prot->prot_buf\n"); + return -ENOMEM; + } + buf = fd_prot->prot_buf; + + fd_prot->prot_sg_nents = cmd->t_prot_nents; + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * + fd_prot->prot_sg_nents, GFP_KERNEL); + if (!fd_prot->prot_sg) { + pr_err("Unable to allocate fd_prot->prot_sg\n"); + vfree(fd_prot->prot_buf); + return -ENOMEM; + } + size = prot_size; + + for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { + + len = min_t(u32, PAGE_SIZE, size); + sg_set_buf(sg, buf, len); + size -= len; + buf += len; + } + } + + if (is_write) { + rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos); + if (rc < 0 || prot_size != rc) { + pr_err("kernel_write() for fd_do_prot_rw failed:" + " %d\n", rc); + ret = -EINVAL; + } + } else { + rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size); + if (rc < 0) { + pr_err("kernel_read() for fd_do_prot_rw failed:" + " %d\n", rc); + ret = -EINVAL; + } + } + + if (is_write || ret < 0) { + kfree(fd_prot->prot_sg); + vfree(fd_prot->prot_buf); + } + + return ret; +} + static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, int is_write) { @@ -551,6 +617,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) { struct se_device *dev = cmd->se_dev; + struct fd_prot fd_prot; + sense_reason_t rc; int ret = 0; /* @@ -558,8 +626,48 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, * physical memory addresses to struct iovec virtual memory. 
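
The FILEIO protection path added above keeps PI tuples in a companion '<dev_name>.protection' file, seeking to cmd->t_task_lba * se_dev->prot_length and transferring one tuple per logical block. A small sketch of that arithmetic, assuming 512-byte blocks and the 8-byte se_dif_v1_tuple that dev->prot_length is set to elsewhere in this series (both sizes are assumptions of the sketch, not new values):

#include <stdio.h>

#define BLOCK_SIZE   512u
#define PROT_LENGTH  8u  /* sizeof(struct se_dif_v1_tuple) */

int main(void)
{
	unsigned long long lba = 1024;          /* cmd->t_task_lba */
	unsigned int data_length = 64 * 1024;   /* one 64K I/O */

	/* file offset used by fd_do_prot_rw() */
	unsigned long long pos = lba * PROT_LENGTH;
	/* bytes of PI to move for this command */
	unsigned int prot_size = (data_length / BLOCK_SIZE) * PROT_LENGTH;

	printf("prot file offset: %llu, bytes: %u\n", pos, prot_size);
	return 0;
}
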
*/ if (data_direction == DMA_FROM_DEVICE) { + memset(&fd_prot, 0, sizeof(struct fd_prot)); + + if (cmd->prot_type) { + ret = fd_do_prot_rw(cmd, &fd_prot, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + ret = fd_do_rw(cmd, sgl, sgl_nents, 0); + + if (ret > 0 && cmd->prot_type) { + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, + 0, fd_prot.prot_sg, 0); + if (rc) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + return rc; + } + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + } } else { + memset(&fd_prot, 0, sizeof(struct fd_prot)); + + if (cmd->prot_type) { + u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + + ret = fd_do_prot_rw(cmd, &fd_prot, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, + 0, fd_prot.prot_sg, 0); + if (rc) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); + return rc; + } + } + ret = fd_do_rw(cmd, sgl, sgl_nents, 1); /* * Perform implicit vfs_fsync_range() for fd_do_writev() ops @@ -576,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, vfs_fsync_range(fd_dev->fd_file, start, end, 1); } + + if (ret > 0 && cmd->prot_type) { + ret = fd_do_prot_rw(cmd, &fd_prot, true); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } } - if (ret < 0) + if (ret < 0) { + kfree(fd_prot.prot_sg); + vfree(fd_prot.prot_buf); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } if (ret) target_complete_cmd(cmd, SAM_STAT_GOOD); @@ -700,6 +817,102 @@ static sector_t fd_get_blocks(struct se_device *dev) dev->dev_attrib.block_size); } +static int fd_init_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *prot_file, *file = fd_dev->fd_file; + struct inode *inode; + int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; + char buf[FD_MAX_DEV_PROT_NAME]; + + if (!file) { + pr_err("Unable to locate fd_dev->fd_file\n"); + return -ENODEV; + } + + inode = file->f_mapping->host; + if (S_ISBLK(inode->i_mode)) { + pr_err("FILEIO Protection emulation only supported on" + " !S_ISBLK\n"); + return -ENOSYS; + } + + if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) + flags &= ~O_DSYNC; + + snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection", + fd_dev->fd_dev_name); + + prot_file = filp_open(buf, flags, 0600); + if (IS_ERR(prot_file)) { + pr_err("filp_open(%s) failed\n", buf); + ret = PTR_ERR(prot_file); + return ret; + } + fd_dev->fd_prot_file = prot_file; + + return 0; +} + +static int fd_format_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *prot_fd = fd_dev->fd_prot_file; + sector_t prot_length, prot; + unsigned char *buf; + loff_t pos = 0; + int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; + int rc, ret = 0, size, len; + + if (!dev->dev_attrib.pi_prot_type) { + pr_err("Unable to format_prot while pi_prot_type == 0\n"); + return -ENODEV; + } + if (!prot_fd) { + pr_err("Unable to locate fd_dev->fd_prot_file\n"); + return -ENODEV; + } + + buf = vzalloc(unit_size); + if (!buf) { + pr_err("Unable to allocate FILEIO prot buf\n"); + return -ENOMEM; + } + prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; + size = prot_length; + + pr_debug("Using FILEIO prot_length: %llu\n", + (unsigned long long)prot_length); + + memset(buf, 0xff, unit_size); + for (prot = 0; prot < prot_length; prot += unit_size) { + len = min(unit_size, size); + rc = 
kernel_write(prot_fd, buf, len, pos); + if (rc != len) { + pr_err("vfs_write to prot file failed: %d\n", rc); + ret = -ENODEV; + goto out; + } + pos += len; + size -= len; + } + +out: + vfree(buf); + return ret; +} + +static void fd_free_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + + if (!fd_dev->fd_prot_file) + return; + + filp_close(fd_dev->fd_prot_file, NULL); + fd_dev->fd_prot_file = NULL; +} + static struct sbc_ops fd_sbc_ops = { .execute_rw = fd_execute_rw, .execute_sync_cache = fd_execute_sync_cache, @@ -730,6 +943,9 @@ static struct se_subsystem_api fileio_template = { .show_configfs_dev_params = fd_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = fd_get_blocks, + .init_prot = fd_init_prot, + .format_prot = fd_format_prot, + .free_prot = fd_free_prot, }; static int __init fileio_module_init(void) diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index d7772c16768..182cbb29503 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -4,6 +4,7 @@ #define FD_VERSION "4.0" #define FD_MAX_DEV_NAME 256 +#define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16 #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 @@ -18,6 +19,13 @@ #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 #define FDBD_HAS_BUFFERED_IO_WCE 0x04 +#define FDBD_FORMAT_UNIT_SIZE 2048 + +struct fd_prot { + unsigned char *prot_buf; + struct scatterlist *prot_sg; + u32 prot_sg_nents; +}; struct fd_dev { struct se_device dev; @@ -32,6 +40,7 @@ struct fd_dev { u32 fd_block_size; unsigned long long fd_dev_size; struct file *fd_file; + struct file *fd_prot_file; /* FILEIO HBA device is connected to */ struct fd_host *fd_host; } ____cacheline_aligned; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c87959f1276..7e6b857c6b3 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev) struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct request_queue *q; struct block_device *bd = NULL; + struct blk_integrity *bi; fmode_t mode; int ret = -ENOMEM; @@ -155,8 +156,40 @@ static int iblock_configure_device(struct se_device *dev) if (blk_queue_nonrot(q)) dev->dev_attrib.is_nonrot = 1; + bi = bdev_get_integrity(bd); + if (bi) { + struct bio_set *bs = ib_dev->ibd_bio_set; + + if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") || + !strcmp(bi->name, "T10-DIF-TYPE1-IP")) { + pr_err("IBLOCK export of blk_integrity: %s not" + " supported\n", bi->name); + ret = -ENOSYS; + goto out_blkdev_put; + } + + if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; + } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; + } + + if (dev->dev_attrib.pi_prot_type) { + if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { + pr_err("Unable to allocate bioset for PI\n"); + ret = -ENOMEM; + goto out_blkdev_put; + } + pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", + bs->bio_integrity_pool); + } + dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; + } + return 0; +out_blkdev_put: + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); out_free_bioset: bioset_free(ib_dev->ibd_bio_set); ib_dev->ibd_bio_set = NULL; @@ -172,6 +205,7 @@ static void iblock_free_device(struct se_device *dev) blkdev_put(ib_dev->ibd_bd, 
FMODE_WRITE|FMODE_READ|FMODE_EXCL); if (ib_dev->ibd_bio_set != NULL) bioset_free(ib_dev->ibd_bio_set); + kfree(ib_dev); } @@ -289,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err) * Bump the ib_bio_err_cnt and release bio. */ atomic_inc(&ibr->ib_bio_err_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } bio_put(bio); @@ -319,7 +353,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) bio->bi_bdev = ib_dev->ibd_bd; bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; - bio->bi_sector = lba; + bio->bi_iter.bi_sector = lba; return bio; } @@ -586,13 +620,58 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) return bl; } +static int +iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) +{ + struct se_device *dev = cmd->se_dev; + struct blk_integrity *bi; + struct bio_integrity_payload *bip; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct scatterlist *sg; + int i, rc; + + bi = bdev_get_integrity(ib_dev->ibd_bd); + if (!bi) { + pr_err("Unable to locate bio_integrity\n"); + return -ENODEV; + } + + bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); + if (!bip) { + pr_err("Unable to allocate bio_integrity_payload\n"); + return -ENOMEM; + } + + bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) * + dev->prot_length; + bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; + + pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, + (unsigned long long)bip->bip_iter.bi_sector); + + for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { + + rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, + sg->offset); + if (rc != sg->length) { + pr_err("bio_integrity_add_page() failed; %d\n", rc); + return -ENOMEM; + } + + pr_debug("Added bio integrity page: %p length: %d offset; %d\n", + sg_page(sg), sg->length, sg->offset); + } + + return 0; +} + static sense_reason_t iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) { struct se_device *dev = cmd->se_dev; struct iblock_req *ibr; - struct bio *bio; + struct bio *bio, *bio_start; struct bio_list list; struct scatterlist *sg; u32 sg_num = sgl_nents; @@ -655,6 +734,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (!bio) goto fail_free_ibr; + bio_start = bio; bio_list_init(&list); bio_list_add(&list, bio); @@ -688,6 +768,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, sg_num--; } + if (cmd->prot_type) { + int rc = iblock_alloc_bip(cmd, bio_start); + if (rc) + goto fail_put_bios; + } + iblock_submit_bios(&list, rw); iblock_complete_cmd(cmd); return 0; @@ -763,7 +849,7 @@ iblock_parse_cdb(struct se_cmd *cmd) return sbc_parse_cdb(cmd, &iblock_sbc_ops); } -bool iblock_get_write_cache(struct se_device *dev) +static bool iblock_get_write_cache(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct block_device *bd = ib_dev->ibd_bd; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 47b63b094cd..de9cab708f4 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -35,6 +35,8 @@ int se_dev_set_emulate_tpu(struct se_device *, int); int se_dev_set_emulate_tpws(struct se_device *, int); int se_dev_set_emulate_caw(struct se_device *, int); int se_dev_set_emulate_3pc(struct se_device *, int); +int se_dev_set_pi_prot_type(struct se_device *, int); +int se_dev_set_pi_prot_format(struct se_device *, int); 
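
The pi_prot_type plumbing declared here rides on dev->prot_length, which this series sets to sizeof(struct se_dif_v1_tuple). A sketch of that per-block tuple layout, with the field meanings for Type 1 protection; the struct and field names approximate the kernel definition and are reproduced here from memory:

#include <stdint.h>
#include <stdio.h>

struct dif_v1_tuple {
	uint16_t guard_tag;  /* CRC16-T10DIF over the data block */
	uint16_t app_tag;    /* application tag */
	uint32_t ref_tag;    /* low 32 bits of the LBA for Type 1 */
} __attribute__((packed));

int main(void)
{
	/* 8 bytes of PI accompany each logical block */
	printf("tuple size: %zu bytes\n", sizeof(struct dif_v1_tuple));
	return 0;
}
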
int se_dev_set_enforce_pr_isids(struct se_device *, int); int se_dev_set_is_nonrot(struct se_device *, int); int se_dev_set_emulate_rest_reord(struct se_device *dev, int); @@ -77,9 +79,9 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tp const char *); void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); -struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); -int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, - u32, void *); +struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); +int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, + u32, struct se_device *); struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 2f5d77932c8..df357862286 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -675,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( spin_lock(&dev->se_port_lock); list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { atomic_inc(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); spin_lock_bh(&port->sep_alua_lock); @@ -710,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( continue; atomic_inc(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_bh(&port->sep_alua_lock); /* * Grab a configfs group dependency that is released @@ -723,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( pr_err("core_scsi3_lunacl_depend" "_item() failed\n"); atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); goto out; } /* @@ -740,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( sa_res_key, all_tg_pt, aptpl); if (!pr_reg_atp) { atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_lunacl_undepend_item(deve_tmp); goto out; } @@ -755,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( spin_lock(&dev->se_port_lock); atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } spin_unlock(&dev->se_port_lock); @@ -1110,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( continue; } atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1125,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( continue; atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1155,7 +1155,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) { atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_check_implicit_release( @@ -1349,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct 
se_portal_group *tpg) &tpg->tpg_group.cg_item); atomic_dec(&tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) @@ -1369,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) if (nacl->dynamic_node_acl) { atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return; } @@ -1377,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) &nacl->acl_group.cg_item); atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) @@ -1408,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) */ if (!lun_acl) { atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); return; } nacl = lun_acl->se_lun_nacl; @@ -1418,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) &lun_acl->se_lun_group.cg_item); atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); } static sense_reason_t @@ -1552,14 +1552,14 @@ core_scsi3_decode_spec_i_port( continue; atomic_inc(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(tmp_tpg)) { pr_err(" core_scsi3_tpg_depend_item()" " for tmp_tpg\n"); atomic_dec(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; } @@ -1573,7 +1573,7 @@ core_scsi3_decode_spec_i_port( tmp_tpg, i_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } spin_unlock_irq(&tmp_tpg->acl_node_lock); @@ -1587,7 +1587,7 @@ core_scsi3_decode_spec_i_port( pr_err("configfs_depend_item() failed" " for dest_node_acl->acl_group\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_tpg_undepend_item(tmp_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; @@ -1647,7 +1647,7 @@ core_scsi3_decode_spec_i_port( pr_err("core_scsi3_lunacl_depend_item()" " failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; sense_reason_t ret = TCM_NO_SENSE; - int pr_holder = 0; + int pr_holder = 0, type; if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); @@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, ret = TCM_RESERVATION_CONFLICT; goto out; } + type = pr_reg->pr_res_type; spin_lock(&pr_tmpl->registration_lock); /* @@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, * Release the calling I_T Nexus registration now.. 
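
The REGISTER hunk above fixes a use-after-free: pr_res_type is cached into a local before __core_scsi3_free_registration() drops the registration, and pr_reg is set to NULL so the shared out: label only calls core_scsi3_put_pr_reg() on a live reference. The same cache-then-NULL pattern in a self-contained userspace sketch (the type value 6 stands in for PR_TYPE_EXCLUSIVE_ACCESS_REGONLY and is illustrative):

#include <stdio.h>
#include <stdlib.h>

struct pr_reg {
	int pr_res_type;
};

int main(void)
{
	struct pr_reg *reg = calloc(1, sizeof(*reg));
	int type;

	if (!reg)
		return 1;
	reg->pr_res_type = 6;

	type = reg->pr_res_type; /* cache before the object goes away */
	free(reg);
	reg = NULL;              /* shared cleanup must not deref reg */

	if (reg)                 /* mirrors "if (pr_reg) put(pr_reg)" */
		puts("put reference");
	printf("released type: %d\n", type);
	return 0;
}
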
*/ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); + pr_reg = NULL; /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, * RESERVATIONS RELEASED. */ if (pr_holder && - (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || - pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { + (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || + type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list, pr_reg_list) { @@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, ret = core_scsi3_update_and_write_aptpl(dev, aptpl); out: - core_scsi3_put_pr_reg(pr_reg); + if (pr_reg) + core_scsi3_put_pr_reg(pr_reg); return ret; } @@ -3165,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, continue; atomic_inc(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(dest_se_tpg)) { pr_err("core_scsi3_tpg_depend_item() failed" " for dest_se_tpg\n"); atomic_dec(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_put_pr_reg; } @@ -3270,7 +3273,7 @@ after_iport_check: initiator_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } spin_unlock_irq(&dest_se_tpg->acl_node_lock); @@ -3286,7 +3289,7 @@ after_iport_check: pr_err("core_scsi3_nodeacl_depend_item() for" " dest_node_acl\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dest_node_acl = NULL; ret = TCM_INVALID_PARAMETER_LIST; goto out; @@ -3311,7 +3314,7 @@ after_iport_check: if (core_scsi3_lunacl_depend_item(dest_se_deve)) { pr_err("core_scsi3_lunacl_depend_item() failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dest_se_deve = NULL; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out; @@ -3877,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) add_desc_len = 0; atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock(&pr_tmpl->registration_lock); /* * Determine expected length of $FABRIC_MOD specific @@ -3891,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) " out of buffer: %d\n", cmd->data_length); spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); break; } /* @@ -3953,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); /* * Set the ADDITIONAL DESCRIPTOR LENGTH */ diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index ed75cdd32cb..2ee2936fa0b 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -43,6 +43,11 @@ #define PR_APTPL_MAX_IPORT_LEN 256 #define PR_APTPL_MAX_TPORT_LEN 256 +/* + * Function defined in target_core_spc.c + */ +void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); + extern struct kmem_cache *t10_pr_reg_cache; extern void core_pr_dump_initiator_port(struct t10_pr_registration *, diff --git a/drivers/target/target_core_pscsi.c 
b/drivers/target/target_core_pscsi.c index 0f199f6a073..94d00df28f3 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1055,6 +1055,8 @@ pscsi_execute_cmd(struct se_cmd *cmd) ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto fail; } + + blk_rq_set_block_pc(req); } else { BUG_ON(!cmd->data_length); @@ -1071,7 +1073,6 @@ pscsi_execute_cmd(struct se_cmd *cmd) } } - req->cmd_type = REQ_TYPE_BLOCK_PC; req->end_io = pscsi_req_done; req->end_io_data = cmd; req->cmd_len = scsi_command_size(pt->pscsi_cdb); diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 4ffe5f2ec0e..b920db3388c 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -78,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba) hba->hba_ptr = NULL; } -/* rd_release_device_space(): - * - * - */ -static void rd_release_device_space(struct rd_dev *rd_dev) +static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 sg_table_count) { - u32 i, j, page_count = 0, sg_per_table; - struct rd_dev_sg_table *sg_table; struct page *pg; struct scatterlist *sg; + u32 i, j, page_count = 0, sg_per_table; - if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) - return; - - sg_table = rd_dev->sg_table_array; - - for (i = 0; i < rd_dev->sg_table_count; i++) { + for (i = 0; i < sg_table_count; i++) { sg = sg_table[i].sg_table; sg_per_table = sg_table[i].rd_sg_count; @@ -105,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev) page_count++; } } - kfree(sg); } + kfree(sg_table); + return page_count; +} + +static void rd_release_device_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, + rd_dev->sg_table_count); + pr_debug("CORE_RD[%u] - Released device space for Ramdisk" " Device ID: %u, pages %u in %u tables total bytes %lu\n", rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); - kfree(sg_table); rd_dev->sg_table_array = NULL; rd_dev->sg_table_count = 0; } @@ -124,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev) * * */ -static int rd_build_device_space(struct rd_dev *rd_dev) +static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 total_sg_needed, unsigned char init_payload) { - u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed; + u32 i = 0, j, page_offset = 0, sg_per_table; u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist)); - struct rd_dev_sg_table *sg_table; struct page *pg; struct scatterlist *sg; - - if (rd_dev->rd_page_count <= 0) { - pr_err("Illegal page count: %u for Ramdisk device\n", - rd_dev->rd_page_count); - return -EINVAL; - } - - /* Don't need backing pages for NULLIO */ - if (rd_dev->rd_flags & RDF_NULLIO) - return 0; - - total_sg_needed = rd_dev->rd_page_count; - - sg_tables = (total_sg_needed / max_sg_per_table) + 1; - - sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); - if (!sg_table) { - pr_err("Unable to allocate memory for Ramdisk" - " scatterlist tables\n"); - return -ENOMEM; - } - - rd_dev->sg_table_array = sg_table; - rd_dev->sg_table_count = sg_tables; + unsigned char *p; while (total_sg_needed) { sg_per_table = (total_sg_needed > max_sg_per_table) ? 
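The ramdisk refactor above factors page release and allocation into rd_release_sgl_table() and rd_allocate_sgl_table(), so the data tables and the new protection tables can share one implementation. A minimal userspace sketch of the table-sizing arithmetic both call sites use; RD_MAX_ALLOCATION_SIZE's value and the scatterlist entry size are assumptions here, not taken from the patch:

#include <stdio.h>

#define RD_MAX_ALLOCATION_SIZE	65536	/* assumed per-table allocation cap */
#define SG_ENTRY_SIZE		32	/* illustrative sizeof(struct scatterlist) */

int main(void)
{
	/* One scatterlist entry backs one PAGE_SIZE page of ramdisk space. */
	unsigned int max_sg_per_table = RD_MAX_ALLOCATION_SIZE / SG_ENTRY_SIZE;
	unsigned int total_sg_needed = 10000;	/* e.g. rd_page_count */
	unsigned int sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	printf("%u entries/table -> %u tables for %u pages\n",
	       max_sg_per_table, sg_tables, total_sg_needed);
	return 0;
}

Splitting the scatterlists into multiple tables this way keeps each kzalloc() bounded by RD_MAX_ALLOCATION_SIZE rather than scaling with the whole ramdisk.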
@@ -186,16 +166,119 @@ static int rd_build_device_space(struct rd_dev *rd_dev) } sg_assign_page(&sg[j], pg); sg[j].length = PAGE_SIZE; + + p = kmap(pg); + memset(p, init_payload, PAGE_SIZE); + kunmap(pg); } page_offset += sg_per_table; total_sg_needed -= sg_per_table; } + return 0; +} + +static int rd_build_device_space(struct rd_dev *rd_dev) +{ + struct rd_dev_sg_table *sg_table; + u32 sg_tables, total_sg_needed; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_page_count <= 0) { + pr_err("Illegal page count: %u for Ramdisk device\n", + rd_dev->rd_page_count); + return -EINVAL; + } + + /* Don't need backing pages for NULLIO */ + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + + total_sg_needed = rd_dev->rd_page_count; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk" + " scatterlist tables\n"); + return -ENOMEM; + } + + rd_dev->sg_table_array = sg_table; + rd_dev->sg_table_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00); + if (rc) + return rc; + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" - " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, - rd_dev->rd_dev_id, rd_dev->rd_page_count, - rd_dev->sg_table_count); + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_dev->sg_table_count); + + return 0; +} + +static void rd_release_prot_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, + rd_dev->sg_prot_count); + + pr_debug("CORE_RD[%u] - Released protection space for Ramdisk" + " Device ID: %u, pages %u in %u tables total bytes %lu\n", + rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, + rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); + + rd_dev->sg_prot_array = NULL; + rd_dev->sg_prot_count = 0; +} + +static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size) +{ + struct rd_dev_sg_table *sg_table; + u32 total_sg_needed, sg_tables; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + /* + * Each sector carries prot_length (8 bytes) of DIF data, so: + * tot sg needed = rd_page_count * (PAGE_SIZE / block_size) * + * (prot_length / PAGE_SIZE) + pad; + * the PAGE_SIZE factors cancel each other. 
+ */ + total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + + sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk protection" + " scatterlist tables\n"); + return -ENOMEM; + } + + rd_dev->sg_prot_array = sg_table; + rd_dev->sg_prot_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff); + if (rc) + return rc; + + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of" + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count); return 0; } @@ -278,6 +361,26 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } +static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page) +{ + struct rd_dev_sg_table *sg_table; + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + + i = page / sg_per_table; + if (i < rd_dev->sg_prot_count) { + sg_table = &rd_dev->sg_prot_array[i]; + if ((sg_table->page_start_offset <= page) && + (sg_table->page_end_offset >= page)) + return sg_table; + } + + pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n", + page); + + return NULL; +} + static sense_reason_t rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) @@ -292,6 +395,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, u32 rd_page; u32 src_len; u64 tmp; + sense_reason_t rc; if (dev->rd_flags & RDF_NULLIO) { target_complete_cmd(cmd, SAM_STAT_GOOD); @@ -314,6 +418,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, data_direction == DMA_FROM_DEVICE ? "Read" : "Write", cmd->t_task_lba, rd_size, rd_page, rd_offset); + if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { + struct rd_dev_sg_table *prot_table; + struct scatterlist *prot_sg; + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; + u32 prot_offset, prot_page; + + tmp = cmd->t_task_lba * se_dev->prot_length; + prot_offset = do_div(tmp, PAGE_SIZE); + prot_page = tmp; + + prot_table = rd_get_prot_table(dev, prot_page); + if (!prot_table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + + rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + if (rc) + return rc; + } + src_len = PAGE_SIZE - rd_offset; sg_miter_start(&m, sgl, sgl_nents, data_direction == DMA_FROM_DEVICE ? 
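The read path in the next hunk repeats the lookup the write path just performed: each LBA owns prot_length (8) bytes of DIF tuples, so the driver multiplies the starting LBA by prot_length and splits the product into a protection page index and an in-page offset, which is exactly what the do_div() call computes. A small sketch of that mapping, assuming a 4 KiB PAGE_SIZE:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096	/* assumed page size */
#define PROT_LENGTH	8	/* bytes of DIF data per sector, per the patch */

int main(void)
{
	uint64_t lba = 123456;			/* cmd->t_task_lba */
	uint64_t tmp = lba * PROT_LENGTH;	/* byte offset into protection space */
	uint32_t prot_page = tmp / PAGE_SIZE;	/* index fed to rd_get_prot_table() */
	uint32_t prot_offset = tmp % PAGE_SIZE;	/* offset within that page */

	printf("lba %llu -> prot page %u, offset %u\n",
	       (unsigned long long)lba, prot_page, prot_offset);
	return 0;
}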
@@ -375,6 +501,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } sg_miter_stop(&m); + if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { + struct rd_dev_sg_table *prot_table; + struct scatterlist *prot_sg; + u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; + u32 prot_offset, prot_page; + + tmp = cmd->t_task_lba * se_dev->prot_length; + prot_offset = do_div(tmp, PAGE_SIZE); + prot_page = tmp; + + prot_table = rd_get_prot_table(dev, prot_page); + if (!prot_table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + + rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + if (rc) + return rc; + } + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } @@ -456,6 +604,24 @@ static sector_t rd_get_blocks(struct se_device *dev) return blocks_long; } +static int rd_init_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + if (!dev->dev_attrib.pi_prot_type) + return 0; + + return rd_build_prot_space(rd_dev, dev->prot_length, + dev->dev_attrib.block_size); +} + +static void rd_free_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + rd_release_prot_space(rd_dev); +} + static struct sbc_ops rd_sbc_ops = { .execute_rw = rd_execute_rw, }; @@ -481,6 +647,8 @@ static struct se_subsystem_api rd_mcp_template = { .show_configfs_dev_params = rd_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = rd_get_blocks, + .init_prot = rd_init_prot, + .free_prot = rd_free_prot, }; int __init rd_module_init(void) diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 1789d1e1439..cc46a6a89b3 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -33,8 +33,12 @@ struct rd_dev { u32 rd_page_count; /* Number of SG tables in sg_table_array */ u32 sg_table_count; + /* Number of SG tables in sg_prot_array */ + u32 sg_prot_count; /* Array of rd_dev_sg_table_t containing scatterlists */ struct rd_dev_sg_table *sg_table_array; + /* Array of rd_dev_sg_table containing protection scatterlists */ + struct rd_dev_sg_table *sg_prot_array; /* Ramdisk HBA device is connected to */ struct rd_host *rd_host; } ____cacheline_aligned; diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 52ae54e6010..bd78d9235ac 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/ratelimit.h> +#include <linux/crc-t10dif.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> @@ -33,7 +34,7 @@ #include "target_core_internal.h" #include "target_core_ua.h" - +#include "target_core_alua.h" static sense_reason_t sbc_emulate_readcapacity(struct se_cmd *cmd) @@ -80,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd) transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8); return 0; } @@ -88,6 +89,7 @@ static sense_reason_t sbc_emulate_readcapacity_16(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; unsigned char *rbuf; unsigned char buf[32]; unsigned long long blocks = dev->transport->get_blocks(dev); @@ -105,6 +107,13 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; buf[11] = 
dev->dev_attrib.block_size & 0xff; + /* + * Set P_TYPE and PROT_EN bits for DIF support + */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type) + buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; + } if (dev->transport->get_lbppbe) buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; @@ -128,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 32); return 0; } @@ -167,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) return cmd->se_dev->dev_attrib.block_size * sectors; } -static int sbc_check_valid_sectors(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - unsigned long long end_lba; - u32 sectors; - - sectors = cmd->data_length / dev->dev_attrib.block_size; - end_lba = dev->transport->get_blocks(dev) + 1; - - if (cmd->t_task_lba + sectors > end_lba) { - pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", - cmd->t_task_lba, sectors, end_lba); - return -EINVAL; - } - - return 0; -} - static inline u32 transport_get_sectors_6(unsigned char *cdb) { /* @@ -419,13 +410,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) goto out; } - write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents, + write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, GFP_KERNEL); if (!write_sg) { pr_err("Unable to allocate compare_and_write sg\n"); ret = TCM_OUT_OF_RESOURCES; goto out; } + sg_init_table(write_sg, cmd->t_data_nents); /* * Setup verify and write data payloads from total NumberLBAs. */ @@ -563,6 +555,116 @@ sbc_compare_and_write(struct se_cmd *cmd) return TCM_NO_SENSE; } +static int +sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, + bool is_write, struct se_cmd *cmd) +{ + if (is_write) { + cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS : + TARGET_PROT_DOUT_INSERT; + switch (protect) { + case 0x0: + case 0x3: + cmd->prot_checks = 0; + break; + case 0x1: + case 0x5: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; + break; + case 0x2: + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; + break; + case 0x4: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + break; + default: + pr_err("Unsupported protect field %d\n", protect); + return -EINVAL; + } + } else { + cmd->prot_op = protect ? 
TARGET_PROT_DIN_PASS : + TARGET_PROT_DIN_STRIP; + switch (protect) { + case 0x0: + case 0x1: + case 0x5: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; + break; + case 0x2: + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; + break; + case 0x3: + cmd->prot_checks = 0; + break; + case 0x4: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + break; + default: + pr_err("Unsupported protect field %d\n", protect); + return -EINVAL; + } + } + + return 0; +} + +static bool +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, + u32 sectors, bool is_write) +{ + u8 protect = cdb[1] >> 5; + + if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto) + return true; + + switch (dev->dev_attrib.pi_prot_type) { + case TARGET_DIF_TYPE3_PROT: + cmd->reftag_seed = 0xffffffff; + break; + case TARGET_DIF_TYPE2_PROT: + if (protect) + return false; + + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE1_PROT: + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE0_PROT: + default: + return true; + } + + if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, + is_write, cmd)) + return false; + + cmd->prot_type = dev->dev_attrib.pi_prot_type; + cmd->prot_length = dev->prot_length * sectors; + + /** + * In case protection information exists over the wire + * we modify command data length to describe pure data. + * The actual transfer length is data length + protection + * length + **/ + if (protect) + cmd->data_length = sectors * dev->dev_attrib.block_size; + + pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " + "prot_op=%d prot_checks=%d\n", + __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, + cmd->prot_op, cmd->prot_checks); + + return true; +} + sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) { @@ -583,6 +685,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case READ_10: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -590,6 +696,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case READ_12: sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -597,6 +707,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case READ_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -612,6 +726,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case WRITE_VERIFY: sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -621,6 +739,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case WRITE_12: sectors = 
transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -630,6 +752,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case WRITE_16: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + + if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) + return TCM_UNSUPPORTED_SCSI_OPCODE; + if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -731,6 +857,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case SAI_READ_CAPACITY_16: cmd->execute_cmd = sbc_emulate_readcapacity_16; break; + case SAI_REPORT_REFERRALS: + cmd->execute_cmd = target_emulate_report_referrals; + break; default: pr_err("Unsupported SA: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); @@ -741,15 +870,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: - if (!ops->execute_sync_cache) { - size = 0; - cmd->execute_cmd = sbc_emulate_noop; - break; - } - - /* - * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE - */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); @@ -757,18 +877,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); } - - size = sbc_get_size(cmd, sectors); - - /* - * Check to ensure that LBA + Range does not exceed past end of - * device for IBLOCK and FILEIO ->do_sync_cache() backend calls - */ - if (cmd->t_task_lba || sectors) { - if (sbc_check_valid_sectors(cmd) < 0) - return TCM_ADDRESS_OUT_OF_RANGE; + if (ops->execute_sync_cache) { + cmd->execute_cmd = ops->execute_sync_cache; + goto check_lba; } - cmd->execute_cmd = ops->execute_sync_cache; + size = 0; + cmd->execute_cmd = sbc_emulate_noop; break; case UNMAP: if (!ops->execute_unmap) @@ -811,8 +925,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; case VERIFY: size = 0; + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->execute_cmd = sbc_emulate_noop; - break; + goto check_lba; case REZERO_UNIT: case SEEK_6: case SEEK_10: @@ -852,7 +968,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) dev->dev_attrib.hw_max_sectors); return TCM_INVALID_CDB_FIELD; } - +check_lba: end_lba = dev->transport->get_blocks(dev) + 1; if (cmd->t_task_lba + sectors > end_lba) { pr_err("cmd exceeds last lba %llu " @@ -959,3 +1075,262 @@ err: return ret; } EXPORT_SYMBOL(sbc_execute_unmap); + +void +sbc_dif_generate(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg, *psg = cmd->t_prot_sg; + sector_t sector = cmd->t_task_lba; + void *daddr, *paddr; + int i, j, offset = 0; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= psg->length) { + kunmap_atomic(paddr); + psg = sg_next(psg); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + offset = 0; + } + + sdt = paddr + offset; + sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, + dev->dev_attrib.block_size)); + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) + sdt->ref_tag = cpu_to_be32(sector 
& 0xffffffff); + sdt->app_tag = 0; + + pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + sector++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } +} + +static sense_reason_t +sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, + const void *p, sector_t sector, unsigned int ei_lba) +{ + int block_size = dev->dev_attrib.block_size; + __be16 csum; + + csum = cpu_to_be16(crc_t10dif(p, block_size)); + + if (sdt->guard_tag != csum) { + pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" + " csum 0x%04x\n", (unsigned long long)sector, + be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); + return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + } + + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && + be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { + pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" + " sector MSB: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && + be32_to_cpu(sdt->ref_tag) != ei_lba) { + pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" + " ei_lba: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), ei_lba); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + return 0; +} + +static void +sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, + struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct scatterlist *psg; + void *paddr, *addr; + unsigned int i, len, left; + unsigned int offset = sg_off; + + left = sectors * dev->prot_length; + + for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { + unsigned int psg_len, copied = 0; + + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + psg_len = min(left, psg->length); + while (psg_len) { + len = min(psg_len, sg->length - offset); + addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; + + if (read) + memcpy(paddr + copied, addr, len); + else + memcpy(addr, paddr + copied, len); + + left -= len; + offset += len; + copied += len; + psg_len -= len; + + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + } + kunmap_atomic(addr); + } + kunmap_atomic(paddr); + } +} + +sense_reason_t +sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg, *psg = cmd->t_prot_sg; + sector_t sector = start; + void *daddr, *paddr; + int i, j, offset = 0; + sense_reason_t rc; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= psg->length) { + kunmap_atomic(paddr); + psg = sg_next(psg); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + offset = 0; + } + + sdt = paddr + offset; + + pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, + ei_lba); + if (rc) { + kunmap_atomic(paddr); + kunmap_atomic(daddr); + 
cmd->bad_sector = sector; + return rc; + } + + sector++; + ei_lba++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } + sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); + + return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_write); + +static sense_reason_t +__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct se_dif_v1_tuple *sdt; + struct scatterlist *dsg, *psg = sg; + sector_t sector = start; + void *daddr, *paddr; + int i, j, offset = sg_off; + sense_reason_t rc; + + for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + paddr = kmap_atomic(sg_page(psg)) + sg->offset; + + for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + + if (offset >= psg->length) { + kunmap_atomic(paddr); + psg = sg_next(psg); + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + offset = 0; + } + + sdt = paddr + offset; + + pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + if (sdt->app_tag == cpu_to_be16(0xffff)) { + sector++; + offset += sizeof(struct se_dif_v1_tuple); + continue; + } + + rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, + ei_lba); + if (rc) { + kunmap_atomic(paddr); + kunmap_atomic(daddr); + cmd->bad_sector = sector; + return rc; + } + + sector++; + ei_lba++; + offset += sizeof(struct se_dif_v1_tuple); + } + + kunmap_atomic(paddr); + kunmap_atomic(daddr); + } + + return 0; +} + +sense_reason_t +sbc_dif_read_strip(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + u32 sectors = cmd->prot_length / dev->prot_length; + + return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, + cmd->t_prot_sg, 0); +} + +sense_reason_t +sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ + sense_reason_t rc; + + rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); + if (rc) + return rc; + + sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); + return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_read); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 021c3f4a4f0..6cd7222738f 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -71,6 +71,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) { struct se_lun *lun = cmd->se_lun; struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; /* Set RMB (removable media) for tape devices */ if (dev->transport->get_device_type(dev) == TYPE_TAPE) @@ -100,6 +101,14 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) */ if (dev->dev_attrib.emulate_3pc) buf[5] |= 0x8; + /* + * Set Protection (PROTECT) bit when DIF has been enabled on the + * device, and the transport supports VERIFY + PASS. 
+ */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type) + buf[5] |= 0x1; + } buf[7] = 0x2; /* CmdQue=1 */ @@ -120,15 +129,10 @@ static sense_reason_t spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; - u16 len = 0; + u16 len; if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { - u32 unit_serial_len; - - unit_serial_len = strlen(dev->t10_wwn.unit_serial); - unit_serial_len++; /* For NULL Terminator */ - - len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); + len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); len++; /* Extra Byte for NULL Terminator */ buf[3] = len; } @@ -267,7 +271,7 @@ check_t10_vend_desc: port = lun->lun_sep; if (port) { struct t10_alua_lu_gp *lu_gp; - u32 padding, scsi_name_len; + u32 padding, scsi_name_len, scsi_target_len; u16 lu_gp_id = 0; u16 tg_pt_gp_id = 0; u16 tpgt; @@ -365,16 +369,6 @@ check_lu_gp: * section 7.5.1 Table 362 */ check_scsi_name: - scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); - /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ - scsi_name_len += 10; - /* Check for 4-byte padding */ - padding = ((-scsi_name_len) & 3); - if (padding != 0) - scsi_name_len += padding; - /* Header size + Designation descriptor */ - scsi_name_len += 4; - buf[off] = (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x3; /* CODE SET == UTF-8 */ @@ -402,13 +396,57 @@ check_scsi_name: * shall be no larger than 256 and shall be a multiple * of four. */ + padding = ((-scsi_name_len) & 3); if (padding) scsi_name_len += padding; + if (scsi_name_len > 256) + scsi_name_len = 256; buf[off-1] = scsi_name_len; off += scsi_name_len; /* Header size + Designation descriptor */ len += (scsi_name_len + 4); + + /* + * Target device designator + */ + buf[off] = + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); + buf[off++] |= 0x3; /* CODE SET == UTF-8 */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOCIATION == target device: 10b */ + buf[off] |= 0x20; + /* DESIGNATOR TYPE == SCSI name string */ + buf[off++] |= 0x8; + off += 2; /* Skip over Reserved and length */ + /* + * SCSI name string identifier containing $FABRIC_MOD + * dependent information. For LIO-Target and iSCSI + * Target Port, this means "<iSCSI name>" in + * UTF-8 encoding. + */ + scsi_target_len = sprintf(&buf[off], "%s", + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); + scsi_target_len += 1 /* Include NULL terminator */; + /* + * The null-terminated, null-padded (see 4.4.2) SCSI + * NAME STRING field contains a UTF-8 format string. + * The number of bytes in the SCSI NAME STRING field + * (i.e., the value in the DESIGNATOR LENGTH field) + * shall be no larger than 256 and shall be a multiple + * of four. + */ + padding = ((-scsi_target_len) & 3); + if (padding) + scsi_target_len += padding; + if (scsi_target_len > 256) + scsi_target_len = 256; + + buf[off-1] = scsi_target_len; + off += scsi_target_len; + + /* Header size + Designation descriptor */ + len += (scsi_target_len + 4); } buf[2] = ((len >> 8) & 0xff); buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ @@ -434,14 +472,31 @@ static sense_reason_t spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; buf[3] = 0x3c; + /* + * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK + * only for TYPE3 protection. 
+ */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) + buf[4] = 0x5; + else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) + buf[4] = 0x4; + } + /* Set HEADSUP, ORDSUP, SIMPSUP */ buf[5] = 0x07; /* If WriteCache emulation is enabled, set V_SUP */ if (spc_check_dev_wce(dev)) buf[6] = 0x01; + /* If an LBA map is present set R_SUP */ + spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + buf[8] = 0x10; + spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock); return 0; } @@ -600,6 +655,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) return 0; } +/* Referrals VPD page */ +static sense_reason_t +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = 0x0c; + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]); + + return 0; +} + static sense_reason_t spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); @@ -614,6 +683,7 @@ static struct { { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, + { .page = 0xb3, .emulate = spc_emulate_evpd_b3 }, }; /* supported vital product data pages */ @@ -643,11 +713,16 @@ spc_emulate_inquiry(struct se_cmd *cmd) struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; unsigned char *rbuf; unsigned char *cdb = cmd->t_task_cdb; - unsigned char buf[SE_INQUIRY_BUF]; + unsigned char *buf; sense_reason_t ret; int p; + int len = 0; - memset(buf, 0, SE_INQUIRY_BUF); + buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate response buffer for INQUIRY\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } if (dev == tpg->tpg_virt_lun0.lun_se_dev) buf[0] = 0x3f; /* Not connected */ @@ -663,6 +738,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) } ret = spc_emulate_inquiry_std(cmd, buf); + len = buf[4] + 5; goto out; } @@ -670,6 +746,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) if (cdb[2] == evpd_handlers[p].page) { buf[1] = cdb[2]; ret = evpd_handlers[p].emulate(cmd, buf); + len = get_unaligned_be16(&buf[2]) + 4; goto out; } } @@ -680,16 +757,17 @@ spc_emulate_inquiry(struct se_cmd *cmd) out: rbuf = transport_kmap_data_sg(cmd); if (rbuf) { - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length)); transport_kunmap_data_sg(cmd); } + kfree(buf); if (!ret) - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, len); return ret; } -static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p) { p[0] = 0x01; p[1] = 0x0a; @@ -702,8 +780,11 @@ out: return 12; } -static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p) { + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + p[0] = 0x0a; p[1] = 0x0a; @@ -785,6 +866,21 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p) * status (see SAM-4). */ p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; + /* + * From spc4r30, section 7.5.7 Control mode page + * + * Application Tag Owner (ATO) bit set to one. 
+ * + * If the ATO bit is set to one the device server shall not modify the + * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection + * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE + * TAG field. + */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type) + p[5] |= 0x80; + } + p[8] = 0xff; p[9] = 0xff; p[11] = 30; @@ -793,8 +889,10 @@ out: return 12; } -static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) { + struct se_device *dev = cmd->se_dev; + p[0] = 0x08; p[1] = 0x12; @@ -810,7 +908,7 @@ out: return 20; } -static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p) +static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p) { p[0] = 0x1c; p[1] = 0x0a; @@ -826,7 +924,7 @@ out: static struct { uint8_t page; uint8_t subpage; - int (*emulate)(struct se_device *, u8, unsigned char *); + int (*emulate)(struct se_cmd *, u8, unsigned char *); } modesense_handlers[] = { { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery }, { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching }, @@ -964,7 +1062,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) * the only two possibilities). */ if ((modesense_handlers[i].subpage & ~subpage) == 0) { - ret = modesense_handlers[i].emulate(dev, pc, &buf[length]); + ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]); if (!ten && length + ret >= 255) break; length += ret; @@ -977,7 +1075,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) if (modesense_handlers[i].page == page && modesense_handlers[i].subpage == subpage) { - length += modesense_handlers[i].emulate(dev, pc, &buf[length]); + length += modesense_handlers[i].emulate(cmd, pc, &buf[length]); goto set_length; } @@ -1003,13 +1101,12 @@ set_length: transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, length); return 0; } static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; bool ten = cdb[0] == MODE_SELECT_10; int off = ten ? 
8 : 4; @@ -1045,7 +1142,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) if (modesense_handlers[i].page == page && modesense_handlers[i].subpage == subpage) { memset(tbuf, 0, SE_MODE_PAGE_BUF); - length = modesense_handlers[i].emulate(dev, 0, tbuf); + length = modesense_handlers[i].emulate(cmd, 0, tbuf); goto check_contents; } @@ -1180,7 +1277,7 @@ done: buf[3] = (lun_count & 0xff); transport_kunmap_data_sg(cmd); - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); return 0; } EXPORT_SYMBOL(spc_emulate_report_luns); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 70c638f730a..f7cd95e8111 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort( struct se_cmd *cmd, int tas) { + bool remove = true; /* * TASK ABORTED status (TAS) bit support */ if ((tmr_nacl && - (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) + (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { + remove = false; transport_send_task_abort(cmd); + } - transport_cmd_finish_abort(cmd, 0); + transport_cmd_finish_abort(cmd, remove); } static int target_check_cdb_and_preempt(struct list_head *list, @@ -127,6 +130,11 @@ void core_tmr_abort_task( if (dev != se_cmd->se_dev) continue; + + /* skip se_cmd associated with tmr */ + if (tmr->task_cmd == se_cmd) + continue; + ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); if (tmr->ref_task_tag != ref_tag) continue; @@ -150,18 +158,9 @@ void core_tmr_abort_task( cancel_work_sync(&se_cmd->work); transport_wait_for_tasks(se_cmd); - /* - * Now send SAM_STAT_TASK_ABORTED status for the referenced - * se_cmd descriptor.. - */ - transport_send_task_abort(se_cmd); - /* - * Also deal with possible extra acknowledge reference.. 
- */ - if (se_cmd->se_cmd_flags & SCF_ACK_KREF) - target_put_sess_cmd(se_sess, se_cmd); target_put_sess_cmd(se_sess, se_cmd); + transport_cmd_finish_abort(se_cmd, true); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %d\n", ref_tag); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 2a573de19a9..c036595b17c 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -656,7 +656,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) spin_lock_init(&lun->lun_sep_lock); init_completion(&lun->lun_ref_comp); - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev); if (ret < 0) return ret; @@ -781,7 +781,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) } EXPORT_SYMBOL(core_tpg_deregister); -struct se_lun *core_tpg_pre_addlun( +struct se_lun *core_tpg_alloc_lun( struct se_portal_group *tpg, u32 unpacked_lun) { @@ -811,11 +811,11 @@ struct se_lun *core_tpg_pre_addlun( return lun; } -int core_tpg_post_addlun( +int core_tpg_add_lun( struct se_portal_group *tpg, struct se_lun *lun, u32 lun_access, - void *lun_ptr) + struct se_device *dev) { int ret; @@ -823,7 +823,7 @@ int core_tpg_post_addlun( if (ret < 0) return ret; - ret = core_dev_export(lun_ptr, tpg, lun); + ret = core_dev_export(dev, tpg, lun); if (ret < 0) { percpu_ref_cancel_init(&lun->lun_ref); return ret; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 91953da0f62..7fa62fc93e0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +struct kmem_cache *t10_alua_lba_map_cache; +struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, @@ -128,14 +130,36 @@ int init_se_kmem_caches(void) "mem_t failed\n"); goto out_free_tg_pt_gp_cache; } + t10_alua_lba_map_cache = kmem_cache_create( + "t10_alua_lba_map_cache", + sizeof(struct t10_alua_lba_map), + __alignof__(struct t10_alua_lba_map), 0, NULL); + if (!t10_alua_lba_map_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_" + "cache failed\n"); + goto out_free_tg_pt_gp_mem_cache; + } + t10_alua_lba_map_mem_cache = kmem_cache_create( + "t10_alua_lba_map_mem_cache", + sizeof(struct t10_alua_lba_map_member), + __alignof__(struct t10_alua_lba_map_member), 0, NULL); + if (!t10_alua_lba_map_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" + "cache failed\n"); + goto out_free_lba_map_cache; + } target_completion_wq = alloc_workqueue("target_completion", WQ_MEM_RECLAIM, 0); if (!target_completion_wq) - goto out_free_tg_pt_gp_mem_cache; + goto out_free_lba_map_mem_cache; return 0; +out_free_lba_map_mem_cache: + kmem_cache_destroy(t10_alua_lba_map_mem_cache); +out_free_lba_map_cache: + kmem_cache_destroy(t10_alua_lba_map_cache); out_free_tg_pt_gp_mem_cache: kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); out_free_tg_pt_gp_cache: @@ -164,6 +188,8 @@ void release_se_kmem_caches(void) kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); + kmem_cache_destroy(t10_alua_lba_map_cache); + kmem_cache_destroy(t10_alua_lba_map_mem_cache); 
} /* This code ensures unique mib indexes are handed out. */ @@ -209,7 +235,7 @@ void transport_subsystem_check_init(void) sub_api_initialized = 1; } -struct se_session *transport_init_session(void) +struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) { struct se_session *se_sess; @@ -225,6 +251,7 @@ struct se_session *transport_init_session(void) INIT_LIST_HEAD(&se_sess->sess_wait_list); spin_lock_init(&se_sess->sess_cmd_lock); kref_init(&se_sess->sess_kref); + se_sess->sup_prot_ops = sup_prot_ops; return se_sess; } @@ -262,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess, EXPORT_SYMBOL(transport_alloc_session_tags); struct se_session *transport_init_session_tags(unsigned int tag_num, - unsigned int tag_size) + unsigned int tag_size, + enum target_prot_op sup_prot_ops) { struct se_session *se_sess; int rc; - se_sess = transport_init_session(); + se_sess = transport_init_session(sup_prot_ops); if (IS_ERR(se_sess)) return se_sess; @@ -476,7 +504,7 @@ void transport_deregister_session(struct se_session *se_sess) * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group * removal context. */ - if (se_nacl && comp_nacl == true) + if (se_nacl && comp_nacl) target_put_nacl(se_nacl); transport_free_session(se_sess); @@ -534,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return 1; } @@ -568,14 +596,24 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - if (!lun || !cmd->lun_ref_active) + if (!lun) return; - percpu_ref_put(&lun->lun_ref); + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); } void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) + transport_lun_remove_cmd(cmd); + /* + * Allow the fabric driver to unmap any resources before + * releasing the descriptor via TFO->release_cmd() + */ + if (remove) + cmd->se_tfo->aborted_task(cmd); + if (transport_cmd_check_stop_to_fabric(cmd)) return; if (remove) @@ -642,9 +680,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) return; } - if (!success) - cmd->transport_state |= CMD_T_FAILED; - /* * Check for case where an explicit ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion.. 
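With the change above, transport_init_session() now takes the set of DIF operations the fabric can offload and stores it as se_sess->sup_prot_ops, which the PI emulation paths later consult. A hedged sketch of a fabric session-setup path built against the new signature; the demo_* names are hypothetical, and a fabric with no hardware offload passes TARGET_PROT_NORMAL:

/*
 * Hypothetical fabric session setup; compiles in-kernel against
 * target_core_base.h / target_core_fabric.h. Error handling trimmed.
 */
struct demo_fabric_conn {
	struct se_session *se_sess;
};

static int demo_fabric_make_session(struct demo_fabric_conn *conn)
{
	struct se_session *se_sess;

	/* This fabric has no hardware DIF offload, so advertise none. */
	se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);

	conn->se_sess = se_sess;
	return 0;
}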
@@ -652,9 +687,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) if (cmd->transport_state & CMD_T_ABORTED && cmd->transport_state & CMD_T_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; - } else if (cmd->transport_state & CMD_T_FAILED) { + } else if (!success) { INIT_WORK(&cmd->work, target_complete_failure_work); } else { INIT_WORK(&cmd->work, target_complete_ok_work); @@ -668,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) } EXPORT_SYMBOL(target_complete_cmd); +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + cmd->residual_count += cmd->data_length - length; + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = cmd->data_length - length; + } + + cmd->data_length = length; + } + + target_complete_cmd(cmd, scsi_status); +} +EXPORT_SYMBOL(target_complete_cmd_with_length); + static void target_add_to_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -701,7 +753,7 @@ void target_qf_do_work(struct work_struct *work) list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); atomic_dec(&dev->dev_qf_count); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -1078,6 +1130,7 @@ void transport_init_se_cmd( init_completion(&cmd->cmd_wait_comp); init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); + kref_init(&cmd->cmd_kref); cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; @@ -1113,7 +1166,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) * Dormant to Active status. */ cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, dev->transport->name); @@ -1284,6 +1337,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, * @sgl_count: scatterlist count for unidirectional mapping * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping * @sgl_bidi_count: scatterlist count for bidirectional READ mapping + * @sgl_prot: struct scatterlist memory for protection information + * @sgl_prot_count: scatterlist count for protection information * * Returns non zero to signal active I/O shutdown failure. All other * setup exceptions will be returned as a SCSI CHECK_CONDITION response, @@ -1296,7 +1351,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags, struct scatterlist *sgl, u32 sgl_count, - struct scatterlist *sgl_bidi, u32 sgl_bidi_count) + struct scatterlist *sgl_bidi, u32 sgl_bidi_count, + struct scatterlist *sgl_prot, u32 sgl_prot_count) { struct se_portal_group *se_tpg; sense_reason_t rc; @@ -1344,6 +1400,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess transport_generic_request_failure(se_cmd, rc); return 0; } + + /* + * Save pointers for SGLs containing protection information, + * if present. 
+ */ + if (sgl_prot_count) { + se_cmd->t_prot_sg = sgl_prot; + se_cmd->t_prot_nents = sgl_prot_count; + } + /* * When a non zero sgl_count has been passed perform SGL passthrough * mapping for pre-allocated fabric memory instead of having target @@ -1380,6 +1446,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess return 0; } } + /* * Check if we need to delay processing because of ALUA * Active/NonOptimized primary access state.. @@ -1419,7 +1486,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, { return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, unpacked_lun, data_length, task_attr, data_dir, - flags, NULL, 0, NULL, 0); + flags, NULL, 0, NULL, 0, NULL, 0); } EXPORT_SYMBOL(target_submit_cmd); @@ -1565,6 +1632,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_CHECK_CONDITION_NOT_READY: + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: break; case TCM_OUT_OF_RESOURCES: sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1653,7 +1723,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) return false; case MSG_ORDERED_TAG: atomic_inc(&dev->dev_ordered_sync); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " " se_ordered_id: %u\n", @@ -1671,7 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) * For SIMPLE and UNTAGGED Task Attribute commands */ atomic_inc(&dev->simple_cmds); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); break; } @@ -1708,13 +1778,22 @@ void target_execute_cmd(struct se_cmd *cmd) cmd->se_tfo->get_task_tag(cmd)); spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; } cmd->t_state = TRANSPORT_PROCESSING; cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); + /* + * Perform WRITE_INSERT of PI using software emulation when backend + * device has PI enabled, if the transport has not already generated + * PI using hardware WRITE_INSERT offload. 
+ */ + if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) { + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) + sbc_dif_generate(cmd); + } if (target_handle_task_attr(cmd)) { spin_lock_irq(&cmd->t_state_lock); @@ -1767,7 +1846,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { atomic_dec(&dev->simple_cmds); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev->dev_cur_ordered_id: %u for" " SIMPLE: %u\n", dev->dev_cur_ordered_id, @@ -1779,7 +1858,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { atomic_dec(&dev->dev_ordered_sync); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" @@ -1838,12 +1917,27 @@ static void transport_handle_queue_full( spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc(&dev->dev_qf_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); schedule_work(&cmd->se_dev->qf_work_queue); } +static bool target_check_read_strip(struct se_cmd *cmd) +{ + sense_reason_t rc; + + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { + rc = sbc_dif_read_strip(cmd); + if (rc) { + cmd->pi_err = rc; + return true; + } + } + + return false; +} + static void target_complete_ok_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -1908,6 +2002,22 @@ static void target_complete_ok_work(struct work_struct *work) cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); + /* + * Perform READ_STRIP of PI using software emulation when + * backend had PI enabled, if the transport will not be + * performing hardware READ_STRIP offload. + */ + if (cmd->prot_op == TARGET_PROT_DIN_STRIP && + target_check_read_strip(cmd)) { + ret = transport_send_check_condition_and_sense(cmd, + cmd->pi_err, 0); + if (ret == -EAGAIN || ret == -ENOMEM) + goto queue_full; + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); @@ -2000,6 +2110,10 @@ static inline void transport_free_pages(struct se_cmd *cmd) transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = NULL; cmd->t_bidi_data_nents = 0; + + transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); + cmd->t_prot_sg = NULL; + cmd->t_prot_nents = 0; } /** @@ -2163,6 +2277,14 @@ transport_generic_new_cmd(struct se_cmd *cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } + if (cmd->prot_op != TARGET_PROT_NORMAL) { + ret = target_alloc_sgl(&cmd->t_prot_sg, + &cmd->t_prot_nents, + cmd->prot_length, true); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, cmd->data_length, zero_flag); if (ret < 0) @@ -2253,13 +2375,12 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, unsigned long flags; int ret = 0; - kref_init(&se_cmd->cmd_kref); /* * Add a second kref if the fabric caller is expecting to handle * fabric acknowledgement that requires two target_put_sess_cmd() * invocations before se_cmd descriptor release. 
-	if (ack_kref == true) {
+	if (ack_kref) {
 		kref_get(&se_cmd->cmd_kref);
 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
 	}
@@ -2303,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
+	if (!se_sess) {
+		se_cmd->se_tfo->release_cmd(se_cmd);
+		return 1;
+	}
 	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref,
 			target_release_cmd_kref, &se_sess->sess_cmd_lock);
 }
@@ -2455,6 +2580,19 @@ static int transport_get_sense_codes(
 	return 0;
 }
 
+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
+{
+	/* Place failed LBA in sense data information descriptor 0. */
+	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+	buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+	buffer[SPC_VALIDITY_OFFSET] = 0x80;
+
+	/* Descriptor Information: failing sector */
+	put_unaligned_be64(bad_sector, &buffer[12]);
+}
+
 int
 transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		sense_reason_t reason, int from_transport)
@@ -2648,6 +2786,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
 		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
 		break;
+	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK GUARD CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
 	default:
 		/* CURRENT ERROR */
@@ -2685,13 +2856,17 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	if (!(cmd->transport_state & CMD_T_ABORTED))
 		return 0;
 
-	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+	/*
+	 * If cmd has been aborted but either no status is to be sent or it has
+	 * already been sent, just return
+	 */
+	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
 		return 1;
 
 	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 	trace_target_cmd_complete(cmd);
 	cmd->se_tfo->queue_status(cmd);
@@ -2705,7 +2880,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
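All three CHECK CONDITION cases added above report sense key ILLEGAL REQUEST with ASC 0x10; only the ASCQ distinguishes which DIF field failed. Condensed, the SBC-defined mapping is (a sketch — the TCM_* names are the patch's sense_reason_t values, the helper itself is illustrative):

/* ILLEGAL REQUEST, ASC 0x10: logical block PI check failures */
static void pi_err_to_ascq(int sense_reason, unsigned char *asc,
			   unsigned char *ascq)
{
	*asc = 0x10;
	switch (sense_reason) {
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
		*ascq = 0x01;	/* guard (CRC) mismatch */
		break;
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
		*ascq = 0x02;	/* application tag mismatch */
		break;
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
		*ascq = 0x03;	/* reference tag (LBA) mismatch */
		break;
	}
}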
@@ -2720,7 +2895,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			cmd->transport_state |= CMD_T_ABORTED;
-			smp_mb__after_atomic_inc();
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+			smp_mb__after_atomic();
 			return;
 		}
 	}
@@ -2779,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)
 int transport_generic_handle_tmr(
 	struct se_cmd *cmd)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->transport_state |= CMD_T_ACTIVE;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
 	INIT_WORK(&cmd->work, target_tmr_work);
 	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
 	return 0;
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index b04467e7547..101858e245b 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -98,7 +98,6 @@ int core_scsi3_ua_allocate(
 		pr_err("Unable to allocate struct se_ua\n");
 		return -ENOMEM;
 	}
-	INIT_LIST_HEAD(&ua->ua_dev_list);
 	INIT_LIST_HEAD(&ua->ua_nacl_list);
 
 	ua->ua_nacl = nacl;
@@ -163,7 +162,7 @@ int core_scsi3_ua_allocate(
 			spin_unlock_irq(&nacl->device_list_lock);
 
 			atomic_inc(&deve->ua_count);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			return 0;
 		}
 		list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -176,7 +175,7 @@
 		asc, ascq);
 
 	atomic_inc(&deve->ua_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	return 0;
 }
 
@@ -191,7 +190,7 @@ void core_scsi3_ua_release_all(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 }
@@ -252,7 +251,7 @@ void core_scsi3_ua_for_check_condition(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
@@ -311,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6b88a9958f6..e9186cdf35e 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,10 +40,6 @@
 static struct workqueue_struct *xcopy_wq = NULL;
 
 /*
- * From target_core_spc.c
- */
-extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
-/*
  * From target_core_device.c
  */
 extern struct mutex g_device_mutex;
@@ -74,7 +70,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
 	int rc;
 
-	if (src == true)
+	if (src)
 		dev_wwn = &xop->dst_tid_wwn[0];
 	else
 		dev_wwn = &xop->src_tid_wwn[0];
@@ -92,7 +88,7 @@
 		if (rc != 0)
 			continue;
 
-		if (src == true) {
+		if (src) {
 			xop->dst_dev = se_dev;
 			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
@@ -170,7 +166,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
 		return -EINVAL;
 	}
 
-	if (src == true) {
+	if (src) {
 		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
 		/*
		 * Determine if the source designator matches the local device
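target_xcopy_locate_se_dev_e4() above resolves each side of the copy by comparing the 16-byte NAA designator taken from an EXTENDED COPY (0xe4) target descriptor against the WWN of every configured backend device. Reduced to its essentials, the matching loop looks like this — list and helper names are illustrative, not the driver's:

#include <stdint.h>
#include <string.h>

#define NAA_LEN 16	/* mirrors XCOPY_NAA_IEEE_REGEX_LEN */

struct backend_dev {
	struct backend_dev *next;
	uint8_t naa_wwn[NAA_LEN];	/* designator of this device */
};

/*
 * Return the device owning the WWN found at byte 8 of the 0xe4
 * target descriptor, or NULL if no local device matches.
 */
static struct backend_dev *xcopy_locate_dev(struct backend_dev *head,
					    const uint8_t *desc_wwn)
{
	for (struct backend_dev *dev = head; dev; dev = dev->next) {
		if (memcmp(dev->naa_wwn, desc_wwn, NAA_LEN) == 0)
			return dev;	/* becomes src_dev or dst_dev */
	}
	return NULL;
}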
@@ -240,7 +236,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 		/*
		 * Assume target descriptors are in source -> destination order..
		 */
-		if (src == true)
+		if (src)
 			src = false;
 		else
 			src = true;
@@ -564,7 +560,7 @@ static int target_xcopy_init_pt_lun(
	 * reservations.  The pt_cmd->se_lun pointer will be setup from within
	 * target_xcopy_setup_pt_port()
	 */
-	if (remote_port == false) {
+	if (!remote_port) {
 		pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
 		return 0;
 	}
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 752863acecb..a0bcfd3e7e7 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -94,20 +94,19 @@ struct ft_lun {
  */
 struct ft_tpg {
 	u32 index;
-	struct ft_lport_acl *lport_acl;
+	struct ft_lport_wwn *lport_wwn;
 	struct ft_tport *tport;		/* active tport or NULL */
-	struct list_head list;		/* linkage in ft_lport_acl tpg_list */
 	struct list_head lun_list;	/* head of LUNs */
 	struct se_portal_group se_tpg;
 	struct workqueue_struct *workqueue;
 };
 
-struct ft_lport_acl {
+struct ft_lport_wwn {
 	u64 wwpn;
 	char name[FT_NAMELEN];
-	struct list_head list;
-	struct list_head tpg_list;
-	struct se_wwn fc_lport_wwn;
+	struct list_head ft_wwn_node;
+	struct ft_tpg *tpg;
+	struct se_wwn se_wwn;
 };
 
 /*
@@ -128,7 +127,6 @@ struct ft_cmd {
 	u32 sg_cnt;			/* No. of item in scatterlist */
 };
 
-extern struct list_head ft_lport_list;
 extern struct mutex ft_lport_lock;
 extern struct fc4_prov ft_prov;
 extern struct target_fabric_configfs *ft_configfs;
@@ -163,6 +161,7 @@ int ft_write_pending_status(struct se_cmd *);
 u32 ft_get_task_tag(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
 
 /*
  * other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 479ec5621a4..be0c0d08c56 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 {
 	struct fc_frame *fp;
 	struct fc_lport *lport;
-	struct se_session *se_sess;
+	struct ft_sess *sess;
 
 	if (!cmd)
 		return;
-	se_sess = cmd->sess->se_sess;
+	sess = cmd->sess;
 	fp = cmd->req_frame;
 	lport = fr_dev(fp);
 	if (fr_seq(fp))
 		lport->tt.seq_release(fr_seq(fp));
 	fc_frame_free(fp);
-	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
-	ft_sess_put(cmd->sess);	/* undo get from lookup at recv */
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ft_sess_put(sess);	/* undo get from lookup at recv */
 }
 
 void ft_release_cmd(struct se_cmd *se_cmd)
@@ -128,6 +128,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	struct fc_lport *lport;
 	struct fc_exch *ep;
 	size_t len;
+	int rc;
 
 	if (cmd->aborted)
 		return 0;
@@ -137,9 +138,10 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
 	fp = fc_frame_alloc(lport, len);
 	if (!fp) {
-		/* XXX shouldn't just drop it - requeue and retry? */
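The removed XXX comment above marks the old behavior: on response-frame allocation failure the status was silently dropped and the initiator left to time out. The replacement below — like the matching ft_queue_data_in() change later in this patch — reports TASK SET FULL (SAM status 0x28) and returns an error so target-core requeues the response; a conforming initiator reacts to TASK SET FULL by lowering its queue depth. The pattern, distilled (types and the allocator are stand-ins for the se_cmd fields and libfc calls):

#define SAM_STAT_TASK_SET_FULL	0x28	/* SAM-defined status code */

struct cmd_status {
	unsigned char scsi_status;	/* stands in for se_cmd->scsi_status */
};

extern void *alloc_response_frame(void);	/* stand-in for fc_frame_alloc() */

static int queue_status_sketch(struct cmd_status *se_cmd)
{
	void *fp = alloc_response_frame();

	if (!fp) {
		/*
		 * Tell the initiator to back off; returning -ENOMEM
		 * makes the core requeue this response later.
		 */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}
	/* ... build and send the FCP response frame ... */
	return 0;
}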
-		return 0;
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
 	}
+
 	fcp = fc_frame_payload_get(fp, len);
 	memset(fcp, 0, len);
 	fcp->resp.fr_status = se_cmd->scsi_status;
@@ -170,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
 
-	lport->tt.seq_send(lport, cmd->seq, fp);
+	rc = lport->tt.seq_send(lport, cmd->seq, fp);
+	if (rc) {
+		pr_info_ratelimited("%s: Failed to send response frame %p, "
				    "xid <0x%x>\n", __func__, fp, ep->xid);
+		/*
+		 * Generate a TASK_SET_FULL status to notify the initiator
+		 * to reduce its queue_depth after the se_cmd response has
+		 * been re-queued by target-core.
+		 */
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
+	}
 	lport->tt.exch_done(cmd->seq);
 	return 0;
 }
@@ -426,6 +439,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
 	ft_send_resp_code(cmd, code);
 }
 
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void ft_send_work(struct work_struct *work);
 
 /*
@@ -438,7 +456,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
 	struct se_session *se_sess = sess->se_sess;
 	int tag;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 	if (tag < 0)
 		goto busy;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index c6932fb53a8..efdcb9663a1 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
 
 struct target_fabric_configfs *ft_configfs;
 
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
 DEFINE_MUTEX(ft_lport_lock);
 
 unsigned int ft_debug_logging;
@@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
 	return found;
 }
 
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
 	struct ft_node_acl *acl;
 
@@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg(
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
+	struct ft_lport_wwn *ft_wwn;
 	struct ft_tpg *tpg;
 	struct workqueue_struct *wq;
 	unsigned long index;
@@ -318,12 +318,17 @@ static struct se_portal_group *ft_add_tpg(
 	if (index > UINT_MAX)
 		return NULL;
 
-	lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+	if ((index != 1)) {
+		pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 	if (!tpg)
 		return NULL;
 	tpg->index = index;
-	tpg->lport_acl = lacl;
+	tpg->lport_wwn = ft_wwn;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
 	wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@ static struct se_portal_group *ft_add_tpg(
 	tpg->workqueue = wq;
 
 	mutex_lock(&ft_lport_lock);
-	list_add_tail(&tpg->list, &lacl->tpg_list);
+	ft_wwn->tpg = tpg;
 	mutex_unlock(&ft_lport_lock);
 
 	return &tpg->se_tpg;
@@ -351,6 +356,7 @@
 static void ft_del_tpg(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+	struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
 
 	pr_debug("del tpg %s\n",
		 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
 	synchronize_rcu();
 
 	mutex_lock(&ft_lport_lock);
-	list_del(&tpg->list);
+	ft_wwn->tpg = NULL;
 	if (tpg->tport) {
 		tpg->tport->tpg = NULL;
 		tpg->tport = NULL;
@@ -380,15 +386,11 @@
  */
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_tpg *tpg;
+	struct ft_lport_wwn *ft_wwn;
 
-	list_for_each_entry(lacl, &ft_lport_list, list) {
-		if (lacl->wwpn == lport->wwpn) {
-			list_for_each_entry(tpg, &lacl->tpg_list, list)
-				return tpg; /* XXX for now return first entry */
-			return NULL;
-		}
+	list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (ft_wwn->wwpn == lport->wwpn)
+			return ft_wwn->tpg;
 	}
 	return NULL;
 }
@@ -401,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
  * Add lport to allowed config.
  * The name is the WWPN in lower-case ASCII, colon-separated bytes.
  */
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_lport_acl *old_lacl;
+	struct ft_lport_wwn *ft_wwn;
+	struct ft_lport_wwn *old_ft_wwn;
 	u64 wwpn;
 
-	pr_debug("add lport %s\n", name);
+	pr_debug("add wwn %s\n", name);
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
 		return NULL;
-	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
-	if (!lacl)
+	ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+	if (!ft_wwn)
 		return NULL;
-	lacl->wwpn = wwpn;
-	INIT_LIST_HEAD(&lacl->tpg_list);
+	ft_wwn->wwpn = wwpn;
 
 	mutex_lock(&ft_lport_lock);
-	list_for_each_entry(old_lacl, &ft_lport_list, list) {
-		if (old_lacl->wwpn == wwpn) {
+	list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (old_ft_wwn->wwpn == wwpn) {
 			mutex_unlock(&ft_lport_lock);
-			kfree(lacl);
+			kfree(ft_wwn);
 			return NULL;
 		}
 	}
-	list_add_tail(&lacl->list, &ft_lport_list);
-	ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+	list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+	ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
 	mutex_unlock(&ft_lport_lock);
 
-	return &lacl->fc_lport_wwn;
+	return &ft_wwn->se_wwn;
 }
 
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
 {
-	struct ft_lport_acl *lacl = container_of(wwn,
-				struct ft_lport_acl, fc_lport_wwn);
+	struct ft_lport_wwn *ft_wwn = container_of(wwn,
+				struct ft_lport_wwn, se_wwn);
 
-	pr_debug("del lport %s\n", lacl->name);
+	pr_debug("del wwn %s\n", ft_wwn->name);
 	mutex_lock(&ft_lport_lock);
-	list_del(&lacl->list);
+	list_del(&ft_wwn->ft_wwn_node);
 	mutex_unlock(&ft_lport_lock);
 
-	kfree(lacl);
+	kfree(ft_wwn);
 }
 
 static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@
 static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return tpg->lport_acl->name;
+	return tpg->lport_wwn->name;
 }
 
 static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
 	.queue_data_in =		ft_queue_data_in,
 	.queue_status =			ft_queue_status,
 	.queue_tm_rsp =			ft_queue_tm_resp,
+	.aborted_task =			ft_aborted_task,
 	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
-	.fabric_make_wwn =		&ft_add_lport,
-	.fabric_drop_wwn =		&ft_del_lport,
+	.fabric_make_wwn =		&ft_add_wwn,
+	.fabric_drop_wwn =		&ft_del_wwn,
 	.fabric_make_tpg =		&ft_add_tpg,
 	.fabric_drop_tpg =		&ft_del_tpg,
 	.fabric_post_link =		NULL,
@@ -552,7 +554,7 @@
 	.fabric_drop_nodeacl =		&ft_del_acl,
 };
 
-int ft_register_configfs(void)
+static int ft_register_configfs(void)
 {
 	struct target_fabric_configfs *fabric;
 	int ret;
@@ -599,7 +601,7 @@ int ft_register_configfs(void)
 	return 0;
 }
 
-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
 {
 	if (!ft_configfs)
 		return;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index e415af32115..97b486c3dda 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,6 +82,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 
 	if (cmd->aborted)
 		return 0;
+
+	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
+		goto queue_status;
+
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
@@ -178,14 +182,23 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
			       FC_TYPE_FCP, f_ctl, fh_off);
 		error = lport->tt.seq_send(lport, seq, fp);
 		if (error) {
-			/* XXX For now, initiator will retry */
-			pr_err_ratelimited("%s: Failed to send frame %p, "
+			pr_info_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
+			/*
+			 * Go ahead and set TASK_SET_FULL status ignoring the
+			 * rest of the DataIN, and immediately attempt to
+			 * send the response via ft_queue_status() in order
+			 * to notify the initiator that it should reduce its
+			 * per LUN queue_depth.
+			 */
+			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+			break;
 		}
 	}
+queue_status:
 	return ft_queue_status(se_cmd);
 }
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index ae52c08dad0..21ce50880c7 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *);
  * Lookup or allocate target local port.
  * Caller holds ft_lport_lock.
  */
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
 {
 	struct ft_tpg *tpg;
 	struct ft_tport *tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
 
 	if (tport) {
 		tport->tpg = tpg;
+		tpg->tport = tport;
 		return tport;
 	}
 
@@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
 void ft_lport_add(struct fc_lport *lport, void *arg)
 {
 	mutex_lock(&ft_lport_lock);
-	ft_tport_create(lport);
+	ft_tport_get(lport);
 	mutex_unlock(&ft_lport_lock);
 }
 
@@ -211,7 +212,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 		return NULL;
 
 	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-						    sizeof(struct ft_cmd));
+						    sizeof(struct ft_cmd),
+						    TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		kfree(sess);
 		return NULL;
@@ -350,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 	struct ft_node_acl *acl;
 	u32 fcp_parm;
 
-	tport = ft_tport_create(rdata->local_port);
+	tport = ft_tport_get(rdata->local_port);
 	if (!tport)
 		goto not_target;	/* not a target for this local port */
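transport_init_session_tags() now takes a third argument naming the protection operations the fabric can offload; tcm_fc passes TARGET_PROT_NORMAL, i.e. none, which is exactly what routes its commands into the sbc_dif_generate()/sbc_dif_read_strip() software paths seen earlier in this patch. The core's test reads roughly as follows — the bit values are illustrative, only the names and the bitwise sup_prot_ops check come from the patch:

/* Per-session protection offload capabilities (illustrative values) */
enum target_prot_op {
	TARGET_PROT_NORMAL	= 0,		/* no PI handling offloaded */
	TARGET_PROT_DOUT_INSERT	= 1 << 0,	/* HW inserts PI on WRITE */
	TARGET_PROT_DIN_STRIP	= 1 << 1,	/* HW strips PI on READ */
	/* ... insert/strip/pass variants for both directions ... */
};

/* WRITE path: emulate in software only when HW cannot insert PI */
static int need_sw_write_insert(unsigned int sup_prot_ops)
{
	return !(sup_prot_ops & TARGET_PROT_DOUT_INSERT);
}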
