Diffstat (limited to 'drivers/target')
51 files changed, 3590 insertions, 1278 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 18303686eb5..dc2d84ac5a0 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -3,6 +3,7 @@ menuconfig TARGET_CORE
 	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
 	depends on SCSI && BLOCK
 	select CONFIGFS_FS
+	select CRC_T10DIF
 	default n
 	help
 	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +14,7 @@ if TARGET_CORE
 
 config TCM_IBLOCK
 	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	select BLK_DEV_INTEGRITY
 	help
 	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
 	access to Linux/Block devices using BIO
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 35b61f7d6c6..1f4c794f5fc 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -52,7 +52,7 @@
 static LIST_HEAD(g_tiqn_list);
 static LIST_HEAD(g_np_list);
 static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
 
 static struct idr tiqn_idr;
 struct idr sess_idr;
@@ -300,13 +300,16 @@ bool iscsit_check_np_match(
 		port = ntohs(sock_in->sin_port);
 	}
 
-	if ((ip_match == true) && (np->np_port == port) &&
+	if (ip_match && (np->np_port == port) &&
 	    (np->np_network_transport == network_transport))
 		return true;
 
 	return false;
 }
 
+/*
+ * Called with mutex np_lock held
+ */
 static struct iscsi_np *iscsit_get_np(
 	struct __kernel_sockaddr_storage *sockaddr,
 	int network_transport)
@@ -314,29 +317,26 @@ static struct iscsi_np *iscsit_get_np(
 	struct iscsi_np *np;
 	bool match;
 
-	spin_lock_bh(&np_lock);
 	list_for_each_entry(np, &g_np_list, np_list) {
-		spin_lock(&np->np_thread_lock);
+		spin_lock_bh(&np->np_thread_lock);
 		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
-			spin_unlock(&np->np_thread_lock);
+			spin_unlock_bh(&np->np_thread_lock);
 			continue;
 		}
 
 		match = iscsit_check_np_match(sockaddr, np, network_transport);
-		if (match == true) {
+		if (match) {
 			/*
 			 * Increment the np_exports reference count now to
 			 * prevent iscsit_del_np() below from being called
 			 * while iscsi_tpg_add_network_portal() is called.
 			 */
 			np->np_exports++;
-			spin_unlock(&np->np_thread_lock);
-			spin_unlock_bh(&np_lock);
+			spin_unlock_bh(&np->np_thread_lock);
 			return np;
 		}
-		spin_unlock(&np->np_thread_lock);
+		spin_unlock_bh(&np->np_thread_lock);
 	}
-	spin_unlock_bh(&np_lock);
 
 	return NULL;
 }
@@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
 	struct sockaddr_in6 *sock_in6;
 	struct iscsi_np *np;
 	int ret;
+
+	mutex_lock(&np_lock);
+
 	/*
 	 * Locate the existing struct iscsi_np if already active..
 	 */
 	np = iscsit_get_np(sockaddr, network_transport);
-	if (np)
+	if (np) {
+		mutex_unlock(&np_lock);
 		return np;
+	}
 
 	np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
 	if (!np) {
 		pr_err("Unable to allocate memory for struct iscsi_np\n");
+		mutex_unlock(&np_lock);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
 	ret = iscsi_target_setup_login_socket(np, sockaddr);
 	if (ret != 0) {
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
 
@@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
 		pr_err("Unable to create kthread: iscsi_np\n");
 		ret = PTR_ERR(np->np_thread);
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
 	/*
@@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np(
 	 * point because iscsi_np has not been added to g_np_list yet.
 	 */
 	np->np_exports = 1;
+	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 
-	spin_lock_bh(&np_lock);
 	list_add_tail(&np->np_list, &g_np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
 		np->np_ip, np->np_port, np->np_transport->name);
@@ -452,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np)
 	spin_lock_bh(&np->np_thread_lock);
 	np->np_exports--;
 	if (np->np_exports) {
+		np->enabled = true;
 		spin_unlock_bh(&np->np_thread_lock);
 		return 0;
 	}
@@ -465,13 +474,14 @@ int iscsit_del_np(struct iscsi_np *np)
 		 */
 		send_sig(SIGINT, np->np_thread, 1);
 		kthread_stop(np->np_thread);
+		np->np_thread = NULL;
 	}
 
 	np->np_transport->iscsit_free_np(np);
 
-	spin_lock_bh(&np_lock);
+	mutex_lock(&np_lock);
 	list_del(&np->np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
 		np->np_ip, np->np_port, np->np_transport->name);
@@ -490,6 +500,23 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	return 0;
 }
 
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	__iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	return TARGET_PROT_NORMAL;
+}
+
 static struct iscsit_transport iscsi_target_transport = {
 	.name			= "iSCSI/TCP",
 	.transport_type		= ISCSI_TCP,
@@ -504,6 +531,8 @@ static struct iscsit_transport iscsi_target_transport = {
 	.iscsit_response_queue	= iscsit_response_queue,
 	.iscsit_queue_data_in	= iscsit_queue_rsp,
 	.iscsit_queue_status	= iscsit_queue_rsp,
+	.iscsit_aborted_task	= iscsit_aborted_task,
+	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
 };
 
 static int __init iscsi_target_init_module(void)
@@ -621,7 +650,7 @@ static int iscsit_add_reject(
 {
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd)
 		return -1;
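The np_lock change above turns a spinlock into a mutex so iscsit_add_np() can hold it across the iscsit_get_np() lookup, the kzalloc(), and the list insertion, closing the window where two racing callers could both miss the lookup and add duplicate portals. A minimal userspace sketch of that add-or-create shape, with pthreads standing in for the kernel mutex and hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct portal {
	struct portal *next;
	int port;
	int refcount;
};

static struct portal *portal_list;
static pthread_mutex_t portal_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Add-or-create: hold one sleeping lock across both the lookup and the
 * insertion, so two callers racing on the same port can never allocate
 * two portals (the race a dropped-lock window would allow).
 */
struct portal *portal_get_or_add(int port)
{
	struct portal *p;

	pthread_mutex_lock(&portal_lock);
	for (p = portal_list; p; p = p->next) {
		if (p->port == port) {
			p->refcount++;	/* pin before dropping the lock */
			pthread_mutex_unlock(&portal_lock);
			return p;
		}
	}
	p = calloc(1, sizeof(*p));	/* may sleep; fine under a mutex */
	if (p) {
		p->port = port;
		p->refcount = 1;
		p->next = portal_list;	/* publish while still locked */
		portal_list = p;
	}
	pthread_mutex_unlock(&portal_lock);
	return p;
}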
@@ -753,7 +782,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
 
 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
 {
-	struct iscsi_cmd *cmd;
+	LIST_HEAD(ack_list);
+	struct iscsi_cmd *cmd, *cmd_p;
 
 	conn->exp_statsn = exp_statsn;
 
@@ -761,19 +791,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
 		return;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
 		spin_lock(&cmd->istate_lock);
 		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
 		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
 			cmd->i_state = ISTATE_REMOVE;
 			spin_unlock(&cmd->istate_lock);
-			iscsit_add_cmd_to_immediate_queue(cmd, conn,
-						cmd->i_state);
+			list_move_tail(&cmd->i_conn_node, &ack_list);
 			continue;
 		}
 		spin_unlock(&cmd->istate_lock);
 	}
 	spin_unlock_bh(&conn->cmd_lock);
+
+	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+		list_del_init(&cmd->i_conn_node);
+		iscsit_free_cmd(cmd, false);
+	}
 }
 
 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
@@ -800,14 +834,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	int iscsi_task_attr;
 	int sam_task_attr;
 
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->cmd_pdus++;
-	if (conn->sess->se_sess->se_node_acl) {
-		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-		conn->sess->se_sess->se_node_acl->num_cmds++;
-		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-	}
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_inc(&conn->sess->cmd_pdus);
 
 	hdr			= (struct iscsi_scsi_req *) buf;
 	payload_length		= ntoh24(hdr->dlength);
@@ -825,24 +852,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
 		/*
-		 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-		 * that adds support for RESERVE/RELEASE.  There is a bug
-		 * add with this new functionality that sets R/W bits when
-		 * neither CDB carries any READ or WRITE datapayloads.
+		 * From RFC-3720 Section 10.3.1:
+		 *
+		 * "Either or both of R and W MAY be 1 when either the
+		 *  Expected Data Transfer Length and/or Bidirectional Read
+		 *  Expected Data Transfer Length are 0"
+		 *
+		 * For this case, go ahead and clear the unnecessary bits
+		 * to avoid any confusion with ->data_direction.
 		 */
-		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-			goto done;
-		}
+		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 
-		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
-			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-		return iscsit_add_reject_cmd(cmd,
-					     ISCSI_REASON_BOOKMARK_INVALID, buf);
+			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
 	}
-done:
 
 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
@@ -1096,7 +1121,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 	/*
 	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
 	 */
-	if (dump_payload == true)
+	if (dump_payload)
 		goto after_immediate_data;
 
 	immed_ret = iscsit_handle_immediate_data(cmd, hdr,
@@ -1249,20 +1274,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
 	int rc;
 
 	if (!payload_length) {
-		pr_err("DataOUT payload is ZERO, protocol error.\n");
-		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
-					 buf);
+		pr_warn("DataOUT payload is ZERO, ignoring.\n");
+		return 0;
 	}
 
 	/* iSCSI write */
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->rx_data_octets += payload_length;
-	if (conn->sess->se_sess->se_node_acl) {
-		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-		conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
-		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-	}
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_add(payload_length, &conn->sess->rx_data_octets);
 
 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
 		pr_err("DataSegmentLength: %u is greater than"
@@ -1292,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
 	if (cmd->data_direction != DMA_TO_DEVICE) {
 		pr_err("Command ITT: 0x%08x received DataOUT for a"
 			" NON-WRITE command.\n", cmd->init_task_tag);
-		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+		return iscsit_dump_data_payload(conn, payload_length, 1);
 	}
 	se_cmd = &cmd->se_cmd;
 	iscsit_mod_dataout_timer(cmd);
@@ -1481,7 +1498,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload);
 
 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 {
-	struct iscsi_cmd *cmd;
+	struct iscsi_cmd *cmd = NULL;
 	struct iscsi_data *hdr = (struct iscsi_data *)buf;
 	int rc;
 	bool data_crc_failed = false;
@@ -1506,6 +1523,16 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
 	u32 payload_length = ntoh24(hdr->dlength);
 
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+		pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
+	}
+
 	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
 		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
 			" not set, protocol error.\n");
@@ -1567,7 +1594,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 * Initiator is expecting a NopIN ping reply..
 	 */
 	if (hdr->itt != RESERVED_ITT) {
-		BUG_ON(!cmd);
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						(unsigned char *)hdr);
 
 		spin_lock_bh(&conn->cmd_lock);
 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
@@ -1949,6 +1978,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 					 (unsigned char *)hdr);
 	}
 
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+	     (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+		pr_err("Multi sequence text commands currently not supported\n");
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+					(unsigned char *)hdr);
+	}
+
 	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
 		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
 		hdr->exp_statsn, payload_length);
@@ -2464,6 +2500,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 {
 	struct iscsi_cmd *cmd;
 	struct iscsi_conn *conn_p;
+	bool found = false;
 
 	/*
 	 * Only send a Asynchronous Message on connections whos network
@@ -2472,14 +2509,15 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
 		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
 			iscsit_inc_conn_usage_count(conn_p);
+			found = true;
 			break;
 		}
 	}
 
-	if (!conn_p)
+	if (!found)
 		return;
 
-	cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
 	if (!cmd) {
 		iscsit_dec_conn_usage_count(conn_p);
 		return;
@@ -2625,14 +2663,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 		return -1;
 	}
 
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->tx_data_octets += datain.length;
-	if (conn->sess->se_sess->se_node_acl) {
-		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-		conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
-		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-	}
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_add(datain.length, &conn->sess->tx_data_octets);
 	/*
 	 * Special case for successfully execution w/ both DATAIN
 	 * and Sense Data.
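The reworked iscsit_ack_from_expstatsn() above collects acknowledged commands onto a local ack_list while holding conn->cmd_lock and only calls iscsit_free_cmd() after the lock is dropped, since the free path may block. A compact userspace analog of that two-phase drain, with a pthread mutex in place of the spinlock and hypothetical types:

#include <pthread.h>
#include <stdlib.h>

struct cmd {
	struct cmd *next;
	int acked;
};

static struct cmd *cmd_list;
static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;

void reap_acked(void)
{
	struct cmd *reap = NULL, **pp, *c;

	/* Phase 1: unlink matching entries while holding the lock. */
	pthread_mutex_lock(&cmd_lock);
	pp = &cmd_list;
	while ((c = *pp) != NULL) {
		if (c->acked) {
			*pp = c->next;	/* unlink from the shared list */
			c->next = reap;	/* collect on the private list */
			reap = c;
		} else {
			pp = &c->next;
		}
	}
	pthread_mutex_unlock(&cmd_lock);

	/* Phase 2: free outside the lock, where blocking is allowed. */
	while ((c = reap) != NULL) {
		reap = c->next;
		free(c);
	}
}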
@@ -3157,9 +3188,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 	if (inc_stat_sn)
 		cmd->stat_sn = conn->stat_sn++;
 
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->rsp_pdus++;
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_inc(&conn->sess->rsp_pdus);
 
 	memset(hdr, 0, ISCSI_HDR_LEN);
 	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
@@ -3361,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 
 #define SENDTARGETS_BUF_LIMIT 32768U
 
-static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+static int
+iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+				  enum iscsit_transport_type network_transport)
 {
 	char *payload = NULL;
 	struct iscsi_conn *conn = cmd->conn;
@@ -3369,6 +3400,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 	struct iscsi_tiqn *tiqn;
 	struct iscsi_tpg_np *tpg_np;
 	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+	int target_name_printed;
 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
@@ -3406,19 +3438,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 			continue;
 		}
 
-		len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
-		len += 1;
-
-		if ((len + payload_len) > buffer_len) {
-			end_of_buf = 1;
-			goto eob;
-		}
-		memcpy(payload + payload_len, buf, len);
-		payload_len += len;
+		target_name_printed = 0;
 
 		spin_lock(&tiqn->tiqn_tpg_lock);
 		list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+			/* If demo_mode_discovery=0 and generate_node_acls=0
+			 * (demo mode disabled) do not return
+			 * TargetName+TargetAddress unless a NodeACL exists.
+			 */
+
+			if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+			    (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+			    (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
+				cmd->conn->sess->sess_ops->InitiatorName))) {
+				continue;
+			}
+
 			spin_lock(&tpg->tpg_state_lock);
 			if ((tpg->tpg_state == TPG_STATE_FREE) ||
 			    (tpg->tpg_state == TPG_STATE_INACTIVE)) {
@@ -3433,12 +3469,29 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 				struct iscsi_np *np = tpg_np->tpg_np;
 				bool inaddr_any = iscsit_check_inaddr_any(np);
 
+				if (np->np_network_transport != network_transport)
+					continue;
+
+				if (!target_name_printed) {
+					len = sprintf(buf, "TargetName=%s",
+						      tiqn->tiqn);
+					len += 1;
+
+					if ((len + payload_len) > buffer_len) {
+						spin_unlock(&tpg->tpg_np_lock);
+						spin_unlock(&tiqn->tiqn_tpg_lock);
+						end_of_buf = 1;
+						goto eob;
+					}
+					memcpy(payload + payload_len, buf, len);
+					payload_len += len;
+					target_name_printed = 1;
+				}
+
 				len = sprintf(buf, "TargetAddress="
 					"%s:%hu,%hu",
-					(inaddr_any == false) ?
-						np->np_ip : conn->local_ip,
-					(inaddr_any == false) ?
-						np->np_port : conn->local_port,
+					inaddr_any ? conn->local_ip : np->np_ip,
+					inaddr_any ? conn->local_port : np->np_port,
 					tpg->tpgt);
 				len += 1;
 
@@ -3470,11 +3523,12 @@ eob:
 
 int
 iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
-		      struct iscsi_text_rsp *hdr)
+		      struct iscsi_text_rsp *hdr,
+		      enum iscsit_transport_type network_transport)
 {
 	int text_length, padding;
 
-	text_length = iscsit_build_sendtargets_response(cmd);
+	text_length = iscsit_build_sendtargets_response(cmd, network_transport);
 	if (text_length < 0)
 		return text_length;
 
@@ -3512,7 +3566,7 @@ static int iscsit_send_text_rsp(
 	u32 tx_size = 0;
 	int text_length, iov_count = 0, rc;
 
-	rc = iscsit_build_text_rsp(cmd, conn, hdr);
+	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
 	if (rc < 0)
 		return rc;
 
@@ -3692,7 +3746,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
 		break;
 	case ISTATE_REMOVE:
 		spin_lock_bh(&conn->cmd_lock);
-		list_del(&cmd->i_conn_node);
+		list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_free_cmd(cmd, false);
@@ -3943,7 +3997,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
 
 	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
@@ -3955,28 +4009,28 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
 	case ISCSI_OP_NOOP_OUT:
 		cmd = NULL;
 		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
-			cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 			if (!cmd)
 				goto reject;
 		}
 		ret = iscsit_handle_nop_out(conn, cmd, buf);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
@@ -4087,9 +4141,7 @@ restart:
 				 * hit default in the switch below.
 				 */
 				memset(buffer, 0xff, ISCSI_HDR_LEN);
-				spin_lock_bh(&conn->sess->session_stats_lock);
-				conn->sess->conn_digest_errors++;
-				spin_unlock_bh(&conn->sess->session_stats_lock);
+				atomic_long_inc(&conn->sess->conn_digest_errors);
 			} else {
 				pr_debug("Got HeaderDigest CRC32C"
						" 0x%08x\n", checksum);
@@ -4137,7 +4189,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 
 	spin_lock_bh(&conn->cmd_lock);
 	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
-		list_del(&cmd->i_conn_node);
+		list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_increment_maxcmdsn(cmd, sess);
@@ -4182,7 +4234,9 @@ int iscsit_close_connection(
 	iscsit_stop_timers_for_cmds(conn);
 	iscsit_stop_nopin_response_timer(conn);
 	iscsit_stop_nopin_timer(conn);
-	iscsit_free_queue_reqs_for_conn(conn);
+
+	if (conn->conn_transport->iscsit_wait_conn)
+		conn->conn_transport->iscsit_wait_conn(conn);
 
 	/*
 	 * During Connection recovery drop unacknowledged out of order
@@ -4200,6 +4254,7 @@ int iscsit_close_connection(
 		iscsit_clear_ooo_cmdsns_for_conn(conn);
 		iscsit_release_commands_from_conn(conn);
 	}
+	iscsit_free_queue_reqs_for_conn(conn);
 
 	/*
 	 * Handle decrementing session or connection usage count if
@@ -4376,7 +4431,7 @@ int iscsit_close_connection(
 
 int iscsit_close_session(struct iscsi_session *sess)
 {
-	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct iscsi_portal_group *tpg = sess->tpg;
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 
 	if (atomic_read(&sess->nconn)) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 7505fddca15..ab4915c0d93 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -71,6 +71,40 @@ static void chap_gen_challenge(
 			challenge_asciihex);
 }
 
+static int chap_check_algorithm(const char *a_str)
+{
+	char *tmp, *orig, *token;
+
+	tmp = kstrdup(a_str, GFP_KERNEL);
+	if (!tmp) {
+		pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
+		return CHAP_DIGEST_UNKNOWN;
+	}
+	orig = tmp;
+
+	token = strsep(&tmp, "=");
+	if (!token)
+		goto out;
+
+	if (strcmp(token, "CHAP_A")) {
+		pr_err("Unable to locate CHAP_A key\n");
+		goto out;
+	}
+	while (token) {
+		token = strsep(&tmp, ",");
+		if (!token)
+			goto out;
+
+		if (!strncmp(token, "5", 1)) {
+			pr_debug("Selected MD5 Algorithm\n");
+			kfree(orig);
+			return CHAP_DIGEST_MD5;
+		}
+	}
+out:
+	kfree(orig);
+	return CHAP_DIGEST_UNKNOWN;
+}
+
 static struct iscsi_chap *chap_server_open(
 	struct iscsi_conn *conn,
@@ -79,6 +113,7 @@ static struct iscsi_chap *chap_server_open(
 	char *aic_str,
 	unsigned int *aic_len)
 {
+	int ret;
 	struct iscsi_chap *chap;
 
 	if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -93,25 +128,28 @@ static struct iscsi_chap *chap_server_open(
 		return NULL;
 
 	chap = conn->auth_protocol;
-	/*
-	 * We only support MD5 MDA presently.
-	 */
-	if (strncmp(a_str, "CHAP_A=5", 8)) {
-		pr_err("CHAP_A is not MD5.\n");
+	ret = chap_check_algorithm(a_str);
+	switch (ret) {
+	case CHAP_DIGEST_MD5:
+		pr_debug("[server] Got CHAP_A=5\n");
+		/*
+		 * Send back CHAP_A set to MD5.
+		*/
+		*aic_len = sprintf(aic_str, "CHAP_A=5");
+		*aic_len += 1;
+		chap->digest_type = CHAP_DIGEST_MD5;
+		pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+		break;
+	case CHAP_DIGEST_UNKNOWN:
+	default:
+		pr_err("Unsupported CHAP_A value\n");
 		return NULL;
 	}
-	pr_debug("[server] Got CHAP_A=5\n");
-	/*
-	 * Send back CHAP_A set to MD5.
-	 */
-	*aic_len = sprintf(aic_str, "CHAP_A=5");
-	*aic_len += 1;
-	chap->digest_type = CHAP_DIGEST_MD5;
-	pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+
 	/*
 	 * Set Identifier.
 	 */
-	chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+	chap->id = conn->tpg->tpg_chap_id++;
 	*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
 	*aic_len += 1;
 	pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
@@ -136,7 +174,6 @@ static int chap_server_compute_md5(
 	char *nr_out_ptr,
 	unsigned int *nr_out_len)
 {
-	char *endptr;
 	unsigned long id;
 	unsigned char id_as_uchar;
 	unsigned char digest[MD5_SIGNATURE_SIZE];
@@ -146,6 +183,7 @@ static int chap_server_compute_md5(
 	unsigned char client_digest[MD5_SIGNATURE_SIZE];
 	unsigned char server_digest[MD5_SIGNATURE_SIZE];
 	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+	size_t compare_len;
 	struct iscsi_chap *chap = conn->auth_protocol;
 	struct crypto_hash *tfm;
 	struct hash_desc desc;
@@ -184,7 +222,9 @@ static int chap_server_compute_md5(
 		goto out;
 	}
 
-	if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+	/* Include the terminating NULL in the compare */
+	compare_len = strlen(auth->userid) + 1;
+	if (strncmp(chap_n, auth->userid, compare_len) != 0) {
 		pr_err("CHAP_N values do not match!\n");
 		goto out;
 	}
@@ -279,9 +319,14 @@ static int chap_server_compute_md5(
 	}
 
 	if (type == HEX)
-		id = simple_strtoul(&identifier[2], &endptr, 0);
+		ret = kstrtoul(&identifier[2], 0, &id);
 	else
-		id = simple_strtoul(identifier, &endptr, 0);
+		ret = kstrtoul(identifier, 0, &id);
+
+	if (ret < 0) {
+		pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
+		goto out;
+	}
 	if (id > 255) {
 		pr_err("chap identifier: %lu greater than 255\n", id);
 		goto out;
@@ -310,6 +355,20 @@ static int chap_server_compute_md5(
 		pr_err("Unable to convert incoming challenge\n");
 		goto out;
 	}
+	if (challenge_len > 1024) {
+		pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+		goto out;
+	}
+	/*
+	 * During mutual authentication, the CHAP_C generated by the
+	 * initiator must not match the original CHAP_C generated by
+	 * the target.
+	 */
+	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+		pr_err("initiator CHAP_C matches target CHAP_C, failing"
+		       " login attempt\n");
+		goto out;
+	}
 	/*
 	 * Generate CHAP_N and CHAP_R for mutual authentication.
 	 */
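chap_check_algorithm() above tokenizes the initiator's "CHAP_A=<v1>,<v2>,..." proposal with strsep() and scans the value list for a supported digest. A standalone userspace sketch of the same parse, with glibc strdup()/strsep() in place of the kernel's kstrdup(), and hypothetical names:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { DIGEST_UNKNOWN = 0, DIGEST_MD5 = 5 };

/* Parse "CHAP_A=<v1>,<v2>,..." and return the first supported digest. */
static int check_algorithm(const char *a_str)
{
	char *tmp, *orig, *token;
	int ret = DIGEST_UNKNOWN;

	tmp = strdup(a_str);
	if (!tmp)
		return DIGEST_UNKNOWN;
	orig = tmp;			/* strsep() advances tmp; free orig */

	token = strsep(&tmp, "=");
	if (!token || strcmp(token, "CHAP_A"))
		goto out;		/* key is not CHAP_A */

	while ((token = strsep(&tmp, ",")) != NULL) {
		if (!strcmp(token, "5")) {	/* MD5 proposed */
			ret = DIGEST_MD5;
			break;
		}
	}
out:
	free(orig);
	return ret;
}

int main(void)
{
	printf("%d\n", check_algorithm("CHAP_A=6,5"));	/* prints 5 */
	return 0;
}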
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index 2f463c09626..d22f7b96a06 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,6 +1,7 @@
 #ifndef _ISCSI_CHAP_H_
 #define _ISCSI_CHAP_H_
 
+#define CHAP_DIGEST_UNKNOWN	0
 #define CHAP_DIGEST_MD5		5
 #define CHAP_DIGEST_SHA		6
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index fd145259361..ae03f3e5de1 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name(				\
 	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
					se_node_acl);			\
 									\
-	return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name);	\
+	return sprintf(page, "%u\n", nacl->node_attrib.name);		\
 }									\
 									\
 static ssize_t iscsi_nacl_attrib_store_##name(				\
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name(				\
 									\
 	if (!capable(CAP_SYS_ADMIN))					\
 		return -EPERM;						\
-									\
+	if (count >= sizeof(auth->name))				\
+		return -EINVAL;						\
 	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
 	if (!strncmp("NULL", auth->name, 4))				\
 		auth->naf_flags &= ~flags;				\
@@ -897,7 +898,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
 	if (!se_nacl_new)
 		return ERR_PTR(-ENOMEM);
 
-	cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+	cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
 	/*
 	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
 	 * when converting a NdoeACL from demo mode -> explict
@@ -920,9 +921,9 @@ static struct se_node_acl *lio_target_make_nodeacl(
 		return ERR_PTR(-ENOMEM);
 	}
 
-	stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+	stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
 	stats_cg->default_groups[1] = NULL;
-	config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+	config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
 			"iscsi_sess_stats", &iscsi_stat_sess_cit);
 
 	return se_nacl;
@@ -967,7 +968,7 @@ static ssize_t iscsi_tpg_attrib_show_##name(				\
 	if (iscsit_get_tpg(tpg) < 0)					\
 		return -EINVAL;						\
 									\
-	rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name);	\
+	rb = sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
 	iscsit_put_tpg(tpg);						\
 	return rb;							\
 }									\
@@ -1041,6 +1042,21 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
  */
 DEF_TPG_ATTRIB(prod_mode_write_protect);
 TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_discovery,
+ */
+DEF_TPG_ATTRIB(demo_mode_discovery);
+TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_erl
+ */
+DEF_TPG_ATTRIB(default_erl);
+TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_t10_pi
+ */
+DEF_TPG_ATTRIB(t10_pi);
+TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_authentication.attr,
@@ -1051,6 +1067,9 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_cache_dynamic_acls.attr,
 	&iscsi_tpg_attrib_demo_mode_write_protect.attr,
 	&iscsi_tpg_attrib_prod_mode_write_protect.attr,
+	&iscsi_tpg_attrib_demo_mode_discovery.attr,
+	&iscsi_tpg_attrib_default_erl.attr,
+	&iscsi_tpg_attrib_t10_pi.attr,
 	NULL,
 };
 
@@ -1514,21 +1533,21 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
 		return ERR_PTR(-ENOMEM);
 	}
 
-	stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
-	stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
-	stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
-	stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
-	stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+	stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
+	stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
+	stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
+	stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
+	stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
 	stats_cg->default_groups[5] = NULL;
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
 			"iscsi_instance", &iscsi_stat_instance_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
 			"iscsi_sess_err", &iscsi_stat_sess_err_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
 			"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
 			"iscsi_login_stats", &iscsi_stat_login_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
 			"iscsi_logout_stats", &iscsi_stat_logout_cit);
 
 	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
@@ -1784,6 +1803,11 @@ static int lio_queue_status(struct se_cmd *se_cmd)
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
 
 	cmd->i_state = ISTATE_SEND_STATUS;
+
+	if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
+		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+		return 0;
+	}
 	cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
 
 	return 0;
@@ -1797,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
 	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 
+static void lio_aborted_task(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
+}
+
 static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1815,21 +1846,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+	return tpg->tpg_attrib.default_cmdsn_depth;
 }
 
 static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+	return tpg->tpg_attrib.generate_node_acls;
 }
 
 static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+	return tpg->tpg_attrib.cache_dynamic_acls;
 }
 
 static int lio_tpg_check_demo_mode_write_protect(
@@ -1837,7 +1868,7 @@ static int lio_tpg_check_demo_mode_write_protect(
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+	return tpg->tpg_attrib.demo_mode_write_protect;
 }
 
 static int lio_tpg_check_prod_mode_write_protect(
@@ -1845,7 +1876,7 @@ static int lio_tpg_check_prod_mode_write_protect(
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+	return tpg->tpg_attrib.prod_mode_write_protect;
 }
 
 static void lio_tpg_release_fabric_acl(
@@ -1908,9 +1939,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
 {
 	struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
				se_node_acl);
+	struct se_portal_group *se_tpg = se_acl->se_tpg;
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+				struct iscsi_portal_group, tpg_se_tpg);
 
-	ISCSI_NODE_ATTRIB(acl)->nacl = acl;
-	iscsit_set_default_node_attribues(acl);
+	acl->node_attrib.nacl = acl;
+	iscsit_set_default_node_attribues(acl, tpg);
 }
 
 static int lio_check_stop_free(struct se_cmd *se_cmd)
@@ -1978,6 +2012,7 @@ int iscsi_target_register_configfs(void)
 	fabric->tf_ops.queue_data_in = &lio_queue_data_in;
 	fabric->tf_ops.queue_status = &lio_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+	fabric->tf_ops.aborted_task = &lio_aborted_task;
 	/*
 	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
 	 */
@@ -1995,17 +2030,17 @@ int iscsi_target_register_configfs(void)
 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
 	 * sturct config_item_type's
 	 */
-	TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
-	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+	fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
 
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
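The configfs auth store fix above rejects writes that cannot fit the destination buffer instead of letting snprintf() silently truncate them. A userspace sketch of the same guard, with a hypothetical field size:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define USERID_LEN 256	/* hypothetical field size */

static char userid[USERID_LEN];

/* Reject oversized input instead of silently truncating it. */
static ssize_t userid_store(const char *page, size_t count)
{
	if (count >= sizeof(userid))	/* leave room for the NUL */
		return -EINVAL;
	snprintf(userid, sizeof(userid), "%s", page);
	return (ssize_t)count;
}

int main(void)
{
	const char *s = "iqn.2003-01.org.example:initiator";

	return userid_store(s, strlen(s) + 1) < 0;
}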
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 9a5721b8ff9..302eb3b7871 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -37,9 +37,6 @@
 #define NA_RANDOM_DATAIN_PDU_OFFSETS	0
 #define NA_RANDOM_DATAIN_SEQ_OFFSETS	0
 #define NA_RANDOM_R2T_OFFSETS		0
-#define NA_DEFAULT_ERL			0
-#define NA_DEFAULT_ERL_MAX		2
-#define NA_DEFAULT_ERL_MIN		0
 
 /* struct iscsi_tpg_attrib sanity values */
 #define TA_AUTHENTICATION		1
@@ -58,8 +55,11 @@
 #define TA_DEMO_MODE_WRITE_PROTECT	1
 /* Disabled by default in production mode w/ explict ACLs */
 #define TA_PROD_MODE_WRITE_PROTECT	0
+#define TA_DEMO_MODE_DISCOVERY		1
+#define TA_DEFAULT_ERL			0
 #define TA_CACHE_CORE_NPS		0
-
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI		0
 
 #define ISCSI_IOV_DATA_BUFFER		5
 
@@ -192,6 +192,7 @@ enum recover_cmdsn_ret_table {
 	CMDSN_NORMAL_OPERATION		= 0,
 	CMDSN_LOWER_THAN_EXP		= 1,
 	CMDSN_HIGHER_THAN_EXP		= 2,
+	CMDSN_MAXCMDSN_OVERRUN		= 3,
 };
 
 /* Used for iscsi_handle_immediate_data() return values */
@@ -556,7 +557,7 @@ struct iscsi_conn {
 	struct completion	rx_half_close_comp;
 	/* socket used by this connection */
 	struct socket		*sock;
-	void			(*orig_data_ready)(struct sock *, int);
+	void			(*orig_data_ready)(struct sock *);
 	void			(*orig_state_change)(struct sock *);
#define LOGIN_FLAGS_READ_ACTIVE		1
#define LOGIN_FLAGS_CLOSED		2
@@ -650,14 +651,13 @@ struct iscsi_session {
 	/* Used for session reference counting */
 	int			session_usage_count;
 	int			session_waiting_on_uc;
-	u32			cmd_pdus;
-	u32			rsp_pdus;
-	u64			tx_data_octets;
-	u64			rx_data_octets;
-	u32			conn_digest_errors;
-	u32			conn_timeout_errors;
+	atomic_long_t		cmd_pdus;
+	atomic_long_t		rsp_pdus;
+	atomic_long_t		tx_data_octets;
+	atomic_long_t		rx_data_octets;
+	atomic_long_t		conn_digest_errors;
+	atomic_long_t		conn_timeout_errors;
 	u64			creation_time;
-	spinlock_t		session_stats_lock;
 	/* Number of active connections */
 	atomic_t		nconn;
 	atomic_t		session_continuation;
@@ -755,11 +755,6 @@ struct iscsi_node_acl {
 	struct se_node_acl	se_node_acl;
 };
 
-#define NODE_STAT_GRPS(nacl)	(&(nacl)->node_stat_grps)
-
-#define ISCSI_NODE_ATTRIB(t)	(&(t)->node_attrib)
-#define ISCSI_NODE_AUTH(t)	(&(t)->node_auth)
-
 struct iscsi_tpg_attrib {
 	u32			authentication;
 	u32			login_timeout;
@@ -769,6 +764,9 @@ struct iscsi_tpg_attrib {
 	u32			default_cmdsn_depth;
 	u32			demo_mode_write_protect;
 	u32			prod_mode_write_protect;
+	u32			demo_mode_discovery;
+	u32			default_erl;
+	u8			t10_pi;
 	struct iscsi_portal_group *tpg;
 };
 
@@ -777,6 +775,7 @@ struct iscsi_np {
 	int			np_ip_proto;
 	int			np_sock_type;
 	enum np_thread_state_table np_thread_state;
+	bool                    enabled;
 	enum iscsi_timer_flags_table np_login_timer_flags;
 	u32			np_exports;
 	enum np_flags_table	np_flags;
@@ -791,6 +790,7 @@ struct iscsi_np {
 	void			*np_context;
 	struct iscsit_transport *np_transport;
 	struct list_head	np_list;
+	struct iscsi_tpg_np	*tpg_np;
 } ____cacheline_aligned;
 
 struct iscsi_tpg_np {
@@ -835,12 +835,6 @@ struct iscsi_portal_group {
 	struct list_head	tpg_list;
 } ____cacheline_aligned;
 
-#define ISCSI_TPG_C(c)		((struct iscsi_portal_group *)(c)->tpg)
-#define ISCSI_TPG_LUN(c, l)  ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
-#define ISCSI_TPG_S(s)		((struct iscsi_portal_group *)(s)->tpg)
-#define ISCSI_TPG_ATTRIB(t)	(&(t)->tpg_attrib)
-#define SE_TPG(tpg)		(&(tpg)->tpg_se_tpg)
-
 struct iscsi_wwn_stat_grps {
 	struct config_group	iscsi_stat_group;
 	struct config_group	iscsi_instance_group;
@@ -871,8 +865,6 @@ struct iscsi_tiqn {
 	struct iscsi_logout_stats    logout_stats;
 } ____cacheline_aligned;
 
-#define WWN_STAT_GRPS(tiqn)	(&(tiqn)->tiqn_stat_grps)
-
 struct iscsit_global {
 	/* In core shutdown */
 	u32			in_shutdown;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 6c7a5104a4c..7087c736daa 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
 
 	cmd->maxcmdsn_inc = 1;
 
-	if (!mutex_trylock(&sess->cmdsn_mutex)) {
-		sess->max_cmd_sn += 1;
-		pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
-		return;
-	}
+	mutex_lock(&sess->cmdsn_mutex);
 	sess->max_cmd_sn += 1;
 	pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
 	mutex_unlock(&sess->cmdsn_mutex);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 41052e512d9..0d1e6ee3e99 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -757,7 +757,7 @@ int iscsit_check_post_dataout(
 static void iscsit_handle_time2retain_timeout(unsigned long data)
 {
 	struct iscsi_session *sess = (struct iscsi_session *) data;
-	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct iscsi_portal_group *tpg = sess->tpg;
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 
 	spin_lock_bh(&se_tpg->session_lock);
@@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
 		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
 		tiqn->sess_err_stats.cxn_timeout_errors++;
-		sess->conn_timeout_errors++;
+		atomic_long_inc(&sess->conn_timeout_errors);
 		spin_unlock(&tiqn->sess_err_stats.lock);
 	}
 	}
@@ -801,9 +801,9 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
 	 * Only start Time2Retain timer when the associated TPG is still in
 	 * an ACTIVE (eg: not disabled or shutdown) state.
 	 */
-	spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
-	tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
-	spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+	spin_lock(&sess->tpg->tpg_state_lock);
+	tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
+	spin_unlock(&sess->tpg->tpg_state_lock);
 
 	if (!tpg_active)
 		return;
@@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
  */
 int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
 {
-	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct iscsi_portal_group *tpg = sess->tpg;
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 
 	if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d6439f4..cda4d80cfae 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack(
 	u32 last_statsn;
 	int found_cmd;
 
-	if (conn->exp_statsn > begrun) {
+	if (!begrun) {
+		begrun = conn->exp_statsn;
+	} else if (conn->exp_statsn > begrun) {
 		pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
			" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
			" %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 33be1fb1df3..4ca8fd2a70d 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
 
 		list_for_each_entry_safe(cmd, cmd_tmp,
				&cr->conn_recovery_cmd_list, i_conn_node) {
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 			cmd->conn = NULL;
 			spin_unlock(&cr->conn_recovery_cmd_lock);
 			iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
 
 		list_for_each_entry_safe(cmd, cmd_tmp,
				&cr->conn_recovery_cmd_list, i_conn_node) {
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 			cmd->conn = NULL;
 			spin_unlock(&cr->conn_recovery_cmd_lock);
 			iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
 	}
 	cr = cmd->cr;
 
-	list_del(&cmd->i_conn_node);
+	list_del_init(&cmd->i_conn_node);
 	return --cr->cmd_count;
 }
 
@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
 		if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
 			continue;
 
-		list_del(&cmd->i_conn_node);
+		list_del_init(&cmd->i_conn_node);
 
 		spin_unlock_bh(&conn->cmd_lock);
 		iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 	/*
 	 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
 	 * ISCSI_OP_NOOP_OUT opcodes.  For all other opcodes call
	 * list_del_init(&cmd->i_conn_node); to release the command to the
	 * session pool and remove it from the connection's list.
	 *
	 * Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
				" CID: %hu\n", cmd->iscsi_opcode,
				cmd->init_task_tag, cmd->cmd_sn, conn->cid);
 
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 			spin_unlock_bh(&conn->cmd_lock);
 			iscsit_free_cmd(cmd, true);
 			spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 		 */
 		if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
		     iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 			spin_unlock_bh(&conn->cmd_lock);
 			iscsit_free_cmd(cmd, true);
 			spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 
 		cmd->sess = conn->sess;
 
-		list_del(&cmd->i_conn_node);
+		list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_free_all_datain_reqs(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1794c753954..5e71ac60941 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -249,6 +249,28 @@ static void iscsi_login_set_conn_values(
 	mutex_unlock(&auth_id_lock);
 }
 
+static __printf(2, 3) int iscsi_change_param_sprintf(
+	struct iscsi_conn *conn,
+	const char *fmt, ...)
+{
+	va_list args;
+	unsigned char buf[64];
+
+	memset(buf, 0, sizeof buf);
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof buf, fmt, args);
+	va_end(args);
+
+	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	return 0;
+}
+
 /*
 *	This is the leading connection of a new session,
 *	or session reinstatement.
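iscsi_change_param_sprintf() above folds the repeated memset/sprintf/iscsi_change_param_value/error-response sequence into one printf-style helper. A userspace sketch of the same wrapper shape, with a hypothetical set_param() callee and a GCC-style format attribute mirroring the kernel's __printf():

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for iscsi_change_param_value(). */
static int set_param(const char *keyvalue)
{
	return strchr(keyvalue, '=') ? 0 : -1;
}

/* Format "Key=Value" into a bounded buffer, then apply it. */
__attribute__((format(printf, 1, 2)))
static int set_param_sprintf(const char *fmt, ...)
{
	char buf[64];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);	/* always NUL-terminates */
	va_end(args);

	return set_param(buf);
}

int main(void)
{
	return set_param_sprintf("ErrorRecoveryLevel=%d", 0);
}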
@@ -259,6 +281,7 @@ static int iscsi_login_zero_tsih_s1(
 {
 	struct iscsi_session *sess = NULL;
 	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+	enum target_prot_op sup_pro_ops;
 	int ret;
 
 	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -305,7 +328,6 @@ static int iscsi_login_zero_tsih_s1(
 	}
 
 	sess->creation_time = get_jiffies_64();
-	spin_lock_init(&sess->session_stats_lock);
 	/*
 	 * The FFP CmdSN window values will be allocated from the TPG's
 	 * Initiator Node's ACL once the login has been successfully completed.
@@ -321,8 +343,9 @@ static int iscsi_login_zero_tsih_s1(
 		kfree(sess);
 		return -ENOMEM;
 	}
+	sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 
-	sess->se_sess = transport_init_session();
+	sess->se_sess = transport_init_session(sup_pro_ops);
 	if (IS_ERR(sess->se_sess)) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
				ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -338,7 +361,6 @@ static int iscsi_login_zero_tsih_s2(
 {
 	struct iscsi_node_attrib *na;
 	struct iscsi_session *sess = conn->sess;
-	unsigned char buf[32];
 	bool iser = false;
 
 	sess->tpg = conn->tpg;
@@ -347,15 +369,15 @@ static int iscsi_login_zero_tsih_s2(
 	 * Assign a new TPG Session Handle.  Note this is protected with
 	 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
 	 */
-	sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+	sess->tsih = ++sess->tpg->ntsih;
 	if (!sess->tsih)
-		sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+		sess->tsih = ++sess->tpg->ntsih;
 
 	/*
 	 * Create the default params from user defined values..
 	 */
 	if (iscsi_copy_param_list(&conn->param_list,
-				ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+				conn->tpg->param_list, 1) < 0) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		return -1;
@@ -379,26 +401,16 @@ static int iscsi_login_zero_tsih_s2(
 	 *
 	 * In our case, we have already located the struct iscsi_tiqn at this point.
 	 */
-	memset(buf, 0, 32);
-	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
-	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+	if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
 		return -1;
-	}
 
 	/*
 	 * Workaround for Initiators that have broken connection recovery logic.
 	 *
 	 * "We would really like to get rid of this." Linux-iSCSI.org team
 	 */
-	memset(buf, 0, 32);
-	sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
-	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+	if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
 		return -1;
-	}
 
 	if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
 		return -1;
@@ -410,12 +422,9 @@ static int iscsi_login_zero_tsih_s2(
 		unsigned long mrdsl, off;
 		int rc;
 
-		sprintf(buf, "RDMAExtensions=Yes");
-		if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
 			return -1;
-		}
+
 		/*
 		 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
 		 * Immediate Data + Unsolicitied Data-OUT if necessary..
@@ -435,7 +444,7 @@ static int iscsi_login_zero_tsih_s2(
 		}
 		off = mrdsl % PAGE_SIZE;
 		if (!off)
-			return 0;
+			goto check_prot;
 
 		if (mrdsl < PAGE_SIZE)
 			mrdsl = PAGE_SIZE;
@@ -445,11 +454,25 @@ static int iscsi_login_zero_tsih_s2(
 		pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
			" to PAGE_SIZE\n", mrdsl);
 
-		sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl);
-		if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
 			return -1;
+		/*
+		 * ISER currently requires that ImmediateData + Unsolicited
+		 * Data be disabled when protection / signature MRs are enabled.
+		 */
+check_prot:
+		if (sess->se_sess->sup_prot_ops &
+		   (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+		    TARGET_PROT_DOUT_INSERT)) {
+
+			if (iscsi_change_param_sprintf(conn, "ImmediateData=No"))
+				return -1;
+
+			if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes"))
+				return -1;
+
+			pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+				 " T10-PI enabled ISER session\n");
 		}
 	}
 
@@ -575,7 +598,7 @@ static int iscsi_login_non_zero_tsih_s2(
 	iscsi_login_set_conn_values(sess, conn, pdu->cid);
 
 	if (iscsi_copy_param_list(&conn->param_list,
-			ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+			conn->tpg->param_list, 0) < 0) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		return -1;
@@ -592,13 +615,8 @@ static int iscsi_login_non_zero_tsih_s2(
 	 *
 	 * In our case, we have already located the struct iscsi_tiqn at this point.
 	 */
-	memset(buf, 0, 32);
-	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
-	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+	if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
 		return -1;
-	}
 
 	return iscsi_login_disable_FIM_keys(conn->param_list, conn);
 }
@@ -691,7 +709,7 @@ int iscsi_post_login_handler(
 	int stop_timer = 0;
 	struct iscsi_session *sess = conn->sess;
 	struct se_session *se_sess = sess->se_sess;
-	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct iscsi_portal_group *tpg = sess->tpg;
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 	struct iscsi_thread_set *ts;
 
@@ -983,6 +1001,7 @@ int iscsi_target_setup_login_socket(
 	}
 
 	np->np_transport = t;
+	np->enabled = true;
 	return 0;
 }
 
@@ -1126,7 +1145,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
 void iscsi_target_login_sess_out(struct iscsi_conn *conn,
		struct iscsi_np *np, bool zero_tsih, bool new_sess)
 {
-	if (new_sess == false)
+	if (!new_sess)
 		goto old_sess_out;
 
 	pr_err("iSCSI Login negotiation failed.\n");
@@ -1154,7 +1173,7 @@ old_sess_out:
 		spin_lock_bh(&conn->sess->conn_lock);
 		if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
 			struct se_portal_group *se_tpg =
-					&ISCSI_TPG_C(conn)->tpg_se_tpg;
+					&conn->tpg->tpg_se_tpg;
 
 			atomic_set(&conn->sess->session_continuation, 0);
 			spin_unlock_bh(&conn->sess->conn_lock);
@@ -1197,7 +1216,7 @@ old_sess_out:
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
 	u8 *buffer, zero_tsih = 0;
-	int ret = 0, rc, stop;
+	int ret = 0, rc;
 	struct iscsi_conn *conn = NULL;
 	struct iscsi_login *login;
 	struct iscsi_portal_group *tpg = NULL;
@@ -1211,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 		complete(&np->np_restart_comp);
+	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+		spin_unlock_bh(&np->np_thread_lock);
+		goto exit;
 	} else {
 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 	}
@@ -1403,20 +1425,12 @@ old_sess_out:
 	}
 
out:
-	stop = kthread_should_stop();
-	if (!stop && signal_pending(current)) {
-		spin_lock_bh(&np->np_thread_lock);
-		stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
-		spin_unlock_bh(&np->np_thread_lock);
-	}
-	/* Wait for another socket.. */
-	if (!stop)
-		return 1;
+	return 1;
+
exit:
 	iscsi_stop_login_thread_timer(np);
 	spin_lock_bh(&np->np_thread_lock);
 	np->np_thread_state = ISCSI_NP_THREAD_EXIT;
-	np->np_thread = NULL;
 	spin_unlock_bh(&np->np_thread_lock);
 
 	return 0;
@@ -1429,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
 
 	allow_signal(SIGINT);
 
-	while (!kthread_should_stop()) {
+	while (1) {
 		ret = __iscsi_target_login_thread(np);
 		/*
 		 * We break and exit here unless another sock_accept() call
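The login thread rework above drops the kthread_should_stop()/signal_pending() dance: __iscsi_target_login_thread() now returns 1 to keep accepting and 0 only after observing ISCSI_NP_THREAD_SHUTDOWN under np_thread_lock. A userspace sketch of that control flow, with pthreads and hypothetical names:

#include <pthread.h>
#include <stddef.h>

enum np_state { NP_ACTIVE, NP_RESET, NP_SHUTDOWN, NP_EXIT };

struct portal_thread {
	pthread_mutex_t lock;
	enum np_state state;
};

/* One accept iteration: returns 1 to continue, 0 to exit. */
static int login_thread_iter(struct portal_thread *pt)
{
	pthread_mutex_lock(&pt->lock);
	if (pt->state == NP_SHUTDOWN) {	/* observed under the lock */
		pt->state = NP_EXIT;
		pthread_mutex_unlock(&pt->lock);
		return 0;
	}
	pt->state = NP_ACTIVE;
	pthread_mutex_unlock(&pt->lock);

	/* ... accept() one connection and run login negotiation here ... */
	return 1;
}

static void *login_thread(void *arg)
{
	struct portal_thread *pt = arg;

	while (login_thread_iter(pt))
		;			/* loop until shutdown is observed */
	return NULL;
}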
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 14d1aed5af1..62a095f36bf 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -88,7 +88,7 @@ int extract_param(
 	if (len < 0)
 		return -1;
 
-	if (len > max_length) {
+	if (len >= max_length) {
 		pr_err("Length of input: %d exceeds max_length:"
			" %d\n", len, max_length);
 		return -1;
@@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication(
 			iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
						  se_node_acl);
 
-			auth = ISCSI_NODE_AUTH(iscsi_nacl);
+			auth = &iscsi_nacl->node_auth;
 		}
 	} else {
 		/*
@@ -375,7 +375,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
 	return 0;
 }
 
-static void iscsi_target_sk_data_ready(struct sock *sk, int count)
+static void iscsi_target_sk_data_ready(struct sock *sk)
 {
 	struct iscsi_conn *conn = sk->sk_user_data;
 	bool rc;
@@ -404,7 +404,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk, int count)
 	}
 
 	rc = schedule_delayed_work(&conn->login_work, 0);
-	if (rc == false) {
+	if (!rc) {
 		pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
			 " got false\n");
 	}
@@ -513,7 +513,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 	state = (tpg->tpg_state == TPG_STATE_ACTIVE);
 	spin_unlock(&tpg->tpg_state_lock);
 
-	if (state == false) {
+	if (!state) {
 		pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
 		iscsi_target_restore_sock_callbacks(conn);
 		iscsi_target_login_drop(conn, login);
@@ -528,7 +528,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 		state = iscsi_target_sk_state_check(sk);
 		read_unlock_bh(&sk->sk_callback_lock);
 
-		if (state == false) {
+		if (!state) {
 			pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
 			iscsi_target_restore_sock_callbacks(conn);
 			iscsi_target_login_drop(conn, login);
@@ -773,6 +773,12 @@ static int iscsi_target_handle_csg_zero(
 		}
 
 		goto do_auth;
+	} else if (!payload_length) {
+		pr_err("Initiator sent zero length security payload,"
+		       " login failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				    ISCSI_LOGIN_STATUS_AUTH_FAILED);
+		return -1;
 	}
 
 	if (login->first_request)
@@ -789,7 +795,7 @@ static int iscsi_target_handle_csg_zero(
 		return -1;
 
 	if (!iscsi_check_negotiated_keys(conn->param_list)) {
-		if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+		if (conn->tpg->tpg_attrib.authentication &&
		    !strncmp(param->value, NONE, 4)) {
 			pr_err("Initiator sent AuthMethod=None but"
				" Target is enforcing iSCSI Authentication,"
@@ -799,7 +805,7 @@ static int iscsi_target_handle_csg_zero(
 			return -1;
 		}
 
-		if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+		if (conn->tpg->tpg_attrib.authentication &&
		    !login->auth_complete)
 			return 0;
 
@@ -862,7 +868,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
 	}
 
 	if (!login->auth_complete &&
-	     ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+	     conn->tpg->tpg_attrib.authentication) {
 		pr_err("Initiator is requesting CSG: 1, has not been"
			 " successfully authenticated, and the Target is"
			" enforcing iSCSI Authentication, login failed.\n");
@@ -1192,7 +1198,7 @@ get_target:
 	 */
alloc_tags:
 	tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
-	tag_num += ISCSIT_EXTRA_TAGS;
+	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 	tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
 
 	ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 93bdc475eb0..16454a922e2 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname(
 }
 
 void iscsit_set_default_node_attribues(
-	struct iscsi_node_acl *acl)
+	struct iscsi_node_acl *acl,
+	struct iscsi_portal_group *tpg)
 {
 	struct iscsi_node_attrib *a = &acl->node_attrib;
 
@@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues(
 	a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
 	a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
 	a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
-	a->default_erl = NA_DEFAULT_ERL;
+	a->default_erl = tpg->tpg_attrib.default_erl;
 }
 
 int iscsit_na_dataout_timeout(
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index c970b326ef2..0c69a46a62e 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,7 +1,8 @@
 #ifndef ISCSI_TARGET_NODEATTRIB_H
 #define ISCSI_TARGET_NODEATTRIB_H
 
-extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
+					      struct iscsi_portal_group *);
 extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
 extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
 extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
SET_PSTATE_NEGOTIATE(param);  		} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { -			if (iser == true) +			if (iser)  				SET_PSTATE_NEGOTIATE(param);  		} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { -			if (iser == true) +			if (iser)  				SET_PSTATE_NEGOTIATE(param);  		}  	} @@ -1605,7 +1605,7 @@ int iscsi_decode_text_input(  	tmpbuf = kzalloc(length + 1, GFP_KERNEL);  	if (!tmpbuf) { -		pr_err("Unable to allocate memory for tmpbuf.\n"); +		pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);  		return -1;  	} diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c index f788e8b5e85..10339551030 100644 --- a/drivers/target/iscsi/iscsi_target_stat.c +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(  	if (se_sess) {  		sess = se_sess->fabric_sess_ptr;  		if (sess) -			ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); +			ret = snprintf(page, PAGE_SIZE, "%lu\n", +				       atomic_long_read(&sess->cmd_pdus));  	}  	spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(  	if (se_sess) {  		sess = se_sess->fabric_sess_ptr;  		if (sess) -			ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); +			ret = snprintf(page, PAGE_SIZE, "%lu\n", +				       atomic_long_read(&sess->rsp_pdus));  	}  	spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(  	if (se_sess) {  		sess = se_sess->fabric_sess_ptr;  		if (sess) -			ret = snprintf(page, PAGE_SIZE, "%llu\n", -				(unsigned long long)sess->tx_data_octets); +			ret = snprintf(page, PAGE_SIZE, "%lu\n", +				       atomic_long_read(&sess->tx_data_octets));  	}  	spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(  	if (se_sess) {  		sess = se_sess->fabric_sess_ptr;  		if (sess) -			ret = snprintf(page, PAGE_SIZE, "%llu\n", -				(unsigned long long)sess->rx_data_octets); +			ret = snprintf(page, PAGE_SIZE, "%lu\n", +				       atomic_long_read(&sess->rx_data_octets));  	}  	spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(  	if (se_sess) {  		sess = se_sess->fabric_sess_ptr;  		if (sess) -			ret = snprintf(page, PAGE_SIZE, "%u\n", -					sess->conn_digest_errors); +			ret = snprintf(page, PAGE_SIZE, "%lu\n", +				       atomic_long_read(&sess->conn_digest_errors));  	}  	spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(  	if (se_sess) {  		sess = se_sess->fabric_sess_ptr;  		if (sess) -			ret = snprintf(page, PAGE_SIZE, "%u\n", -					sess->conn_timeout_errors); +			ret = snprintf(page, PAGE_SIZE, "%lu\n", +				       atomic_long_read(&sess->conn_timeout_errors));  	}  	spin_unlock_bh(&se_nacl->nacl_sess_lock); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 4faeb47fa5e..c3cb5c15efd 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(  	list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {  		spin_lock(&tpg->tpg_state_lock); -		if (tpg->tpg_state == TPG_STATE_FREE) { +		if (tpg->tpg_state != TPG_STATE_ACTIVE) {  			spin_unlock(&tpg->tpg_state_lock);  			continue;  		} @@ -184,10 +184,12 @@ 
static void iscsit_clear_tpg_np_login_thread(  		return;  	} +	if (shutdown) +		tpg_np->tpg_np->enabled = false;  	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);  } -void iscsit_clear_tpg_np_login_threads( +static void iscsit_clear_tpg_np_login_threads(  	struct iscsi_portal_group *tpg,  	bool shutdown)  { @@ -223,6 +225,9 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)  	a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;  	a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;  	a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; +	a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; +	a->default_erl = TA_DEFAULT_ERL; +	a->t10_pi = TA_DEFAULT_T10_PI;  }  int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) @@ -237,7 +242,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro  	if (iscsi_create_default_params(&tpg->param_list) < 0)  		goto err_out; -	ISCSI_TPG_ATTRIB(tpg)->tpg = tpg; +	tpg->tpg_attrib.tpg = tpg;  	spin_lock(&tpg->tpg_state_lock);  	tpg->tpg_state	= TPG_STATE_INACTIVE; @@ -271,8 +276,6 @@ int iscsit_tpg_del_portal_group(  	tpg->tpg_state = TPG_STATE_INACTIVE;  	spin_unlock(&tpg->tpg_state_lock); -	iscsit_clear_tpg_np_login_threads(tpg, true); -  	if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {  		pr_err("Unable to delete iSCSI Target Portal Group:"  			" %hu while active sessions exist, and force=0\n", @@ -330,7 +333,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)  		return -EINVAL;  	} -	if (ISCSI_TPG_ATTRIB(tpg)->authentication) { +	if (tpg->tpg_attrib.authentication) {  		if (!strcmp(param->value, NONE)) {  			ret = iscsi_update_param_value(param, CHAP);  			if (ret) @@ -448,7 +451,7 @@ static bool iscsit_tpg_check_network_portal(  			match = iscsit_check_np_match(sockaddr, np,  						network_transport); -			if (match == true) +			if (match)  				break;  		}  		spin_unlock(&tpg->tpg_np_lock); @@ -470,7 +473,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(  	if (!tpg_np_parent) {  		if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr, -				network_transport) == true) { +				network_transport)) {  			pr_err("Network Portal: %s already exists on a"  				" different TPG on %s\n", ip_str,  				tpg->tpg_tiqn->tiqn); @@ -498,6 +501,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(  	init_completion(&tpg_np->tpg_np_comp);  	kref_init(&tpg_np->tpg_np_kref);  	tpg_np->tpg_np		= np; +	np->tpg_np		= tpg_np;  	tpg_np->tpg		= tpg;  	spin_lock(&tpg->tpg_np_lock); @@ -820,3 +824,58 @@ int iscsit_ta_prod_mode_write_protect(  	return 0;  } + +int iscsit_ta_demo_mode_discovery( +	struct iscsi_portal_group *tpg, +	u32 flag) +{ +	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + +	if ((flag != 0) && (flag != 1)) { +		pr_err("Illegal value %d\n", flag); +		return -EINVAL; +	} + +	a->demo_mode_discovery = flag; +	pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:" +		" %s\n", tpg->tpgt, (a->demo_mode_discovery) ? 
+		"ON" : "OFF"); + +	return 0; +} + +int iscsit_ta_default_erl( +	struct iscsi_portal_group *tpg, +	u32 default_erl) +{ +	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + +	if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) { +		pr_err("Illegal value for default_erl: %u\n", default_erl); +		return -EINVAL; +	} + +	a->default_erl = default_erl; +	pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl); + +	return 0; +} + +int iscsit_ta_t10_pi( +	struct iscsi_portal_group *tpg, +	u32 flag) +{ +	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + +	if ((flag != 0) && (flag != 1)) { +		pr_err("Illegal value %d\n", flag); +		return -EINVAL; +	} + +	a->t10_pi = flag; +	pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:" +		" %s\n", tpg->tpgt, (a->t10_pi) ? +		"ON" : "OFF"); + +	return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index b77693e2c20..e7265337bc4 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -8,7 +8,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,  			struct iscsi_np *, struct iscsi_tpg_np **);  extern int iscsit_get_tpg(struct iscsi_portal_group *);  extern void iscsit_put_tpg(struct iscsi_portal_group *); -extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool);  extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);  extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);  extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, @@ -37,5 +36,8 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);  extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);  extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);  extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); +extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); +extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); +extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);  #endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index f2de28e178f..fd90b28f1d9 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -152,13 +152,16 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)   * May be called from software interrupt (timer) context for allocating   * iSCSI NopINs.   
*/ -struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) +struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)  {  	struct iscsi_cmd *cmd;  	struct se_session *se_sess = conn->sess->se_sess;  	int size, tag; -	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask); +	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); +	if (tag < 0) +		return NULL; +  	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;  	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));  	memset(cmd, 0, size); @@ -242,9 +245,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm  	 */  	if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {  		pr_err("Received CmdSN: 0x%08x is greater than" -		       " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, +		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,  		       sess->max_cmd_sn); -		ret = CMDSN_ERROR_CANNOT_RECOVER; +		ret = CMDSN_MAXCMDSN_OVERRUN;  	} else if (cmdsn == sess->exp_cmd_sn) {  		sess->exp_cmd_sn++; @@ -303,14 +306,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,  		ret = CMDSN_HIGHER_THAN_EXP;  		break;  	case CMDSN_LOWER_THAN_EXP: +	case CMDSN_MAXCMDSN_OVERRUN: +	default:  		cmd->i_state = ISTATE_REMOVE;  		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); -		ret = cmdsn_ret; -		break; -	default: -		reason = ISCSI_REASON_PROTOCOL_ERROR; -		reject = true; -		ret = cmdsn_ret; +		/* +		 * Existing callers for iscsit_sequence_cmd() will silently +		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this +		 * return for CMDSN_MAXCMDSN_OVERRUN as well.. +		 */ +		ret = CMDSN_LOWER_THAN_EXP;  		break;  	}  	mutex_unlock(&conn->sess->cmdsn_mutex); @@ -700,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)  }  EXPORT_SYMBOL(iscsit_release_cmd); -static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, -			      bool check_queues) +void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, +		       bool check_queues)  {  	struct iscsi_conn *conn = cmd->conn; @@ -736,7 +741,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)  		 * Fallthrough  		 */  	case ISCSI_OP_SCSI_TMFUNC: -		rc = transport_generic_free_cmd(&cmd->se_cmd, 1); +		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);  		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {  			__iscsit_free_cmd(cmd, true, shutdown);  			target_put_sess_cmd(se_cmd->se_sess, se_cmd); @@ -752,7 +757,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)  			se_cmd = &cmd->se_cmd;  			__iscsit_free_cmd(cmd, true, shutdown); -			rc = transport_generic_free_cmd(&cmd->se_cmd, 1); +			rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);  			if (!rc && shutdown && se_cmd->se_sess) {  				__iscsit_free_cmd(cmd, true, shutdown);  				target_put_sess_cmd(se_cmd->se_sess, se_cmd); @@ -924,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)  	u8 state;  	struct iscsi_cmd *cmd; -	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC); +	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);  	if (!cmd)  		return -1; @@ -980,7 +985,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)  		tiqn->sess_err_stats.last_sess_failure_type =  				ISCSI_SESS_ERR_CXN_TIMEOUT;  		tiqn->sess_err_stats.cxn_timeout_errors++; -		conn->sess->conn_timeout_errors++; +		atomic_long_inc(&conn->sess->conn_timeout_errors);  		spin_unlock_bh(&tiqn->sess_err_stats.lock);  	}  	} @@ -1290,6 +1295,8 @@ int iscsit_tx_login_rsp(struct 
iscsi_conn *conn, u8 status_class, u8 status_deta  	login->login_failed = 1;  	iscsit_collect_login_stats(conn, status_class, status_detail); +	memset(&login->rsp[0], 0, ISCSI_HDR_LEN); +  	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];  	hdr->opcode		= ISCSI_OP_LOGIN_RSP;  	hdr->status_class	= status_class; diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index e4fc34a02f5..a68508c4fec 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -9,7 +9,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);  extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);  extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);  extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); -extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); +extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);  extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);  extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);  extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); @@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co  extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);  extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);  extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);  extern void iscsit_free_cmd(struct iscsi_cmd *, bool);  extern int iscsit_check_session_usage_count(struct iscsi_session *);  extern void iscsit_dec_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 0f6d69dabca..8c64b8776a9 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth(  	return sdev->queue_depth;  } +static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag) +{ +	if (sdev->tagged_supported) { +		scsi_set_tag_type(sdev, tag); + +		if (tag) +			scsi_activate_tcq(sdev, sdev->queue_depth); +		else +			scsi_deactivate_tcq(sdev, sdev->queue_depth); +	} else +		tag = 0; + +	return tag; +} +  /*   * Locate the SAM Task Attr from struct scsi_cmnd *   */ @@ -164,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)  	struct tcm_loop_hba *tl_hba;  	struct tcm_loop_tpg *tl_tpg;  	struct scatterlist *sgl_bidi = NULL; -	u32 sgl_bidi_count = 0; +	u32 sgl_bidi_count = 0, transfer_length;  	int rc;  	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); @@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work)  		set_host_byte(sc, DID_NO_CONNECT);  		goto out_done;  	} - +	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) { +		set_host_byte(sc, DID_TRANSPORT_DISRUPTED); +		goto out_done; +	}  	tl_nexus = tl_hba->tl_nexus;  	if (!tl_nexus) {  		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" @@ -194,12 +212,26 @@ static void tcm_loop_submission_work(struct work_struct *work)  		se_cmd->se_cmd_flags |= SCF_BIDI;  	} + +	transfer_length = scsi_transfer_length(sc); +	if (!scsi_prot_sg_count(sc) && +	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) { +		se_cmd->prot_pto = true; +		/* +		 * loopback transport doesn't support +		 * WRITE_GENERATE, READ_STRIP protection +		 * information operations, go ahead 
unprotected. +		 */ +		transfer_length = scsi_bufflen(sc); +	} +  	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,  			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, -			scsi_bufflen(sc), tcm_loop_sam_attr(sc), +			transfer_length, tcm_loop_sam_attr(sc),  			sc->sc_data_direction, 0,  			scsi_sglist(sc), scsi_sg_count(sc), -			sgl_bidi, sgl_bidi_count); +			sgl_bidi, sgl_bidi_count, +			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));  	if (rc < 0) {  		set_host_byte(sc, DID_NO_CONNECT);  		goto out_done; @@ -207,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work)  	return;  out_done: +	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);  	sc->scsi_done(sc);  	return;  } @@ -233,6 +266,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)  	}  	tl_cmd->sc = sc; +	tl_cmd->sc_cmd_tag = sc->tag;  	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);  	queue_work(tcm_loop_workqueue, &tl_cmd->work);  	return 0; @@ -242,41 +276,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)   * Called from SCSI EH process context to issue a LUN_RESET TMR   * to struct scsi_device   */ -static int tcm_loop_device_reset(struct scsi_cmnd *sc) +static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, +			      struct tcm_loop_nexus *tl_nexus, +			      int lun, int task, enum tcm_tmreq_table tmr)  {  	struct se_cmd *se_cmd = NULL; -	struct se_portal_group *se_tpg;  	struct se_session *se_sess; +	struct se_portal_group *se_tpg;  	struct tcm_loop_cmd *tl_cmd = NULL; -	struct tcm_loop_hba *tl_hba; -	struct tcm_loop_nexus *tl_nexus;  	struct tcm_loop_tmr *tl_tmr = NULL; -	struct tcm_loop_tpg *tl_tpg; -	int ret = FAILED, rc; -	/* -	 * Locate the tcm_loop_hba_t pointer -	 */ -	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); -	/* -	 * Locate the tl_nexus and se_sess pointers -	 */ -	tl_nexus = tl_hba->tl_nexus; -	if (!tl_nexus) { -		pr_err("Unable to perform device reset without" -				" active I_T Nexus\n"); -		return FAILED; -	} -	se_sess = tl_nexus->se_sess; -	/* -	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id -	 */ -	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; -	se_tpg = &tl_tpg->tl_se_tpg; +	int ret = TMR_FUNCTION_FAILED, rc;  	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);  	if (!tl_cmd) {  		pr_err("Unable to allocate memory for tl_cmd\n"); -		return FAILED; +		return ret;  	}  	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); @@ -287,6 +301,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)  	init_waitqueue_head(&tl_tmr->tl_tmr_wait);  	se_cmd = &tl_cmd->tl_se_cmd; +	se_tpg = &tl_tpg->tl_se_tpg; +	se_sess = tl_nexus->se_sess;  	/*  	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure  	 */ @@ -294,17 +310,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)  				DMA_NONE, MSG_SIMPLE_TAG,  				&tl_cmd->tl_sense_buf[0]); -	rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL); +	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);  	if (rc < 0)  		goto release; + +	if (tmr == TMR_ABORT_TASK) +		se_cmd->se_tmr_req->ref_task_tag = task; +  	/* -	 * Locate the underlying TCM struct se_lun from sc->device->lun +	 * Locate the underlying TCM struct se_lun  	 */ -	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) +	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) { +		ret = TMR_LUN_DOES_NOT_EXIST;  		goto release; +	}  	/* -	 * Queue the TMR to TCM Core and sleep waiting for 
tcm_loop_queue_tm_rsp() -	 * to wake us up. +	 * Queue the TMR to TCM Core and sleep waiting for +	 * tcm_loop_queue_tm_rsp() to wake us up.  	 */  	transport_generic_handle_tmr(se_cmd);  	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); @@ -312,8 +334,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)  	 * The TMR LUN_RESET has completed, check the response status and  	 * then release allocations.  	 */ -	ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? -		SUCCESS : FAILED; +	ret = se_cmd->se_tmr_req->response;  release:  	if (se_cmd)  		transport_generic_free_cmd(se_cmd, 1); @@ -323,6 +344,94 @@ release:  	return ret;  } +static int tcm_loop_abort_task(struct scsi_cmnd *sc) +{ +	struct tcm_loop_hba *tl_hba; +	struct tcm_loop_nexus *tl_nexus; +	struct tcm_loop_tpg *tl_tpg; +	int ret = FAILED; + +	/* +	 * Locate the tcm_loop_hba_t pointer +	 */ +	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); +	/* +	 * Locate the tl_nexus and se_sess pointers +	 */ +	tl_nexus = tl_hba->tl_nexus; +	if (!tl_nexus) { +		pr_err("Unable to perform device reset without" +				" active I_T Nexus\n"); +		return FAILED; +	} + +	/* +	 * Locate the tl_tpg pointer from TargetID in sc->device->id +	 */ +	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; +	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, +				 sc->tag, TMR_ABORT_TASK); +	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; +} + +/* + * Called from SCSI EH process context to issue a LUN_RESET TMR + * to struct scsi_device + */ +static int tcm_loop_device_reset(struct scsi_cmnd *sc) +{ +	struct tcm_loop_hba *tl_hba; +	struct tcm_loop_nexus *tl_nexus; +	struct tcm_loop_tpg *tl_tpg; +	int ret = FAILED; + +	/* +	 * Locate the tcm_loop_hba_t pointer +	 */ +	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); +	/* +	 * Locate the tl_nexus and se_sess pointers +	 */ +	tl_nexus = tl_hba->tl_nexus; +	if (!tl_nexus) { +		pr_err("Unable to perform device reset without" +				" active I_T Nexus\n"); +		return FAILED; +	} +	/* +	 * Locate the tl_tpg pointer from TargetID in sc->device->id +	 */ +	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; +	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, +				 0, TMR_LUN_RESET); +	return (ret == TMR_FUNCTION_COMPLETE) ? 
SUCCESS : FAILED; +} + +static int tcm_loop_target_reset(struct scsi_cmnd *sc) +{ +	struct tcm_loop_hba *tl_hba; +	struct tcm_loop_tpg *tl_tpg; + +	/* +	 * Locate the tcm_loop_hba_t pointer +	 */ +	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); +	if (!tl_hba) { +		pr_err("Unable to perform device reset without" +				" active I_T Nexus\n"); +		return FAILED; +	} +	/* +	 * Locate the tl_tpg pointer from TargetID in sc->device->id +	 */ +	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; +	if (tl_tpg) { +		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; +		return SUCCESS; +	} +	return FAILED; +} +  static int tcm_loop_slave_alloc(struct scsi_device *sd)  {  	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); @@ -331,6 +440,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)  static int tcm_loop_slave_configure(struct scsi_device *sd)  { +	if (sd->tagged_supported) { +		scsi_activate_tcq(sd, sd->queue_depth); +		scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG, +					sd->host->cmd_per_lun); +	} else { +		scsi_adjust_queue_depth(sd, 0, +					sd->host->cmd_per_lun); +	} +  	return 0;  } @@ -340,7 +458,10 @@ static struct scsi_host_template tcm_loop_driver_template = {  	.name			= "TCM_Loopback",  	.queuecommand		= tcm_loop_queuecommand,  	.change_queue_depth	= tcm_loop_change_queue_depth, +	.change_queue_type	= tcm_loop_change_queue_type, +	.eh_abort_handler = tcm_loop_abort_task,  	.eh_device_reset_handler = tcm_loop_device_reset, +	.eh_target_reset_handler = tcm_loop_target_reset,  	.can_queue		= 1024,  	.this_id		= -1,  	.sg_tablesize		= 256, @@ -356,7 +477,7 @@ static int tcm_loop_driver_probe(struct device *dev)  {  	struct tcm_loop_hba *tl_hba;  	struct Scsi_Host *sh; -	int error; +	int error, host_prot;  	tl_hba = to_tcm_loop_hba(dev); @@ -380,6 +501,13 @@ static int tcm_loop_driver_probe(struct device *dev)  	sh->max_channel = 0;  	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; +	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | +		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | +		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + +	scsi_host_set_prot(sh, host_prot); +	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); +  	error = scsi_add_host(sh, &tl_hba->dev);  	if (error) {  		pr_err("%s: scsi_add_host failed\n", __func__); @@ -699,7 +827,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)  static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)  { -	return 1; +	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, +			struct tcm_loop_cmd, tl_se_cmd); + +	return tl_cmd->sc_cmd_tag;  }  static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) @@ -798,6 +929,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)  	wake_up(&tl_tmr->tl_tmr_wait);  } +static void tcm_loop_aborted_task(struct se_cmd *se_cmd) +{ +	return; +} +  static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)  {  	switch (tl_hba->tl_proto_id) { @@ -825,7 +961,7 @@ static int tcm_loop_port_link(  	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;  	atomic_inc(&tl_tpg->tl_tpg_port_count); -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  	/*  	 * Add Linux/SCSI struct scsi_device by HCTL  	 */ @@ -860,7 +996,7 @@ static void tcm_loop_port_unlink(  	scsi_device_put(sd);  	atomic_dec(&tl_tpg->tl_tpg_port_count); -	smp_mb__after_atomic_dec(); +	smp_mb__after_atomic();  	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");  } @@ -892,7 +1028,7 @@ static int tcm_loop_make_nexus(  	/*  	 * Initialize the struct 
se_session pointer  	 */ -	tl_nexus->se_sess = transport_init_session(); +	tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);  	if (IS_ERR(tl_nexus->se_sess)) {  		ret = PTR_ERR(tl_nexus->se_sess);  		goto out; @@ -932,7 +1068,10 @@ static int tcm_loop_drop_nexus(  	struct tcm_loop_nexus *tl_nexus;  	struct tcm_loop_hba *tl_hba = tpg->tl_hba; -	tl_nexus = tpg->tl_hba->tl_nexus; +	if (!tl_hba) +		return -ENODEV; + +	tl_nexus = tl_hba->tl_nexus;  	if (!tl_nexus)  		return -ENODEV; @@ -1061,14 +1200,62 @@ check_newline:  TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); +static ssize_t tcm_loop_tpg_show_transport_status( +	struct se_portal_group *se_tpg, +	char *page) +{ +	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, +			struct tcm_loop_tpg, tl_se_tpg); +	const char *status = NULL; +	ssize_t ret = -EINVAL; + +	switch (tl_tpg->tl_transport_status) { +	case TCM_TRANSPORT_ONLINE: +		status = "online"; +		break; +	case TCM_TRANSPORT_OFFLINE: +		status = "offline"; +		break; +	default: +		break; +	} + +	if (status) +		ret = snprintf(page, PAGE_SIZE, "%s\n", status); + +	return ret; +} + +static ssize_t tcm_loop_tpg_store_transport_status( +	struct se_portal_group *se_tpg, +	const char *page, +	size_t count) +{ +	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, +			struct tcm_loop_tpg, tl_se_tpg); + +	if (!strncmp(page, "online", 6)) { +		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; +		return count; +	} +	if (!strncmp(page, "offline", 7)) { +		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE; +		return count; +	} +	return -EINVAL; +} + +TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR); +  static struct configfs_attribute *tcm_loop_tpg_attrs[] = {  	&tcm_loop_tpg_nexus.attr, +	&tcm_loop_tpg_transport_status.attr,  	NULL,  };  /* Start items for tcm_loop_naa_cit */ -struct se_portal_group *tcm_loop_make_naa_tpg( +static struct se_portal_group *tcm_loop_make_naa_tpg(  	struct se_wwn *wwn,  	struct config_group *group,  	const char *name) @@ -1113,7 +1300,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(  	return &tl_tpg->tl_se_tpg;  } -void tcm_loop_drop_naa_tpg( +static void tcm_loop_drop_naa_tpg(  	struct se_portal_group *se_tpg)  {  	struct se_wwn *wwn = se_tpg->se_tpg_wwn; @@ -1145,7 +1332,7 @@ void tcm_loop_drop_naa_tpg(  /* Start items for tcm_loop_cit */ -struct se_wwn *tcm_loop_make_scsi_hba( +static struct se_wwn *tcm_loop_make_scsi_hba(  	struct target_fabric_configfs *tf,  	struct config_group *group,  	const char *name) @@ -1215,7 +1402,7 @@ out:  	return ERR_PTR(ret);  } -void tcm_loop_drop_scsi_hba( +static void tcm_loop_drop_scsi_hba(  	struct se_wwn *wwn)  {  	struct tcm_loop_hba *tl_hba = container_of(wwn, @@ -1315,6 +1502,7 @@ static int tcm_loop_register_configfs(void)  	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;  	fabric->tf_ops.queue_status = &tcm_loop_queue_status;  	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; +	fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;  	/*  	 * Setup function pointers for generic logic in target_core_fabric_configfs.c @@ -1334,11 +1522,11 @@ static int tcm_loop_register_configfs(void)  	/*  	 * Setup default attribute lists for various fabric->tf_cit_tmpl  	 */ -	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; +	
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; +	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; +	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;  	/*  	 * Once fabric->tf_ops has been setup, now register the fabric for  	 * use within TCM diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index dd7a84ee78e..54c59d0b660 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -10,6 +10,8 @@  struct tcm_loop_cmd {  	/* State of Linux/SCSI CDB+Data descriptor */  	u32 sc_cmd_state; +	/* Tagged command queueing */ +	u32 sc_cmd_tag;  	/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */  	struct scsi_cmnd *sc;  	/* The TCM I/O descriptor that is accessed via container_of() */ @@ -40,8 +42,12 @@ struct tcm_loop_nacl {  	struct se_node_acl se_node_acl;  }; +#define TCM_TRANSPORT_ONLINE 0 +#define TCM_TRANSPORT_OFFLINE 1 +  struct tcm_loop_tpg {  	unsigned short tl_tpgt; +	unsigned short tl_transport_status;  	atomic_t tl_tpg_port_count;  	struct se_portal_group tl_se_tpg;  	struct tcm_loop_hba *tl_hba; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index e51b09a04d5..e7e93727553 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create(  		return ERR_PTR(-ENOMEM);  	} -	sess->se_sess = transport_init_session(); +	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);  	if (IS_ERR(sess->se_sess)) {  		pr_err("failed to init se_session\n"); @@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)  {  } +static void sbp_aborted_task(struct se_cmd *se_cmd) +{ +	return; +} +  static int sbp_check_stop_free(struct se_cmd *se_cmd)  {  	struct sbp_target_request *req = container_of(se_cmd, @@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = {  	.queue_data_in			= sbp_queue_data_in,  	.queue_status			= sbp_queue_status,  	.queue_tm_rsp			= sbp_queue_tm_rsp, +	.aborted_task			= sbp_aborted_task,  	.check_stop_free		= sbp_check_stop_free,  	.fabric_make_wwn		= sbp_make_tport, @@ -2556,15 +2562,15 @@ static int sbp_register_configfs(void)  	/*  	 * Setup default attribute lists for various fabric->tf_cit_tmpl  	 */ -	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; +	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; +	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; +	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; +	
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;  	ret = target_fabric_configfs_register(fabric);  	if (ret < 0) { diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 47244102281..fbc5ebb5f76 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -41,10 +41,13 @@  #include "target_core_alua.h"  #include "target_core_ua.h" -static sense_reason_t core_alua_check_transition(int state, int *primary); +static sense_reason_t core_alua_check_transition(int state, int valid, +						 int *primary);  static int core_alua_set_tg_pt_secondary_state(  		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, -		struct se_port *port, int explict, int offline); +		struct se_port *port, int explicit, int offline); + +static char *core_alua_dump_state(int state);  static u16 alua_lu_gps_counter;  static u32 alua_lu_gps_count; @@ -55,6 +58,86 @@ static LIST_HEAD(lu_gps_list);  struct t10_alua_lu_gp *default_lu_gp;  /* + * REPORT REFERRALS + * + * See sbc3r35 section 5.23 + */ +sense_reason_t +target_emulate_report_referrals(struct se_cmd *cmd) +{ +	struct se_device *dev = cmd->se_dev; +	struct t10_alua_lba_map *map; +	struct t10_alua_lba_map_member *map_mem; +	unsigned char *buf; +	u32 rd_len = 0, off; + +	if (cmd->data_length < 4) { +		pr_warn("REPORT REFERRALS allocation length %u too" +			" small\n", cmd->data_length); +		return TCM_INVALID_CDB_FIELD; +	} + +	buf = transport_kmap_data_sg(cmd); +	if (!buf) +		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + +	off = 4; +	spin_lock(&dev->t10_alua.lba_map_lock); +	if (list_empty(&dev->t10_alua.lba_map_list)) { +		spin_unlock(&dev->t10_alua.lba_map_lock); +		transport_kunmap_data_sg(cmd); + +		return TCM_UNSUPPORTED_SCSI_OPCODE; +	} + +	list_for_each_entry(map, &dev->t10_alua.lba_map_list, +			    lba_map_list) { +		int desc_num = off + 3; +		int pg_num; + +		off += 4; +		if (cmd->data_length > off) +			put_unaligned_be64(map->lba_map_first_lba, &buf[off]); +		off += 8; +		if (cmd->data_length > off) +			put_unaligned_be64(map->lba_map_last_lba, &buf[off]); +		off += 8; +		rd_len += 20; +		pg_num = 0; +		list_for_each_entry(map_mem, &map->lba_map_mem_list, +				    lba_map_mem_list) { +			int alua_state = map_mem->lba_map_mem_alua_state; +			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id; + +			if (cmd->data_length > off) +				buf[off] = alua_state & 0x0f; +			off += 2; +			if (cmd->data_length > off) +				buf[off] = (alua_pg_id >> 8) & 0xff; +			off++; +			if (cmd->data_length > off) +				buf[off] = (alua_pg_id & 0xff); +			off++; +			rd_len += 4; +			pg_num++; +		} +		if (cmd->data_length > desc_num) +			buf[desc_num] = pg_num; +	} +	spin_unlock(&dev->t10_alua.lba_map_lock); + +	/* +	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload +	 */ +	put_unaligned_be16(rd_len, &buf[2]); + +	transport_kunmap_data_sg(cmd); + +	target_complete_cmd(cmd, GOOD); +	return 0; +} + +/*   * REPORT_TARGET_PORT_GROUPS   *   * See spc4r17 section 6.27 @@ -117,12 +200,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)  		/*  		 * Set supported ASYMMETRIC ACCESS State bits  		 */ -		buf[off] = 0x80; /* T_SUP */ -		buf[off] |= 0x40; /* O_SUP */ -		buf[off] |= 0x8; /* U_SUP */ -		buf[off] |= 0x4; /* S_SUP */ -		buf[off] |= 0x2; /* AN_SUP */ -		buf[off++] |= 0x1; /* AO_SUP */ +		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;  		/*  		 * TARGET PORT GROUP  		 */ @@ -175,7 +253,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)  	if (ext_hdr != 0) {  		
buf[4] = 0x10;  		/* -		 * Set the implict transition time (in seconds) for the application +		 * Set the implicit transition time (in seconds) for the application  		 * client to use as a base for it's transition timeout value.  		 *  		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN @@ -188,7 +266,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)  			spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);  			tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;  			if (tg_pt_gp) -				buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; +				buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;  			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);  		}  	} @@ -199,7 +277,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)  }  /* - * SET_TARGET_PORT_GROUPS for explict ALUA operation. + * SET_TARGET_PORT_GROUPS for explicit ALUA operation.   *   * See spc4r17 section 6.35   */ @@ -215,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)  	unsigned char *ptr;  	sense_reason_t rc = TCM_NO_SENSE;  	u32 len = 4; /* Skip over RESERVED area in header */ -	int alua_access_state, primary = 0; +	int alua_access_state, primary = 0, valid_states;  	u16 tg_pt_id, rtpi;  	if (!l_port) @@ -232,7 +310,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)  		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  	/* -	 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed +	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed  	 * for the local tg_pt_gp.  	 */  	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; @@ -251,12 +329,13 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)  	}  	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); -	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) { +	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {  		pr_debug("Unable to process SET_TARGET_PORT_GROUPS" -				" while TPGS_EXPLICT_ALUA is disabled\n"); +				" while TPGS_EXPLICIT_ALUA is disabled\n");  		rc = TCM_UNSUPPORTED_SCSI_OPCODE;  		goto out;  	} +	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;  	ptr = &buf[4]; /* Skip over RESERVED area in header */ @@ -268,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)  		 * the state is a primary or secondary target port asymmetric  		 * access state.  		 */ -		rc = core_alua_check_transition(alua_access_state, &primary); +		rc = core_alua_check_transition(alua_access_state, +						valid_states, &primary);  		if (rc) {  			/*  			 * If the SET TARGET PORT GROUPS attempts to establish @@ -313,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)  					continue;  				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); -				smp_mb__after_atomic_inc(); +				smp_mb__after_atomic();  				spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -324,13 +404,13 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)  				spin_lock(&dev->t10_alua.tg_pt_gps_lock);  				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); -				smp_mb__after_atomic_dec(); +				smp_mb__after_atomic();  				break;  			}  			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);  		} else {  			/* -			 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify +			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify  			 * the Target Port in question for the the incoming  			 * SET_TARGET_PORT_GROUPS op.  			 
*/ @@ -375,11 +455,26 @@ out:  	return rc;  } -static inline int core_alua_state_nonoptimized( +static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq) +{ +	/* +	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; +	 * The ALUA additional sense code qualifier (ASCQ) is determined +	 * by the ALUA primary or secondary access state.. +	 */ +	pr_debug("[%s]: ALUA TG Port not available, " +		"SenseKey: NOT_READY, ASC/ASCQ: " +		"0x04/0x%02x\n", +		cmd->se_tfo->get_fabric_name(), alua_ascq); + +	cmd->scsi_asc = 0x04; +	cmd->scsi_ascq = alua_ascq; +} + +static inline void core_alua_state_nonoptimized(  	struct se_cmd *cmd,  	unsigned char *cdb, -	int nonop_delay_msecs, -	u8 *alua_ascq) +	int nonop_delay_msecs)  {  	/*  	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked @@ -388,13 +483,85 @@ static inline int core_alua_state_nonoptimized(  	 */  	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;  	cmd->alua_nonop_delay = nonop_delay_msecs; +} + +static inline int core_alua_state_lba_dependent( +	struct se_cmd *cmd, +	struct t10_alua_tg_pt_gp *tg_pt_gp) +{ +	struct se_device *dev = cmd->se_dev; +	u64 segment_size, segment_mult, sectors, lba; + +	/* Only need to check for cdb actually containing LBAs */ +	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)) +		return 0; + +	spin_lock(&dev->t10_alua.lba_map_lock); +	segment_size = dev->t10_alua.lba_map_segment_size; +	segment_mult = dev->t10_alua.lba_map_segment_multiplier; +	sectors = cmd->data_length / dev->dev_attrib.block_size; + +	lba = cmd->t_task_lba; +	while (lba < cmd->t_task_lba + sectors) { +		struct t10_alua_lba_map *cur_map = NULL, *map; +		struct t10_alua_lba_map_member *map_mem; + +		list_for_each_entry(map, &dev->t10_alua.lba_map_list, +				    lba_map_list) { +			u64 start_lba, last_lba; +			u64 first_lba = map->lba_map_first_lba; + +			if (segment_mult) { +				u64 tmp = lba; +				start_lba = do_div(tmp, segment_size * segment_mult); + +				last_lba = first_lba + segment_size - 1; +				if (start_lba >= first_lba && +				    start_lba <= last_lba) { +					lba += segment_size; +					cur_map = map; +					break; +				} +			} else { +				last_lba = map->lba_map_last_lba; +				if (lba >= first_lba && lba <= last_lba) { +					lba = last_lba + 1; +					cur_map = map; +					break; +				} +			} +		} +		if (!cur_map) { +			spin_unlock(&dev->t10_alua.lba_map_lock); +			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); +			return 1; +		} +		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, +				    lba_map_mem_list) { +			if (map_mem->lba_map_mem_alua_pg_id != +			    tg_pt_gp->tg_pt_gp_id) +				continue; +			switch(map_mem->lba_map_mem_alua_state) { +			case ALUA_ACCESS_STATE_STANDBY: +				spin_unlock(&dev->t10_alua.lba_map_lock); +				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); +				return 1; +			case ALUA_ACCESS_STATE_UNAVAILABLE: +				spin_unlock(&dev->t10_alua.lba_map_lock); +				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); +				return 1; +			default: +				break; +			} +		} +	} +	spin_unlock(&dev->t10_alua.lba_map_lock);  	return 0;  }  static inline int core_alua_state_standby(  	struct se_cmd *cmd, -	unsigned char *cdb, -	u8 *alua_ascq) +	unsigned char *cdb)  {  	/*  	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by @@ -409,13 +576,22 @@ static inline int core_alua_state_standby(  	case REPORT_LUNS:  	case RECEIVE_DIAGNOSTIC:  	case SEND_DIAGNOSTIC: +	case READ_CAPACITY:  		return 0; +	case SERVICE_ACTION_IN: +		switch (cdb[1] & 0x1f) { +		case SAI_READ_CAPACITY_16: +			return 0; +		default: +			
set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); +			return 1; +		}  	case MAINTENANCE_IN:  		switch (cdb[1] & 0x1f) {  		case MI_REPORT_TARGET_PGS:  			return 0;  		default: -			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; +			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);  			return 1;  		}  	case MAINTENANCE_OUT: @@ -423,7 +599,7 @@ static inline int core_alua_state_standby(  		case MO_SET_TARGET_PGS:  			return 0;  		default: -			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; +			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);  			return 1;  		}  	case REQUEST_SENSE: @@ -433,7 +609,7 @@ static inline int core_alua_state_standby(  	case WRITE_BUFFER:  		return 0;  	default: -		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; +		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);  		return 1;  	} @@ -442,8 +618,7 @@ static inline int core_alua_state_standby(  static inline int core_alua_state_unavailable(  	struct se_cmd *cmd, -	unsigned char *cdb, -	u8 *alua_ascq) +	unsigned char *cdb)  {  	/*  	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by @@ -458,7 +633,7 @@ static inline int core_alua_state_unavailable(  		case MI_REPORT_TARGET_PGS:  			return 0;  		default: -			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; +			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);  			return 1;  		}  	case MAINTENANCE_OUT: @@ -466,7 +641,7 @@ static inline int core_alua_state_unavailable(  		case MO_SET_TARGET_PGS:  			return 0;  		default: -			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; +			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);  			return 1;  		}  	case REQUEST_SENSE: @@ -474,7 +649,7 @@ static inline int core_alua_state_unavailable(  	case WRITE_BUFFER:  		return 0;  	default: -		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; +		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);  		return 1;  	} @@ -483,11 +658,10 @@ static inline int core_alua_state_unavailable(  static inline int core_alua_state_transition(  	struct se_cmd *cmd, -	unsigned char *cdb, -	u8 *alua_ascq) +	unsigned char *cdb)  {  	/* -	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by +	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by  	 * spc4r17 section 5.9.2.5  	 */  	switch (cdb[0]) { @@ -499,7 +673,7 @@ static inline int core_alua_state_transition(  		case MI_REPORT_TARGET_PGS:  			return 0;  		default: -			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; +			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);  			return 1;  		}  	case REQUEST_SENSE: @@ -507,7 +681,7 @@ static inline int core_alua_state_transition(  	case WRITE_BUFFER:  		return 0;  	default: -		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; +		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);  		return 1;  	} @@ -515,9 +689,9 @@ static inline int core_alua_state_transition(  }  /* - * return 1: Is used to signal LUN not accecsable, and check condition/not ready + * return 1: Is used to signal LUN not accessible, and check condition/not ready   * return 0: Used to signal success - * reutrn -1: Used to signal failure, and invalid cdb field + * return -1: Used to signal failure, and invalid cdb field   */  sense_reason_t  target_alua_state_check(struct se_cmd *cmd) @@ -529,8 +703,6 @@ target_alua_state_check(struct se_cmd *cmd)  	struct t10_alua_tg_pt_gp *tg_pt_gp;  	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;  	int out_alua_state, nonop_delay_msecs; -	u8 alua_ascq; -	int ret;  	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)  		return 0; @@ -546,9 +718,8 @@ target_alua_state_check(struct se_cmd *cmd)  	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {  	
	pr_debug("ALUA: Got secondary offline status for local"  				" target port\n"); -		alua_ascq = ASCQ_04H_ALUA_OFFLINE; -		ret = 1; -		goto out; +		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE); +		return TCM_CHECK_CONDITION_NOT_READY;  	}  	 /*  	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the @@ -566,27 +737,33 @@ target_alua_state_check(struct se_cmd *cmd)  	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;  	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);  	/* -	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional +	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional  	 * statement so the compiler knows explicitly to check this case first.  	 * For the Optimized ALUA access state case, we want to process the  	 * incoming fabric cmd ASAP..  	 */ -	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) +	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)  		return 0;  	switch (out_alua_state) {  	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: -		ret = core_alua_state_nonoptimized(cmd, cdb, -					nonop_delay_msecs, &alua_ascq); +		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);  		break;  	case ALUA_ACCESS_STATE_STANDBY: -		ret = core_alua_state_standby(cmd, cdb, &alua_ascq); +		if (core_alua_state_standby(cmd, cdb)) +			return TCM_CHECK_CONDITION_NOT_READY;  		break;  	case ALUA_ACCESS_STATE_UNAVAILABLE: -		ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq); +		if (core_alua_state_unavailable(cmd, cdb)) +			return TCM_CHECK_CONDITION_NOT_READY;  		break;  	case ALUA_ACCESS_STATE_TRANSITION: -		ret = core_alua_state_transition(cmd, cdb, &alua_ascq); +		if (core_alua_state_transition(cmd, cdb)) +			return TCM_CHECK_CONDITION_NOT_READY; +		break; +	case ALUA_ACCESS_STATE_LBA_DEPENDENT: +		if (core_alua_state_lba_dependent(cmd, tg_pt_gp)) +			return TCM_CHECK_CONDITION_NOT_READY;  		break;  	/*  	 * OFFLINE is a secondary ALUA target port group access state, that is @@ -599,41 +776,43 @@ target_alua_state_check(struct se_cmd *cmd)  		return TCM_INVALID_CDB_FIELD;  	} -out: -	if (ret > 0) { -		/* -		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; -		 * The ALUA additional sense code qualifier (ASCQ) is determined -		 * by the ALUA primary or secondary access state.. -		 */ -		pr_debug("[%s]: ALUA TG Port not available, " -			"SenseKey: NOT_READY, ASC/ASCQ: " -			"0x04/0x%02x\n", -			cmd->se_tfo->get_fabric_name(), alua_ascq); - -		cmd->scsi_asc = 0x04; -		cmd->scsi_ascq = alua_ascq; -		return TCM_CHECK_CONDITION_NOT_READY; -	} -  	return 0;  }  /* - * Check implict and explict ALUA state change request. + * Check implicit and explicit ALUA state change request.   */  static sense_reason_t -core_alua_check_transition(int state, int *primary) +core_alua_check_transition(int state, int valid, int *primary)  { +	/* +	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are +	 * defined as primary target port asymmetric access states. 
+	 */  	switch (state) { -	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: +	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: +		if (!(valid & ALUA_AO_SUP)) +			goto not_supported; +		*primary = 1; +		break;  	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: +		if (!(valid & ALUA_AN_SUP)) +			goto not_supported; +		*primary = 1; +		break;  	case ALUA_ACCESS_STATE_STANDBY: +		if (!(valid & ALUA_S_SUP)) +			goto not_supported; +		*primary = 1; +		break;  	case ALUA_ACCESS_STATE_UNAVAILABLE: -		/* -		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are -		 * defined as primary target port asymmetric access states. -		 */ +		if (!(valid & ALUA_U_SUP)) +			goto not_supported; +		*primary = 1; +		break; +	case ALUA_ACCESS_STATE_LBA_DEPENDENT: +		if (!(valid & ALUA_LBD_SUP)) +			goto not_supported;  		*primary = 1;  		break;  	case ALUA_ACCESS_STATE_OFFLINE: @@ -641,29 +820,46 @@ core_alua_check_transition(int state, int *primary)  		 * OFFLINE state is defined as a secondary target port  		 * asymmetric access state.  		 */ +		if (!(valid & ALUA_O_SUP)) +			goto not_supported;  		*primary = 0;  		break; +	case ALUA_ACCESS_STATE_TRANSITION: +		/* +		 * Transitioning is set internally, and +		 * cannot be selected manually. +		 */ +		goto not_supported;  	default:  		pr_err("Unknown ALUA access state: 0x%02x\n", state);  		return TCM_INVALID_PARAMETER_LIST;  	}  	return 0; + +not_supported: +	pr_err("ALUA access state %s not supported", +	       core_alua_dump_state(state)); +	return TCM_INVALID_PARAMETER_LIST;  }  static char *core_alua_dump_state(int state)  {  	switch (state) { -	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: +	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:  		return "Active/Optimized";  	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:  		return "Active/NonOptimized"; +	case ALUA_ACCESS_STATE_LBA_DEPENDENT: +		return "LBA Dependent";  	case ALUA_ACCESS_STATE_STANDBY:  		return "Standby";  	case ALUA_ACCESS_STATE_UNAVAILABLE:  		return "Unavailable";  	case ALUA_ACCESS_STATE_OFFLINE:  		return "Offline"; +	case ALUA_ACCESS_STATE_TRANSITION: +		return "Transitioning";  	default:  		return "Unknown";  	} @@ -676,10 +872,10 @@ char *core_alua_dump_status(int status)  	switch (status) {  	case ALUA_STATUS_NONE:  		return "None"; -	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: -		return "Altered by Explict STPG"; -	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: -		return "Altered by Implict ALUA"; +	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG: +		return "Altered by Explicit STPG"; +	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA: +		return "Altered by Implicit ALUA";  	default:  		return "Unknown";  	} @@ -740,58 +936,49 @@ static int core_alua_write_tpg_metadata(   * Called with tg_pt_gp->tg_pt_gp_md_mutex held   */  static int core_alua_update_tpg_primary_metadata( -	struct t10_alua_tg_pt_gp *tg_pt_gp, -	int primary_state, -	unsigned char *md_buf) +	struct t10_alua_tg_pt_gp *tg_pt_gp)  { +	unsigned char *md_buf;  	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;  	char path[ALUA_METADATA_PATH_LEN]; -	int len; +	int len, rc; + +	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); +	if (!md_buf) { +		pr_err("Unable to allocate buf for ALUA metadata\n"); +		return -ENOMEM; +	}  	memset(path, 0, ALUA_METADATA_PATH_LEN); -	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, +	len = snprintf(md_buf, ALUA_MD_BUF_LEN,  			"tg_pt_gp_id=%hu\n"  			"alua_access_state=0x%02x\n"  			"alua_access_status=0x%02x\n", -			tg_pt_gp->tg_pt_gp_id, primary_state, +			tg_pt_gp->tg_pt_gp_id, +			tg_pt_gp->tg_pt_gp_alua_pending_state,  			
tg_pt_gp->tg_pt_gp_alua_access_status);  	snprintf(path, ALUA_METADATA_PATH_LEN,  		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],  		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); -	return core_alua_write_tpg_metadata(path, md_buf, len); +	rc = core_alua_write_tpg_metadata(path, md_buf, len); +	kfree(md_buf); +	return rc;  } -static int core_alua_do_transition_tg_pt( -	struct t10_alua_tg_pt_gp *tg_pt_gp, -	struct se_port *l_port, -	struct se_node_acl *nacl, -	unsigned char *md_buf, -	int new_state, -	int explict) +static void core_alua_do_transition_tg_pt_work(struct work_struct *work)  { +	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, +		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); +	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;  	struct se_dev_entry *se_deve;  	struct se_lun_acl *lacl;  	struct se_port *port;  	struct t10_alua_tg_pt_gp_member *mem; -	int old_state = 0; -	/* -	 * Save the old primary ALUA access state, and set the current state -	 * to ALUA_ACCESS_STATE_TRANSITION. -	 */ -	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); -	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, -			ALUA_ACCESS_STATE_TRANSITION); -	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? -				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : -				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; -	/* -	 * Check for the optional ALUA primary state transition delay -	 */ -	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) -		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); +	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == +			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);  	spin_lock(&tg_pt_gp->tg_pt_gp_lock);  	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, @@ -802,7 +989,7 @@ static int core_alua_do_transition_tg_pt(  		 * change, a device server shall establish a unit attention  		 * condition for the initiator port associated with every I_T  		 * nexus with the additional sense code set to ASYMMETRIC -		 * ACCESS STATE CHAGED. +		 * ACCESS STATE CHANGED.  		 
*  		 * After an explicit target port asymmetric access state  		 * change, a device server shall establish a unit attention @@ -812,7 +999,7 @@ static int core_alua_do_transition_tg_pt(  		 * TARGET PORT GROUPS command  		 */  		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);  		spin_lock_bh(&port->sep_alua_lock); @@ -821,14 +1008,17 @@ static int core_alua_do_transition_tg_pt(  			lacl = se_deve->se_lun_acl;  			/*  			 * se_deve->se_lun_acl pointer may be NULL for a -			 * entry created without explict Node+MappedLUN ACLs +			 * entry created without explicit Node+MappedLUN ACLs  			 */  			if (!lacl)  				continue; -			if (explict && -			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) && -			   (l_port != NULL) && (l_port == port)) +			if ((tg_pt_gp->tg_pt_gp_alua_access_status == +			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && +			   (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && +			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && +			   (tg_pt_gp->tg_pt_gp_alua_port != NULL) && +			    (tg_pt_gp->tg_pt_gp_alua_port == port))  				continue;  			core_scsi3_ua_allocate(lacl->se_lun_nacl, @@ -839,7 +1029,7 @@ static int core_alua_do_transition_tg_pt(  		spin_lock(&tg_pt_gp->tg_pt_gp_lock);  		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  	}  	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);  	/* @@ -856,20 +1046,102 @@ static int core_alua_do_transition_tg_pt(  	 */  	if (tg_pt_gp->tg_pt_gp_write_metadata) {  		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); -		core_alua_update_tpg_primary_metadata(tg_pt_gp, -					new_state, md_buf); +		core_alua_update_tpg_primary_metadata(tg_pt_gp);  		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);  	}  	/*  	 * Set the current primary ALUA access state to the requested new state  	 */ -	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); +	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, +		   tg_pt_gp->tg_pt_gp_alua_pending_state);  	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" -		" from primary access state %s to %s\n", (explict) ? "explict" : -		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), -		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), -		core_alua_dump_state(new_state)); +		" from primary access state %s to %s\n", (explicit) ? 
"explicit" : +		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), +		tg_pt_gp->tg_pt_gp_id, +		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), +		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); +	spin_lock(&dev->t10_alua.tg_pt_gps_lock); +	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); +	smp_mb__after_atomic(); +	spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + +	if (tg_pt_gp->tg_pt_gp_transition_complete) +		complete(tg_pt_gp->tg_pt_gp_transition_complete); +} + +static int core_alua_do_transition_tg_pt( +	struct t10_alua_tg_pt_gp *tg_pt_gp, +	int new_state, +	int explicit) +{ +	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; +	DECLARE_COMPLETION_ONSTACK(wait); + +	/* Nothing to be done here */ +	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) +		return 0; + +	if (new_state == ALUA_ACCESS_STATE_TRANSITION) +		return -EAGAIN; + +	/* +	 * Flush any pending transitions +	 */ +	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && +	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == +	    ALUA_ACCESS_STATE_TRANSITION) { +		/* Just in case */ +		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; +		tg_pt_gp->tg_pt_gp_transition_complete = &wait; +		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); +		wait_for_completion(&wait); +		tg_pt_gp->tg_pt_gp_transition_complete = NULL; +		return 0; +	} + +	/* +	 * Save the old primary ALUA access state, and set the current state +	 * to ALUA_ACCESS_STATE_TRANSITION. +	 */ +	tg_pt_gp->tg_pt_gp_alua_previous_state = +		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); +	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + +	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, +			ALUA_ACCESS_STATE_TRANSITION); +	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
+				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : +				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; + +	/* +	 * Check for the optional ALUA primary state transition delay +	 */ +	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) +		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); + +	/* +	 * Take a reference for workqueue item +	 */ +	spin_lock(&dev->t10_alua.tg_pt_gps_lock); +	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); +	smp_mb__after_atomic(); +	spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + +	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { +		unsigned long transition_tmo; + +		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; +		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, +				   &tg_pt_gp->tg_pt_gp_transition_work, +				   transition_tmo); +	} else { +		tg_pt_gp->tg_pt_gp_transition_complete = &wait; +		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, +				   &tg_pt_gp->tg_pt_gp_transition_work, 0); +		wait_for_completion(&wait); +		tg_pt_gp->tg_pt_gp_transition_complete = NULL; +	}  	return 0;  } @@ -880,31 +1152,23 @@ int core_alua_do_port_transition(  	struct se_port *l_port,  	struct se_node_acl *l_nacl,  	int new_state, -	int explict) +	int explicit)  {  	struct se_device *dev; -	struct se_port *port; -	struct se_node_acl *nacl;  	struct t10_alua_lu_gp *lu_gp;  	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;  	struct t10_alua_tg_pt_gp *tg_pt_gp; -	unsigned char *md_buf; -	int primary; +	int primary, valid_states, rc = 0; -	if (core_alua_check_transition(new_state, &primary) != 0) +	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; +	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)  		return -EINVAL; -	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); -	if (!md_buf) { -		pr_err("Unable to allocate buf for ALUA metadata\n"); -		return -ENOMEM; -	} -  	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;  	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);  	lu_gp = local_lu_gp_mem->lu_gp;  	atomic_inc(&lu_gp->lu_gp_ref_cnt); -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);  	/*  	 * For storage objects that are members of the 'default_lu_gp', @@ -916,12 +1180,13 @@ int core_alua_do_port_transition(  		 * core_alua_do_transition_tg_pt() will always return  		 * success.  		 
*/ -		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, -					md_buf, new_state, explict); +		l_tg_pt_gp->tg_pt_gp_alua_port = l_port; +		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; +		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, +						   new_state, explicit);  		atomic_dec(&lu_gp->lu_gp_ref_cnt); -		smp_mb__after_atomic_dec(); -		kfree(md_buf); -		return 0; +		smp_mb__after_atomic(); +		return rc;  	}  	/*  	 * For all other LU groups aside from 'default_lu_gp', walk all of @@ -934,7 +1199,7 @@ int core_alua_do_port_transition(  		dev = lu_gp_mem->lu_gp_mem_dev;  		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		spin_unlock(&lu_gp->lu_gp_lock);  		spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -946,7 +1211,7 @@ int core_alua_do_port_transition(  				continue;  			/*  			 * If the target behavior port asymmetric access state -			 * is changed for any target port group accessiable via +			 * is changed for any target port group accessible via  			 * a logical unit within a LU group, the target port  			 * behavior group asymmetric access states for the same  			 * target port group accessible via other logical units @@ -956,44 +1221,48 @@ int core_alua_do_port_transition(  				continue;  			if (l_tg_pt_gp == tg_pt_gp) { -				port = l_port; -				nacl = l_nacl; +				tg_pt_gp->tg_pt_gp_alua_port = l_port; +				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;  			} else { -				port = NULL; -				nacl = NULL; +				tg_pt_gp->tg_pt_gp_alua_port = NULL; +				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;  			}  			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); -			smp_mb__after_atomic_inc(); +			smp_mb__after_atomic();  			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);  			/*  			 * core_alua_do_transition_tg_pt() will always return  			 * success.  			 */ -			core_alua_do_transition_tg_pt(tg_pt_gp, port, -					nacl, md_buf, new_state, explict); +			rc = core_alua_do_transition_tg_pt(tg_pt_gp, +					new_state, explicit);  			spin_lock(&dev->t10_alua.tg_pt_gps_lock);  			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); -			smp_mb__after_atomic_dec(); +			smp_mb__after_atomic(); +			if (rc) +				break;  		}  		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);  		spin_lock(&lu_gp->lu_gp_lock);  		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  	}  	spin_unlock(&lu_gp->lu_gp_lock); -	pr_debug("Successfully processed LU Group: %s all ALUA TG PT" -		" Group IDs: %hu %s transition to primary state: %s\n", -		config_item_name(&lu_gp->lu_gp_group.cg_item), -		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", -		core_alua_dump_state(new_state)); +	if (!rc) { +		pr_debug("Successfully processed LU Group: %s all ALUA TG PT" +			 " Group IDs: %hu %s transition to primary state: %s\n", +			 config_item_name(&lu_gp->lu_gp_group.cg_item), +			 l_tg_pt_gp->tg_pt_gp_id, +			 (explicit) ? 
"explicit" : "implicit", +			 core_alua_dump_state(new_state)); +	}  	atomic_dec(&lu_gp->lu_gp_ref_cnt); -	smp_mb__after_atomic_dec(); -	kfree(md_buf); -	return 0; +	smp_mb__after_atomic(); +	return rc;  }  /* @@ -1001,13 +1270,18 @@ int core_alua_do_port_transition(   */  static int core_alua_update_tpg_secondary_metadata(  	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, -	struct se_port *port, -	unsigned char *md_buf, -	u32 md_buf_len) +	struct se_port *port)  { +	unsigned char *md_buf;  	struct se_portal_group *se_tpg = port->sep_tpg;  	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; -	int len; +	int len, rc; + +	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); +	if (!md_buf) { +		pr_err("Unable to allocate buf for ALUA metadata\n"); +		return -ENOMEM; +	}  	memset(path, 0, ALUA_METADATA_PATH_LEN);  	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); @@ -1019,7 +1293,7 @@ static int core_alua_update_tpg_secondary_metadata(  		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",  				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); -	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" +	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"  			"alua_tg_pt_status=0x%02x\n",  			atomic_read(&port->sep_tg_pt_secondary_offline),  			port->sep_tg_pt_secondary_stat); @@ -1028,18 +1302,19 @@ static int core_alua_update_tpg_secondary_metadata(  			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,  			port->sep_lun->unpacked_lun); -	return core_alua_write_tpg_metadata(path, md_buf, len); +	rc = core_alua_write_tpg_metadata(path, md_buf, len); +	kfree(md_buf); + +	return rc;  }  static int core_alua_set_tg_pt_secondary_state(  	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,  	struct se_port *port, -	int explict, +	int explicit,  	int offline)  {  	struct t10_alua_tg_pt_gp *tg_pt_gp; -	unsigned char *md_buf; -	u32 md_buf_len;  	int trans_delay_msecs;  	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1060,14 +1335,13 @@ static int core_alua_set_tg_pt_secondary_state(  	else  		atomic_set(&port->sep_tg_pt_secondary_offline, 0); -	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; -	port->sep_tg_pt_secondary_stat = (explict) ? -			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : -			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; +	port->sep_tg_pt_secondary_stat = (explicit) ? +			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : +			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;  	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" -		" to secondary access state: %s\n", (explict) ? "explict" : -		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), +		" to secondary access state: %s\n", (explicit) ? "explicit" : +		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),  		tg_pt_gp->tg_pt_gp_id, (offline) ? 
"OFFLINE" : "ONLINE");  	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1082,23 +1356,115 @@ static int core_alua_set_tg_pt_secondary_state(  	 * secondary state and status  	 */  	if (port->sep_tg_pt_secondary_write_md) { -		md_buf = kzalloc(md_buf_len, GFP_KERNEL); -		if (!md_buf) { -			pr_err("Unable to allocate md_buf for" -				" secondary ALUA access metadata\n"); -			return -ENOMEM; -		}  		mutex_lock(&port->sep_tg_pt_md_mutex); -		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, -				md_buf, md_buf_len); +		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);  		mutex_unlock(&port->sep_tg_pt_md_mutex); +	} + +	return 0; +} -		kfree(md_buf); +struct t10_alua_lba_map * +core_alua_allocate_lba_map(struct list_head *list, +			   u64 first_lba, u64 last_lba) +{ +	struct t10_alua_lba_map *lba_map; + +	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); +	if (!lba_map) { +		pr_err("Unable to allocate struct t10_alua_lba_map\n"); +		return ERR_PTR(-ENOMEM);  	} +	INIT_LIST_HEAD(&lba_map->lba_map_mem_list); +	lba_map->lba_map_first_lba = first_lba; +	lba_map->lba_map_last_lba = last_lba; + +	list_add_tail(&lba_map->lba_map_list, list); +	return lba_map; +} + +int +core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, +			       int pg_id, int state) +{ +	struct t10_alua_lba_map_member *lba_map_mem; +	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, +			    lba_map_mem_list) { +		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { +			pr_err("Duplicate pg_id %d in lba_map\n", pg_id); +			return -EINVAL; +		} +	} + +	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); +	if (!lba_map_mem) { +		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); +		return -ENOMEM; +	} +	lba_map_mem->lba_map_mem_alua_state = state; +	lba_map_mem->lba_map_mem_alua_pg_id = pg_id; + +	list_add_tail(&lba_map_mem->lba_map_mem_list, +		      &lba_map->lba_map_mem_list);  	return 0;  } +void +core_alua_free_lba_map(struct list_head *lba_list) +{ +	struct t10_alua_lba_map *lba_map, *lba_map_tmp; +	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; + +	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, +				 lba_map_list) { +		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp, +					 &lba_map->lba_map_mem_list, +					 lba_map_mem_list) { +			list_del(&lba_map_mem->lba_map_mem_list); +			kmem_cache_free(t10_alua_lba_map_mem_cache, +					lba_map_mem); +		} +		list_del(&lba_map->lba_map_list); +		kmem_cache_free(t10_alua_lba_map_cache, lba_map); +	} +} + +void +core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, +		      int segment_size, int segment_mult) +{ +	struct list_head old_lba_map_list; +	struct t10_alua_tg_pt_gp *tg_pt_gp; +	int activate = 0, supported; + +	INIT_LIST_HEAD(&old_lba_map_list); +	spin_lock(&dev->t10_alua.lba_map_lock); +	dev->t10_alua.lba_map_segment_size = segment_size; +	dev->t10_alua.lba_map_segment_multiplier = segment_mult; +	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); +	if (lba_map_list) { +		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); +		activate = 1; +	} +	spin_unlock(&dev->t10_alua.lba_map_lock); +	spin_lock(&dev->t10_alua.tg_pt_gps_lock); +	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, +			    tg_pt_gp_list) { + +		if (!tg_pt_gp->tg_pt_gp_valid_id) +			continue; +		supported = tg_pt_gp->tg_pt_gp_alua_supported_states; +		if (activate) +			supported |= ALUA_LBD_SUP; +		else +			supported &= ~ALUA_LBD_SUP; +	
	tg_pt_gp->tg_pt_gp_alua_supported_states = supported; +	} +	spin_unlock(&dev->t10_alua.tg_pt_gps_lock); +	core_alua_free_lba_map(&old_lba_map_list); +} +  struct t10_alua_lu_gp *  core_alua_allocate_lu_gp(const char *name, int def_group)  { @@ -1232,7 +1598,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)  		 * struct se_device is released via core_alua_free_lu_gp_mem().  		 *  		 * If the passed lu_gp does NOT match the default_lu_gp, assume -		 * we want to re-assocate a given lu_gp_mem with default_lu_gp. +		 * we want to re-associate a given lu_gp_mem with default_lu_gp.  		 */  		spin_lock(&lu_gp_mem->lu_gp_mem_lock);  		if (lu_gp != default_lu_gp) @@ -1351,21 +1717,29 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,  	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);  	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);  	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); +	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, +			  core_alua_do_transition_tg_pt_work);  	tg_pt_gp->tg_pt_gp_dev = dev; -	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;  	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, -		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); +		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);  	/* -	 * Enable both explict and implict ALUA support by default +	 * Enable both explicit and implicit ALUA support by default  	 */  	tg_pt_gp->tg_pt_gp_alua_access_type = -			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; +			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;  	/*  	 * Set the default Active/NonOptimized Delay in milliseconds  	 */  	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;  	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; -	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; +	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS; + +	/* +	 * Enable all supported states +	 */ +	tg_pt_gp->tg_pt_gp_alua_supported_states = +	    ALUA_T_SUP | ALUA_O_SUP | +	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;  	if (def_group) {  		spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1465,7 +1839,7 @@ void core_alua_free_tg_pt_gp(  	 * been called from target_core_alua_drop_tg_pt_gp().  	 *  	 * Here we remove *tg_pt_gp from the global list so that -	 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS +	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS  	 * can be made while we are releasing struct t10_alua_tg_pt_gp.  	 */  	spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1473,6 +1847,8 @@ void core_alua_free_tg_pt_gp(  	dev->t10_alua.alua_tg_pt_gps_counter--;  	spin_unlock(&dev->t10_alua.tg_pt_gps_lock); +	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); +  	/*  	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by  	 * core_alua_get_tg_pt_gp_by_name() in @@ -1501,7 +1877,7 @@ void core_alua_free_tg_pt_gp(  		 * core_alua_free_tg_pt_gp_mem().  		 *  		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, -		 * assume we want to re-assocate a given tg_pt_gp_mem with +		 * assume we want to re-associate a given tg_pt_gp_mem with  		 * default_tg_pt_gp.  		 
*/  		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1740,13 +2116,13 @@ ssize_t core_alua_show_access_type(  	struct t10_alua_tg_pt_gp *tg_pt_gp,  	char *page)  { -	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && -	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) -		return sprintf(page, "Implict and Explict\n"); -	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) -		return sprintf(page, "Implict\n"); -	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) -		return sprintf(page, "Explict\n"); +	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) && +	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) +		return sprintf(page, "Implicit and Explicit\n"); +	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA) +		return sprintf(page, "Implicit\n"); +	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) +		return sprintf(page, "Explicit\n");  	else  		return sprintf(page, "None\n");  } @@ -1771,11 +2147,11 @@ ssize_t core_alua_store_access_type(  	}  	if (tmp == 3)  		tg_pt_gp->tg_pt_gp_alua_access_type = -			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; +			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;  	else if (tmp == 2) -		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; +		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;  	else if (tmp == 1) -		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; +		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;  	else  		tg_pt_gp->tg_pt_gp_alua_access_type = 0; @@ -1844,14 +2220,14 @@ ssize_t core_alua_store_trans_delay_msecs(  	return count;  } -ssize_t core_alua_show_implict_trans_secs( +ssize_t core_alua_show_implicit_trans_secs(  	struct t10_alua_tg_pt_gp *tg_pt_gp,  	char *page)  { -	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); +	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);  } -ssize_t core_alua_store_implict_trans_secs( +ssize_t core_alua_store_implicit_trans_secs(  	struct t10_alua_tg_pt_gp *tg_pt_gp,  	const char *page,  	size_t count) @@ -1861,16 +2237,16 @@ ssize_t core_alua_store_implict_trans_secs(  	ret = kstrtoul(page, 0, &tmp);  	if (ret < 0) { -		pr_err("Unable to extract implict_trans_secs\n"); +		pr_err("Unable to extract implicit_trans_secs\n");  		return ret;  	} -	if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { -		pr_err("Passed implict_trans_secs: %lu, exceeds" -			" ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp, -			ALUA_MAX_IMPLICT_TRANS_SECS); +	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) { +		pr_err("Passed implicit_trans_secs: %lu, exceeds" +			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp, +			ALUA_MAX_IMPLICIT_TRANS_SECS);  		return  -EINVAL;  	} -	tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp; +	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;  	return count;  } @@ -1970,8 +2346,8 @@ ssize_t core_alua_store_secondary_status(  		return ret;  	}  	if ((tmp != ALUA_STATUS_NONE) && -	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && -	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { +	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && +	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {  		pr_err("Illegal value for alua_tg_pt_status: %lu\n",  				tmp);  		return -EINVAL; diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index e539c3e7f4a..0a7d65e8040 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -7,29 +7,41 @@   * from spc4r17 section 6.4.2 Table 135   */  #define TPGS_NO_ALUA				
0x00 -#define TPGS_IMPLICT_ALUA			0x10 -#define TPGS_EXPLICT_ALUA			0x20 +#define TPGS_IMPLICIT_ALUA			0x10 +#define TPGS_EXPLICIT_ALUA			0x20  /*   * ASYMMETRIC ACCESS STATE field   * - * from spc4r17 section 6.27 Table 245 + * from spc4r36j section 6.37 Table 307   */ -#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED	0x0 +#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED	0x0  #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1  #define ALUA_ACCESS_STATE_STANDBY		0x2  #define ALUA_ACCESS_STATE_UNAVAILABLE		0x3 +#define ALUA_ACCESS_STATE_LBA_DEPENDENT		0x4  #define ALUA_ACCESS_STATE_OFFLINE		0xe  #define ALUA_ACCESS_STATE_TRANSITION		0xf  /* + * from spc4r36j section 6.37 Table 306 + */ +#define ALUA_T_SUP		0x80 +#define ALUA_O_SUP		0x40 +#define ALUA_LBD_SUP		0x10 +#define ALUA_U_SUP		0x08 +#define ALUA_S_SUP		0x04 +#define ALUA_AN_SUP		0x02 +#define ALUA_AO_SUP		0x01 + +/*   * REPORT_TARGET_PORT_GROUP STATUS CODE   *   * from spc4r17 section 6.27 Table 246   */  #define ALUA_STATUS_NONE				0x00 -#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG		0x01 -#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA		0x02 +#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG		0x01 +#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA		0x02  /*   * From spc4r17, Table D.1: ASC and ASCQ Assignement @@ -46,17 +58,17 @@  #define ALUA_DEFAULT_NONOP_DELAY_MSECS			100  #define ALUA_MAX_NONOP_DELAY_MSECS			10000 /* 10 seconds */  /* - * Used for implict and explict ALUA transitional delay, that is disabled + * Used for implicit and explicit ALUA transitional delay, that is disabled   * by default, and is intended to be used for debugging client side ALUA code.   */  #define ALUA_DEFAULT_TRANS_DELAY_MSECS			0  #define ALUA_MAX_TRANS_DELAY_MSECS			30000 /* 30 seconds */  /* - * Used for the recommended application client implict transition timeout + * Used for the recommended application client implicit transition timeout   * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.   
*/ -#define ALUA_DEFAULT_IMPLICT_TRANS_SECS			0 -#define ALUA_MAX_IMPLICT_TRANS_SECS			255 +#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS			0 +#define ALUA_MAX_IMPLICIT_TRANS_SECS			255  /*   * Used by core_alua_update_tpg_primary_metadata() and   * core_alua_update_tpg_secondary_metadata() @@ -67,18 +79,30 @@   */  #define ALUA_SECONDARY_METADATA_WWN_LEN			256 +/* Used by core_alua_update_tpg_(primary,secondary)_metadata */ +#define ALUA_MD_BUF_LEN					1024 +  extern struct kmem_cache *t10_alua_lu_gp_cache;  extern struct kmem_cache *t10_alua_lu_gp_mem_cache;  extern struct kmem_cache *t10_alua_tg_pt_gp_cache;  extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +extern struct kmem_cache *t10_alua_lba_map_cache; +extern struct kmem_cache *t10_alua_lba_map_mem_cache;  extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);  extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); +extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);  extern int core_alua_check_nonop_delay(struct se_cmd *);  extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,  				struct se_device *, struct se_port *,  				struct se_node_acl *, int, int);  extern char *core_alua_dump_status(int); +extern struct t10_alua_lba_map *core_alua_allocate_lba_map( +				struct list_head *, u64, u64); +extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int); +extern void core_alua_free_lba_map(struct list_head *); +extern void core_alua_set_lba_map(struct se_device *, struct list_head *, +				int, int);  extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);  extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);  extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); @@ -113,9 +137,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,  					char *);  extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,  					const char *, size_t); -extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,  					char *); -extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,  					const char *, size_t);  extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,  					char *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 82e81c542e4..bf55c5a04cf 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric(  	 * struct target_fabric_configfs *tf will contain a usage reference.  	 
*/  	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", -			&TF_CIT_TMPL(tf)->tfc_wwn_cit); +			&tf->tf_cit_tmpl.tfc_wwn_cit);  	tf->tf_group.default_groups = tf->tf_default_groups;  	tf->tf_group.default_groups[0] = &tf->tf_disc_group;  	tf->tf_group.default_groups[1] = NULL;  	config_group_init_type_name(&tf->tf_group, name, -			&TF_CIT_TMPL(tf)->tfc_wwn_cit); +			&tf->tf_cit_tmpl.tfc_wwn_cit);  	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", -			&TF_CIT_TMPL(tf)->tfc_discovery_cit); +			&tf->tf_cit_tmpl.tfc_discovery_cit);  	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"  			" %s\n", tf->tf_group.cg_item.ci_name); @@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check(  		pr_err("Missing tfo->queue_tm_rsp()\n");  		return -EINVAL;  	} +	if (!tfo->aborted_task) { +		pr_err("Missing tfo->aborted_task()\n"); +		return -EINVAL; +	}  	/*  	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()  	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in @@ -643,6 +647,15 @@ SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);  DEF_DEV_ATTRIB(emulate_3pc);  SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(pi_prot_type); +SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB_RO(hw_pi_prot_type); +SE_DEV_ATTR_RO(hw_pi_prot_type); + +DEF_DEV_ATTRIB(pi_prot_format); +SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR); +  DEF_DEV_ATTRIB(enforce_pr_isids);  SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); @@ -702,6 +715,9 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {  	&target_core_dev_attrib_emulate_tpws.attr,  	&target_core_dev_attrib_emulate_caw.attr,  	&target_core_dev_attrib_emulate_3pc.attr, +	&target_core_dev_attrib_pi_prot_type.attr, +	&target_core_dev_attrib_hw_pi_prot_type.attr, +	&target_core_dev_attrib_pi_prot_format.attr,  	&target_core_dev_attrib_enforce_pr_isids.attr,  	&target_core_dev_attrib_is_nonrot.attr,  	&target_core_dev_attrib_emulate_rest_reord.attr, @@ -1741,6 +1757,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {  	.store	= target_core_store_alua_lu_gp,  }; +static ssize_t target_core_show_dev_lba_map(void *p, char *page) +{ +	struct se_device *dev = p; +	struct t10_alua_lba_map *map; +	struct t10_alua_lba_map_member *mem; +	char *b = page; +	int bl = 0; +	char state; + +	spin_lock(&dev->t10_alua.lba_map_lock); +	if (!list_empty(&dev->t10_alua.lba_map_list)) +	    bl += sprintf(b + bl, "%u %u\n", +			  dev->t10_alua.lba_map_segment_size, +			  dev->t10_alua.lba_map_segment_multiplier); +	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { +		bl += sprintf(b + bl, "%llu %llu", +			      map->lba_map_first_lba, map->lba_map_last_lba); +		list_for_each_entry(mem, &map->lba_map_mem_list, +				    lba_map_mem_list) { +			switch (mem->lba_map_mem_alua_state) { +			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: +				state = 'O'; +				break; +			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: +				state = 'A'; +				break; +			case ALUA_ACCESS_STATE_STANDBY: +				state = 'S'; +				break; +			case ALUA_ACCESS_STATE_UNAVAILABLE: +				state = 'U'; +				break; +			default: +				state = '.'; +				break; +			} +			bl += sprintf(b + bl, " %d:%c", +				      mem->lba_map_mem_alua_pg_id, state); +		} +		bl += sprintf(b + bl, "\n"); +	} +	spin_unlock(&dev->t10_alua.lba_map_lock); +	return bl; +} + +static ssize_t target_core_store_dev_lba_map( +	void *p, +	const char *page, +	size_t count) +{ +	struct se_device *dev = p; +	struct t10_alua_lba_map 
*lba_map = NULL; +	struct list_head lba_list; +	char *map_entries, *ptr; +	char state; +	int pg_num = -1, pg; +	int ret = 0, num = 0, pg_id, alua_state; +	unsigned long start_lba = -1, end_lba = -1; +	unsigned long segment_size = -1, segment_mult = -1; + +	map_entries = kstrdup(page, GFP_KERNEL); +	if (!map_entries) +		return -ENOMEM; + +	INIT_LIST_HEAD(&lba_list); +	while ((ptr = strsep(&map_entries, "\n")) != NULL) { +		if (!*ptr) +			continue; + +		if (num == 0) { +			if (sscanf(ptr, "%lu %lu\n", +				   &segment_size, &segment_mult) != 2) { +				pr_err("Invalid line %d\n", num); +				ret = -EINVAL; +				break; +			} +			num++; +			continue; +		} +		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { +			pr_err("Invalid line %d\n", num); +			ret = -EINVAL; +			break; +		} +		ptr = strchr(ptr, ' '); +		if (!ptr) { +			pr_err("Invalid line %d, missing end lba\n", num); +			ret = -EINVAL; +			break; +		} +		ptr++; +		ptr = strchr(ptr, ' '); +		if (!ptr) { +			pr_err("Invalid line %d, missing state definitions\n", +			       num); +			ret = -EINVAL; +			break; +		} +		ptr++; +		lba_map = core_alua_allocate_lba_map(&lba_list, +						     start_lba, end_lba); +		if (IS_ERR(lba_map)) { +			ret = PTR_ERR(lba_map); +			break; +		} +		pg = 0; +		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { +			switch (state) { +			case 'O': +				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; +				break; +			case 'A': +				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; +				break; +			case 'S': +				alua_state = ALUA_ACCESS_STATE_STANDBY; +				break; +			case 'U': +				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; +				break; +			default: +				pr_err("Invalid ALUA state '%c'\n", state); +				ret = -EINVAL; +				goto out; +			} + +			ret = core_alua_allocate_lba_map_mem(lba_map, +							     pg_id, alua_state); +			if (ret) { +				pr_err("Invalid target descriptor %d:%c " +				       "at line %d\n", +				       pg_id, state, num); +				break; +			} +			pg++; +			ptr = strchr(ptr, ' '); +			if (ptr) +				ptr++; +			else +				break; +		} +		if (pg_num == -1) +		    pg_num = pg; +		else if (pg != pg_num) { +			pr_err("Only %d from %d port groups definitions " +			       "at line %d\n", pg, pg_num, num); +			ret = -EINVAL; +			break; +		} +		num++; +	} +out: +	if (ret) { +		core_alua_free_lba_map(&lba_list); +		count = ret; +	} else +		core_alua_set_lba_map(dev, &lba_list, +				      segment_size, segment_mult); +	kfree(map_entries); +	return count; +} + +static struct target_core_configfs_attribute target_core_attr_dev_lba_map = { +	.attr	= { .ca_owner = THIS_MODULE, +		    .ca_name = "lba_map", +		    .ca_mode = S_IRUGO | S_IWUSR }, +	.show	= target_core_show_dev_lba_map, +	.store	= target_core_store_dev_lba_map, +}; +  static struct configfs_attribute *lio_core_dev_attrs[] = {  	&target_core_attr_dev_info.attr,  	&target_core_attr_dev_control.attr, @@ -1748,6 +1934,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {  	&target_core_attr_dev_udev_path.attr,  	&target_core_attr_dev_enable.attr,  	&target_core_attr_dev_alua_lu_gp.attr, +	&target_core_attr_dev_lba_map.attr,  	NULL,  }; @@ -2036,10 +2223,15 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(  	int new_state, ret;  	if (!tg_pt_gp->tg_pt_gp_valid_id) { -		pr_err("Unable to do implict ALUA on non valid" +		pr_err("Unable to do implicit ALUA on non valid"  			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);  		return -EINVAL;  	} +	if (!(dev->dev_flags & DF_CONFIGURED)) { +		pr_err("Unable to set 
alua_access_state while device is" +		       " not configured\n"); +		return -ENODEV; +	}  	ret = kstrtoul(page, 0, &tmp);  	if (ret < 0) { @@ -2049,9 +2241,16 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(  	}  	new_state = (int)tmp; -	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { -		pr_err("Unable to process implict configfs ALUA" -			" transition while TPGS_IMPLICT_ALUA is disabled\n"); +	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { +		pr_err("Unable to process implicit configfs ALUA" +			" transition while TPGS_IMPLICIT_ALUA is disabled\n"); +		return -EINVAL; +	} +	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && +	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { +		/* LBA DEPENDENT is only allowed with implicit ALUA */ +		pr_err("Unable to process implicit configfs ALUA transition" +		       " while explicit ALUA management is enabled\n");  		return -EINVAL;  	} @@ -2097,8 +2296,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(  	new_status = (int)tmp;  	if ((new_status != ALUA_STATUS_NONE) && -	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && -	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { +	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && +	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {  		pr_err("Illegal ALUA access status: 0x%02x\n",  				new_status);  		return -EINVAL; @@ -2131,6 +2330,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(  SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);  /* + * alua_supported_states + */ + +#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit)		\ +static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \ +	struct t10_alua_tg_pt_gp *t, char *p)				\ +{									\ +	return sprintf(p, "%d\n", !!(t->_var & _bit));			\ +} + +#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit)		\ +static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\ +	struct t10_alua_tg_pt_gp *t, const char *p, size_t c)		\ +{									\ +	unsigned long tmp;						\ +	int ret;							\ +									\ +	if (!t->tg_pt_gp_valid_id) {					\ +		pr_err("Unable to do set ##_name ALUA state on non"	\ +		       " valid tg_pt_gp ID: %hu\n",			\ +		       t->tg_pt_gp_valid_id);				\ +		return -EINVAL;						\ +	}								\ +									\ +	ret = kstrtoul(p, 0, &tmp);					\ +	if (ret < 0) {							\ +		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\ +		return -EINVAL;						\ +	}								\ +	if (tmp > 1) {							\ +		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ +		return -EINVAL;						\ +	}								\ +	if (!tmp)							\ +		t->_var |= _bit;					\ +	else								\ +		t->_var &= ~_bit;					\ +									\ +	return c;							\ +} + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning, +			       tg_pt_gp_alua_supported_states, ALUA_T_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning, +				tg_pt_gp_alua_supported_states, ALUA_T_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline, +			       tg_pt_gp_alua_supported_states, ALUA_O_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(offline, +				tg_pt_gp_alua_supported_states, ALUA_O_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent, +			       tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, +				tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); 
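/*
 * Illustrative expansion (editor's sketch, not part of the patch): for the
 * lba_dependent case, the SE_DEV_ALUA_SUPPORT_STATE_SHOW() macro above
 * generates a configfs show handler along these lines:
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_lba_dependent(
	struct t10_alua_tg_pt_gp *t, char *p)
{
	/* report the ALUA_LBD_SUP bit of the supported-states mask as 0/1 */
	return sprintf(p, "%d\n",
		       !!(t->tg_pt_gp_alua_supported_states & ALUA_LBD_SUP));
}
/*
 * Note that alua_support_lba_dependent just below is registered S_IRUGO
 * only: the ALUA_LBD_SUP bit is toggled by core_alua_set_lba_map() when an
 * lba_map is installed, rather than written directly through configfs.
 */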
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, +			       tg_pt_gp_alua_supported_states, ALUA_U_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable, +				tg_pt_gp_alua_supported_states, ALUA_U_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby, +			       tg_pt_gp_alua_supported_states, ALUA_S_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(standby, +				tg_pt_gp_alua_supported_states, ALUA_S_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized, +			       tg_pt_gp_alua_supported_states, ALUA_AO_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized, +				tg_pt_gp_alua_supported_states, ALUA_AO_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized, +			       tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized, +				tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR); + +/*   * alua_write_metadata   */  static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( @@ -2210,24 +2493,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(  SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);  /* - * implict_trans_secs + * implicit_trans_secs   */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(  	struct t10_alua_tg_pt_gp *tg_pt_gp,  	char *page)  { -	return core_alua_show_implict_trans_secs(tg_pt_gp, page); +	return core_alua_show_implicit_trans_secs(tg_pt_gp, page);  } -static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(  	struct t10_alua_tg_pt_gp *tg_pt_gp,  	const char *page,  	size_t count)  { -	return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); +	return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);  } -SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); +SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);  /*   * preferred @@ -2350,10 +2633,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {  	&target_core_alua_tg_pt_gp_alua_access_state.attr,  	&target_core_alua_tg_pt_gp_alua_access_status.attr,  	&target_core_alua_tg_pt_gp_alua_access_type.attr, +	&target_core_alua_tg_pt_gp_alua_support_transitioning.attr, +	&target_core_alua_tg_pt_gp_alua_support_offline.attr, +	&target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr, +	&target_core_alua_tg_pt_gp_alua_support_unavailable.attr, +	&target_core_alua_tg_pt_gp_alua_support_standby.attr, +	&target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr, +	&target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,  	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,  	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,  	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr, -	&target_core_alua_tg_pt_gp_implict_trans_secs.attr, +	&target_core_alua_tg_pt_gp_implicit_trans_secs.attr,  	&target_core_alua_tg_pt_gp_preferred.attr,  	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,  	&target_core_alua_tg_pt_gp_members.attr, @@ -2846,7 +3136,7 @@ static int __init target_core_init_configfs(void)  	 * and ALUA Logical Unit Group and Target Port Group 
infrastructure.  	 */  	target_cg = &subsys->su_group; -	target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, +	target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,  				GFP_KERNEL);  	if (!target_cg->default_groups) {  		pr_err("Unable to allocate target_cg->default_groups\n"); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index d90dbb0f1a6..98da9016715 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)  		se_cmd->pr_res_key = deve->pr_res_key;  		se_cmd->orig_fe_lun = unpacked_lun;  		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + +		percpu_ref_get(&se_lun->lun_ref); +		se_cmd->lun_ref_active = true;  	}  	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)  		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;  		se_cmd->orig_fe_lun = 0;  		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + +		percpu_ref_get(&se_lun->lun_ref); +		se_cmd->lun_ref_active = true;  	}  	/* Directly associate cmd with se_dev */  	se_cmd->se_dev = se_lun->lun_se_dev; -	/* TODO: get rid of this and use atomics for stats */  	dev = se_lun->lun_se_dev; -	spin_lock_irqsave(&dev->stats_lock, flags); -	dev->num_cmds++; +	atomic_long_inc(&dev->num_cmds);  	if (se_cmd->data_direction == DMA_TO_DEVICE) -		dev->write_bytes += se_cmd->data_length; +		atomic_long_add(se_cmd->data_length, &dev->write_bytes);  	else if (se_cmd->data_direction == DMA_FROM_DEVICE) -		dev->read_bytes += se_cmd->data_length; -	spin_unlock_irqrestore(&dev->stats_lock, flags); - -	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); -	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); -	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); +		atomic_long_add(se_cmd->data_length, &dev->read_bytes);  	return 0;  } @@ -226,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(  			continue;  		atomic_inc(&deve->pr_ref_count); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		spin_unlock_irq(&nacl->device_list_lock);  		return deve; @@ -314,14 +313,14 @@ int core_enable_device_list_for_node(  	deve = nacl->device_list[mapped_lun];  	/* -	 * Check if the call is handling demo mode -> explict LUN ACL +	 * Check if the call is handling demo mode -> explicit LUN ACL  	 * transition.  This transition must be for the same struct se_lun  	 * + mapped_lun that was setup in demo mode..  	 
*/  	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {  		if (deve->se_lun_acl != NULL) {  			pr_err("struct se_dev_entry->se_lun_acl" -			       " already set for demo mode -> explict" +			       " already set for demo mode -> explicit"  			       " LUN ACL transition\n");  			spin_unlock_irq(&nacl->device_list_lock);  			return -EINVAL; @@ -329,7 +328,7 @@ int core_enable_device_list_for_node(  		if (deve->se_lun != lun) {  			pr_err("struct se_dev_entry->se_lun does"  			       " match passed struct se_lun for demo mode" -			       " -> explict LUN ACL transition\n"); +			       " -> explicit LUN ACL transition\n");  			spin_unlock_irq(&nacl->device_list_lock);  			return -EINVAL;  		} @@ -617,6 +616,7 @@ void core_dev_unexport(  	dev->export_count--;  	spin_unlock(&hba->device_lock); +	lun->lun_sep = NULL;  	lun->lun_se_dev = NULL;  } @@ -799,10 +799,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)  		pr_err("emulate_write_cache not supported for pSCSI\n");  		return -EINVAL;  	} -	if (dev->transport->get_write_cache) { -		pr_warn("emulate_write_cache cannot be changed when underlying" -			" HW reports WriteCacheEnabled, ignoring request\n"); -		return 0; +	if (flag && +	    dev->transport->get_write_cache) { +		pr_err("emulate_write_cache not supported for this device\n"); +		return -EINVAL;  	}  	dev->dev_attrib.emulate_write_cache = flag; @@ -919,6 +919,94 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)  	return 0;  } +int se_dev_set_pi_prot_type(struct se_device *dev, int flag) +{ +	int rc, old_prot = dev->dev_attrib.pi_prot_type; + +	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { +		pr_err("Illegal value %d for pi_prot_type\n", flag); +		return -EINVAL; +	} +	if (flag == 2) { +		pr_err("DIF TYPE2 protection currently not supported\n"); +		return -ENOSYS; +	} +	if (dev->dev_attrib.hw_pi_prot_type) { +		pr_warn("DIF protection enabled on underlying hardware," +			" ignoring\n"); +		return 0; +	} +	if (!dev->transport->init_prot || !dev->transport->free_prot) { +		/* 0 is only allowed value for non-supporting backends */ +		if (flag == 0) +			return 0; + +		pr_err("DIF protection not supported by backend: %s\n", +		       dev->transport->name); +		return -ENOSYS; +	} +	if (!(dev->dev_flags & DF_CONFIGURED)) { +		pr_err("DIF protection requires device to be configured\n"); +		return -ENODEV; +	} +	if (dev->export_count) { +		pr_err("dev[%p]: Unable to change SE Device PROT type while" +		       " export_count is %d\n", dev, dev->export_count); +		return -EINVAL; +	} + +	dev->dev_attrib.pi_prot_type = flag; + +	if (flag && !old_prot) { +		rc = dev->transport->init_prot(dev); +		if (rc) { +			dev->dev_attrib.pi_prot_type = old_prot; +			return rc; +		} + +	} else if (!flag && old_prot) { +		dev->transport->free_prot(dev); +	} +	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); + +	return 0; +} + +int se_dev_set_pi_prot_format(struct se_device *dev, int flag) +{ +	int rc; + +	if (!flag) +		return 0; + +	if (flag != 1) { +		pr_err("Illegal value %d for pi_prot_format\n", flag); +		return -EINVAL; +	} +	if (!dev->transport->format_prot) { +		pr_err("DIF protection format not supported by backend %s\n", +		       dev->transport->name); +		return -ENOSYS; +	} +	if (!(dev->dev_flags & DF_CONFIGURED)) { +		pr_err("DIF protection format requires device to be configured\n"); +		return -ENODEV; +	} +	if (dev->export_count) { +		pr_err("dev[%p]: Unable to format SE Device PROT type while" +		       " export_count is %d\n", 
dev, dev->export_count); +		return -EINVAL; +	} + +	rc = dev->transport->format_prot(dev); +	if (rc) +		return rc; + +	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); + +	return 0; +} +  int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)  {  	if ((flag != 0) && (flag != 1)) { @@ -1107,29 +1195,34 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)  	dev->dev_attrib.block_size = block_size;  	pr_debug("dev[%p]: SE Device block_size changed to %u\n",  			dev, block_size); + +	if (dev->dev_attrib.max_bytes_per_io) +		dev->dev_attrib.hw_max_sectors = +			dev->dev_attrib.max_bytes_per_io / block_size; +  	return 0;  }  struct se_lun *core_dev_add_lun(  	struct se_portal_group *tpg,  	struct se_device *dev, -	u32 lun) +	u32 unpacked_lun)  { -	struct se_lun *lun_p; +	struct se_lun *lun;  	int rc; -	lun_p = core_tpg_pre_addlun(tpg, lun); -	if (IS_ERR(lun_p)) -		return lun_p; +	lun = core_tpg_alloc_lun(tpg, unpacked_lun); +	if (IS_ERR(lun)) +		return lun; -	rc = core_tpg_post_addlun(tpg, lun_p, +	rc = core_tpg_add_lun(tpg, lun,  				TRANSPORT_LUNFLAGS_READ_WRITE, dev);  	if (rc < 0)  		return ERR_PTR(rc);  	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"  		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), -		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, +		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,  		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);  	/*  	 * Update LUN maps for dynamically added initiators when @@ -1150,7 +1243,7 @@ struct se_lun *core_dev_add_lun(  		spin_unlock_irq(&tpg->acl_node_lock);  	} -	return lun_p; +	return lun;  }  /*      core_dev_del_lun(): @@ -1304,7 +1397,7 @@ int core_dev_add_initiator_node_lun_acl(  	spin_lock(&lun->lun_acl_lock);  	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);  	atomic_inc(&lun->lun_acl_count); -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  	spin_unlock(&lun->lun_acl_lock);  	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " @@ -1338,7 +1431,7 @@ int core_dev_del_initiator_node_lun_acl(  	spin_lock(&lun->lun_acl_lock);  	list_del(&lacl->lacl_list);  	atomic_dec(&lun->lun_acl_count); -	smp_mb__after_atomic_dec(); +	smp_mb__after_atomic();  	spin_unlock(&lun->lun_acl_lock);  	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, @@ -1407,6 +1500,7 @@ static void scsi_dump_inquiry(struct se_device *dev)  struct se_device *target_alloc_device(struct se_hba *hba, const char *name)  {  	struct se_device *dev; +	struct se_lun *xcopy_lun;  	dev = hba->transport->alloc_device(hba, name);  	if (!dev) @@ -1415,6 +1509,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)  	dev->dev_link_magic = SE_DEV_LINK_MAGIC;  	dev->se_hba = hba;  	dev->transport = hba->transport; +	dev->prot_length = sizeof(struct se_dif_v1_tuple);  	INIT_LIST_HEAD(&dev->dev_list);  	INIT_LIST_HEAD(&dev->dev_sep_list); @@ -1423,7 +1518,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)  	INIT_LIST_HEAD(&dev->state_list);  	INIT_LIST_HEAD(&dev->qf_cmd_list);  	INIT_LIST_HEAD(&dev->g_dev_node); -	spin_lock_init(&dev->stats_lock);  	spin_lock_init(&dev->execute_task_lock);  	spin_lock_init(&dev->delayed_cmd_lock);  	spin_lock_init(&dev->dev_reservation_lock); @@ -1440,6 +1534,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)  	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);  	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);  	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); +	
INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); +	spin_lock_init(&dev->t10_alua.lba_map_lock);  	dev->t10_wwn.t10_dev = dev;  	dev->t10_alua.t10_dev = dev; @@ -1456,6 +1552,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)  	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;  	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;  	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; +	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;  	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;  	dev->dev_attrib.is_nonrot = DA_IS_NONROT;  	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; @@ -1469,6 +1566,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)  	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;  	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; +	xcopy_lun = &dev->xcopy_lun; +	xcopy_lun->lun_se_dev = dev; +	init_completion(&xcopy_lun->lun_shutdown_comp); +	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list); +	spin_lock_init(&xcopy_lun->lun_acl_lock); +	spin_lock_init(&xcopy_lun->lun_sep_lock); +	init_completion(&xcopy_lun->lun_ref_comp); +  	return dev;  } @@ -1576,9 +1681,13 @@ void target_free_device(struct se_device *dev)  	}  	core_alua_free_lu_gp_mem(dev); +	core_alua_set_lba_map(dev, NULL, 0, 0);  	core_scsi3_free_all_registrations(dev);  	se_release_vpd_for_dev(dev); +	if (dev->transport->free_prot) +		dev->transport->free_prot(dev); +  	dev->transport->free_device(dev);  } diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 3503996d7d1..7de9f0475d0 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun(  	}  	config_group_init_type_name(&lacl->se_lun_group, name, -			&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); +			&tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);  	config_group_init_type_name(&lacl->ml_stat_grps.stat_group, -			"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); +			"statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);  	lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;  	lacl_cg->default_groups[1] = NULL; @@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl(  	nacl_cg->default_groups[4] = NULL;  	config_group_init_type_name(&se_nacl->acl_group, name, -			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); +			&tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);  	config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", -			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); +			&tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);  	config_group_init_type_name(&se_nacl->acl_auth_group, "auth", -			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); +			&tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);  	config_group_init_type_name(&se_nacl->acl_param_group, "param", -			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); +			&tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);  	config_group_init_type_name(&se_nacl->acl_fabric_stat_group,  			"fabric_statistics", -			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); +			&tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);  	return &se_nacl->acl_group;  } @@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np(  	se_tpg_np->tpg_np_parent = se_tpg;  	config_group_init_type_name(&se_tpg_np->tpg_np_group, name, -			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); +			&tf->tf_cit_tmpl.tfc_tpg_np_base_cit);  	return &se_tpg_np->tpg_np_group;  } @@ -899,14 +899,14 @@ static struct config_group 
*target_fabric_make_lun(  	}  	config_group_init_type_name(&lun->lun_group, name, -			&TF_CIT_TMPL(tf)->tfc_tpg_port_cit); +			&tf->tf_cit_tmpl.tfc_tpg_port_cit);  	config_group_init_type_name(&lun->port_stat_grps.stat_group, -			"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); +			"statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);  	lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;  	lun_cg->default_groups[1] = NULL;  	port_stat_grp = &lun->port_stat_grps.stat_group; -	port_stat_grp->default_groups =  kzalloc(sizeof(struct config_group) * 3, +	port_stat_grp->default_groups =  kzalloc(sizeof(struct config_group *) * 4,  				GFP_KERNEL);  	if (!port_stat_grp->default_groups) {  		pr_err("Unable to allocate port_stat_grp->default_groups\n"); @@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg(  	se_tpg->tpg_group.default_groups[6] = NULL;  	config_group_init_type_name(&se_tpg->tpg_group, name, -			&TF_CIT_TMPL(tf)->tfc_tpg_base_cit); +			&tf->tf_cit_tmpl.tfc_tpg_base_cit);  	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", -			&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); +			&tf->tf_cit_tmpl.tfc_tpg_lun_cit);  	config_group_init_type_name(&se_tpg->tpg_np_group, "np", -			&TF_CIT_TMPL(tf)->tfc_tpg_np_cit); +			&tf->tf_cit_tmpl.tfc_tpg_np_cit);  	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", -			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); +			&tf->tf_cit_tmpl.tfc_tpg_nacl_cit);  	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", -			&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); +			&tf->tf_cit_tmpl.tfc_tpg_attrib_cit);  	config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", -			&TF_CIT_TMPL(tf)->tfc_tpg_auth_cit); +			&tf->tf_cit_tmpl.tfc_tpg_auth_cit);  	config_group_init_type_name(&se_tpg->tpg_param_group, "param", -			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit); +			&tf->tf_cit_tmpl.tfc_tpg_param_cit);  	return &se_tpg->tpg_group;  } @@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn(  	wwn->wwn_group.default_groups[1] = NULL;  	config_group_init_type_name(&wwn->wwn_group, name, -			&TF_CIT_TMPL(tf)->tfc_tpg_cit); +			&tf->tf_cit_tmpl.tfc_tpg_cit);  	config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", -			&TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); +			&tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);  	return &wwn->wwn_group;  } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index b662f89deda..7d6cddaec52 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)  	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"  		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,  		TARGET_CORE_MOD_VERSION); -	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" -		" MaxSectors: %u\n", -		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); +	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", +		hba->hba_id, fd_host->fd_host_id);  	return 0;  } @@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)  	}  	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; -	dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; +	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; +	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;  	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;  	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { @@ -257,6 +257,72 @@ static void fd_free_device(struct se_device *dev)  	
kfree(fd_dev);  } +static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, +			 int is_write) +{ +	struct se_device *se_dev = cmd->se_dev; +	struct fd_dev *dev = FD_DEV(se_dev); +	struct file *prot_fd = dev->fd_prot_file; +	struct scatterlist *sg; +	loff_t pos = (cmd->t_task_lba * se_dev->prot_length); +	unsigned char *buf; +	u32 prot_size, len, size; +	int rc, ret = 1, i; + +	prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * +		     se_dev->prot_length; + +	if (!is_write) { +		fd_prot->prot_buf = vzalloc(prot_size); +		if (!fd_prot->prot_buf) { +			pr_err("Unable to allocate fd_prot->prot_buf\n"); +			return -ENOMEM; +		} +		buf = fd_prot->prot_buf; + +		fd_prot->prot_sg_nents = cmd->t_prot_nents; +		fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * +					   fd_prot->prot_sg_nents, GFP_KERNEL); +		if (!fd_prot->prot_sg) { +			pr_err("Unable to allocate fd_prot->prot_sg\n"); +			vfree(fd_prot->prot_buf); +			return -ENOMEM; +		} +		size = prot_size; + +		for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { + +			len = min_t(u32, PAGE_SIZE, size); +			sg_set_buf(sg, buf, len); +			size -= len; +			buf += len; +		} +	} + +	if (is_write) { +		rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos); +		if (rc < 0 || prot_size != rc) { +			pr_err("kernel_write() for fd_do_prot_rw failed:" +			       " %d\n", rc); +			ret = -EINVAL; +		} +	} else { +		rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size); +		if (rc < 0) { +			pr_err("kernel_read() for fd_do_prot_rw failed:" +			       " %d\n", rc); +			ret = -EINVAL; +		} +	} + +	if (is_write || ret < 0) { +		kfree(fd_prot->prot_sg); +		vfree(fd_prot->prot_buf); +	} + +	return ret; +} +  static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,  		u32 sgl_nents, int is_write)  { @@ -551,6 +617,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  	      enum dma_data_direction data_direction)  {  	struct se_device *dev = cmd->se_dev; +	struct fd_prot fd_prot; +	sense_reason_t rc;  	int ret = 0;  	/* @@ -558,11 +626,51 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  	 * physical memory addresses to struct iovec virtual memory.  	 
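
fd_do_prot_rw() above describes one flat allocation as a scatterlist so the shared DIF helpers can walk backend PI exactly like wire PI. The mapping loop in isolation (kernel-side sketch; map_flat_buf is a made-up name, and note that sg_set_buf() resolves pages via virt_to_page(), so it assumes a linearly-mapped buffer):

/* Sketch: describe a contiguous buffer as an sg list in PAGE_SIZE chunks. */
static void map_flat_buf(struct scatterlist *sgl, int nents,
                         unsigned char *buf, u32 size)
{
    struct scatterlist *sg;
    u32 len;
    int i;

    for_each_sg(sgl, sg, nents, i) {
        len = min_t(u32, PAGE_SIZE, size);
        sg_set_buf(sg, buf, len);  /* sets page, offset and length */
        size -= len;
        buf += len;
    }
}
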
*/  	if (data_direction == DMA_FROM_DEVICE) { +		memset(&fd_prot, 0, sizeof(struct fd_prot)); + +		if (cmd->prot_type) { +			ret = fd_do_prot_rw(cmd, &fd_prot, false); +			if (ret < 0) +				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +		} +  		ret = fd_do_rw(cmd, sgl, sgl_nents, 0); + +		if (ret > 0 && cmd->prot_type) { +			u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + +			rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, +						 0, fd_prot.prot_sg, 0); +			if (rc) { +				kfree(fd_prot.prot_sg); +				vfree(fd_prot.prot_buf); +				return rc; +			} +			kfree(fd_prot.prot_sg); +			vfree(fd_prot.prot_buf); +		}  	} else { +		memset(&fd_prot, 0, sizeof(struct fd_prot)); + +		if (cmd->prot_type) { +			u32 sectors = cmd->data_length / dev->dev_attrib.block_size; + +			ret = fd_do_prot_rw(cmd, &fd_prot, false); +			if (ret < 0) +				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + +			rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, +						  0, fd_prot.prot_sg, 0); +			if (rc) { +				kfree(fd_prot.prot_sg); +				vfree(fd_prot.prot_buf); +				return rc; +			} +		} +  		ret = fd_do_rw(cmd, sgl, sgl_nents, 1);  		/* -		 * Perform implict vfs_fsync_range() for fd_do_writev() ops +		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops  		 * for SCSI WRITEs with Forced Unit Access (FUA) set.  		 * Allow this to happen independent of WCE=0 setting.  		 */ @@ -576,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  			vfs_fsync_range(fd_dev->fd_file, start, end, 1);  		} + +		if (ret > 0 && cmd->prot_type) { +			ret = fd_do_prot_rw(cmd, &fd_prot, true); +			if (ret < 0) +				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +		}  	} -	if (ret < 0) +	if (ret < 0) { +		kfree(fd_prot.prot_sg); +		vfree(fd_prot.prot_buf);  		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +	}  	if (ret)  		target_complete_cmd(cmd, SAM_STAT_GOOD); @@ -700,6 +817,102 @@ static sector_t fd_get_blocks(struct se_device *dev)  		       dev->dev_attrib.block_size);  } +static int fd_init_prot(struct se_device *dev) +{ +	struct fd_dev *fd_dev = FD_DEV(dev); +	struct file *prot_file, *file = fd_dev->fd_file; +	struct inode *inode; +	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; +	char buf[FD_MAX_DEV_PROT_NAME]; + +	if (!file) { +		pr_err("Unable to locate fd_dev->fd_file\n"); +		return -ENODEV; +	} + +	inode = file->f_mapping->host; +	if (S_ISBLK(inode->i_mode)) { +		pr_err("FILEIO Protection emulation only supported on" +		       " !S_ISBLK\n"); +		return -ENOSYS; +	} + +	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) +		flags &= ~O_DSYNC; + +	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection", +		 fd_dev->fd_dev_name); + +	prot_file = filp_open(buf, flags, 0600); +	if (IS_ERR(prot_file)) { +		pr_err("filp_open(%s) failed\n", buf); +		ret = PTR_ERR(prot_file); +		return ret; +	} +	fd_dev->fd_prot_file = prot_file; + +	return 0; +} + +static int fd_format_prot(struct se_device *dev) +{ +	struct fd_dev *fd_dev = FD_DEV(dev); +	struct file *prot_fd = fd_dev->fd_prot_file; +	sector_t prot_length, prot; +	unsigned char *buf; +	loff_t pos = 0; +	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; +	int rc, ret = 0, size, len; + +	if (!dev->dev_attrib.pi_prot_type) { +		pr_err("Unable to format_prot while pi_prot_type == 0\n"); +		return -ENODEV; +	} +	if (!prot_fd) { +		pr_err("Unable to locate fd_dev->fd_prot_file\n"); +		return -ENODEV; +	} + +	buf = vzalloc(unit_size); +	if (!buf) { +		pr_err("Unable to allocate FILEIO prot 
buf\n"); +		return -ENOMEM; +	} +	prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; +	size = prot_length; + +	pr_debug("Using FILEIO prot_length: %llu\n", +		 (unsigned long long)prot_length); + +	memset(buf, 0xff, unit_size); +	for (prot = 0; prot < prot_length; prot += unit_size) { +		len = min(unit_size, size); +		rc = kernel_write(prot_fd, buf, len, pos); +		if (rc != len) { +			pr_err("vfs_write to prot file failed: %d\n", rc); +			ret = -ENODEV; +			goto out; +		} +		pos += len; +		size -= len; +	} + +out: +	vfree(buf); +	return ret; +} + +static void fd_free_prot(struct se_device *dev) +{ +	struct fd_dev *fd_dev = FD_DEV(dev); + +	if (!fd_dev->fd_prot_file) +		return; + +	filp_close(fd_dev->fd_prot_file, NULL); +	fd_dev->fd_prot_file = NULL; +} +  static struct sbc_ops fd_sbc_ops = {  	.execute_rw		= fd_execute_rw,  	.execute_sync_cache	= fd_execute_sync_cache, @@ -730,6 +943,9 @@ static struct se_subsystem_api fileio_template = {  	.show_configfs_dev_params = fd_show_configfs_dev_params,  	.get_device_type	= sbc_get_device_type,  	.get_blocks		= fd_get_blocks, +	.init_prot		= fd_init_prot, +	.format_prot		= fd_format_prot, +	.free_prot		= fd_free_prot,  };  static int __init fileio_module_init(void) diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 37ffc5bd239..182cbb29503 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -4,10 +4,14 @@  #define FD_VERSION		"4.0"  #define FD_MAX_DEV_NAME		256 +#define FD_MAX_DEV_PROT_NAME	FD_MAX_DEV_NAME + 16  #define FD_DEVICE_QUEUE_DEPTH	32  #define FD_MAX_DEVICE_QUEUE_DEPTH 128  #define FD_BLOCKSIZE		512 -#define FD_MAX_SECTORS		2048 +/* + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call + */ +#define FD_MAX_BYTES		8388608  #define RRF_EMULATE_CDB		0x01  #define RRF_GOT_LBA		0x02 @@ -15,6 +19,13 @@  #define FBDF_HAS_PATH		0x01  #define FBDF_HAS_SIZE		0x02  #define FDBD_HAS_BUFFERED_IO_WCE 0x04 +#define FDBD_FORMAT_UNIT_SIZE	2048 + +struct fd_prot { +	unsigned char	*prot_buf; +	struct scatterlist *prot_sg; +	u32 prot_sg_nents; +};  struct fd_dev {  	struct se_device dev; @@ -29,6 +40,7 @@ struct fd_dev {  	u32		fd_block_size;  	unsigned long long fd_dev_size;  	struct file	*fd_file; +	struct file	*fd_prot_file;  	/* FILEIO HBA device is connected to */  	struct fd_host *fd_host;  } ____cacheline_aligned; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index b9a3394fe47..7e6b857c6b3 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev)  	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);  	struct request_queue *q;  	struct block_device *bd = NULL; +	struct blk_integrity *bi;  	fmode_t mode;  	int ret = -ENOMEM; @@ -155,8 +156,40 @@ static int iblock_configure_device(struct se_device *dev)  	if (blk_queue_nonrot(q))  		dev->dev_attrib.is_nonrot = 1; +	bi = bdev_get_integrity(bd); +	if (bi) { +		struct bio_set *bs = ib_dev->ibd_bio_set; + +		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") || +		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) { +			pr_err("IBLOCK export of blk_integrity: %s not" +			       " supported\n", bi->name); +			ret = -ENOSYS; +			goto out_blkdev_put; +		} + +		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) { +			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; +		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) { +			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; 
+		} + +		if (dev->dev_attrib.pi_prot_type) { +			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { +				pr_err("Unable to allocate bioset for PI\n"); +				ret = -ENOMEM; +				goto out_blkdev_put; +			} +			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", +				 bs->bio_integrity_pool); +		} +		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; +	} +  	return 0; +out_blkdev_put: +	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);  out_free_bioset:  	bioset_free(ib_dev->ibd_bio_set);  	ib_dev->ibd_bio_set = NULL; @@ -172,6 +205,7 @@ static void iblock_free_device(struct se_device *dev)  		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);  	if (ib_dev->ibd_bio_set != NULL)  		bioset_free(ib_dev->ibd_bio_set); +  	kfree(ib_dev);  } @@ -289,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err)  		 * Bump the ib_bio_err_cnt and release bio.  		 */  		atomic_inc(&ibr->ib_bio_err_cnt); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  	}  	bio_put(bio); @@ -319,7 +353,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)  	bio->bi_bdev = ib_dev->ibd_bd;  	bio->bi_private = cmd;  	bio->bi_end_io = &iblock_bio_done; -	bio->bi_sector = lba; +	bio->bi_iter.bi_sector = lba;  	return bio;  } @@ -586,13 +620,58 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)  	return bl;  } +static int +iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) +{ +	struct se_device *dev = cmd->se_dev; +	struct blk_integrity *bi; +	struct bio_integrity_payload *bip; +	struct iblock_dev *ib_dev = IBLOCK_DEV(dev); +	struct scatterlist *sg; +	int i, rc; + +	bi = bdev_get_integrity(ib_dev->ibd_bd); +	if (!bi) { +		pr_err("Unable to locate bio_integrity\n"); +		return -ENODEV; +	} + +	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); +	if (!bip) { +		pr_err("Unable to allocate bio_integrity_payload\n"); +		return -ENOMEM; +	} + +	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) * +			 dev->prot_length; +	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; + +	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, +		 (unsigned long long)bip->bip_iter.bi_sector); + +	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { + +		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, +					    sg->offset); +		if (rc != sg->length) { +			pr_err("bio_integrity_add_page() failed; %d\n", rc); +			return -ENOMEM; +		} + +		pr_debug("Added bio integrity page: %p length: %d offset; %d\n", +			 sg_page(sg), sg->length, sg->offset); +	} + +	return 0; +} +  static sense_reason_t  iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  		  enum dma_data_direction data_direction)  {  	struct se_device *dev = cmd->se_dev;  	struct iblock_req *ibr; -	struct bio *bio; +	struct bio *bio, *bio_start;  	struct bio_list list;  	struct scatterlist *sg;  	u32 sg_num = sgl_nents; @@ -655,6 +734,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  	if (!bio)  		goto fail_free_ibr; +	bio_start = bio;  	bio_list_init(&list);  	bio_list_add(&list, bio); @@ -688,6 +768,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  		sg_num--;  	} +	if (cmd->prot_type) { +		int rc = iblock_alloc_bip(cmd, bio_start); +		if (rc) +			goto fail_put_bios; +	} +  	iblock_submit_bios(&list, rw);  	iblock_complete_cmd(cmd);  	return 0; @@ -710,6 +796,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)  	return 
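
iblock_alloc_bip() above sizes the integrity payload at one PI tuple per logical block of the data phase. Worked example, assuming the usual 8-byte tuple:

#include <stdio.h>

int main(void)
{
    unsigned int data_length = 262144;  /* 256 KiB data phase */
    unsigned int block_size  = 512;
    unsigned int prot_length = 8;       /* bytes of PI per block (assumed) */

    unsigned int bip_bytes = (data_length / block_size) * prot_length;

    printf("bip_iter.bi_size = %u\n", bip_bytes);  /* 4096 */
    return 0;
}
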
iblock_emulate_read_cap_with_block_size(dev, bd, q);  } +static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) +{ +	struct iblock_dev *ib_dev = IBLOCK_DEV(dev); +	struct block_device *bd = ib_dev->ibd_bd; +	int ret; + +	ret = bdev_alignment_offset(bd); +	if (ret == -1) +		return 0; + +	/* convert offset-bytes to offset-lbas */ +	return ret / bdev_logical_block_size(bd); +} + +static unsigned int iblock_get_lbppbe(struct se_device *dev) +{ +	struct iblock_dev *ib_dev = IBLOCK_DEV(dev); +	struct block_device *bd = ib_dev->ibd_bd; +	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd); + +	return ilog2(logs_per_phys); +} + +static unsigned int iblock_get_io_min(struct se_device *dev) +{ +	struct iblock_dev *ib_dev = IBLOCK_DEV(dev); +	struct block_device *bd = ib_dev->ibd_bd; + +	return bdev_io_min(bd); +} + +static unsigned int iblock_get_io_opt(struct se_device *dev) +{ +	struct iblock_dev *ib_dev = IBLOCK_DEV(dev); +	struct block_device *bd = ib_dev->ibd_bd; + +	return bdev_io_opt(bd); +} +  static struct sbc_ops iblock_sbc_ops = {  	.execute_rw		= iblock_execute_rw,  	.execute_sync_cache	= iblock_execute_sync_cache, @@ -724,7 +849,7 @@ iblock_parse_cdb(struct se_cmd *cmd)  	return sbc_parse_cdb(cmd, &iblock_sbc_ops);  } -bool iblock_get_write_cache(struct se_device *dev) +static bool iblock_get_write_cache(struct se_device *dev)  {  	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);  	struct block_device *bd = ib_dev->ibd_bd; @@ -749,6 +874,10 @@ static struct se_subsystem_api iblock_template = {  	.show_configfs_dev_params = iblock_show_configfs_dev_params,  	.get_device_type	= sbc_get_device_type,  	.get_blocks		= iblock_get_blocks, +	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas, +	.get_lbppbe		= iblock_get_lbppbe, +	.get_io_min		= iblock_get_io_min, +	.get_io_opt		= iblock_get_io_opt,  	.get_write_cache	= iblock_get_write_cache,  }; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 579128abe3f..de9cab708f4 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -35,6 +35,8 @@ int	se_dev_set_emulate_tpu(struct se_device *, int);  int	se_dev_set_emulate_tpws(struct se_device *, int);  int	se_dev_set_emulate_caw(struct se_device *, int);  int	se_dev_set_emulate_3pc(struct se_device *, int); +int	se_dev_set_pi_prot_type(struct se_device *, int); +int	se_dev_set_pi_prot_format(struct se_device *, int);  int	se_dev_set_enforce_pr_isids(struct se_device *, int);  int	se_dev_set_is_nonrot(struct se_device *, int);  int	se_dev_set_emulate_rest_reord(struct se_device *dev, int); @@ -75,13 +77,11 @@ extern struct se_device *g_lun0_dev;  struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,  		const char *); -struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, -		unsigned char *);  void	core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);  void	core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); -struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); -int	core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, -		u32, void *); +struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); +int	core_tpg_add_lun(struct se_portal_group *, struct se_lun *, +		u32, struct se_device *);  struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);  int	core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); @@ -102,7 +102,7 @@ int	
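
The new iblock_get_lbppbe() reports logical blocks per physical block as an exponent, and the alignment helper converts the block layer's byte offset into LBAs. Worked numbers for a typical 512e drive (ilog2_u stands in for the kernel's ilog2()):

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)  /* stand-in for ilog2() */
{
    unsigned int r = 0;
    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    unsigned int logical = 512, physical = 4096;  /* 512e drive */
    printf("lbppbe=%u\n", ilog2_u(physical / logical));  /* 3: 8 LBs/phys block */

    int align_bytes = 3584;  /* example bdev_alignment_offset() result */
    printf("alignment_lbas=%u\n", align_bytes / logical);  /* 7 */
    return 0;
}
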
transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);  int	transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);  int	transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);  bool	target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); -int	transport_clear_lun_from_sessions(struct se_lun *); +int	transport_clear_lun_ref(struct se_lun *);  void	transport_send_task_abort(struct se_cmd *);  sense_reason_t	target_cmd_size_check(struct se_cmd *cmd, unsigned int size);  void	target_qf_do_work(struct work_struct *work); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d1ae4c5c3ff..df357862286 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder(  	 * statement.  	 */  	if (!ret && !other_cdb) { -		pr_debug("Allowing explict CDB: 0x%02x for %s" +		pr_debug("Allowing explicit CDB: 0x%02x for %s"  			" reservation holder\n", cdb[0],  			core_scsi3_pr_dump_type(pr_reg_type)); @@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(  			 */  			if (!registered_nexus) { -				pr_debug("Allowing implict CDB: 0x%02x" +				pr_debug("Allowing implicit CDB: 0x%02x"  					" for %s reservation on unregistered"  					" nexus\n", cdb[0],  					core_scsi3_pr_dump_type(pr_reg_type)); @@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder(  			 * allow commands from registered nexuses.  			 */ -			pr_debug("Allowing implict CDB: 0x%02x for %s" +			pr_debug("Allowing implicit CDB: 0x%02x for %s"  				" reservation\n", cdb[0],  				core_scsi3_pr_dump_type(pr_reg_type)); @@ -675,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(  	spin_lock(&dev->se_port_lock);  	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {  		atomic_inc(&port->sep_tg_pt_ref_cnt); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		spin_unlock(&dev->se_port_lock);  		spin_lock_bh(&port->sep_alua_lock); @@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(  					alua_port_list) {  			/*  			 * This pointer will be NULL for demo mode MappedLUNs -			 * that have not been make explict via a ConfigFS +			 * that have not been make explicit via a ConfigFS  			 * MappedLUN group for the SCSI Initiator Node ACL.  			 
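
All of the smp_mb__after_atomic_{inc,dec}() calls in this file collapse into the single smp_mb__after_atomic() primitive; the pattern around them is unchanged (sketch, with a stand-in counter):

static atomic_t ref_cnt;  /* stand-in for e.g. port->sep_tg_pt_ref_cnt */

static void take_ref(void)
{
    atomic_inc(&ref_cnt);
    smp_mb__after_atomic();  /* order the bump before subsequent accesses */
}
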
*/  			if (!deve_tmp->se_lun_acl) @@ -710,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(  				continue;  			atomic_inc(&deve_tmp->pr_ref_count); -			smp_mb__after_atomic_inc(); +			smp_mb__after_atomic();  			spin_unlock_bh(&port->sep_alua_lock);  			/*  			 * Grab a configfs group dependency that is released @@ -723,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(  				pr_err("core_scsi3_lunacl_depend"  						"_item() failed\n");  				atomic_dec(&port->sep_tg_pt_ref_cnt); -				smp_mb__after_atomic_dec(); +				smp_mb__after_atomic();  				atomic_dec(&deve_tmp->pr_ref_count); -				smp_mb__after_atomic_dec(); +				smp_mb__after_atomic();  				goto out;  			}  			/* @@ -740,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(  						sa_res_key, all_tg_pt, aptpl);  			if (!pr_reg_atp) {  				atomic_dec(&port->sep_tg_pt_ref_cnt); -				smp_mb__after_atomic_dec(); +				smp_mb__after_atomic();  				atomic_dec(&deve_tmp->pr_ref_count); -				smp_mb__after_atomic_dec(); +				smp_mb__after_atomic();  				core_scsi3_lunacl_undepend_item(deve_tmp);  				goto out;  			} @@ -755,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(  		spin_lock(&dev->se_port_lock);  		atomic_dec(&port->sep_tg_pt_ref_cnt); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  	}  	spin_unlock(&dev->se_port_lock); @@ -1110,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(  					continue;  			}  			atomic_inc(&pr_reg->pr_res_holders); -			smp_mb__after_atomic_inc(); +			smp_mb__after_atomic();  			spin_unlock(&pr_tmpl->registration_lock);  			return pr_reg;  		} @@ -1125,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(  			continue;  		atomic_inc(&pr_reg->pr_res_holders); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		spin_unlock(&pr_tmpl->registration_lock);  		return pr_reg;  	} @@ -1155,10 +1155,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(  static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)  {  	atomic_dec(&pr_reg->pr_res_holders); -	smp_mb__after_atomic_dec(); +	smp_mb__after_atomic();  } -static int core_scsi3_check_implict_release( +static int core_scsi3_check_implicit_release(  	struct se_device *dev,  	struct t10_pr_registration *pr_reg)  { @@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release(  	}  	if (pr_res_holder == pr_reg) {  		/* -		 * Perform an implict RELEASE if the registration that +		 * Perform an implicit RELEASE if the registration that  		 * is being released is holding the reservation.  		 *  		 * From spc4r17, section 5.7.11.1: @@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release(  		 * For 'All Registrants' reservation types, all existing  		 * registrations are still processed as reservation holders  		 * in core_scsi3_pr_seq_non_holder() after the initial -		 * reservation holder is implictly released here. +		 * reservation holder is implicitly released here.  		 
*/  	} else if (pr_reg->pr_reg_all_tg_pt &&  		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, @@ -1349,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)  			&tpg->tpg_group.cg_item);  	atomic_dec(&tpg->tpg_pr_ref_count); -	smp_mb__after_atomic_dec(); +	smp_mb__after_atomic();  }  static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) @@ -1369,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)  	if (nacl->dynamic_node_acl) {  		atomic_dec(&nacl->acl_pr_ref_count); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		return;  	} @@ -1377,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)  			&nacl->acl_group.cg_item);  	atomic_dec(&nacl->acl_pr_ref_count); -	smp_mb__after_atomic_dec(); +	smp_mb__after_atomic();  }  static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) @@ -1408,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)  	 */  	if (!lun_acl) {  		atomic_dec(&se_deve->pr_ref_count); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		return;  	}  	nacl = lun_acl->se_lun_nacl; @@ -1418,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)  			&lun_acl->se_lun_group.cg_item);  	atomic_dec(&se_deve->pr_ref_count); -	smp_mb__after_atomic_dec(); +	smp_mb__after_atomic();  }  static sense_reason_t @@ -1552,14 +1552,14 @@ core_scsi3_decode_spec_i_port(  				continue;  			atomic_inc(&tmp_tpg->tpg_pr_ref_count); -			smp_mb__after_atomic_inc(); +			smp_mb__after_atomic();  			spin_unlock(&dev->se_port_lock);  			if (core_scsi3_tpg_depend_item(tmp_tpg)) {  				pr_err(" core_scsi3_tpg_depend_item()"  					" for tmp_tpg\n");  				atomic_dec(&tmp_tpg->tpg_pr_ref_count); -				smp_mb__after_atomic_dec(); +				smp_mb__after_atomic();  				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  				goto out_unmap;  			} @@ -1573,7 +1573,7 @@ core_scsi3_decode_spec_i_port(  						tmp_tpg, i_str);  			if (dest_node_acl) {  				atomic_inc(&dest_node_acl->acl_pr_ref_count); -				smp_mb__after_atomic_inc(); +				smp_mb__after_atomic();  			}  			spin_unlock_irq(&tmp_tpg->acl_node_lock); @@ -1587,7 +1587,7 @@ core_scsi3_decode_spec_i_port(  				pr_err("configfs_depend_item() failed"  					" for dest_node_acl->acl_group\n");  				atomic_dec(&dest_node_acl->acl_pr_ref_count); -				smp_mb__after_atomic_dec(); +				smp_mb__after_atomic();  				core_scsi3_tpg_undepend_item(tmp_tpg);  				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  				goto out_unmap; @@ -1647,7 +1647,7 @@ core_scsi3_decode_spec_i_port(  			pr_err("core_scsi3_lunacl_depend_item()"  					" failed\n");  			atomic_dec(&dest_se_deve->pr_ref_count); -			smp_mb__after_atomic_dec(); +			smp_mb__after_atomic();  			core_scsi3_nodeacl_undepend_item(dest_node_acl);  			core_scsi3_tpg_undepend_item(dest_tpg);  			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,  	struct t10_reservation *pr_tmpl = &dev->t10_pr;  	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;  	sense_reason_t ret = TCM_NO_SENSE; -	int pr_holder = 0; +	int pr_holder = 0, type;  	if (!se_sess || !se_lun) {  		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); @@ -2125,12 +2125,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,  		/*  		 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.  		 
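
The new `type` local and the `pr_reg = NULL` assignment in the register hunk above close a use-after-free: __core_scsi3_free_registration() can free the registration, so its reservation type is read first and the shared exit path drops a reference only while the pointer is still live. The shape of the fix, with hypothetical helper names:

/* Sketch: cache what you need, free, poison, and guard the common exit. */
static int unregister_one(struct t10_pr_registration *pr_reg)
{
    int ret = 0, type;

    type = pr_reg->pr_res_type;  /* read before the free below */
    free_registration(pr_reg);   /* stand-in for the real teardown */
    pr_reg = NULL;

    if (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY)
        ret = notify_release();  /* decided from 'type', never 'pr_reg' */

    if (pr_reg)                  /* skipped: reference already gone */
        put_registration(pr_reg);
    return ret;
}
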
*/ -		pr_holder = core_scsi3_check_implict_release( +		pr_holder = core_scsi3_check_implicit_release(  				cmd->se_dev, pr_reg);  		if (pr_holder < 0) {  			ret = TCM_RESERVATION_CONFLICT;  			goto out;  		} +		type = pr_reg->pr_res_type;  		spin_lock(&pr_tmpl->registration_lock);  		/* @@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,  		 * Release the calling I_T Nexus registration now..  		 */  		__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); +		pr_reg = NULL;  		/*  		 * From spc4r17, section 5.7.11.3 Unregistering @@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,  		 * RESERVATIONS RELEASED.  		 */  		if (pr_holder && -		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || -		     pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { +		    (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || +		     type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {  			list_for_each_entry(pr_reg_p,  					&pr_tmpl->registration_list,  					pr_reg_list) { @@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,  	ret = core_scsi3_update_and_write_aptpl(dev, aptpl);  out: -	core_scsi3_put_pr_reg(pr_reg); +	if (pr_reg) +		core_scsi3_put_pr_reg(pr_reg);  	return ret;  } @@ -2402,7 +2405,7 @@ static void __core_scsi3_complete_pro_release(  	struct se_device *dev,  	struct se_node_acl *se_nacl,  	struct t10_pr_registration *pr_reg, -	int explict) +	int explicit)  {  	struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;  	char i_buf[PR_REG_ISID_ID_LEN]; @@ -2416,7 +2419,7 @@ static void __core_scsi3_complete_pro_release(  	pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"  		" reservation holder TYPE: %s ALL_TG_PT: %d\n", -		tfo->get_fabric_name(), (explict) ? "explict" : "implict", +		tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",  		core_scsi3_pr_dump_type(pr_reg->pr_res_type),  		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);  	pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", @@ -2692,7 +2695,7 @@ static void __core_scsi3_complete_pro_preempt(  	memset(i_buf, 0, PR_REG_ISID_ID_LEN);  	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);  	/* -	 * Do an implict RELEASE of the existing reservation. +	 * Do an implicit RELEASE of the existing reservation.  	 */  	if (dev->dev_pr_res_holder)  		__core_scsi3_complete_pro_release(dev, nacl, @@ -2845,7 +2848,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,  				 * 5.7.11.4 Preempting, Table 52 and Figure 7.  				 *  				 * For a ZERO SA Reservation key, release -				 * all other registrations and do an implict +				 * all other registrations and do an implicit  				 * release of active persistent reservation.  				 
*  				 * For a non-ZERO SA Reservation key, only @@ -3165,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,  			continue;  		atomic_inc(&dest_se_tpg->tpg_pr_ref_count); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		spin_unlock(&dev->se_port_lock);  		if (core_scsi3_tpg_depend_item(dest_se_tpg)) {  			pr_err("core_scsi3_tpg_depend_item() failed"  				" for dest_se_tpg\n");  			atomic_dec(&dest_se_tpg->tpg_pr_ref_count); -			smp_mb__after_atomic_dec(); +			smp_mb__after_atomic();  			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  			goto out_put_pr_reg;  		} @@ -3270,7 +3273,7 @@ after_iport_check:  				initiator_str);  	if (dest_node_acl) {  		atomic_inc(&dest_node_acl->acl_pr_ref_count); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  	}  	spin_unlock_irq(&dest_se_tpg->acl_node_lock); @@ -3286,7 +3289,7 @@ after_iport_check:  		pr_err("core_scsi3_nodeacl_depend_item() for"  			" dest_node_acl\n");  		atomic_dec(&dest_node_acl->acl_pr_ref_count); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		dest_node_acl = NULL;  		ret = TCM_INVALID_PARAMETER_LIST;  		goto out; @@ -3311,7 +3314,7 @@ after_iport_check:  	if (core_scsi3_lunacl_depend_item(dest_se_deve)) {  		pr_err("core_scsi3_lunacl_depend_item() failed\n");  		atomic_dec(&dest_se_deve->pr_ref_count); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		dest_se_deve = NULL;  		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  		goto out; @@ -3877,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)  		add_desc_len = 0;  		atomic_inc(&pr_reg->pr_res_holders); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		spin_unlock(&pr_tmpl->registration_lock);  		/*  		 * Determine expected length of $FABRIC_MOD specific @@ -3891,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)  				" out of buffer: %d\n", cmd->data_length);  			spin_lock(&pr_tmpl->registration_lock);  			atomic_dec(&pr_reg->pr_res_holders); -			smp_mb__after_atomic_dec(); +			smp_mb__after_atomic();  			break;  		}  		/* @@ -3953,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)  		spin_lock(&pr_tmpl->registration_lock);  		atomic_dec(&pr_reg->pr_res_holders); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		/*  		 * Set the ADDITIONAL DESCRIPTOR LENGTH  		 */ diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index ed75cdd32cb..2ee2936fa0b 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -43,6 +43,11 @@  #define PR_APTPL_MAX_IPORT_LEN			256  #define PR_APTPL_MAX_TPORT_LEN			256 +/* + *  Function defined in target_core_spc.c + */ +void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *); +  extern struct kmem_cache *t10_pr_reg_cache;  extern void core_pr_dump_initiator_port(struct t10_pr_registration *, diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 551c96ca60a..94d00df28f3 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)  	 * pSCSI Host ID and enable for phba mode  	 */  	sh = scsi_host_lookup(phv->phv_host_id); -	if (IS_ERR(sh)) { +	if (!sh) {  		pr_err("pSCSI: Unable to locate SCSI Host for"  			" phv_host_id: %d\n", phv->phv_host_id); -		return PTR_ERR(sh); +		return -EINVAL;  	}  	phv->phv_lld_host = sh; @@ -515,10 +515,10 @@ static int pscsi_configure_device(struct 
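
The two pscsi hunks above fix a dead error check: scsi_host_lookup() reports failure as NULL rather than an ERR_PTR() value, so IS_ERR() never fired and a failed lookup fell through (and PTR_ERR(NULL) would have returned 0, i.e. success). The corrected pattern (pscsi_find_host is a hypothetical wrapper):

static int pscsi_find_host(unsigned short host_id, struct Scsi_Host **out)
{
    struct Scsi_Host *sh = scsi_host_lookup(host_id);  /* NULL on failure */

    if (!sh)           /* not IS_ERR(): that only matches ERR_PTR() values */
        return -EINVAL;

    *out = sh;
    return 0;
}
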
se_device *dev)  			sh = phv->phv_lld_host;  		} else {  			sh = scsi_host_lookup(pdv->pdv_host_id); -			if (IS_ERR(sh)) { +			if (!sh) {  				pr_err("pSCSI: Unable to locate"  					" pdv_host_id: %d\n", pdv->pdv_host_id); -				return PTR_ERR(sh); +				return -EINVAL;  			}  		}  	} else { @@ -1055,6 +1055,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)  			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  			goto fail;  		} + +		blk_rq_set_block_pc(req);  	} else {  		BUG_ON(!cmd->data_length); @@ -1071,7 +1073,6 @@ pscsi_execute_cmd(struct se_cmd *cmd)  		}  	} -	req->cmd_type = REQ_TYPE_BLOCK_PC;  	req->end_io = pscsi_req_done;  	req->end_io_data = cmd;  	req->cmd_len = scsi_command_size(pt->pscsi_cdb); diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 131327ac7f5..b920db3388c 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -27,7 +27,6 @@  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> -#include <linux/blkdev.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <scsi/scsi.h> @@ -79,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba)  	hba->hba_ptr = NULL;  } -/*	rd_release_device_space(): - * - * - */ -static void rd_release_device_space(struct rd_dev *rd_dev) +static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, +				 u32 sg_table_count)  { -	u32 i, j, page_count = 0, sg_per_table; -	struct rd_dev_sg_table *sg_table;  	struct page *pg;  	struct scatterlist *sg; +	u32 i, j, page_count = 0, sg_per_table; -	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) -		return; - -	sg_table = rd_dev->sg_table_array; - -	for (i = 0; i < rd_dev->sg_table_count; i++) { +	for (i = 0; i < sg_table_count; i++) {  		sg = sg_table[i].sg_table;  		sg_per_table = sg_table[i].rd_sg_count; @@ -106,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)  				page_count++;  			}  		} -  		kfree(sg);  	} +	kfree(sg_table); +	return page_count; +} + +static void rd_release_device_space(struct rd_dev *rd_dev) +{ +	u32 page_count; + +	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) +		return; + +	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, +					  rd_dev->sg_table_count); +  	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"  		" Device ID: %u, pages %u in %u tables total bytes %lu\n",  		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,  		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); -	kfree(sg_table);  	rd_dev->sg_table_array = NULL;  	rd_dev->sg_table_count = 0;  } @@ -125,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev)   *   *   */ -static int rd_build_device_space(struct rd_dev *rd_dev) +static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, +				 u32 total_sg_needed, unsigned char init_payload)  { -	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed; +	u32 i = 0, j, page_offset = 0, sg_per_table;  	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /  				sizeof(struct scatterlist)); -	struct rd_dev_sg_table *sg_table;  	struct page *pg;  	struct scatterlist *sg; - -	if (rd_dev->rd_page_count <= 0) { -		pr_err("Illegal page count: %u for Ramdisk device\n", -			rd_dev->rd_page_count); -		return -EINVAL; -	} - -	/* Don't need backing pages for NULLIO */ -	if (rd_dev->rd_flags & RDF_NULLIO) -		return 0; - -	total_sg_needed = rd_dev->rd_page_count; - -	sg_tables = (total_sg_needed / max_sg_per_table) 
+ 1; - -	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); -	if (!sg_table) { -		pr_err("Unable to allocate memory for Ramdisk" -			" scatterlist tables\n"); -		return -ENOMEM; -	} - -	rd_dev->sg_table_array = sg_table; -	rd_dev->sg_table_count = sg_tables; +	unsigned char *p;  	while (total_sg_needed) {  		sg_per_table = (total_sg_needed > max_sg_per_table) ? @@ -187,16 +166,119 @@ static int rd_build_device_space(struct rd_dev *rd_dev)  			}  			sg_assign_page(&sg[j], pg);  			sg[j].length = PAGE_SIZE; + +			p = kmap(pg); +			memset(p, init_payload, PAGE_SIZE); +			kunmap(pg);  		}  		page_offset += sg_per_table;  		total_sg_needed -= sg_per_table;  	} +	return 0; +} + +static int rd_build_device_space(struct rd_dev *rd_dev) +{ +	struct rd_dev_sg_table *sg_table; +	u32 sg_tables, total_sg_needed; +	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / +				sizeof(struct scatterlist)); +	int rc; + +	if (rd_dev->rd_page_count <= 0) { +		pr_err("Illegal page count: %u for Ramdisk device\n", +		       rd_dev->rd_page_count); +		return -EINVAL; +	} + +	/* Don't need backing pages for NULLIO */ +	if (rd_dev->rd_flags & RDF_NULLIO) +		return 0; + +	total_sg_needed = rd_dev->rd_page_count; + +	sg_tables = (total_sg_needed / max_sg_per_table) + 1; + +	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); +	if (!sg_table) { +		pr_err("Unable to allocate memory for Ramdisk" +		       " scatterlist tables\n"); +		return -ENOMEM; +	} + +	rd_dev->sg_table_array = sg_table; +	rd_dev->sg_table_count = sg_tables; + +	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00); +	if (rc) +		return rc; +  	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" -		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, -		rd_dev->rd_dev_id, rd_dev->rd_page_count, -		rd_dev->sg_table_count); +		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, +		 rd_dev->rd_dev_id, rd_dev->rd_page_count, +		 rd_dev->sg_table_count); + +	return 0; +} + +static void rd_release_prot_space(struct rd_dev *rd_dev) +{ +	u32 page_count; + +	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count) +		return; + +	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, +					  rd_dev->sg_prot_count); + +	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk" +		 " Device ID: %u, pages %u in %u tables total bytes %lu\n", +		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, +		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); + +	rd_dev->sg_prot_array = NULL; +	rd_dev->sg_prot_count = 0; +} + +static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size) +{ +	struct rd_dev_sg_table *sg_table; +	u32 total_sg_needed, sg_tables; +	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / +				sizeof(struct scatterlist)); +	int rc; + +	if (rd_dev->rd_flags & RDF_NULLIO) +		return 0; +	/* +	 * prot_length=8byte dif data +	 * tot sg needed = rd_page_count * (PGSZ/block_size) * +	 * 		   (prot_length/block_size) + pad +	 * PGSZ canceled each other. 
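
rd_build_prot_space() sizes the PI area with PAGE_SIZE cancelled out of the expression, as the comment above notes. Worked example for a 16 MiB ramdisk:

#include <stdio.h>

int main(void)
{
    unsigned int rd_page_count = 4096;  /* 16 MiB ramdisk with 4 KiB pages */
    unsigned int block_size    = 512;
    unsigned int prot_length   = 8;     /* bytes of DIF per block */

    /* pages * (PAGE_SIZE/block_size) blocks * prot_length bytes / PAGE_SIZE:
     * PAGE_SIZE cancels, leaving the expression used above plus a pad page. */
    unsigned int prot_pages = rd_page_count * prot_length / block_size + 1;

    printf("prot pages=%u\n", prot_pages);  /* 65 */
    return 0;
}
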
+	 */ +	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1; + +	sg_tables = (total_sg_needed / max_sg_per_table) + 1; + +	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); +	if (!sg_table) { +		pr_err("Unable to allocate memory for Ramdisk protection" +		       " scatterlist tables\n"); +		return -ENOMEM; +	} + +	rd_dev->sg_prot_array = sg_table; +	rd_dev->sg_prot_count = sg_tables; + +	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff); +	if (rc) +		return rc; + +	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of" +		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, +		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);  	return 0;  } @@ -279,6 +361,26 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)  	return NULL;  } +static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page) +{ +	struct rd_dev_sg_table *sg_table; +	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / +				sizeof(struct scatterlist)); + +	i = page / sg_per_table; +	if (i < rd_dev->sg_prot_count) { +		sg_table = &rd_dev->sg_prot_array[i]; +		if ((sg_table->page_start_offset <= page) && +		     (sg_table->page_end_offset >= page)) +			return sg_table; +	} + +	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n", +			page); + +	return NULL; +} +  static sense_reason_t  rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  	      enum dma_data_direction data_direction) @@ -293,6 +395,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  	u32 rd_page;  	u32 src_len;  	u64 tmp; +	sense_reason_t rc;  	if (dev->rd_flags & RDF_NULLIO) {  		target_complete_cmd(cmd, SAM_STAT_GOOD); @@ -315,6 +418,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",  			cmd->t_task_lba, rd_size, rd_page, rd_offset); +	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { +		struct rd_dev_sg_table *prot_table; +		struct scatterlist *prot_sg; +		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; +		u32 prot_offset, prot_page; + +		tmp = cmd->t_task_lba * se_dev->prot_length; +		prot_offset = do_div(tmp, PAGE_SIZE); +		prot_page = tmp; + +		prot_table = rd_get_prot_table(dev, prot_page); +		if (!prot_table) +			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + +		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + +		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, +					  prot_sg, prot_offset); +		if (rc) +			return rc; +	} +  	src_len = PAGE_SIZE - rd_offset;  	sg_miter_start(&m, sgl, sgl_nents,  			data_direction == DMA_FROM_DEVICE ? 
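
Locating the tuple for an LBA splits the absolute byte offset into a page index and an intra-page offset; the kernel's do_div() leaves the quotient in place and returns the remainder. Userspace equivalent with sample numbers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
    uint64_t lba = 100000, prot_length = 8;
    uint64_t tmp = lba * prot_length;  /* absolute byte offset of the tuple */

    /* Stand-in for do_div(tmp, PAGE_SIZE): quotient in tmp, returns remainder. */
    uint32_t prot_offset = (uint32_t)(tmp % PAGE_SIZE);
    uint64_t prot_page   = tmp / PAGE_SIZE;

    printf("page=%llu offset=%u\n",
           (unsigned long long)prot_page, prot_offset);  /* page=195 offset=1280 */
    return 0;
}
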
@@ -376,6 +501,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,  	}  	sg_miter_stop(&m); +	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { +		struct rd_dev_sg_table *prot_table; +		struct scatterlist *prot_sg; +		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; +		u32 prot_offset, prot_page; + +		tmp = cmd->t_task_lba * se_dev->prot_length; +		prot_offset = do_div(tmp, PAGE_SIZE); +		prot_page = tmp; + +		prot_table = rd_get_prot_table(dev, prot_page); +		if (!prot_table) +			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + +		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; + +		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, +					 prot_sg, prot_offset); +		if (rc) +			return rc; +	} +  	target_complete_cmd(cmd, SAM_STAT_GOOD);  	return 0;  } @@ -457,6 +604,24 @@ static sector_t rd_get_blocks(struct se_device *dev)  	return blocks_long;  } +static int rd_init_prot(struct se_device *dev) +{ +	struct rd_dev *rd_dev = RD_DEV(dev); + +        if (!dev->dev_attrib.pi_prot_type) +		return 0; + +	return rd_build_prot_space(rd_dev, dev->prot_length, +				   dev->dev_attrib.block_size); +} + +static void rd_free_prot(struct se_device *dev) +{ +	struct rd_dev *rd_dev = RD_DEV(dev); + +	rd_release_prot_space(rd_dev); +} +  static struct sbc_ops rd_sbc_ops = {  	.execute_rw		= rd_execute_rw,  }; @@ -482,6 +647,8 @@ static struct se_subsystem_api rd_mcp_template = {  	.show_configfs_dev_params = rd_show_configfs_dev_params,  	.get_device_type	= sbc_get_device_type,  	.get_blocks		= rd_get_blocks, +	.init_prot		= rd_init_prot, +	.free_prot		= rd_free_prot,  };  int __init rd_module_init(void) diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 1789d1e1439..cc46a6a89b3 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -33,8 +33,12 @@ struct rd_dev {  	u32		rd_page_count;  	/* Number of SG tables in sg_table_array */  	u32		sg_table_count; +	/* Number of SG tables in sg_prot_array */ +	u32		sg_prot_count;  	/* Array of rd_dev_sg_table_t containing scatterlists */  	struct rd_dev_sg_table *sg_table_array; +	/* Array of rd_dev_sg_table containing protection scatterlists */ +	struct rd_dev_sg_table *sg_prot_array;  	/* Ramdisk HBA device is connected to */  	struct rd_host *rd_host;  } ____cacheline_aligned; diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 6c17295e8d7..bd78d9235ac 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -23,6 +23,7 @@  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/ratelimit.h> +#include <linux/crc-t10dif.h>  #include <asm/unaligned.h>  #include <scsi/scsi.h>  #include <scsi/scsi_tcq.h> @@ -33,7 +34,7 @@  #include "target_core_internal.h"  #include "target_core_ua.h" - +#include "target_core_alua.h"  static sense_reason_t  sbc_emulate_readcapacity(struct se_cmd *cmd) @@ -80,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)  		transport_kunmap_data_sg(cmd);  	} -	target_complete_cmd(cmd, GOOD); +	target_complete_cmd_with_length(cmd, GOOD, 8);  	return 0;  } @@ -88,6 +89,7 @@ static sense_reason_t  sbc_emulate_readcapacity_16(struct se_cmd *cmd)  {  	struct se_device *dev = cmd->se_dev; +	struct se_session *sess = cmd->se_sess;  	unsigned char *rbuf;  	unsigned char buf[32];  	unsigned long long blocks = dev->transport->get_blocks(dev); @@ -106,11 +108,28 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)  	buf[10] = 
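
target_complete_cmd_with_length(cmd, GOOD, 8) above lets READ CAPACITY return only the bytes the payload defines even when the initiator offered a bigger buffer. A sketch of the assumed helper semantics (clamp plus underflow accounting; the real body may differ):

/* Sketch, assuming the helper clamps the data phase and records underflow. */
static void complete_cmd_with_length(struct se_cmd *cmd, u8 status, int length)
{
    if (status == SAM_STAT_GOOD && length < cmd->data_length) {
        cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
        cmd->residual_count = cmd->data_length - length;
        cmd->data_length = length;
    }
    target_complete_cmd(cmd, status);
}
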
(dev->dev_attrib.block_size >> 8) & 0xff;  	buf[11] = dev->dev_attrib.block_size & 0xff;  	/* +	 * Set P_TYPE and PROT_EN bits for DIF support +	 */ +	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { +		if (dev->dev_attrib.pi_prot_type) +			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; +	} + +	if (dev->transport->get_lbppbe) +		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; + +	if (dev->transport->get_alignment_offset_lbas) { +		u16 lalba = dev->transport->get_alignment_offset_lbas(dev); +		buf[14] = (lalba >> 8) & 0x3f; +		buf[15] = lalba & 0xff; +	} + +	/*  	 * Set Thin Provisioning Enable bit following sbc3r22 in section  	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.  	 */  	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) -		buf[14] = 0x80; +		buf[14] |= 0x80;  	rbuf = transport_kmap_data_sg(cmd);  	if (rbuf) { @@ -118,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)  		transport_kunmap_data_sg(cmd);  	} -	target_complete_cmd(cmd, GOOD); +	target_complete_cmd_with_length(cmd, GOOD, 32);  	return 0;  } @@ -157,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)  	return cmd->se_dev->dev_attrib.block_size * sectors;  } -static int sbc_check_valid_sectors(struct se_cmd *cmd) -{ -	struct se_device *dev = cmd->se_dev; -	unsigned long long end_lba; -	u32 sectors; - -	sectors = cmd->data_length / dev->dev_attrib.block_size; -	end_lba = dev->transport->get_blocks(dev) + 1; - -	if (cmd->t_task_lba + sectors > end_lba) { -		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", -			cmd->t_task_lba, sectors, end_lba); -		return -EINVAL; -	} - -	return 0; -} -  static inline u32 transport_get_sectors_6(unsigned char *cdb)  {  	/* @@ -263,6 +264,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o  			sectors, cmd->se_dev->dev_attrib.max_write_same_len);  		return TCM_INVALID_CDB_FIELD;  	} +	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ +	if (flags[0] & 0x10) { +		pr_warn("WRITE SAME with ANCHOR not supported\n"); +		return TCM_INVALID_CDB_FIELD; +	}  	/*  	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting  	 * translated into block discard requests within backend code. @@ -349,7 +355,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)  {  	struct se_device *dev = cmd->se_dev; -	cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; +	/* +	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through +	 * within target_complete_ok_work() if the command was successfully +	 * sent to the backend driver. +	 */ +	spin_lock_irq(&cmd->t_state_lock); +	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) +		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; +	spin_unlock_irq(&cmd->t_state_lock); +  	/*  	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()  	 * before the original READ I/O submission. 
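
The READ CAPACITY (16) hunk above packs protection, LBPPBE and LALBA into bytes 12-15, which is also why the thin-provisioning bit switched from `=` to `|=`: byte 14 now already carries LALBA bits 13:8. Encoding walk-through with sample values:

#include <stdio.h>

int main(void)
{
    unsigned char buf[32] = {0};
    unsigned int pi_prot_type = 1;  /* DIF Type 1 */
    unsigned int lbppbe = 3;        /* 4 KiB physical / 512 B logical */
    unsigned short lalba = 7;       /* lowest aligned LBA */
    int tpe = 1;                    /* thin provisioning enabled */

    buf[12] = ((pi_prot_type - 1) << 1) | 0x1;  /* P_TYPE + PROT_EN */
    buf[13] = lbppbe & 0x0f;
    buf[14] = (lalba >> 8) & 0x3f;
    buf[15] = lalba & 0xff;
    if (tpe)
        buf[14] |= 0x80;  /* must OR: byte 14 already holds LALBA bits 13:8 */

    printf("byte12=0x%02x byte13=0x%02x byte14=0x%02x byte15=0x%02x\n",
           buf[12], buf[13], buf[14], buf[15]);
    return 0;
}
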
@@ -363,7 +378,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)  {  	struct se_device *dev = cmd->se_dev;  	struct scatterlist *write_sg = NULL, *sg; -	unsigned char *buf, *addr; +	unsigned char *buf = NULL, *addr;  	struct sg_mapping_iter m;  	unsigned int offset = 0, len;  	unsigned int nlbas = cmd->t_task_nolb; @@ -378,6 +393,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)  	 */  	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)  		return TCM_NO_SENSE; +	/* +	 * Immediately exit + release dev->caw_sem if command has already +	 * been failed with a non-zero SCSI status. +	 */ +	if (cmd->scsi_status) { +		pr_err("compare_and_write_callback: non zero scsi_status:" +			" 0x%02x\n", cmd->scsi_status); +		goto out; +	}  	buf = kzalloc(cmd->data_length, GFP_KERNEL);  	if (!buf) { @@ -386,13 +410,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)  		goto out;  	} -	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents, +	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,  			   GFP_KERNEL);  	if (!write_sg) {  		pr_err("Unable to allocate compare_and_write sg\n");  		ret = TCM_OUT_OF_RESOURCES;  		goto out;  	} +	sg_init_table(write_sg, cmd->t_data_nents);  	/*  	 * Setup verify and write data payloads from total NumberLBAs.  	 */ @@ -508,6 +533,12 @@ sbc_compare_and_write(struct se_cmd *cmd)  		cmd->transport_complete_callback = NULL;  		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  	} +	/* +	 * Reset cmd->data_length to individual block_size in order to not +	 * confuse backend drivers that depend on this value matching the +	 * size of the I/O being submitted. +	 */ +	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;  	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,  			      DMA_FROM_DEVICE); @@ -524,6 +555,116 @@ sbc_compare_and_write(struct se_cmd *cmd)  	return TCM_NO_SENSE;  } +static int +sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, +		       bool is_write, struct se_cmd *cmd) +{ +	if (is_write) { +		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS : +					 TARGET_PROT_DOUT_INSERT; +		switch (protect) { +		case 0x0: +		case 0x3: +			cmd->prot_checks = 0; +			break; +		case 0x1: +		case 0x5: +			cmd->prot_checks = TARGET_DIF_CHECK_GUARD; +			if (prot_type == TARGET_DIF_TYPE1_PROT) +				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; +			break; +		case 0x2: +			if (prot_type == TARGET_DIF_TYPE1_PROT) +				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; +			break; +		case 0x4: +			cmd->prot_checks = TARGET_DIF_CHECK_GUARD; +			break; +		default: +			pr_err("Unsupported protect field %d\n", protect); +			return -EINVAL; +		} +	} else { +		cmd->prot_op = protect ? 
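
Switching the COMPARE AND WRITE sg allocation from kzalloc() to kmalloc() plus sg_init_table() matters because a zeroed table carries no end marker (and, under CONFIG_DEBUG_SG, no magic for sg_set_page() to validate). Minimal kernel-side sketch:

struct scatterlist *sgl;

sgl = kmalloc_array(nents, sizeof(*sgl), GFP_KERNEL);  /* or kmalloc() as above */
if (sgl)
    sg_init_table(sgl, nents);  /* zeroes entries and marks sgl[nents - 1] last */
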
TARGET_PROT_DIN_PASS : +					 TARGET_PROT_DIN_STRIP; +		switch (protect) { +		case 0x0: +		case 0x1: +		case 0x5: +			cmd->prot_checks = TARGET_DIF_CHECK_GUARD; +			if (prot_type == TARGET_DIF_TYPE1_PROT) +				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; +			break; +		case 0x2: +			if (prot_type == TARGET_DIF_TYPE1_PROT) +				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; +			break; +		case 0x3: +			cmd->prot_checks = 0; +			break; +		case 0x4: +			cmd->prot_checks = TARGET_DIF_CHECK_GUARD; +			break; +		default: +			pr_err("Unsupported protect field %d\n", protect); +			return -EINVAL; +		} +	} + +	return 0; +} + +static bool +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, +	       u32 sectors, bool is_write) +{ +	u8 protect = cdb[1] >> 5; + +	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto) +		return true; + +	switch (dev->dev_attrib.pi_prot_type) { +	case TARGET_DIF_TYPE3_PROT: +		cmd->reftag_seed = 0xffffffff; +		break; +	case TARGET_DIF_TYPE2_PROT: +		if (protect) +			return false; + +		cmd->reftag_seed = cmd->t_task_lba; +		break; +	case TARGET_DIF_TYPE1_PROT: +		cmd->reftag_seed = cmd->t_task_lba; +		break; +	case TARGET_DIF_TYPE0_PROT: +	default: +		return true; +	} + +	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, +				   is_write, cmd)) +		return false; + +	cmd->prot_type = dev->dev_attrib.pi_prot_type; +	cmd->prot_length = dev->prot_length * sectors; + +	/** +	 * In case protection information exists over the wire +	 * we modify command data length to describe pure data. +	 * The actual transfer length is data length + protection +	 * length +	 **/ +	if (protect) +		cmd->data_length = sectors * dev->dev_attrib.block_size; + +	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " +		 "prot_op=%d prot_checks=%d\n", +		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, +		 cmd->prot_op, cmd->prot_checks); + +	return true; +} +  sense_reason_t  sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  { @@ -544,6 +685,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  	case READ_10:  		sectors = transport_get_sectors_10(cdb);  		cmd->t_task_lba = transport_lba_32(cdb); + +		if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) +			return TCM_UNSUPPORTED_SCSI_OPCODE; +  		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;  		cmd->execute_rw = ops->execute_rw;  		cmd->execute_cmd = sbc_execute_rw; @@ -551,6 +696,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  	case READ_12:  		sectors = transport_get_sectors_12(cdb);  		cmd->t_task_lba = transport_lba_32(cdb); + +		if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) +			return TCM_UNSUPPORTED_SCSI_OPCODE; +  		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;  		cmd->execute_rw = ops->execute_rw;  		cmd->execute_cmd = sbc_execute_rw; @@ -558,6 +707,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  	case READ_16:  		sectors = transport_get_sectors_16(cdb);  		cmd->t_task_lba = transport_lba_64(cdb); + +		if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) +			return TCM_UNSUPPORTED_SCSI_OPCODE; +  		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;  		cmd->execute_rw = ops->execute_rw;  		cmd->execute_cmd = sbc_execute_rw; @@ -573,6 +726,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  	case WRITE_VERIFY:  		sectors = transport_get_sectors_10(cdb);  		cmd->t_task_lba = transport_lba_32(cdb); + +		if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) +			return TCM_UNSUPPORTED_SCSI_OPCODE; +  		if (cdb[1] & 0x8)  			cmd->se_cmd_flags |= SCF_FUA;  
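
sbc_check_prot() reads the RDPROTECT/WRPROTECT field from the top three bits of CDB byte 1, alongside the FUA bit the parser already handles. Quick decode of a sample byte:

#include <stdio.h>

int main(void)
{
    unsigned char cdb1 = 0xa8;          /* example WRITE(10) byte 1 */
    unsigned char protect = cdb1 >> 5;  /* WRPROTECT field: 5 */
    int fua = cdb1 & 0x08;              /* FUA bit: set */

    /* Per sbc_set_prot_op_checks(): protect=5 -> GUARD check, plus
     * REFTAG when the backend is formatted for DIF Type 1. */
    printf("protect=%u fua=%d\n", protect, !!fua);
    return 0;
}
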
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -582,6 +739,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  	case WRITE_12:  		sectors = transport_get_sectors_12(cdb);  		cmd->t_task_lba = transport_lba_32(cdb); + +		if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) +			return TCM_UNSUPPORTED_SCSI_OPCODE; +  		if (cdb[1] & 0x8)  			cmd->se_cmd_flags |= SCF_FUA;  		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -591,6 +752,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  	case WRITE_16:  		sectors = transport_get_sectors_16(cdb);  		cmd->t_task_lba = transport_lba_64(cdb); + +		if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) +			return TCM_UNSUPPORTED_SCSI_OPCODE; +  		if (cdb[1] & 0x8)  			cmd->se_cmd_flags |= SCF_FUA;  		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -692,6 +857,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  		case SAI_READ_CAPACITY_16:  			cmd->execute_cmd = sbc_emulate_readcapacity_16;  			break; +		case SAI_REPORT_REFERRALS: +			cmd->execute_cmd = target_emulate_report_referrals; +			break;  		default:  			pr_err("Unsupported SA: 0x%02x\n",  				cmd->t_task_cdb[1] & 0x1f); @@ -702,15 +870,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  		break;  	case SYNCHRONIZE_CACHE:  	case SYNCHRONIZE_CACHE_16: -		if (!ops->execute_sync_cache) { -			size = 0; -			cmd->execute_cmd = sbc_emulate_noop; -			break; -		} - -		/* -		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE -		 */  		if (cdb[0] == SYNCHRONIZE_CACHE) {  			sectors = transport_get_sectors_10(cdb);  			cmd->t_task_lba = transport_lba_32(cdb); @@ -718,18 +877,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  			sectors = transport_get_sectors_16(cdb);  			cmd->t_task_lba = transport_lba_64(cdb);  		} - -		size = sbc_get_size(cmd, sectors); - -		/* -		 * Check to ensure that LBA + Range does not exceed past end of -		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls -		 */ -		if (cmd->t_task_lba || sectors) { -			if (sbc_check_valid_sectors(cmd) < 0) -				return TCM_ADDRESS_OUT_OF_RANGE; +		if (ops->execute_sync_cache) { +			cmd->execute_cmd = ops->execute_sync_cache; +			goto check_lba;  		} -		cmd->execute_cmd = ops->execute_sync_cache; +		size = 0; +		cmd->execute_cmd = sbc_emulate_noop;  		break;  	case UNMAP:  		if (!ops->execute_unmap) @@ -772,8 +925,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  		break;  	case VERIFY:  		size = 0; +		sectors = transport_get_sectors_10(cdb); +		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->execute_cmd = sbc_emulate_noop; -		break; +		goto check_lba;  	case REZERO_UNIT:  	case SEEK_6:  	case SEEK_10: @@ -813,7 +968,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)  				dev->dev_attrib.hw_max_sectors);  			return TCM_INVALID_CDB_FIELD;  		} - +check_lba:  		end_lba = dev->transport->get_blocks(dev) + 1;  		if (cmd->t_task_lba + sectors > end_lba) {  			pr_err("cmd exceeds last lba %llu " @@ -920,3 +1075,262 @@ err:  	return ret;  }  EXPORT_SYMBOL(sbc_execute_unmap); + +void +sbc_dif_generate(struct se_cmd *cmd) +{ +	struct se_device *dev = cmd->se_dev; +	struct se_dif_v1_tuple *sdt; +	struct scatterlist *dsg, *psg = cmd->t_prot_sg; +	sector_t sector = cmd->t_task_lba; +	void *daddr, *paddr; +	int i, j, offset = 0; + +	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { +		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; +		paddr = kmap_atomic(sg_page(psg)) + psg->offset; + +		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + +			if (offset 
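
The new check_lba label replaces the removed sbc_check_valid_sectors() with one shared range test: end_lba is the block count (last LBA plus one), and lba + sectors may not exceed it. Worked example at the end of a 1 GiB, 512-byte-block device:

#include <stdio.h>

int main(void)
{
    unsigned long long last_lba = 2097151;       /* get_blocks(): 1 GiB / 512 - 1 */
    unsigned long long end_lba  = last_lba + 1;  /* number of addressable blocks */
    unsigned long long lba = 2097000;
    unsigned int sectors = 200;

    if (lba + sectors > end_lba)
        printf("reject: %llu + %u exceeds %llu\n", lba, sectors, end_lba);
    else
        printf("ok\n");
    return 0;
}
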
>= psg->length) { +				kunmap_atomic(paddr); +				psg = sg_next(psg); +				paddr = kmap_atomic(sg_page(psg)) + psg->offset; +				offset = 0; +			} + +			sdt = paddr + offset; +			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, +						dev->dev_attrib.block_size)); +			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) +				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); +			sdt->app_tag = 0; + +			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x" +				 " app_tag: 0x%04x ref_tag: %u\n", +				 (unsigned long long)sector, sdt->guard_tag, +				 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + +			sector++; +			offset += sizeof(struct se_dif_v1_tuple); +		} + +		kunmap_atomic(paddr); +		kunmap_atomic(daddr); +	} +} + +static sense_reason_t +sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, +		  const void *p, sector_t sector, unsigned int ei_lba) +{ +	int block_size = dev->dev_attrib.block_size; +	__be16 csum; + +	csum = cpu_to_be16(crc_t10dif(p, block_size)); + +	if (sdt->guard_tag != csum) { +		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" +			" csum 0x%04x\n", (unsigned long long)sector, +			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); +		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; +	} + +	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && +	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { +		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" +		       " sector MSB: 0x%08x\n", (unsigned long long)sector, +		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); +		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; +	} + +	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && +	    be32_to_cpu(sdt->ref_tag) != ei_lba) { +		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" +		       " ei_lba: 0x%08x\n", (unsigned long long)sector, +			be32_to_cpu(sdt->ref_tag), ei_lba); +		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; +	} + +	return 0; +} + +static void +sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, +		  struct scatterlist *sg, int sg_off) +{ +	struct se_device *dev = cmd->se_dev; +	struct scatterlist *psg; +	void *paddr, *addr; +	unsigned int i, len, left; +	unsigned int offset = sg_off; + +	left = sectors * dev->prot_length; + +	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { +		unsigned int psg_len, copied = 0; + +		paddr = kmap_atomic(sg_page(psg)) + psg->offset; +		psg_len = min(left, psg->length); +		while (psg_len) { +			len = min(psg_len, sg->length - offset); +			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; + +			if (read) +				memcpy(paddr + copied, addr, len); +			else +				memcpy(addr, paddr + copied, len); + +			left -= len; +			offset += len; +			copied += len; +			psg_len -= len; + +			if (offset >= sg->length) { +				sg = sg_next(sg); +				offset = 0; +			} +			kunmap_atomic(addr); +		} +		kunmap_atomic(paddr); +	} +} + +sense_reason_t +sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, +		     unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ +	struct se_device *dev = cmd->se_dev; +	struct se_dif_v1_tuple *sdt; +	struct scatterlist *dsg, *psg = cmd->t_prot_sg; +	sector_t sector = start; +	void *daddr, *paddr; +	int i, j, offset = 0; +	sense_reason_t rc; + +	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { +		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; +		paddr = kmap_atomic(sg_page(psg)) + psg->offset; + +		for (j = 0; j < dsg->length; j += 
dev->dev_attrib.block_size) { + +			if (offset >= psg->length) { +				kunmap_atomic(paddr); +				psg = sg_next(psg); +				paddr = kmap_atomic(sg_page(psg)) + psg->offset; +				offset = 0; +			} + +			sdt = paddr + offset; + +			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" +				 " app_tag: 0x%04x ref_tag: %u\n", +				 (unsigned long long)sector, sdt->guard_tag, +				 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + +			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, +					       ei_lba); +			if (rc) { +				kunmap_atomic(paddr); +				kunmap_atomic(daddr); +				cmd->bad_sector = sector; +				return rc; +			} + +			sector++; +			ei_lba++; +			offset += sizeof(struct se_dif_v1_tuple); +		} + +		kunmap_atomic(paddr); +		kunmap_atomic(daddr); +	} +	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); + +	return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_write); + +static sense_reason_t +__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, +		      unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ +	struct se_device *dev = cmd->se_dev; +	struct se_dif_v1_tuple *sdt; +	struct scatterlist *dsg, *psg = sg; +	sector_t sector = start; +	void *daddr, *paddr; +	int i, j, offset = sg_off; +	sense_reason_t rc; + +	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { +		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; +		paddr = kmap_atomic(sg_page(psg)) + sg->offset; + +		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { + +			if (offset >= psg->length) { +				kunmap_atomic(paddr); +				psg = sg_next(psg); +				paddr = kmap_atomic(sg_page(psg)) + psg->offset; +				offset = 0; +			} + +			sdt = paddr + offset; + +			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" +				 " app_tag: 0x%04x ref_tag: %u\n", +				 (unsigned long long)sector, sdt->guard_tag, +				 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + +			if (sdt->app_tag == cpu_to_be16(0xffff)) { +				sector++; +				offset += sizeof(struct se_dif_v1_tuple); +				continue; +			} + +			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, +					       ei_lba); +			if (rc) { +				kunmap_atomic(paddr); +				kunmap_atomic(daddr); +				cmd->bad_sector = sector; +				return rc; +			} + +			sector++; +			ei_lba++; +			offset += sizeof(struct se_dif_v1_tuple); +		} + +		kunmap_atomic(paddr); +		kunmap_atomic(daddr); +	} + +	return 0; +} + +sense_reason_t +sbc_dif_read_strip(struct se_cmd *cmd) +{ +	struct se_device *dev = cmd->se_dev; +	u32 sectors = cmd->prot_length / dev->prot_length; + +	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, +				     cmd->t_prot_sg, 0); +} + +sense_reason_t +sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, +		    unsigned int ei_lba, struct scatterlist *sg, int sg_off) +{ +	sense_reason_t rc; + +	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); +	if (rc) +		return rc; + +	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); +	return 0; +} +EXPORT_SYMBOL(sbc_dif_verify_read); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 074539558a5..6cd7222738f 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)  	buf[5]	= 0x80;  	/* -	 * Set TPGS field for explict and/or implict ALUA access type +	 * Set TPGS field for explicit and/or implicit ALUA access type  	 * and opteration.  	 
*  	 * See spc4r17 section 6.4.2 Table 135 @@ -71,6 +71,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)  {  	struct se_lun *lun = cmd->se_lun;  	struct se_device *dev = cmd->se_dev; +	struct se_session *sess = cmd->se_sess;  	/* Set RMB (removable media) for tape devices */  	if (dev->transport->get_device_type(dev) == TYPE_TAPE) @@ -100,6 +101,14 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)  	 */  	if (dev->dev_attrib.emulate_3pc)  		buf[5] |= 0x8; +	/* +	 * Set Protection (PROTECT) bit when DIF has been enabled on the +	 * device, and the transport supports VERIFY + PASS. +	 */ +	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { +		if (dev->dev_attrib.pi_prot_type) +			buf[5] |= 0x1; +	}  	buf[7] = 0x2; /* CmdQue=1 */ @@ -120,15 +129,10 @@ static sense_reason_t  spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)  {  	struct se_device *dev = cmd->se_dev; -	u16 len = 0; +	u16 len;  	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { -		u32 unit_serial_len; - -		unit_serial_len = strlen(dev->t10_wwn.unit_serial); -		unit_serial_len++; /* For NULL Terminator */ - -		len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); +		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);  		len++; /* Extra Byte for NULL Terminator */  		buf[3] = len;  	} @@ -267,7 +271,7 @@ check_t10_vend_desc:  	port = lun->lun_sep;  	if (port) {  		struct t10_alua_lu_gp *lu_gp; -		u32 padding, scsi_name_len; +		u32 padding, scsi_name_len, scsi_target_len;  		u16 lu_gp_id = 0;  		u16 tg_pt_gp_id = 0;  		u16 tpgt; @@ -365,16 +369,6 @@ check_lu_gp:  		 * section 7.5.1 Table 362  		 */  check_scsi_name: -		scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); -		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ -		scsi_name_len += 10; -		/* Check for 4-byte padding */ -		padding = ((-scsi_name_len) & 3); -		if (padding != 0) -			scsi_name_len += padding; -		/* Header size + Designation descriptor */ -		scsi_name_len += 4; -  		buf[off] =  			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);  		buf[off++] |= 0x3; /* CODE SET == UTF-8 */ @@ -402,13 +396,57 @@ check_scsi_name:  		 * shall be no larger than 256 and shall be a multiple  		 * of four.  		 */ +		padding = ((-scsi_name_len) & 3);  		if (padding)  			scsi_name_len += padding; +		if (scsi_name_len > 256) +			scsi_name_len = 256;  		buf[off-1] = scsi_name_len;  		off += scsi_name_len;  		/* Header size + Designation descriptor */  		len += (scsi_name_len + 4); + +		/* +		 * Target device designator +		 */ +		buf[off] = +			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); +		buf[off++] |= 0x3; /* CODE SET == UTF-8 */ +		buf[off] = 0x80; /* Set PIV=1 */ +		/* Set ASSOCIATION == target device: 10b */ +		buf[off] |= 0x20; +		/* DESIGNATOR TYPE == SCSI name string */ +		buf[off++] |= 0x8; +		off += 2; /* Skip over Reserved and length */ +		/* +		 * SCSI name string identifer containing, $FABRIC_MOD +		 * dependent information.  For LIO-Target and iSCSI +		 * Target Port, this means "<iSCSI name>" in +		 * UTF-8 encoding. +		 */ +		scsi_target_len = sprintf(&buf[off], "%s", +					  tpg->se_tpg_tfo->tpg_get_wwn(tpg)); +		scsi_target_len += 1 /* Include  NULL terminator */; +		/* +		 * The null-terminated, null-padded (see 4.4.2) SCSI +		 * NAME STRING field contains a UTF-8 format string. +		 * The number of bytes in the SCSI NAME STRING field +		 * (i.e., the value in the DESIGNATOR LENGTH field) +		 * shall be no larger than 256 and shall be a multiple +		 * of four. 
+		 */ +		padding = ((-scsi_target_len) & 3); +		if (padding) +			scsi_target_len += padding; +		if (scsi_target_len > 256) +			scsi_target_len = 256; + +		buf[off-1] = scsi_target_len; +		off += scsi_target_len; + +		/* Header size + Designation descriptor */ +		len += (scsi_target_len + 4);  	}  	buf[2] = ((len >> 8) & 0xff);  	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ @@ -434,14 +472,31 @@ static sense_reason_t  spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)  {  	struct se_device *dev = cmd->se_dev; +	struct se_session *sess = cmd->se_sess;  	buf[3] = 0x3c; +	/* +	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK +	 * only for TYPE3 protection. +	 */ +	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { +		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) +			buf[4] = 0x5; +		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) +			buf[4] = 0x4; +	} +  	/* Set HEADSUP, ORDSUP, SIMPSUP */  	buf[5] = 0x07;  	/* If WriteCache emulation is enabled, set V_SUP */  	if (spc_check_dev_wce(dev))  		buf[6] = 0x01; +	/* If an LBA map is present set R_SUP */ +	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); +	if (!list_empty(&dev->t10_alua.lba_map_list)) +		buf[8] = 0x10; +	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);  	return 0;  } @@ -452,6 +507,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)  	struct se_device *dev = cmd->se_dev;  	u32 max_sectors;  	int have_tp = 0; +	int opt, min;  	/*  	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when @@ -475,7 +531,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)  	/*  	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY  	 */ -	put_unaligned_be16(1, &buf[6]); +	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev))) +		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]); +	else +		put_unaligned_be16(1, &buf[6]);  	/*  	 * Set MAXIMUM TRANSFER LENGTH @@ -487,7 +546,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)  	/*  	 * Set OPTIMAL TRANSFER LENGTH  	 */ -	put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); +	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev))) +		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]); +	else +		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);  	/*  	 * Exit now if we don't support TP. 
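[Note on the Block Limits VPD change above: the backend reports io_min/io_opt in bytes, while the B0h page expresses OPTIMAL TRANSFER LENGTH GRANULARITY and OPTIMAL TRANSFER LENGTH in logical blocks, hence the division by block_size before put_unaligned_be16/32(). A minimal arithmetic sketch, assuming a 512-byte block size and hypothetical queue limits:]

#include <stdio.h>

int main(void)
{
	unsigned int block_size = 512;	/* dev->dev_attrib.block_size          */
	unsigned int io_min = 4096;	/* bytes, as get_io_min() might return */
	unsigned int io_opt = 1048576;	/* bytes, as get_io_opt() might return */

	/* The block counts the VPD page would carry at &buf[6] / &buf[12]. */
	printf("granularity: %u blocks\n", io_min / block_size);	/* 8    */
	printf("optimal len: %u blocks\n", io_opt / block_size);	/* 2048 */
	return 0;
}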
@@ -593,6 +655,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)  	return 0;  } +/* Referrals VPD page */ +static sense_reason_t +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) +{ +	struct se_device *dev = cmd->se_dev; + +	buf[0] = dev->transport->get_device_type(dev); +	buf[3] = 0x0c; +	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); +	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]); + +	return 0; +} +  static sense_reason_t  spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); @@ -607,6 +683,7 @@ static struct {  	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },  	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },  	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, +	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },  };  /* supported vital product data pages */ @@ -636,11 +713,16 @@ spc_emulate_inquiry(struct se_cmd *cmd)  	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;  	unsigned char *rbuf;  	unsigned char *cdb = cmd->t_task_cdb; -	unsigned char buf[SE_INQUIRY_BUF]; +	unsigned char *buf;  	sense_reason_t ret;  	int p; +	int len = 0; -	memset(buf, 0, SE_INQUIRY_BUF); +	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); +	if (!buf) { +		pr_err("Unable to allocate response buffer for INQUIRY\n"); +		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +	}  	if (dev == tpg->tpg_virt_lun0.lun_se_dev)  		buf[0] = 0x3f; /* Not connected */ @@ -656,6 +738,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)  		}  		ret = spc_emulate_inquiry_std(cmd, buf); +		len = buf[4] + 5;  		goto out;  	} @@ -663,6 +746,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)  		if (cdb[2] == evpd_handlers[p].page) {  			buf[1] = cdb[2];  			ret = evpd_handlers[p].emulate(cmd, buf); +			len = get_unaligned_be16(&buf[2]) + 4;  			goto out;  		}  	} @@ -673,16 +757,17 @@ spc_emulate_inquiry(struct se_cmd *cmd)  out:  	rbuf = transport_kmap_data_sg(cmd);  	if (rbuf) { -		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); +		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));  		transport_kunmap_data_sg(cmd);  	} +	kfree(buf);  	if (!ret) -		target_complete_cmd(cmd, GOOD); +		target_complete_cmd_with_length(cmd, GOOD, len);  	return ret;  } -static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)  {  	p[0] = 0x01;  	p[1] = 0x0a; @@ -695,8 +780,11 @@ out:  	return 12;  } -static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)  { +	struct se_device *dev = cmd->se_dev; +	struct se_session *sess = cmd->se_sess; +  	p[0] = 0x0a;  	p[1] = 0x0a; @@ -778,6 +866,21 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)  	 * status (see SAM-4).  	 */  	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; +	/* +	 * From spc4r30, section 7.5.7 Control mode page +	 * +	 * Application Tag Owner (ATO) bit set to one. +	 * +	 * If the ATO bit is set to one the device server shall not modify the +	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection +	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE +	 * TAG field. 
+	 */ +	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { +		if (dev->dev_attrib.pi_prot_type) +			p[5] |= 0x80; +	} +  	p[8] = 0xff;  	p[9] = 0xff;  	p[11] = 30; @@ -786,8 +889,10 @@ out:  	return 12;  } -static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p) +static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)  { +	struct se_device *dev = cmd->se_dev; +  	p[0] = 0x08;  	p[1] = 0x12; @@ -803,7 +908,7 @@ out:  	return 20;  } -static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p) +static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)  {  	p[0] = 0x1c;  	p[1] = 0x0a; @@ -819,7 +924,7 @@ out:  static struct {  	uint8_t		page;  	uint8_t		subpage; -	int		(*emulate)(struct se_device *, u8, unsigned char *); +	int		(*emulate)(struct se_cmd *, u8, unsigned char *);  } modesense_handlers[] = {  	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },  	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching }, @@ -957,7 +1062,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  			 * the only two possibilities).  			 */  			if ((modesense_handlers[i].subpage & ~subpage) == 0) { -				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]); +				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);  				if (!ten && length + ret >= 255)  					break;  				length += ret; @@ -970,7 +1075,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)  		if (modesense_handlers[i].page == page &&  		    modesense_handlers[i].subpage == subpage) { -			length += modesense_handlers[i].emulate(dev, pc, &buf[length]); +			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);  			goto set_length;  		} @@ -996,13 +1101,12 @@ set_length:  		transport_kunmap_data_sg(cmd);  	} -	target_complete_cmd(cmd, GOOD); +	target_complete_cmd_with_length(cmd, GOOD, length);  	return 0;  }  static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)  { -	struct se_device *dev = cmd->se_dev;  	char *cdb = cmd->t_task_cdb;  	bool ten = cdb[0] == MODE_SELECT_10;  	int off = ten ? 8 : 4; @@ -1038,7 +1142,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)  		if (modesense_handlers[i].page == page &&  		    modesense_handlers[i].subpage == subpage) {  			memset(tbuf, 0, SE_MODE_PAGE_BUF); -			length = modesense_handlers[i].emulate(dev, 0, tbuf); +			length = modesense_handlers[i].emulate(cmd, 0, tbuf);  			goto check_contents;  		} @@ -1173,7 +1277,7 @@ done:  	buf[3] = (lun_count & 0xff);  	transport_kunmap_data_sg(cmd); -	target_complete_cmd(cmd, GOOD); +	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);  	return 0;  }  EXPORT_SYMBOL(spc_emulate_report_luns); @@ -1250,7 +1354,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)  		*size = (cdb[3] << 8) + cdb[4];  		/* -		 * Do implict HEAD_OF_QUEUE processing for INQUIRY. +		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.  		 
* See spc4r17 section 5.3  		 */  		cmd->sam_task_attr = MSG_HEAD_TAG; @@ -1284,7 +1388,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)  		cmd->execute_cmd = spc_emulate_report_luns;  		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];  		/* -		 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS +		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS  		 * See spc4r17 section 5.3  		 */  		cmd->sam_task_attr = MSG_HEAD_TAG; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 9c642e02cba..03538994d2f 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -32,7 +32,6 @@  #include <linux/utsname.h>  #include <linux/proc_fs.h>  #include <linux/seq_file.h> -#include <linux/blkdev.h>  #include <linux/configfs.h>  #include <scsi/scsi.h>  #include <scsi/scsi_device.h> @@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(  	struct se_device *dev =  		container_of(sgrps, struct se_device, dev_stat_grps); -	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); +	return snprintf(page, PAGE_SIZE, "%lu\n", +			atomic_long_read(&dev->num_resets));  }  DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); @@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds(  		container_of(sgrps, struct se_device, dev_stat_grps);  	/* scsiLuNumCommands */ -	return snprintf(page, PAGE_SIZE, "%llu\n", -			(unsigned long long)dev->num_cmds); +	return snprintf(page, PAGE_SIZE, "%lu\n", +			atomic_long_read(&dev->num_cmds));  }  DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); @@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(  		container_of(sgrps, struct se_device, dev_stat_grps);  	/* scsiLuReadMegaBytes */ -	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); +	return snprintf(page, PAGE_SIZE, "%lu\n", +			atomic_long_read(&dev->read_bytes) >> 20);  }  DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); @@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(  		container_of(sgrps, struct se_device, dev_stat_grps);  	/* scsiLuWrittenMegaBytes */ -	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); +	return snprintf(page, PAGE_SIZE, "%lu\n", +			atomic_long_read(&dev->write_bytes) >> 20);  }  DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); @@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets(  		container_of(sgrps, struct se_device, dev_stat_grps);  	/* scsiLuInResets */ -	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); +	return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets));  }  DEV_STAT_SCSI_LU_ATTR_RO(resets); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 250009909d4..f7cd95e8111 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort(  	struct se_cmd *cmd,  	int tas)  { +	bool remove = true;  	/*  	 * TASK ABORTED status (TAS) bit support  	*/  	if ((tmr_nacl && -	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) +	     (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { +		remove = false;  		transport_send_task_abort(cmd); +	} -	transport_cmd_finish_abort(cmd, 0); +	transport_cmd_finish_abort(cmd, remove);  }  static int target_check_cdb_and_preempt(struct list_head *list, @@ -127,6 +130,11 @@ void core_tmr_abort_task(  		if (dev != se_cmd->se_dev)  			continue; + +		/* skip se_cmd associated with tmr */ +		if (tmr->task_cmd == se_cmd) +			
continue; +  		ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);  		if (tmr->ref_task_tag != ref_tag)  			continue; @@ -150,18 +158,9 @@ void core_tmr_abort_task(  		cancel_work_sync(&se_cmd->work);  		transport_wait_for_tasks(se_cmd); -		/* -		 * Now send SAM_STAT_TASK_ABORTED status for the referenced -		 * se_cmd descriptor.. -		 */ -		transport_send_task_abort(se_cmd); -		/* -		 * Also deal with possible extra acknowledge reference.. -		 */ -		if (se_cmd->se_cmd_flags & SCF_ACK_KREF) -			target_put_sess_cmd(se_sess, se_cmd);  		target_put_sess_cmd(se_sess, se_cmd); +		transport_cmd_finish_abort(se_cmd, true);  		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"  				" ref_tag: %d\n", ref_tag); @@ -386,9 +385,7 @@ int core_tmr_lun_reset(  		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");  	} -	spin_lock_irq(&dev->stats_lock); -	dev->num_resets++; -	spin_unlock_irq(&dev->stats_lock); +	atomic_long_inc(&dev->num_resets);  	pr_debug("LUN_RESET: %s for [%s] Complete\n",  			(preempt_and_abort_list) ? "Preempt" : "TMR", diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index b9a6ec0aa5f..c036595b17c 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(  	return acl;  } +EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);  /*	core_tpg_add_node_to_devs():   * @@ -277,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(  	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);  	acl->se_tpg = tpg;  	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); -	spin_lock_init(&acl->stats_lock);  	acl->dynamic_node_acl = 1;  	tpg->se_tpg_tfo->set_default_node_attributes(acl); @@ -405,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);  	acl->se_tpg = tpg;  	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); -	spin_lock_init(&acl->stats_lock);  	tpg->se_tpg_tfo->set_default_node_attributes(acl); @@ -633,6 +632,13 @@ int core_tpg_set_initiator_node_tag(  }  EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); +static void core_tpg_lun_ref_release(struct percpu_ref *ref) +{ +	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + +	complete(&lun->lun_ref_comp); +} +  static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)  {  	/* Set in core_dev_setup_virtual_lun0() */ @@ -646,12 +652,11 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)  	atomic_set(&lun->lun_acl_count, 0);  	init_completion(&lun->lun_shutdown_comp);  	INIT_LIST_HEAD(&lun->lun_acl_list); -	INIT_LIST_HEAD(&lun->lun_cmd_list);  	spin_lock_init(&lun->lun_acl_lock); -	spin_lock_init(&lun->lun_cmd_lock);  	spin_lock_init(&lun->lun_sep_lock); +	init_completion(&lun->lun_ref_comp); -	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); +	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);  	if (ret < 0)  		return ret; @@ -691,10 +696,9 @@ int core_tpg_register(  		atomic_set(&lun->lun_acl_count, 0);  		init_completion(&lun->lun_shutdown_comp);  		INIT_LIST_HEAD(&lun->lun_acl_list); -		INIT_LIST_HEAD(&lun->lun_cmd_list);  		spin_lock_init(&lun->lun_acl_lock); -		spin_lock_init(&lun->lun_cmd_lock);  		spin_lock_init(&lun->lun_sep_lock); +		init_completion(&lun->lun_ref_comp);  	}  	se_tpg->se_tpg_type = se_tpg_type; @@ -777,7 +781,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)  }  EXPORT_SYMBOL(core_tpg_deregister); 
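[Note on the LUN shutdown rework visible above: per-command tracking moves from a lun_cmd_list that shutdown had to walk to a percpu_ref, where each in-flight command holds one reference, core_tpg_lun_ref_release() completes lun_ref_comp when the count drains, and the teardown path kills the ref and waits. A userspace analogue of the pattern, compressed to a single shared atomic; real percpu_ref keeps per-CPU counters and closes the get-vs-kill race, both of which this sketch deliberately ignores.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref {
	atomic_long count;	/* starts at 1, like percpu_ref_init()  */
	atomic_bool dead;	/* set at shutdown, like percpu_ref_kill() */
};

static bool ref_tryget(struct ref *r)
{
	if (atomic_load(&r->dead))
		return false;		/* no new I/O once shutdown begins */
	atomic_fetch_add(&r->count, 1);
	return true;
}

static void ref_put(struct ref *r)
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		printf("drained\n");	/* would complete(&lun->lun_ref_comp) */
}

static void ref_kill_and_drain(struct ref *r)
{
	atomic_store(&r->dead, true);
	ref_put(r);	/* drop the initial reference; the last put completes */
}

int main(void)
{
	struct ref r = { 1, false };

	if (ref_tryget(&r))		/* command submission takes a ref */
		ref_put(&r);		/* command completion drops it    */
	ref_kill_and_drain(&r);		/* prints "drained" at count 0    */
	return 0;
}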
-struct se_lun *core_tpg_pre_addlun( +struct se_lun *core_tpg_alloc_lun(  	struct se_portal_group *tpg,  	u32 unpacked_lun)  { @@ -807,18 +811,24 @@ struct se_lun *core_tpg_pre_addlun(  	return lun;  } -int core_tpg_post_addlun( +int core_tpg_add_lun(  	struct se_portal_group *tpg,  	struct se_lun *lun,  	u32 lun_access, -	void *lun_ptr) +	struct se_device *dev)  {  	int ret; -	ret = core_dev_export(lun_ptr, tpg, lun); +	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);  	if (ret < 0)  		return ret; +	ret = core_dev_export(dev, tpg, lun); +	if (ret < 0) { +		percpu_ref_cancel_init(&lun->lun_ref); +		return ret; +	} +  	spin_lock(&tpg->tpg_lun_lock);  	lun->lun_access = lun_access;  	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; @@ -827,14 +837,6 @@ int core_tpg_post_addlun(  	return 0;  } -static void core_tpg_shutdown_lun( -	struct se_portal_group *tpg, -	struct se_lun *lun) -{ -	core_clear_lun_from_tpg(lun, tpg); -	transport_clear_lun_from_sessions(lun); -} -  struct se_lun *core_tpg_pre_dellun(  	struct se_portal_group *tpg,  	u32 unpacked_lun) @@ -869,7 +871,8 @@ int core_tpg_post_dellun(  	struct se_portal_group *tpg,  	struct se_lun *lun)  { -	core_tpg_shutdown_lun(tpg, lun); +	core_clear_lun_from_tpg(lun, tpg); +	transport_clear_lun_ref(lun);  	core_dev_unexport(lun->lun_se_dev, tpg, lun); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 84747cc1aac..7fa62fc93e0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -28,7 +28,6 @@  #include <linux/string.h>  #include <linux/timer.h>  #include <linux/slab.h> -#include <linux/blkdev.h>  #include <linux/spinlock.h>  #include <linux/kthread.h>  #include <linux/in.h> @@ -63,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;  struct kmem_cache *t10_alua_lu_gp_mem_cache;  struct kmem_cache *t10_alua_tg_pt_gp_cache;  struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +struct kmem_cache *t10_alua_lba_map_cache; +struct kmem_cache *t10_alua_lba_map_mem_cache;  static void transport_complete_task_attr(struct se_cmd *cmd);  static void transport_handle_queue_full(struct se_cmd *cmd, @@ -129,14 +130,36 @@ int init_se_kmem_caches(void)  				"mem_t failed\n");  		goto out_free_tg_pt_gp_cache;  	} +	t10_alua_lba_map_cache = kmem_cache_create( +			"t10_alua_lba_map_cache", +			sizeof(struct t10_alua_lba_map), +			__alignof__(struct t10_alua_lba_map), 0, NULL); +	if (!t10_alua_lba_map_cache) { +		pr_err("kmem_cache_create() for t10_alua_lba_map_" +				"cache failed\n"); +		goto out_free_tg_pt_gp_mem_cache; +	} +	t10_alua_lba_map_mem_cache = kmem_cache_create( +			"t10_alua_lba_map_mem_cache", +			sizeof(struct t10_alua_lba_map_member), +			__alignof__(struct t10_alua_lba_map_member), 0, NULL); +	if (!t10_alua_lba_map_mem_cache) { +		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" +				"cache failed\n"); +		goto out_free_lba_map_cache; +	}  	target_completion_wq = alloc_workqueue("target_completion",  					       WQ_MEM_RECLAIM, 0);  	if (!target_completion_wq) -		goto out_free_tg_pt_gp_mem_cache; +		goto out_free_lba_map_mem_cache;  	return 0; +out_free_lba_map_mem_cache: +	kmem_cache_destroy(t10_alua_lba_map_mem_cache); +out_free_lba_map_cache: +	kmem_cache_destroy(t10_alua_lba_map_cache);  out_free_tg_pt_gp_mem_cache:  	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);  out_free_tg_pt_gp_cache: @@ -165,6 +188,8 @@ void release_se_kmem_caches(void)  	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);  	
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);  	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); +	kmem_cache_destroy(t10_alua_lba_map_cache); +	kmem_cache_destroy(t10_alua_lba_map_mem_cache);  }  /* This code ensures unique mib indexes are handed out. */ @@ -210,7 +235,7 @@ void transport_subsystem_check_init(void)  	sub_api_initialized = 1;  } -struct se_session *transport_init_session(void) +struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)  {  	struct se_session *se_sess; @@ -226,6 +251,7 @@ struct se_session *transport_init_session(void)  	INIT_LIST_HEAD(&se_sess->sess_wait_list);  	spin_lock_init(&se_sess->sess_cmd_lock);  	kref_init(&se_sess->sess_kref); +	se_sess->sup_prot_ops = sup_prot_ops;  	return se_sess;  } @@ -236,17 +262,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,  {  	int rc; -	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); +	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, +					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);  	if (!se_sess->sess_cmd_map) { -		pr_err("Unable to allocate se_sess->sess_cmd_map\n"); -		return -ENOMEM; +		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); +		if (!se_sess->sess_cmd_map) { +			pr_err("Unable to allocate se_sess->sess_cmd_map\n"); +			return -ENOMEM; +		}  	}  	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);  	if (rc < 0) {  		pr_err("Unable to init se_sess->sess_tag_pool,"  			" tag_num: %u\n", tag_num); -		kfree(se_sess->sess_cmd_map); +		if (is_vmalloc_addr(se_sess->sess_cmd_map)) +			vfree(se_sess->sess_cmd_map); +		else +			kfree(se_sess->sess_cmd_map);  		se_sess->sess_cmd_map = NULL;  		return -ENOMEM;  	} @@ -256,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,  EXPORT_SYMBOL(transport_alloc_session_tags);  struct se_session *transport_init_session_tags(unsigned int tag_num, -					       unsigned int tag_size) +					       unsigned int tag_size, +					       enum target_prot_op sup_prot_ops)  {  	struct se_session *se_sess;  	int rc; -	se_sess = transport_init_session(); +	se_sess = transport_init_session(sup_prot_ops);  	if (IS_ERR(se_sess))  		return se_sess; @@ -412,7 +446,10 @@ void transport_free_session(struct se_session *se_sess)  {  	if (se_sess->sess_cmd_map) {  		percpu_ida_destroy(&se_sess->sess_tag_pool); -		kfree(se_sess->sess_cmd_map); +		if (is_vmalloc_addr(se_sess->sess_cmd_map)) +			vfree(se_sess->sess_cmd_map); +		else +			kfree(se_sess->sess_cmd_map);  	}  	kmem_cache_free(se_sess_cache, se_sess);  } @@ -463,11 +500,11 @@ void transport_deregister_session(struct se_session *se_sess)  	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",  		se_tpg->se_tpg_tfo->get_fabric_name());  	/* -	 * If last kref is dropping now for an explict NodeACL, awake sleeping +	 * If last kref is dropping now for an explicit NodeACL, awake sleeping  	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group  	 * removal context.  	 */ -	if (se_nacl && comp_nacl == true) +	if (se_nacl && comp_nacl)  		target_put_nacl(se_nacl);  	transport_free_session(se_sess); @@ -505,23 +542,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,  	if (write_pending)  		cmd->t_state = TRANSPORT_WRITE_PENDING; -	/* -	 * Determine if IOCTL context caller in requesting the stopping of this -	 * command for LUN shutdown purposes. 
-	 */ -	if (cmd->transport_state & CMD_T_LUN_STOP) { -		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", -			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - -		cmd->transport_state &= ~CMD_T_ACTIVE; -		if (remove_from_lists) -			target_remove_from_state_list(cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -		complete(&cmd->transport_lun_stop_comp); -		return 1; -	} -  	if (remove_from_lists) {  		target_remove_from_state_list(cmd); @@ -542,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		complete(&cmd->t_transport_stop_comp); +		complete_all(&cmd->t_transport_stop_comp);  		return 1;  	} @@ -575,19 +595,25 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)  static void transport_lun_remove_cmd(struct se_cmd *cmd)  {  	struct se_lun *lun = cmd->se_lun; -	unsigned long flags;  	if (!lun)  		return; -	spin_lock_irqsave(&lun->lun_cmd_lock, flags); -	if (!list_empty(&cmd->se_lun_node)) -		list_del_init(&cmd->se_lun_node); -	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); +	if (cmpxchg(&cmd->lun_ref_active, true, false)) +		percpu_ref_put(&lun->lun_ref);  }  void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)  { +	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) +		transport_lun_remove_cmd(cmd); +	/* +	 * Allow the fabric driver to unmap any resources before +	 * releasing the descriptor via TFO->release_cmd() +	 */ +	if (remove) +		cmd->se_tfo->aborted_task(cmd); +  	if (transport_cmd_check_stop_to_fabric(cmd))  		return;  	if (remove) @@ -654,19 +680,16 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)  		return;  	} -	if (!success) -		cmd->transport_state |= CMD_T_FAILED; -  	/* -	 * Check for case where an explict ABORT_TASK has been received +	 * Check for case where an explicit ABORT_TASK has been received  	 * and transport_wait_for_tasks() will be waiting for completion..  	 
*/  	if (cmd->transport_state & CMD_T_ABORTED &&  	    cmd->transport_state & CMD_T_STOP) {  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		complete(&cmd->t_transport_stop_comp); +		complete_all(&cmd->t_transport_stop_comp);  		return; -	} else if (cmd->transport_state & CMD_T_FAILED) { +	} else if (!success) {  		INIT_WORK(&cmd->work, target_complete_failure_work);  	} else {  		INIT_WORK(&cmd->work, target_complete_ok_work); @@ -680,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)  }  EXPORT_SYMBOL(target_complete_cmd); +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ +	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { +		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { +			cmd->residual_count += cmd->data_length - length; +		} else { +			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; +			cmd->residual_count = cmd->data_length - length; +		} + +		cmd->data_length = length; +	} + +	target_complete_cmd(cmd, scsi_status); +} +EXPORT_SYMBOL(target_complete_cmd_with_length); +  static void target_add_to_state_list(struct se_cmd *cmd)  {  	struct se_device *dev = cmd->se_dev; @@ -713,7 +753,7 @@ void target_qf_do_work(struct work_struct *work)  	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {  		list_del(&cmd->se_qf_node);  		atomic_dec(&dev->dev_qf_count); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"  			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -1082,17 +1122,15 @@ void transport_init_se_cmd(  	int task_attr,  	unsigned char *sense_buffer)  { -	INIT_LIST_HEAD(&cmd->se_lun_node);  	INIT_LIST_HEAD(&cmd->se_delayed_node);  	INIT_LIST_HEAD(&cmd->se_qf_node);  	INIT_LIST_HEAD(&cmd->se_cmd_list);  	INIT_LIST_HEAD(&cmd->state_list); -	init_completion(&cmd->transport_lun_fe_stop_comp); -	init_completion(&cmd->transport_lun_stop_comp);  	init_completion(&cmd->t_transport_stop_comp);  	init_completion(&cmd->cmd_wait_comp);  	init_completion(&cmd->task_stop_comp);  	spin_lock_init(&cmd->t_state_lock); +	kref_init(&cmd->cmd_kref);  	cmd->transport_state = CMD_T_DEV_ACTIVE;  	cmd->se_tfo = tfo; @@ -1128,7 +1166,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)  	 * Dormant to Active status.  	 */  	cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",  			cmd->se_ordered_id, cmd->sam_task_attr,  			dev->transport->name); @@ -1299,6 +1337,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,   * @sgl_count: scatterlist count for unidirectional mapping   * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping   * @sgl_bidi_count: scatterlist count for bidirectional READ mapping + * @sgl_prot: struct scatterlist memory protection information + * @sgl_prot_count: scatterlist count for protection information   *   * Returns non zero to signal active I/O shutdown failure.  
All other   * setup exceptions will be returned as a SCSI CHECK_CONDITION response, @@ -1311,7 +1351,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess  		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,  		u32 data_length, int task_attr, int data_dir, int flags,  		struct scatterlist *sgl, u32 sgl_count, -		struct scatterlist *sgl_bidi, u32 sgl_bidi_count) +		struct scatterlist *sgl_bidi, u32 sgl_bidi_count, +		struct scatterlist *sgl_prot, u32 sgl_prot_count)  {  	struct se_portal_group *se_tpg;  	sense_reason_t rc; @@ -1359,6 +1400,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess  		transport_generic_request_failure(se_cmd, rc);  		return 0;  	} + +	/* +	 * Save pointers for SGLs containing protection information, +	 * if present. +	 */ +	if (sgl_prot_count) { +		se_cmd->t_prot_sg = sgl_prot; +		se_cmd->t_prot_nents = sgl_prot_count; +	} +  	/*  	 * When a non zero sgl_count has been passed perform SGL passthrough  	 * mapping for pre-allocated fabric memory instead of having target @@ -1395,6 +1446,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess  			return 0;  		}  	} +  	/*  	 * Check if we need to delay processing because of ALUA  	 * Active/NonOptimized primary access state.. @@ -1434,7 +1486,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,  {  	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,  			unpacked_lun, data_length, task_attr, data_dir, -			flags, NULL, 0, NULL, 0); +			flags, NULL, 0, NULL, 0, NULL, 0);  }  EXPORT_SYMBOL(target_submit_cmd); @@ -1580,6 +1632,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,  	case TCM_CHECK_CONDITION_ABORT_CMD:  	case TCM_CHECK_CONDITION_UNIT_ATTENTION:  	case TCM_CHECK_CONDITION_NOT_READY: +	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: +	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: +	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:  		break;  	case TCM_OUT_OF_RESOURCES:  		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1668,7 +1723,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)  		return false;  	case MSG_ORDERED_TAG:  		atomic_inc(&dev->dev_ordered_sync); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "  			 " se_ordered_id: %u\n", @@ -1686,7 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)  		 * For SIMPLE and UNTAGGED Task Attribute commands  		 */  		atomic_inc(&dev->simple_cmds); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		break;  	} @@ -1709,42 +1764,36 @@ void target_execute_cmd(struct se_cmd *cmd)  	/*  	 * If the received CDB has aleady been aborted stop processing it here.  	 */ -	if (transport_check_aborted_status(cmd, 1)) { -		complete(&cmd->transport_lun_stop_comp); +	if (transport_check_aborted_status(cmd, 1))  		return; -	} - -	/* -	 * Determine if IOCTL context caller in requesting the stopping of this -	 * command for LUN shutdown purposes. -	 */ -	spin_lock_irq(&cmd->t_state_lock); -	if (cmd->transport_state & CMD_T_LUN_STOP) { -		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", -			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); -		cmd->transport_state &= ~CMD_T_ACTIVE; -		spin_unlock_irq(&cmd->t_state_lock); -		complete(&cmd->transport_lun_stop_comp); -		return; -	}  	/*  	 * Determine if frontend context caller is requesting the stopping of  	 * this command for frontend exceptions.  	 
*/ +	spin_lock_irq(&cmd->t_state_lock);  	if (cmd->transport_state & CMD_T_STOP) {  		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",  			__func__, __LINE__,  			cmd->se_tfo->get_task_tag(cmd));  		spin_unlock_irq(&cmd->t_state_lock); -		complete(&cmd->t_transport_stop_comp); +		complete_all(&cmd->t_transport_stop_comp);  		return;  	}  	cmd->t_state = TRANSPORT_PROCESSING;  	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;  	spin_unlock_irq(&cmd->t_state_lock); +	/* +	 * Perform WRITE_INSERT of PI using software emulation when backend +	 * device has PI enabled, if the transport has not already generated +	 * PI using hardware WRITE_INSERT offload. +	 */ +	if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) { +		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) +			sbc_dif_generate(cmd); +	}  	if (target_handle_task_attr(cmd)) {  		spin_lock_irq(&cmd->t_state_lock); @@ -1797,7 +1846,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)  	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {  		atomic_dec(&dev->simple_cmds); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		dev->dev_cur_ordered_id++;  		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"  			" SIMPLE: %u\n", dev->dev_cur_ordered_id, @@ -1809,7 +1858,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)  			cmd->se_ordered_id);  	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {  		atomic_dec(&dev->dev_ordered_sync); -		smp_mb__after_atomic_dec(); +		smp_mb__after_atomic();  		dev->dev_cur_ordered_id++;  		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" @@ -1868,12 +1917,27 @@ static void transport_handle_queue_full(  	spin_lock_irq(&dev->qf_cmd_lock);  	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);  	atomic_inc(&dev->dev_qf_count); -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);  	schedule_work(&cmd->se_dev->qf_work_queue);  } +static bool target_check_read_strip(struct se_cmd *cmd) +{ +	sense_reason_t rc; + +	if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { +		rc = sbc_dif_read_strip(cmd); +		if (rc) { +			cmd->pi_err = rc; +			return true; +		} +	} + +	return false; +} +  static void target_complete_ok_work(struct work_struct *work)  {  	struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -1938,6 +2002,22 @@ static void target_complete_ok_work(struct work_struct *work)  					cmd->data_length;  		}  		spin_unlock(&cmd->se_lun->lun_sep_lock); +		/* +		 * Perform READ_STRIP of PI using software emulation when +		 * backend had PI enabled, if the transport will not be +		 * performing hardware READ_STRIP offload. 
+		 */ +		if (cmd->prot_op == TARGET_PROT_DIN_STRIP && +		    target_check_read_strip(cmd)) { +			ret = transport_send_check_condition_and_sense(cmd, +						cmd->pi_err, 0); +			if (ret == -EAGAIN || ret == -ENOMEM) +				goto queue_full; + +			transport_lun_remove_cmd(cmd); +			transport_cmd_check_stop_to_fabric(cmd); +			return; +		}  		trace_target_cmd_complete(cmd);  		ret = cmd->se_tfo->queue_data_in(cmd); @@ -2030,6 +2110,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)  	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);  	cmd->t_bidi_data_sg = NULL;  	cmd->t_bidi_data_nents = 0; + +	transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); +	cmd->t_prot_sg = NULL; +	cmd->t_prot_nents = 0;  }  /** @@ -2193,6 +2277,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)  				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;  		} +		if (cmd->prot_op != TARGET_PROT_NORMAL) { +			ret = target_alloc_sgl(&cmd->t_prot_sg, +					       &cmd->t_prot_nents, +					       cmd->prot_length, true); +			if (ret < 0) +				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +		} +  		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,  				       cmd->data_length, zero_flag);  		if (ret < 0) @@ -2283,13 +2375,12 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,  	unsigned long flags;  	int ret = 0; -	kref_init(&se_cmd->cmd_kref);  	/*  	 * Add a second kref if the fabric caller is expecting to handle  	 * fabric acknowledgement that requires two target_put_sess_cmd()  	 * invocations before se_cmd descriptor release.  	 */ -	if (ack_kref == true) { +	if (ack_kref) {  		kref_get(&se_cmd->cmd_kref);  		se_cmd->se_cmd_flags |= SCF_ACK_KREF;  	} @@ -2333,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref)   */  int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)  { +	if (!se_sess) { +		se_cmd->se_tfo->release_cmd(se_cmd); +		return 1; +	}  	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,  			&se_sess->sess_cmd_lock);  } @@ -2394,164 +2489,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)  }  EXPORT_SYMBOL(target_wait_for_sess_cmds); -/*	transport_lun_wait_for_tasks(): - * - *	Called from ConfigFS context to stop the passed struct se_cmd to allow - *	an struct se_lun to be successfully shutdown. - */ -static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) -{ -	unsigned long flags; -	int ret = 0; - -	/* -	 * If the frontend has already requested this struct se_cmd to -	 * be stopped, we can safely ignore this struct se_cmd. -	 */ -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (cmd->transport_state & CMD_T_STOP) { -		cmd->transport_state &= ~CMD_T_LUN_STOP; - -		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", -			 cmd->se_tfo->get_task_tag(cmd)); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		transport_cmd_check_stop(cmd, false, false); -		return -EPERM; -	} -	cmd->transport_state |= CMD_T_LUN_FE_STOP; -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	// XXX: audit task_flags checks. 
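[Note on a subtle change buried above: kref_init(&cmd->cmd_kref) moved into transport_init_se_cmd(), so target_get_sess_cmd() no longer re-initializes a possibly live counter and, with ack_kref set, only takes the extra reference that the fabric acknowledgement will drop later. A toy single-threaded trace of the resulting two-put lifetime; this simplified kref is illustrative only.]

#include <stdio.h>

struct kref { int refcount; };

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }
static int  kref_put(struct kref *k)  { return --k->refcount == 0; }

int main(void)
{
	struct kref cmd_kref;

	kref_init(&cmd_kref);		/* transport_init_se_cmd()          */
	kref_get(&cmd_kref);		/* ack_kref: fabric acknowledgement */

	if (!kref_put(&cmd_kref))	/* first target_put_sess_cmd()      */
		printf("still referenced (%d)\n", cmd_kref.refcount);
	if (kref_put(&cmd_kref))	/* second put releases the command  */
		printf("released\n");
	return 0;
}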
-	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if ((cmd->transport_state & CMD_T_BUSY) && -	    (cmd->transport_state & CMD_T_SENT)) { -		if (!target_stop_cmd(cmd, &flags)) -			ret++; -	} -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	pr_debug("ConfigFS: cmd: %p stop tasks ret:" -			" %d\n", cmd, ret); -	if (!ret) { -		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", -				cmd->se_tfo->get_task_tag(cmd)); -		wait_for_completion(&cmd->transport_lun_stop_comp); -		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", -				cmd->se_tfo->get_task_tag(cmd)); -	} - -	return 0; -} - -static void __transport_clear_lun_from_sessions(struct se_lun *lun) -{ -	struct se_cmd *cmd = NULL; -	unsigned long lun_flags, cmd_flags; -	/* -	 * Do exception processing and return CHECK_CONDITION status to the -	 * Initiator Port. -	 */ -	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); -	while (!list_empty(&lun->lun_cmd_list)) { -		cmd = list_first_entry(&lun->lun_cmd_list, -		       struct se_cmd, se_lun_node); -		list_del_init(&cmd->se_lun_node); - -		spin_lock(&cmd->t_state_lock); -		pr_debug("SE_LUN[%d] - Setting cmd->transport" -			"_lun_stop for  ITT: 0x%08x\n", -			cmd->se_lun->unpacked_lun, -			cmd->se_tfo->get_task_tag(cmd)); -		cmd->transport_state |= CMD_T_LUN_STOP; -		spin_unlock(&cmd->t_state_lock); - -		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - -		if (!cmd->se_lun) { -			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", -				cmd->se_tfo->get_task_tag(cmd), -				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); -			BUG(); -		} -		/* -		 * If the Storage engine still owns the iscsi_cmd_t, determine -		 * and/or stop its context. -		 */ -		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" -			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, -			cmd->se_tfo->get_task_tag(cmd)); - -		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { -			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); -			continue; -		} - -		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" -			"_wait_for_tasks(): SUCCESS\n", -			cmd->se_lun->unpacked_lun, -			cmd->se_tfo->get_task_tag(cmd)); - -		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); -		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); -			goto check_cond; -		} -		cmd->transport_state &= ~CMD_T_DEV_ACTIVE; -		target_remove_from_state_list(cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - -		/* -		 * The Storage engine stopped this struct se_cmd before it was -		 * send to the fabric frontend for delivery back to the -		 * Initiator Node.  Return this SCSI CDB back with an -		 * CHECK_CONDITION status. -		 */ -check_cond: -		transport_send_check_condition_and_sense(cmd, -				TCM_NON_EXISTENT_LUN, 0); -		/* -		 *  If the fabric frontend is waiting for this iscsi_cmd_t to -		 * be released, notify the waiting thread now that LU has -		 * finished accessing it. 
-		 */ -		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); -		if (cmd->transport_state & CMD_T_LUN_FE_STOP) { -			pr_debug("SE_LUN[%d] - Detected FE stop for" -				" struct se_cmd: %p ITT: 0x%08x\n", -				lun->unpacked_lun, -				cmd, cmd->se_tfo->get_task_tag(cmd)); - -			spin_unlock_irqrestore(&cmd->t_state_lock, -					cmd_flags); -			transport_cmd_check_stop(cmd, false, false); -			complete(&cmd->transport_lun_fe_stop_comp); -			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); -			continue; -		} -		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", -			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - -		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); -		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); -	} -	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); -} - -static int transport_clear_lun_thread(void *p) +static int transport_clear_lun_ref_thread(void *p)  {  	struct se_lun *lun = p; -	__transport_clear_lun_from_sessions(lun); +	percpu_ref_kill(&lun->lun_ref); + +	wait_for_completion(&lun->lun_ref_comp);  	complete(&lun->lun_shutdown_comp);  	return 0;  } -int transport_clear_lun_from_sessions(struct se_lun *lun) +int transport_clear_lun_ref(struct se_lun *lun)  {  	struct task_struct *kt; -	kt = kthread_run(transport_clear_lun_thread, lun, +	kt = kthread_run(transport_clear_lun_ref_thread, lun,  			"tcm_cl_%u", lun->unpacked_lun);  	if (IS_ERR(kt)) {  		pr_err("Unable to start clear_lun thread\n"); @@ -2585,43 +2539,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);  		return false;  	} -	/* -	 * If we are already stopped due to an external event (ie: LUN shutdown) -	 * sleep until the connection can have the passed struct se_cmd back. -	 * The cmd->transport_lun_stopped_sem will be upped by -	 * transport_clear_lun_from_sessions() once the ConfigFS context caller -	 * has completed its operation on the struct se_cmd. -	 */ -	if (cmd->transport_state & CMD_T_LUN_STOP) { -		pr_debug("wait_for_tasks: Stopping" -			" wait_for_completion(&cmd->t_tasktransport_lun_fe" -			"_stop_comp); for ITT: 0x%08x\n", -			cmd->se_tfo->get_task_tag(cmd)); -		/* -		 * There is a special case for WRITES where a FE exception + -		 * LUN shutdown means ConfigFS context is still sleeping on -		 * transport_lun_stop_comp in transport_lun_wait_for_tasks(). -		 * We go ahead and up transport_lun_stop_comp just to be sure -		 * here. -		 */ -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		complete(&cmd->transport_lun_stop_comp); -		wait_for_completion(&cmd->transport_lun_fe_stop_comp); -		spin_lock_irqsave(&cmd->t_state_lock, flags); - -		target_remove_from_state_list(cmd); -		/* -		 * At this point, the frontend who was the originator of this -		 * struct se_cmd, now owns the structure and can be released through -		 * normal means below. -		 */ -		pr_debug("wait_for_tasks: Stopped" -			" wait_for_completion(&cmd->t_tasktransport_lun_fe_" -			"stop_comp); for ITT: 0x%08x\n", -			cmd->se_tfo->get_task_tag(cmd)); - -		cmd->transport_state &= ~CMD_T_LUN_STOP; -	}  	if (!(cmd->transport_state & CMD_T_ACTIVE)) {  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2663,6 +2580,19 @@ static int transport_get_sense_codes(  	return 0;  } +static +void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector) +{ +	/* Place failed LBA in sense data information descriptor 0. 
*/ +	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc; +	buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */ +	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa; +	buffer[SPC_VALIDITY_OFFSET] = 0x80; + +	/* Descriptor Information: failing sector */ +	put_unaligned_be64(bad_sector, &buffer[12]); +} +  int  transport_send_check_condition_and_sense(struct se_cmd *cmd,  		sense_reason_t reason, int from_transport) @@ -2856,6 +2786,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,  		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;  		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;  		break; +	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: +		/* CURRENT ERROR */ +		buffer[0] = 0x70; +		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; +		/* ILLEGAL REQUEST */ +		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; +		/* LOGICAL BLOCK GUARD CHECK FAILED */ +		buffer[SPC_ASC_KEY_OFFSET] = 0x10; +		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01; +		transport_err_sector_info(buffer, cmd->bad_sector); +		break; +	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: +		/* CURRENT ERROR */ +		buffer[0] = 0x70; +		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; +		/* ILLEGAL REQUEST */ +		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; +		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ +		buffer[SPC_ASC_KEY_OFFSET] = 0x10; +		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02; +		transport_err_sector_info(buffer, cmd->bad_sector); +		break; +	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: +		/* CURRENT ERROR */ +		buffer[0] = 0x70; +		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; +		/* ILLEGAL REQUEST */ +		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; +		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ +		buffer[SPC_ASC_KEY_OFFSET] = 0x10; +		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; +		transport_err_sector_info(buffer, cmd->bad_sector); +		break;  	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:  	default:  		/* CURRENT ERROR */ @@ -2893,13 +2856,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)  	if (!(cmd->transport_state & CMD_T_ABORTED))  		return 0; -	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) +	/* +	 * If cmd has been aborted but either no status is to be sent or it has +	 * already been sent, just return +	 */ +	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))  		return 1;  	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",  		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); -	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; +	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; +	cmd->scsi_status = SAM_STAT_TASK_ABORTED;  	trace_target_cmd_complete(cmd);  	cmd->se_tfo->queue_status(cmd); @@ -2912,7 +2880,7 @@ void transport_send_task_abort(struct se_cmd *cmd)  	unsigned long flags;  	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { +	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);  		return;  	} @@ -2927,7 +2895,9 @@ void transport_send_task_abort(struct se_cmd *cmd)  	if (cmd->data_direction == DMA_TO_DEVICE) {  		if (cmd->se_tfo->write_pending_status(cmd) != 0) {  			cmd->transport_state |= CMD_T_ABORTED; -			smp_mb__after_atomic_inc(); +			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; +			smp_mb__after_atomic(); +			return;  		}  	}  	cmd->scsi_status = SAM_STAT_TASK_ABORTED; @@ -2985,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)  int transport_generic_handle_tmr(  	struct se_cmd *cmd)  { +	unsigned long flags; + +	
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index b04467e7547..101858e245b 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -98,7 +98,6 @@ int core_scsi3_ua_allocate(
 		pr_err("Unable to allocate struct se_ua\n");
 		return -ENOMEM;
 	}
-	INIT_LIST_HEAD(&ua->ua_dev_list);
 	INIT_LIST_HEAD(&ua->ua_nacl_list);
 
 	ua->ua_nacl = nacl;
@@ -163,7 +162,7 @@ int core_scsi3_ua_allocate(
 		spin_unlock_irq(&nacl->device_list_lock);
 
 		atomic_inc(&deve->ua_count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		return 0;
 	}
 	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -176,7 +175,7 @@ int core_scsi3_ua_allocate(
 		asc, ascq);
 
 	atomic_inc(&deve->ua_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	return 0;
 }
 
@@ -191,7 +190,7 @@ void core_scsi3_ua_release_all(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 }
@@ -252,7 +251,7 @@ void core_scsi3_ua_for_check_condition(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
@@ -311,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense(
 		kmem_cache_free(se_ua_cache, ua);
 
 		atomic_dec(&deve->ua_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 0204952fe4d..be912b36daa 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -19,7 +19,7 @@
 #define ASCQ_2AH_RESERVATIONS_RELEASED				0x04
 #define ASCQ_2AH_REGISTRATIONS_PREEMPTED			0x05
 #define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED		0x06
-#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
 #define ASCQ_2AH_PRIORITY_CHANGED				0x08
 
 #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS		0x09
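Every smp_mb__after_atomic_inc()/_dec() call site above moves to the single smp_mb__after_atomic() helper; the ordering semantics are assumed unchanged, namely a full barrier after a non-value-returning atomic operation. A minimal kernel-context sketch of the pattern the conversion preserves (publish_ua() is an illustrative name):

#include <linux/atomic.h>

static atomic_t ua_count = ATOMIC_INIT(0);

static void publish_ua(void)
{
	atomic_inc(&ua_count);	/* non-value-returning atomic op... */
	smp_mb__after_atomic();	/* ...so pair it with an explicit barrier */
}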
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4d22e7d2adc..e9186cdf35e 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,10 +40,6 @@
 static struct workqueue_struct *xcopy_wq = NULL;
 /*
- * From target_core_spc.c
- */
-extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
-/*
  * From target_core_device.c
  */
 extern struct mutex g_device_mutex;
@@ -74,7 +70,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
 	int rc;
 
-	if (src == true)
+	if (src)
 		dev_wwn = &xop->dst_tid_wwn[0];
 	else
 		dev_wwn = &xop->src_tid_wwn[0];
@@ -82,6 +78,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 	mutex_lock(&g_device_mutex);
 	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
 
+		if (!se_dev->dev_attrib.emulate_3pc)
+			continue;
+
 		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
 		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
 
@@ -89,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 		if (rc != 0)
 			continue;
 
-		if (src == true) {
+		if (src) {
 			xop->dst_dev = se_dev;
 			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
 				" se_dev\n", xop->dst_dev);
@@ -167,7 +166,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
 		return -EINVAL;
 	}
 
-	if (src == true) {
+	if (src) {
 		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
 		/*
 		 * Determine if the source designator matches the local device
@@ -237,7 +236,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 			/*
 			 * Assume target descriptors are in source -> destination order..
 			 */
-			if (src == true)
+			if (src)
 				src = false;
 			else
 				src = true;
@@ -298,8 +297,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
 		(unsigned long long)xop->dst_lba);
 
 	if (dc != 0) {
-		xop->dbl = (desc[29] << 16) & 0xff;
-		xop->dbl |= (desc[30] << 8) & 0xff;
+		xop->dbl = (desc[29] & 0xff) << 16;
+		xop->dbl |= (desc[30] & 0xff) << 8;
 		xop->dbl |= desc[31] & 0xff;
 
 		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
@@ -357,6 +356,7 @@ struct xcopy_pt_cmd {
 	struct se_cmd se_cmd;
 	struct xcopy_op *xcopy_op;
 	struct completion xpt_passthrough_sem;
+	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
 
 static struct se_port xcopy_pt_port;
@@ -401,9 +401,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
 	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
 				struct xcopy_pt_cmd, se_cmd);
 
-	if (xpt_cmd->remote_port)
-		kfree(se_cmd->se_lun);
-
 	kfree(xpt_cmd);
 }
 
@@ -563,27 +560,15 @@ static int target_xcopy_init_pt_lun(
 	 * reservations.  The pt_cmd->se_lun pointer will be setup from within
 	 * target_xcopy_setup_pt_port()
 	 */
-	if (remote_port == false) {
+	if (!remote_port) {
 		pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
 		return 0;
 	}
 
-	pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
-	if (!pt_cmd->se_lun) {
-		pr_err("Unable to allocate pt_cmd->se_lun\n");
-		return -ENOMEM;
-	}
-	init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
-	INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
-	INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
-	spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
-	spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
-	spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
-
+	pt_cmd->se_lun = &se_dev->xcopy_lun;
 	pt_cmd->se_dev = se_dev;
 
 	pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
-	pt_cmd->se_lun->lun_se_dev = se_dev;
 	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
 
 	pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
@@ -654,8 +639,6 @@ static int target_xcopy_setup_pt_cmd(
 
 	return 0;
 out:
-	if (remote_port == true)
-		kfree(cmd->se_lun);
 	return ret;
 }
 
@@ -675,7 +658,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
 
 	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
 			se_cmd->scsi_status);
-	return 0;
+
+	return (se_cmd->scsi_status) ? -EINVAL : 0;
 }
 
 static int target_xcopy_read_source(
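The target_xcopy_parse_segdesc_02() hunk above fixes a classic shift-then-mask bug: the old code applied the 0xff mask after shifting, so the top two bytes of the 24-bit big-endian dbl field were always discarded. A stand-alone demonstration (illustrative only, not part of the patch):

#include <stdio.h>

static unsigned int dbl_old(const unsigned char *d)
{
	unsigned int v;

	v  = (d[29] << 16) & 0xff;	/* bug: mask zeroes the shifted byte */
	v |= (d[30] << 8) & 0xff;	/* same here */
	v |= d[31] & 0xff;
	return v;
}

static unsigned int dbl_new(const unsigned char *d)
{
	unsigned int v;

	v  = (d[29] & 0xff) << 16;	/* mask first, then shift */
	v |= (d[30] & 0xff) << 8;
	v |= d[31] & 0xff;
	return v;
}

int main(void)
{
	unsigned char desc[32] = { 0 };

	desc[29] = 0x01; desc[30] = 0x02; desc[31] = 0x03;
	printf("old: 0x%06x new: 0x%06x\n", dbl_old(desc), dbl_new(desc));
	/* prints: old: 0x000003 new: 0x010203 */
	return 0;
}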
@@ -708,7 +692,7 @@ static int target_xcopy_read_source(
 		(unsigned long long)src_lba, src_sectors, length);
 
 	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
-				DMA_FROM_DEVICE, 0, NULL);
+			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 
 	xop->src_pt_cmd = xpt_cmd;
 
 	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +752,7 @@ static int target_xcopy_write_destination(
 		(unsigned long long)dst_lba, dst_sectors, length);
 
 	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
-				DMA_TO_DEVICE, 0, NULL);
+			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 
 	xop->dst_pt_cmd = xpt_cmd;
 
 	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +868,42 @@ out:
 
 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 {
+	struct se_device *dev = se_cmd->se_dev;
 	struct xcopy_op *xop = NULL;
 	unsigned char *p = NULL, *seg_desc;
 	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
 	int rc;
 	unsigned short tdll;
 
+	if (!dev->dev_attrib.emulate_3pc) {
+		pr_err("EXTENDED_COPY operation explicitly disabled\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
 	sa = se_cmd->t_task_cdb[1] & 0x1f;
 	if (sa != 0x00) {
 		pr_err("EXTENDED_COPY(LID4) not supported\n");
 		return TCM_UNSUPPORTED_SCSI_OPCODE;
 	}
 
+	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+	if (!xop) {
+		pr_err("Unable to allocate xcopy_op\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+	xop->xop_se_cmd = se_cmd;
+
 	p = transport_kmap_data_sg(se_cmd);
 	if (!p) {
 		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+		kfree(xop);
 		return TCM_OUT_OF_RESOURCES;
 	}
 
 	list_id = p[0];
-	if (list_id != 0x00) {
-		pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
-		goto out;
-	}
-	list_id_usage = (p[1] & 0x18);
+	list_id_usage = (p[1] & 0x18) >> 3;
+
 	/*
 	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
 	 */
@@ -920,13 +916,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 		goto out;
 	}
 
-	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
-	if (!xop) {
-		pr_err("Unable to allocate xcopy_op\n");
-		goto out;
-	}
-	xop->xop_se_cmd = se_cmd;
-
 	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
 		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
 		tdll, sdll, inline_dl);
@@ -935,6 +924,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 	if (rc <= 0)
 		goto out;
 
+	if (xop->src_dev->dev_attrib.block_size !=
+	    xop->dst_dev->dev_attrib.block_size) {
+		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+		       " block_size: %u currently unsupported\n",
+			xop->src_dev->dev_attrib.block_size,
+			xop->dst_dev->dev_attrib.block_size);
+		xcopy_pt_undepend_remotedev(xop);
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out;
+	}
+
 	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
 				rc * XCOPY_TARGET_DESC_LEN);
 	seg_desc = &p[16];
@@ -957,7 +957,7 @@ out:
 	if (p)
 		transport_kunmap_data_sg(se_cmd);
 	kfree(xop);
-	return TCM_INVALID_CDB_FIELD;
+	return ret;
 }
 
 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
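For orientation, the fields target_do_xcopy() consumes above come from the EXTENDED COPY (LID1) parameter-list header. A hedged sketch of that layout follows; the tdll/sdll/inline_dl offsets (bytes 2-3, 8-11 and 12-15, per SPC-4) are inferred rather than stated in the patch, which only shows that descriptors start at &p[16]:

#include <stdint.h>

struct xcopy_hdr {
	uint8_t  list_id;	/* byte 0 */
	uint8_t  list_id_usage;	/* byte 1, bits 4:3 */
	uint16_t tdll;		/* target descriptor list length */
	uint32_t sdll;		/* segment descriptor list length */
	uint32_t inline_dl;	/* inline data length */
};

static void parse_xcopy_hdr(const uint8_t *p, struct xcopy_hdr *h)
{
	h->list_id = p[0];
	h->list_id_usage = (p[1] & 0x18) >> 3;
	h->tdll = (p[2] << 8) | p[3];
	h->sdll = ((uint32_t)p[8] << 24) | (p[9] << 16) | (p[10] << 8) | p[11];
	h->inline_dl = ((uint32_t)p[12] << 24) | (p[13] << 16) |
		       (p[14] << 8) | p[15];
	/* target descriptors follow at &p[16] */
}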
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 0dd54a44abc..a0bcfd3e7e7 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -22,6 +22,7 @@
 #define FT_NAMELEN 32		/* length of ASCII WWPNs including pad */
 #define FT_TPG_NAMELEN 32	/* max length of TPG name */
 #define FT_LUN_NAMELEN 32	/* max length of LUN name */
+#define TCM_FC_DEFAULT_TAGS 512	/* tags used for per-session preallocation */
 
 struct ft_transport_id {
 	__u8	format;
@@ -93,20 +94,19 @@ struct ft_lun {
  */
 struct ft_tpg {
 	u32 index;
-	struct ft_lport_acl *lport_acl;
+	struct ft_lport_wwn *lport_wwn;
 	struct ft_tport *tport;		/* active tport or NULL */
-	struct list_head list;		/* linkage in ft_lport_acl tpg_list */
 	struct list_head lun_list;	/* head of LUNs */
 	struct se_portal_group se_tpg;
 	struct workqueue_struct *workqueue;
 };
 
-struct ft_lport_acl {
+struct ft_lport_wwn {
 	u64 wwpn;
 	char name[FT_NAMELEN];
-	struct list_head list;
-	struct list_head tpg_list;
-	struct se_wwn fc_lport_wwn;
+	struct list_head ft_wwn_node;
+	struct ft_tpg *tpg;
+	struct se_wwn se_wwn;
 };
 
 /*
@@ -127,7 +127,6 @@ struct ft_cmd {
 	u32 sg_cnt;			/* No. of item in scatterlist */
 };
 
-extern struct list_head ft_lport_list;
 extern struct mutex ft_lport_lock;
 extern struct fc4_prov ft_prov;
 extern struct target_fabric_configfs *ft_configfs;
@@ -162,6 +161,7 @@ int ft_write_pending_status(struct se_cmd *);
 u32 ft_get_task_tag(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
 
 /*
  * other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 0e5a1caed17..be0c0d08c56 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,6 +28,7 @@
 #include <linux/configfs.h>
 #include <linux/ctype.h>
 #include <linux/hash.h>
+#include <linux/percpu_ida.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 {
 	struct fc_frame *fp;
 	struct fc_lport *lport;
+	struct ft_sess *sess;
 
 	if (!cmd)
 		return;
+	sess = cmd->sess;
 	fp = cmd->req_frame;
 	lport = fr_dev(fp);
 	if (fr_seq(fp))
 		lport->tt.seq_release(fr_seq(fp));
 	fc_frame_free(fp);
-	ft_sess_put(cmd->sess);	/* undo get from lookup at recv */
-	kfree(cmd);
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ft_sess_put(sess);	/* undo get from lookup at recv */
 }
 
 void ft_release_cmd(struct se_cmd *se_cmd)
@@ -125,6 +128,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	struct fc_lport *lport;
 	struct fc_exch *ep;
 	size_t len;
+	int rc;
 
 	if (cmd->aborted)
 		return 0;
@@ -134,9 +138,10 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
 	fp = fc_frame_alloc(lport, len);
 	if (!fp) {
-		/* XXX shouldn't just drop it - requeue and retry? */
-		return 0;
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
 	}
+
 	fcp = fc_frame_payload_get(fp, len);
 	memset(fcp, 0, len);
 	fcp->resp.fr_status = se_cmd->scsi_status;
@@ -167,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
 		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
 
-	lport->tt.seq_send(lport, cmd->seq, fp);
+	rc = lport->tt.seq_send(lport, cmd->seq, fp);
+	if (rc) {
+		pr_info_ratelimited("%s: Failed to send response frame %p, "
+				    "xid <0x%x>\n", __func__, fp, ep->xid);
+		/*
+		 * Generate a TASK_SET_FULL status to notify the initiator
+		 * to reduce its queue_depth after the se_cmd response has
+		 * been re-queued by target-core.
+		 */
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
+	}
 	lport->tt.exch_done(cmd->seq);
 	return 0;
 }
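The ft_queue_status() change above converts two silent-drop paths into explicit backpressure: flag SAM_STAT_TASK_SET_FULL and return -ENOMEM so that target-core re-queues the response. A condensed kernel-context sketch of the pattern; alloc_response_frame() and send_response_frame() are hypothetical stand-ins for the fc_frame_alloc()/seq_send() calls:

static int queue_status_with_backpressure(struct se_cmd *se_cmd)
{
	struct fc_frame *fp = alloc_response_frame(se_cmd);	/* hypothetical */

	if (!fp || send_response_frame(se_cmd, fp) != 0) {	/* hypothetical */
		/* don't drop: flag TASK SET FULL and let core retry */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}
	return 0;
}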
@@ -423,6 +439,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
 	ft_send_resp_code(cmd, code);
 }
 
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void ft_send_work(struct work_struct *work);
 
 /*
@@ -432,14 +453,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
 {
 	struct ft_cmd *cmd;
 	struct fc_lport *lport = sess->tport->lport;
+	struct se_session *se_sess = sess->se_sess;
+	int tag;
 
-	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
-	if (!cmd)
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
 		goto busy;
+
+	cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(struct ft_cmd));
+
+	cmd->se_cmd.map_tag = tag;
 	cmd->sess = sess;
 	cmd->seq = lport->tt.seq_assign(lport, fp);
 	if (!cmd->seq) {
-		kfree(cmd);
+		percpu_ida_free(&se_sess->sess_tag_pool, tag);
 		goto busy;
 	}
 	cmd->req_frame = fp;		/* hold frame during cmd */
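ft_recv_cmd() above swaps the per-I/O kzalloc(..., GFP_ATOMIC) for tag-indexed slots in the session's preallocated sess_cmd_map. A sketch of the allocate/free pair, assuming the 3.1x-era <linux/percpu_ida.h> API visible in the hunk; ft_cmd_get()/ft_cmd_put() are illustrative names, not functions from the patch:

static struct ft_cmd *ft_cmd_get(struct se_session *se_sess)
{
	struct ft_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;	/* pool exhausted: caller signals busy */

	cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;	/* remember the slot for the free side */
	return cmd;
}

static void ft_cmd_put(struct se_session *se_sess, struct ft_cmd *cmd)
{
	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}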
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4e0050840a7..efdcb9663a1 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
 
 struct target_fabric_configfs *ft_configfs;
 
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
 DEFINE_MUTEX(ft_lport_lock);
 
 unsigned int ft_debug_logging;
@@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
 	return found;
 }
 
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
 	struct ft_node_acl *acl;
 
@@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg(
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
+	struct ft_lport_wwn *ft_wwn;
 	struct ft_tpg *tpg;
 	struct workqueue_struct *wq;
 	unsigned long index;
@@ -318,12 +318,17 @@ static struct se_portal_group *ft_add_tpg(
 	if (index > UINT_MAX)
 		return NULL;
 
-	lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+	if ((index != 1)) {
+		pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 	if (!tpg)
 		return NULL;
 	tpg->index = index;
-	tpg->lport_acl = lacl;
+	tpg->lport_wwn = ft_wwn;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
 	wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@ static struct se_portal_group *ft_add_tpg(
 	tpg->workqueue = wq;
 
 	mutex_lock(&ft_lport_lock);
-	list_add_tail(&tpg->list, &lacl->tpg_list);
+	ft_wwn->tpg = tpg;
 	mutex_unlock(&ft_lport_lock);
 
 	return &tpg->se_tpg;
@@ -351,6 +356,7 @@ static struct se_portal_group *ft_add_tpg(
 static void ft_del_tpg(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+	struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
 
 	pr_debug("del tpg %s\n",
 		    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
 	synchronize_rcu();
 
 	mutex_lock(&ft_lport_lock);
-	list_del(&tpg->list);
+	ft_wwn->tpg = NULL;
 	if (tpg->tport) {
 		tpg->tport->tpg = NULL;
 		tpg->tport = NULL;
@@ -380,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
  */
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_tpg *tpg;
+	struct ft_lport_wwn *ft_wwn;
 
-	list_for_each_entry(lacl, &ft_lport_list, list) {
-		if (lacl->wwpn == lport->wwpn) {
-			list_for_each_entry(tpg, &lacl->tpg_list, list)
-				return tpg; /* XXX for now return first entry */
-			return NULL;
-		}
+	list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (ft_wwn->wwpn == lport->wwpn)
+			return ft_wwn->tpg;
 	}
 	return NULL;
 }
@@ -401,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
  * Add lport to allowed config.
 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
 */
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_lport_acl *old_lacl;
+	struct ft_lport_wwn *ft_wwn;
+	struct ft_lport_wwn *old_ft_wwn;
 	u64 wwpn;
 
-	pr_debug("add lport %s\n", name);
+	pr_debug("add wwn %s\n", name);
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
 		return NULL;
-	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
-	if (!lacl)
+	ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+	if (!ft_wwn)
 		return NULL;
-	lacl->wwpn = wwpn;
-	INIT_LIST_HEAD(&lacl->tpg_list);
+	ft_wwn->wwpn = wwpn;
 
 	mutex_lock(&ft_lport_lock);
-	list_for_each_entry(old_lacl, &ft_lport_list, list) {
-		if (old_lacl->wwpn == wwpn) {
+	list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (old_ft_wwn->wwpn == wwpn) {
 			mutex_unlock(&ft_lport_lock);
-			kfree(lacl);
+			kfree(ft_wwn);
 			return NULL;
 		}
 	}
-	list_add_tail(&lacl->list, &ft_lport_list);
-	ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+	list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+	ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
 	mutex_unlock(&ft_lport_lock);
 
-	return &lacl->fc_lport_wwn;
+	return &ft_wwn->se_wwn;
 }
 
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
 {
-	struct ft_lport_acl *lacl = container_of(wwn,
-				struct ft_lport_acl, fc_lport_wwn);
+	struct ft_lport_wwn *ft_wwn = container_of(wwn,
+				struct ft_lport_wwn, se_wwn);
 
-	pr_debug("del lport %s\n", lacl->name);
+	pr_debug("del wwn %s\n", ft_wwn->name);
 	mutex_lock(&ft_lport_lock);
-	list_del(&lacl->list);
+	list_del(&ft_wwn->ft_wwn_node);
 	mutex_unlock(&ft_lport_lock);
 
-	kfree(lacl);
+	kfree(ft_wwn);
 }
 
 static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return tpg->lport_acl->name;
+	return tpg->lport_wwn->name;
 }
 
 static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
 	.queue_data_in =		ft_queue_data_in,
 	.queue_status =			ft_queue_status,
 	.queue_tm_rsp =			ft_queue_tm_resp,
+	.aborted_task =			ft_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
 	 */
-	.fabric_make_wwn =		&ft_add_lport,
-	.fabric_drop_wwn =		&ft_del_lport,
+	.fabric_make_wwn =		&ft_add_wwn,
+	.fabric_drop_wwn =		&ft_del_wwn,
 	.fabric_make_tpg =		&ft_add_tpg,
 	.fabric_drop_tpg =		&ft_del_tpg,
 	.fabric_post_link =		NULL,
@@ -552,7 +554,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
 	.fabric_drop_nodeacl =		&ft_del_acl,
 };
 
-int ft_register_configfs(void)
+static int ft_register_configfs(void)
 {
 	struct target_fabric_configfs *fabric;
 	int ret;
@@ -571,16 +573,16 @@ int ft_register_configfs(void)
 	/*
 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
 	 */
-	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
+	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
 						    ft_nacl_base_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
 	/*
 	 * register the fabric for use within TCM
 	 */
@@ -599,7 +601,7 @@ int ft_register_configfs(void)
 	return 0;
 }
 
-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
 {
 	if (!ft_configfs)
 		return;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index e415af32115..97b486c3dda 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,6 +82,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 
 	if (cmd->aborted)
 		return 0;
+
+	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
+		goto queue_status;
+
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
@@ -178,14 +182,23 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 			       FC_TYPE_FCP, f_ctl, fh_off);
 		error = lport->tt.seq_send(lport, seq, fp);
 		if (error) {
-			/* XXX For now, initiator will retry */
-			pr_err_ratelimited("%s: Failed to send frame %p, "
+			pr_info_ratelimited("%s: Failed to send frame %p, "
 						"xid <0x%x>, remaining %zu, "
 						"lso_max <0x%x>\n",
 						__func__, fp, ep->xid,
 						remaining, lport->lso_max);
+			/*
+			 * Go ahead and set TASK_SET_FULL status ignoring the
+			 * rest of the DataIN, and immediately attempt to
+			 * send the response via ft_queue_status() in order
+			 * to notify the initiator that it should reduce its
+			 * per-LUN queue_depth.
+			 */
+			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+			break;
 		}
 	}
+queue_status:
 	return ft_queue_status(se_cmd);
 }
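The tfc_io.c hunks extend the same backpressure idea to the DataIN path: on a mid-stream send failure, abandon the remaining frames and fall straight through to the status path carrying TASK SET FULL rather than letting the initiator time out. Reduced to its control flow (send_next_frame() is a hypothetical stand-in for the frame-building loop body):

static int queue_data_in_sketch(struct se_cmd *se_cmd)
{
	size_t remaining = se_cmd->data_length;

	while (remaining) {
		if (send_next_frame(se_cmd, &remaining)) {	/* hypothetical */
			/* stop streaming; initiator must back off */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
	/* always finish by (re)trying the status frame */
	return ft_queue_status(se_cmd);
}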
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4859505ae2e..21ce50880c7 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *);
  * Lookup or allocate target local port.
  * Caller holds ft_lport_lock.
  */
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
 {
 	struct ft_tpg *tpg;
 	struct ft_tport *tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
 
 	if (tport) {
 		tport->tpg = tpg;
+		tpg->tport = tport;
 		return tport;
 	}
 
@@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
 void ft_lport_add(struct fc_lport *lport, void *arg)
 {
 	mutex_lock(&ft_lport_lock);
-	ft_tport_create(lport);
+	ft_tport_get(lport);
 	mutex_unlock(&ft_lport_lock);
 }
 
@@ -210,7 +211,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 	if (!sess)
 		return NULL;
 
-	sess->se_sess = transport_init_session();
+	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
+						    sizeof(struct ft_cmd),
+						    TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		kfree(sess);
 		return NULL;
@@ -349,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 	struct ft_node_acl *acl;
 	u32 fcp_parm;
 
-	tport = ft_tport_create(rdata->local_port);
+	tport = ft_tport_get(rdata->local_port);
 	if (!tport)
 		goto not_target;	/* not a target for this local port */
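Finally, the tfc_sess.c conversion above is what feeds the tag pool used by ft_recv_cmd(): transport_init_session_tags() preallocates TCM_FC_DEFAULT_TAGS command slots per session. A back-of-the-envelope note on the trade-off, with an illustrative (not actual) sizeof value:

/*
 * Hedged sketch: sizing the per-session allocation created by the
 * transport_init_session_tags() call shown above.  With the default of
 * 512 tags and an assumed sizeof(struct ft_cmd) of 256 bytes:
 *
 *	512 tags * 256 bytes/tag = 131072 bytes (128 KiB) per session
 *
 * The third argument seeds the session's supported protection ops;
 * TARGET_PROT_NORMAL advertises no T10-PI (DIF) support.
 */
sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
					    sizeof(struct ft_cmd),
					    TARGET_PROT_NORMAL);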
