Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvscsi.c')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  537
1 file changed, 435 insertions(+), 102 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 67f78a470f5..7b23f21f22f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -55,13 +55,7 @@   * and sends a CRQ message back to inform the client that the request has   * completed.   * - * Note that some of the underlying infrastructure is different between - * machines conforming to the "RS/6000 Platform Architecture" (RPA) and - * the older iSeries hypervisor models.  To support both, some low level - * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c. - * The Makefile should pick one, not two, not zero, of these. - * - * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor + * TODO: This is currently pretty tied to the IBM pSeries hypervisor   * interfaces.  It would be really nice to abstract this above an RDMA   * layer.   */ @@ -99,13 +93,13 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;  static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;  static int fast_fail = 1;  static int client_reserve = 1; +static char partition_name[97] = "UNKNOWN"; +static unsigned int partition_number = -1;  static struct scsi_transport_template *ibmvscsi_transport_template;  #define IBMVSCSI_VERSION "1.5.9" -static struct ibmvscsi_ops *ibmvscsi_ops; -  MODULE_DESCRIPTION("IBM Virtual SCSI");  MODULE_AUTHOR("Dave Boutcher");  MODULE_LICENSE("GPL"); @@ -124,6 +118,327 @@ MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");  module_param_named(client_reserve, client_reserve, int, S_IRUGO );  MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); +static void ibmvscsi_handle_crq(struct viosrp_crq *crq, +				struct ibmvscsi_host_data *hostdata); + +/* ------------------------------------------------------------ + * Routines for managing the command/response queue + */ +/** + * ibmvscsi_handle_event: - Interrupt handler for crq events + * @irq:	number of irq to handle, not used + * @dev_instance: ibmvscsi_host_data of host that received interrupt + * + * Disables interrupts and schedules srp_task + * Always returns IRQ_HANDLED + */ +static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance) +{ +	struct ibmvscsi_host_data *hostdata = +	    (struct ibmvscsi_host_data *)dev_instance; +	vio_disable_interrupts(to_vio_dev(hostdata->dev)); +	tasklet_schedule(&hostdata->srp_task); +	return IRQ_HANDLED; +} + +/** + * release_crq_queue: - Deallocates data and unregisters CRQ + * @queue:	crq_queue to initialize and register + * @host_data:	ibmvscsi_host_data of host + * + * Frees irq, deallocates a page for messages, unmaps dma, and unregisters + * the crq with the hypervisor. + */ +static void ibmvscsi_release_crq_queue(struct crq_queue *queue, +				       struct ibmvscsi_host_data *hostdata, +				       int max_requests) +{ +	long rc = 0; +	struct vio_dev *vdev = to_vio_dev(hostdata->dev); +	free_irq(vdev->irq, (void *)hostdata); +	tasklet_kill(&hostdata->srp_task); +	do { +		if (rc) +			msleep(100); +		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); +	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); +	dma_unmap_single(hostdata->dev, +			 queue->msg_token, +			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); +	free_page((unsigned long)queue->msgs); +} + +/** + * crq_queue_next_crq: - Returns the next entry in message queue + * @queue:	crq_queue to use + * + * Returns pointer to next entry in queue, or NULL if there are no new + * entried in the CRQ. 
+ */ +static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) +{ +	struct viosrp_crq *crq; +	unsigned long flags; + +	spin_lock_irqsave(&queue->lock, flags); +	crq = &queue->msgs[queue->cur]; +	if (crq->valid & 0x80) { +		if (++queue->cur == queue->size) +			queue->cur = 0; + +		/* Ensure the read of the valid bit occurs before reading any +		 * other bits of the CRQ entry +		 */ +		rmb(); +	} else +		crq = NULL; +	spin_unlock_irqrestore(&queue->lock, flags); + +	return crq; +} + +/** + * ibmvscsi_send_crq: - Send a CRQ + * @hostdata:	the adapter + * @word1:	the first 64 bits of the data + * @word2:	the second 64 bits of the data + */ +static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, +			     u64 word1, u64 word2) +{ +	struct vio_dev *vdev = to_vio_dev(hostdata->dev); + +	/* +	 * Ensure the command buffer is flushed to memory before handing it +	 * over to the VIOS to prevent it from fetching any stale data. +	 */ +	mb(); +	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); +} + +/** + * ibmvscsi_task: - Process srps asynchronously + * @data:	ibmvscsi_host_data of host + */ +static void ibmvscsi_task(void *data) +{ +	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data; +	struct vio_dev *vdev = to_vio_dev(hostdata->dev); +	struct viosrp_crq *crq; +	int done = 0; + +	while (!done) { +		/* Pull all the valid messages off the CRQ */ +		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { +			ibmvscsi_handle_crq(crq, hostdata); +			crq->valid = 0x00; +		} + +		vio_enable_interrupts(vdev); +		crq = crq_queue_next_crq(&hostdata->queue); +		if (crq != NULL) { +			vio_disable_interrupts(vdev); +			ibmvscsi_handle_crq(crq, hostdata); +			crq->valid = 0x00; +		} else { +			done = 1; +		} +	} +} + +static void gather_partition_info(void) +{ +	struct device_node *rootdn; + +	const char *ppartition_name; +	const __be32 *p_number_ptr; + +	/* Retrieve information about this partition */ +	rootdn = of_find_node_by_path("/"); +	if (!rootdn) { +		return; +	} + +	ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL); +	if (ppartition_name) +		strncpy(partition_name, ppartition_name, +				sizeof(partition_name)); +	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); +	if (p_number_ptr) +		partition_number = of_read_number(p_number_ptr, 1); +	of_node_put(rootdn); +} + +static void set_adapter_info(struct ibmvscsi_host_data *hostdata) +{ +	memset(&hostdata->madapter_info, 0x00, +			sizeof(hostdata->madapter_info)); + +	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION); +	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION); + +	strncpy(hostdata->madapter_info.partition_name, partition_name, +			sizeof(hostdata->madapter_info.partition_name)); + +	hostdata->madapter_info.partition_number = +					cpu_to_be32(partition_number); + +	hostdata->madapter_info.mad_version = cpu_to_be32(1); +	hostdata->madapter_info.os_type = cpu_to_be32(2); +} + +/** + * reset_crq_queue: - resets a crq after a failure + * @queue:	crq_queue to initialize and register + * @hostdata:	ibmvscsi_host_data of host + * + */ +static int ibmvscsi_reset_crq_queue(struct crq_queue *queue, +				    struct ibmvscsi_host_data *hostdata) +{ +	int rc = 0; +	struct vio_dev *vdev = to_vio_dev(hostdata->dev); + +	/* Close the CRQ */ +	do { +		if (rc) +			msleep(100); +		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); +	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + +	/* Clean out the queue */ +	memset(queue->msgs, 0x00, 
PAGE_SIZE); +	queue->cur = 0; + +	set_adapter_info(hostdata); + +	/* And re-open it again */ +	rc = plpar_hcall_norets(H_REG_CRQ, +				vdev->unit_address, +				queue->msg_token, PAGE_SIZE); +	if (rc == 2) { +		/* Adapter is good, but other end is not ready */ +		dev_warn(hostdata->dev, "Partner adapter not ready\n"); +	} else if (rc != 0) { +		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc); +	} +	return rc; +} + +/** + * initialize_crq_queue: - Initializes and registers CRQ with hypervisor + * @queue:	crq_queue to initialize and register + * @hostdata:	ibmvscsi_host_data of host + * + * Allocates a page for messages, maps it for dma, and registers + * the crq with the hypervisor. + * Returns zero on success. + */ +static int ibmvscsi_init_crq_queue(struct crq_queue *queue, +				   struct ibmvscsi_host_data *hostdata, +				   int max_requests) +{ +	int rc; +	int retrc; +	struct vio_dev *vdev = to_vio_dev(hostdata->dev); + +	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); + +	if (!queue->msgs) +		goto malloc_failed; +	queue->size = PAGE_SIZE / sizeof(*queue->msgs); + +	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs, +					  queue->size * sizeof(*queue->msgs), +					  DMA_BIDIRECTIONAL); + +	if (dma_mapping_error(hostdata->dev, queue->msg_token)) +		goto map_failed; + +	gather_partition_info(); +	set_adapter_info(hostdata); + +	retrc = rc = plpar_hcall_norets(H_REG_CRQ, +				vdev->unit_address, +				queue->msg_token, PAGE_SIZE); +	if (rc == H_RESOURCE) +		/* maybe kexecing and resource is busy. try a reset */ +		rc = ibmvscsi_reset_crq_queue(queue, +					      hostdata); + +	if (rc == 2) { +		/* Adapter is good, but other end is not ready */ +		dev_warn(hostdata->dev, "Partner adapter not ready\n"); +		retrc = 0; +	} else if (rc != 0) { +		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc); +		goto reg_crq_failed; +	} + +	queue->cur = 0; +	spin_lock_init(&queue->lock); + +	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task, +		     (unsigned long)hostdata); + +	if (request_irq(vdev->irq, +			ibmvscsi_handle_event, +			0, "ibmvscsi", (void *)hostdata) != 0) { +		dev_err(hostdata->dev, "couldn't register irq 0x%x\n", +			vdev->irq); +		goto req_irq_failed; +	} + +	rc = vio_enable_interrupts(vdev); +	if (rc != 0) { +		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc); +		goto req_irq_failed; +	} + +	return retrc; + +      req_irq_failed: +	tasklet_kill(&hostdata->srp_task); +	rc = 0; +	do { +		if (rc) +			msleep(100); +		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); +	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); +      reg_crq_failed: +	dma_unmap_single(hostdata->dev, +			 queue->msg_token, +			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); +      map_failed: +	free_page((unsigned long)queue->msgs); +      malloc_failed: +	return -1; +} + +/** + * reenable_crq_queue: - reenables a crq after + * @queue:	crq_queue to initialize and register + * @hostdata:	ibmvscsi_host_data of host + * + */ +static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, +				       struct ibmvscsi_host_data *hostdata) +{ +	int rc = 0; +	struct vio_dev *vdev = to_vio_dev(hostdata->dev); + +	/* Re-enable the CRQ */ +	do { +		if (rc) +			msleep(100); +		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); +	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + +	if (rc) +		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc); +	return rc; +} +  /* 
------------------------------------------------------------   * Routines for the event pool and event structs   */ @@ -160,9 +475,9 @@ static int initialize_event_pool(struct event_pool *pool,  		memset(&evt->crq, 0x00, sizeof(evt->crq));  		atomic_set(&evt->free, 1);  		evt->crq.valid = 0x80; -		evt->crq.IU_length = sizeof(*evt->xfer_iu); -		evt->crq.IU_data_ptr = pool->iu_token +  -			sizeof(*evt->xfer_iu) * i; +		evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); +		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + +			sizeof(*evt->xfer_iu) * i);  		evt->xfer_iu = pool->iu_storage + i;  		evt->hostdata = hostdata;  		evt->ext_list = NULL; @@ -284,7 +599,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,  	evt_struct->cmnd_done = NULL;  	evt_struct->sync_srp = NULL;  	evt_struct->crq.format = format; -	evt_struct->crq.timeout = timeout; +	evt_struct->crq.timeout = cpu_to_be16(timeout);  	evt_struct->done = done;  } @@ -355,8 +670,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,  	scsi_for_each_sg(cmd, sg, nseg, i) {  		struct srp_direct_buf *descr = md + i; -		descr->va = sg_dma_address(sg); -		descr->len = sg_dma_len(sg); +		descr->va = cpu_to_be64(sg_dma_address(sg)); +		descr->len = cpu_to_be32(sg_dma_len(sg));  		descr->key = 0;  		total_length += sg_dma_len(sg);   	} @@ -399,13 +714,14 @@ static int map_sg_data(struct scsi_cmnd *cmd,  	}  	indirect->table_desc.va = 0; -	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf); +	indirect->table_desc.len = cpu_to_be32(sg_mapped * +					       sizeof(struct srp_direct_buf));  	indirect->table_desc.key = 0;  	if (sg_mapped <= MAX_INDIRECT_BUFS) {  		total_length = map_sg_list(cmd, sg_mapped,  					   &indirect->desc_list[0]); -		indirect->len = total_length; +		indirect->len = cpu_to_be32(total_length);  		return 1;  	} @@ -427,9 +743,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,  	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); -	indirect->len = total_length; -	indirect->table_desc.va = evt_struct->ext_list_token; -	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]); +	indirect->len = cpu_to_be32(total_length); +	indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token); +	indirect->table_desc.len = cpu_to_be32(sg_mapped * +					       sizeof(indirect->desc_list[0]));  	memcpy(indirect->desc_list, evt_struct->ext_list,  	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));   	return 1; @@ -490,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)  				       evt->hostdata->dev);  			if (evt->cmnd_done)  				evt->cmnd_done(evt->cmnd); -		} else if (evt->done) +		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && +			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)  			evt->done(evt);  		free_event_struct(&evt->hostdata->pool, evt);  		spin_lock_irqsave(hostdata->host->host_lock, flags); @@ -545,7 +863,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,  				   struct ibmvscsi_host_data *hostdata,  				   unsigned long timeout)  { -	u64 *crq_as_u64 = (u64 *) &evt_struct->crq; +	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;  	int request_status = 0;  	int rc;  	int srp_req = 0; @@ -616,8 +934,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,  		add_timer(&evt_struct->timer);  	} -	if ((rc = -	     ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { +	rc = ibmvscsi_send_crq(hostdata, 
be64_to_cpu(crq_as_u64[0]), +			       be64_to_cpu(crq_as_u64[1])); +	if (rc != 0) {  		list_del(&evt_struct->list);  		del_timer(&evt_struct->timer); @@ -683,15 +1002,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)  		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)  			memcpy(cmnd->sense_buffer,  			       rsp->data, -			       rsp->sense_data_len); +			       be32_to_cpu(rsp->sense_data_len));  		unmap_cmd_data(&evt_struct->iu.srp.cmd,   			       evt_struct,   			       evt_struct->hostdata->dev);  		if (rsp->flags & SRP_RSP_FLAG_DOOVER) -			scsi_set_resid(cmnd, rsp->data_out_res_cnt); +			scsi_set_resid(cmnd, +				       be32_to_cpu(rsp->data_out_res_cnt));  		else if (rsp->flags & SRP_RSP_FLAG_DIOVER) -			scsi_set_resid(cmnd, rsp->data_in_res_cnt); +			scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));  	}  	if (evt_struct->cmnd_done) @@ -713,7 +1033,7 @@ static inline u16 lun_from_dev(struct scsi_device *dev)   * @cmd:	struct scsi_cmnd to be executed   * @done:	Callback function to be called when cmd is completed  */ -static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, +static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,  				 void (*done) (struct scsi_cmnd *))  {  	struct srp_cmd *srp_cmd; @@ -733,7 +1053,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,  	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);  	srp_cmd->opcode = SRP_CMD;  	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); -	srp_cmd->lun = ((u64) lun) << 48; +	srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);  	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {  		if (!firmware_has_feature(FW_FEATURE_CMO)) @@ -758,14 +1078,17 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,  	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||  	     out_fmt == SRP_DATA_DESC_INDIRECT) &&  	    indirect->table_desc.va == 0) { -		indirect->table_desc.va = evt_struct->crq.IU_data_ptr + +		indirect->table_desc.va = +			cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +  			offsetof(struct srp_cmd, add_data) + -			offsetof(struct srp_indirect_buf, desc_list); +			offsetof(struct srp_indirect_buf, desc_list));  	}  	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);  } +static DEF_SCSI_QCMD(ibmvscsi_queuecommand) +  /* ------------------------------------------------------------   * Routines for driver initialization   */ @@ -852,7 +1175,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)  	 * request_limit could have been set to -1 by this client.  	 
*/  	atomic_set(&hostdata->request_limit, -		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); +		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));  	/* If we had any pending I/Os, kick them */  	scsi_unblock_requests(hostdata->host); @@ -878,8 +1201,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)  	login = &evt_struct->iu.srp.login_req;  	memset(login, 0, sizeof(*login));  	login->opcode = SRP_LOGIN_REQ; -	login->req_it_iu_len = sizeof(union srp_iu); -	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; +	login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); +	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | +					 SRP_BUF_FORMAT_INDIRECT);  	spin_lock_irqsave(hostdata->host->host_lock, flags);  	/* Start out with a request limit of 0, since this is negotiated in @@ -908,12 +1232,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct)  		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",  			evt_struct->xfer_iu->mad.capabilities.common.status);  	} else { -		if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) +		if (hostdata->caps.migration.common.server_support != +		    cpu_to_be16(SERVER_SUPPORTS_CAP))  			dev_info(hostdata->dev, "Partition migration not supported\n");  		if (client_reserve) {  			if (hostdata->caps.reserve.common.server_support == -			    SERVER_SUPPORTS_CAP) +			    cpu_to_be16(SERVER_SUPPORTS_CAP))  				dev_info(hostdata->dev, "Client reserve enabled\n");  			else  				dev_info(hostdata->dev, "Client reserve not supported\n"); @@ -945,9 +1270,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)  	req = &evt_struct->iu.mad.capabilities;  	memset(req, 0, sizeof(*req)); -	hostdata->caps.flags = CAP_LIST_SUPPORTED; +	hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);  	if (hostdata->client_migrated) -		hostdata->caps.flags |= CLIENT_MIGRATED; +		hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);  	strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),  		sizeof(hostdata->caps.name)); @@ -958,22 +1283,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)  	strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));  	hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; -	req->common.type = VIOSRP_CAPABILITIES_TYPE; -	req->buffer = hostdata->caps_addr; +	req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); +	req->buffer = cpu_to_be64(hostdata->caps_addr); -	hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; -	hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); -	hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; -	hostdata->caps.migration.ecl = 1; +	hostdata->caps.migration.common.cap_type = +				cpu_to_be32(MIGRATION_CAPABILITIES); +	hostdata->caps.migration.common.length = +				cpu_to_be16(sizeof(hostdata->caps.migration)); +	hostdata->caps.migration.common.server_support = +				cpu_to_be16(SERVER_SUPPORTS_CAP); +	hostdata->caps.migration.ecl = cpu_to_be32(1);  	if (client_reserve) { -		hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; -		hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); -		hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; -		hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; -		req->common.length = sizeof(hostdata->caps); +		hostdata->caps.reserve.common.cap_type = +					cpu_to_be32(RESERVATION_CAPABILITIES); +		
hostdata->caps.reserve.common.length = +				cpu_to_be16(sizeof(hostdata->caps.reserve)); +		hostdata->caps.reserve.common.server_support = +				cpu_to_be16(SERVER_SUPPORTS_CAP); +		hostdata->caps.reserve.type = +				cpu_to_be32(CLIENT_RESERVE_SCSI_2); +		req->common.length = +				cpu_to_be16(sizeof(hostdata->caps));  	} else -		req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); +		req->common.length = cpu_to_be16(sizeof(hostdata->caps) - +						sizeof(hostdata->caps.reserve));  	spin_lock_irqsave(hostdata->host->host_lock, flags);  	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) @@ -991,7 +1325,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)  static void fast_fail_rsp(struct srp_event_struct *evt_struct)  {  	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; -	u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; +	u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);  	if (status == VIOSRP_MAD_NOT_SUPPORTED)  		dev_err(hostdata->dev, "fast_fail not supported in server\n"); @@ -1028,8 +1362,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)  	fast_fail_mad = &evt_struct->iu.mad.fast_fail;  	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); -	fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; -	fast_fail_mad->common.length = sizeof(*fast_fail_mad); +	fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL); +	fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));  	spin_lock_irqsave(hostdata->host->host_lock, flags);  	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); @@ -1056,15 +1390,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)  			 "host partition %s (%d), OS %d, max io %u\n",  			 hostdata->madapter_info.srp_version,  			 hostdata->madapter_info.partition_name, -			 hostdata->madapter_info.partition_number, -			 hostdata->madapter_info.os_type, -			 hostdata->madapter_info.port_max_txu[0]); +			 be32_to_cpu(hostdata->madapter_info.partition_number), +			 be32_to_cpu(hostdata->madapter_info.os_type), +			 be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));  		if (hostdata->madapter_info.port_max_txu[0])   			hostdata->host->max_sectors =  -				hostdata->madapter_info.port_max_txu[0] >> 9; +				be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; -		if (hostdata->madapter_info.os_type == 3 && +		if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&  		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {  			dev_err(hostdata->dev, "host (Ver. 
%s) doesn't support large transfers\n",  				hostdata->madapter_info.srp_version); @@ -1073,7 +1407,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)  			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;  		} -		if (hostdata->madapter_info.os_type == 3) { +		if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {  			enable_fast_fail(hostdata);  			return;  		} @@ -1108,9 +1442,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)  	req = &evt_struct->iu.mad.adapter_info;  	memset(req, 0x00, sizeof(*req)); -	req->common.type = VIOSRP_ADAPTER_INFO_TYPE; -	req->common.length = sizeof(hostdata->madapter_info); -	req->buffer = hostdata->adapter_info_addr; +	req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE); +	req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info)); +	req->buffer = cpu_to_be64(hostdata->adapter_info_addr);  	spin_lock_irqsave(hostdata->host->host_lock, flags);  	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) @@ -1195,7 +1529,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)  		/* Set up an abort SRP command */  		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));  		tsk_mgmt->opcode = SRP_TSK_MGMT; -		tsk_mgmt->lun = ((u64) lun) << 48; +		tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);  		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;  		tsk_mgmt->task_tag = (u64) found_evt; @@ -1318,7 +1652,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)  		/* Set up a lun reset SRP command */  		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));  		tsk_mgmt->opcode = SRP_TSK_MGMT; -		tsk_mgmt->lun = ((u64) lun) << 48; +		tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);  		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;  		evt->sync_srp = &srp_rsp; @@ -1424,21 +1758,22 @@ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)   * @hostdata:	ibmvscsi_host_data of host   *  */ -void ibmvscsi_handle_crq(struct viosrp_crq *crq, -			 struct ibmvscsi_host_data *hostdata) +static void ibmvscsi_handle_crq(struct viosrp_crq *crq, +				struct ibmvscsi_host_data *hostdata)  {  	long rc;  	unsigned long flags; +	/* The hypervisor copies our tag value here so no byteswapping */  	struct srp_event_struct *evt_struct = -	    (struct srp_event_struct *)crq->IU_data_ptr; +			(__force struct srp_event_struct *)crq->IU_data_ptr;  	switch (crq->valid) {  	case 0xC0:		/* initialization */  		switch (crq->format) {  		case 0x01:	/* Initialization message */  			dev_info(hostdata->dev, "partner initialized\n");  			/* Send back a response */ -			if ((rc = ibmvscsi_ops->send_crq(hostdata, -							 0xC002000000000000LL, 0)) == 0) { +			rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0); +			if (rc == 0) {  				/* Now login */  				init_adapter(hostdata);  			} else { @@ -1486,18 +1821,18 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,  	 */  	if (!valid_event_struct(&hostdata->pool, evt_struct)) {  		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", -		       (void *)crq->IU_data_ptr); +		       evt_struct);  		return;  	}  	if (atomic_read(&evt_struct->free)) {  		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", -			(void *)crq->IU_data_ptr); +			evt_struct);  		return;  	}  	if (crq->format == VIOSRP_SRP_FORMAT) -		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, +		atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),  			   &hostdata->request_limit);  	del_timer(&evt_struct->timer); @@ -1545,15 +1880,16 @@ static int 
ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,  	host_config = &evt_struct->iu.mad.host_config; +	/* The transport length field is only 16-bit */ +	length = min(0xffff, length); +  	/* Set up a lun reset SRP command */  	memset(host_config, 0x00, sizeof(*host_config)); -	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; -	host_config->common.length = length; -	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, -						    length, -						    DMA_BIDIRECTIONAL); +	host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE); +	host_config->common.length = cpu_to_be16(length); +	addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL); -	if (dma_mapping_error(hostdata->dev, host_config->buffer)) { +	if (dma_mapping_error(hostdata->dev, addr)) {  		if (!firmware_has_feature(FW_FEATURE_CMO))  			dev_err(hostdata->dev,  			        "dma_mapping error getting host config\n"); @@ -1561,6 +1897,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,  		return -1;  	} +	host_config->buffer = cpu_to_be64(addr); +  	init_completion(&evt_struct->comp);  	spin_lock_irqsave(hostdata->host->host_lock, flags);  	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); @@ -1590,8 +1928,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)  		sdev->allow_restart = 1;  		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);  	} -	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);  	spin_unlock_irqrestore(shost->host_lock, lock_flags); +	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);  	return 0;  } @@ -1844,18 +2182,17 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)  		smp_rmb();  		hostdata->reset_crq = 0; -		rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata); +		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);  		if (!rc) -			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0); -		if (!rc) -			rc = vio_enable_interrupts(to_vio_dev(hostdata->dev)); +			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0); +		vio_enable_interrupts(to_vio_dev(hostdata->dev));  	} else if (hostdata->reenable_crq) {  		smp_rmb();  		action = "enable"; -		rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata); +		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);  		hostdata->reenable_crq = 0;  		if (!rc) -			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0); +			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);  	} else  		return; @@ -1887,7 +2224,7 @@ static int ibmvscsi_work(void *data)  	struct ibmvscsi_host_data *hostdata = data;  	int rc; -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  	while (1) {  		rc = wait_event_interruptible(hostdata->work_wait_q, @@ -1949,7 +2286,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)  		goto init_crq_failed;  	} -	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); +	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);  	if (rc != 0 && rc != H_RESOURCE) {  		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);  		goto kill_kthread; @@ -1979,7 +2316,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)  	 * to fail if the other end is not acive.  
In that case we don't  	 * want to scan  	 */ -	if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0 +	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0  	    || rc == H_RESOURCE) {  		/*  		 * Wait around max init_timeout secs for the adapter to finish @@ -2007,7 +2344,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)        add_host_failed:  	release_event_pool(&hostdata->pool, hostdata);        init_pool_failed: -	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); +	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);        kill_kthread:        kthread_stop(hostdata->work_thread);        init_crq_failed: @@ -2023,7 +2360,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)  	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);  	unmap_persist_bufs(hostdata);  	release_event_pool(&hostdata->pool, hostdata); -	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, +	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,  					max_events);  	kthread_stop(hostdata->work_thread); @@ -2044,14 +2381,17 @@ static int ibmvscsi_remove(struct vio_dev *vdev)  static int ibmvscsi_resume(struct device *dev)  {  	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev); -	return ibmvscsi_ops->resume(hostdata); +	vio_disable_interrupts(to_vio_dev(hostdata->dev)); +	tasklet_schedule(&hostdata->srp_task); + +	return 0;  }  /**   * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we    * support.   */ -static struct vio_device_id ibmvscsi_device_table[] __devinitdata = { +static struct vio_device_id ibmvscsi_device_table[] = {  	{"vscsi", "IBM,v-scsi"},  	{ "", "" }  }; @@ -2066,11 +2406,8 @@ static struct vio_driver ibmvscsi_driver = {  	.probe = ibmvscsi_probe,  	.remove = ibmvscsi_remove,  	.get_desired_dma = ibmvscsi_get_desired_dma, -	.driver = { -		.name = "ibmvscsi", -		.owner = THIS_MODULE, -		.pm = &ibmvscsi_pm_ops, -	} +	.name = "ibmvscsi", +	.pm = &ibmvscsi_pm_ops,  };  static struct srp_function_template ibmvscsi_transport_functions = { @@ -2084,11 +2421,7 @@ int __init ibmvscsi_module_init(void)  	driver_template.can_queue = max_requests;  	max_events = max_requests + 2; -	if (firmware_has_feature(FW_FEATURE_ISERIES)) -		ibmvscsi_ops = &iseriesvscsi_ops; -	else if (firmware_has_feature(FW_FEATURE_VIO)) -		ibmvscsi_ops = &rpavscsi_ops; -	else +	if (!firmware_has_feature(FW_FEATURE_VIO))  		return -ENODEV;  	ibmvscsi_transport_template =  | 
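
Notes on the patterns above (the examples that follow are editor sketches, not driver code).

The bulk of the patch wraps protocol fields in cpu_to_be16/32/64 and be*_to_cpu conversions: the CRQ and the SRP/MAD IUs are big-endian on the wire, so a little-endian kernel (ppc64le) must swap every multi-byte field at the boundary. Here is a minimal userspace sketch of the same discipline, using glibc's <endian.h> helpers in place of the kernel macros; the wire_msg structure and its field names are illustrative stand-ins, not the driver's:

#include <endian.h>   /* htobe16/htobe32/be32toh; the kernel uses cpu_to_be*() */
#include <stdint.h>
#include <stdio.h>

/* Illustrative wire structure: every field is big-endian on the wire,
 * like the viosrp_crq and srp_* structures in this driver. */
struct wire_msg {
	uint16_t length;        /* __be16 in kernel terms */
	uint32_t req_lim_delta; /* __be32 */
};

int main(void)
{
	struct wire_msg msg;

	/* Sending: convert CPU-endian values to big-endian once, at the edge. */
	msg.length = htobe16(sizeof(msg));
	msg.req_lim_delta = htobe32(16);

	/* Receiving: convert back before any arithmetic or comparison. */
	uint32_t delta = be32toh(msg.req_lim_delta);
	printf("req_lim_delta on this CPU: %u\n", delta);
	return 0;
}

The one field deliberately left unswapped is the correlation token: the hypervisor echoes IU_data_ptr back verbatim, which is why ibmvscsi_handle_crq() recovers the event pointer with a __force cast under the comment "no byteswapping". The ibmvscsi_do_host_config() hunk also shows why conversion order matters: dma_map_single()'s return value is now checked with dma_mapping_error() in CPU byte order before the swapped handle is stored into host_config->buffer.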
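crq_queue_next_crq() consumes the CRQ page as a ring buffer: an entry is live when the top bit of its valid byte is set, cur wraps at size, and an rmb() keeps the payload read from being reordered ahead of the valid-bit check. A self-contained sketch of that consume step, assuming simplified stand-in types and omitting the spinlock and the caller's crq->valid = 0x00 reset:

#include <stddef.h>
#include <stdint.h>

#define ENTRY_VALID 0x80

struct entry {
	volatile uint8_t valid; /* producer writes this byte last */
	uint8_t payload[15];
};

struct ring {
	struct entry *msgs;
	size_t size, cur;
};

/* Return the next valid entry, or NULL if the ring is empty.
 * Mirrors crq_queue_next_crq(): test the valid bit, advance and
 * wrap the cursor, then issue a read barrier so the payload is
 * not speculated ahead of the valid flag. */
static struct entry *ring_next(struct ring *q)
{
	struct entry *e = &q->msgs[q->cur];

	if (!(e->valid & ENTRY_VALID))
		return NULL;
	if (++q->cur == q->size)
		q->cur = 0;
	__atomic_thread_fence(__ATOMIC_ACQUIRE); /* kernel: rmb() */
	return e;
}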
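The interrupt path is split in two: ibmvscsi_handle_event() only masks the VIO interrupt and schedules the srp_task tasklet, and ibmvscsi_task() drains the ring. The exit condition is the subtle part: after draining, the tasklet re-enables interrupts and polls once more, because a message landing between the last poll and vio_enable_interrupts() would otherwise sit unprocessed with no interrupt left to announce it. A runnable sketch of that drain/enable/re-check shape, with stub helpers standing in for the VIO calls:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for vio_enable_interrupts()/vio_disable_interrupts()
 * and the CRQ poll; in the driver these touch real adapter state. */
static bool irqs_enabled;
static int pending = 3; /* pretend three messages arrive */

static void enable_irqs(void)  { irqs_enabled = true; }
static void disable_irqs(void) { irqs_enabled = false; }

static bool poll_one(void) /* crq_queue_next_crq() analogue */
{
	if (pending == 0)
		return false;
	pending--;
	return true;
}

/* The ibmvscsi_task() pattern: drain, enable, re-check, and only
 * stop once the re-check after enabling finds the queue empty. */
static void task(void)
{
	for (;;) {
		while (poll_one())
			printf("handled a CRQ\n");
		enable_irqs();
		if (poll_one()) {
			/* Raced with a late arrival: mask again and go around. */
			disable_irqs();
			printf("handled a raced CRQ\n");
			continue;
		}
		break; /* interrupts enabled and queue verifiably empty */
	}
}

int main(void) { task(); return 0; }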
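Teardown and re-enable both call the hypervisor in a retry loop: H_FREE_CRQ and H_ENABLE_CRQ can report H_BUSY (or a long-busy code) while the operation completes, so the driver sleeps 100 ms between attempts, with only the first attempt going in cold. A sketch of that shape; the return-code values and fake_hcall() are invented for illustration:

#include <unistd.h>

enum { H_SUCCESS = 0, H_BUSY = 1 }; /* illustrative values, not PAPR's */

/* Stand-in for plpar_hcall_norets(H_FREE_CRQ, ...): report busy twice,
 * then succeed, the way a hypervisor mid-teardown might. */
static int fake_hcall(void)
{
	static int busy_rounds = 2;
	return busy_rounds-- > 0 ? H_BUSY : H_SUCCESS;
}

/* The driver's shape: no sleep before the first attempt, then a
 * 100 ms back-off between retries while the hypervisor is busy. */
static int free_crq_retry(void)
{
	int rc = 0;

	do {
		if (rc)
			usleep(100 * 1000); /* kernel: msleep(100) */
		rc = fake_hcall();
	} while (rc == H_BUSY);

	return rc;
}

int main(void) { return free_crq_retry(); }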
