Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvfc.c')
-rw-r--r--   drivers/scsi/ibmvscsi/ibmvfc.c | 657
1 file changed, 419 insertions(+), 238 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e577e2fee3..8dd47689d58 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
@@ -49,7 +50,6 @@ static unsigned int max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
-static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static LIST_HEAD(ibmvfc_head);
@@ -83,11 +83,6 @@ MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
"[Default=" __stringify(IBMVFC_DEBUG) "]");
-module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
- "transport should insulate the loss of a remote port. Once this "
- "value is exceeded, the scsi target is removed. "
- "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
"[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
@@ -432,6 +427,9 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
{
switch (tgt->action) {
case IBMVFC_TGT_ACTION_DEL_RPORT:
+ if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
+ tgt->action = action;
+ case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
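The new DELETED_RPORT handling above effectively turns the target action field into a one-way state machine: once a target is marked DEL_RPORT, the only transition still accepted is to DELETED_RPORT, and DELETED_RPORT itself is terminal. A minimal standalone sketch of that rule, using simplified names rather than the driver's own types:

enum tgt_action { TGT_NONE, TGT_DEL_RPORT, TGT_DELETED_RPORT };

/* Mirrors the guard in ibmvfc_set_tgt_action(): deletion only moves forward. */
static void set_tgt_action(enum tgt_action *cur, enum tgt_action next)
{
	switch (*cur) {
	case TGT_DEL_RPORT:
		if (next == TGT_DELETED_RPORT)	/* only the "deleted" transition is allowed */
			*cur = next;
		break;
	case TGT_DELETED_RPORT:			/* terminal: ignore further requests */
		break;
	default:
		*cur = next;
		break;
	}
}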
@@ -503,12 +501,23 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
- case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_RESET:
+ case IBMVFC_HOST_ACTION_REENABLE:
+ break;
+ default:
+ vhost->action = action;
+ break;
+ };
+ break;
+ case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_RESET:
+ case IBMVFC_HOST_ACTION_REENABLE:
default:
vhost->action = action;
break;
@@ -640,7 +649,7 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
**/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
- long rc;
+ long rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq_queue *crq = &vhost->crq;
@@ -648,6 +657,8 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
free_irq(vdev->irq, vhost);
tasklet_kill(&vhost->tasklet);
do {
+ if (rc)
+ msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -666,11 +677,13 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
**/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
- int rc;
+ int rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
/* Re-enable the CRQ */
do {
+ if (rc)
+ msleep(100);
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
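This loop, like the H_FREE_CRQ loops elsewhere in the patch, follows one idiom: retry the hcall while the hypervisor reports it busy, sleeping 100ms between attempts instead of spinning. A hedged sketch of that idiom as a standalone helper (hypothetical name, not part of the driver):

#include <linux/delay.h>	/* msleep() */
#include <asm/hvcall.h>		/* plpar_hcall_norets(), H_BUSY, H_IS_LONG_BUSY() */

static long retry_crq_hcall(unsigned long opcode, unsigned long unit_address)
{
	long rc = 0;

	do {
		if (rc)		/* sleep only after a busy return, never before the first try */
			msleep(100);
		rc = plpar_hcall_norets(opcode, unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	return rc;
}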
@@ -689,15 +702,19 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
**/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
- int rc;
+ int rc = 0;
+ unsigned long flags;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq_queue *crq = &vhost->crq;
/* Close the CRQ */
do {
+ if (rc)
+ msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ spin_lock_irqsave(vhost->host->host_lock, flags);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
@@ -715,6 +732,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
dev_warn(vhost->dev, "Partner adapter not ready\n");
else if (rc != 0)
dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rc;
}
@@ -820,17 +838,9 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
**/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
- int rc;
-
- scsi_block_requests(vhost->host);
ibmvfc_purge_requests(vhost, DID_ERROR);
- if ((rc = ibmvfc_reset_crq(vhost)) ||
- (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
- dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- } else
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
/**
@@ -1156,7 +1166,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login *login_info = &vhost->login_info;
- struct device_node *of_node = vhost->dev->archdata.of_node;
+ struct device_node *of_node = vhost->dev->of_node;
const char *location;
memset(login_info, 0, sizeof(*login_info));
@@ -1596,7 +1606,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
+static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
void (*done) (struct scsi_cmnd *))
{
struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
@@ -1662,6 +1672,8 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
return 0;
}
+static DEF_SCSI_QCMD(ibmvfc_queuecommand)
+
/**
* ibmvfc_sync_completion - Signal that a synchronous command has completed
* @evt: ibmvfc event struct
@@ -2023,95 +2035,108 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
}
/**
- * ibmvfc_abort_task_set - Abort outstanding commands to the device
- * @sdev: scsi device to abort commands
- *
- * This sends an Abort Task Set to the VIOS for the specified device. This does
- * NOT send any cancel to the VIOS. That must be done separately.
+ * ibmvfc_match_rport - Match function for specified remote port
+ * @evt: ibmvfc event struct
+ * @device: device to match (rport)
*
* Returns:
- * 0 on success / other on failure
+ * 1 if event matches rport / 0 if event does not match rport
**/
-static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
{
- struct ibmvfc_host *vhost = shost_priv(sdev->host);
- struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
- struct ibmvfc_cmd *tmf;
- struct ibmvfc_event *evt, *found_evt;
- union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
- int rsp_rc = -EBUSY;
- unsigned long flags;
- int rsp_code = 0;
+ struct fc_rport *cmd_rport;
- spin_lock_irqsave(vhost->host->host_lock, flags);
- found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (evt->cmnd && evt->cmnd->device == sdev) {
- found_evt = evt;
- break;
- }
- }
-
- if (!found_evt) {
- if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
- sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- return 0;
- }
-
- if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
-
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
- tmf->resp.len = sizeof(tmf->rsp);
- tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
- tmf->payload_len = sizeof(tmf->iu);
- tmf->resp_len = sizeof(tmf->rsp);
- tmf->cancel_key = (unsigned long)sdev->hostdata;
- tmf->tgt_scsi_id = rport->port_id;
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
- tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
- tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
- evt->sync_iu = &rsp_iu;
-
- init_completion(&evt->comp);
- rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ if (evt->cmnd) {
+ cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
+ if (cmd_rport == rport)
+ return 1;
}
+ return 0;
+}
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
+/**
+ * ibmvfc_match_target - Match function for specified target
+ * @evt: ibmvfc event struct
+ * @device: device to match (starget)
+ *
+ * Returns:
+ * 1 if event matches starget / 0 if event does not match starget
+ **/
+static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+ return 1;
+ return 0;
+}
- if (rsp_rc != 0) {
- sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
- return -EIO;
- }
+/**
+ * ibmvfc_match_lun - Match function for specified LUN
+ * @evt: ibmvfc event struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if event matches sdev / 0 if event does not match sdev
+ **/
+static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && evt->cmnd->device == device)
+ return 1;
+ return 0;
+}
- sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
- wait_for_completion(&evt->comp);
+/**
+ * ibmvfc_wait_for_ops - Wait for ops to complete
+ * @vhost: ibmvfc host struct
+ * @device: device to match (starget or sdev)
+ * @match: match function
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
+ int (*match) (struct ibmvfc_event *, void *))
+{
+ struct ibmvfc_event *evt;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ int wait;
+ unsigned long flags;
+ signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
- if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ ENTER;
+ do {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
- if (rsp_code) {
- if (fc_rsp->flags & FCP_RSP_LEN_VALID)
- rsp_code = fc_rsp->data.info.rsp_code;
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
- sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
- "flags: %x fcp_rsp: %x, scsi_status: %x\n",
- ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
- fc_rsp->scsi_status);
- rsp_rc = -EIO;
- } else
- sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+ if (!timeout) {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (wait)
+ dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
- spin_lock_irqsave(vhost->host->host_lock, flags);
- ibmvfc_free_event(evt);
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- return rsp_rc;
+ LEAVE;
+ return SUCCESS;
}
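ibmvfc_wait_for_ops() above is a generic tag-and-wait helper: the caller passes an opaque cookie plus a predicate, every in-flight event that matches gets its eh_comp pointed at a shared completion, and the loop repeats until nothing matches or IBMVFC_ABORT_WAIT_TIMEOUT expires. The match functions introduced in this patch give the main call patterns seen later:

/* wait for everything still queued to one scsi_device */
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);

/* wait for everything sharing a cancel key after an abort/cancel */
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);

/* wait for everything bound to one remote port */
rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);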
/**
@@ -2154,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
return 0;
}
- if (vhost->state == IBMVFC_ACTIVE) {
+ if (vhost->logged_in) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
@@ -2165,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->lun);
- tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+ if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
+ type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+ if (vhost->state == IBMVFC_ACTIVE)
+ tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+ else
+ tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->my_cancel_key = (unsigned long)starget->hostdata;
@@ -2178,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
- return -EIO;
+ /* If failure is received, the host adapter is most likely going
+ through reset, return success so the caller will wait for the command
+ being cancelled to get returned */
+ return 0;
}
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2191,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (status != IBMVFC_MAD_SUCCESS) {
sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
- return -EIO;
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter most likely going through reset, return success to
+ the caller will wait for the command being cancelled to get returned */
+ return 0;
+ default:
+ return -EIO;
+ };
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
@@ -2199,88 +2240,158 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
/**
- * ibmvfc_match_target - Match function for specified target
+ * ibmvfc_match_key - Match function for specified cancel key
* @evt: ibmvfc event struct
- * @device: device to match (starget)
+ * @key: cancel key to match
*
* Returns:
- * 1 if event matches starget / 0 if event does not match starget
+ * 1 if event matches key / 0 if event does not match key
**/
-static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
{
- if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+ unsigned long cancel_key = (unsigned long)key;
+
+ if (evt->crq.format == IBMVFC_CMD_FORMAT &&
+ evt->iu.cmd.cancel_key == cancel_key)
return 1;
return 0;
}
/**
- * ibmvfc_match_lun - Match function for specified LUN
+ * ibmvfc_match_evt - Match function for specified event
* @evt: ibmvfc event struct
- * @device: device to match (sdev)
+ * @match: event to match
*
* Returns:
- * 1 if event matches sdev / 0 if event does not match sdev
+ * 1 if event matches key / 0 if event does not match key
**/
-static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
- if (evt->cmnd && evt->cmnd->device == device)
+ if (evt == match)
return 1;
return 0;
}
/**
- * ibmvfc_wait_for_ops - Wait for ops to complete
- * @vhost: ibmvfc host struct
- * @device: device to match (starget or sdev)
- * @match: match function
+ * ibmvfc_abort_task_set - Abort outstanding commands to the device
+ * @sdev: scsi device to abort commands
+ *
+ * This sends an Abort Task Set to the VIOS for the specified device. This does
+ * NOT send any cancel to the VIOS. That must be done separately.
*
* Returns:
- * SUCCESS / FAILED
+ * 0 on success / other on failure
**/
-static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
- int (*match) (struct ibmvfc_event *, void *))
+static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
- struct ibmvfc_event *evt;
- DECLARE_COMPLETION_ONSTACK(comp);
- int wait;
- unsigned long flags;
- signed long timeout = init_timeout * HZ;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_cmd *tmf;
+ struct ibmvfc_event *evt, *found_evt;
+ union ibmvfc_iu rsp_iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ int rc, rsp_rc = -EBUSY;
+ unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
+ int rsp_code = 0;
- ENTER;
- do {
- wait = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = &comp;
- wait++;
- }
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ found_evt = NULL;
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
}
+ }
+
+ if (!found_evt) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+ }
- if (wait) {
- timeout = wait_for_completion_timeout(&comp, timeout);
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
- if (!timeout) {
- wait = 0;
+ tmf = &evt->iu.cmd;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+ tmf->resp.len = sizeof(tmf->rsp);
+ tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+ tmf->payload_len = sizeof(tmf->iu);
+ tmf->resp_len = sizeof(tmf->rsp);
+ tmf->cancel_key = (unsigned long)sdev->hostdata;
+ tmf->tgt_scsi_id = rport->port_id;
+ int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+ tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
+ evt->sync_iu = &rsp_iu;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
+ timeout = wait_for_completion_timeout(&evt->comp, timeout);
+
+ if (!timeout) {
+ rc = ibmvfc_cancel_all(sdev, 0);
+ if (!rc) {
+ rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+ if (rc == SUCCESS)
+ rc = 0;
+ }
+
+ if (rc) {
+ sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
+ ibmvfc_reset_host(vhost);
+ rsp_rc = -EIO;
+ rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+
+ if (rc == SUCCESS)
+ rsp_rc = 0;
+
+ rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
+ if (rc != SUCCESS) {
spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = NULL;
- wait++;
- }
- }
+ ibmvfc_hard_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- if (wait)
- dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
- LEAVE;
- return wait ? FAILED : SUCCESS;
+ rsp_rc = 0;
}
+
+ goto out;
}
- } while (wait);
+ }
- LEAVE;
- return SUCCESS;
+ if (rsp_iu.cmd.status)
+ rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+ if (rsp_code) {
+ if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = fc_rsp->data.info.rsp_code;
+
+ sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+ ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+ rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ fc_rsp->scsi_status);
+ rsp_rc = -EIO;
+ } else
+ sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+
+out:
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rsp_rc;
}
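The reworked abort path above no longer assumes the abort response arrives; when the TMF times out it escalates step by step. A condensed outline of that timeout path, with locking, logging, and return-code bookkeeping elided:

if (!wait_for_completion_timeout(&evt->comp, timeout)) {
	rc = ibmvfc_cancel_all(sdev, 0);		/* 1: cancel the LUN's commands */
	if (!rc)
		rc = (ibmvfc_wait_for_ops(vhost, sdev->hostdata,
					  ibmvfc_match_key) == SUCCESS) ? 0 : -EIO;
	if (rc) {					/* 2: cancel failed or didn't drain */
		ibmvfc_reset_host(vhost);
		if (ibmvfc_wait_for_ops(vhost, evt,
					ibmvfc_match_evt) != SUCCESS)
			ibmvfc_hard_reset_host(vhost);	/* 3: last resort; driver holds host_lock here */
	}
}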
/**
@@ -2288,23 +2399,30 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
* @cmd: scsi command to abort
*
* Returns:
- * SUCCESS / FAILED
+ * SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
- int cancel_rc, abort_rc;
+ int cancel_rc, block_rc;
int rc = FAILED;
ENTER;
+ block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
- cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
- abort_rc = ibmvfc_abort_task_set(sdev);
+ if (block_rc != FAST_IO_FAIL) {
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+ ibmvfc_abort_task_set(sdev);
+ } else
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
- if (!cancel_rc && !abort_rc)
+ if (!cancel_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
+ if (block_rc == FAST_IO_FAIL && rc != FAILED)
+ rc = FAST_IO_FAIL;
+
LEAVE;
return rc;
}
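The abort handler above and the reset handlers that follow share one shape once fc_block_scsi_eh() has been consulted: if rport blocking ended in FAST_IO_FAIL, skip the abort/reset and only issue a cancel with ABTS suppressed, then map any non-FAILED outcome to FAST_IO_FAIL. Reduced to that shared skeleton (driver internals elided):

block_rc = fc_block_scsi_eh(cmd);		/* returns 0 or FAST_IO_FAIL */

if (block_rc != FAST_IO_FAIL) {
	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
	ibmvfc_abort_task_set(sdev);		/* reset handlers issue a LUN/target reset here */
} else {
	/* fast_io_fail_tmo fired: don't send ABTS, just cancel */
	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}

if (!cancel_rc)
	rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
if (block_rc == FAST_IO_FAIL && rc != FAILED)
	rc = FAST_IO_FAIL;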
@@ -2314,37 +2432,44 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
* @cmd: scsi command struct
*
* Returns:
- * SUCCESS / FAILED
+ * SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
- int cancel_rc, reset_rc;
+ int cancel_rc, block_rc, reset_rc = 0;
int rc = FAILED;
ENTER;
+ block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
- cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
- reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+ if (block_rc != FAST_IO_FAIL) {
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+ reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+ } else
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
+ if (block_rc == FAST_IO_FAIL && rc != FAILED)
+ rc = FAST_IO_FAIL;
+
LEAVE;
return rc;
}
/**
- * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function
+ * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
* @sdev: scsi device struct
* @data: return code
*
**/
-static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data)
+static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
{
unsigned long *rc = data;
- *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+ *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}
/**
@@ -2360,41 +2485,37 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
}
/**
- * ibmvfc_dev_abort_all - Device iterated abort task set function
- * @sdev: scsi device struct
- * @data: return code
- *
- **/
-static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
-{
- unsigned long *rc = data;
- *rc |= ibmvfc_abort_task_set(sdev);
-}
-
-/**
* ibmvfc_eh_target_reset_handler - Reset the target
* @cmd: scsi command struct
*
* Returns:
- * SUCCESS / FAILED
+ * SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
- int reset_rc;
+ int block_rc;
+ int reset_rc = 0;
int rc = FAILED;
unsigned long cancel_rc = 0;
ENTER;
+ block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
- starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
- reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+ if (block_rc != FAST_IO_FAIL) {
+ starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
+ reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+ } else
+ starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
+ if (block_rc == FAST_IO_FAIL && rc != FAILED)
+ rc = FAST_IO_FAIL;
+
LEAVE;
return rc;
}
@@ -2406,11 +2527,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
**/
static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- int rc;
+ int rc, block_rc;
struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+ block_rc = fc_block_scsi_eh(cmd);
dev_err(vhost->dev, "Resetting connection due to error recovery\n");
rc = ibmvfc_issue_fc_host_lip(vhost->host);
+
+ if (block_rc == FAST_IO_FAIL)
+ return FAST_IO_FAIL;
+
return rc ? FAILED : SUCCESS;
}
@@ -2423,60 +2549,87 @@ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
**/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
- struct scsi_target *starget = to_scsi_target(&rport->dev);
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct Scsi_Host *shost = rport_to_shost(rport);
struct ibmvfc_host *vhost = shost_priv(shost);
- unsigned long cancel_rc = 0;
- unsigned long abort_rc = 0;
- int rc = FAILED;
+ struct fc_rport *dev_rport;
+ struct scsi_device *sdev;
+ unsigned long rc;
ENTER;
- starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts);
- starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
+ shost_for_each_device(sdev, shost) {
+ dev_rport = starget_to_rport(scsi_target(sdev));
+ if (dev_rport != rport)
+ continue;
+ ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+ }
- if (!cancel_rc && !abort_rc)
- rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
+ rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
LEAVE;
}
-static const struct {
- enum ibmvfc_async_event ae;
- const char *desc;
-} ae_desc [] = {
- { IBMVFC_AE_ELS_PLOGI, "PLOGI" },
- { IBMVFC_AE_ELS_LOGO, "LOGO" },
- { IBMVFC_AE_ELS_PRLO, "PRLO" },
- { IBMVFC_AE_SCN_NPORT, "N-Port SCN" },
- { IBMVFC_AE_SCN_GROUP, "Group SCN" },
- { IBMVFC_AE_SCN_DOMAIN, "Domain SCN" },
- { IBMVFC_AE_SCN_FABRIC, "Fabric SCN" },
- { IBMVFC_AE_LINK_UP, "Link Up" },
- { IBMVFC_AE_LINK_DOWN, "Link Down" },
- { IBMVFC_AE_LINK_DEAD, "Link Dead" },
- { IBMVFC_AE_HALT, "Halt" },
- { IBMVFC_AE_RESUME, "Resume" },
- { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" },
+static const struct ibmvfc_async_desc ae_desc [] = {
+ { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
-static const char *unknown_ae = "Unknown async";
+static const struct ibmvfc_async_desc unknown_ae = {
+ "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
+};
/**
* ibmvfc_get_ae_desc - Get text description for async event
* @ae: async event
*
**/
-static const char *ibmvfc_get_ae_desc(u64 ae)
+static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
{
int i;
for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
if (ae_desc[i].ae == ae)
- return ae_desc[i].desc;
+ return &ae_desc[i];
+
+ return &unknown_ae;
+}
+
+static const struct {
+ enum ibmvfc_ae_link_state state;
+ const char *desc;
+} link_desc [] = {
+ { IBMVFC_AE_LS_LINK_UP, " link up" },
+ { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
+ { IBMVFC_AE_LS_LINK_DOWN, " link down" },
+ { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
+};
+
+/**
+ * ibmvfc_get_link_state - Get text description for link state
+ * @state: link state
+ *
+ **/
+static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(link_desc); i++)
+ if (link_desc[i].state == state)
+ return link_desc[i].desc;
- return unknown_ae;
+ return "";
}
/**
@@ -2488,11 +2641,12 @@ static const char *ibmvfc_get_ae_desc(u64 ae)
static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
struct ibmvfc_host *vhost)
{
- const char *desc = ibmvfc_get_ae_desc(crq->event);
+ const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event);
struct ibmvfc_target *tgt;
- ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
- " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
+ ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
+ " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
+ ibmvfc_get_link_state(crq->link_state));
switch (crq->event) {
case IBMVFC_AE_RESUME:
@@ -2521,8 +2675,10 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
case IBMVFC_AE_SCN_FABRIC:
case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
- vhost->delay_init = 1;
- __ibmvfc_reset_host(vhost);
+ if (vhost->state < IBMVFC_HALTED) {
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
+ }
break;
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
@@ -2605,22 +2761,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
dev_info(vhost->dev, "Re-enabling adapter\n");
vhost->client_migrated = 1;
ibmvfc_purge_requests(vhost, DID_REQUEUE);
- if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
- (rc = ibmvfc_send_crq_init(vhost))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
- } else
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
} else {
dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
-
ibmvfc_purge_requests(vhost, DID_ERROR);
- if ((rc = ibmvfc_reset_crq(vhost)) ||
- (rc = ibmvfc_send_crq_init(vhost))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
- } else
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
return;
case IBMVFC_CRQ_CMD_RSP:
@@ -2743,7 +2890,6 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
static int ibmvfc_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
- struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
@@ -2755,8 +2901,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
scsi_activate_tcq(sdev, sdev->queue_depth);
} else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
-
- rport->dev_loss_tmo = dev_loss_tmo;
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
@@ -2918,6 +3062,7 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_read_trace - Dump the adapter trace
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -2927,7 +3072,7 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ibmvfc_read_trace(struct kobject *kobj,
+static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -3012,6 +3157,7 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3034,6 +3180,7 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3082,12 +3229,14 @@ static void ibmvfc_tasklet(void *data)
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
}
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -3095,10 +3244,12 @@ static void ibmvfc_tasklet(void *data)
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
} else
done = 1;
}
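The rmb()/wmb() calls added to the CRQ iterators and the tasklet implement the usual shared-ring discipline: a read barrier after the valid bit is observed and before the payload is trusted, and a write barrier after the entry is handled and before valid is cleared so the hypervisor may reuse the slot. The consumer side, as a minimal sketch with a hypothetical ring structure:

#include <linux/types.h>

struct crq_entry {
	volatile u8 valid;	/* top bit set by the hypervisor when the entry is ready */
	/* ... payload ... */
};

static struct crq_entry *next_crq(struct crq_entry *ring, int size, int *cur)
{
	struct crq_entry *crq = &ring[*cur];

	if (!(crq->valid & 0x80))
		return NULL;		/* nothing new */

	if (++(*cur) == size)
		*cur = 0;
	rmb();				/* read valid before reading the payload */
	return crq;
}

/* after handling:  crq->valid = 0; wmb();  -- release the slot back to the hypervisor */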
@@ -4115,6 +4266,8 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
+ case IBMVFC_HOST_ACTION_RESET:
+ case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
};
@@ -4176,11 +4329,15 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
list_del(&tgt->queue);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
}
if (rport) {
@@ -4212,6 +4369,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
struct ibmvfc_target *tgt;
unsigned long flags;
struct fc_rport *rport;
+ int rc;
ibmvfc_log_ae(vhost, vhost->events_to_log);
spin_lock_irqsave(vhost->host->host_lock, flags);
@@ -4221,6 +4379,29 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_LOGO_WAIT:
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
+ case IBMVFC_HOST_ACTION_RESET:
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ rc = ibmvfc_reset_crq(vhost);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (rc == H_CLOSED)
+ vio_enable_interrupts(to_vio_dev(vhost->dev));
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ }
+ break;
+ case IBMVFC_HOST_ACTION_REENABLE:
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ rc = ibmvfc_reenable_crq_queue(vhost);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ }
+ break;
case IBMVFC_HOST_ACTION_LOGO:
vhost->job_step(vhost);
break;
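The new RESET and REENABLE cases above run from the host work thread with host_lock held, so they drop the lock around the CRQ hcalls (which may msleep() in the retry loops added earlier) and re-take it before touching link state. Reduced to its core, with error logging and interrupt re-enable elided:

case IBMVFC_HOST_ACTION_RESET:
	vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;	/* next step once the CRQ is back */
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	rc = ibmvfc_reset_crq(vhost);			/* may sleep: cannot hold a spinlock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (rc || (rc = ibmvfc_send_crq_init(vhost)))
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
	break;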
@@ -4258,6 +4439,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
rport = tgt->rport;
tgt->rport = NULL;
list_del(&tgt->queue);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
@@ -4333,7 +4515,7 @@ static int ibmvfc_work(void *data)
struct ibmvfc_host *vhost = data;
int rc;
- set_user_nice(current, -20);
+ set_user_nice(current, MIN_NICE);
while (1) {
rc = wait_event_interruptible(vhost->work_wait_q,
@@ -4663,6 +4845,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if ((rc = scsi_add_host(shost, dev)))
goto release_event_pool;
+ fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
+
if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
&ibmvfc_trace_attr))) {
dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
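The fc_host_dev_loss_tmo() assignment added in this hunk is what replaces the dev_loss_tmo module parameter deleted at the top of the patch: the FC transport class now owns the default, and per-rport values stay tunable at runtime through the transport's sysfs attributes. A short sketch of the probe-time line plus the assumed sysfs path:

/* After scsi_add_host(): seed the transport-class default instead of a module param. */
fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;

/* Per-rport override at runtime (standard FC transport attribute, assumed path):
 *   echo 60 > /sys/class/fc_remote_ports/rport-<H:B:R>/dev_loss_tmo
 */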
@@ -4771,7 +4955,7 @@ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
}
-static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
+static struct vio_device_id ibmvfc_device_table[] = {
{"fcp", "IBM,vfc-client"},
{ "", "" }
};
@@ -4786,11 +4970,8 @@ static struct vio_driver ibmvfc_driver = {
.probe = ibmvfc_probe,
.remove = ibmvfc_remove,
.get_desired_dma = ibmvfc_get_desired_dma,
- .driver = {
- .name = IBMVFC_NAME,
- .owner = THIS_MODULE,
- .pm = &ibmvfc_pm_ops,
- }
+ .name = IBMVFC_NAME,
+ .pm = &ibmvfc_pm_ops,
};
static struct fc_function_template ibmvfc_transport_functions = {