Diffstat (limited to 'drivers/s390/scsi')
-rw-r--r--  drivers/s390/scsi/Makefile        |    5
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c      |  641
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c      |  346
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c     |  252
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c      | 1367
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h      |  530
-rw-r--r--  drivers/s390/scsi/zfcp_def.h      |  601
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c      | 1030
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h      |  208
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c       | 1023
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h       |  297
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c      | 1932
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h      |  113
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c     |  594
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h     |  271
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h  |  183
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c     |  490
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c    |  463
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c     |  255
19 files changed, 5349 insertions, 5252 deletions
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index cb301cc6178..9259039e886 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -2,7 +2,8 @@
# Makefile for the S/390 specific device drivers
#
-zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
- zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o
+zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
+ zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
+ zfcp_unit.o
obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 3ac27ee4739..8004b071a9f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corp. 2002, 2013
*/
/*
@@ -23,6 +23,7 @@
* Christof Schmitt
* Martin Petermann
* Sven Schuetz
+ * Steffen Maier
*/
#define KMSG_COMPONENT "zfcp"
@@ -30,7 +31,11 @@
#include <linux/miscdevice.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/module.h>
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -42,115 +47,76 @@ static char *init_device;
module_param_named(device, init_device, charp, 0400);
MODULE_PARM_DESC(device, "specify initial device");
-static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
+static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
+ unsigned long size)
{
- int idx;
-
- adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
- GFP_KERNEL);
- if (!adapter->req_list)
- return -ENOMEM;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
- INIT_LIST_HEAD(&adapter->req_list[idx]);
- return 0;
-}
-
-/**
- * zfcp_reqlist_isempty - is the request list empty
- * @adapter: pointer to struct zfcp_adapter
- *
- * Returns: true if list is empty, false otherwise
- */
-int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
-{
- unsigned int idx;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
- if (!list_empty(&adapter->req_list[idx]))
- return 0;
- return 1;
+ return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
+ struct ccw_device *cdev;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
- struct zfcp_unit *unit;
- down(&zfcp_data.config_sema);
- read_lock_irq(&zfcp_data.config_lock);
- adapter = zfcp_get_adapter_by_busid(busid);
- if (adapter)
- zfcp_adapter_get(adapter);
- read_unlock_irq(&zfcp_data.config_lock);
+ cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+ if (!cdev)
+ return;
+
+ if (ccw_device_set_online(cdev))
+ goto out_ccw_device;
+ adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
- goto out_adapter;
- port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
- if (IS_ERR(port))
+ goto out_ccw_device;
+
+ port = zfcp_get_port_by_wwpn(adapter, wwpn);
+ if (!port)
goto out_port;
- unit = zfcp_unit_enqueue(port, lun);
- if (IS_ERR(unit))
- goto out_unit;
- up(&zfcp_data.config_sema);
- ccw_device_set_online(adapter->ccw_device);
-
- zfcp_erp_wait(adapter);
- flush_work(&unit->scsi_work);
-
- down(&zfcp_data.config_sema);
- zfcp_unit_put(unit);
-out_unit:
- zfcp_port_put(port);
+ flush_work(&port->rport_work);
+
+ zfcp_unit_add(port, lun);
+ put_device(&port->dev);
+
out_port:
- zfcp_adapter_put(adapter);
-out_adapter:
- up(&zfcp_data.config_sema);
+ zfcp_ccw_adapter_put(adapter);
+out_ccw_device:
+ put_device(&cdev->dev);
return;
}
-static struct kmem_cache *zfcp_cache_create(int size, char *name)
-{
- int align = 1;
- while ((size - align) > 0)
- align <<= 1;
- return kmem_cache_create(name , size, align, 0, NULL);
-}
-
static void __init zfcp_init_device_setup(char *devstr)
{
char *token;
- char *str;
+ char *str, *str_saved;
char busid[ZFCP_BUS_ID_SIZE];
u64 wwpn, lun;
/* duplicate devstr and keep the original for sysfs presentation*/
- str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
+ str_saved = kstrdup(devstr, GFP_KERNEL);
+ str = str_saved;
if (!str)
return;
- strcpy(str, devstr);
-
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
strncpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
- if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
+ if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
goto err_out;
token = strsep(&str, ",");
- if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
+ if (!token || kstrtoull(token, 0, (unsigned long long *) &lun))
goto err_out;
- kfree(str);
+ kfree(str_saved);
zfcp_init_device_configure(busid, wwpn, lun);
return;
- err_out:
- kfree(str);
+err_out:
+ kfree(str_saved);
pr_err("%s is not a valid SCSI device\n", devstr);
}
@@ -158,38 +124,24 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
- zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
- sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
- if (!zfcp_data.fsf_req_qtcb_cache)
- goto out;
+ zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+ sizeof(struct fsf_qtcb));
+ if (!zfcp_fsf_qtcb_cache)
+ goto out_qtcb_cache;
- zfcp_data.sr_buffer_cache = zfcp_cache_create(
- sizeof(struct fsf_status_read_buffer), "zfcp_sr");
- if (!zfcp_data.sr_buffer_cache)
- goto out_sr_cache;
+ zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+ sizeof(struct zfcp_fc_req));
+ if (!zfcp_fc_req_cache)
+ goto out_fc_cache;
- zfcp_data.gid_pn_cache = zfcp_cache_create(
- sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
- if (!zfcp_data.gid_pn_cache)
- goto out_gid_cache;
-
- zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
-
- sema_init(&zfcp_data.config_sema, 1);
- rwlock_init(&zfcp_data.config_lock);
-
- zfcp_data.scsi_transport_template =
+ zfcp_scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
- if (!zfcp_data.scsi_transport_template)
+ if (!zfcp_scsi_transport_template)
goto out_transport;
+ scsi_transport_reserve_device(zfcp_scsi_transport_template,
+ sizeof(struct zfcp_scsi_dev));
- retval = misc_register(&zfcp_cfdc_misc);
- if (retval) {
- pr_err("Registering the misc device zfcp_cfdc failed\n");
- goto out_misc;
- }
-
- retval = zfcp_ccw_register();
+ retval = ccw_driver_register(&zfcp_ccw_driver);
if (retval) {
pr_err("The zfcp device driver could not register with "
"the common I/O layer\n");
@@ -201,39 +153,27 @@ static int __init zfcp_module_init(void)
return 0;
out_ccw_register:
- misc_deregister(&zfcp_cfdc_misc);
-out_misc:
- fc_release_transport(zfcp_data.scsi_transport_template);
+ fc_release_transport(zfcp_scsi_transport_template);
out_transport:
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
-out_gid_cache:
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
- kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
-out:
+ kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
+out_qtcb_cache:
return retval;
}
module_init(zfcp_module_init);
-/**
- * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
- * @port: pointer to port to search for unit
- * @fcp_lun: FCP LUN to search for
- *
- * Returns: pointer to zfcp_unit or NULL
- */
-struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
+static void __exit zfcp_module_exit(void)
{
- struct zfcp_unit *unit;
-
- list_for_each_entry(unit, &port->unit_list_head, list)
- if ((unit->fcp_lun == fcp_lun) &&
- !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE))
- return unit;
- return NULL;
+ ccw_driver_unregister(&zfcp_ccw_driver);
+ fc_release_transport(zfcp_scsi_transport_template);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
+module_exit(zfcp_module_exit);
+
/**
* zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
* @adapter: pointer to adapter to search for port
@@ -244,146 +184,63 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
u64 wwpn)
{
+ unsigned long flags;
struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list)
- if ((port->wwpn == wwpn) &&
- !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE))
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
+ if (port->wwpn == wwpn) {
+ if (!get_device(&port->dev))
+ port = NULL;
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
return port;
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
return NULL;
}
-static void zfcp_sysfs_unit_release(struct device *dev)
-{
- kfree(container_of(dev, struct zfcp_unit, sysfs_device));
-}
-
-/**
- * zfcp_unit_enqueue - enqueue unit to unit list of a port.
- * @port: pointer to port where unit is added
- * @fcp_lun: FCP LUN of unit to be enqueued
- * Returns: pointer to enqueued unit on success, ERR_PTR on error
- * Locks: config_sema must be held to serialize changes to the unit list
- *
- * Sets up some unit internal structures and creates sysfs entry.
- */
-struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
-{
- struct zfcp_unit *unit;
-
- unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
- if (!unit)
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&unit->refcount, 0);
- init_waitqueue_head(&unit->remove_wq);
- INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
-
- unit->port = port;
- unit->fcp_lun = fcp_lun;
-
- dev_set_name(&unit->sysfs_device, "0x%016llx",
- (unsigned long long) fcp_lun);
- unit->sysfs_device.parent = &port->sysfs_device;
- unit->sysfs_device.release = zfcp_sysfs_unit_release;
- dev_set_drvdata(&unit->sysfs_device, unit);
-
- /* mark unit unusable as long as sysfs registration is not complete */
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
-
- spin_lock_init(&unit->latencies.lock);
- unit->latencies.write.channel.min = 0xFFFFFFFF;
- unit->latencies.write.fabric.min = 0xFFFFFFFF;
- unit->latencies.read.channel.min = 0xFFFFFFFF;
- unit->latencies.read.fabric.min = 0xFFFFFFFF;
- unit->latencies.cmd.channel.min = 0xFFFFFFFF;
- unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
-
- read_lock_irq(&zfcp_data.config_lock);
- if (zfcp_get_unit_by_lun(port, fcp_lun)) {
- read_unlock_irq(&zfcp_data.config_lock);
- goto err_out_free;
- }
- read_unlock_irq(&zfcp_data.config_lock);
-
- if (device_register(&unit->sysfs_device))
- goto err_out_free;
-
- if (sysfs_create_group(&unit->sysfs_device.kobj,
- &zfcp_sysfs_unit_attrs)) {
- device_unregister(&unit->sysfs_device);
- return ERR_PTR(-EIO);
- }
-
- zfcp_unit_get(unit);
-
- write_lock_irq(&zfcp_data.config_lock);
- list_add_tail(&unit->list, &port->unit_list_head);
- atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
- atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
-
- write_unlock_irq(&zfcp_data.config_lock);
-
- zfcp_port_get(port);
-
- return unit;
-
-err_out_free:
- kfree(unit);
- return ERR_PTR(-EINVAL);
-}
-
-/**
- * zfcp_unit_dequeue - dequeue unit
- * @unit: pointer to zfcp_unit
- *
- * waits until all work is done on unit and removes it then from the unit->list
- * of the associated port.
- */
-void zfcp_unit_dequeue(struct zfcp_unit *unit)
-{
- wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
- write_lock_irq(&zfcp_data.config_lock);
- list_del(&unit->list);
- write_unlock_irq(&zfcp_data.config_lock);
- zfcp_port_put(unit->port);
- sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
- device_unregister(&unit->sysfs_device);
-}
-
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
- /* must only be called with zfcp_data.config_sema taken */
- adapter->pool.fsf_req_erp =
- mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
- if (!adapter->pool.fsf_req_erp)
+ adapter->pool.erp_req =
+ mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+ if (!adapter->pool.erp_req)
+ return -ENOMEM;
+
+ adapter->pool.gid_pn_req =
+ mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+ if (!adapter->pool.gid_pn_req)
return -ENOMEM;
- adapter->pool.fsf_req_scsi =
- mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
- if (!adapter->pool.fsf_req_scsi)
+ adapter->pool.scsi_req =
+ mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+ if (!adapter->pool.scsi_req)
return -ENOMEM;
- adapter->pool.fsf_req_abort =
- mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
- if (!adapter->pool.fsf_req_abort)
+ adapter->pool.scsi_abort =
+ mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+ if (!adapter->pool.scsi_abort)
return -ENOMEM;
- adapter->pool.fsf_req_status_read =
+ adapter->pool.status_read_req =
mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
sizeof(struct zfcp_fsf_req));
- if (!adapter->pool.fsf_req_status_read)
+ if (!adapter->pool.status_read_req)
+ return -ENOMEM;
+
+ adapter->pool.qtcb_pool =
+ mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
+ if (!adapter->pool.qtcb_pool)
return -ENOMEM;
- adapter->pool.data_status_read =
- mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
- zfcp_data.sr_buffer_cache);
- if (!adapter->pool.data_status_read)
+ BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+ adapter->pool.sr_data =
+ mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+ if (!adapter->pool.sr_data)
return -ENOMEM;
- adapter->pool.data_gid_pn =
- mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
- if (!adapter->pool.data_gid_pn)
+ adapter->pool.gid_pn =
+ mempool_create_slab_pool(1, zfcp_fc_req_cache);
+ if (!adapter->pool.gid_pn)
return -ENOMEM;
return 0;
@@ -391,19 +248,20 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
- /* zfcp_data.config_sema must be held */
- if (adapter->pool.fsf_req_erp)
- mempool_destroy(adapter->pool.fsf_req_erp);
- if (adapter->pool.fsf_req_scsi)
- mempool_destroy(adapter->pool.fsf_req_scsi);
- if (adapter->pool.fsf_req_abort)
- mempool_destroy(adapter->pool.fsf_req_abort);
- if (adapter->pool.fsf_req_status_read)
- mempool_destroy(adapter->pool.fsf_req_status_read);
- if (adapter->pool.data_status_read)
- mempool_destroy(adapter->pool.data_status_read);
- if (adapter->pool.data_gid_pn)
- mempool_destroy(adapter->pool.data_gid_pn);
+ if (adapter->pool.erp_req)
+ mempool_destroy(adapter->pool.erp_req);
+ if (adapter->pool.scsi_req)
+ mempool_destroy(adapter->pool.scsi_req);
+ if (adapter->pool.scsi_abort)
+ mempool_destroy(adapter->pool.scsi_abort);
+ if (adapter->pool.qtcb_pool)
+ mempool_destroy(adapter->pool.qtcb_pool);
+ if (adapter->pool.status_read_req)
+ mempool_destroy(adapter->pool.status_read_req);
+ if (adapter->pool.sr_data)
+ mempool_destroy(adapter->pool.sr_data);
+ if (adapter->pool.gid_pn)
+ mempool_destroy(adapter->pool.gid_pn);
}
/**
@@ -418,10 +276,10 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_read(&adapter->stat_miss) > 0)
- if (zfcp_fsf_status_read(adapter)) {
- if (atomic_read(&adapter->stat_miss) >= 16) {
- zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
- NULL);
+ if (zfcp_fsf_status_read(adapter->qdio)) {
+ if (atomic_read(&adapter->stat_miss) >=
+ adapter->stat_read_buf_num) {
+ zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
return 1;
}
break;
@@ -446,137 +304,165 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
adapter->fsf_lic_version);
}
+static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+ char name[TASK_COMM_LEN];
+
+ snprintf(name, sizeof(name), "zfcp_q_%s",
+ dev_name(&adapter->ccw_device->dev));
+ adapter->work_queue = create_singlethread_workqueue(name);
+
+ if (adapter->work_queue)
+ return 0;
+ return -ENOMEM;
+}
+
+static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+ if (adapter->work_queue)
+ destroy_workqueue(adapter->work_queue);
+ adapter->work_queue = NULL;
+
+}
+
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct cc_device
*
- * Returns: 0 if a new adapter was successfully enqueued
- * -ENOMEM if alloc failed
+ * Returns: struct zfcp_adapter*
* Enqueues an adapter at the end of the adapter list in the driver data.
* All adapter internal structures are set up.
* Proc-fs entries are also created.
- * locks: config_sema must be held to serialise changes to the adapter list
*/
-int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
- /*
- * Note: It is safe to release the list_lock, as any list changes
- * are protected by the config_sema, which must be held to get here
- */
+ if (!get_device(&ccw_device->dev))
+ return ERR_PTR(-ENODEV);
adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
- if (!adapter)
- return -ENOMEM;
+ if (!adapter) {
+ put_device(&ccw_device->dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kref_init(&adapter->ref);
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
- atomic_set(&adapter->refcount, 0);
- if (zfcp_qdio_allocate(adapter))
- goto qdio_allocate_failed;
+ INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
+ INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+ INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
+
+ if (zfcp_qdio_setup(adapter))
+ goto failed;
if (zfcp_allocate_low_mem_buffers(adapter))
- goto failed_low_mem_buffers;
+ goto failed;
- if (zfcp_reqlist_alloc(adapter))
- goto failed_low_mem_buffers;
+ adapter->req_list = zfcp_reqlist_alloc();
+ if (!adapter->req_list)
+ goto failed;
- if (zfcp_adapter_debug_register(adapter))
- goto debug_register_failed;
+ if (zfcp_dbf_adapter_register(adapter))
+ goto failed;
- init_waitqueue_head(&adapter->remove_wq);
- init_waitqueue_head(&adapter->erp_thread_wqh);
+ if (zfcp_setup_adapter_work_queue(adapter))
+ goto failed;
+
+ if (zfcp_fc_gs_setup(adapter))
+ goto failed;
+
+ rwlock_init(&adapter->port_list_lock);
+ INIT_LIST_HEAD(&adapter->port_list);
+
+ INIT_LIST_HEAD(&adapter->events.list);
+ INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
+ spin_lock_init(&adapter->events.list_lock);
+
+ init_waitqueue_head(&adapter->erp_ready_wq);
init_waitqueue_head(&adapter->erp_done_wqh);
- INIT_LIST_HEAD(&adapter->port_list_head);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
- spin_lock_init(&adapter->req_list_lock);
-
- spin_lock_init(&adapter->hba_dbf_lock);
- spin_lock_init(&adapter->san_dbf_lock);
- spin_lock_init(&adapter->scsi_dbf_lock);
- spin_lock_init(&adapter->rec_dbf_lock);
- spin_lock_init(&adapter->req_q_lock);
- spin_lock_init(&adapter->qdio_stat_lock);
-
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
- sema_init(&adapter->erp_ready_sem, 0);
-
- INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
- INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
+ if (zfcp_erp_thread_setup(adapter))
+ goto failed;
adapter->service_level.seq_print = zfcp_print_sl;
- /* mark adapter unusable as long as sysfs registration is not complete */
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-
dev_set_drvdata(&ccw_device->dev, adapter);
if (sysfs_create_group(&ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs))
- goto sysfs_failed;
+ goto failed;
- atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
+ /* report size limit per scatter-gather segment */
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
- zfcp_fc_nameserver_init(adapter);
+ adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
- if (!zfcp_adapter_scsi_register(adapter))
- return 0;
+ if (!zfcp_scsi_adapter_register(adapter))
+ return adapter;
-sysfs_failed:
- zfcp_adapter_debug_unregister(adapter);
-debug_register_failed:
- dev_set_drvdata(&ccw_device->dev, NULL);
- kfree(adapter->req_list);
-failed_low_mem_buffers:
- zfcp_free_low_mem_buffers(adapter);
-qdio_allocate_failed:
- zfcp_qdio_free(adapter);
- kfree(adapter);
- return -ENOMEM;
+failed:
+ zfcp_adapter_unregister(adapter);
+ return ERR_PTR(-ENOMEM);
+}
+
+void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
+{
+ struct ccw_device *cdev = adapter->ccw_device;
+
+ cancel_work_sync(&adapter->scan_work);
+ cancel_work_sync(&adapter->stat_work);
+ cancel_work_sync(&adapter->ns_up_work);
+ zfcp_destroy_adapter_work_queue(adapter);
+
+ zfcp_fc_wka_ports_force_offline(adapter->gs);
+ zfcp_scsi_adapter_unregister(adapter);
+ sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
+
+ zfcp_erp_thread_kill(adapter);
+ zfcp_dbf_adapter_unregister(adapter);
+ zfcp_qdio_destroy(adapter->qdio);
+
+ zfcp_ccw_adapter_put(adapter); /* final put to release */
}
/**
- * zfcp_adapter_dequeue - remove the adapter from the resource list
- * @adapter: pointer to struct zfcp_adapter which should be removed
+ * zfcp_adapter_release - remove the adapter from the resource list
+ * @ref: pointer to struct kref
* locks: adapter list write lock is assumed to be held by caller
*/
-void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
+void zfcp_adapter_release(struct kref *ref)
{
- int retval = 0;
- unsigned long flags;
+ struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
+ ref);
+ struct ccw_device *cdev = adapter->ccw_device;
- cancel_work_sync(&adapter->scan_work);
- cancel_work_sync(&adapter->stat_work);
- zfcp_adapter_scsi_unregister(adapter);
- sysfs_remove_group(&adapter->ccw_device->dev.kobj,
- &zfcp_sysfs_adapter_attrs);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
- /* sanity check: no pending FSF requests */
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- retval = zfcp_reqlist_isempty(adapter);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
- if (!retval)
- return;
-
- zfcp_adapter_debug_unregister(adapter);
- zfcp_qdio_free(adapter);
+ zfcp_fc_gs_destroy(adapter);
zfcp_free_low_mem_buffers(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
kfree(adapter);
+ put_device(&cdev->dev);
}
-static void zfcp_sysfs_port_release(struct device *dev)
+static void zfcp_port_release(struct device *dev)
{
- kfree(container_of(dev, struct zfcp_port, sysfs_device));
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+ zfcp_ccw_adapter_put(port->adapter);
+ kfree(port);
}
/**
@@ -586,7 +472,6 @@ static void zfcp_sysfs_port_release(struct device *dev)
* @status: initial status for the port
* @d_id: destination id of the remote port to be enqueued
* Returns: pointer to enqueued port on success, ERR_PTR on error
- * Locks: config_sema must be held to serialize changes to the port list
*
* All port internal structures are set up and the sysfs entry is generated.
* d_id is used to enqueue ports with a well known address like the Directory
@@ -596,15 +481,26 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
- int retval;
+ int retval = -ENOMEM;
+
+ kref_get(&adapter->ref);
+
+ port = zfcp_get_port_by_wwpn(adapter, wwpn);
+ if (port) {
+ put_device(&port->dev);
+ retval = -EEXIST;
+ goto err_out;
+ }
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
- return ERR_PTR(-ENOMEM);
+ goto err_out;
+
+ rwlock_init(&port->unit_list_lock);
+ INIT_LIST_HEAD(&port->unit_list);
+ atomic_set(&port->units, 0);
- init_waitqueue_head(&port->remove_wq);
- INIT_LIST_HEAD(&port->unit_list_head);
- INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
+ INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
@@ -612,69 +508,32 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
+ port->dev.parent = &adapter->ccw_device->dev;
+ port->dev.groups = zfcp_port_attr_groups;
+ port->dev.release = zfcp_port_release;
- /* mark port unusable as long as sysfs registration is not complete */
- atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
- atomic_set(&port->refcount, 0);
-
- dev_set_name(&port->sysfs_device, "0x%016llx",
- (unsigned long long)wwpn);
- port->sysfs_device.parent = &adapter->ccw_device->dev;
-
- port->sysfs_device.release = zfcp_sysfs_port_release;
- dev_set_drvdata(&port->sysfs_device, port);
-
- read_lock_irq(&zfcp_data.config_lock);
- if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
- read_unlock_irq(&zfcp_data.config_lock);
- goto err_out_free;
+ if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
+ kfree(port);
+ goto err_out;
}
- read_unlock_irq(&zfcp_data.config_lock);
-
- if (device_register(&port->sysfs_device))
- goto err_out_free;
-
- retval = sysfs_create_group(&port->sysfs_device.kobj,
- &zfcp_sysfs_port_attrs);
+ retval = -EINVAL;
- if (retval) {
- device_unregister(&port->sysfs_device);
+ if (device_register(&port->dev)) {
+ put_device(&port->dev);
goto err_out;
}
- zfcp_port_get(port);
-
- write_lock_irq(&zfcp_data.config_lock);
- list_add_tail(&port->list, &adapter->port_list_head);
- atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
- atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
+ write_lock_irq(&adapter->port_list_lock);
+ list_add_tail(&port->list, &adapter->port_list);
+ write_unlock_irq(&adapter->port_list_lock);
- write_unlock_irq(&zfcp_data.config_lock);
+ atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
- zfcp_adapter_get(adapter);
return port;
-err_out_free:
- kfree(port);
err_out:
- return ERR_PTR(-EINVAL);
-}
-
-/**
- * zfcp_port_dequeue - dequeues a port from the port list of the adapter
- * @port: pointer to struct zfcp_port which should be removed
- */
-void zfcp_port_dequeue(struct zfcp_port *port)
-{
- wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
- write_lock_irq(&zfcp_data.config_lock);
- list_del(&port->list);
- write_unlock_irq(&zfcp_data.config_lock);
- if (port->rport)
- port->rport->dd_data = NULL;
- zfcp_adapter_put(port->adapter);
- sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
- device_unregister(&port->sysfs_device);
+ zfcp_ccw_adapter_put(adapter);
+ return ERR_PTR(retval);
}
/**
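
The zfcp_aux.c hunks above replace the old zfcp_data.config_lock/refcount scheme with a per-adapter port_list_lock and device reference counting. A minimal caller sketch of the new pattern, assuming only the helpers shown in this patch (zfcp_get_port_by_wwpn() takes a get_device() reference that the caller must drop, exactly as zfcp_init_device_configure() does above); the function name example_lookup_port() is hypothetical:

/* Illustration only, not part of the patch. */
static void example_lookup_port(struct zfcp_adapter *adapter, u64 wwpn)
{
	struct zfcp_port *port;

	port = zfcp_get_port_by_wwpn(adapter, wwpn); /* holds a device ref */
	if (!port)
		return;

	/* ... use the port while the reference is held ... */

	put_device(&port->dev); /* release the reference taken above */
}
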
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 733fe3bf628..f9879d400d0 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,49 +3,98 @@
*
* Registration and callback for the s390 common I/O layer.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/module.h>
#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
+
+#define ZFCP_MODEL_PRIV 0x4
+
+static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
+
+struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
+{
+ struct zfcp_adapter *adapter;
+ unsigned long flags;
+
+ spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+ adapter = dev_get_drvdata(&cdev->dev);
+ if (adapter)
+ kref_get(&adapter->ref);
+ spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+ return adapter;
+}
+
+void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+ kref_put(&adapter->ref, zfcp_adapter_release);
+ spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+}
+
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
+
+ zfcp_erp_clear_adapter_status(adapter, clear);
+ zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+ tag);
+ zfcp_erp_wait(adapter);
+ flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
+
+ zfcp_ccw_adapter_put(adapter);
+
+ return 0;
+}
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+ { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+ { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
/**
* zfcp_ccw_probe - probe function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
*
- * This function gets called by the common i/o layer and sets up the initial
- * data structures for each fcp adapter, which was detected by the system.
- * Also the sysfs files for this adapter will be created by this function.
- * In addition the nameserver port will be added to the ports of the adapter
- * and its sysfs representation will be created too.
+ * This function gets called by the common i/o layer for each FCP
+ * device found on the current system. This is only a stub to make cio
+ * work: To only allocate adapter resources for devices actually used,
+ * the allocation is deferred to the first call to ccw_set_online.
*/
-static int zfcp_ccw_probe(struct ccw_device *ccw_device)
+static int zfcp_ccw_probe(struct ccw_device *cdev)
{
- int retval = 0;
-
- down(&zfcp_data.config_sema);
- if (zfcp_adapter_enqueue(ccw_device)) {
- dev_err(&ccw_device->dev,
- "Setting up data structures for the "
- "FCP adapter failed\n");
- retval = -EINVAL;
- }
- up(&zfcp_data.config_sema);
- return retval;
+ return 0;
}
/**
* zfcp_ccw_remove - remove function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and removes an adapter
* from the system. Task of this function is to get rid of all units and
* ports that belong to this adapter. And in addition all resources of this
* adapter will be freed too.
*/
-static void zfcp_ccw_remove(struct ccw_device *ccw_device)
+static void zfcp_ccw_remove(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port, *p;
@@ -53,134 +102,161 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
LIST_HEAD(unit_remove_lh);
LIST_HEAD(port_remove_lh);
- ccw_device_set_offline(ccw_device);
- down(&zfcp_data.config_sema);
- adapter = dev_get_drvdata(&ccw_device->dev);
+ ccw_device_set_offline(cdev);
+
+ adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ if (!adapter)
+ return;
- write_lock_irq(&zfcp_data.config_lock);
- list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
- list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
+ write_lock_irq(&adapter->port_list_lock);
+ list_for_each_entry_safe(port, p, &adapter->port_list, list) {
+ write_lock(&port->unit_list_lock);
+ list_for_each_entry_safe(unit, u, &port->unit_list, list)
list_move(&unit->list, &unit_remove_lh);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
- &unit->status);
- }
+ write_unlock(&port->unit_list_lock);
list_move(&port->list, &port_remove_lh);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
}
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
- write_unlock_irq(&zfcp_data.config_lock);
-
- list_for_each_entry_safe(port, p, &port_remove_lh, list) {
- list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
- if (unit->device)
- scsi_remove_device(unit->device);
- zfcp_unit_dequeue(unit);
- }
- zfcp_port_dequeue(port);
- }
- wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
- zfcp_adapter_dequeue(adapter);
+ write_unlock_irq(&adapter->port_list_lock);
+ zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
+
+ list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
+ device_unregister(&unit->dev);
- up(&zfcp_data.config_sema);
+ list_for_each_entry_safe(port, p, &port_remove_lh, list)
+ device_unregister(&port->dev);
+
+ zfcp_adapter_unregister(adapter);
}
/**
* zfcp_ccw_set_online - set_online function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
*
- * This function gets called by the common i/o layer and sets an adapter
- * into state online. Setting an fcp device online means that it will be
- * registered with the SCSI stack, that the QDIO queues will be set up
- * and that the adapter will be opened (asynchronously).
+ * This function gets called by the common i/o layer and sets an
+ * adapter into state online. The first call will allocate all
+ * adapter resources that will be retained until the device is removed
+ * via zfcp_ccw_remove.
+ *
+ * Setting an fcp device online means that it will be registered with
+ * the SCSI stack, that the QDIO queues will be set up and that the
+ * adapter will be opened.
*/
-static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_online(struct ccw_device *cdev)
{
- struct zfcp_adapter *adapter;
- int retval;
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
- down(&zfcp_data.config_sema);
- adapter = dev_get_drvdata(&ccw_device->dev);
+ if (!adapter) {
+ adapter = zfcp_adapter_enqueue(cdev);
- retval = zfcp_erp_thread_setup(adapter);
- if (retval)
- goto out;
+ if (IS_ERR(adapter)) {
+ dev_err(&cdev->dev,
+ "Setting up data structures for the "
+ "FCP adapter failed\n");
+ return PTR_ERR(adapter);
+ }
+ kref_get(&adapter->ref);
+ }
/* initialize request counter */
- BUG_ON(!zfcp_reqlist_isempty(adapter));
+ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
- zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
- ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
- zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
- "ccsonl2", NULL);
- zfcp_erp_wait(adapter);
- up(&zfcp_data.config_sema);
- flush_work(&adapter->scan_work);
+ zfcp_ccw_activate(cdev, 0, "ccsonl1");
+ /* scan for remote ports
+ either at the end of any successful adapter recovery
+ or only after the adapter recovery for setting a device online */
+ zfcp_fc_inverse_conditional_port_scan(adapter);
+ flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
+ zfcp_ccw_adapter_put(adapter);
return 0;
-
- out:
- up(&zfcp_data.config_sema);
- return retval;
}
/**
- * zfcp_ccw_set_offline - set_offline function of zfcp driver
- * @ccw_device: pointer to belonging ccw device
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
-static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
{
- struct zfcp_adapter *adapter;
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
- down(&zfcp_data.config_sema);
- adapter = dev_get_drvdata(&ccw_device->dev);
- zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
+ zfcp_erp_set_adapter_status(adapter, set);
+ zfcp_erp_adapter_shutdown(adapter, 0, tag);
zfcp_erp_wait(adapter);
- zfcp_erp_thread_kill(adapter);
- up(&zfcp_data.config_sema);
+
+ zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+{
+ return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
+}
+
+/**
* zfcp_ccw_notify - ccw notify function
- * @ccw_device: pointer to belonging ccw device
+ * @cdev: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached
*
* This function gets called by the common i/o layer if an adapter has gone
* or reappeared.
*/
-static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
+static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 1;
switch (event) {
case CIO_GONE:
- dev_warn(&adapter->ccw_device->dev,
- "The FCP device has been detached\n");
- zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+ zfcp_dbf_hba_basic("ccnigo1", adapter);
+ break;
+ }
+ dev_warn(&cdev->dev, "The FCP device has been detached\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
break;
case CIO_NO_PATH:
- dev_warn(&adapter->ccw_device->dev,
+ dev_warn(&cdev->dev,
"The CHPID for the FCP device is offline\n");
- zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
break;
case CIO_OPER:
- dev_info(&adapter->ccw_device->dev,
- "The FCP device is operational again\n");
- zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
- ZFCP_STATUS_COMMON_RUNNING,
- ZFCP_SET);
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+ zfcp_dbf_hba_basic("ccniop1", adapter);
+ break;
+ }
+ dev_info(&cdev->dev, "The FCP device is operational again\n");
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
- "ccnoti4", NULL);
+ "ccnoti4");
break;
case CIO_BOXED:
- dev_warn(&adapter->ccw_device->dev,
- "The ccw device did not respond in time.\n");
- zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
+ dev_warn(&cdev->dev, "The FCP device did not respond within "
+ "the specified time\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
break;
}
+
+ zfcp_ccw_adapter_put(adapter);
return 1;
}
@@ -190,26 +266,45 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
*/
static void zfcp_ccw_shutdown(struct ccw_device *cdev)
{
- struct zfcp_adapter *adapter;
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
- down(&zfcp_data.config_sema);
- adapter = dev_get_drvdata(&cdev->dev);
- zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
+ if (!adapter)
+ return;
+
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
zfcp_erp_wait(adapter);
- up(&zfcp_data.config_sema);
+ zfcp_erp_thread_kill(adapter);
+
+ zfcp_ccw_adapter_put(adapter);
}
-static struct ccw_device_id zfcp_ccw_device_id[] = {
- { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
- { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
- {},
-};
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+ zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+ return 0;
+}
-MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+ /* trace records for thaw and final shutdown during suspend
+ can only be found in system dump until the end of suspend
+ but not after resume because it's based on the memory image
+ right after the very first suspend (freeze) callback */
+ zfcp_ccw_activate(cdev, 0, "ccthaw1");
+ return 0;
+}
-static struct ccw_driver zfcp_ccw_driver = {
- .owner = THIS_MODULE,
- .name = "zfcp",
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+ zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+ return 0;
+}
+
+struct ccw_driver zfcp_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "zfcp",
+ },
.ids = zfcp_ccw_device_id,
.probe = zfcp_ccw_probe,
.remove = zfcp_ccw_remove,
@@ -217,32 +312,7 @@ static struct ccw_driver zfcp_ccw_driver = {
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
+ .freeze = zfcp_ccw_suspend,
+ .thaw = zfcp_ccw_thaw,
+ .restore = zfcp_ccw_resume,
};
-
-/**
- * zfcp_ccw_register - ccw register function
- *
- * Registers the driver at the common i/o layer. This function will be called
- * at module load time/system start.
- */
-int __init zfcp_ccw_register(void)
-{
- return ccw_driver_register(&zfcp_ccw_driver);
-}
-
-/**
- * zfcp_get_adapter_by_busid - find zfcp_adapter struct
- * @busid: bus id string of zfcp adapter to find
- */
-struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
-{
- struct ccw_device *ccw_device;
- struct zfcp_adapter *adapter = NULL;
-
- ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
- if (ccw_device) {
- adapter = dev_get_drvdata(&ccw_device->dev);
- put_device(&ccw_device->dev);
- }
- return adapter;
-}
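
The zfcp_ccw.c hunks above introduce kref-based adapter lookup: zfcp_ccw_adapter_by_cdev() takes a reference under zfcp_ccw_adapter_ref_lock and returns NULL while the device has never been set online, and every successful lookup is paired with zfcp_ccw_adapter_put(), whose final put ends in zfcp_adapter_release(). A hedged sketch of that pairing, mirroring the new ccw callbacks; example_with_adapter() is a hypothetical name:

/* Illustration only, not part of the patch. */
static int example_with_adapter(struct ccw_device *cdev)
{
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

	if (!adapter)
		return 0; /* no adapter allocated yet (device never set online) */

	/* ... operate on the adapter while the kref is held ... */

	zfcp_ccw_adapter_put(adapter); /* dropping the last ref frees the adapter */
	return 0;
}
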
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
deleted file mode 100644
index 8305c874e86..00000000000
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * zfcp device driver
- *
- * Userspace interface for accessing the
- * Access Control Lists / Control File Data Channel
- *
- * Copyright IBM Corporation 2008, 2009
- */
-
-#define KMSG_COMPONENT "zfcp"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <asm/ccwdev.h>
-#include "zfcp_def.h"
-#include "zfcp_ext.h"
-#include "zfcp_fsf.h"
-
-#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
-#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
-#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
-#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
-#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
-
-#define ZFCP_CFDC_DOWNLOAD 0x00000001
-#define ZFCP_CFDC_UPLOAD 0x00000002
-#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
-
-#define ZFCP_CFDC_IOC_MAGIC 0xDD
-#define ZFCP_CFDC_IOC \
- _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
-
-/**
- * struct zfcp_cfdc_data - data for ioctl cfdc interface
- * @signature: request signature
- * @devno: FCP adapter device number
- * @command: command code
- * @fsf_status: returns status of FSF command to userspace
- * @fsf_status_qual: returned to userspace
- * @payloads: access conflicts list
- * @control_file: access control table
- */
-struct zfcp_cfdc_data {
- u32 signature;
- u32 devno;
- u32 command;
- u32 fsf_status;
- u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
- u8 payloads[256];
- u8 control_file[0];
-};
-
-static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
- void __user *user_buffer)
-{
- unsigned int length;
- unsigned int size = ZFCP_CFDC_MAX_SIZE;
-
- while (size) {
- length = min((unsigned int)size, sg->length);
- if (copy_from_user(sg_virt(sg++), user_buffer, length))
- return -EFAULT;
- user_buffer += length;
- size -= length;
- }
- return 0;
-}
-
-static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
- struct scatterlist *sg)
-{
- unsigned int length;
- unsigned int size = ZFCP_CFDC_MAX_SIZE;
-
- while (size) {
- length = min((unsigned int) size, sg->length);
- if (copy_to_user(user_buffer, sg_virt(sg++), length))
- return -EFAULT;
- user_buffer += length;
- size -= length;
- }
- return 0;
-}
-
-static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
-{
- char busid[9];
- snprintf(busid, sizeof(busid), "0.0.%04x", devno);
- return zfcp_get_adapter_by_busid(busid);
-}
-
-static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
-{
- switch (command) {
- case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
- fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
- fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
- break;
- case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
- fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
- fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
- break;
- case ZFCP_CFDC_CMND_FULL_ACCESS:
- fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
- fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
- break;
- case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
- fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
- fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
- break;
- case ZFCP_CFDC_CMND_UPLOAD:
- fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
- fsf_cfdc->option = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
- u8 __user *control_file)
-{
- int retval;
- retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
- if (retval)
- return retval;
-
- sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
-
- if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
- command & ZFCP_CFDC_DOWNLOAD) {
- retval = zfcp_cfdc_copy_from_user(sg, control_file);
- if (retval) {
- zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
-static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
- struct zfcp_fsf_req *req)
-{
- data->fsf_status = req->qtcb->header.fsf_status;
- memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
- sizeof(union fsf_status_qual));
- memcpy(&data->payloads, &req->qtcb->bottom.support.els,
- sizeof(req->qtcb->bottom.support.els));
-}
-
-static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
- unsigned long buffer)
-{
- struct zfcp_cfdc_data *data;
- struct zfcp_cfdc_data __user *data_user;
- struct zfcp_adapter *adapter;
- struct zfcp_fsf_req *req;
- struct zfcp_fsf_cfdc *fsf_cfdc;
- int retval;
-
- if (command != ZFCP_CFDC_IOC)
- return -ENOTTY;
-
- data_user = (void __user *) buffer;
- if (!data_user)
- return -EINVAL;
-
- fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
- if (!fsf_cfdc)
- return -ENOMEM;
-
- data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
- if (!data) {
- retval = -ENOMEM;
- goto no_mem_sense;
- }
-
- retval = copy_from_user(data, data_user, sizeof(*data));
- if (retval) {
- retval = -EFAULT;
- goto free_buffer;
- }
-
- if (data->signature != 0xCFDCACDF) {
- retval = -EINVAL;
- goto free_buffer;
- }
-
- retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
-
- adapter = zfcp_cfdc_get_adapter(data->devno);
- if (!adapter) {
- retval = -ENXIO;
- goto free_buffer;
- }
- zfcp_adapter_get(adapter);
-
- retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
- data_user->control_file);
- if (retval)
- goto adapter_put;
- req = zfcp_fsf_control_file(adapter, fsf_cfdc);
- if (IS_ERR(req)) {
- retval = PTR_ERR(req);
- goto free_sg;
- }
-
- if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
- retval = -ENXIO;
- goto free_fsf;
- }
-
- zfcp_cfdc_req_to_sense(data, req);
- retval = copy_to_user(data_user, data, sizeof(*data_user));
- if (retval) {
- retval = -EFAULT;
- goto free_fsf;
- }
-
- if (data->command & ZFCP_CFDC_UPLOAD)
- retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
- fsf_cfdc->sg);
-
- free_fsf:
- zfcp_fsf_req_free(req);
- free_sg:
- zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
- adapter_put:
- zfcp_adapter_put(adapter);
- free_buffer:
- kfree(data);
- no_mem_sense:
- kfree(fsf_cfdc);
- return retval;
-}
-
-static const struct file_operations zfcp_cfdc_fops = {
- .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = zfcp_cfdc_dev_ioctl
-#endif
-};
-
-struct miscdevice zfcp_cfdc_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "zfcp_cfdc",
- .fops = &zfcp_cfdc_fops,
-};
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0a1a5dd8d01..0ca64484cfa 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,15 +3,19 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corp. 2002, 2013
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/module.h>
#include <linux/ctype.h>
+#include <linux/slab.h>
#include <asm/debug.h>
+#include "zfcp_dbf.h"
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
static u32 dbfsize = 4;
@@ -19,1128 +23,510 @@ module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
"number of pages for each debug feature area (default 4)");
-static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
- int level, char *from, int from_len)
-{
- int offset;
- struct zfcp_dbf_dump *dump = to;
- int room = to_len - sizeof(*dump);
-
- for (offset = 0; offset < from_len; offset += dump->size) {
- memset(to, 0, to_len);
- strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
- dump->total_size = from_len;
- dump->offset = offset;
- dump->size = min(from_len - offset, room);
- memcpy(dump->data, from + offset, dump->size);
- debug_event(dbf, level, dump, dump->size + sizeof(*dump));
- }
-}
+static u32 dbflevel = 3;
-/* FIXME: this duplicate this code in s390 debug feature */
-static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time)
-{
- unsigned long long sec;
-
- stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
- sec = stck >> 12;
- do_div(sec, 1000000);
- time->tv_sec = sec;
- stck -= (sec * 1000000) << 12;
- time->tv_nsec = ((stck * 1000) >> 12);
-}
+module_param(dbflevel, uint, 0400);
+MODULE_PARM_DESC(dbflevel,
+ "log level for each debug feature area "
+ "(default 3, range 0..6)");
-static void zfcp_dbf_tag(char **p, const char *label, const char *tag)
+static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
- int i;
-
- *p += sprintf(*p, "%-24s", label);
- for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
- *p += sprintf(*p, "%c", tag[i]);
- *p += sprintf(*p, "\n");
+ return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
-static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2)
+static inline
+void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
+ u64 req_id)
{
- *buf += sprintf(*buf, "%-24s%s\n", s1, s2);
-}
+ struct zfcp_dbf_pay *pl = &dbf->pay_buf;
+ u16 offset = 0, rec_length;
-static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...)
-{
- va_list arg;
+ spin_lock(&dbf->pay_lock);
+ memset(pl, 0, sizeof(*pl));
+ pl->fsf_req_id = req_id;
+ memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
- *buf += sprintf(*buf, "%-24s", s);
- va_start(arg, format);
- *buf += vsprintf(*buf, format, arg);
- va_end(arg);
- *buf += sprintf(*buf, "\n");
-}
+ while (offset < length) {
+ rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
+ (u16) (length - offset));
+ memcpy(pl->data, data + offset, rec_length);
+ debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
-static void zfcp_dbf_outd(char **p, const char *label, char *buffer,
- int buflen, int offset, int total_size)
-{
- if (!offset)
- *p += sprintf(*p, "%-24s ", label);
- while (buflen--) {
- if (offset > 0) {
- if ((offset % 32) == 0)
- *p += sprintf(*p, "\n%-24c ", ' ');
- else if ((offset % 4) == 0)
- *p += sprintf(*p, " ");
- }
- *p += sprintf(*p, "%02x", *buffer++);
- if (++offset == total_size) {
- *p += sprintf(*p, "\n");
- break;
- }
+ offset += rec_length;
+ pl->counter++;
}
- if (!total_size)
- *p += sprintf(*p, "\n");
-}
-static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view,
- int area, debug_entry_t *entry, char *out_buf)
-{
- struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
- struct timespec t;
- char *p = out_buf;
-
- if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
- zfcp_dbf_timestamp(entry->id.stck, &t);
- zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu",
- t.tv_sec, t.tv_nsec);
- zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
- } else {
- zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset,
- dump->total_size);
- if ((dump->offset + dump->size) == dump->total_size)
- p += sprintf(p, "\n");
- }
- return p - out_buf;
+ spin_unlock(&dbf->pay_lock);
}
/**
- * zfcp_hba_dbf_event_fsf_response - trace event for request completion
- * @fsf_req: request that has been completed
+ * zfcp_dbf_hba_fsf_res - trace event for fsf responses
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request for which a response was received
*/
-void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
{
- struct zfcp_adapter *adapter = fsf_req->adapter;
- struct fsf_qtcb *qtcb = fsf_req->qtcb;
- union fsf_prot_status_qual *prot_status_qual =
- &qtcb->prefix.prot_status_qual;
- union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
- struct scsi_cmnd *scsi_cmnd;
- struct zfcp_port *port;
- struct zfcp_unit *unit;
- struct zfcp_send_els *send_els;
- struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
- struct zfcp_hba_dbf_record_response *response = &rec->u.response;
- int level;
+ struct zfcp_dbf *dbf = req->adapter->dbf;
+ struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+ struct fsf_qtcb_header *q_head = &req->qtcb->header;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
- spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
- strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
-
- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
- (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
- strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
- level = 1;
- } else if (qtcb->header.fsf_status != FSF_GOOD) {
- strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
- level = 1;
- } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
- (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
- strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
- level = 4;
- } else if (qtcb->header.log_length) {
- strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE);
- level = 5;
- } else {
- strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
- level = 6;
- }
- response->fsf_command = fsf_req->fsf_command;
- response->fsf_reqid = (unsigned long)fsf_req;
- response->fsf_seqno = fsf_req->seq_no;
- response->fsf_issued = fsf_req->issued;
- response->fsf_prot_status = qtcb->prefix.prot_status;
- response->fsf_status = qtcb->header.fsf_status;
- memcpy(response->fsf_prot_status_qual,
- prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
- memcpy(response->fsf_status_qual,
- fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
- response->fsf_req_status = fsf_req->status;
- response->sbal_first = fsf_req->sbal_first;
- response->sbal_last = fsf_req->sbal_last;
- response->sbal_response = fsf_req->sbal_response;
- response->pool = fsf_req->pool != NULL;
- response->erp_action = (unsigned long)fsf_req->erp_action;
-
- switch (fsf_req->fsf_command) {
- case FSF_QTCB_FCP_CMND:
- if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
- break;
- scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
- if (scsi_cmnd) {
- response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
- response->u.fcp.serial = scsi_cmnd->serial_number;
- }
- break;
-
- case FSF_QTCB_OPEN_PORT_WITH_DID:
- case FSF_QTCB_CLOSE_PORT:
- case FSF_QTCB_CLOSE_PHYSICAL_PORT:
- port = (struct zfcp_port *)fsf_req->data;
- response->u.port.wwpn = port->wwpn;
- response->u.port.d_id = port->d_id;
- response->u.port.port_handle = qtcb->header.port_handle;
- break;
-
- case FSF_QTCB_OPEN_LUN:
- case FSF_QTCB_CLOSE_LUN:
- unit = (struct zfcp_unit *)fsf_req->data;
- port = unit->port;
- response->u.unit.wwpn = port->wwpn;
- response->u.unit.fcp_lun = unit->fcp_lun;
- response->u.unit.port_handle = qtcb->header.port_handle;
- response->u.unit.lun_handle = qtcb->header.lun_handle;
- break;
-
- case FSF_QTCB_SEND_ELS:
- send_els = (struct zfcp_send_els *)fsf_req->data;
- response->u.els.d_id = qtcb->bottom.support.d_id;
- response->u.els.ls_code = send_els->ls_code >> 24;
- break;
-
- case FSF_QTCB_ABORT_FCP_CMND:
- case FSF_QTCB_SEND_GENERIC:
- case FSF_QTCB_EXCHANGE_CONFIG_DATA:
- case FSF_QTCB_EXCHANGE_PORT_DATA:
- case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
- case FSF_QTCB_UPLOAD_CONTROL_FILE:
- break;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_RES;
+ rec->fsf_req_id = req->req_id;
+ rec->fsf_req_status = req->status;
+ rec->fsf_cmd = req->fsf_command;
+ rec->fsf_seq_no = req->seq_no;
+ rec->u.res.req_issued = req->issued;
+ rec->u.res.prot_status = q_pref->prot_status;
+ rec->u.res.fsf_status = q_head->fsf_status;
+
+ memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
+ FSF_PROT_STATUS_QUAL_SIZE);
+ memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
+ FSF_STATUS_QUALIFIER_SIZE);
+
+ if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+ rec->pl_len = q_head->log_length;
+ zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+ rec->pl_len, "fsf_res", req->req_id);
}
- debug_event(adapter->hba_dbf, level, rec, sizeof(*rec));
-
- /* have fcp channel microcode fixed to use as little as possible */
- if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) {
- /* adjust length skipping trailing zeros */
- char *buf = (char *)qtcb + qtcb->header.log_start;
- int len = qtcb->header.log_length;
- for (; len && !buf[len - 1]; len--);
- zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level,
- buf, len);
- }
-
- spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+ debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
- * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer
+ * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
* @tag: tag indicating which kind of unsolicited status has been received
- * @adapter: adapter that has issued the unsolicited status buffer
- * @status_buffer: buffer containing payload of unsolicited status
+ * @req: request providing the unsolicited status
*/
-void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
- struct fsf_status_read_buffer *status_buffer)
+void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
- struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+ struct zfcp_dbf *dbf = req->adapter->dbf;
+ struct fsf_status_read_buffer *srb = req->data;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
- spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
- strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
- strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
-
- rec->u.status.failed = atomic_read(&adapter->stat_miss);
- if (status_buffer != NULL) {
- rec->u.status.status_type = status_buffer->status_type;
- rec->u.status.status_subtype = status_buffer->status_subtype;
- memcpy(&rec->u.status.queue_designator,
- &status_buffer->queue_designator,
- sizeof(struct fsf_queue_designator));
-
- switch (status_buffer->status_type) {
- case FSF_STATUS_READ_SENSE_DATA_AVAIL:
- rec->u.status.payload_size =
- ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
- break;
-
- case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- rec->u.status.payload_size =
- ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
- break;
-
- case FSF_STATUS_READ_LINK_DOWN:
- switch (status_buffer->status_subtype) {
- case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
- case FSF_STATUS_READ_SUB_FDISC_FAILED:
- rec->u.status.payload_size =
- sizeof(struct fsf_link_down_info);
- }
- break;
-
- case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
- rec->u.status.payload_size =
- ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
- break;
- }
- memcpy(&rec->u.status.payload,
- &status_buffer->payload, rec->u.status.payload_size);
- }
- debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec));
- spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
-}
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_USS;
+ rec->fsf_req_id = req->req_id;
+ rec->fsf_req_status = req->status;
+ rec->fsf_cmd = req->fsf_command;
-/**
- * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
- * @adapter: adapter affected by this QDIO related event
- * @qdio_error: as passed by qdio module
- * @sbal_index: first buffer with error condition, as passed by qdio module
- * @sbal_count: number of buffers affected, as passed by qdio module
- */
-void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
- unsigned int qdio_error, int sbal_index,
- int sbal_count)
-{
- struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
- unsigned long flags;
+ if (!srb)
+ goto log;
+
+ rec->u.uss.status_type = srb->status_type;
+ rec->u.uss.status_subtype = srb->status_subtype;
+ rec->u.uss.d_id = ntoh24(srb->d_id);
+ rec->u.uss.lun = srb->fcp_lun;
+ memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
+ sizeof(rec->u.uss.queue_designator));
- spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
- r->u.qdio.qdio_error = qdio_error;
- r->u.qdio.sbal_index = sbal_index;
- r->u.qdio.sbal_count = sbal_count;
- debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
- spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+ /* status read buffer payload length */
+ rec->pl_len = (!srb->length) ? 0 : srb->length -
+ offsetof(struct fsf_status_read_buffer, payload);
+
+ if (rec->pl_len)
+ zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
+ "fsf_uss", req->req_id);
+log:
+ debug_event(dbf->hba, 2, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
- * zfcp_hba_dbf_event_berr - trace event for bit error threshold
- * @adapter: adapter affected by this QDIO related event
- * @req: fsf request
+ * zfcp_dbf_hba_bit_err - trace event for bit error conditions
+ * @tag: identifier for event
+ * @req: request which caused the bit_error condition
*/
-void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
- struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
- struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
+ struct zfcp_dbf *dbf = req->adapter->dbf;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
struct fsf_status_read_buffer *sr_buf = req->data;
- struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
unsigned long flags;
- spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
- memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
- debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
- spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
-}
-static void zfcp_hba_dbf_view_response(char **p,
- struct zfcp_hba_dbf_record_response *r)
-{
- struct timespec t;
-
- zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command);
- zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
- zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno);
- zfcp_dbf_timestamp(r->fsf_issued, &t);
- zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
- zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status);
- zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status);
- zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual,
- FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE);
- zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual,
- FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
- zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
- zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
- zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
- zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
- zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
-
- switch (r->fsf_command) {
- case FSF_QTCB_FCP_CMND:
- if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
- break;
- zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
- zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
- p += sprintf(*p, "\n");
- break;
-
- case FSF_QTCB_OPEN_PORT_WITH_DID:
- case FSF_QTCB_CLOSE_PORT:
- case FSF_QTCB_CLOSE_PHYSICAL_PORT:
- zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn);
- zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id);
- zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle);
- break;
-
- case FSF_QTCB_OPEN_LUN:
- case FSF_QTCB_CLOSE_LUN:
- zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn);
- zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun);
- zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle);
- zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle);
- break;
-
- case FSF_QTCB_SEND_ELS:
- zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
- zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code);
- break;
-
- case FSF_QTCB_ABORT_FCP_CMND:
- case FSF_QTCB_SEND_GENERIC:
- case FSF_QTCB_EXCHANGE_CONFIG_DATA:
- case FSF_QTCB_EXCHANGE_PORT_DATA:
- case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
- case FSF_QTCB_UPLOAD_CONTROL_FILE:
- break;
- }
-}
+ spin_lock_irqsave(&dbf->hba_lock, flags);
+ memset(rec, 0, sizeof(*rec));
-static void zfcp_hba_dbf_view_status(char **p,
- struct zfcp_hba_dbf_record_status *r)
-{
- zfcp_dbf_out(p, "failed", "0x%02x", r->failed);
- zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type);
- zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype);
- zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator,
- sizeof(struct fsf_queue_designator), 0,
- sizeof(struct fsf_queue_designator));
- zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0,
- r->payload_size);
-}
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_BIT;
+ rec->fsf_req_id = req->req_id;
+ rec->fsf_req_status = req->status;
+ rec->fsf_cmd = req->fsf_command;
+ memcpy(&rec->u.be, &sr_buf->payload.bit_error,
+ sizeof(struct fsf_bit_error_payload));
-static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
-{
- zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
- zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
- zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
+ debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
-static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
+/**
+ * zfcp_dbf_hba_def_err - trace event for deferred error messages
+ * @adapter: pointer to struct zfcp_adapter
+ * @req_id: request id which caused the deferred error message
+ * @scount: number of sbals incl. the signaling sbal
+ * @pl: array of all involved sbals
+ */
+void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ void **pl)
{
- zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
- zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
- zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count);
- zfcp_dbf_out(p, "prim_seq_err", "%d",
- r->primitive_sequence_error_count);
- zfcp_dbf_out(p, "inval_trans_word_err", "%d",
- r->invalid_transmission_word_error_count);
- zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
- zfcp_dbf_out(p, "prim_seq_event_to", "%d",
- r->primitive_sequence_event_timeout_count);
- zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
- r->elastic_buffer_overrun_error_count);
- zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
- r->advertised_receive_b2b_credit);
- zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
- r->current_receive_b2b_credit);
- zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
- r->advertised_transmit_b2b_credit);
- zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
- r->current_transmit_b2b_credit);
-}
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+ unsigned long flags;
+ u16 length;
-static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
- char *out_buf, const char *in_buf)
-{
- struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf;
- char *p = out_buf;
-
- if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
- return 0;
-
- zfcp_dbf_tag(&p, "tag", r->tag);
- if (isalpha(r->tag2[0]))
- zfcp_dbf_tag(&p, "tag2", r->tag2);
-
- if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
- zfcp_hba_dbf_view_response(&p, &r->u.response);
- else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
- zfcp_hba_dbf_view_status(&p, &r->u.status);
- else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
- zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
- else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
- zfcp_hba_dbf_view_berr(&p, &r->u.berr);
-
- if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
- p += sprintf(p, "\n");
- return p - out_buf;
-}
+ if (!pl)
+ return;
-static struct debug_view zfcp_hba_dbf_view = {
- "structured",
- NULL,
- &zfcp_dbf_view_header,
- &zfcp_hba_dbf_view_format,
- NULL,
- NULL
-};
-
-static const char *zfcp_rec_dbf_tags[] = {
- [ZFCP_REC_DBF_ID_THREAD] = "thread",
- [ZFCP_REC_DBF_ID_TARGET] = "target",
- [ZFCP_REC_DBF_ID_TRIGGER] = "trigger",
- [ZFCP_REC_DBF_ID_ACTION] = "action",
-};
-
-static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
- char *buf, const char *_rec)
-{
- struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec;
- char *p = buf;
- char hint[ZFCP_DBF_ID_SIZE + 1];
-
- memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE);
- hint[ZFCP_DBF_ID_SIZE] = 0;
- zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]);
- zfcp_dbf_outs(&p, "hint", hint);
- switch (r->id) {
- case ZFCP_REC_DBF_ID_THREAD:
- zfcp_dbf_out(&p, "total", "%d", r->u.thread.total);
- zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready);
- zfcp_dbf_out(&p, "running", "%d", r->u.thread.running);
- break;
- case ZFCP_REC_DBF_ID_TARGET:
- zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref);
- zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status);
- zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count);
- zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id);
- zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn);
- zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun);
- break;
- case ZFCP_REC_DBF_ID_TRIGGER:
- zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref);
- zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action);
- zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want);
- zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need);
- zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn);
- zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
- zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
- zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
- zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us);
- break;
- case ZFCP_REC_DBF_ID_ACTION:
- zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
- zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req);
- zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status);
- zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step);
- break;
- }
- p += sprintf(p, "\n");
- return p - buf;
-}
+ spin_lock_irqsave(&dbf->pay_lock, flags);
+ memset(payload, 0, sizeof(*payload));
-static struct debug_view zfcp_rec_dbf_view = {
- "structured",
- NULL,
- &zfcp_dbf_view_header,
- &zfcp_rec_dbf_view_format,
- NULL,
- NULL
-};
+ memcpy(payload->area, "def_err", 7);
+ payload->fsf_req_id = req_id;
+ payload->counter = 0;
+ length = min((u16)sizeof(struct qdio_buffer),
+ (u16)ZFCP_DBF_PAY_MAX_REC);
-/**
- * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
- * @id2: identifier for event
- * @adapter: adapter
- * This function assumes that the caller is holding erp_lock.
- */
-void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter)
-{
- struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
- unsigned long flags = 0;
- struct list_head *entry;
- unsigned ready = 0, running = 0, total;
+ while (payload->counter < scount && (char *)pl[payload->counter]) {
+ memcpy(payload->data, (char *)pl[payload->counter], length);
+ debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+ payload->counter++;
+ }
- list_for_each(entry, &adapter->erp_ready_head)
- ready++;
- list_for_each(entry, &adapter->erp_running_head)
- running++;
- total = adapter->erp_total_count;
-
- spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- r->id = ZFCP_REC_DBF_ID_THREAD;
- memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
- r->u.thread.total = total;
- r->u.thread.ready = ready;
- r->u.thread.running = running;
- debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
- spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+ spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
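For orientation, a minimal caller sketch: the pl argument is expected to carry one pointer per SBAL of the affected request, the signaling SBAL included, and each entry ends up as its own "def_err" record in the payload trace area. The helper name below is hypothetical; only zfcp_dbf_hba_def_err() itself comes from this patch.

/* Hypothetical caller sketch; how the SBAL pointer array is obtained
 * is an assumption, not part of the patch. */
static void example_trace_deferred_error(struct zfcp_adapter *adapter,
					 u64 req_id, u16 scount,
					 struct qdio_buffer **sbals)
{
	/* each SBAL becomes one "def_err" record keyed by req_id */
	zfcp_dbf_hba_def_err(adapter, req_id, scount, (void **)sbals);
}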
/**
- * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
- * @id2: identifier for event
- * @adapter: adapter
- * This function assumes that the caller does not hold erp_lock.
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @tag: identifier for event
+ * @adapter: pointer to struct zfcp_adapter
*/
-void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter)
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
- read_lock_irqsave(&adapter->erp_lock, flags);
- zfcp_rec_dbf_event_thread(id2, adapter);
- read_unlock_irqrestore(&adapter->erp_lock, flags);
-}
+ spin_lock_irqsave(&dbf->hba_lock, flags);
+ memset(rec, 0, sizeof(*rec));
-static void zfcp_rec_dbf_event_target(char *id2, void *ref,
- struct zfcp_adapter *adapter,
- atomic_t *status, atomic_t *erp_count,
- u64 wwpn, u32 d_id, u64 fcp_lun)
-{
- struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
- unsigned long flags;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_BASIC;
- spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- r->id = ZFCP_REC_DBF_ID_TARGET;
- memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
- r->u.target.ref = (unsigned long)ref;
- r->u.target.status = atomic_read(status);
- r->u.target.wwpn = wwpn;
- r->u.target.d_id = d_id;
- r->u.target.fcp_lun = fcp_lun;
- r->u.target.erp_count = atomic_read(erp_count);
- debug_event(adapter->rec_dbf, 3, r, sizeof(*r));
- spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+ debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
-/**
- * zfcp_rec_dbf_event_adapter - trace event for adapter state change
- * @id: identifier for trigger of state change
- * @ref: additional reference (e.g. request)
- * @adapter: adapter
- */
-void zfcp_rec_dbf_event_adapter(char *id, void *ref,
- struct zfcp_adapter *adapter)
+static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+ struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev)
{
- zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status,
- &adapter->erp_counter, 0, 0, 0);
+ rec->adapter_status = atomic_read(&adapter->status);
+ if (port) {
+ rec->port_status = atomic_read(&port->status);
+ rec->wwpn = port->wwpn;
+ rec->d_id = port->d_id;
+ }
+ if (sdev) {
+ rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
+ rec->lun = zfcp_scsi_dev_lun(sdev);
+ }
}
/**
- * zfcp_rec_dbf_event_port - trace event for port state change
- * @id: identifier for trigger of state change
- * @ref: additional reference (e.g. request)
- * @port: port
+ * zfcp_dbf_rec_trig - trace event related to triggered recovery
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock has to be held.
*/
-void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port)
+void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port, struct scsi_device *sdev,
+ u8 want, u8 need)
{
- struct zfcp_adapter *adapter = port->adapter;
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+ struct list_head *entry;
+ unsigned long flags;
- zfcp_rec_dbf_event_target(id, ref, adapter, &port->status,
- &port->erp_counter, port->wwpn, port->d_id,
- 0);
-}
+ spin_lock_irqsave(&dbf->rec_lock, flags);
+ memset(rec, 0, sizeof(*rec));
-/**
- * zfcp_rec_dbf_event_unit - trace event for unit state change
- * @id: identifier for trigger of state change
- * @ref: additional reference (e.g. request)
- * @unit: unit
- */
-void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit)
-{
- struct zfcp_port *port = unit->port;
- struct zfcp_adapter *adapter = port->adapter;
+ rec->id = ZFCP_DBF_REC_TRIG;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ zfcp_dbf_set_common(rec, adapter, port, sdev);
- zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status,
- &unit->erp_counter, port->wwpn, port->d_id,
- unit->fcp_lun);
-}
+ list_for_each(entry, &adapter->erp_ready_head)
+ rec->u.trig.ready++;
-/**
- * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery
- * @id2: identifier for error recovery trigger
- * @ref: additional reference (e.g. request)
- * @want: originally requested error recovery action
- * @need: error recovery action actually initiated
- * @action: address of error recovery action struct
- * @adapter: adapter
- * @port: port
- * @unit: unit
- */
-void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need,
- void *action, struct zfcp_adapter *adapter,
- struct zfcp_port *port, struct zfcp_unit *unit)
-{
- struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
- unsigned long flags;
+ list_for_each(entry, &adapter->erp_running_head)
+ rec->u.trig.running++;
- spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- r->id = ZFCP_REC_DBF_ID_TRIGGER;
- memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
- r->u.trigger.ref = (unsigned long)ref;
- r->u.trigger.want = want;
- r->u.trigger.need = need;
- r->u.trigger.action = (unsigned long)action;
- r->u.trigger.as = atomic_read(&adapter->status);
- if (port) {
- r->u.trigger.ps = atomic_read(&port->status);
- r->u.trigger.wwpn = port->wwpn;
- }
- if (unit) {
- r->u.trigger.us = atomic_read(&unit->status);
- r->u.trigger.fcp_lun = unit->fcp_lun;
- }
- debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r));
- spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+ rec->u.trig.want = want;
+ rec->u.trig.need = need;
+
+ debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+
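Because zfcp_dbf_rec_trig() counts the entries on erp_ready_head and erp_running_head, the comment above requires adapter->erp_lock to be held. A hedged wrapper sketch for call sites that do not already hold the lock (the helper name is hypothetical; the read-lock pattern mirrors the removed zfcp_rec_dbf_event_thread_lock()):

/* Hypothetical locking wrapper -- assumes the caller does not hold
 * adapter->erp_lock yet. */
static void example_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
				      struct zfcp_port *port,
				      struct scsi_device *sdev,
				      u8 want, u8 need)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}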
/**
- * zfcp_rec_dbf_event_action - trace event showing progress of recovery action
- * @id2: identifier
- * @erp_action: error recovery action struct pointer
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
*/
-void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action)
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
- struct zfcp_adapter *adapter = erp_action->adapter;
- struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+ struct zfcp_dbf *dbf = erp->adapter->dbf;
+ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
unsigned long flags;
- spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- r->id = ZFCP_REC_DBF_ID_ACTION;
- memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
- r->u.action.action = (unsigned long)erp_action;
- r->u.action.status = erp_action->status;
- r->u.action.step = erp_action->step;
- r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
- debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
- spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
-}
+ spin_lock_irqsave(&dbf->rec_lock, flags);
+ memset(rec, 0, sizeof(*rec));
-/**
- * zfcp_san_dbf_event_ct_request - trace event for issued CT request
- * @fsf_req: request containing issued CT data
- */
-void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
-{
- struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
- struct zfcp_wka_port *wka_port = ct->wka_port;
- struct zfcp_adapter *adapter = wka_port->adapter;
- struct ct_hdr *hdr = sg_virt(ct->req);
- struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
- struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
- int level = 3;
- unsigned long flags;
+ rec->id = ZFCP_DBF_REC_RUN;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
- spin_lock_irqsave(&adapter->san_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
- r->fsf_reqid = (unsigned long)fsf_req;
- r->fsf_seqno = fsf_req->seq_no;
- r->s_id = fc_host_port_id(adapter->scsi_host);
- r->d_id = wka_port->d_id;
- oct->cmd_req_code = hdr->cmd_rsp_code;
- oct->revision = hdr->revision;
- oct->gs_type = hdr->gs_type;
- oct->gs_subtype = hdr->gs_subtype;
- oct->options = hdr->options;
- oct->max_res_size = hdr->max_res_size;
- oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
- ZFCP_DBF_SAN_MAX_PAYLOAD);
- debug_event(adapter->san_dbf, level, r, sizeof(*r));
- zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
- (void *)hdr + sizeof(struct ct_hdr), oct->len);
- spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
-}
+ rec->u.run.fsf_req_id = erp->fsf_req_id;
+ rec->u.run.rec_status = erp->status;
+ rec->u.run.rec_step = erp->step;
+ rec->u.run.rec_action = erp->action;
-/**
- * zfcp_san_dbf_event_ct_response - trace event for completion of CT request
- * @fsf_req: request containing CT response
- */
-void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
-{
- struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
- struct zfcp_wka_port *wka_port = ct->wka_port;
- struct zfcp_adapter *adapter = wka_port->adapter;
- struct ct_hdr *hdr = sg_virt(ct->resp);
- struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
- struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
- int level = 3;
- unsigned long flags;
+ if (erp->sdev)
+ rec->u.run.rec_count =
+ atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
+ else if (erp->port)
+ rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
+ else
+ rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
- spin_lock_irqsave(&adapter->san_dbf_lock, flags);
- memset(r, 0, sizeof(*r));
- strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
- r->fsf_reqid = (unsigned long)fsf_req;
- r->fsf_seqno = fsf_req->seq_no;
- r->s_id = wka_port->d_id;
- r->d_id = fc_host_port_id(adapter->scsi_host);
- rct->cmd_rsp_code = hdr->cmd_rsp_code;
- rct->revision = hdr->revision;
- rct->reason_code = hdr->reason_code;
- rct->expl = hdr->reason_code_expl;
- rct->vendor_unique = hdr->vendor_unique;
- rct->max_res_size = hdr->max_res_size;
- rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
- ZFCP_DBF_SAN_MAX_PAYLOAD);
- debug_event(adapter->san_dbf, level, r, sizeof(*r));
- zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
- (void *)hdr + sizeof(struct ct_hdr), rct->len);
- spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+ debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
-static void zfcp_san_dbf_event_els(const char *tag, int level,
- struct zfcp_fsf_req *fsf_req, u32 s_id,
- u32 d_id, u8 ls_code, void *buffer,
- int buflen)
+static inline
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
+ u64 req_id, u32 d_id)
{
- struct zfcp_adapter *adapter = fsf_req->adapter;
- struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+ struct zfcp_dbf_san *rec = &dbf->san_buf;
+ u16 rec_len;
unsigned long flags;
- spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+ spin_lock_irqsave(&dbf->san_lock, flags);
memset(rec, 0, sizeof(*rec));
- strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
- rec->fsf_reqid = (unsigned long)fsf_req;
- rec->fsf_seqno = fsf_req->seq_no;
- rec->s_id = s_id;
+
+ rec->id = id;
+ rec->fsf_req_id = req_id;
rec->d_id = d_id;
- rec->u.els.ls_code = ls_code;
- debug_event(adapter->san_dbf, level, rec, sizeof(*rec));
- zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level,
- buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
- spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+ rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
+ memcpy(rec->payload, data, rec_len);
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+
+ debug_event(dbf->san, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->san_lock, flags);
}
/**
- * zfcp_san_dbf_event_els_request - trace event for issued ELS
- * @fsf_req: request containing issued ELS
+ * zfcp_dbf_san_req - trace event for issued SAN request
+ * @tag: identifier for event
+ * @fsf: request containing the issued CT or ELS payload
+ * @d_id: destination ID
*/
-void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
- struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+ struct zfcp_dbf *dbf = fsf->adapter->dbf;
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ u16 length;
- zfcp_san_dbf_event_els("oels", 2, fsf_req,
- fc_host_port_id(els->adapter->scsi_host),
- els->d_id, *(u8 *) sg_virt(els->req),
- sg_virt(els->req), els->req->length);
+ length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
+ zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
+ fsf->req_id, d_id);
}
/**
- * zfcp_san_dbf_event_els_response - trace event for completed ELS
- * @fsf_req: request containing ELS response
+ * zfcp_dbf_san_res - trace event for received SAN response
+ * @tag: identifier for event
+ * @fsf: request containing the received CT or ELS response
*/
-void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
- struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+ struct zfcp_dbf *dbf = fsf->adapter->dbf;
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ u16 length;
- zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
- fc_host_port_id(els->adapter->scsi_host),
- *(u8 *)sg_virt(els->req), sg_virt(els->resp),
- els->resp->length);
+ length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
+ zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
+ fsf->req_id, 0);
}
/**
- * zfcp_san_dbf_event_incoming_els - trace event for incomig ELS
- * @fsf_req: request containing unsolicited status buffer with incoming ELS
+ * zfcp_dbf_san_in_els - trace event for incoming ELS
+ * @tag: identifier for event
+ * @fsf: request containing the unsolicited status buffer with the incoming ELS
*/
-void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
- struct zfcp_adapter *adapter = fsf_req->adapter;
- struct fsf_status_read_buffer *buf =
- (struct fsf_status_read_buffer *)fsf_req->data;
- int length = (int)buf->length -
- (int)((void *)&buf->payload - (void *)buf);
-
- zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
- fc_host_port_id(adapter->scsi_host),
- buf->payload.data[0], (void *)buf->payload.data,
- length);
-}
+ struct zfcp_dbf *dbf = fsf->adapter->dbf;
+ struct fsf_status_read_buffer *srb =
+ (struct fsf_status_read_buffer *) fsf->data;
+ u16 length;
-static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
- char *out_buf, const char *in_buf)
-{
- struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf;
- char *p = out_buf;
-
- if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
- return 0;
-
- zfcp_dbf_tag(&p, "tag", r->tag);
- zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
- zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
- zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id);
- zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
-
- if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
- struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req;
- zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
- zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
- zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
- zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype);
- zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
- zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
- } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
- struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp;
- zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
- zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
- zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
- zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
- zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
- zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
- } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
- strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
- strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
- struct zfcp_san_dbf_record_els *els = &r->u.els;
- zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
- }
- return p - out_buf;
+ length = (u16)(srb->length -
+ offsetof(struct fsf_status_read_buffer, payload));
+ zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
+ fsf->req_id, ntoh24(srb->d_id));
}
-static struct debug_view zfcp_san_dbf_view = {
- "structured",
- NULL,
- &zfcp_dbf_view_header,
- &zfcp_san_dbf_view_format,
- NULL,
- NULL
-};
-
-static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
- struct zfcp_adapter *adapter,
- struct scsi_cmnd *scsi_cmnd,
- struct zfcp_fsf_req *fsf_req,
- unsigned long old_req_id)
-{
- struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
- struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
+/**
+ * zfcp_dbf_scsi - trace event for scsi commands
+ * @tag: identifier for event
+ * @sc: pointer to struct scsi_cmnd
+ * @fsf: pointer to struct zfcp_fsf_req
+ */
+void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+{
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *) sc->device->host->hostdata[0];
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
+ struct fcp_resp_with_ext *fcp_rsp;
+ struct fcp_resp_rsp_info *fcp_rsp_info;
unsigned long flags;
- struct fcp_rsp_iu *fcp_rsp;
- char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
- int offset = 0, buflen = 0;
-
- spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
- do {
- memset(rec, 0, sizeof(*rec));
- if (offset == 0) {
- strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
- strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
- if (scsi_cmnd != NULL) {
- if (scsi_cmnd->device) {
- rec->scsi_id = scsi_cmnd->device->id;
- rec->scsi_lun = scsi_cmnd->device->lun;
- }
- rec->scsi_result = scsi_cmnd->result;
- rec->scsi_cmnd = (unsigned long)scsi_cmnd;
- rec->scsi_serial = scsi_cmnd->serial_number;
- memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
- min((int)scsi_cmnd->cmd_len,
- ZFCP_DBF_SCSI_OPCODE));
- rec->scsi_retries = scsi_cmnd->retries;
- rec->scsi_allowed = scsi_cmnd->allowed;
- }
- if (fsf_req != NULL) {
- fcp_rsp = (struct fcp_rsp_iu *)
- &(fsf_req->qtcb->bottom.io.fcp_rsp);
- fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
- fcp_sns_info =
- zfcp_get_fcp_sns_info_ptr(fcp_rsp);
-
- rec->rsp_validity = fcp_rsp->validity.value;
- rec->rsp_scsi_status = fcp_rsp->scsi_status;
- rec->rsp_resid = fcp_rsp->fcp_resid;
- if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
- rec->rsp_code = *(fcp_rsp_info + 3);
- if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
- buflen = min((int)fcp_rsp->fcp_sns_len,
- ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
- rec->sns_info_len = buflen;
- memcpy(rec->sns_info, fcp_sns_info,
- min(buflen,
- ZFCP_DBF_SCSI_FCP_SNS_INFO));
- offset += min(buflen,
- ZFCP_DBF_SCSI_FCP_SNS_INFO);
- }
-
- rec->fsf_reqid = (unsigned long)fsf_req;
- rec->fsf_seqno = fsf_req->seq_no;
- rec->fsf_issued = fsf_req->issued;
- }
- rec->old_fsf_reqid = old_req_id;
- } else {
- strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
- dump->total_size = buflen;
- dump->offset = offset;
- dump->size = min(buflen - offset,
- (int)sizeof(struct
- zfcp_scsi_dbf_record) -
- (int)sizeof(struct zfcp_dbf_dump));
- memcpy(dump->data, fcp_sns_info + offset, dump->size);
- offset += dump->size;
+
+ spin_lock_irqsave(&dbf->scsi_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_SCSI_CMND;
+ rec->scsi_result = sc->result;
+ rec->scsi_retries = sc->retries;
+ rec->scsi_allowed = sc->allowed;
+ rec->scsi_id = sc->device->id;
+ rec->scsi_lun = sc->device->lun;
+ rec->host_scribble = (unsigned long)sc->host_scribble;
+
+ memcpy(rec->scsi_opcode, sc->cmnd,
+ min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+
+ if (fsf) {
+ rec->fsf_req_id = fsf->req_id;
+ fcp_rsp = (struct fcp_resp_with_ext *)
+ &(fsf->qtcb->bottom.io.fcp_rsp);
+ memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+ if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+ fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+ rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
}
- debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec));
- } while (offset < buflen);
- spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
-}
+ if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+ rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
+ (u16)ZFCP_DBF_PAY_MAX_REC);
+ zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
+ "fcp_sns", fsf->req_id);
+ }
+ }
-/**
- * zfcp_scsi_dbf_event_result - trace event for SCSI command completion
- * @tag: tag indicating success or failure of SCSI command
- * @level: trace level applicable for this event
- * @adapter: adapter that has been used to issue the SCSI command
- * @scsi_cmnd: SCSI command pointer
- * @fsf_req: request used to issue SCSI command (might be NULL)
- */
-void zfcp_scsi_dbf_event_result(const char *tag, int level,
- struct zfcp_adapter *adapter,
- struct scsi_cmnd *scsi_cmnd,
- struct zfcp_fsf_req *fsf_req)
-{
- zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0);
+ debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
-/**
- * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort
- * @tag: tag indicating success or failure of abort operation
- * @adapter: adapter thas has been used to issue SCSI command to be aborted
- * @scsi_cmnd: SCSI command to be aborted
- * @new_fsf_req: request containing abort (might be NULL)
- * @old_req_id: identifier of request containg SCSI command to be aborted
- */
-void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
- struct scsi_cmnd *scsi_cmnd,
- struct zfcp_fsf_req *new_fsf_req,
- unsigned long old_req_id)
+static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
- zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req,
- old_req_id);
-}
+ struct debug_info *d;
-/**
- * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset
- * @tag: tag indicating success or failure of reset operation
- * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
- * @unit: unit that needs reset
- * @scsi_cmnd: SCSI command which caused this error recovery
- */
-void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag,
- struct zfcp_unit *unit,
- struct scsi_cmnd *scsi_cmnd)
-{
- zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
- unit->port->adapter, scsi_cmnd, NULL, 0);
+ d = debug_register(name, size, 1, rec_size);
+ if (!d)
+ return NULL;
+
+ debug_register_view(d, &debug_hex_ascii_view);
+ debug_set_level(d, dbflevel);
+
+ return d;
}
-static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view,
- char *out_buf, const char *in_buf)
+static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
- struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf;
- struct timespec t;
- char *p = out_buf;
-
- if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
- return 0;
-
- zfcp_dbf_tag(&p, "tag", r->tag);
- zfcp_dbf_tag(&p, "tag2", r->tag2);
- zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id);
- zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun);
- zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result);
- zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd);
- zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial);
- zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE,
- 0, ZFCP_DBF_SCSI_OPCODE);
- zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries);
- zfcp_dbf_out(&p, "scsi_allowed", "0x%02x", r->scsi_allowed);
- if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0)
- zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid);
- zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
- zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
- zfcp_dbf_timestamp(r->fsf_issued, &t);
- zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
-
- if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
- zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity);
- zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x",
- r->rsp_scsi_status);
- zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid);
- zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code);
- zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len);
- zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info,
- min((int)r->sns_info_len,
- ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
- r->sns_info_len);
- }
- p += sprintf(p, "\n");
- return p - out_buf;
-}
+ if (!dbf)
+ return;
-static struct debug_view zfcp_scsi_dbf_view = {
- "structured",
- NULL,
- &zfcp_dbf_view_header,
- &zfcp_scsi_dbf_view_format,
- NULL,
- NULL
-};
+ debug_unregister(dbf->scsi);
+ debug_unregister(dbf->san);
+ debug_unregister(dbf->hba);
+ debug_unregister(dbf->pay);
+ debug_unregister(dbf->rec);
+ kfree(dbf);
+}
/**
* zfcp_adapter_debug_register - registers debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be registered
* return: -ENOMEM on error, 0 otherwise
*/
-int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
+int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
- char dbf_name[DEBUG_MAX_NAME_LEN];
+ char name[DEBUG_MAX_NAME_LEN];
+ struct zfcp_dbf *dbf;
+
+ dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
+ if (!dbf)
+ return -ENOMEM;
+
+ spin_lock_init(&dbf->pay_lock);
+ spin_lock_init(&dbf->hba_lock);
+ spin_lock_init(&dbf->san_lock);
+ spin_lock_init(&dbf->scsi_lock);
+ spin_lock_init(&dbf->rec_lock);
/* debug feature area which records recovery activity */
- sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
- adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1,
- sizeof(struct zfcp_rec_dbf_record));
- if (!adapter->rec_dbf)
- goto failed;
- debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view);
- debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view);
- debug_set_level(adapter->rec_dbf, 3);
+ sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
+ dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
+ if (!dbf->rec)
+ goto err_out;
/* debug feature area which records HBA (FSF and QDIO) conditions */
- sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
- adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
- sizeof(struct zfcp_hba_dbf_record));
- if (!adapter->hba_dbf)
- goto failed;
- debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
- debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
- debug_set_level(adapter->hba_dbf, 3);
+ sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
+ dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
+ if (!dbf->hba)
+ goto err_out;
+
+ /* debug feature area which records payload info */
+ sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
+ dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
+ if (!dbf->pay)
+ goto err_out;
/* debug feature area which records SAN command failures and recovery */
- sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
- adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
- sizeof(struct zfcp_san_dbf_record));
- if (!adapter->san_dbf)
- goto failed;
- debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
- debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
- debug_set_level(adapter->san_dbf, 6);
+ sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
+ dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
+ if (!dbf->san)
+ goto err_out;
/* debug feature area which records SCSI command failures and recovery */
- sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
- adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
- sizeof(struct zfcp_scsi_dbf_record));
- if (!adapter->scsi_dbf)
- goto failed;
- debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
- debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
- debug_set_level(adapter->scsi_dbf, 3);
-
- return 0;
+ sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
+ dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
+ if (!dbf->scsi)
+ goto err_out;
- failed:
- zfcp_adapter_debug_unregister(adapter);
+ adapter->dbf = dbf;
+ return 0;
+err_out:
+ zfcp_dbf_unregister(dbf);
return -ENOMEM;
}
@@ -1148,14 +534,11 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
* zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be unregistered
*/
-void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
+void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
- debug_unregister(adapter->scsi_dbf);
- debug_unregister(adapter->san_dbf);
- debug_unregister(adapter->hba_dbf);
- debug_unregister(adapter->rec_dbf);
- adapter->scsi_dbf = NULL;
- adapter->san_dbf = NULL;
- adapter->hba_dbf = NULL;
- adapter->rec_dbf = NULL;
+ struct zfcp_dbf *dbf = adapter->dbf;
+
+ adapter->dbf = NULL;
+ zfcp_dbf_unregister(dbf);
}
+
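With that, trace-area registration is all-or-nothing per adapter. A minimal life-cycle sketch, assuming a setup/teardown path like the adapter enqueue/release code in zfcp_aux.c (the example_* names below are illustrative, not part of the patch):

/* Illustrative sketch only -- the example_* names are hypothetical. */
static int example_adapter_setup(struct zfcp_adapter *adapter)
{
	if (zfcp_dbf_adapter_register(adapter))
		return -ENOMEM;		/* no partial set of trace areas */
	/* ... remaining adapter bring-up ... */
	return 0;
}

static void example_adapter_teardown(struct zfcp_adapter *adapter)
{
	/* detaches adapter->dbf first, then unregisters and frees it */
	zfcp_dbf_adapter_unregister(adapter);
}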
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index a573f7344dd..0be3d48681a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -1,225 +1,383 @@
/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
+ * debug feature declarations
*
- * Copyright IBM Corp. 2008, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corp. 2008, 2010
*/
#ifndef ZFCP_DBF_H
#define ZFCP_DBF_H
+#include <scsi/fc/fc_fcp.h>
+#include "zfcp_ext.h"
#include "zfcp_fsf.h"
+#include "zfcp_def.h"
-#define ZFCP_DBF_TAG_SIZE 4
-#define ZFCP_DBF_ID_SIZE 7
+#define ZFCP_DBF_TAG_LEN 7
-struct zfcp_dbf_dump {
- u8 tag[ZFCP_DBF_TAG_SIZE];
- u32 total_size; /* size of total dump data */
- u32 offset; /* how much data has being already dumped */
- u32 size; /* how much data comes with this record */
- u8 data[]; /* dump data */
-} __attribute__ ((packed));
+#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
-struct zfcp_rec_dbf_record_thread {
- u32 total;
+/**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+ * @running: number of running recovery actions
+ * @want: wanted recovery action
+ * @need: needed recovery action
+ */
+struct zfcp_dbf_rec_trigger {
u32 ready;
u32 running;
-};
-
-struct zfcp_rec_dbf_record_target {
- u64 ref;
- u32 status;
- u32 d_id;
- u64 wwpn;
- u64 fcp_lun;
- u32 erp_count;
-};
-
-struct zfcp_rec_dbf_record_trigger {
u8 want;
u8 need;
- u32 as;
- u32 ps;
- u32 us;
- u64 ref;
- u64 action;
- u64 wwpn;
- u64 fcp_lun;
-};
+} __packed;
+
+/**
+ * struct zfcp_dbf_rec_running - trace record for running recovery
+ * @fsf_req_id: request id for fsf requests
+ * @rec_status: status of the recovery action
+ * @rec_step: current step of the recovery action
+ * @rec_action: recovery action
+ * @rec_count: recovery counter
+ */
+struct zfcp_dbf_rec_running {
+ u64 fsf_req_id;
+ u32 rec_status;
+ u16 rec_step;
+ u8 rec_action;
+ u8 rec_count;
+} __packed;
-struct zfcp_rec_dbf_record_action {
- u32 status;
- u32 step;
- u64 action;
- u64 fsf_req;
+/**
+ * enum zfcp_dbf_rec_id - recovery trace record id
+ * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
+ * @ZFCP_DBF_REC_RUN: running recovery identifier
+ */
+enum zfcp_dbf_rec_id {
+ ZFCP_DBF_REC_TRIG = 1,
+ ZFCP_DBF_REC_RUN = 2,
};
-struct zfcp_rec_dbf_record {
+/**
+ * struct zfcp_dbf_rec - trace record for error recovery actions
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @lun: logical unit number
+ * @wwpn: world wide port number
+ * @d_id: destination ID
+ * @adapter_status: current status of the adapter
+ * @port_status: current status of the port
+ * @lun_status: current status of the lun
+ * @u.trig: structure zfcp_dbf_rec_trigger
+ * @u.run: structure zfcp_dbf_rec_running
+ */
+struct zfcp_dbf_rec {
u8 id;
- char id2[7];
+ char tag[ZFCP_DBF_TAG_LEN];
+ u64 lun;
+ u64 wwpn;
+ u32 d_id;
+ u32 adapter_status;
+ u32 port_status;
+ u32 lun_status;
union {
- struct zfcp_rec_dbf_record_action action;
- struct zfcp_rec_dbf_record_thread thread;
- struct zfcp_rec_dbf_record_target target;
- struct zfcp_rec_dbf_record_trigger trigger;
+ struct zfcp_dbf_rec_trigger trig;
+ struct zfcp_dbf_rec_running run;
} u;
-};
+} __packed;
-enum {
- ZFCP_REC_DBF_ID_ACTION,
- ZFCP_REC_DBF_ID_THREAD,
- ZFCP_REC_DBF_ID_TARGET,
- ZFCP_REC_DBF_ID_TRIGGER,
+/**
+ * enum zfcp_dbf_san_id - SAN trace record identifier
+ * @ZFCP_DBF_SAN_REQ: request trace record id
+ * @ZFCP_DBF_SAN_RES: response trace record id
+ * @ZFCP_DBF_SAN_ELS: extended link service record id
+ */
+enum zfcp_dbf_san_id {
+ ZFCP_DBF_SAN_REQ = 1,
+ ZFCP_DBF_SAN_RES = 2,
+ ZFCP_DBF_SAN_ELS = 3,
};
-struct zfcp_hba_dbf_record_response {
- u32 fsf_command;
- u64 fsf_reqid;
- u32 fsf_seqno;
- u64 fsf_issued;
- u32 fsf_prot_status;
+/** struct zfcp_dbf_san - trace record for SAN requests and responses
+ * @id: unique number of the SAN record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @payload: unformatted information related to request/response
+ * @d_id: destination id
+ */
+struct zfcp_dbf_san {
+ u8 id;
+ char tag[ZFCP_DBF_TAG_LEN];
+ u64 fsf_req_id;
+ u32 d_id;
+#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+ char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+} __packed;
+
+/**
+ * struct zfcp_dbf_hba_res - trace record for hba responses
+ * @req_issued: timestamp when request was issued
+ * @prot_status: protocol status
+ * @prot_status_qual: protocol status qualifier
+ * @fsf_status: fsf status
+ * @fsf_status_qual: fsf status qualifier
+ */
+struct zfcp_dbf_hba_res {
+ u64 req_issued;
+ u32 prot_status;
+ u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
u32 fsf_status;
- u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
- u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
- u32 fsf_req_status;
- u8 sbal_first;
- u8 sbal_last;
- u8 sbal_response;
- u8 pool;
- u64 erp_action;
- union {
- struct {
- u64 cmnd;
- u64 serial;
- } fcp;
- struct {
- u64 wwpn;
- u32 d_id;
- u32 port_handle;
- } port;
- struct {
- u64 wwpn;
- u64 fcp_lun;
- u32 port_handle;
- u32 lun_handle;
- } unit;
- struct {
- u32 d_id;
- u8 ls_code;
- } els;
- } u;
-} __attribute__ ((packed));
+ u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+} __packed;
-struct zfcp_hba_dbf_record_status {
- u8 failed;
+/**
+ * struct zfcp_dbf_hba_uss - trace record for unsolicited status
+ * @status_type: type of unsolicited status
+ * @status_subtype: subtype of unsolicited status
+ * @d_id: destination ID
+ * @lun: logical unit number
+ * @queue_designator: queue designator
+ */
+struct zfcp_dbf_hba_uss {
u32 status_type;
u32 status_subtype;
- struct fsf_queue_designator
- queue_designator;
- u32 payload_size;
-#define ZFCP_DBF_UNSOL_PAYLOAD 80
-#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
-#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
-#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32)
- u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
-} __attribute__ ((packed));
-
-struct zfcp_hba_dbf_record_qdio {
- u32 qdio_error;
- u8 sbal_index;
- u8 sbal_count;
-} __attribute__ ((packed));
-
-struct zfcp_hba_dbf_record {
- u8 tag[ZFCP_DBF_TAG_SIZE];
- u8 tag2[ZFCP_DBF_TAG_SIZE];
- union {
- struct zfcp_hba_dbf_record_response response;
- struct zfcp_hba_dbf_record_status status;
- struct zfcp_hba_dbf_record_qdio qdio;
- struct fsf_bit_error_payload berr;
- } u;
-} __attribute__ ((packed));
-
-struct zfcp_san_dbf_record_ct_request {
- u16 cmd_req_code;
- u8 revision;
- u8 gs_type;
- u8 gs_subtype;
- u8 options;
- u16 max_res_size;
- u32 len;
-} __attribute__ ((packed));
-
-struct zfcp_san_dbf_record_ct_response {
- u16 cmd_rsp_code;
- u8 revision;
- u8 reason_code;
- u8 expl;
- u8 vendor_unique;
- u16 max_res_size;
- u32 len;
-} __attribute__ ((packed));
-
-struct zfcp_san_dbf_record_els {
- u8 ls_code;
- u32 len;
-} __attribute__ ((packed));
-
-struct zfcp_san_dbf_record {
- u8 tag[ZFCP_DBF_TAG_SIZE];
- u64 fsf_reqid;
- u32 fsf_seqno;
- u32 s_id;
u32 d_id;
+ u64 lun;
+ u64 queue_designator;
+} __packed;
+
+/**
+ * enum zfcp_dbf_hba_id - HBA trace record identifier
+ * @ZFCP_DBF_HBA_RES: response trace record
+ * @ZFCP_DBF_HBA_USS: unsolicited status trace record
+ * @ZFCP_DBF_HBA_BIT: bit error trace record
+ * @ZFCP_DBF_HBA_BASIC: basic adapter event trace record
+ */
+enum zfcp_dbf_hba_id {
+ ZFCP_DBF_HBA_RES = 1,
+ ZFCP_DBF_HBA_USS = 2,
+ ZFCP_DBF_HBA_BIT = 3,
+ ZFCP_DBF_HBA_BASIC = 4,
+};
+
+/**
+ * struct zfcp_dbf_hba - common trace record for HBA records
+ * @id: unique number of the HBA record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @fsf_req_status: status of fsf request
+ * @fsf_cmd: fsf command
+ * @fsf_seq_no: fsf sequence number
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @u: record type specific data
+ */
+struct zfcp_dbf_hba {
+ u8 id;
+ char tag[ZFCP_DBF_TAG_LEN];
+ u64 fsf_req_id;
+ u32 fsf_req_status;
+ u32 fsf_cmd;
+ u32 fsf_seq_no;
+ u16 pl_len;
union {
- struct zfcp_san_dbf_record_ct_request ct_req;
- struct zfcp_san_dbf_record_ct_response ct_resp;
- struct zfcp_san_dbf_record_els els;
+ struct zfcp_dbf_hba_res res;
+ struct zfcp_dbf_hba_uss uss;
+ struct fsf_bit_error_payload be;
} u;
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
- u8 payload[32];
-} __attribute__ ((packed));
+} __packed;
+
+/**
+ * enum zfcp_dbf_scsi_id - scsi trace record identifier
+ * @ZFCP_DBF_SCSI_CMND: scsi command trace record
+ */
+enum zfcp_dbf_scsi_id {
+ ZFCP_DBF_SCSI_CMND = 1,
+};
-struct zfcp_scsi_dbf_record {
- u8 tag[ZFCP_DBF_TAG_SIZE];
- u8 tag2[ZFCP_DBF_TAG_SIZE];
+/**
+ * struct zfcp_dbf_scsi - common trace record for SCSI records
+ * @id: unique number of the SCSI record type
+ * @tag: identifier string specifying the location of initiation
+ * @scsi_id: scsi device id
+ * @scsi_lun: scsi device logical unit number
+ * @scsi_result: scsi result
+ * @scsi_retries: current retry number of scsi request
+ * @scsi_allowed: allowed retries
+ * @fcp_rsp_info: FCP response info
+ * @scsi_opcode: scsi opcode
+ * @fsf_req_id: request id of fsf request
+ * @host_scribble: LLD specific data attached to SCSI request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fcp_rsp: FCP response for the FCP request
+ */
+struct zfcp_dbf_scsi {
+ u8 id;
+ char tag[ZFCP_DBF_TAG_LEN];
u32 scsi_id;
u32 scsi_lun;
u32 scsi_result;
- u64 scsi_cmnd;
- u64 scsi_serial;
-#define ZFCP_DBF_SCSI_OPCODE 16
- u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
u8 scsi_retries;
u8 scsi_allowed;
- u64 fsf_reqid;
- u32 fsf_seqno;
- u64 fsf_issued;
- u64 old_fsf_reqid;
- u8 rsp_validity;
- u8 rsp_scsi_status;
- u32 rsp_resid;
- u8 rsp_code;
-#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16
-#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256
- u32 sns_info_len;
- u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
-} __attribute__ ((packed));
+ u8 fcp_rsp_info;
+#define ZFCP_DBF_SCSI_OPCODE 16
+ u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+ u64 fsf_req_id;
+ u64 host_scribble;
+ u16 pl_len;
+ struct fcp_resp_with_ext fcp_rsp;
+} __packed;
+
+/**
+ * struct zfcp_dbf_pay - trace record for unformatted payload information
+ * @area: trace area this record originated from
+ * @counter: ascending record number
+ * @fsf_req_id: request id of fsf request
+ * @data: unformatted data
+ */
+struct zfcp_dbf_pay {
+ u8 counter;
+ char area[ZFCP_DBF_TAG_LEN];
+ u64 fsf_req_id;
+#define ZFCP_DBF_PAY_MAX_REC 0x100
+ char data[ZFCP_DBF_PAY_MAX_REC];
+} __packed;
+
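The payload record is fixed size, but only the bytes actually in use need to reach the trace; zfcp_dbf_pl_write() and zfcp_dbf_plen(), which the new zfcp_dbf.c code relies on, are not shown in this hunk. The sketch below illustrates one plausible shape for such a length helper and chunking writer under that assumption; it is not the patch's actual definition.

/* Sketch under assumptions: trim each record to the bytes in use. */
static inline int example_dbf_plen(unsigned int used)
{
	return sizeof(struct zfcp_dbf_pay) - ZFCP_DBF_PAY_MAX_REC + used;
}

/* Sketch of a writer that splits a long buffer into numbered "pay"
 * records sharing one area tag and fsf request id.  Callers are assumed
 * to run with interrupts disabled already (they hold an irqsave'd
 * hba/san/scsi area lock), so a plain spin_lock suffices here. */
static void example_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length,
				 char *area, u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16)ZFCP_DBF_PAY_MAX_REC,
				 (u16)(length - offset));
		memcpy(pl->data, (char *)data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, example_dbf_plen(rec_length));
		offset += rec_length;
		pl->counter++;
	}
	spin_unlock(&dbf->pay_lock);
}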
+/**
+ * struct zfcp_dbf - main dbf trace structure
+ * @pay: reference to payload trace area
+ * @rec: reference to recovery trace area
+ * @hba: reference to hba trace area
+ * @san: reference to san trace area
+ * @scsi: reference to scsi trace area
+ * @pay_lock: lock protecting payload trace buffer
+ * @rec_lock: lock protecting recovery trace buffer
+ * @hba_lock: lock protecting hba trace buffer
+ * @san_lock: lock protecting san trace buffer
+ * @scsi_lock: lock protecting scsi trace buffer
+ * @pay_buf: pre-allocated buffer for payload
+ * @rec_buf: pre-allocated buffer for recovery
+ * @hba_buf: pre-allocated buffer for hba
+ * @san_buf: pre-allocated buffer for san
+ * @scsi_buf: pre-allocated buffer for scsi
+ */
+struct zfcp_dbf {
+ debug_info_t *pay;
+ debug_info_t *rec;
+ debug_info_t *hba;
+ debug_info_t *san;
+ debug_info_t *scsi;
+ spinlock_t pay_lock;
+ spinlock_t rec_lock;
+ spinlock_t hba_lock;
+ spinlock_t san_lock;
+ spinlock_t scsi_lock;
+ struct zfcp_dbf_pay pay_buf;
+ struct zfcp_dbf_rec rec_buf;
+ struct zfcp_dbf_hba hba_buf;
+ struct zfcp_dbf_san san_buf;
+ struct zfcp_dbf_scsi scsi_buf;
+};
+
+static inline
+void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
+{
+ if (debug_level_enabled(req->adapter->dbf->hba, level))
+ zfcp_dbf_hba_fsf_res(tag, req);
+}
+
+/**
+ * zfcp_dbf_hba_fsf_response - trace event for request completion
+ * @req: request that has been completed
+ */
+static inline
+void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+{
+ struct fsf_qtcb *qtcb = req->qtcb;
+
+ if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+
+ } else if (qtcb->header.fsf_status != FSF_GOOD) {
+ zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+
+ } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+ (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+ zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
+
+ } else if (qtcb->header.log_length) {
+ zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
+
+ } else {
+ zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
+ }
+}
+
+static inline
+void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+ struct zfcp_fsf_req *req)
+{
+ struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+ scmd->device->host->hostdata[0];
+
+ if (debug_level_enabled(adapter->dbf->scsi, level))
+ zfcp_dbf_scsi(tag, scmd, req);
+}
+
+/**
+ * zfcp_dbf_scsi_result - trace event for SCSI command completion
+ * @scmd: SCSI command pointer
+ * @req: FSF request used to issue SCSI command
+ */
+static inline
+void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
+{
+ if (scmd->result != 0)
+ _zfcp_dbf_scsi("rsl_err", 3, scmd, req);
+ else if (scmd->retries > 0)
+ _zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
+ else
+ _zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
+}
+
+/**
+ * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
+ * @scmd: SCSI command pointer
+ */
+static inline
+void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
+{
+ _zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
+}
+
+/**
+ * zfcp_dbf_scsi_abort - trace event for SCSI command abort
+ * @tag: tag indicating success or failure of abort operation
+ * @scmd: SCSI command to be aborted
+ * @fsf_req: request containing abort (might be NULL)
+ */
+static inline
+void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+ struct zfcp_fsf_req *fsf_req)
+{
+ _zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
+}
+
+/**
+ * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
+ * @tag: tag indicating success or failure of reset operation
+ * @scmnd: SCSI command which caused this error recovery
+ * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+ */
+static inline
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+{
+ char tmp_tag[ZFCP_DBF_TAG_LEN];
+
+ if (flag == FCP_TMF_TGT_RESET)
+ memcpy(tmp_tag, "tr_", 3);
+ else
+ memcpy(tmp_tag, "lr_", 3);
+
+ memcpy(&tmp_tag[3], tag, 4);
+ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+}
#endif /* ZFCP_DBF_H */
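The rewritten zfcp_dbf.h helpers above share one pattern: a wrapper composes a short fixed-length tag (for example the "tr_"/"lr_" prefix plus a four-character suffix in zfcp_dbf_scsi_devreset) and records the event only when the requested verbosity level is enabled for the trace area. The following is a minimal user-space sketch of that pattern only; names such as DEMO_TAG_LEN, demo_level and demo_trace are illustrative assumptions and not part of the zfcp driver or the s390 debug facility.

/* Stand-alone sketch (user space) of the tag-composition and level-gating
 * pattern used by the new zfcp_dbf.h inline helpers.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_TAG_LEN 7			/* mirrors ZFCP_DBF_TAG_LEN */

static int demo_level = 3;		/* currently enabled trace verbosity */

/* gate a record on its level, like debug_level_enabled() gates a dbf area */
static void demo_trace(const char *tag, int level)
{
	if (level <= demo_level)
		printf("trace: %.*s (level %d)\n", DEMO_TAG_LEN, tag, level);
}

/* compose "tr_"/"lr_" plus a 4-char suffix, like zfcp_dbf_scsi_devreset() */
static void demo_devreset(const char *tag, int tgt_reset)
{
	char tmp_tag[DEMO_TAG_LEN];

	memcpy(tmp_tag, tgt_reset ? "tr_" : "lr_", 3);
	memcpy(&tmp_tag[3], tag, 4);
	demo_trace(tmp_tag, 1);		/* resets are always level 1 */
}

int main(void)
{
	demo_devreset("okay", 1);	/* prints "tr_okay" */
	demo_devreset("fail", 0);	/* prints "lr_fail" */
	demo_trace("fs_norm", 6);	/* suppressed while demo_level == 3 */
	return 0;
}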
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 4c362a9069f..d91173f326c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_DEF_H
@@ -22,6 +22,8 @@
#include <linux/syscalls.h>
#include <linux/scatterlist.h>
#include <linux/ioctl.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
@@ -29,46 +31,20 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
#include <asm/ccwdev.h>
-#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
-#include "zfcp_dbf.h"
#include "zfcp_fsf.h"
+#include "zfcp_fc.h"
+#include "zfcp_qdio.h"
-
-/********************* GENERAL DEFINES *********************************/
-
-#define REQUEST_LIST_SIZE 128
+struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
-/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
-
-/* Adapter Identification Parameters */
-#define ZFCP_CONTROL_UNIT_TYPE 0x1731
-#define ZFCP_CONTROL_UNIT_MODEL 0x03
-#define ZFCP_DEVICE_TYPE 0x1732
-#define ZFCP_DEVICE_MODEL 0x03
-#define ZFCP_DEVICE_MODEL_PRIV 0x04
-
-/* DMQ bug workaround: don't use last SBALE */
-#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
-
-/* index of last SBALE (with respect to DMQ bug workaround) */
-#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain */
-#define ZFCP_MAX_SBALES_PER_REQ \
- (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
- /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-
-#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
- /* max. number of (data buffer) SBALEs in largest SBAL chain
- multiplied with number of sectors per 4k block */
-
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
@@ -77,134 +53,6 @@
/* timeout value for "default timer" for fsf requests */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
-/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
-
-/* timeout for name-server lookup (in seconds) */
-#define ZFCP_NS_GID_PN_TIMEOUT 10
-
-/* task attribute values in FCP-2 FCP_CMND IU */
-#define SIMPLE_Q 0
-#define HEAD_OF_Q 1
-#define ORDERED_Q 2
-#define ACA_Q 4
-#define UNTAGGED 5
-
-/* task management flags in FCP-2 FCP_CMND IU */
-#define FCP_CLEAR_ACA 0x40
-#define FCP_TARGET_RESET 0x20
-#define FCP_LOGICAL_UNIT_RESET 0x10
-#define FCP_CLEAR_TASK_SET 0x04
-#define FCP_ABORT_TASK_SET 0x02
-
-#define FCP_CDB_LENGTH 16
-
-#define ZFCP_DID_MASK 0x00FFFFFF
-
-/* FCP(-2) FCP_CMND IU */
-struct fcp_cmnd_iu {
- u64 fcp_lun; /* FCP logical unit number */
- u8 crn; /* command reference number */
- u8 reserved0:5; /* reserved */
- u8 task_attribute:3; /* task attribute */
- u8 task_management_flags; /* task management flags */
- u8 add_fcp_cdb_length:6; /* additional FCP_CDB length */
- u8 rddata:1; /* read data */
- u8 wddata:1; /* write data */
- u8 fcp_cdb[FCP_CDB_LENGTH];
-} __attribute__((packed));
-
-/* FCP(-2) FCP_RSP IU */
-struct fcp_rsp_iu {
- u8 reserved0[10];
- union {
- struct {
- u8 reserved1:3;
- u8 fcp_conf_req:1;
- u8 fcp_resid_under:1;
- u8 fcp_resid_over:1;
- u8 fcp_sns_len_valid:1;
- u8 fcp_rsp_len_valid:1;
- } bits;
- u8 value;
- } validity;
- u8 scsi_status;
- u32 fcp_resid;
- u32 fcp_sns_len;
- u32 fcp_rsp_len;
-} __attribute__((packed));
-
-
-#define RSP_CODE_GOOD 0
-#define RSP_CODE_LENGTH_MISMATCH 1
-#define RSP_CODE_FIELD_INVALID 2
-#define RSP_CODE_RO_MISMATCH 3
-#define RSP_CODE_TASKMAN_UNSUPP 4
-#define RSP_CODE_TASKMAN_FAILED 5
-
-/* see fc-fs */
-#define LS_RSCN 0x61
-#define LS_LOGO 0x05
-#define LS_PLOGI 0x03
-
-struct fcp_rscn_head {
- u8 command;
- u8 page_length; /* always 0x04 */
- u16 payload_len;
-} __attribute__((packed));
-
-struct fcp_rscn_element {
- u8 reserved:2;
- u8 event_qual:4;
- u8 addr_format:2;
- u32 nport_did:24;
-} __attribute__((packed));
-
-/* see fc-ph */
-struct fcp_logo {
- u32 command;
- u32 nport_did;
- u64 nport_wwpn;
-} __attribute__((packed));
-
-/*
- * FC-FS stuff
- */
-#define R_A_TOV 10 /* seconds */
-
-#define ZFCP_LS_RLS 0x0f
-#define ZFCP_LS_ADISC 0x52
-#define ZFCP_LS_RPS 0x56
-#define ZFCP_LS_RSCN 0x61
-#define ZFCP_LS_RNID 0x78
-
-struct zfcp_ls_adisc {
- u8 code;
- u8 field[3];
- u32 hard_nport_id;
- u64 wwpn;
- u64 wwnn;
- u32 nport_id;
-} __attribute__ ((packed));
-
-/*
- * FC-GS-2 stuff
- */
-#define ZFCP_CT_REVISION 0x01
-#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
-#define ZFCP_CT_NAME_SERVER 0x02
-#define ZFCP_CT_SYNCHRONOUS 0x00
-#define ZFCP_CT_SCSI_FCP 0x08
-#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
-#define ZFCP_CT_GID_PN 0x0121
-#define ZFCP_CT_GPN_FT 0x0172
-#define ZFCP_CT_ACCEPT 0x8002
-#define ZFCP_CT_REJECT 0x8001
-
-/*
- * FC-GS-4 stuff
- */
-#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
-
/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
/*
@@ -214,7 +62,6 @@ struct zfcp_ls_adisc {
#define ZFCP_COMMON_FLAGS 0xfff00000
/* common status bits */
-#define ZFCP_STATUS_COMMON_REMOVE 0x80000000
#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
@@ -225,48 +72,26 @@ struct zfcp_ls_adisc {
#define ZFCP_STATUS_COMMON_NOESC 0x00200000
/* adapter status */
+#define ZFCP_STATUS_ADAPTER_MB_ACT 0x00000001
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
+#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
-#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
-#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
+#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
-
-/* FC-PH/FC-GS well-known address identifiers for generic services */
-#define ZFCP_DID_WKA 0xFFFFF0
-#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
-#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
-#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
-#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8
-#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7
+#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
-
-/* well known address (WKA) port status*/
-enum zfcp_wka_status {
- ZFCP_WKA_PORT_OFFLINE,
- ZFCP_WKA_PORT_CLOSING,
- ZFCP_WKA_PORT_OPENING,
- ZFCP_WKA_PORT_ONLINE,
-};
-
-/* logical unit status */
-#define ZFCP_STATUS_UNIT_SHARED 0x00000004
-#define ZFCP_STATUS_UNIT_READONLY 0x00000008
+#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
/* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
-#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
-#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
-#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
-#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
/************************* STRUCTURE DEFINITIONS *****************************/
@@ -275,118 +100,14 @@ struct zfcp_fsf_req;
/* holds various memory pools of an adapter */
struct zfcp_adapter_mempool {
- mempool_t *fsf_req_erp;
- mempool_t *fsf_req_scsi;
- mempool_t *fsf_req_abort;
- mempool_t *fsf_req_status_read;
- mempool_t *data_status_read;
- mempool_t *data_gid_pn;
-};
-
-/*
- * header for CT_IU
- */
-struct ct_hdr {
- u8 revision; // 0x01
- u8 in_id[3]; // 0x00
- u8 gs_type; // 0xFC Directory Service
- u8 gs_subtype; // 0x02 Name Server
- u8 options; // 0x00 single bidirectional exchange
- u8 reserved0;
- u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT
- u16 max_res_size; // <= (4096 - 16) / 4
- u8 reserved1;
- u8 reason_code;
- u8 reason_code_expl;
- u8 vendor_unique;
-} __attribute__ ((packed));
-
-/* nameserver request CT_IU -- for requests where
- * a port name is required */
-struct ct_iu_gid_pn_req {
- struct ct_hdr header;
- u64 wwpn;
-} __attribute__ ((packed));
-
-/* FS_ACC IU and data unit for GID_PN nameserver request */
-struct ct_iu_gid_pn_resp {
- struct ct_hdr header;
- u32 d_id;
-} __attribute__ ((packed));
-
-/**
- * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
- * @wka_port: port where the request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @timeout: FSF timeout for this request
- * @completion: completion for synchronization purposes
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_ct {
- struct zfcp_wka_port *wka_port;
- struct scatterlist *req;
- struct scatterlist *resp;
- void (*handler)(unsigned long);
- unsigned long handler_data;
- int timeout;
- struct completion *completion;
- int status;
-};
-
-/* used for name server requests in error recovery */
-struct zfcp_gid_pn_data {
- struct zfcp_send_ct ct;
- struct scatterlist req;
- struct scatterlist resp;
- struct ct_iu_gid_pn_req ct_iu_req;
- struct ct_iu_gid_pn_resp ct_iu_resp;
- struct zfcp_port *port;
-};
-
-/**
- * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
- * @adapter: adapter where request is sent from
- * @port: port where ELS is destinated (port reference count has to be increased)
- * @d_id: destiniation id of port where request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @completion: completion for synchronization purposes
- * @ls_code: hex code of ELS command
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_els {
- struct zfcp_adapter *adapter;
- struct zfcp_port *port;
- u32 d_id;
- struct scatterlist *req;
- struct scatterlist *resp;
- void (*handler)(unsigned long);
- unsigned long handler_data;
- struct completion *completion;
- int ls_code;
- int status;
-};
-
-struct zfcp_wka_port {
- struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
- enum zfcp_wka_status status;
- atomic_t refcount;
- u32 d_id;
- u32 handle;
- struct mutex mutex;
- struct delayed_work work;
-};
-
-struct zfcp_qdio_queue {
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
- u8 first; /* index of next free bfr in queue */
- atomic_t count; /* number of free buffers in queue */
+ mempool_t *erp_req;
+ mempool_t *gid_pn_req;
+ mempool_t *scsi_req;
+ mempool_t *scsi_abort;
+ mempool_t *status_read_req;
+ mempool_t *sr_data;
+ mempool_t *gid_pn;
+ mempool_t *qtcb_pool;
};
struct zfcp_erp_action {
@@ -394,11 +115,10 @@ struct zfcp_erp_action {
int action; /* requested action code */
struct zfcp_adapter *adapter; /* device which should be recovered */
struct zfcp_port *port;
- struct zfcp_unit *unit;
+ struct scsi_device *sdev;
u32 status; /* recovery status */
u32 step; /* active step of this erp action */
- struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
- for this action */
+ unsigned long fsf_req_id;
struct timer_list timer;
};
@@ -422,13 +142,12 @@ struct zfcp_latencies {
};
struct zfcp_adapter {
- atomic_t refcount; /* reference count */
- wait_queue_head_t remove_wq; /* can be used to wait for
- refcount drop to zero */
+ struct kref ref;
u64 peer_wwnn; /* P2P peer WWNN */
u64 peer_wwpn; /* P2P peer WWPN */
u32 peer_d_id; /* P2P peer D_ID */
struct ccw_device *ccw_device; /* S/390 ccw device */
+ struct zfcp_qdio *qdio;
u32 hydra_version; /* Hydra version */
u32 fsf_lic_version;
u32 adapter_features; /* FCP channel features */
@@ -436,31 +155,23 @@ struct zfcp_adapter {
u32 hardware_version; /* of FCP channel */
u16 timer_ticks; /* time int for a tick */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
- struct list_head port_list_head; /* remote port list */
+ struct list_head port_list; /* remote port list */
+ rwlock_t port_list_lock; /* port list lock */
unsigned long req_no; /* unique FSF req number */
- struct list_head *req_list; /* list of pending reqs */
- spinlock_t req_list_lock; /* request list lock */
- struct zfcp_qdio_queue req_q; /* request queue */
- spinlock_t req_q_lock; /* for operations on queue */
- ktime_t req_q_time; /* time of last fill level change */
- u64 req_q_util; /* for accounting */
- spinlock_t qdio_stat_lock;
+ struct zfcp_reqlist *req_list;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
- wait_queue_head_t request_wq; /* can be used to wait for
- more avaliable SBALs */
- struct zfcp_qdio_queue resp_q; /* response queue */
rwlock_t abort_lock; /* Protects against SCSI
stack abort/command
completion races */
atomic_t stat_miss; /* # missing status reads*/
+ unsigned int stat_read_buf_num;
struct work_struct stat_work;
atomic_t status; /* status of this adapter */
struct list_head erp_ready_head; /* error recovery for this
adapter/devices */
+ wait_queue_head_t erp_ready_wq;
struct list_head erp_running_head;
rwlock_t erp_lock;
- struct semaphore erp_ready_sem;
- wait_queue_head_t erp_thread_wqh;
wait_queue_head_t erp_done_wqh;
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
@@ -468,38 +179,29 @@ struct zfcp_adapter {
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
- struct zfcp_wka_port nsp; /* adapter's nameserver */
- debug_info_t *rec_dbf;
- debug_info_t *hba_dbf;
- debug_info_t *san_dbf; /* debug feature areas */
- debug_info_t *scsi_dbf;
- spinlock_t rec_dbf_lock;
- spinlock_t hba_dbf_lock;
- spinlock_t san_dbf_lock;
- spinlock_t scsi_dbf_lock;
- struct zfcp_rec_dbf_record rec_dbf_buf;
- struct zfcp_hba_dbf_record hba_dbf_buf;
- struct zfcp_san_dbf_record san_dbf_buf;
- struct zfcp_scsi_dbf_record scsi_dbf_buf;
+ struct task_struct *erp_thread;
+ struct zfcp_fc_wka_ports *gs; /* generic services */
+ struct zfcp_dbf *dbf; /* debug traces */
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
- struct qdio_initialize qdio_init_data; /* for qdio_establish */
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
+ struct work_struct ns_up_work;
struct service_level service_level;
- atomic_t qdio_outb_full; /* queue full incidents */
+ struct workqueue_struct *work_queue;
+ struct device_dma_parameters dma_parms;
+ struct zfcp_fc_events events;
};
struct zfcp_port {
- struct device sysfs_device; /* sysfs device */
+ struct device dev;
struct fc_rport *rport; /* rport of fc transport class */
struct list_head list; /* list of remote ports */
- atomic_t refcount; /* reference count */
- wait_queue_head_t remove_wq; /* can be used to wait for
- refcount drop to zero */
struct zfcp_adapter *adapter; /* adapter used to access port */
- struct list_head unit_list_head; /* head of logical unit list */
+ struct list_head unit_list; /* head of logical unit list */
+ rwlock_t unit_list_lock; /* unit list lock */
+ atomic_t units; /* zfcp_unit count */
atomic_t status; /* status of this remote port */
u64 wwnn; /* WWNN if known */
u64 wwpn; /* WWPN */
@@ -513,165 +215,108 @@ struct zfcp_port {
struct work_struct test_link_work;
struct work_struct rport_work;
enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task;
+ unsigned int starget_id;
};
+/**
+ * struct zfcp_unit - LUN configured via zfcp sysfs
+ * @dev: struct device for sysfs representation and reference counting
+ * @list: entry in LUN/unit list per zfcp_port
+ * @port: reference to zfcp_port where this LUN is configured
+ * @fcp_lun: 64 bit LUN value
+ * @scsi_work: for running scsi_scan_target
+ *
+ * This is the representation of a LUN that has been configured for
+ * use. The main data here is the 64 bit LUN value; data for
+ * running I/O and recovery lives in struct zfcp_scsi_dev.
+ */
struct zfcp_unit {
- struct device sysfs_device; /* sysfs device */
- struct list_head list; /* list of logical units */
- atomic_t refcount; /* reference count */
- wait_queue_head_t remove_wq; /* can be used to wait for
- refcount drop to zero */
- struct zfcp_port *port; /* remote port of unit */
- atomic_t status; /* status of this logical unit */
- u64 fcp_lun; /* own FCP_LUN */
- u32 handle; /* handle assigned by FSF */
- struct scsi_device *device; /* scsi device struct pointer */
- struct zfcp_erp_action erp_action; /* pending error recovery */
- atomic_t erp_counter;
- struct zfcp_latencies latencies;
+ struct device dev;
+ struct list_head list;
+ struct zfcp_port *port;
+ u64 fcp_lun;
struct work_struct scsi_work;
};
-/* FSF request */
-struct zfcp_fsf_req {
- struct list_head list; /* list of FSF requests */
- unsigned long req_id; /* unique request ID */
- struct zfcp_adapter *adapter; /* adapter request belongs to */
- u8 sbal_number; /* nr of SBALs free for use */
- u8 sbal_first; /* first SBAL for this request */
- u8 sbal_last; /* last SBAL for this request */
- u8 sbal_limit; /* last possible SBAL for
- this reuest */
- u8 sbale_curr; /* current SBALE during creation
- of request */
- u8 sbal_response; /* SBAL used in interrupt */
- wait_queue_head_t completion_wq; /* can be used by a routine
- to wait for completion */
- u32 status; /* status of this request */
- u32 fsf_command; /* FSF Command copy */
- struct fsf_qtcb *qtcb; /* address of associated QTCB */
- u32 seq_no; /* Sequence number of request */
- void *data; /* private data of request */
- struct timer_list timer; /* used for erp or scsi er */
- struct zfcp_erp_action *erp_action; /* used if this request is
- issued on behalf of erp */
- mempool_t *pool; /* used if request was alloacted
- from emergency pool */
- unsigned long long issued; /* request sent time (STCK) */
- struct zfcp_unit *unit;
- void (*handler)(struct zfcp_fsf_req *);
- u16 qdio_outb_usage;/* usage of outbound queue */
- u16 qdio_inb_usage; /* usage of inbound queue */
-};
-
-/* driver data */
-struct zfcp_data {
- struct scsi_host_template scsi_host_template;
- struct scsi_transport_template *scsi_transport_template;
- rwlock_t config_lock; /* serialises changes
- to adapter/port/unit
- lists */
- struct semaphore config_sema; /* serialises configuration
- changes */
- struct kmem_cache *fsf_req_qtcb_cache;
- struct kmem_cache *sr_buffer_cache;
- struct kmem_cache *gid_pn_cache;
- struct workqueue_struct *work_queue;
-};
-
-/* struct used by memory pools for fsf_requests */
-struct zfcp_fsf_req_qtcb {
- struct zfcp_fsf_req fsf_req;
- struct fsf_qtcb qtcb;
+/**
+ * struct zfcp_scsi_dev - zfcp data per SCSI device
+ * @status: zfcp internal status flags
+ * @lun_handle: handle from "open lun" for issuing FSF requests
+ * @erp_action: zfcp erp data for opening and recovering this LUN
+ * @erp_counter: zfcp erp counter for this LUN
+ * @latencies: FSF channel and fabric latencies
+ * @port: zfcp_port this LUN belongs to
+ */
+struct zfcp_scsi_dev {
+ atomic_t status;
+ u32 lun_handle;
+ struct zfcp_erp_action erp_action;
+ atomic_t erp_counter;
+ struct zfcp_latencies latencies;
+ struct zfcp_port *port;
};
-/********************** ZFCP SPECIFIC DEFINES ********************************/
-
-#define ZFCP_SET 0x00000100
-#define ZFCP_CLEAR 0x00000200
-
-/*
- * Helper functions for request ID management.
+/**
+ * sdev_to_zfcp - Access zfcp LUN data for SCSI device
+ * @sdev: scsi_device to get the zfcp_scsi_dev pointer from
*/
-static inline int zfcp_reqlist_hash(unsigned long req_id)
-{
- return req_id % REQUEST_LIST_SIZE;
-}
-
-static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
- struct zfcp_fsf_req *fsf_req)
+static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
{
- list_del(&fsf_req->list);
+ return scsi_transport_device_data(sdev);
}
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
-{
- struct zfcp_fsf_req *request;
- unsigned int idx;
-
- idx = zfcp_reqlist_hash(req_id);
- list_for_each_entry(request, &adapter->req_list[idx], list)
- if (request->req_id == req_id)
- return request;
- return NULL;
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
-{
- struct zfcp_fsf_req *request;
- unsigned int idx;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
- list_for_each_entry(request, &adapter->req_list[idx], list)
- if (request == req)
- return request;
- }
- return NULL;
-}
-
-/*
- * functions needed for reference/usage counting
+/**
+ * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
+ * @sdev: SCSI device to get the LUN from
*/
-
-static inline void
-zfcp_unit_get(struct zfcp_unit *unit)
+static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
{
- atomic_inc(&unit->refcount);
-}
+ u64 fcp_lun;
-static inline void
-zfcp_unit_put(struct zfcp_unit *unit)
-{
- if (atomic_dec_return(&unit->refcount) == 0)
- wake_up(&unit->remove_wq);
+ int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
+ return fcp_lun;
}
-static inline void
-zfcp_port_get(struct zfcp_port *port)
-{
- atomic_inc(&port->refcount);
-}
-
-static inline void
-zfcp_port_put(struct zfcp_port *port)
-{
- if (atomic_dec_return(&port->refcount) == 0)
- wake_up(&port->remove_wq);
-}
-
-static inline void
-zfcp_adapter_get(struct zfcp_adapter *adapter)
-{
- atomic_inc(&adapter->refcount);
-}
+/**
+ * struct zfcp_fsf_req - basic FSF request structure
+ * @list: list of FSF requests
+ * @req_id: unique request ID
+ * @adapter: adapter this request belongs to
+ * @qdio_req: qdio queue related values
+ * @completion: used to signal the completion of the request
+ * @status: status of the request
+ * @fsf_command: FSF command issued
+ * @qtcb: associated QTCB
+ * @seq_no: sequence number of this request
+ * @data: private data
+ * @timer: timer data of this request
+ * @erp_action: reference to erp action if request issued on behalf of ERP
+ * @pool: reference to memory pool if used for this request
+ * @issued: time when request was sent (STCK)
+ * @handler: handler which should be called to process response
+ */
+struct zfcp_fsf_req {
+ struct list_head list;
+ unsigned long req_id;
+ struct zfcp_adapter *adapter;
+ struct zfcp_qdio_req qdio_req;
+ struct completion completion;
+ u32 status;
+ u32 fsf_command;
+ struct fsf_qtcb *qtcb;
+ u32 seq_no;
+ void *data;
+ struct timer_list timer;
+ struct zfcp_erp_action *erp_action;
+ mempool_t *pool;
+ unsigned long long issued;
+ void (*handler)(struct zfcp_fsf_req *);
+};
-static inline void
-zfcp_adapter_put(struct zfcp_adapter *adapter)
+static inline
+int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
{
- if (atomic_dec_return(&adapter->refcount) == 0)
- wake_up(&adapter->remove_wq);
+ return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
}
#endif /* ZFCP_DEF_H */
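The zfcp_def.h hunk above removes the open-coded hashed request list (zfcp_reqlist_hash, zfcp_reqlist_find and friends) in favour of the new struct zfcp_reqlist from zfcp_reqlist.h. The underlying idea is unchanged: pending FSF requests are bucketed by req_id so the completion path can find a request without scanning all of them. Below is a minimal user-space sketch of that bucketing scheme under those assumptions; the node type and helper names (demo_req, demo_find) are illustrative and are not the zfcp_reqlist.h API, which is not shown in this hunk.

/* Stand-alone sketch (user space) of a req_id-hashed pending-request list. */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_BUCKETS 128		/* mirrors the old REQUEST_LIST_SIZE */

struct demo_req {
	unsigned long req_id;
	struct demo_req *next;
};

static struct demo_req *buckets[DEMO_BUCKETS];

static unsigned int demo_hash(unsigned long req_id)
{
	return req_id % DEMO_BUCKETS;	/* like the old zfcp_reqlist_hash() */
}

static void demo_add(struct demo_req *req)
{
	unsigned int idx = demo_hash(req->req_id);

	req->next = buckets[idx];	/* push onto the bucket's list */
	buckets[idx] = req;
}

static struct demo_req *demo_find(unsigned long req_id)
{
	struct demo_req *req;

	for (req = buckets[demo_hash(req_id)]; req; req = req->next)
		if (req->req_id == req_id)
			return req;
	return NULL;			/* unknown or already completed */
}

int main(void)
{
	struct demo_req a = { .req_id = 42 };
	struct demo_req b = { .req_id = 42 + DEMO_BUCKETS };

	demo_add(&a);
	demo_add(&b);			/* same bucket, different id */
	printf("found %lu -> %p\n", b.req_id, (void *)demo_find(b.req_id));
	printf("found 7 -> %p\n", (void *)demo_find(7));
	return 0;
}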
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fdc9b4352a6..c82fe65c412 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,13 +3,15 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/kthread.h>
#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
#define ZFCP_MAX_ERPS 3
@@ -19,6 +21,7 @@ enum zfcp_erp_act_flags {
ZFCP_STATUS_ERP_DISMISSING = 0x00100000,
ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
+ ZFCP_STATUS_ERP_NO_REF = 0x00800000,
};
enum zfcp_erp_steps {
@@ -26,14 +29,13 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
- ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
- ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
- ZFCP_ERP_STEP_UNIT_OPENING = 0x2000,
+ ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
+ ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
};
enum zfcp_erp_act_type {
- ZFCP_ERP_ACTION_REOPEN_UNIT = 1,
+ ZFCP_ERP_ACTION_REOPEN_LUN = 1,
ZFCP_ERP_ACTION_REOPEN_PORT = 2,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
@@ -55,9 +57,8 @@ enum zfcp_erp_act_result {
static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
{
- zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL,
- ZFCP_STATUS_COMMON_UNBLOCKED | mask,
- ZFCP_CLEAR);
+ zfcp_erp_clear_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_UNBLOCKED | mask);
}
static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
@@ -75,9 +76,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
struct zfcp_adapter *adapter = act->adapter;
list_move(&act->list, &act->adapter->erp_ready_head);
- zfcp_rec_dbf_event_action("erardy1", act);
- up(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread("erardy2", adapter);
+ zfcp_dbf_rec_run("erardy1", act);
+ wake_up(&adapter->erp_ready_wq);
+ zfcp_dbf_rec_run("erardy2", act);
}
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -87,21 +88,27 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
zfcp_erp_action_ready(act);
}
-static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
+static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
{
- if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
- zfcp_erp_action_dismiss(&unit->erp_action);
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+ if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+ zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
}
static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
{
- struct zfcp_unit *unit;
+ struct scsi_device *sdev;
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
- else
- list_for_each_entry(unit, &port->unit_list_head, list)
- zfcp_erp_action_dismiss_unit(unit);
+ else {
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ zfcp_erp_action_dismiss_lun(sdev);
+ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
}
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -110,22 +117,27 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&adapter->erp_action);
- else
- list_for_each_entry(port, &adapter->port_list_head, list)
+ else {
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
zfcp_erp_action_dismiss_port(port);
+ read_unlock(&adapter->port_list_lock);
+ }
}
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
- struct zfcp_unit *unit)
+ struct scsi_device *sdev)
{
int need = want;
- int u_status, p_status, a_status;
+ int l_status, p_status, a_status;
+ struct zfcp_scsi_dev *zfcp_sdev;
switch (want) {
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
- u_status = atomic_read(&unit->status);
- if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ zfcp_sdev = sdev_to_zfcp(sdev);
+ l_status = atomic_read(&zfcp_sdev->status);
+ if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
@@ -134,15 +146,21 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
/* fall through */
- case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
p_status = atomic_read(&port->status);
+ if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
+ need = ZFCP_ERP_ACTION_REOPEN_PORT;
+ /* fall through */
+ case ZFCP_ERP_ACTION_REOPEN_PORT:
+ p_status = atomic_read(&port->status);
if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
a_status = atomic_read(&adapter->status);
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
+ if (p_status & ZFCP_STATUS_COMMON_NOESC)
+ return need;
if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
/* fall through */
@@ -150,105 +168,116 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
a_status = atomic_read(&adapter->status);
if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
+ if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
+ !(a_status & ZFCP_STATUS_COMMON_OPEN))
+ return 0; /* shutdown requested for closed adapter */
}
return need;
}
-static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
+static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
- struct zfcp_unit *unit)
+ struct scsi_device *sdev)
{
struct zfcp_erp_action *erp_action;
- u32 status = 0;
+ struct zfcp_scsi_dev *zfcp_sdev;
switch (need) {
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
- zfcp_unit_get(unit);
- atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
- erp_action = &unit->erp_action;
- if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
- status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ zfcp_sdev = sdev_to_zfcp(sdev);
+ if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
+ if (scsi_device_get(sdev))
+ return NULL;
+ atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+ &zfcp_sdev->status);
+ erp_action = &zfcp_sdev->erp_action;
+ memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+ erp_action->port = port;
+ erp_action->sdev = sdev;
+ if (!(atomic_read(&zfcp_sdev->status) &
+ ZFCP_STATUS_COMMON_RUNNING))
+ act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- zfcp_port_get(port);
+ if (!get_device(&port->dev))
+ return NULL;
zfcp_erp_action_dismiss_port(port);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
+ memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+ erp_action->port = port;
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
- status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+ act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
- zfcp_adapter_get(adapter);
+ kref_get(&adapter->ref);
zfcp_erp_action_dismiss_adapter(adapter);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
+ memset(erp_action, 0, sizeof(struct zfcp_erp_action));
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
- status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+ act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
default:
return NULL;
}
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
erp_action->adapter = adapter;
- erp_action->port = port;
- erp_action->unit = unit;
erp_action->action = need;
- erp_action->status = status;
+ erp_action->status = act_status;
return erp_action;
}
static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
- struct zfcp_unit *unit, char *id, void *ref)
+ struct scsi_device *sdev,
+ char *id, u32 act_status)
{
int retval = 1, need;
- struct zfcp_erp_action *act = NULL;
+ struct zfcp_erp_action *act;
- if (!(atomic_read(&adapter->status) &
- ZFCP_STATUS_ADAPTER_ERP_THREAD_UP))
+ if (!adapter->erp_thread)
return -EIO;
- need = zfcp_erp_required_act(want, adapter, port, unit);
+ need = zfcp_erp_required_act(want, adapter, port, sdev);
if (!need)
goto out;
- atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
- act = zfcp_erp_setup_act(need, adapter, port, unit);
+ act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
if (!act)
goto out;
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
- up(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread("eracte1", adapter);
+ wake_up(&adapter->erp_ready_wq);
retval = 0;
out:
- zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
- adapter, port, unit);
+ zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
return retval;
}
static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
- int clear_mask, char *id, void *ref)
+ int clear_mask, char *id)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
/* ensure propagation of failed status to new devices */
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
- zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_ERP_FAILED);
return -EIO;
}
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
- adapter, NULL, NULL, id, ref);
+ adapter, NULL, NULL, id, 0);
}
/**
@@ -256,18 +285,22 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
* @adapter: Adapter to reopen.
* @clear: Status flags to clear.
* @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
*/
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
- char *id, void *ref)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
{
unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
- _zfcp_erp_adapter_reopen(adapter, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ zfcp_erp_adapter_block(adapter, clear);
+ zfcp_scsi_schedule_rports_block(adapter);
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_ERP_FAILED);
+ else
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+ NULL, NULL, id, 0);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
@@ -275,13 +308,12 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
* @adapter: Adapter to shut down.
* @clear: Status flags to clear.
* @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
*/
void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
- char *id, void *ref)
+ char *id)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
+ zfcp_erp_adapter_reopen(adapter, clear | flags, id);
}
/**
@@ -289,38 +321,21 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
* @port: Port to shut down.
* @clear: Status flags to clear.
* @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
- */
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
- void *ref)
-{
- int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_port_reopen(port, clear | flags, id, ref);
-}
-
-/**
- * zfcp_erp_unit_shutdown - Shutdown unit
- * @unit: Unit to shut down.
- * @clear: Status flags to clear.
- * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
*/
-void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id,
- void *ref)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
- zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
+ zfcp_erp_port_reopen(port, clear | flags, id);
}
static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
{
- zfcp_erp_modify_port_status(port, "erpblk1", NULL,
- ZFCP_STATUS_COMMON_UNBLOCKED | clear,
- ZFCP_CLEAR);
+ zfcp_erp_clear_port_status(port,
+ ZFCP_STATUS_COMMON_UNBLOCKED | clear);
}
-static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
- int clear, char *id, void *ref)
+static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+ char *id)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
@@ -329,168 +344,197 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
return;
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
- port->adapter, port, NULL, id, ref);
+ port->adapter, port, NULL, id, 0);
}
/**
* zfcp_erp_port_forced_reopen - Forced close of port and open again
* @port: Port to force close and to reopen.
+ * @clear: Status flags to clear.
* @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
*/
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
- void *ref)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
- _zfcp_erp_port_forced_reopen(port, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ _zfcp_erp_port_forced_reopen(port, clear, id);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
}
-static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
- void *ref)
+static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
/* ensure propagation of failed status to new devices */
- zfcp_erp_port_failed(port, "erpreo1", NULL);
+ zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
return -EIO;
}
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
- port->adapter, port, NULL, id, ref);
+ port->adapter, port, NULL, id, 0);
}
/**
* zfcp_erp_port_reopen - trigger remote port recovery
* @port: port to recover
* @clear_mask: flags in port status to be cleared
+ * @id: Id for debug trace event.
*
* Returns 0 if recovery has been triggered, < 0 if not.
*/
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
- unsigned long flags;
int retval;
+ unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
- retval = _zfcp_erp_port_reopen(port, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ retval = _zfcp_erp_port_reopen(port, clear, id);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
return retval;
}
-static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
+static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
{
- zfcp_erp_modify_unit_status(unit, "erublk1", NULL,
- ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
- ZFCP_CLEAR);
+ zfcp_erp_clear_lun_status(sdev,
+ ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
}
-static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
- void *ref)
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
+ u32 act_status)
{
- struct zfcp_adapter *adapter = unit->port->adapter;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
- zfcp_erp_unit_block(unit, clear);
+ zfcp_erp_lun_block(sdev, clear);
- if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return;
- zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
- adapter, unit->port, unit, id, ref);
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
+ zfcp_sdev->port, sdev, id, act_status);
}
/**
- * zfcp_erp_unit_reopen - initiate reopen of a unit
- * @unit: unit to be reopened
- * @clear_mask: specifies flags in unit status to be cleared
+ * zfcp_erp_lun_reopen - initiate reopen of a LUN
+ * @sdev: SCSI device / LUN to be reopened
+ * @clear: specifies flags in LUN status to be cleared
+ * @id: Id for debug trace event.
+ *
* Return: 0 on success, < 0 on error
*/
-void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
- void *ref)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
{
unsigned long flags;
- struct zfcp_port *port = unit->port;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_port *port = zfcp_sdev->port;
struct zfcp_adapter *adapter = port->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
- _zfcp_erp_unit_reopen(unit, clear, id, ref);
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ _zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
}
-static int status_change_set(unsigned long mask, atomic_t *status)
+/**
+ * zfcp_erp_lun_shutdown - Shutdown LUN
+ * @sdev: SCSI device / LUN to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
{
- return (atomic_read(status) ^ mask) & mask;
+ int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+ zfcp_erp_lun_reopen(sdev, clear | flags, id);
}
-static int status_change_clear(unsigned long mask, atomic_t *status)
+/**
+ * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
+ * @sdev: SCSI device / LUN to shut down.
+ * @id: Id for debug trace event.
+ *
+ * Do not acquire a reference for the LUN when creating the ERP
+ * action. It is safe, because this function waits for the ERP to
+ * complete first. This allows the LUN to be shut down even when the
+ * SCSI device is in the state SDEV_DEL, where scsi_device_get would fail.
+ */
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
{
- return atomic_read(status) & mask;
+ unsigned long flags;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_port *port = zfcp_sdev->port;
+ struct zfcp_adapter *adapter = port->adapter;
+ int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+ zfcp_erp_wait(adapter);
+}
+
+static int status_change_set(unsigned long mask, atomic_t *status)
+{
+ return (atomic_read(status) ^ mask) & mask;
}
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
- zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter);
+ zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
- zfcp_rec_dbf_event_port("erpubl1", NULL, port);
+ zfcp_dbf_rec_run("erpubl1", &port->erp_action);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
-static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
+static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
- if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
- zfcp_rec_dbf_event_unit("eruubl1", NULL, unit);
- atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+ if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
+ zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
+ atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
- zfcp_rec_dbf_event_action("erator1", erp_action);
+ zfcp_dbf_rec_run("erator1", erp_action);
}
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
+ struct zfcp_fsf_req *req;
- if (!act->fsf_req)
+ if (!act->fsf_req_id)
return;
- spin_lock(&adapter->req_list_lock);
- if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
- act->fsf_req->erp_action == act) {
+ spin_lock(&adapter->req_list->lock);
+ req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
+ if (req && req->erp_action == act) {
if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
- act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
- zfcp_rec_dbf_event_action("erscf_1", act);
- act->fsf_req->erp_action = NULL;
+ req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+ zfcp_dbf_rec_run("erscf_1", act);
+ req->erp_action = NULL;
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
- zfcp_rec_dbf_event_action("erscf_2", act);
- if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
- ZFCP_STATUS_FSFREQ_DISMISSED))
- act->fsf_req = NULL;
+ zfcp_dbf_rec_run("erscf_2", act);
+ if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+ act->fsf_req_id = 0;
} else
- act->fsf_req = NULL;
- spin_unlock(&adapter->req_list_lock);
+ act->fsf_req_id = 0;
+ spin_unlock(&adapter->req_list->lock);
}
/**
@@ -536,57 +580,57 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
}
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
- int clear, char *id, void *ref)
+ int clear, char *id)
{
struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list)
- _zfcp_erp_port_reopen(port, clear, id, ref);
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ _zfcp_erp_port_reopen(port, clear, id);
+ read_unlock(&adapter->port_list_lock);
}
-static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
- char *id, void *ref)
+static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+ char *id)
{
- struct zfcp_unit *unit;
+ struct scsi_device *sdev;
- list_for_each_entry(unit, &port->unit_list_head, list)
- _zfcp_erp_unit_reopen(unit, clear, id, ref);
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ _zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ spin_unlock(port->adapter->scsi_host->host_lock);
}
-static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act)
+static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
{
- struct zfcp_adapter *adapter = act->adapter;
- struct zfcp_port *port = act->port;
- struct zfcp_unit *unit = act->unit;
- u32 status = act->status;
-
- /* initiate follow-up actions depending on success of finished action */
switch (act->action) {
-
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
- if (status == ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL);
- else
- _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL);
+ _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
break;
-
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- if (status == ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL);
- else
- _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL);
+ _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
break;
-
case ZFCP_ERP_ACTION_REOPEN_PORT:
- if (status == ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL);
- else
- _zfcp_erp_port_forced_reopen(port, 0, "ersfa_6", NULL);
+ _zfcp_erp_port_reopen(act->port, 0, "ersff_3");
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
break;
+ }
+}
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
- if (status != ZFCP_ERP_SUCCEEDED)
- _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL);
+static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
+{
+ switch (act->action) {
+ case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+ _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ _zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_PORT:
+ _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
break;
}
}
@@ -595,25 +639,14 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
{
unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- read_lock(&adapter->erp_lock);
+ read_lock_irqsave(&adapter->erp_lock, flags);
if (list_empty(&adapter->erp_ready_head) &&
list_empty(&adapter->erp_running_head)) {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
&adapter->status);
wake_up(&adapter->erp_done_wqh);
}
- read_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-}
-
-static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
-{
- if (zfcp_qdio_open(act->adapter))
- return ZFCP_ERP_FAILED;
- init_waitqueue_head(&act->adapter->request_wq);
- atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
- return ZFCP_ERP_SUCCEEDED;
+ read_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
@@ -623,7 +656,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
adapter->peer_d_id);
if (IS_ERR(port)) /* error or port already attached */
return;
- _zfcp_erp_port_reopen(port, 0, "ereptp1", NULL);
+ _zfcp_erp_port_reopen(port, 0, "ereptp1");
}
static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -646,9 +679,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
}
- zfcp_rec_dbf_event_thread_lock("erasfx1", adapter);
- down(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock("erasfx2", adapter);
+ wait_event(adapter->erp_ready_wq,
+ !list_empty(&adapter->erp_ready_head));
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
break;
@@ -687,9 +719,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
if (ret)
return ZFCP_ERP_FAILED;
- zfcp_rec_dbf_event_thread_lock("erasox1", adapter);
- down(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock("erasox2", adapter);
+ zfcp_dbf_rec_run("erasox1", act);
+ wait_event(adapter->erp_ready_wq,
+ !list_empty(&adapter->erp_ready_head));
+ zfcp_dbf_rec_run("erasox2", act);
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
return ZFCP_ERP_FAILED;
@@ -704,7 +737,15 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- atomic_set(&act->adapter->stat_miss, 16);
+ if (mempool_resize(act->adapter->pool.sr_data,
+ act->adapter->stat_read_buf_num, GFP_KERNEL))
+ return ZFCP_ERP_FAILED;
+
+ if (mempool_resize(act->adapter->pool.status_read_req,
+ act->adapter->stat_read_buf_num, GFP_KERNEL))
+ return ZFCP_ERP_FAILED;
+
+ atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
if (zfcp_status_read_refill(act->adapter))
return ZFCP_ERP_FAILED;
@@ -716,13 +757,12 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
struct zfcp_adapter *adapter = act->adapter;
/* close queues to ensure that buffers are not accessed by adapter */
- zfcp_qdio_close(adapter);
+ zfcp_qdio_close(adapter->qdio);
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
- zfcp_fc_wka_port_force_offline(&adapter->nsp);
- /* all ports and units are closed */
- zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
- ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
+ zfcp_fc_wka_ports_force_offline(adapter->gs);
+ /* all ports and LUNs are closed */
+ zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
@@ -732,7 +772,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
- if (zfcp_erp_adapter_strategy_open_qdio(act)) {
+ if (zfcp_qdio_open(adapter->qdio)) {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status);
@@ -801,7 +841,7 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
- if (status & ZFCP_STATUS_PORT_PHYS_OPEN)
+ if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
return ZFCP_ERP_SUCCEEDED;
}
return ZFCP_ERP_FAILED;
@@ -839,28 +879,13 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
struct zfcp_port *port = act->port;
if (port->wwpn != adapter->peer_wwpn) {
- zfcp_erp_port_failed(port, "eroptp1", NULL);
+ zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
return ZFCP_ERP_FAILED;
}
port->d_id = adapter->peer_d_id;
return zfcp_erp_port_strategy_open_port(act);
}
-void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
-{
- int retval;
- struct zfcp_port *port = container_of(work, struct zfcp_port,
- gid_pn_work);
-
- retval = zfcp_fc_ns_gid_pn(&port->erp_action);
- if (retval == -ENOMEM)
- zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM);
- port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
- if (retval)
- zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
- zfcp_port_put(port);
-}
-
static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -874,28 +899,25 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
return zfcp_erp_open_ptp_port(act);
if (!port->d_id) {
- zfcp_port_get(port);
- if (!queue_work(zfcp_data.work_queue,
- &port->gid_pn_work))
- zfcp_port_put(port);
- return ZFCP_ERP_CONTINUES;
+ zfcp_fc_trigger_did_lookup(port);
+ return ZFCP_ERP_EXIT;
}
- case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
- if (!port->d_id)
- return ZFCP_ERP_FAILED;
return zfcp_erp_port_strategy_open_port(act);
case ZFCP_ERP_STEP_PORT_OPENING:
/* D_ID might have changed during open */
if (p_status & ZFCP_STATUS_COMMON_OPEN) {
- if (port->d_id)
- return ZFCP_ERP_SUCCEEDED;
- else {
- act->step = ZFCP_ERP_STEP_PORT_CLOSING;
- return ZFCP_ERP_CONTINUES;
+ if (!port->d_id) {
+ zfcp_fc_trigger_did_lookup(port);
+ return ZFCP_ERP_EXIT;
}
- /* fall through otherwise */
+ return ZFCP_ERP_SUCCEEDED;
+ }
+ if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
+ port->d_id = 0;
+ return ZFCP_ERP_FAILED;
}
+ /* fall through otherwise */
}
return ZFCP_ERP_FAILED;
}
@@ -903,19 +925,21 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
+ int p_status = atomic_read(&port->status);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)
+ if ((p_status & ZFCP_STATUS_COMMON_NOESC) &&
+ !(p_status & ZFCP_STATUS_COMMON_OPEN))
goto close_init_done;
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
zfcp_erp_port_strategy_clearstati(port);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
+ if (p_status & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_port_strategy_close(erp_action);
break;
case ZFCP_ERP_STEP_PORT_CLOSING:
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
+ if (p_status & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
break;
}
@@ -927,82 +951,86 @@ close_init_done:
return zfcp_erp_port_strategy_open_common(erp_action);
}
-static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
+static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
{
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_UNIT_SHARED |
- ZFCP_STATUS_UNIT_READONLY,
- &unit->status);
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+ &zfcp_sdev->status);
}
-static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
{
- int retval = zfcp_fsf_close_unit(erp_action);
+ int retval = zfcp_fsf_close_lun(erp_action);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
- erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
+ erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
{
- int retval = zfcp_fsf_open_unit(erp_action);
+ int retval = zfcp_fsf_open_lun(erp_action);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
- erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
+ erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
-static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
{
- struct zfcp_unit *unit = erp_action->unit;
+ struct scsi_device *sdev = erp_action->sdev;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
- zfcp_erp_unit_strategy_clearstati(unit);
- if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
- return zfcp_erp_unit_strategy_close(erp_action);
+ zfcp_erp_lun_strategy_clearstati(sdev);
+ if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+ return zfcp_erp_lun_strategy_close(erp_action);
/* already closed, fall through */
- case ZFCP_ERP_STEP_UNIT_CLOSING:
- if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
+ case ZFCP_ERP_STEP_LUN_CLOSING:
+ if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
- return zfcp_erp_unit_strategy_open(erp_action);
+ return zfcp_erp_lun_strategy_open(erp_action);
- case ZFCP_ERP_STEP_UNIT_OPENING:
- if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
+ case ZFCP_ERP_STEP_LUN_OPENING:
+ if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_SUCCEEDED;
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
+static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
{
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (result) {
case ZFCP_ERP_SUCCEEDED :
- atomic_set(&unit->erp_counter, 0);
- zfcp_erp_unit_unblock(unit);
+ atomic_set(&zfcp_sdev->erp_counter, 0);
+ zfcp_erp_lun_unblock(sdev);
break;
case ZFCP_ERP_FAILED :
- atomic_inc(&unit->erp_counter);
- if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) {
- dev_err(&unit->port->adapter->ccw_device->dev,
- "ERP failed for unit 0x%016Lx on "
+ atomic_inc(&zfcp_sdev->erp_counter);
+ if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
+ dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
+ "ERP failed for LUN 0x%016Lx on "
"port 0x%016Lx\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, "erusck1", NULL);
+ (unsigned long long)zfcp_scsi_dev_lun(sdev),
+ (unsigned long long)zfcp_sdev->port->wwpn);
+ zfcp_erp_set_lun_status(sdev,
+ ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
}
- if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
- zfcp_erp_unit_block(unit, 0);
+ if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+ zfcp_erp_lun_block(sdev, 0);
result = ZFCP_ERP_EXIT;
}
return result;
@@ -1026,7 +1054,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
dev_err(&port->adapter->ccw_device->dev,
"ERP failed for remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
- zfcp_erp_port_failed(port, "erpsck1", NULL);
+ zfcp_erp_set_port_status(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
}
@@ -1053,7 +1082,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
dev_err(&adapter->ccw_device->dev,
"ERP cannot recover an error "
"on the FCP device\n");
- zfcp_erp_adapter_failed(adapter, "erasck1", NULL);
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
}
@@ -1070,12 +1100,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_port *port = erp_action->port;
- struct zfcp_unit *unit = erp_action->unit;
+ struct scsi_device *sdev = erp_action->sdev;
switch (erp_action->action) {
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
- result = zfcp_erp_strategy_check_unit(unit, result);
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ result = zfcp_erp_strategy_check_lun(sdev, result);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1110,7 +1140,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
int action = act->action;
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
- struct zfcp_unit *unit = act->unit;
+ struct scsi_device *sdev = act->sdev;
+ struct zfcp_scsi_dev *zfcp_sdev;
u32 erp_status = act->status;
switch (action) {
@@ -1118,7 +1149,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
_zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED,
- "ersscg1", NULL);
+ "ersscg1");
return ZFCP_ERP_EXIT;
}
break;
@@ -1128,16 +1159,17 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
_zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
- "ersscg2", NULL);
+ "ersscg2");
return ZFCP_ERP_EXIT;
}
break;
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
- if (zfcp_erp_strat_change_det(&unit->status, erp_status)) {
- _zfcp_erp_unit_reopen(unit,
- ZFCP_STATUS_COMMON_ERP_FAILED,
- "ersscg3", NULL);
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ zfcp_sdev = sdev_to_zfcp(sdev);
+ if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
+ _zfcp_erp_lun_reopen(sdev,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "ersscg3", 0);
return ZFCP_ERP_EXIT;
}
break;
@@ -1148,6 +1180,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_scsi_dev *zfcp_sdev;
adapter->erp_total_count--;
if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
@@ -1156,12 +1189,13 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
}
list_del(&erp_action->list);
- zfcp_rec_dbf_event_action("eractd1", erp_action);
+ zfcp_dbf_rec_run("eractd1", erp_action);
switch (erp_action->action) {
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
- &erp_action->unit->status);
+ &zfcp_sdev->status);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1181,33 +1215,31 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
- struct zfcp_unit *unit = act->unit;
+ struct scsi_device *sdev = act->sdev;
switch (act->action) {
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
- if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
- zfcp_unit_get(unit);
- if (scsi_queue_work(unit->port->adapter->scsi_host,
- &unit->scsi_work) <= 0)
- zfcp_unit_put(unit);
- }
- zfcp_unit_put(unit);
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
+ scsi_device_put(sdev);
break;
- case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_scsi_schedule_rport_register(port);
- zfcp_port_put(port);
+ /* fall through */
+ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ put_device(&port->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
- schedule_work(&adapter->scan_work);
+ zfcp_fc_conditional_port_scan(adapter);
+ queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);
- zfcp_adapter_put(adapter);
+
+ kref_put(&adapter->ref, zfcp_adapter_release);
break;
}
}
@@ -1221,8 +1253,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
return zfcp_erp_port_forced_strategy(erp_action);
case ZFCP_ERP_ACTION_REOPEN_PORT:
return zfcp_erp_port_strategy(erp_action);
- case ZFCP_ERP_ACTION_REOPEN_UNIT:
- return zfcp_erp_unit_strategy(erp_action);
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ return zfcp_erp_lun_strategy(erp_action);
}
return ZFCP_ERP_FAILED;
}
@@ -1230,12 +1262,12 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
{
int retval;
- struct zfcp_adapter *adapter = erp_action->adapter;
unsigned long flags;
+ struct zfcp_adapter *adapter = erp_action->adapter;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
+ kref_get(&adapter->ref);
+ write_lock_irqsave(&adapter->erp_lock, flags);
zfcp_erp_strategy_check_fsfreq(erp_action);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
@@ -1244,14 +1276,17 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
goto unlock;
}
+ if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+ retval = ZFCP_ERP_FAILED;
+ goto check_target;
+ }
+
zfcp_erp_action_to_running(erp_action);
/* no lock to allow for blocking operations */
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
retval = zfcp_erp_strategy_do_action(erp_action);
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- write_lock(&adapter->erp_lock);
+ write_lock_irqsave(&adapter->erp_lock, flags);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
retval = ZFCP_ERP_CONTINUES;
@@ -1263,7 +1298,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
}
if (adapter->erp_total_count == adapter->erp_low_mem_count)
- _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
+ _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
else {
zfcp_erp_strategy_memwait(erp_action);
retval = ZFCP_ERP_CONTINUES;
@@ -1278,20 +1313,24 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
goto unlock;
}
+check_target:
retval = zfcp_erp_strategy_check_target(erp_action, retval);
zfcp_erp_action_dequeue(erp_action);
retval = zfcp_erp_strategy_statechange(erp_action, retval);
if (retval == ZFCP_ERP_EXIT)
goto unlock;
- zfcp_erp_strategy_followup_actions(erp_action);
+ if (retval == ZFCP_ERP_SUCCEEDED)
+ zfcp_erp_strategy_followup_success(erp_action);
+ if (retval == ZFCP_ERP_FAILED)
+ zfcp_erp_strategy_followup_failed(erp_action);
unlock:
- write_unlock(&adapter->erp_lock);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
if (retval != ZFCP_ERP_CONTINUES)
zfcp_erp_action_cleanup(erp_action, retval);
+ kref_put(&adapter->ref, zfcp_adapter_release);
return retval;
}
@@ -1301,20 +1340,14 @@ static int zfcp_erp_thread(void *data)
struct list_head *next;
struct zfcp_erp_action *act;
unsigned long flags;
- int ignore;
- daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
- /* Block all signals */
- siginitsetinv(&current->blocked, 0);
- atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
- wake_up(&adapter->erp_thread_wqh);
+ for (;;) {
+ wait_event_interruptible(adapter->erp_ready_wq,
+ !list_empty(&adapter->erp_ready_head) ||
+ kthread_should_stop());
- while (!(atomic_read(&adapter->status) &
- ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {
-
- zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
- ignore = down_interruptible(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
+ if (kthread_should_stop())
+ break;
write_lock_irqsave(&adapter->erp_lock, flags);
next = adapter->erp_ready_head.next;
@@ -1329,9 +1362,6 @@ static int zfcp_erp_thread(void *data)
}
}
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
- wake_up(&adapter->erp_thread_wqh);
-
return 0;
}
@@ -1343,18 +1373,17 @@ static int zfcp_erp_thread(void *data)
*/
int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
{
- int retval;
+ struct task_struct *thread;
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
- retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
- if (retval < 0) {
+ thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
+ dev_name(&adapter->ccw_device->dev));
+ if (IS_ERR(thread)) {
dev_err(&adapter->ccw_device->dev,
"Creating an ERP thread for the FCP device failed.\n");
- return retval;
+ return PTR_ERR(thread);
}
- wait_event(adapter->erp_thread_wqh,
- atomic_read(&adapter->status) &
- ZFCP_STATUS_ADAPTER_ERP_THREAD_UP);
+
+ adapter->erp_thread = thread;
return 0;
}
@@ -1369,52 +1398,10 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
*/
void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
{
- atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
- up(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock("erthrk1", adapter);
-
- wait_event(adapter->erp_thread_wqh,
- !(atomic_read(&adapter->status) &
- ZFCP_STATUS_ADAPTER_ERP_THREAD_UP));
-
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
- &adapter->status);
-}
-
-/**
- * zfcp_erp_adapter_failed - Set adapter status to failed.
- * @adapter: Failed adapter.
- * @id: Event id for debug trace.
- * @ref: Reference for debug trace.
- */
-void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref)
-{
- zfcp_erp_modify_adapter_status(adapter, id, ref,
- ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-}
-
-/**
- * zfcp_erp_port_failed - Set port status to failed.
- * @port: Failed port.
- * @id: Event id for debug trace.
- * @ref: Reference for debug trace.
- */
-void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref)
-{
- zfcp_erp_modify_port_status(port, id, ref,
- ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-}
-
-/**
- * zfcp_erp_unit_failed - Set unit status to failed.
- * @unit: Failed unit.
- * @id: Event id for debug trace.
- * @ref: Reference for debug trace.
- */
-void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref)
-{
- zfcp_erp_modify_unit_status(unit, id, ref,
- ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
+ kthread_stop(adapter->erp_thread);
+ adapter->erp_thread = NULL;
+ WARN_ON(!list_empty(&adapter->erp_ready_head));
+ WARN_ON(!list_empty(&adapter->erp_running_head));
}
/**
@@ -1429,207 +1416,158 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter)
}
/**
- * zfcp_erp_modify_adapter_status - change adapter status bits
+ * zfcp_erp_set_adapter_status - set adapter status bits
* @adapter: adapter to change the status
- * @id: id for the debug trace
- * @ref: reference for the debug trace
* @mask: status bits to change
- * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
*
- * Changes in common status bits are propagated to attached ports and units.
+ * Changes in common status bits are propagated to attached ports and LUNs.
*/
-void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
- void *ref, u32 mask, int set_or_clear)
+void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
{
struct zfcp_port *port;
+ struct scsi_device *sdev;
+ unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
- if (set_or_clear == ZFCP_SET) {
- if (status_change_set(mask, &adapter->status))
- zfcp_rec_dbf_event_adapter(id, ref, adapter);
- atomic_set_mask(mask, &adapter->status);
- } else {
- if (status_change_clear(mask, &adapter->status))
- zfcp_rec_dbf_event_adapter(id, ref, adapter);
- atomic_clear_mask(mask, &adapter->status);
- if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
- atomic_set(&adapter->erp_counter, 0);
- }
+ atomic_set_mask(mask, &adapter->status);
+
+ if (!common_mask)
+ return;
- if (common_mask)
- list_for_each_entry(port, &adapter->port_list_head, list)
- zfcp_erp_modify_port_status(port, id, ref, common_mask,
- set_or_clear);
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
+ atomic_set_mask(common_mask, &port->status);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host)
+ atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
- * zfcp_erp_modify_port_status - change port status bits
- * @port: port to change the status bits
- * @id: id for the debug trace
- * @ref: reference for the debug trace
+ * zfcp_erp_clear_adapter_status - clear adapter status bits
+ * @adapter: adapter to change the status
* @mask: status bits to change
- * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
*
- * Changes in common status bits are propagated to attached units.
+ * Changes in common status bits are propagated to attached ports and LUNs.
*/
-void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
- u32 mask, int set_or_clear)
+void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
{
- struct zfcp_unit *unit;
+ struct zfcp_port *port;
+ struct scsi_device *sdev;
+ unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+
+ atomic_clear_mask(mask, &adapter->status);
+
+ if (!common_mask)
+ return;
- if (set_or_clear == ZFCP_SET) {
- if (status_change_set(mask, &port->status))
- zfcp_rec_dbf_event_port(id, ref, port);
- atomic_set_mask(mask, &port->status);
- } else {
- if (status_change_clear(mask, &port->status))
- zfcp_rec_dbf_event_port(id, ref, port);
- atomic_clear_mask(mask, &port->status);
- if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+ if (clear_counter)
+ atomic_set(&adapter->erp_counter, 0);
+
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ atomic_clear_mask(common_mask, &port->status);
+ if (clear_counter)
atomic_set(&port->erp_counter, 0);
}
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
- if (common_mask)
- list_for_each_entry(unit, &port->unit_list_head, list)
- zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
- set_or_clear);
-}
-
-/**
- * zfcp_erp_modify_unit_status - change unit status bits
- * @unit: unit to change the status bits
- * @id: id for the debug trace
- * @ref: reference for the debug trace
- * @mask: status bits to change
- * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
- */
-void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
- u32 mask, int set_or_clear)
-{
- if (set_or_clear == ZFCP_SET) {
- if (status_change_set(mask, &unit->status))
- zfcp_rec_dbf_event_unit(id, ref, unit);
- atomic_set_mask(mask, &unit->status);
- } else {
- if (status_change_clear(mask, &unit->status))
- zfcp_rec_dbf_event_unit(id, ref, unit);
- atomic_clear_mask(mask, &unit->status);
- if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
- atomic_set(&unit->erp_counter, 0);
- }
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
+ atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
- * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
- * @port: The "boxed" port.
- * @id: The debug trace id.
- * @id: Reference for the debug trace.
+ * zfcp_erp_set_port_status - set port status bits
+ * @port: port to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached LUNs.
*/
-void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
+void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- zfcp_erp_modify_port_status(port, id, ref,
- ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
- zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
-}
+ atomic_set_mask(mask, &port->status);
-/**
- * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
- * @port: The "boxed" unit.
- * @id: The debug trace id.
- * @id: Reference for the debug trace.
- */
-void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
-{
- zfcp_erp_modify_unit_status(unit, id, ref,
- ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
- zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
+ if (!common_mask)
+ return;
+
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_set_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
- * zfcp_erp_port_access_denied - Adapter denied access to port.
- * @port: port where access has been denied
- * @id: id for debug trace
- * @ref: reference for debug trace
+ * zfcp_erp_clear_port_status - clear port status bits
+ * @port: port to change the status
+ * @mask: status bits to change
*
- * Since the adapter has denied access, stop using the port and the
- * attached units.
+ * Changes in common status bits are propagated to attached LUNs.
*/
-void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
+void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
{
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- zfcp_erp_modify_port_status(port, id, ref,
- ZFCP_STATUS_COMMON_ERP_FAILED |
- ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-}
-
-/**
- * zfcp_erp_unit_access_denied - Adapter denied access to unit.
- * @unit: unit where access has been denied
- * @id: id for debug trace
- * @ref: reference for debug trace
- *
- * Since the adapter has denied access, stop using the unit.
- */
-void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref)
-{
- zfcp_erp_modify_unit_status(unit, id, ref,
- ZFCP_STATUS_COMMON_ERP_FAILED |
- ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
-}
+ atomic_clear_mask(mask, &port->status);
-static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id,
- void *ref)
-{
- int status = atomic_read(&unit->status);
- if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_COMMON_ACCESS_BOXED)))
+ if (!common_mask)
return;
- zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
+ if (clear_counter)
+ atomic_set(&port->erp_counter, 0);
+
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port) {
+ atomic_clear_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
-static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
- void *ref)
+/**
+ * zfcp_erp_set_lun_status - set lun status bits
+ * @sdev: SCSI device / lun to set the status bits
+ * @mask: status bits to change
+ */
+void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
{
- struct zfcp_unit *unit;
- int status = atomic_read(&port->status);
-
- if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
- list_for_each_entry(unit, &port->unit_list_head, list)
- zfcp_erp_unit_access_changed(unit, id, ref);
- return;
- }
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
+ atomic_set_mask(mask, &zfcp_sdev->status);
}
/**
- * zfcp_erp_adapter_access_changed - Process change in adapter ACT
- * @adapter: Adapter where the Access Control Table (ACT) changed
- * @id: Id for debug trace
- * @ref: Reference for debug trace
+ * zfcp_erp_clear_lun_status - clear lun status bits
+ * @sdev: SCSI device / lun to clear the status bits
+ * @mask: status bits to change
*/
-void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
- void *ref)
+void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
{
- struct zfcp_port *port;
- unsigned long flags;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
- return;
+ atomic_clear_mask(mask, &zfcp_sdev->status);
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &adapter->port_list_head, list)
- zfcp_erp_port_access_changed(port, id, ref);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+ atomic_set(&zfcp_sdev->erp_counter, 0);
}
+
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2e31b536548..a9c570a09b8 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,169 +3,157 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_EXT_H
#define ZFCP_EXT_H
+#include <linux/types.h>
+#include <scsi/fc/fc_els.h>
#include "zfcp_def.h"
+#include "zfcp_fc.h"
/* zfcp_aux.c */
-extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
-extern int zfcp_adapter_enqueue(struct ccw_device *);
-extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
+extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
-extern void zfcp_port_dequeue(struct zfcp_port *);
-extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
-extern void zfcp_unit_dequeue(struct zfcp_unit *);
-extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
+extern void zfcp_adapter_release(struct kref *);
+extern void zfcp_adapter_unregister(struct zfcp_adapter *);
/* zfcp_ccw.c */
-extern int zfcp_ccw_register(void);
-extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
-
-/* zfcp_cfdc.c */
-extern struct miscdevice zfcp_cfdc_misc;
+extern struct ccw_driver zfcp_ccw_driver;
+extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
+extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
/* zfcp_dbf.c */
-extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
-extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *);
-extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *);
-extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *,
- struct zfcp_adapter *,
- struct zfcp_port *, struct zfcp_unit *);
-extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *);
-extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
-extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
- struct fsf_status_read_buffer *);
-extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
- int);
-extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
- struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
-extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
- struct scsi_cmnd *,
- struct zfcp_fsf_req *);
-extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
- struct scsi_cmnd *, struct zfcp_fsf_req *,
- unsigned long);
-extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
- struct scsi_cmnd *);
+extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
+extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+ struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
/* zfcp_erp.c */
-extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
- void *, u32, int);
-extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
-extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
- void *);
-extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *);
-extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32,
- int);
-extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
- void *);
-extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *);
-extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32,
- int);
-extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *);
-extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *);
-extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
+extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
+extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
+extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
-extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
-extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
-extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
-extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
-extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
- void *);
extern void zfcp_erp_timeout_handler(unsigned long);
-extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
/* zfcp_fc.c */
-extern int zfcp_scan_ports(struct zfcp_adapter *);
-extern void _zfcp_scan_ports_later(struct work_struct *);
+extern struct kmem_cache *zfcp_fc_req_cache;
+extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
+ enum fc_host_event_code event_code, u32);
+extern void zfcp_fc_post_event(struct work_struct *);
+extern void zfcp_fc_scan_ports(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
-extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
-extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
-extern void zfcp_test_link(struct zfcp_port *);
+extern void zfcp_fc_port_did_lookup(struct work_struct *);
+extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
+extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
+extern void zfcp_fc_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
-extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
-extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
+extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
+extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
+extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
+extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
+extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
+extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
/* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
-extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
-extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
+extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
+extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
-extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
-extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_config *);
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_port *);
-extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
- struct zfcp_fsf_cfdc *);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
-extern int zfcp_fsf_status_read(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
-extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
- struct zfcp_erp_action *);
-extern int zfcp_fsf_send_els(struct zfcp_send_els *);
-extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
- struct scsi_cmnd *);
-extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
+extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
+ mempool_t *, unsigned int);
+extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
+ struct zfcp_fsf_ct_els *, unsigned int);
+extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
-extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
-extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
- struct zfcp_unit *);
+extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
+extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
+extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
-extern int zfcp_qdio_allocate(struct zfcp_adapter *);
-extern void zfcp_qdio_free(struct zfcp_adapter *);
-extern int zfcp_qdio_send(struct zfcp_fsf_req *);
-extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
-extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
- struct scatterlist *, int);
-extern int zfcp_qdio_open(struct zfcp_adapter *);
-extern void zfcp_qdio_close(struct zfcp_adapter *);
+extern int zfcp_qdio_setup(struct zfcp_adapter *);
+extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
+ struct scatterlist *);
+extern int zfcp_qdio_open(struct zfcp_qdio *);
+extern void zfcp_qdio_close(struct zfcp_qdio *);
+extern void zfcp_qdio_siosl(struct zfcp_adapter *);
/* zfcp_scsi.c */
-extern struct zfcp_data zfcp_data;
-extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
-extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
-extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
-extern void zfcp_scsi_scan(struct work_struct *);
+extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
+extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
/* zfcp_sysfs.c */
-extern struct attribute_group zfcp_sysfs_unit_attrs;
+extern const struct attribute_group *zfcp_unit_attr_groups[];
extern struct attribute_group zfcp_sysfs_adapter_attrs;
-extern struct attribute_group zfcp_sysfs_port_attrs;
+extern const struct attribute_group *zfcp_port_attr_groups[];
+extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+/* zfcp_unit.c */
+extern int zfcp_unit_add(struct zfcp_port *, u64);
+extern int zfcp_unit_remove(struct zfcp_port *, u64);
+extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
+extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
+extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
+extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
+extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
+
#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 19ae0842047..ca28e1c6611 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,219 +3,266 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
-enum rscn_address_format {
- RSCN_PORT_ADDRESS = 0x0,
- RSCN_AREA_ADDRESS = 0x1,
- RSCN_DOMAIN_ADDRESS = 0x2,
- RSCN_FABRIC_ADDRESS = 0x3,
-};
+struct kmem_cache *zfcp_fc_req_cache;
-static u32 rscn_range_mask[] = {
- [RSCN_PORT_ADDRESS] = 0xFFFFFF,
- [RSCN_AREA_ADDRESS] = 0xFFFF00,
- [RSCN_DOMAIN_ADDRESS] = 0xFF0000,
- [RSCN_FABRIC_ADDRESS] = 0x000000,
+static u32 zfcp_fc_rscn_range_mask[] = {
+ [ELS_ADDR_FMT_PORT] = 0xFFFFFF,
+ [ELS_ADDR_FMT_AREA] = 0xFFFF00,
+ [ELS_ADDR_FMT_DOM] = 0xFF0000,
+ [ELS_ADDR_FMT_FAB] = 0x000000,
};
-struct ct_iu_gpn_ft_req {
- struct ct_hdr header;
- u8 flags;
- u8 domain_id_scope;
- u8 area_id_scope;
- u8 fc4_type;
-} __attribute__ ((packed));
-
-struct gpn_ft_resp_acc {
- u8 control;
- u8 port_id[3];
- u8 reserved[4];
- u64 wwpn;
-} __attribute__ ((packed));
-
-#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
-#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
- / sizeof(struct gpn_ft_resp_acc))
-#define ZFCP_GPN_FT_BUFFERS 4
-#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
- - sizeof(struct ct_hdr))
-#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
-
-struct ct_iu_gpn_ft_resp {
- struct ct_hdr header;
- struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
-} __attribute__ ((packed));
-
-struct zfcp_gpn_ft {
- struct zfcp_send_ct ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
-};
+static bool no_auto_port_rescan;
+module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
+MODULE_PARM_DESC(no_auto_port_rescan,
+ "no automatic port_rescan (default off)");
-struct zfcp_fc_ns_handler_data {
- struct completion done;
- void (*handler)(unsigned long);
- unsigned long handler_data;
-};
+void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+ if (no_auto_port_rescan)
+ return;
+
+ queue_work(adapter->work_queue, &adapter->scan_work);
+}
-static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
+void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+ if (!no_auto_port_rescan)
+ return;
+
+ queue_work(adapter->work_queue, &adapter->scan_work);
+}
+
+/**
+ * zfcp_fc_post_event - post event to userspace via fc_transport
+ * @work: work struct with enqueued events
+ */
+void zfcp_fc_post_event(struct work_struct *work)
+{
+ struct zfcp_fc_event *event = NULL, *tmp = NULL;
+ LIST_HEAD(tmp_lh);
+ struct zfcp_fc_events *events = container_of(work,
+ struct zfcp_fc_events, work);
+ struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
+ events);
+
+ spin_lock_bh(&events->list_lock);
+ list_splice_init(&events->list, &tmp_lh);
+ spin_unlock_bh(&events->list_lock);
+
+ list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
+ fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
+ event->code, event->data);
+ list_del(&event->list);
+ kfree(event);
+ }
+
+}
+
+/**
+ * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
+ * @adapter: The adapter where to enqueue the event
+ * @event_code: The event code (as defined in fc_host_event_code in
+ * scsi_transport_fc.h)
+ * @event_data: The event data (e.g. n_port page in case of els)
+ */
+void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
+ enum fc_host_event_code event_code, u32 event_data)
+{
+ struct zfcp_fc_event *event;
+
+ event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->code = event_code;
+ event->data = event_data;
+
+ spin_lock(&adapter->events.list_lock);
+ list_add_tail(&event->list, &adapter->events.list);
+ spin_unlock(&adapter->events.list_lock);
+
+ queue_work(adapter->work_queue, &adapter->events.work);
+}
+
+static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
- if (wka_port->status == ZFCP_WKA_PORT_OFFLINE ||
- wka_port->status == ZFCP_WKA_PORT_CLOSING) {
- wka_port->status = ZFCP_WKA_PORT_OPENING;
+ if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
+ wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
+ wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
if (zfcp_fsf_open_wka_port(wka_port))
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
}
mutex_unlock(&wka_port->mutex);
- wait_event_timeout(
- wka_port->completion_wq,
- wka_port->status == ZFCP_WKA_PORT_ONLINE ||
- wka_port->status == ZFCP_WKA_PORT_OFFLINE,
- HZ >> 1);
+ wait_event(wka_port->completion_wq,
+ wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
- if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
+ if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
return 0;
}
return -EIO;
}
-static void zfcp_wka_port_offline(struct work_struct *work)
+static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
- struct zfcp_wka_port *wka_port =
- container_of(dw, struct zfcp_wka_port, work);
+ struct zfcp_fc_wka_port *wka_port =
+ container_of(dw, struct zfcp_fc_wka_port, work);
mutex_lock(&wka_port->mutex);
if ((atomic_read(&wka_port->refcount) != 0) ||
- (wka_port->status != ZFCP_WKA_PORT_ONLINE))
+ (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
goto out;
- wka_port->status = ZFCP_WKA_PORT_CLOSING;
+ wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
}
out:
mutex_unlock(&wka_port->mutex);
}
-static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
+static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
- /* wait 10 miliseconds, other reqs might pop in */
+ /* wait 10 milliseconds, other reqs might pop in */
schedule_delayed_work(&wka_port->work, HZ / 100);
}
-void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
+static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
+ struct zfcp_adapter *adapter)
{
- struct zfcp_wka_port *wka_port = &adapter->nsp;
-
init_waitqueue_head(&wka_port->completion_wq);
wka_port->adapter = adapter;
- wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
+ wka_port->d_id = d_id;
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
mutex_init(&wka_port->mutex);
- INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
+ INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}
-void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
+static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
cancel_delayed_work_sync(&wka->work);
mutex_lock(&wka->mutex);
- wka->status = ZFCP_WKA_PORT_OFFLINE;
+ wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
mutex_unlock(&wka->mutex);
}
+void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
+{
+ if (!gs)
+ return;
+ zfcp_fc_wka_port_force_offline(&gs->ms);
+ zfcp_fc_wka_port_force_offline(&gs->ts);
+ zfcp_fc_wka_port_force_offline(&gs->ds);
+ zfcp_fc_wka_port_force_offline(&gs->as);
+}
+
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
- struct fcp_rscn_element *elem)
+ struct fc_els_rscn_page *page)
{
unsigned long flags;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct zfcp_port *port;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &fsf_req->adapter->port_list_head, list)
- if ((port->d_id & range) == (elem->nport_did & range))
- zfcp_test_link(port);
-
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
+ zfcp_fc_test_link(port);
+ if (!port->d_id)
+ zfcp_erp_port_reopen(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcrscn1");
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
- struct fcp_rscn_head *fcp_rscn_head;
- struct fcp_rscn_element *fcp_rscn_element;
+ struct fc_els_rscn *head;
+ struct fc_els_rscn_page *page;
u16 i;
u16 no_entries;
- u32 range_mask;
+ unsigned int afmt;
- fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data;
- fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head;
+ head = (struct fc_els_rscn *) status_buffer->payload.data;
+ page = (struct fc_els_rscn_page *) head;
/* see FC-FS */
- no_entries = fcp_rscn_head->payload_len /
- sizeof(struct fcp_rscn_element);
+ no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
- fcp_rscn_element++;
- range_mask = rscn_range_mask[fcp_rscn_element->addr_format];
- _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
+ page++;
+ afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
+ _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
+ page);
+ zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
+ *(u32 *)page);
}
- schedule_work(&fsf_req->adapter->scan_work);
+ zfcp_fc_conditional_port_scan(fsf_req->adapter);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
+ unsigned long flags;
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
- unsigned long flags;
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &adapter->port_list_head, list)
- if (port->wwpn == wwpn)
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
+ if (port->wwpn == wwpn) {
+ zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
break;
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
- if (port && (port->wwpn == wwpn))
- zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
- struct fsf_status_read_buffer *status_buffer =
- (struct fsf_status_read_buffer *)req->data;
- struct fsf_plogi *els_plogi =
- (struct fsf_plogi *) status_buffer->payload.data;
+ struct fsf_status_read_buffer *status_buffer;
+ struct fc_els_flogi *plogi;
- zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn);
+ status_buffer = (struct fsf_status_read_buffer *) req->data;
+ plogi = (struct fc_els_flogi *) status_buffer->payload.data;
+ zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *)req->data;
- struct fcp_logo *els_logo =
- (struct fcp_logo *) status_buffer->payload.data;
+ struct fc_els_logo *logo =
+ (struct fc_els_logo *) status_buffer->payload.data;
- zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn);
+ zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}
/**
@@ -228,114 +275,138 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
(struct fsf_status_read_buffer *) fsf_req->data;
unsigned int els_type = status_buffer->payload.data[0];
- zfcp_san_dbf_event_incoming_els(fsf_req);
- if (els_type == LS_PLOGI)
+ zfcp_dbf_san_in_els("fciels1", fsf_req);
+ if (els_type == ELS_PLOGI)
zfcp_fc_incoming_plogi(fsf_req);
- else if (els_type == LS_LOGO)
+ else if (els_type == ELS_LOGO)
zfcp_fc_incoming_logo(fsf_req);
- else if (els_type == LS_RSCN)
+ else if (els_type == ELS_RSCN)
zfcp_fc_incoming_rscn(fsf_req);
}
-static void zfcp_fc_ns_handler(unsigned long data)
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
- struct zfcp_fc_ns_handler_data *compl_rec =
- (struct zfcp_fc_ns_handler_data *) data;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
- if (compl_rec->handler)
- compl_rec->handler(compl_rec->handler_data);
+ if (ct_els->status)
+ return;
+ if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
+ return;
- complete(&compl_rec->done);
+ /* looks like a valid d_id */
+ ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
-static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
+static void zfcp_fc_complete(void *data)
{
- struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
- struct zfcp_send_ct *ct = &gid_pn->ct;
- struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
- struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
- struct zfcp_port *port = gid_pn->port;
-
- if (ct->status)
- return;
- if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT)
- return;
+ complete(data);
+}
- /* paranoia */
- if (ct_iu_req->wwpn != port->wwpn)
- return;
- /* looks like a valid d_id */
- port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+ ct_hdr->ct_rev = FC_CT_REV;
+ ct_hdr->ct_fs_type = FC_FST_DIR;
+ ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ ct_hdr->ct_cmd = cmd;
+ ct_hdr->ct_mr_size = mr_size / 4;
}
-int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
- struct zfcp_gid_pn_data *gid_pn)
+static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
+ struct zfcp_fc_req *fc_req)
{
- struct zfcp_adapter *adapter = erp_action->adapter;
- struct zfcp_fc_ns_handler_data compl_rec;
+ struct zfcp_adapter *adapter = port->adapter;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
int ret;
/* setup parameters for send generic command */
- gid_pn->port = erp_action->port;
- gid_pn->ct.wka_port = &adapter->nsp;
- gid_pn->ct.handler = zfcp_fc_ns_handler;
- gid_pn->ct.handler_data = (unsigned long) &compl_rec;
- gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
- gid_pn->ct.req = &gid_pn->req;
- gid_pn->ct.resp = &gid_pn->resp;
- sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
- sizeof(struct ct_iu_gid_pn_req));
- sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
- sizeof(struct ct_iu_gid_pn_resp));
-
- /* setup nameserver request */
- gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION;
- gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
- gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
- gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
- gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
- gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
- gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
-
- init_completion(&compl_rec.done);
- compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
- compl_rec.handler_data = (unsigned long) gid_pn;
- ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
- erp_action);
- if (!ret)
- wait_for_completion(&compl_rec.done);
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.handler = zfcp_fc_complete;
+ fc_req->ct_els.handler_data = &completion;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+ sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+ zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+ FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+ gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
+ adapter->pool.gid_pn_req,
+ ZFCP_FC_CTELS_TMO);
+ if (!ret) {
+ wait_for_completion(&completion);
+ zfcp_fc_ns_gid_pn_eval(fc_req);
+ }
return ret;
}
/**
- * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
- * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
+ * @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
-int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
+static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
- struct zfcp_gid_pn_data *gid_pn;
- struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_fc_req *fc_req;
+ struct zfcp_adapter *adapter = port->adapter;
- gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
- if (!gid_pn)
+ fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- memset(gid_pn, 0, sizeof(*gid_pn));
+ memset(fc_req, 0, sizeof(*fc_req));
- ret = zfcp_wka_port_get(&adapter->nsp);
+ ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
- ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
+ ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
- zfcp_wka_port_put(&adapter->nsp);
+ zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
- mempool_free(gid_pn, adapter->pool.data_gid_pn);
+ mempool_free(fc_req, adapter->pool.gid_pn);
return ret;
}
+void zfcp_fc_port_did_lookup(struct work_struct *work)
+{
+ int ret;
+ struct zfcp_port *port = container_of(work, struct zfcp_port,
+ gid_pn_work);
+
+ ret = zfcp_fc_ns_gid_pn(port);
+ if (ret) {
+ /* could not issue gid_pn for some reason */
+ zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
+ goto out;
+ }
+
+ if (!port->d_id) {
+ zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+ goto out;
+ }
+
+ zfcp_erp_port_reopen(port, 0, "fcgpn_3");
+out:
+ put_device(&port->dev);
+}
+
+/**
+ * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
+ * @port: The zfcp_port to lookup the d_id for.
+ */
+void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
+{
+ get_device(&port->dev);
+ if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
+ put_device(&port->dev);
+}
+
/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
* @port: zfcp_port structure
@@ -343,88 +414,97 @@ out:
*
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
*/
-void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
+void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
- port->maxframe_size = plogi->serv_param.common_serv_param[7] |
- ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
- if (plogi->serv_param.class1_serv_param[0] & 0x80)
+ if (plogi->fl_wwpn != port->wwpn) {
+ port->d_id = 0;
+ dev_warn(&port->adapter->ccw_device->dev,
+ "A port opened with WWPN 0x%016Lx returned data that "
+ "identifies it as WWPN 0x%016Lx\n",
+ (unsigned long long) port->wwpn,
+ (unsigned long long) plogi->fl_wwpn);
+ return;
+ }
+
+ port->wwnn = plogi->fl_wwnn;
+ port->maxframe_size = plogi->fl_csp.sp_bb_data;
+
+ if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS1;
- if (plogi->serv_param.class2_serv_param[0] & 0x80)
+ if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS2;
- if (plogi->serv_param.class3_serv_param[0] & 0x80)
+ if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS3;
- if (plogi->serv_param.class4_serv_param[0] & 0x80)
+ if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS4;
}
-struct zfcp_els_adisc {
- struct zfcp_send_els els;
- struct scatterlist req;
- struct scatterlist resp;
- struct zfcp_ls_adisc ls_adisc;
- struct zfcp_ls_adisc ls_adisc_acc;
-};
-
-static void zfcp_fc_adisc_handler(unsigned long data)
+static void zfcp_fc_adisc_handler(void *data)
{
- struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
- struct zfcp_port *port = adisc->els.port;
- struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
+ struct zfcp_fc_req *fc_req = data;
+ struct zfcp_port *port = fc_req->ct_els.port;
+ struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
- if (adisc->els.status) {
+ if (fc_req->ct_els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
- "fcadh_1", NULL);
+ "fcadh_1");
goto out;
}
if (!port->wwnn)
- port->wwnn = ls_adisc->wwnn;
+ port->wwnn = adisc_resp->adisc_wwnn;
- if ((port->wwpn != ls_adisc->wwpn) ||
+ if ((port->wwpn != adisc_resp->adisc_wwpn) ||
!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
- "fcadh_2", NULL);
+ "fcadh_2");
goto out;
}
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
- zfcp_port_put(port);
- kfree(adisc);
+ atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ put_device(&port->dev);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
- struct zfcp_els_adisc *adisc;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
+ int ret;
- adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC);
- if (!adisc)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- adisc->els.req = &adisc->req;
- adisc->els.resp = &adisc->resp;
- sg_init_one(adisc->els.req, &adisc->ls_adisc,
- sizeof(struct zfcp_ls_adisc));
- sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
- sizeof(struct zfcp_ls_adisc));
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
+ sizeof(struct fc_els_adisc));
+ sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
+ sizeof(struct fc_els_adisc));
- adisc->els.adapter = adapter;
- adisc->els.port = port;
- adisc->els.d_id = port->d_id;
- adisc->els.handler = zfcp_fc_adisc_handler;
- adisc->els.handler_data = (unsigned long) adisc;
- adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
+ fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+ fc_req->ct_els.handler_data = fc_req;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host);
- adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host);
- adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host);
+ fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
+ fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+ fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+ hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- return zfcp_fsf_send_els(&adisc->els);
+ ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+
+ return ret;
}
void zfcp_fc_link_test_work(struct work_struct *work)
@@ -433,240 +513,479 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
- zfcp_port_get(port);
+ get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
+ /* only issue one test command at one time per port */
+ if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
+ goto out;
+
+ atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+
retval = zfcp_fc_adisc(port);
if (retval == 0)
return;
/* send of ADISC was not possible */
- zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
+ atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
- zfcp_port_put(port);
+out:
+ put_device(&port->dev);
}
/**
- * zfcp_test_link - lightweight link test procedure
+ * zfcp_fc_test_link - lightweight link test procedure
* @port: port to be tested
*
* Test status of a link to a remote port using the ELS command ADISC.
* If there is a problem with the remote port, error recovery steps
* will be triggered.
*/
-void zfcp_test_link(struct zfcp_port *port)
-{
- zfcp_port_get(port);
- if (!queue_work(zfcp_data.work_queue, &port->test_link_work))
- zfcp_port_put(port);
-}
-
-static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
+void zfcp_fc_test_link(struct zfcp_port *port)
{
- struct scatterlist *sg = &gpn_ft->sg_req;
-
- kfree(sg_virt(sg)); /* free request buffer */
- zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
-
- kfree(gpn_ft);
+ get_device(&port->dev);
+ if (!queue_work(port->adapter->work_queue, &port->test_link_work))
+ put_device(&port->dev);
}
-static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
+static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
- struct zfcp_gpn_ft *gpn_ft;
- struct ct_iu_gpn_ft_req *req;
+ struct zfcp_fc_req *fc_req;
- gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
- if (!gpn_ft)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
return NULL;
- req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
- if (!req) {
- kfree(gpn_ft);
- gpn_ft = NULL;
- goto out;
+ if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+ return NULL;
}
- sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
- if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
- zfcp_free_sg_env(gpn_ft, buf_num);
- gpn_ft = NULL;
- }
-out:
- return gpn_ft;
-}
+ sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+ sizeof(struct zfcp_fc_gpn_ft_req));
+ return fc_req;
+}
-static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
- struct zfcp_adapter *adapter,
- int max_bytes)
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
+ struct zfcp_adapter *adapter, int max_bytes)
{
- struct zfcp_send_ct *ct = &gpn_ft->ct;
- struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
- struct zfcp_fc_ns_handler_data compl_rec;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
+ DECLARE_COMPLETION_ONSTACK(completion);
int ret;
- /* prepare CT IU for GPN_FT */
- req->header.revision = ZFCP_CT_REVISION;
- req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
- req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
- req->header.options = ZFCP_CT_SYNCHRONOUS;
- req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
- req->header.max_res_size = max_bytes / 4;
- req->flags = 0;
- req->domain_id_scope = 0;
- req->area_id_scope = 0;
- req->fc4_type = ZFCP_CT_SCSI_FCP;
-
- /* prepare zfcp_send_ct */
- ct->wka_port = &adapter->nsp;
- ct->handler = zfcp_fc_ns_handler;
- ct->handler_data = (unsigned long)&compl_rec;
- ct->timeout = 10;
- ct->req = &gpn_ft->sg_req;
- ct->resp = gpn_ft->sg_resp;
-
- init_completion(&compl_rec.done);
- compl_rec.handler = NULL;
- ret = zfcp_fsf_send_ct(ct, NULL, NULL);
+ zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
+ req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
if (!ret)
- wait_for_completion(&compl_rec.done);
+ wait_for_completion(&completion);
return ret;
}
-static void zfcp_validate_port(struct zfcp_port *port)
+static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
- struct zfcp_adapter *adapter = port->adapter;
-
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
return;
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
if ((port->supported_classes != 0) ||
- !list_empty(&port->unit_list_head)) {
- zfcp_port_put(port);
+ !list_empty(&port->unit_list))
return;
- }
- zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL);
- zfcp_erp_wait(adapter);
- zfcp_port_put(port);
- zfcp_port_dequeue(port);
+
+ list_move_tail(&port->list, lh);
}
-static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
+ struct zfcp_adapter *adapter, int max_entries)
{
- struct zfcp_send_ct *ct = &gpn_ft->ct;
- struct scatterlist *sg = gpn_ft->sg_resp;
- struct ct_hdr *hdr = sg_virt(sg);
- struct gpn_ft_resp_acc *acc = sg_virt(sg);
- struct zfcp_adapter *adapter = ct->wka_port->adapter;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct scatterlist *sg = &fc_req->sg_rsp;
+ struct fc_ct_hdr *hdr = sg_virt(sg);
+ struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
+ unsigned long flags;
+ LIST_HEAD(remove_lh);
u32 d_id;
int ret = 0, x, last = 0;
- if (ct->status)
+ if (ct_els->status)
return -EIO;
- if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) {
- if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD)
+ if (hdr->ct_cmd != FC_FS_ACC) {
+ if (hdr->ct_reason == FC_BA_RJT_UNABLE)
return -EAGAIN; /* might be a temporary condition */
return -EIO;
}
- if (hdr->max_res_size) {
+ if (hdr->ct_mr_size) {
dev_warn(&adapter->ccw_device->dev,
"The name server reported %d words residual data\n",
- hdr->max_res_size);
+ hdr->ct_mr_size);
return -E2BIG;
}
- down(&zfcp_data.config_sema);
-
/* first entry is the header */
for (x = 1; x < max_entries && !last; x++) {
- if (x % (ZFCP_GPN_FT_ENTRIES + 1))
+ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
acc++;
else
acc = sg_virt(++sg);
- last = acc->control & 0x80;
- d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
- acc->port_id[2];
+ last = acc->fp_flags & FC_NS_FID_LAST;
+ d_id = ntoh24(acc->fp_fid);
/* don't attach ports with a well known address */
- if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
+ if (d_id >= FC_FID_WELL_KNOWN_BASE)
continue;
/* skip the adapter's port and known remote ports */
- if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
- continue;
- port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
- if (port)
+ if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
continue;
- port = zfcp_port_enqueue(adapter, acc->wwpn,
+ port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
ZFCP_STATUS_COMMON_NOESC, d_id);
- if (IS_ERR(port))
+ if (!IS_ERR(port))
+ zfcp_erp_port_reopen(port, 0, "fcegpf1");
+ else if (PTR_ERR(port) != -EEXIST)
ret = PTR_ERR(port);
- else
- zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
}
zfcp_erp_wait(adapter);
- list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
- zfcp_validate_port(port);
- up(&zfcp_data.config_sema);
+ write_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
+ zfcp_fc_validate_port(port, &remove_lh);
+ write_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+ list_for_each_entry_safe(port, tmp, &remove_lh, list) {
+ zfcp_erp_port_shutdown(port, 0, "fcegpf2");
+ device_unregister(&port->dev);
+ }
+
return ret;
}
/**
- * zfcp_scan_ports - scan remote ports and attach new ports
- * @adapter: pointer to struct zfcp_adapter
+ * zfcp_fc_scan_ports - scan remote ports and attach new ports
+ * @work: reference to scheduled work
*/
-int zfcp_scan_ports(struct zfcp_adapter *adapter)
+void zfcp_fc_scan_ports(struct work_struct *work)
{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ scan_work);
int ret, i;
- struct zfcp_gpn_ft *gpn_ft;
+ struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
- buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1;
- max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
- max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
+ buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
+ max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
+ max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
- return 0;
+ return;
- ret = zfcp_wka_port_get(&adapter->nsp);
- if (ret)
- return ret;
+ if (zfcp_fc_wka_port_get(&adapter->gs->ds))
+ return;
- gpn_ft = zfcp_alloc_sg_env(buf_num);
- if (!gpn_ft) {
- ret = -ENOMEM;
+ fc_req = zfcp_alloc_sg_env(buf_num);
+ if (!fc_req)
goto out;
- }
for (i = 0; i < 3; i++) {
- ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes);
+ ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
if (!ret) {
- ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries);
+ ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
- zfcp_free_sg_env(gpn_ft, buf_num);
+ zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
- zfcp_wka_port_put(&adapter->nsp);
+ zfcp_fc_wka_port_put(&adapter->gs->ds);
+}
+
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ char devno[] = "DEVNO:";
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+ struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+ int ret;
+
+ zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+ sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+ sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&completion);
+ if (ct_els->status)
+ return ct_els->status;
+
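+	/* In NPIV mode the WWPN is unique to this Linux image; if the
+	 * fabric-reported symbolic name does not already carry a "DEVNO:"
+	 * tag, extend it with the FCP device bus ID and the host's nodename,
+	 * otherwise keep the reported name unchanged.
+	 */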
+ if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+ !(strstr(gspn_rsp->gspn.fp_name, devno)))
+ snprintf(fc_host_symbolic_name(adapter->scsi_host),
+ FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+ gspn_rsp->gspn.fp_name, devno,
+ dev_name(&adapter->ccw_device->dev),
+ init_utsname()->nodename);
+ else
+ strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+ return 0;
+}
+
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+ struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+ int ret, len;
+
+ zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+ len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+ FC_SYMBOLIC_NAME_SIZE);
+ rspn_req->rspn.fr_name_len = len;
+
+ sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+ sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (!ret)
+ wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter where to update the symbolic port name
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ ns_up_work);
+ int ret;
+ struct zfcp_fc_req *fc_req;
+
+ if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+ fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ return;
+
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
+ return;
+
+ ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+ if (ret)
+ goto out_free;
+
+ ret = zfcp_fc_gspn(adapter, fc_req);
+ if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ goto out_ds_put;
+
+ memset(fc_req, 0, sizeof(*fc_req));
+ zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+ zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
+static void zfcp_fc_ct_els_job_handler(void *data)
+{
+ struct fc_bsg_job *job = data;
+ struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
+ struct fc_bsg_reply *jr = job->reply;
+
+ jr->reply_payload_rcv_len = job->reply_payload.payload_len;
+ jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ jr->result = zfcp_ct_els->status ? -EIO : 0;
+ job->job_done(job);
+}
+
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+{
+ u32 preamble_word1;
+ u8 gs_type;
+ struct zfcp_adapter *adapter;
+
+ preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+ gs_type = (preamble_word1 & 0xff000000) >> 24;
+
+ adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+
+ switch (gs_type) {
+ case FC_FST_ALIAS:
+ return &adapter->gs->as;
+ case FC_FST_MGMT:
+ return &adapter->gs->ms;
+ case FC_FST_TIME:
+ return &adapter->gs->ts;
+ case FC_FST_DIR:
+ return &adapter->gs->ds;
+ default:
+ return NULL;
+ }
+}
+
+static void zfcp_fc_ct_job_handler(void *data)
+{
+ struct fc_bsg_job *job = data;
+ struct zfcp_fc_wka_port *wka_port;
+
+ wka_port = zfcp_fc_job_wka_port(job);
+ zfcp_fc_wka_port_put(wka_port);
+
+ zfcp_fc_ct_els_job_handler(data);
+}
+
+static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+ struct zfcp_adapter *adapter)
+{
+ struct zfcp_fsf_ct_els *els = job->dd_data;
+ struct fc_rport *rport = job->rport;
+ struct zfcp_port *port;
+ u32 d_id;
+
+ if (rport) {
+ port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+ if (!port)
+ return -EINVAL;
+
+ d_id = port->d_id;
+ put_device(&port->dev);
+ } else
+ d_id = ntoh24(job->request->rqst_data.h_els.port_id);
+
+ els->handler = zfcp_fc_ct_els_job_handler;
+ return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
+}
+
+static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+ struct zfcp_adapter *adapter)
+{
+ int ret;
+ struct zfcp_fsf_ct_els *ct = job->dd_data;
+ struct zfcp_fc_wka_port *wka_port;
+
+ wka_port = zfcp_fc_job_wka_port(job);
+ if (!wka_port)
+ return -EINVAL;
+
+ ret = zfcp_fc_wka_port_get(wka_port);
+ if (ret)
+ return ret;
+
+ ct->handler = zfcp_fc_ct_job_handler;
+ ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
+ if (ret)
+ zfcp_fc_wka_port_put(wka_port);
+
return ret;
}
+int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct zfcp_adapter *adapter;
+ struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+
+ shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+ adapter = (struct zfcp_adapter *)shost->hostdata[0];
+
+ if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
+ return -EINVAL;
+
+ ct_els->req = job->request_payload.sg_list;
+ ct_els->resp = job->reply_payload.sg_list;
+ ct_els->handler_data = job;
+
+ switch (job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ return zfcp_fc_exec_els_job(job, adapter);
+ case FC_BSG_RPT_CT:
+ case FC_BSG_HST_CT:
+ return zfcp_fc_exec_ct_job(job, adapter);
+ default:
+ return -EINVAL;
+ }
+}
+
+int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+{
+ /* hardware tracks timeout, reset bsg timeout to not interfere */
+ return -EAGAIN;
+}
+
+int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
+{
+ struct zfcp_fc_wka_ports *wka_ports;
+
+ wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
+ if (!wka_ports)
+ return -ENOMEM;
+
+ adapter->gs = wka_ports;
+ zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
+ zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
+ zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
+ zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
-void _zfcp_scan_ports_later(struct work_struct *work)
+ return 0;
+}
+
+void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
- zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
+ kfree(adapter->gs);
+ adapter->gs = NULL;
}
+
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 00000000000..b1d2024ed51
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,297 @@
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related definitions and inline functions for the zfcp
+ * device driver
+ *
+ * Copyright IBM Corp. 2009
+ */
+
+#ifndef ZFCP_FC_H
+#define ZFCP_FC_H
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "zfcp_fsf.h"
+
+#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \
+ / sizeof(struct fc_gpn_ft_resp))
+#define ZFCP_FC_GPN_FT_NUM_BUFS 4 /* memory pages */
+
+#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
+ - sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
+ (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
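+
+/* Sizing note: assuming 4 KiB pages (the usual s390 PAGE_SIZE) and the
+ * 16-byte fc_ct_hdr and fc_gpn_ft_resp layouts, one page holds 255 GPN_FT
+ * response entries, so the four chained buffers cover up to 1024 entry
+ * slots (ZFCP_FC_GPN_FT_MAX_ENT) and 16368 bytes of CT payload
+ * (ZFCP_FC_GPN_FT_MAX_SIZE).
+ */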
+
+#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
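+/* twice R_A_TOV, in seconds; assuming the common default R_A_TOV of
+ * 10 seconds (FC_DEF_R_A_TOV = 10000 ms) this is a 20 second CT/ELS
+ * timeout.
+ */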
+
+/**
+ * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
+ * @code: Event code
+ * @data: Event data
+ * @list: list_head for zfcp_fc_events list
+ */
+struct zfcp_fc_event {
+ enum fc_host_event_code code;
+ u32 data;
+ struct list_head list;
+};
+
+/**
+ * struct zfcp_fc_events - Infrastructure for posting FC events from irq context
+ * @list: List for queueing of events from irq context to workqueue
+ * @list_lock: Lock for event list
+ * @work: work_struct for forwarding events in workqueue
+ */
+struct zfcp_fc_events {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct work_struct work;
+};
+
+/**
+ * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN request
+ */
+struct zfcp_fc_gid_pn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_gid_pn gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN response
+ */
+struct zfcp_fc_gid_pn_rsp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gid_pn_resp gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
+ * @ct_hdr: FC GS common transport header
+ * @gpn_ft: GPN_FT request
+ */
+struct zfcp_fc_gpn_ft_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_gid_ft gpn_ft;
+} __packed;
+
+/**
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID request
+ */
+struct zfcp_fc_gspn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gid_pn_resp gspn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
+ */
+struct zfcp_fc_gspn_rsp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gspn_resp gspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
+ */
+struct zfcp_fc_rspn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_rspn rspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+ struct zfcp_fsf_ct_els ct_els;
+ struct scatterlist sg_req;
+ struct scatterlist sg_rsp;
+ union {
+ struct {
+ struct fc_els_adisc req;
+ struct fc_els_adisc rsp;
+ } adisc;
+ struct {
+ struct zfcp_fc_gid_pn_req req;
+ struct zfcp_fc_gid_pn_rsp rsp;
+ } gid_pn;
+ struct {
+ struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+ struct zfcp_fc_gpn_ft_req req;
+ } gpn_ft;
+ struct {
+ struct zfcp_fc_gspn_req req;
+ struct zfcp_fc_gspn_rsp rsp;
+ } gspn;
+ struct {
+ struct zfcp_fc_rspn_req req;
+ struct fc_ct_hdr rsp;
+ } rspn;
+ } u;
+};
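+
+/* A zfcp_fc_req bundles the CT/ELS bookkeeping, both scatterlist entries
+ * and the request specific payload in a single allocation; instances come
+ * from zfcp_fc_req_cache, or from the adapter's gid_pn mempool for GID_PN
+ * requests issued with GFP_ATOMIC.
+ */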
+
+/**
+ * enum zfcp_fc_wka_status - FC WKA port status in zfcp
+ * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
+ * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
+ * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
+ * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
+ */
+enum zfcp_fc_wka_status {
+ ZFCP_FC_WKA_PORT_OFFLINE,
+ ZFCP_FC_WKA_PORT_CLOSING,
+ ZFCP_FC_WKA_PORT_OPENING,
+ ZFCP_FC_WKA_PORT_ONLINE,
+};
+
+/**
+ * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
+ * @adapter: Pointer to adapter structure this WKA port belongs to
+ * @completion_wq: Wait for completion of open/close command
+ * @status: Current status of WKA port
+ * @refcount: Reference count to keep port open as long as it is in use
+ * @d_id: FC destination id or well-known-address
+ * @handle: FSF handle for the open WKA port
+ * @mutex: Mutex used during opening/closing state changes
+ * @work: For delaying the closing of the WKA port
+ */
+struct zfcp_fc_wka_port {
+ struct zfcp_adapter *adapter;
+ wait_queue_head_t completion_wq;
+ enum zfcp_fc_wka_status status;
+ atomic_t refcount;
+ u32 d_id;
+ u32 handle;
+ struct mutex mutex;
+ struct delayed_work work;
+};
+
+/**
+ * struct zfcp_fc_wka_ports - Data structures for FC generic services
+ * @ms: FC Management service
+ * @ts: FC time service
+ * @ds: FC directory service
+ * @as: FC alias service
+ */
+struct zfcp_fc_wka_ports {
+ struct zfcp_fc_wka_port ms;
+ struct zfcp_fc_wka_port ts;
+ struct zfcp_fc_wka_port ds;
+ struct zfcp_fc_wka_port as;
+};
+
+/**
+ * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
+ * @fcp: fcp_cmnd to setup
+ * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ * @tm_flags: task management flags to setup task management command
+ */
+static inline
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
+ u8 tm_flags)
+{
+ char tag[2];
+
+ int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+
+ if (unlikely(tm_flags)) {
+ fcp->fc_tm_flags = tm_flags;
+ return;
+ }
+
+ if (scsi_populate_tag_msg(scsi, tag)) {
+ switch (tag[0]) {
+ case MSG_ORDERED_TAG:
+ fcp->fc_pri_ta |= FCP_PTA_ORDERED;
+ break;
+ case MSG_SIMPLE_TAG:
+ fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
+ break;
+		}
+ } else
+ fcp->fc_pri_ta = FCP_PTA_SIMPLE;
+
+ if (scsi->sc_data_direction == DMA_FROM_DEVICE)
+ fcp->fc_flags |= FCP_CFL_RDDATA;
+ if (scsi->sc_data_direction == DMA_TO_DEVICE)
+ fcp->fc_flags |= FCP_CFL_WRDATA;
+
+ memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
+
+ fcp->fc_dl = scsi_bufflen(scsi);
+
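+	/* DIF type 1 adds 8 bytes of protection information per logical
+	 * block, so grow the FCP data length accordingly.
+	 */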
+ if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
+ fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
+}
+
+/**
+ * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
+ * @fcp_rsp: FCP RSP IU to evaluate
+ * @scsi: SCSI command where to update status and sense buffer
+ */
+static inline
+void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+ struct scsi_cmnd *scsi)
+{
+ struct fcp_resp_rsp_info *rsp_info;
+ char *sense;
+ u32 sense_len, resid;
+ u8 rsp_flags;
+
+ set_msg_byte(scsi, COMMAND_COMPLETE);
+ scsi->result |= fcp_rsp->resp.fr_status;
+
+ rsp_flags = fcp_rsp->resp.fr_flags;
+
+ if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
+ rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+ if (rsp_info->rsp_code == FCP_TMF_CMPL)
+ set_host_byte(scsi, DID_OK);
+ else {
+ set_host_byte(scsi, DID_ERROR);
+ return;
+ }
+ }
+
+ if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
+ sense = (char *) &fcp_rsp[1];
+ if (rsp_flags & FCP_RSP_LEN_VAL)
+ sense += fcp_rsp->ext.fr_rsp_len;
+ sense_len = min(fcp_rsp->ext.fr_sns_len,
+ (u32) SCSI_SENSE_BUFFERSIZE);
+ memcpy(scsi->sense_buffer, sense, sense_len);
+ }
+
+ if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
+ resid = fcp_rsp->ext.fr_resid;
+ scsi_set_resid(scsi, resid);
+ if (scsi_bufflen(scsi) - resid < scsi->underflow &&
+ !(rsp_flags & FCP_SNS_LEN_VAL) &&
+ fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
+ }
+}
+
+#endif
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 74dee32afba..0fe8d5d9511 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,23 +3,29 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corp. 2002, 2013
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <linux/slab.h>
+#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+#include "zfcp_dbf.h"
+#include "zfcp_qdio.h"
+#include "zfcp_reqlist.h"
-#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
-#define ZFCP_REQ_NO_QTCB 0x00000008
+struct kmem_cache *zfcp_fsf_qtcb_cache;
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+ zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
- "fsrth_1", NULL);
+ "fsrth_1");
}
static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
@@ -57,50 +63,11 @@ static u32 fsf_qtcb_type[] = {
[FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
};
-static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
-{
- u16 subtable = table >> 16;
- u16 rule = table & 0xffff;
- const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
-
- if (subtable && subtable < ARRAY_SIZE(act_type))
- dev_warn(&adapter->ccw_device->dev,
- "Access denied according to ACT rule type %s, "
- "rule %d\n", act_type[subtable], rule);
-}
-
-static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
- struct zfcp_port *port)
-{
- struct fsf_qtcb_header *header = &req->qtcb->header;
- dev_warn(&req->adapter->ccw_device->dev,
- "Access denied to port 0x%016Lx\n",
- (unsigned long long)port->wwpn);
- zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
- zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
- zfcp_erp_port_access_denied(port, "fspad_1", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-}
-
-static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
- struct zfcp_unit *unit)
-{
- struct fsf_qtcb_header *header = &req->qtcb->header;
- dev_warn(&req->adapter->ccw_device->dev,
- "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
- zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
- zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-}
-
static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
dev_err(&req->adapter->ccw_device->dev, "FCP device not "
"operational because of an unsupported FC class\n");
- zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -111,77 +78,45 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
if (likely(req->pool)) {
+ if (likely(req->qtcb))
+ mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
mempool_free(req, req->pool);
return;
}
- if (req->qtcb) {
- kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req);
- return;
- }
-}
-
-/**
- * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
- * @adapter: pointer to struct zfcp_adapter
- *
- * Never ever call this without shutting down the adapter first.
- * Otherwise the adapter would continue using and corrupting s390 storage.
- * Included BUG_ON() call to ensure this is done.
- * ERP is supposed to be the only user of this function.
- */
-void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
-{
- struct zfcp_fsf_req *req, *tmp;
- unsigned long flags;
- LIST_HEAD(remove_queue);
- unsigned int i;
-
- BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- for (i = 0; i < REQUEST_LIST_SIZE; i++)
- list_splice_init(&adapter->req_list[i], &remove_queue);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
- list_for_each_entry_safe(req, tmp, &remove_queue, list) {
- list_del(&req->list);
- req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
- zfcp_fsf_req_complete(req);
- }
+ if (likely(req->qtcb))
+ kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
+ kfree(req);
}
static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
+ unsigned long flags;
struct fsf_status_read_buffer *sr_buf = req->data;
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
- int d_id = sr_buf->d_id & ZFCP_DID_MASK;
- unsigned long flags;
+ int d_id = ntoh24(sr_buf->d_id);
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &adapter->port_list_head, list)
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
if (port->d_id == d_id) {
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
- zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
- return;
+ zfcp_erp_port_reopen(port, 0, "fssrpc1");
+ break;
}
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
-static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
+static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
struct fsf_link_down_info *link_down)
{
struct zfcp_adapter *adapter = req->adapter;
- unsigned long flags;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
return;
atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
- read_lock_irqsave(&zfcp_data.config_lock, flags);
zfcp_scsi_schedule_rports_block(adapter);
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
if (!link_down)
goto out;
@@ -251,7 +186,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
"the FC fabric is down\n");
}
out:
- zfcp_erp_adapter_failed(adapter, id, req);
+ zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
@@ -262,13 +197,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
switch (sr_buf->status_subtype) {
case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
- zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
+ zfcp_fsf_link_down_info_eval(req, ldi);
break;
case FSF_STATUS_READ_SUB_FDISC_FAILED:
- zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
+ zfcp_fsf_link_down_info_eval(req, ldi);
break;
case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
- zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
+ zfcp_fsf_link_down_info_eval(req, NULL);
};
}
@@ -278,13 +213,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
struct fsf_status_read_buffer *sr_buf = req->data;
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
- zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
- mempool_free(sr_buf, adapter->pool.data_status_read);
+ zfcp_dbf_hba_fsf_uss("fssrh_1", req);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
return;
}
- zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
+ zfcp_dbf_hba_fsf_uss("fssrh_4", req);
switch (sr_buf->status_type) {
case FSF_STATUS_READ_PORT_CLOSED:
@@ -299,43 +234,39 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
dev_warn(&adapter->ccw_device->dev,
"The error threshold for checksum statistics "
"has been exceeded\n");
- zfcp_hba_dbf_event_berr(adapter, req);
+ zfcp_dbf_hba_bit_err("fssrh_3", req);
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
+ zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
break;
case FSF_STATUS_READ_LINK_UP:
dev_info(&adapter->ccw_device->dev,
"The local link has been restored\n");
/* All ports should be marked as ready to run again */
- zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
- ZFCP_STATUS_COMMON_RUNNING,
- ZFCP_SET);
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED,
- "fssrh_2", req);
+ "fssrh_2");
+ zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
+
break;
case FSF_STATUS_READ_NOTIFICATION_LOST:
- if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
- zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
- req);
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
- schedule_work(&adapter->scan_work);
- break;
- case FSF_STATUS_READ_CFDC_UPDATED:
- zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
+ zfcp_fc_conditional_port_scan(adapter);
break;
case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
adapter->adapter_features = sr_buf->payload.word[0];
break;
}
- mempool_free(sr_buf, adapter->pool.data_status_read);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss);
- queue_work(zfcp_data.work_queue, &adapter->stat_work);
+ queue_work(adapter->work_queue, &adapter->stat_work);
}
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
@@ -347,13 +278,13 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
return;
case FSF_SQ_COMMAND_ABORTED:
- req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
break;
case FSF_SQ_NO_RECOM:
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter reported a problem "
"that cannot be recovered\n");
- zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
+ zfcp_qdio_siosl(req->adapter);
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
break;
}
/* all non-return stats set FSFREQ_ERROR*/
@@ -370,7 +301,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter does not recognize the command 0x%x\n",
req->qtcb->header.fsf_command);
- zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -385,11 +316,10 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
struct fsf_qtcb *qtcb = req->qtcb;
union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
- zfcp_hba_dbf_event_fsf_response(req);
+ zfcp_dbf_hba_fsf_response(req);
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
return;
}
@@ -402,17 +332,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
"QTCB version 0x%x not supported by FCP adapter "
"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
psq->word[0], psq->word[1]);
- zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
break;
case FSF_PROT_ERROR_STATE:
case FSF_PROT_SEQ_NUMB_ERROR:
- zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
- req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+ zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_UNSUPP_QTCB_TYPE:
dev_err(&adapter->ccw_device->dev,
"The QTCB type is not supported by the FCP adapter\n");
- zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING:
atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -422,29 +352,28 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"0x%Lx is an ambiguous request identifier\n",
(unsigned long long)qtcb->bottom.support.req_handle);
- zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
break;
case FSF_PROT_LINK_DOWN:
- zfcp_fsf_link_down_info_eval(req, "fspse_5",
- &psq->link_down_info);
- /* FIXME: reopening adapter now? better wait for link up */
- zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
+ zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
+ /* go through reopen to flush pending requests */
+ zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
break;
case FSF_PROT_REEST_QUEUE:
/* All ports should be marked as ready to run again */
- zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
- ZFCP_STATUS_COMMON_RUNNING,
- ZFCP_SET);
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED,
- "fspse_8", req);
+ "fspse_8");
break;
default:
dev_err(&adapter->ccw_device->dev,
"0x%x is not a valid transfer protocol status\n",
qtcb->prefix.prot_status);
- zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
+ zfcp_qdio_siosl(adapter);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
}
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -458,7 +387,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
* is called to process the completion status and trigger further
* events related to the FSF request.
*/
-void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
+static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
zfcp_fsf_status_read_handler(req);
@@ -472,53 +401,110 @@ void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
if (req->erp_action)
zfcp_erp_notify(req->erp_action, 0);
- req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
zfcp_fsf_req_free(req);
else
- /* notify initiator waiting for the requests completion */
- /*
- * FIXME: Race! We must not access fsf_req here as it might have been
- * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
- * flag. It's an improbable case. But, we have the same paranoia for
- * the cleanup flag already.
- * Might better be handled using complete()?
- * (setting the flag and doing wakeup ought to be atomic
- * with regard to checking the flag as long as waitqueue is
- * part of the to be released structure)
- */
- wake_up(&req->completion_wq);
+ complete(&req->completion);
+}
+
+/**
+ * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
+ * @adapter: pointer to struct zfcp_adapter
+ *
+ * Never ever call this without shutting down the adapter first.
+ * Otherwise the adapter would continue using and corrupting s390 storage.
+ * Included BUG_ON() call to ensure this is done.
+ * ERP is supposed to be the only user of this function.
+ */
+void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
+{
+ struct zfcp_fsf_req *req, *tmp;
+ LIST_HEAD(remove_queue);
+
+ BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
+ zfcp_reqlist_move(adapter->req_list, &remove_queue);
+
+ list_for_each_entry_safe(req, tmp, &remove_queue, list) {
+ list_del(&req->list);
+ req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+ zfcp_fsf_req_complete(req);
+ }
+}
+
+#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
+#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
+#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
+#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
+#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
+#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
+#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
+
+static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+{
+ u32 fdmi_speed = 0;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
+ fdmi_speed |= FC_PORTSPEED_1GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
+ fdmi_speed |= FC_PORTSPEED_2GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
+ fdmi_speed |= FC_PORTSPEED_4GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
+ fdmi_speed |= FC_PORTSPEED_10GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
+ fdmi_speed |= FC_PORTSPEED_8GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
+ fdmi_speed |= FC_PORTSPEED_16GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
+ fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
+ return fdmi_speed;
}
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
- struct fsf_qtcb_bottom_config *bottom;
+ struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
struct zfcp_adapter *adapter = req->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
+ struct fc_els_flogi *nsp, *plogi;
- bottom = &req->qtcb->bottom.config;
+	/* adjust pointers for missing command code: the FSF config bottom
+	 * stores the FLOGI/PLOGI payloads without the leading 4-byte ELS
+	 * command word, so step the fc_els_flogi pointers back by one word */
+ nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+ - sizeof(u32));
+ plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+ - sizeof(u32));
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
- fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
- fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
- fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_port_name(shost) = nsp->fl_wwpn;
+ fc_host_node_name(shost) = nsp->fl_wwnn;
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
- adapter->hydra_version = bottom->adapter_type;
- adapter->timer_ticks = bottom->timer_interval;
+ adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
+ (u16)FSF_STATUS_READS_RECOM);
if (fc_host_permanent_port_name(shost) == -1)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+ zfcp_scsi_set_prot(adapter);
+
+ /* no error return above here, otherwise must fix call chains */
+ /* do not evaluate invalid fields */
+ if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
+ return 0;
+
+ fc_host_port_id(shost) = ntoh24(bottom->s_id);
+ fc_host_speed(shost) =
+ zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+ adapter->hydra_version = bottom->adapter_type;
+
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
- adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
- adapter->peer_wwpn = bottom->plogi_payload.wwpn;
- adapter->peer_wwnn = bottom->plogi_payload.wwnn;
+ adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+ adapter->peer_wwpn = plogi->fl_wwpn;
+ adapter->peer_wwnn = plogi->fl_wwnn;
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case FSF_TOPO_FABRIC:
@@ -526,11 +512,12 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ /* fall through */
default:
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
- zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
return -EIO;
}
@@ -564,7 +551,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
"FCP adapter maximum QTCB size (%d bytes) "
"is too small\n",
bottom->max_qtcb_size);
- zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
return;
}
atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -578,14 +565,17 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
adapter->hydra_version = 0;
+	/* avoid an adapter shutdown here so that later events such as
+	 * LINK UP can still be recognized */
atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status);
-
- zfcp_fsf_link_down_info_eval(req, "fsecdh2",
+ zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+ if (zfcp_fsf_exchange_config_evaluate(req))
+ return;
break;
default:
- zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
return;
}
@@ -601,14 +591,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"The FCP adapter only supports newer "
"control block versions\n");
- zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
return;
}
if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
"The FCP adapter only supports older "
"control block versions\n");
- zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
}
}
@@ -627,7 +617,12 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
- fc_host_supported_speeds(shost) = bottom->supported_speed;
+ fc_host_supported_speeds(shost) =
+ zfcp_fsf_convert_portspeed(bottom->supported_speed);
+ memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+ FC_FC4_LIST_SIZE);
+ memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+ FC_FC4_LIST_SIZE);
}
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -643,126 +638,89 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
zfcp_fsf_exchange_port_evaluate(req);
- zfcp_fsf_link_down_info_eval(req, "fsepdh1",
+ zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
break;
}
}
-static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
-{
- struct zfcp_qdio_queue *req_q = &adapter->req_q;
-
- spin_lock_bh(&adapter->req_q_lock);
- if (atomic_read(&req_q->count))
- return 1;
- spin_unlock_bh(&adapter->req_q_lock);
- return 0;
-}
-
-static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
+static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
- long ret;
-
- spin_unlock_bh(&adapter->req_q_lock);
- ret = wait_event_interruptible_timeout(adapter->request_wq,
- zfcp_fsf_sbal_check(adapter), 5 * HZ);
- if (ret > 0)
- return 0;
- if (!ret)
- atomic_inc(&adapter->qdio_outb_full);
+ struct zfcp_fsf_req *req;
- spin_lock_bh(&adapter->req_q_lock);
- return -EIO;
-}
+ if (likely(pool))
+ req = mempool_alloc(pool, GFP_ATOMIC);
+ else
+ req = kmalloc(sizeof(*req), GFP_ATOMIC);
-static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
-{
- struct zfcp_fsf_req *req;
- req = mempool_alloc(pool, GFP_ATOMIC);
- if (!req)
+ if (unlikely(!req))
return NULL;
+
memset(req, 0, sizeof(*req));
req->pool = pool;
return req;
}
-static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool)
+static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
{
- struct zfcp_fsf_req_qtcb *qtcb;
+ struct fsf_qtcb *qtcb;
if (likely(pool))
qtcb = mempool_alloc(pool, GFP_ATOMIC);
else
- qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
- GFP_ATOMIC);
+ qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
+
if (unlikely(!qtcb))
return NULL;
memset(qtcb, 0, sizeof(*qtcb));
- qtcb->fsf_req.qtcb = &qtcb->qtcb;
- qtcb->fsf_req.pool = pool;
-
- return &qtcb->fsf_req;
+ return qtcb;
}
-static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
- u32 fsf_cmd, int req_flags,
+static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
+ u32 fsf_cmd, u8 sbtype,
mempool_t *pool)
{
- struct qdio_buffer_element *sbale;
-
- struct zfcp_fsf_req *req;
- struct zfcp_qdio_queue *req_q = &adapter->req_q;
-
- if (req_flags & ZFCP_REQ_NO_QTCB)
- req = zfcp_fsf_alloc_noqtcb(pool);
- else
- req = zfcp_fsf_alloc_qtcb(pool);
+ struct zfcp_adapter *adapter = qdio->adapter;
+ struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
if (unlikely(!req))
- return ERR_PTR(-EIO);
+ return ERR_PTR(-ENOMEM);
if (adapter->req_no == 0)
adapter->req_no++;
INIT_LIST_HEAD(&req->list);
init_timer(&req->timer);
- init_waitqueue_head(&req->completion_wq);
+ init_completion(&req->completion);
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
- req->sbal_number = 1;
- req->sbal_first = req_q->first;
- req->sbal_last = req_q->first;
- req->sbale_curr = 1;
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].addr = (void *) req->req_id;
- sbale[0].flags |= SBAL_FLAGS0_COMMAND;
+ if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
+ if (likely(pool))
+ req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
+ else
+ req->qtcb = zfcp_qtcb_alloc(NULL);
+
+ if (unlikely(!req->qtcb)) {
+ zfcp_fsf_req_free(req);
+ return ERR_PTR(-ENOMEM);
+ }
- if (likely(req->qtcb)) {
- req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
+ req->seq_no = adapter->fsf_req_seq_no;
+ req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_id = req->req_id;
req->qtcb->prefix.ulp_info = 26;
req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
- req->seq_no = adapter->fsf_req_seq_no;
- req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
- sbale[1].addr = (void *) req->qtcb;
- sbale[1].length = sizeof(struct fsf_qtcb);
}
- if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
- zfcp_fsf_req_free(req);
- return ERR_PTR(-EIO);
- }
-
- if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
- req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+ req->qtcb, sizeof(struct fsf_qtcb));
return req;
}
@@ -770,26 +728,19 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
- unsigned long flags;
- int idx;
- int with_qtcb = (req->qtcb != NULL);
-
- /* put allocated FSF request into hash table */
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- idx = zfcp_reqlist_hash(req->req_id);
- list_add_tail(&req->list, &adapter->req_list[idx]);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
- req->qdio_outb_usage = atomic_read(&adapter->req_q.count);
- req->issued = get_clock();
- if (zfcp_qdio_send(req)) {
+ struct zfcp_qdio *qdio = adapter->qdio;
+ int with_qtcb = (req->qtcb != NULL);
+ int req_id = req->req_id;
+
+ zfcp_reqlist_add(adapter->req_list, req);
+
+ req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
+ req->issued = get_tod_clock();
+ if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
- spin_lock_irqsave(&adapter->req_list_lock, flags);
/* lookup request again, list might have changed */
- if (zfcp_reqlist_find_safe(adapter, req))
- zfcp_reqlist_remove(adapter, req);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
- zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
+ zfcp_reqlist_find_rm(adapter->req_list, req_id);
+ zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
return -EIO;
}
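
The open-coded hash-bucket handling removed here moves behind the zfcp_reqlist_add()/zfcp_reqlist_find_rm() interface from zfcp_reqlist.h. A small user-space sketch of the underlying idea, assuming a table of buckets indexed by a hash of the request ID (bucket count and hash are illustrative, and the locking the driver needs is omitted):

#include <stdlib.h>
#include <stdio.h>

#define BUCKETS 128			/* illustrative size */

struct req {
	unsigned long id;
	struct req *next;
};

static struct req *table[BUCKETS];

static unsigned int hash(unsigned long id)
{
	return id % BUCKETS;
}

static void reqlist_add(struct req *r)
{
	unsigned int b = hash(r->id);

	r->next = table[b];
	table[b] = r;
}

/* look up a request by ID and unlink it, as done after a failed send */
static struct req *reqlist_find_rm(unsigned long id)
{
	unsigned int b = hash(id);
	struct req **pp;

	for (pp = &table[b]; *pp; pp = &(*pp)->next)
		if ((*pp)->id == id) {
			struct req *r = *pp;

			*pp = r->next;
			return r;
		}
	return NULL;
}

int main(void)
{
	struct req a = { .id = 42 };

	reqlist_add(&a);
	printf("found and removed: %d\n", reqlist_find_rm(42) == &a);
	return 0;
}
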
@@ -807,40 +758,37 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
* @req_flags: request flags
* Returns: 0 on success, ERROR otherwise
*/
-int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
+int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
+ struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
- struct qdio_buffer_element *sbale;
+ struct page *page;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
- ZFCP_REQ_NO_QTCB,
- adapter->pool.fsf_req_status_read);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+ SBAL_SFLAGS0_TYPE_STATUS,
+ adapter->pool.status_read_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
- sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
- req->sbale_curr = 2;
-
- sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
- if (!sr_buf) {
+ page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+ if (!page) {
retval = -ENOMEM;
goto failed_buf;
}
+ sr_buf = page_address(page);
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
- sbale = zfcp_qdio_sbale_curr(req);
- sbale->addr = (void *) sr_buf;
- sbale->length = sizeof(*sr_buf);
+
+ zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (retval)
@@ -849,34 +797,38 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
goto out;
failed_req_send:
- mempool_free(sr_buf, adapter->pool.data_status_read);
+ req->data = NULL;
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
+ zfcp_dbf_hba_fsf_uss("fssr__1", req);
zfcp_fsf_req_free(req);
- zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
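
The status-read buffer now comes from a mempool of pages (adapter->pool.sr_data), so the code converts with page_address() after allocation and back with virt_to_page() before freeing. A kernel-style sketch of that pairing, assuming the pool was set up as a page pool (e.g. via mempool_create_page_pool()):

#include <linux/mempool.h>
#include <linux/mm.h>

/* allocate a buffer backed by a page from the pool */
static void *sr_buf_get(mempool_t *pool)
{
	struct page *page = mempool_alloc(pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

/* give the page back; the pool deals in struct page, not in pointers */
static void sr_buf_put(mempool_t *pool, void *buf)
{
	mempool_free(virt_to_page(buf), pool);
}
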
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_unit *unit = req->data;
+ struct scsi_device *sdev = req->data;
+ struct zfcp_scsi_dev *zfcp_sdev;
union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
- zfcp_erp_adapter_reopen(unit->port->adapter, 0,
- "fsafch1", req);
+ zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
+ "fsafch1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
break;
case FSF_LUN_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
- zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
+ zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
break;
@@ -884,19 +836,23 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, "fsafch3", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ zfcp_erp_set_port_status(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ACCESS_BOXED);
+ zfcp_erp_port_reopen(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_BOXED:
- zfcp_erp_unit_boxed(unit, "fsafch4", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+ zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fsafch4");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (fsq->word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- zfcp_test_link(unit->port);
+ zfcp_fc_test_link(zfcp_sdev->port);
+ /* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -909,41 +865,40 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
}
/**
- * zfcp_fsf_abort_fcp_command - abort running SCSI command
- * @old_req_id: unsigned long
- * @unit: pointer to struct zfcp_unit
+ * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
+ * @scmnd: The SCSI command to abort
* Returns: pointer to struct zfcp_fsf_req
*/
-struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
- struct zfcp_unit *unit)
+struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
- struct zfcp_adapter *adapter = unit->port->adapter;
+ struct scsi_device *sdev = scmnd->device;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
+ unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
- 0, adapter->pool.fsf_req_abort);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.scsi_abort);
if (IS_ERR(req)) {
req = NULL;
goto out;
}
- if (unlikely(!(atomic_read(&unit->status) &
+ if (unlikely(!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
- req->data = unit;
+ req->data = sdev;
req->handler = zfcp_fsf_abort_fcp_command_handler;
- req->qtcb->header.lun_handle = unit->handle;
- req->qtcb->header.port_handle = unit->port->handle;
+ req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+ req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req->qtcb->bottom.support.req_handle = (u64) old_req_id;
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
@@ -954,25 +909,25 @@ out_error_free:
zfcp_fsf_req_free(req);
req = NULL;
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return req;
}
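
zfcp_fsf_abort_fcp_cmnd() now derives the request to abort from the SCSI command itself: the I/O path stores the FSF request ID in scmnd->host_scribble, and the abort path reads it back for the QTCB's req_handle. A stripped-down illustration of that store/recall pair (the helper names are made up):

#include <scsi/scsi_cmnd.h>

/* I/O submission path: tag the SCSI command with the FSF request ID */
static void stash_req_id(struct scsi_cmnd *scmnd, unsigned long req_id)
{
	scmnd->host_scribble = (unsigned char *) req_id;
}

/* abort path: recover the ID of the request that carries this command */
static unsigned long fetch_req_id(struct scsi_cmnd *scmnd)
{
	return (unsigned long) scmnd->host_scribble;
}
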
static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
- struct zfcp_send_ct *send_ct = req->data;
+ struct zfcp_fsf_ct_els *ct = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
- send_ct->status = -EINVAL;
+ ct->status = -EINVAL;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
goto skip_fsfstatus;
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_san_dbf_event_ct_response(req);
- send_ct->status = 0;
+ zfcp_dbf_san_res("fsscth2", req);
+ ct->status = 0;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -985,14 +940,12 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
break;
}
break;
- case FSF_ACCESS_DENIED:
- break;
case FSF_PORT_BOXED:
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
+ zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
+ /* fall through */
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1003,45 +956,87 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
}
skip_fsfstatus:
- if (send_ct->handler)
- send_ct->handler(send_ct->handler_data);
+ if (ct->handler)
+ ct->handler(ct->handler_data);
+}
+
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req,
+ struct scatterlist *sg_req,
+ struct scatterlist *sg_resp)
+{
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+ zfcp_qdio_set_sbale_last(qdio, q_req);
}
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
- struct scatterlist *sg_resp,
- int max_sbals)
+ struct scatterlist *sg_resp)
{
- struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req);
- u32 feat = req->adapter->adapter_features;
- int bytes;
-
- if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
- if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE ||
- !sg_is_last(sg_req) || !sg_is_last(sg_resp))
- return -EOPNOTSUPP;
-
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
- sbale[2].addr = sg_virt(sg_req);
- sbale[2].length = sg_req->length;
- sbale[3].addr = sg_virt(sg_resp);
- sbale[3].length = sg_resp->length;
- sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
+ struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_qdio *qdio = adapter->qdio;
+ struct fsf_qtcb *qtcb = req->qtcb;
+ u32 feat = adapter->adapter_features;
+
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+ return -EIO;
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+ return -EIO;
+
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ zfcp_qdio_sbale_count(sg_req));
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+ zfcp_qdio_set_scount(qdio, &req->qdio_req);
+ return 0;
+ }
+
+ /* use single, unchained SBAL if it can hold the request */
+ if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
+ zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
- bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
- sg_req, max_sbals);
- if (bytes <= 0)
- return -ENOMEM;
- req->qtcb->bottom.support.req_buf_length = bytes;
- req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
+ return -EOPNOTSUPP;
+
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+ return -EIO;
+
+ qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
+
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+ zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
+
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+ return -EIO;
- bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
- sg_resp, max_sbals);
- if (bytes <= 0)
- return -ENOMEM;
- req->qtcb->bottom.support.resp_buf_length = bytes;
+ qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
+
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+ return 0;
+}
+
+static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
+ struct scatterlist *sg_req,
+ struct scatterlist *sg_resp,
+ unsigned int timeout)
+{
+ int ret;
+
+ ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
+ if (ret)
+ return ret;
+
+ /* common settings for ct/gs and els requests */
+ if (timeout > 255)
+ timeout = 255; /* max value accepted by hardware */
+ req->qtcb->bottom.support.service_class = FSF_CLASS_3;
+ req->qtcb->bottom.support.timeout = timeout;
+ zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
return 0;
}
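
zfcp_fsf_setup_ct_els() clamps the caller-supplied timeout to the 255 seconds the hardware accepts and arms the driver-side timer a little later, so the channel gets the first chance to report a timeout. A trivial user-space illustration of that arithmetic:

#include <stdio.h>

static unsigned int fsf_timeout(unsigned int timeout)
{
	return timeout > 255 ? 255 : timeout;	/* hardware limit */
}

int main(void)
{
	unsigned int t = fsf_timeout(300);

	/* the driver timer fires 10 seconds after the hardware timeout */
	printf("QTCB timeout: %us, request timer: %us\n", t, t + 10);
	return 0;
}
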
@@ -1050,46 +1045,37 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
* zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
* @ct: pointer to struct zfcp_send_ct with data for request
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
- * @erp_action: if non-null the Generic Service request sent within ERP
*/
-int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
- struct zfcp_erp_action *erp_action)
+int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+ struct zfcp_fsf_ct_els *ct, mempool_t *pool,
+ unsigned int timeout)
{
- struct zfcp_wka_port *wka_port = ct->wka_port;
- struct zfcp_adapter *adapter = wka_port->adapter;
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int ret = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
- ZFCP_REQ_AUTO_CLEANUP, pool);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+ SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
+
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out;
}
- ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
- FSF_MAX_SBALS_PER_REQ);
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
if (ret)
goto failed_send;
req->handler = zfcp_fsf_send_ct_handler;
req->qtcb->header.port_handle = wka_port->handle;
- req->qtcb->bottom.support.service_class = FSF_CLASS_3;
- req->qtcb->bottom.support.timeout = ct->timeout;
req->data = ct;
- zfcp_san_dbf_event_ct_request(req);
-
- if (erp_action) {
- erp_action->fsf_req = req;
- req->erp_action = erp_action;
- zfcp_fsf_start_erp_timer(req);
- } else
- zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
ret = zfcp_fsf_req_send(req);
if (ret)
@@ -1099,17 +1085,14 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
failed_send:
zfcp_fsf_req_free(req);
- if (erp_action)
- erp_action->fsf_req = NULL;
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return ret;
}
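
With this change a CT/GS caller no longer embeds adapter and timer details in struct zfcp_send_ct; it fills a struct zfcp_fsf_ct_els with request/response scatterlists and a completion handler and passes WKA port, pool and timeout explicitly. A hedged sketch of such a caller, where send_one_ct(), my_ct_done(), the scatterlists and the 10-second timeout are placeholders, not driver symbols (the struct field names are taken from the handler code above):

#include <linux/completion.h>
#include <linux/scatterlist.h>

static void my_ct_done(void *data)
{
	complete(data);			/* response arrived (or failed) */
}

static int send_one_ct(struct zfcp_fc_wka_port *wka_port,
		       struct scatterlist *sg_req,
		       struct scatterlist *sg_resp)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct zfcp_fsf_ct_els ct = {
		.req = sg_req,		/* CT payload to send */
		.resp = sg_resp,	/* buffer for the answer */
		.handler = my_ct_done,
		.handler_data = &wait,
	};
	int ret = zfcp_fsf_send_ct(wka_port, &ct, NULL, 10);

	if (!ret) {
		wait_for_completion(&wait);
		ret = ct.status;	/* 0 only after FSF_GOOD */
	}
	return ret;
}
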
static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_send_els *send_els = req->data;
- struct zfcp_port *port = send_els->port;
+ struct zfcp_fsf_ct_els *send_els = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
send_els->status = -EINVAL;
@@ -1119,7 +1102,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_san_dbf_event_els_response(req);
+ zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1128,9 +1111,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]){
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- if (port && (send_els->ls_code != ZFCP_LS_ADISC))
- zfcp_test_link(port);
- /*fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_RETRY_IF_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1142,11 +1122,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
case FSF_REQUEST_SIZE_TOO_LARGE:
case FSF_RESPONSE_SIZE_TOO_LARGE:
break;
- case FSF_ACCESS_DENIED:
- zfcp_fsf_access_denied_port(req, port);
- break;
case FSF_SBAL_MISMATCH:
- /* should never occure, avoided in zfcp_fsf_send_els */
+ /* should never occur, avoided in zfcp_fsf_send_els */
/* fall through */
default:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1161,38 +1138,41 @@ skip_fsfstatus:
* zfcp_fsf_send_els - initiate an ELS command (FC-FS)
* @els: pointer to struct zfcp_send_els with data for the command
*/
-int zfcp_fsf_send_els(struct zfcp_send_els *els)
+int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+ struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
struct zfcp_fsf_req *req;
- struct zfcp_adapter *adapter = els->adapter;
- struct fsf_qtcb_bottom_support *bottom;
+ struct zfcp_qdio *qdio = adapter->qdio;
int ret = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
- ZFCP_REQ_AUTO_CLEANUP, NULL);
+
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+ SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
+
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out;
}
- ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+
+ if (!zfcp_adapter_multi_buffer_active(adapter))
+ zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
+
+ ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
if (ret)
goto failed_send;
- bottom = &req->qtcb->bottom.support;
+ hton24(req->qtcb->bottom.support.d_id, d_id);
req->handler = zfcp_fsf_send_els_handler;
- bottom->d_id = els->d_id;
- bottom->service_class = FSF_CLASS_3;
- bottom->timeout = 2 * R_A_TOV;
req->data = els;
- zfcp_san_dbf_event_els_request(req);
+ zfcp_dbf_san_req("fssels1", req, d_id);
- zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
@@ -1202,79 +1182,72 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
failed_send:
zfcp_fsf_req_free(req);
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return ret;
}
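
The destination ID is now handed over as a plain u32 and packed into the QTCB's 3-byte d_id field with hton24(). For reference, a user-space sketch of the same 24-bit big-endian conversion (the kernel helper lives elsewhere; this only illustrates the byte layout):

#include <stdio.h>
#include <stdint.h>

/* store a 24-bit FC destination ID in network (big-endian) byte order */
static void pack_d_id(uint8_t *p, uint32_t d_id)
{
	p[0] = (d_id >> 16) & 0xff;
	p[1] = (d_id >> 8) & 0xff;
	p[2] = d_id & 0xff;
}

int main(void)
{
	uint8_t buf[3];

	pack_d_id(buf, 0xfffffc);	/* directory service well-known address */
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
	return 0;
}
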
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
- struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_qdio *qdio = erp_action->adapter->qdio;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter,
- FSF_QTCB_EXCHANGE_CONFIG_DATA,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
+
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->bottom.config.feature_selection =
- FSF_FEATURE_CFDC |
- FSF_FEATURE_LUN_SHARING |
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
-int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
- 0, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_SFLAGS0_TYPE_READ, NULL);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_config_data_handler;
req->qtcb->bottom.config.feature_selection =
- FSF_FEATURE_CFDC |
- FSF_FEATURE_LUN_SHARING |
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT;
@@ -1283,16 +1256,15 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- wait_event(req->completion_wq,
- req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+ wait_for_completion(&req->completion);
zfcp_fsf_req_free(req);
return retval;
out_unlock:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
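
Synchronous exchanges such as this one no longer poll a status flag via wait_event(); the request carries a struct completion that the response path completes. A minimal kernel-style sketch of that handshake, where struct my_req and issue_req() are placeholders for the real request and send path:

#include <linux/completion.h>
#include <linux/errno.h>

struct my_req {
	struct completion done;
	/* ... request payload ... */
};

int issue_req(struct my_req *req);	/* placeholder for the real send path */

static int issue_and_wait(struct my_req *req)
{
	init_completion(&req->done);
	if (issue_req(req))
		return -EIO;
	wait_for_completion(&req->done);	/* sleep until the IRQ path is done */
	return 0;
}

/* called from the response/interrupt path once the request is finished */
static void request_finished(struct my_req *req)
{
	complete(&req->done);
}
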
@@ -1303,66 +1275,66 @@ out_unlock:
*/
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
+ struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
- struct zfcp_adapter *adapter = erp_action->adapter;
int retval = -EIO;
- if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+ if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
+
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
/**
* zfcp_fsf_exchange_port_data_sync - request information about local port
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
* @data: pointer to struct fsf_qtcb_bottom_port
* Returns: 0 on success, error otherwise
*/
-int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
- if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+ if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
- NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_SFLAGS0_TYPE_READ, NULL);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
@@ -1371,24 +1343,22 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
if (data)
req->data = data;
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- wait_event(req->completion_wq,
- req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+ wait_for_completion(&req->completion);
+
zfcp_fsf_req_free(req);
return retval;
out_unlock:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
@@ -1396,23 +1366,21 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_port *port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
- struct fsf_plogi *plogi;
+ struct fc_els_flogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
- return;
+ goto out;
switch (header->fsf_status) {
case FSF_PORT_ALREADY_OPEN:
break;
- case FSF_ACCESS_DENIED:
- zfcp_fsf_access_denied_port(req, port);
- break;
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Not enough FCP adapter resources to open "
"remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
- zfcp_erp_port_failed(port, "fsoph_1", req);
+ zfcp_erp_set_port_status(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1446,21 +1414,18 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
* another GID_PN straight after a port has been opened.
* Alternately, an ADISC/PDISC ELS should suffice, as well.
*/
- plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
+ plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
if (req->qtcb->bottom.support.els1_length >=
- FSF_PLOGI_MIN_LEN) {
- if (plogi->serv_param.wwpn != port->wwpn)
- port->d_id = 0;
- else {
- port->wwnn = plogi->serv_param.wwnn;
+ FSF_PLOGI_MIN_LEN)
zfcp_fc_plogi_evaluate(port, plogi);
- }
- }
break;
case FSF_UNKNOWN_OP_SUBTYPE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
+
+out:
+ put_device(&port->dev);
}
/**
@@ -1470,42 +1435,43 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+ struct zfcp_port *port = erp_action->port;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter,
- FSF_QTCB_OPEN_PORT_WITH_DID,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_port_handler;
- req->qtcb->bottom.support.d_id = erp_action->port->d_id;
- req->data = erp_action->port;
+ hton24(req->qtcb->bottom.support.d_id, port->d_id);
+ req->data = port;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
+ get_device(&port->dev);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
+ put_device(&port->dev);
}
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
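
Because the open-port response handler may run long after the ERP action is gone, the request now pins the port with get_device() before it is sent; the handler's put_device() at the "out:" label above is mirrored by a put_device() on the send-failure path. A generic kernel-style sketch of that lifetime rule, with submit_async(), free_req() and my_handler_tail() as stand-ins for the real functions:

#include <linux/device.h>
#include <linux/errno.h>

struct my_req;					/* opaque request, illustrative */
int submit_async(struct my_req *req);		/* stand-in for zfcp_fsf_req_send() */
void free_req(struct my_req *req);		/* stand-in for zfcp_fsf_req_free() */

static int submit_with_port_ref(struct zfcp_port *port, struct my_req *req)
{
	get_device(&port->dev);		/* keep the port alive while in flight */
	if (submit_async(req)) {
		free_req(req);
		put_device(&port->dev);	/* never sent: drop the reference here */
		return -EIO;
	}
	return 0;			/* the response handler drops it later */
}

static void my_handler_tail(struct zfcp_port *port)
{
	put_device(&port->dev);		/* matches the get_device() above */
}
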
@@ -1518,15 +1484,13 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
+ zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
break;
case FSF_GOOD:
- zfcp_erp_modify_port_status(port, "fscph_2", req,
- ZFCP_STATUS_COMMON_OPEN,
- ZFCP_CLEAR);
+ zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
break;
}
}
@@ -1538,51 +1502,50 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_port_handler;
req->data = erp_action->port;
req->erp_action = erp_action;
req->qtcb->header.port_handle = erp_action->port->handle;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_wka_port *wka_port = req->data;
+ struct zfcp_fc_wka_port *wka_port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
goto out;
}
@@ -1590,16 +1553,16 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Opening WKA port 0x%x failed\n", wka_port->d_id);
+ /* fall through */
case FSF_ADAPTER_STATUS_AVAILABLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
- case FSF_ACCESS_DENIED:
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
- break;
- case FSF_PORT_ALREADY_OPEN:
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
break;
case FSF_GOOD:
wka_port->handle = header->port_handle;
- wka_port->status = ZFCP_WKA_PORT_ONLINE;
+ /* fall through */
+ case FSF_PORT_ALREADY_OPEN:
+ wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
wake_up(&wka_port->completion_wq);
@@ -1607,35 +1570,33 @@ out:
/**
* zfcp_fsf_open_wka_port - create and send open wka-port request
- * @wka_port: pointer to struct zfcp_wka_port
+ * @wka_port: pointer to struct zfcp_fc_wka_port
* Returns: 0 on success, error otherwise
*/
-int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_adapter *adapter = wka_port->adapter;
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter,
- FSF_QTCB_OPEN_PORT_WITH_DID,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
- if (unlikely(IS_ERR(req))) {
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_wka_port_handler;
- req->qtcb->bottom.support.d_id = wka_port->d_id;
+ hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req->data = wka_port;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1643,50 +1604,49 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
if (retval)
zfcp_fsf_req_free(req);
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_wka_port *wka_port = req->data;
+ struct zfcp_fc_wka_port *wka_port = req->data;
if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
- zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
+ zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
}
- wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
}
/**
* zfcp_fsf_close_wka_port - create and send close wka port request
- * @erp_action: pointer to struct zfcp_erp_action
+ * @wka_port: WKA port to close
* Returns: 0 on success, error otherwise
*/
-int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_adapter *adapter = wka_port->adapter;
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
- if (unlikely(IS_ERR(req))) {
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_wka_port_handler;
req->data = wka_port;
@@ -1697,7 +1657,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
if (retval)
zfcp_fsf_req_free(req);
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
@@ -1705,29 +1665,28 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_port *port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
- struct zfcp_unit *unit;
+ struct scsi_device *sdev;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (header->fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
+ zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
- case FSF_ACCESS_DENIED:
- zfcp_fsf_access_denied_port(req, port);
- break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(port, "fscpph2", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
/* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- list_for_each_entry(unit, &port->unit_list_head, list)
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
- &unit->status);
+ shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+ &sdev_to_zfcp(sdev)->status);
+ zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+ zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fscpph2");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
@@ -1743,9 +1702,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port
*/
atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- list_for_each_entry(unit, &port->unit_list_head, list)
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
- &unit->status);
+ shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+ &sdev_to_zfcp(sdev)->status);
break;
}
}
@@ -1757,104 +1717,95 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = erp_action->port;
req->qtcb->header.port_handle = erp_action->port->handle;
req->erp_action = erp_action;
req->handler = zfcp_fsf_close_physical_port_handler;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
-static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
+static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
- struct zfcp_unit *unit = req->data;
+ struct scsi_device *sdev = req->data;
+ struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
- struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
- struct fsf_queue_designator *queue_designator =
- &header->fsf_status_qual.fsf_queue_designator;
- int exclusive, readwrite;
+ union fsf_status_qual *qual = &header->fsf_status_qual;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ zfcp_sdev = sdev_to_zfcp(sdev);
+
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_COMMON_ACCESS_BOXED |
- ZFCP_STATUS_UNIT_SHARED |
- ZFCP_STATUS_UNIT_READONLY,
- &unit->status);
+ ZFCP_STATUS_COMMON_ACCESS_BOXED,
+ &zfcp_sdev->status);
switch (header->fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
+ zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
/* fall through */
case FSF_LUN_ALREADY_OPEN:
break;
- case FSF_ACCESS_DENIED:
- zfcp_fsf_access_denied_unit(req, unit);
- atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
- atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
- break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ zfcp_erp_set_port_status(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ACCESS_BOXED);
+ zfcp_erp_port_reopen(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_SHARING_VIOLATION:
- if (header->fsf_status_qual.word[0])
- dev_warn(&adapter->ccw_device->dev,
+ if (qual->word[0])
+ dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
"LUN 0x%Lx on port 0x%Lx is already in "
"use by CSS%d, MIF Image ID %x\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn,
- queue_designator->cssid,
- queue_designator->hla);
- else
- zfcp_act_eval_err(adapter,
- header->fsf_status_qual.word[2]);
- zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
- atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
- atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
+ zfcp_scsi_dev_lun(sdev),
+ (unsigned long long)zfcp_sdev->port->wwpn,
+ qual->fsf_queue_designator.cssid,
+ qual->fsf_queue_designator.hla);
+ zfcp_erp_set_lun_status(sdev,
+ ZFCP_STATUS_COMMON_ERP_FAILED |
+ ZFCP_STATUS_COMMON_ACCESS_DENIED);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
dev_warn(&adapter->ccw_device->dev,
"No handle is available for LUN "
"0x%016Lx on port 0x%016Lx\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, "fsouh_4", req);
+ (unsigned long long)zfcp_scsi_dev_lun(sdev),
+ (unsigned long long)zfcp_sdev->port->wwpn);
+ zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
/* fall through */
case FSF_INVALID_COMMAND_OPTION:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1862,7 +1813,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- zfcp_test_link(unit->port);
+ zfcp_fc_test_link(zfcp_sdev->port);
/* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1871,91 +1822,46 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
break;
case FSF_GOOD:
- unit->handle = header->lun_handle;
- atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
-
- if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
- (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
- (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
- exclusive = (bottom->lun_access_info &
- FSF_UNIT_ACCESS_EXCLUSIVE);
- readwrite = (bottom->lun_access_info &
- FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
-
- if (!exclusive)
- atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
- &unit->status);
-
- if (!readwrite) {
- atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
- &unit->status);
- dev_info(&adapter->ccw_device->dev,
- "SCSI device at LUN 0x%016Lx on port "
- "0x%016Lx opened read-only\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- }
-
- if (exclusive && !readwrite) {
- dev_err(&adapter->ccw_device->dev,
- "Exclusive read-only access not "
- "supported (unit 0x%016Lx, "
- "port 0x%016Lx)\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, "fsouh_5", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR;
- zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
- } else if (!exclusive && readwrite) {
- dev_err(&adapter->ccw_device->dev,
- "Shared read-write access not "
- "supported (unit 0x%016Lx, port "
- "0x%016Lx)\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_failed(unit, "fsouh_7", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR;
- zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
- }
- }
+ zfcp_sdev->lun_handle = header->lun_handle;
+ atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break;
}
}
/**
- * zfcp_fsf_open_unit - open unit
+ * zfcp_fsf_open_lun - open LUN
* @erp_action: pointer to struct zfcp_erp_action
* Returns: 0 on success, error otherwise
*/
-int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+ SBAL_SFLAGS0_TYPE_READ,
+ adapter->pool.erp_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
- req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
- req->handler = zfcp_fsf_open_unit_handler;
- req->data = erp_action->unit;
+ req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
+ req->handler = zfcp_fsf_open_lun_handler;
+ req->data = erp_action->sdev;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@@ -1964,38 +1870,43 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
-static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
+static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_unit *unit = req->data;
+ struct scsi_device *sdev = req->data;
+ struct zfcp_scsi_dev *zfcp_sdev;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
+ zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_HANDLE_NOT_VALID:
- zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
+ zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ zfcp_erp_set_port_status(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ACCESS_BOXED);
+ zfcp_erp_port_reopen(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- zfcp_test_link(unit->port);
+ zfcp_fc_test_link(zfcp_sdev->port);
/* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -2003,53 +1914,54 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
}
break;
case FSF_GOOD:
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
+ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break;
}
}
/**
- * zfcp_fsf_close_unit - close zfcp unit
- * @erp_action: pointer to struct zfcp_unit
+ * zfcp_fsf_close_lun - close LUN
+ * @erp_action: pointer to erp_action triggering the "close LUN"
* Returns: 0 on success, error otherwise
*/
-int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_adapter *adapter = erp_action->adapter;
+ struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_erp);
+
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+ SBAL_SFLAGS0_TYPE_READ,
+ qdio->adapter->pool.erp_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
- req->qtcb->header.lun_handle = erp_action->unit->handle;
- req->handler = zfcp_fsf_close_unit_handler;
- req->data = erp_action->unit;
+ req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+ req->handler = zfcp_fsf_close_lun_handler;
+ req->data = erp_action->sdev;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
@@ -2060,355 +1972,300 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
lat_rec->max = max(lat_rec->max, lat);
}
-static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
-{
- struct fsf_qual_latency_info *lat_inf;
- struct latency_cont *lat;
- struct zfcp_unit *unit = req->unit;
-
- lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
-
- switch (req->qtcb->bottom.io.data_direction) {
- case FSF_DATADIR_READ:
- lat = &unit->latencies.read;
- break;
- case FSF_DATADIR_WRITE:
- lat = &unit->latencies.write;
- break;
- case FSF_DATADIR_CMND:
- lat = &unit->latencies.cmd;
- break;
- default:
- return;
- }
-
- spin_lock(&unit->latencies.lock);
- zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
- zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
- lat->counter++;
- spin_unlock(&unit->latencies.lock);
-}
-
-#ifdef CONFIG_BLK_DEV_IO_TRACE
-static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
- struct fsf_qual_latency_info *lat_inf;
- struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
- struct request *req = scsi_cmnd->request;
- struct zfcp_blk_drv_data trace;
- int ticks = fsf_req->adapter->timer_ticks;
-
- trace.flags = 0;
- trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
- if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
- trace.flags |= ZFCP_BLK_LAT_VALID;
- lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
- trace.channel_lat = lat_inf->channel_lat * ticks;
- trace.fabric_lat = lat_inf->fabric_lat * ticks;
- }
- if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
- trace.flags |= ZFCP_BLK_REQ_ERROR;
- trace.inb_usage = fsf_req->qdio_inb_usage;
- trace.outb_usage = fsf_req->qdio_outb_usage;
-
- blk_add_driver_data(req->q, req, &trace, sizeof(trace));
-}
-#else
-static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
-{
-}
-#endif
-
-static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
-{
- struct scsi_cmnd *scpnt;
- struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
- &(req->qtcb->bottom.io.fcp_rsp);
- u32 sns_len;
- char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
- unsigned long flags;
+ struct fsf_qual_latency_info *lat_in;
+ struct latency_cont *lat = NULL;
+ struct zfcp_scsi_dev *zfcp_sdev;
+ struct zfcp_blk_drv_data blktrc;
+ int ticks = req->adapter->timer_ticks;
- read_lock_irqsave(&req->adapter->abort_lock, flags);
-
- scpnt = req->data;
- if (unlikely(!scpnt)) {
- read_unlock_irqrestore(&req->adapter->abort_lock, flags);
- return;
- }
+ lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
- if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
- set_host_byte(scpnt, DID_SOFT_ERROR);
- goto skip_fsfstatus;
- }
-
- if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
- set_host_byte(scpnt, DID_ERROR);
- goto skip_fsfstatus;
- }
-
- set_msg_byte(scpnt, COMMAND_COMPLETE);
-
- scpnt->result |= fcp_rsp_iu->scsi_status;
-
- if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
- zfcp_fsf_req_latency(req);
-
- zfcp_fsf_trace_latency(req);
-
- if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
- if (fcp_rsp_info[3] == RSP_CODE_GOOD)
- set_host_byte(scpnt, DID_OK);
- else {
- set_host_byte(scpnt, DID_ERROR);
- goto skip_fsfstatus;
+ blktrc.flags = 0;
+ blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ blktrc.flags |= ZFCP_BLK_REQ_ERROR;
+ blktrc.inb_usage = 0;
+ blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
+
+ if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+ !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+ zfcp_sdev = sdev_to_zfcp(scsi->device);
+ blktrc.flags |= ZFCP_BLK_LAT_VALID;
+ blktrc.channel_lat = lat_in->channel_lat * ticks;
+ blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+
+ switch (req->qtcb->bottom.io.data_direction) {
+ case FSF_DATADIR_DIF_READ_STRIP:
+ case FSF_DATADIR_DIF_READ_CONVERT:
+ case FSF_DATADIR_READ:
+ lat = &zfcp_sdev->latencies.read;
+ break;
+ case FSF_DATADIR_DIF_WRITE_INSERT:
+ case FSF_DATADIR_DIF_WRITE_CONVERT:
+ case FSF_DATADIR_WRITE:
+ lat = &zfcp_sdev->latencies.write;
+ break;
+ case FSF_DATADIR_CMND:
+ lat = &zfcp_sdev->latencies.cmd;
+ break;
}
- }
-
- if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
- sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
- fcp_rsp_iu->fcp_rsp_len;
- sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
- sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
-
- memcpy(scpnt->sense_buffer,
- zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
- }
- if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
- scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
- if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
- scpnt->underflow)
- set_host_byte(scpnt, DID_ERROR);
+ if (lat) {
+ spin_lock(&zfcp_sdev->latencies.lock);
+ zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
+ zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
+ lat->counter++;
+ spin_unlock(&zfcp_sdev->latencies.lock);
+ }
}
-skip_fsfstatus:
- if (scpnt->result != 0)
- zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
- else if (scpnt->retries > 0)
- zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
- else
- zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
- scpnt->host_scribble = NULL;
- (scpnt->scsi_done) (scpnt);
- /*
- * We must hold this lock until scsi_done has been called.
- * Otherwise we may call scsi_done after abort regarding this
- * command has completed.
- * Note: scsi_done must not block!
- */
- read_unlock_irqrestore(&req->adapter->abort_lock, flags);
+ blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
+ sizeof(blktrc));
}
-static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
+static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
{
- struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
- &(req->qtcb->bottom.io.fcp_rsp);
- char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
-
- if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
- (req->status & ZFCP_STATUS_FSFREQ_ERROR))
- req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
-}
-
-
-static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
-{
- struct zfcp_unit *unit;
+ struct scsi_cmnd *scmnd = req->data;
+ struct scsi_device *sdev = scmnd->device;
+ struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
- if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
- unit = req->data;
- else
- unit = req->unit;
-
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
- goto skip_fsfstatus;
+ return;
+
+ zfcp_sdev = sdev_to_zfcp(sdev);
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
+ zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_FCPLUN_NOT_VALID:
case FSF_LUN_HANDLE_NOT_VALID:
- zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
+ zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
break;
- case FSF_ACCESS_DENIED:
- zfcp_fsf_access_denied_unit(req, unit);
- break;
case FSF_DIRECTION_INDICATOR_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
- "Incorrect direction %d, unit 0x%016Lx on port "
+ "Incorrect direction %d, LUN 0x%016Lx on port "
"0x%016Lx closed\n",
req->qtcb->bottom.io.data_direction,
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
- req);
+ (unsigned long long)zfcp_scsi_dev_lun(sdev),
+ (unsigned long long)zfcp_sdev->port->wwpn);
+ zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
+ "fssfch3");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_CMND_LENGTH_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
- "Incorrect CDB length %d, unit 0x%016Lx on "
+ "Incorrect CDB length %d, LUN 0x%016Lx on "
"port 0x%016Lx closed\n",
req->qtcb->bottom.io.fcp_cmnd_length,
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
- req);
+ (unsigned long long)zfcp_scsi_dev_lun(sdev),
+ (unsigned long long)zfcp_sdev->port->wwpn);
+ zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
+ "fssfch4");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(unit->port, "fssfch5", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ zfcp_erp_set_port_status(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ACCESS_BOXED);
+ zfcp_erp_port_reopen(zfcp_sdev->port,
+ ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_BOXED:
- zfcp_erp_unit_boxed(unit, "fssfch6", req);
- req->status |= ZFCP_STATUS_FSFREQ_ERROR |
- ZFCP_STATUS_FSFREQ_RETRY;
+ zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+ zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fssfch6");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
if (header->fsf_status_qual.word[0] ==
FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
- zfcp_test_link(unit->port);
+ zfcp_fc_test_link(zfcp_sdev->port);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
-skip_fsfstatus:
- if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
- zfcp_fsf_send_fcp_ctm_handler(req);
- else {
- zfcp_fsf_send_fcp_command_task_handler(req);
- req->unit = NULL;
- zfcp_unit_put(unit);
- }
}
-static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
+static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
- u32 *fcp_dl_ptr;
+ struct scsi_cmnd *scpnt;
+ struct fcp_resp_with_ext *fcp_rsp;
+ unsigned long flags;
+
+ read_lock_irqsave(&req->adapter->abort_lock, flags);
+
+ scpnt = req->data;
+ if (unlikely(!scpnt)) {
+ read_unlock_irqrestore(&req->adapter->abort_lock, flags);
+ return;
+ }
+
+ zfcp_fsf_fcp_handler_common(req);
+
+ if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+ set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
+ goto skip_fsfstatus;
+ }
+
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_INCONSISTENT_PROT_DATA:
+ case FSF_INVALID_PROT_PARM:
+ set_host_byte(scpnt, DID_ERROR);
+ goto skip_fsfstatus;
+ case FSF_BLOCK_GUARD_CHECK_FAILURE:
+ zfcp_scsi_dif_sense_error(scpnt, 0x1);
+ goto skip_fsfstatus;
+ case FSF_APP_TAG_CHECK_FAILURE:
+ zfcp_scsi_dif_sense_error(scpnt, 0x2);
+ goto skip_fsfstatus;
+ case FSF_REF_TAG_CHECK_FAILURE:
+ zfcp_scsi_dif_sense_error(scpnt, 0x3);
+ goto skip_fsfstatus;
+ }
+ fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+ zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
+
+skip_fsfstatus:
+ zfcp_fsf_req_trace(req, scpnt);
+ zfcp_dbf_scsi_result(scpnt, req);
+ scpnt->host_scribble = NULL;
+ (scpnt->scsi_done) (scpnt);
/*
- * fcp_dl_addr = start address of fcp_cmnd structure +
- * size of fixed part + size of dynamically sized add_dcp_cdb field
- * SEE FCP-2 documentation
+ * We must hold this lock until scsi_done has been called.
+ * Otherwise we may call scsi_done after abort regarding this
+ * command has completed.
+ * Note: scsi_done must not block!
*/
- fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
- (fcp_cmd->add_fcp_cdb_length << 2));
- *fcp_dl_ptr = fcp_dl;
+ read_unlock_irqrestore(&req->adapter->abort_lock, flags);
+}
+
+static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
+{
+ switch (scsi_get_prot_op(scsi_cmnd)) {
+ case SCSI_PROT_NORMAL:
+ switch (scsi_cmnd->sc_data_direction) {
+ case DMA_NONE:
+ *data_dir = FSF_DATADIR_CMND;
+ break;
+ case DMA_FROM_DEVICE:
+ *data_dir = FSF_DATADIR_READ;
+ break;
+ case DMA_TO_DEVICE:
+ *data_dir = FSF_DATADIR_WRITE;
+ break;
+ case DMA_BIDIRECTIONAL:
+ return -EINVAL;
+ }
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ *data_dir = FSF_DATADIR_DIF_READ_STRIP;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
+ break;
+ case SCSI_PROT_READ_PASS:
+ *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
- * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
- * @unit: unit where command is sent to
+ * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
* @scsi_cmnd: scsi command to be sent
*/
-int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
- struct scsi_cmnd *scsi_cmnd)
+int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
struct zfcp_fsf_req *req;
- struct fcp_cmnd_iu *fcp_cmnd_iu;
- unsigned int sbtype;
- int real_bytes, retval = -EIO;
- struct zfcp_adapter *adapter = unit->port->adapter;
+ struct fcp_cmnd *fcp_cmnd;
+ u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
+ int retval = -EIO;
+ struct scsi_device *sdev = scsi_cmnd->device;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+ struct zfcp_qdio *qdio = adapter->qdio;
+ struct fsf_qtcb_bottom_io *io;
+ unsigned long flags;
- if (unlikely(!(atomic_read(&unit->status) &
+ if (unlikely(!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return -EBUSY;
- spin_lock(&adapter->req_q_lock);
- if (atomic_read(&adapter->req_q.count) <= 0) {
- atomic_inc(&adapter->qdio_outb_full);
+ spin_lock_irqsave(&qdio->req_q_lock, flags);
+ if (atomic_read(&qdio->req_q_free) <= 0) {
+ atomic_inc(&qdio->req_q_full);
goto out;
}
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND,
- ZFCP_REQ_AUTO_CLEANUP,
- adapter->pool.fsf_req_scsi);
+
+ if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+ sbtype = SBAL_SFLAGS0_TYPE_WRITE;
+
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+ sbtype, adapter->pool.scsi_req);
+
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- zfcp_unit_get(unit);
- req->unit = unit;
- req->data = scsi_cmnd;
- req->handler = zfcp_fsf_send_fcp_command_handler;
- req->qtcb->header.lun_handle = unit->handle;
- req->qtcb->header.port_handle = unit->port->handle;
- req->qtcb->bottom.io.service_class = FSF_CLASS_3;
-
scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
- fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
- fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
- /*
- * set depending on data direction:
- * data direction bits in SBALE (SB Type)
- * data direction bits in QTCB
- * data direction bits in FCP_CMND IU
- */
- switch (scsi_cmnd->sc_data_direction) {
- case DMA_NONE:
- req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
- sbtype = SBAL_FLAGS0_TYPE_READ;
- break;
- case DMA_FROM_DEVICE:
- req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
- sbtype = SBAL_FLAGS0_TYPE_READ;
- fcp_cmnd_iu->rddata = 1;
- break;
- case DMA_TO_DEVICE:
- req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
- sbtype = SBAL_FLAGS0_TYPE_WRITE;
- fcp_cmnd_iu->wddata = 1;
- break;
- case DMA_BIDIRECTIONAL:
- default:
- retval = -EIO;
- goto failed_scsi_cmnd;
- }
+ io = &req->qtcb->bottom.io;
+ req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+ req->data = scsi_cmnd;
+ req->handler = zfcp_fsf_fcp_cmnd_handler;
+ req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+ req->qtcb->header.port_handle = zfcp_sdev->port->handle;
+ io->service_class = FSF_CLASS_3;
+ io->fcp_cmnd_length = FCP_CMND_LEN;
- if (likely((scsi_cmnd->device->simple_tags) ||
- ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
- (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
- fcp_cmnd_iu->task_attribute = SIMPLE_Q;
- else
- fcp_cmnd_iu->task_attribute = UNTAGGED;
+ if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
+ io->data_block_length = scsi_cmnd->device->sector_size;
+ io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
+ }
- if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
- fcp_cmnd_iu->add_fcp_cdb_length =
- (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
+ if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
+ goto failed_scsi_cmnd;
- memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
- req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
- fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
+ if (scsi_prot_sg_count(scsi_cmnd)) {
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ scsi_prot_sg_count(scsi_cmnd));
+ retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+ scsi_prot_sglist(scsi_cmnd));
+ if (retval)
+ goto failed_scsi_cmnd;
+ io->prot_data_length = zfcp_qdio_real_bytes(
+ scsi_prot_sglist(scsi_cmnd));
+ }
- real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
- scsi_sglist(scsi_cmnd),
- FSF_MAX_SBALS_PER_REQ);
- if (unlikely(real_bytes < 0)) {
- if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
- retval = -EIO;
- else {
- dev_err(&adapter->ccw_device->dev,
- "Oversize data package, unit 0x%016Lx "
- "on port 0x%016Lx closed\n",
- (unsigned long long)unit->fcp_lun,
- (unsigned long long)unit->port->wwpn);
- zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
- retval = -EINVAL;
- }
+ retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+ scsi_sglist(scsi_cmnd));
+ if (unlikely(retval))
goto failed_scsi_cmnd;
- }
- zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
+ zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
+ if (zfcp_adapter_multi_buffer_active(adapter))
+ zfcp_qdio_set_scount(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
@@ -2417,58 +2274,71 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
failed_scsi_cmnd:
- zfcp_unit_put(unit);
zfcp_fsf_req_free(req);
scsi_cmnd->host_scribble = NULL;
out:
- spin_unlock(&adapter->req_q_lock);
+ spin_unlock_irqrestore(&qdio->req_q_lock, flags);
return retval;
}
+static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
+{
+ struct fcp_resp_with_ext *fcp_rsp;
+ struct fcp_resp_rsp_info *rsp_info;
+
+ zfcp_fsf_fcp_handler_common(req);
+
+ fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+ rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+
+ if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
+ (req->status & ZFCP_STATUS_FSFREQ_ERROR))
+ req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
+}
+
/**
- * zfcp_fsf_send_fcp_ctm - send SCSI task management command
- * @unit: pointer to struct zfcp_unit
+ * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
+ * @scmnd: SCSI command to send the task management command for
* @tm_flags: unsigned byte for task management flags
* Returns: on success pointer to struct fsf_req, NULL otherwise
*/
-struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
+struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
+ u8 tm_flags)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
- struct fcp_cmnd_iu *fcp_cmnd_iu;
- struct zfcp_adapter *adapter = unit->port->adapter;
+ struct fcp_cmnd *fcp_cmnd;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+ struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
- if (unlikely(!(atomic_read(&unit->status) &
+ if (unlikely(!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return NULL;
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
+ spin_lock_irq(&qdio->req_q_lock);
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0,
- adapter->pool.fsf_req_scsi);
+
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+ SBAL_SFLAGS0_TYPE_WRITE,
+ qdio->adapter->pool.scsi_req);
+
if (IS_ERR(req)) {
req = NULL;
goto out;
}
- req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
- req->data = unit;
- req->handler = zfcp_fsf_send_fcp_command_handler;
- req->qtcb->header.lun_handle = unit->handle;
- req->qtcb->header.port_handle = unit->port->handle;
+ req->data = scmnd;
+ req->handler = zfcp_fsf_fcp_task_mgmt_handler;
+ req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+ req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
- req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
- sizeof(u32);
+ req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
- fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
- fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
- fcp_cmnd_iu->task_management_flags = tm_flags;
+ fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
@@ -2477,78 +2347,44 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
zfcp_fsf_req_free(req);
req = NULL;
out:
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
return req;
}
-static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
-{
-}
-
/**
- * zfcp_fsf_control_file - control file upload/download
+ * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
- * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
- * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
+ * @sbal_idx: response queue index of SBAL to be processed
*/
-struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
- struct zfcp_fsf_cfdc *fsf_cfdc)
+void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
+ struct zfcp_adapter *adapter = qdio->adapter;
+ struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
struct qdio_buffer_element *sbale;
- struct zfcp_fsf_req *req = NULL;
- struct fsf_qtcb_bottom_support *bottom;
- int direction, retval = -EIO, bytes;
-
- if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
- return ERR_PTR(-EOPNOTSUPP);
-
- switch (fsf_cfdc->command) {
- case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
- direction = SBAL_FLAGS0_TYPE_WRITE;
- break;
- case FSF_QTCB_UPLOAD_CONTROL_FILE:
- direction = SBAL_FLAGS0_TYPE_READ;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
- spin_lock_bh(&adapter->req_q_lock);
- if (zfcp_fsf_req_sbal_get(adapter))
- goto out;
-
- req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
- if (IS_ERR(req)) {
- retval = -EPERM;
- goto out;
- }
-
- req->handler = zfcp_fsf_control_file_handler;
-
- sbale = zfcp_qdio_sbale_req(req);
- sbale[0].flags |= direction;
-
- bottom = &req->qtcb->bottom.support;
- bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
- bottom->option = fsf_cfdc->option;
-
- bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
- FSF_MAX_SBALS_PER_REQ);
- if (bytes != ZFCP_CFDC_MAX_SIZE) {
- retval = -ENOMEM;
- zfcp_fsf_req_free(req);
- goto out;
- }
+ struct zfcp_fsf_req *fsf_req;
+ unsigned long req_id;
+ int idx;
+
+ for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
+
+ sbale = &sbal->element[idx];
+ req_id = (unsigned long) sbale->addr;
+ fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
+
+ if (!fsf_req) {
+ /*
+ * Unknown request means that we have potentially memory
+ * corruption and must stop the machine immediately.
+ */
+ zfcp_qdio_siosl(adapter);
+ panic("error: unknown req_id (%lx) on adapter %s.\n",
+ req_id, dev_name(&adapter->ccw_device->dev));
+ }
- zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
- retval = zfcp_fsf_req_send(req);
-out:
- spin_unlock_bh(&adapter->req_q_lock);
+ fsf_req->qdio_req.sbal_response = sbal_idx;
+ zfcp_fsf_req_complete(fsf_req);
- if (!retval) {
- wait_event(req->completion_wq,
- req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
- return req;
+ if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
+ break;
}
- return ERR_PTR(retval);
}
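
The new zfcp_fsf_set_data_dir() and the reworked zfcp_fsf_req_trace() above agree on how the DIF data directions are handled: each protected variant is folded into the plain read/write/cmd bucket for per-LUN latency accounting. The following standalone model of that bucketing is an ordinary user-space program, not driver code; it only reuses the FSF_DATADIR_* values defined in zfcp_fsf.h below.

        /*
         * Model of the latency bucketing done in zfcp_fsf_req_trace():
         * every FSF data direction, including the DIF variants, maps to
         * one of the three per-LUN latency buckets (read, write, cmd).
         * Build: cc -o datadir datadir.c && ./datadir
         */
        #include <stdio.h>

        /* values as defined in zfcp_fsf.h */
        #define FSF_DATADIR_WRITE               0x00000001
        #define FSF_DATADIR_READ                0x00000002
        #define FSF_DATADIR_CMND                0x00000004
        #define FSF_DATADIR_DIF_WRITE_INSERT    0x00000009
        #define FSF_DATADIR_DIF_READ_STRIP      0x0000000a
        #define FSF_DATADIR_DIF_WRITE_CONVERT   0x0000000b
        #define FSF_DATADIR_DIF_READ_CONVERT    0x0000000c

        static const char *latency_bucket(unsigned int data_direction)
        {
                switch (data_direction) {
                case FSF_DATADIR_DIF_READ_STRIP:
                case FSF_DATADIR_DIF_READ_CONVERT:
                case FSF_DATADIR_READ:
                        return "read";
                case FSF_DATADIR_DIF_WRITE_INSERT:
                case FSF_DATADIR_DIF_WRITE_CONVERT:
                case FSF_DATADIR_WRITE:
                        return "write";
                case FSF_DATADIR_CMND:
                        return "cmd";
                default:
                        return "none"; /* unknown direction: no accounting */
                }
        }

        int main(void)
        {
                unsigned int dirs[] = {
                        FSF_DATADIR_READ, FSF_DATADIR_WRITE, FSF_DATADIR_CMND,
                        FSF_DATADIR_DIF_READ_CONVERT,
                        FSF_DATADIR_DIF_WRITE_INSERT,
                };
                unsigned int i;

                for (i = 0; i < sizeof(dirs) / sizeof(dirs[0]); i++)
                        printf("data_direction 0x%x -> %s latencies\n",
                               dirs[i], latency_bucket(dirs[i]));
                return 0;
        }
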
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index df7f232faba..57ae3ae1046 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,13 +3,15 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef FSF_H
#define FSF_H
#include <linux/pfn.h>
+#include <linux/scatterlist.h>
+#include <scsi/libfc.h>
#define FSF_QTCB_CURRENT_VERSION 0x00000001
@@ -34,13 +36,6 @@
#define FSF_CONFIG_COMMAND 0x00000003
#define FSF_PORT_COMMAND 0x00000004
-/* FSF control file upload/download operations' subtype and options */
-#define FSF_CFDC_OPERATION_SUBTYPE 0x00020001
-#define FSF_CFDC_OPTION_NORMAL_MODE 0x00000000
-#define FSF_CFDC_OPTION_FORCE 0x00000001
-#define FSF_CFDC_OPTION_FULL_ACCESS 0x00000002
-#define FSF_CFDC_OPTION_RESTRICTED_ACCESS 0x00000004
-
/* FSF protocol states */
#define FSF_PROT_GOOD 0x00000001
#define FSF_PROT_QTCB_VERSION_ERROR 0x00000010
@@ -62,7 +57,6 @@
#define FSF_HANDLE_MISMATCH 0x00000005
#define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006
#define FSF_FCPLUN_NOT_VALID 0x00000009
-#define FSF_ACCESS_DENIED 0x00000010
#define FSF_LUN_SHARING_VIOLATION 0x00000012
#define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022
#define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030
@@ -78,11 +72,15 @@
#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
#define FSF_SBAL_MISMATCH 0x00000063
+#define FSF_INCONSISTENT_PROT_DATA 0x00000070
+#define FSF_INVALID_PROT_PARM 0x00000071
+#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081
+#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
+#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
-/* #define FSF_ERROR 0x000000FF */
#define FSF_PROT_STATUS_QUAL_SIZE 16
#define FSF_STATUS_QUALIFIER_SIZE 16
@@ -124,7 +122,6 @@
#define FSF_STATUS_READ_LINK_DOWN 0x00000005
#define FSF_STATUS_READ_LINK_UP 0x00000006
#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
-#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
/* status subtypes for link down */
@@ -134,7 +131,6 @@
/* status subtypes for unsolicited status notification lost */
#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
-#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020
/* topologie that is detected by the adapter */
#define FSF_TOPO_P2P 0x00000001
@@ -145,24 +141,28 @@
#define FSF_DATADIR_WRITE 0x00000001
#define FSF_DATADIR_READ 0x00000002
#define FSF_DATADIR_CMND 0x00000004
+#define FSF_DATADIR_DIF_WRITE_INSERT 0x00000009
+#define FSF_DATADIR_DIF_READ_STRIP 0x0000000a
+#define FSF_DATADIR_DIF_WRITE_CONVERT 0x0000000b
+#define FSF_DATADIR_DIF_READ_CONVERT 0X0000000c
+
+/* data protection control flags */
+#define FSF_APP_TAG_CHECK_ENABLE 0x10
/* fc service class */
#define FSF_CLASS_3 0x00000003
-/* SBAL chaining */
-#define FSF_MAX_SBALS_PER_REQ 36
-
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
/* channel features */
-#define FSF_FEATURE_CFDC 0x00000002
-#define FSF_FEATURE_LUN_SHARING 0x00000004
#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
+#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
+#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
/* host connection features */
#define FSF_FEATURE_NPIV_MODE 0x00000001
@@ -170,20 +170,6 @@
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
-/* open LUN access flags*/
-#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
-#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
-
-/* FSF interface for CFDC */
-#define ZFCP_CFDC_MAX_SIZE 127 * 1024
-#define ZFCP_CFDC_PAGES PFN_UP(ZFCP_CFDC_MAX_SIZE)
-
-struct zfcp_fsf_cfdc {
- struct scatterlist sg[ZFCP_CFDC_PAGES];
- u32 command;
- u32 option;
-};
-
struct fsf_queue_designator {
u8 cssid;
u8 chpid;
@@ -227,7 +213,8 @@ struct fsf_status_read_buffer {
u32 length;
u32 res1;
struct fsf_queue_designator queue_designator;
- u32 d_id;
+ u8 res2;
+ u8 d_id[3];
u32 class;
u64 fcp_lun;
u8 res3[24];
@@ -308,22 +295,7 @@ struct fsf_qtcb_header {
u8 res4[16];
} __attribute__ ((packed));
-struct fsf_nport_serv_param {
- u8 common_serv_param[16];
- u64 wwpn;
- u64 wwnn;
- u8 class1_serv_param[16];
- u8 class2_serv_param[16];
- u8 class3_serv_param[16];
- u8 class4_serv_param[16];
- u8 vendor_version_level[16];
-} __attribute__ ((packed));
-
#define FSF_PLOGI_MIN_LEN 112
-struct fsf_plogi {
- u32 code;
- struct fsf_nport_serv_param serv_param;
-} __attribute__ ((packed));
#define FSF_FCP_CMND_SIZE 288
#define FSF_FCP_RSP_SIZE 128
@@ -331,9 +303,14 @@ struct fsf_plogi {
struct fsf_qtcb_bottom_io {
u32 data_direction;
u32 service_class;
- u8 res1[8];
+ u8 res1;
+ u8 data_prot_flags;
+ u16 app_tag_value;
+ u32 ref_tag_value;
u32 fcp_cmnd_length;
- u8 res2[12];
+ u32 data_block_length;
+ u32 prot_data_length;
+ u8 res2[4];
u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
u8 fcp_rsp[FSF_FCP_RSP_SIZE];
u8 res3[64];
@@ -341,8 +318,8 @@ struct fsf_qtcb_bottom_io {
struct fsf_qtcb_bottom_support {
u32 operation_subtype;
- u8 res1[12];
- u32 d_id;
+ u8 res1[13];
+ u8 d_id[3];
u32 option;
u64 fcp_lun;
u64 res2;
@@ -359,6 +336,8 @@ struct fsf_qtcb_bottom_support {
u8 els[256];
} __attribute__ ((packed));
+#define ZFCP_FSF_TIMER_INT_MASK 0x3FFF
+
struct fsf_qtcb_bottom_config {
u32 lic_version;
u32 feature_selection;
@@ -371,18 +350,18 @@ struct fsf_qtcb_bottom_config {
u32 fc_topology;
u32 fc_link_speed;
u32 adapter_type;
- u32 peer_d_id;
- u8 res1[2];
+ u8 res0;
+ u8 peer_d_id[3];
+ u16 status_read_buf_num;
u16 timer_interval;
- u8 res2[8];
- u32 s_id;
- struct fsf_nport_serv_param nport_serv_param;
- u8 reserved_nport_serv_param[16];
+ u8 res2[9];
+ u8 s_id[3];
+ u8 nport_serv_param[128];
u8 res3[8];
u32 adapter_ports;
u32 hardware_version;
u8 serial_number[32];
- struct fsf_nport_serv_param plogi_payload;
+ u8 plogi_payload[112];
struct fsf_statistics_info stat_info;
u8 res4[112];
} __attribute__ ((packed));
@@ -449,4 +428,22 @@ struct zfcp_blk_drv_data {
u64 fabric_lat;
} __attribute__ ((packed));
+/**
+ * struct zfcp_fsf_ct_els - zfcp data for ct or els request
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+ * @status: used to pass error status to calling function
+ */
+struct zfcp_fsf_ct_els {
+ struct scatterlist *req;
+ struct scatterlist *resp;
+ void (*handler)(void *);
+ void *handler_data;
+ struct zfcp_port *port;
+ int status;
+};
+
#endif /* FSF_H */
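
Note that d_id, s_id and peer_d_id change from u32 fields to three-byte arrays, matching the 24-bit FC address format carried on the wire; that is presumably one reason <scsi/libfc.h> is now included, since the libfc headers carry 24-bit conversion helpers. The sketch below is a self-contained user-space illustration of the conversion in both directions, with locally defined helper names rather than the driver's.

        /*
         * 24-bit FC destination ID packed big-endian into u8[3],
         * as the QTCB fields above now expect.
         * Build: cc -o d_id d_id.c && ./d_id
         */
        #include <stdio.h>
        #include <stdint.h>

        static uint32_t d_id_to_u32(const uint8_t p[3])
        {
                return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
        }

        static void u32_to_d_id(uint8_t p[3], uint32_t d_id)
        {
                p[0] = (d_id >> 16) & 0xff;
                p[1] = (d_id >> 8) & 0xff;
                p[2] = d_id & 0xff;
        }

        int main(void)
        {
                uint8_t raw[3];
                uint32_t d_id = 0xfffffc; /* well-known directory server */

                u32_to_d_id(raw, d_id);
                printf("0x%06x -> %02x %02x %02x -> 0x%06x\n",
                       d_id, raw[0], raw[1], raw[2], d_id_to_u32(raw));
                return 0;
        }
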
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e0a215309df..06025cdaa4a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,16 +3,23 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/slab.h>
+#include <linux/module.h>
#include "zfcp_ext.h"
+#include "zfcp_qdio.h"
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
+static bool enable_multibuffer = 1;
+module_param_named(datarouter, enable_multibuffer, bool, 0400);
+MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
+
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
int pos;
@@ -28,40 +35,21 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}
-static struct qdio_buffer_element *
-zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
-{
- return &q->sbal[sbal_idx]->element[sbale_idx];
-}
-
-/**
- * zfcp_qdio_free - free memory used by request- and resposne queue
- * @adapter: pointer to the zfcp_adapter structure
- */
-void zfcp_qdio_free(struct zfcp_adapter *adapter)
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+ unsigned int qdio_err)
{
- struct qdio_buffer **sbal_req, **sbal_resp;
- int p;
+ struct zfcp_adapter *adapter = qdio->adapter;
- if (adapter->ccw_device)
- qdio_free(adapter->ccw_device);
-
- sbal_req = adapter->req_q.sbal;
- sbal_resp = adapter->resp_q.sbal;
-
- for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
- free_page((unsigned long) sbal_req[p]);
- free_page((unsigned long) sbal_resp[p]);
- }
-}
-
-static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id)
-{
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
+ if (qdio_err & QDIO_ERROR_SLSB_STATE) {
+ zfcp_qdio_siosl(adapter);
+ zfcp_erp_adapter_shutdown(adapter, 0, id);
+ return;
+ }
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
- ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
+ ZFCP_STATUS_COMMON_ERP_FAILED, id);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -75,101 +63,70 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
}
/* this needs to be called prior to updating the queue fill level */
-static void zfcp_qdio_account(struct zfcp_adapter *adapter)
+static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
- ktime_t now;
- s64 span;
- int free, used;
-
- spin_lock(&adapter->qdio_stat_lock);
- now = ktime_get();
- span = ktime_us_delta(now, adapter->req_q_time);
- free = max(0, atomic_read(&adapter->req_q.count));
- used = QDIO_MAX_BUFFERS_PER_Q - free;
- adapter->req_q_util += used * span;
- adapter->req_q_time = now;
- spin_unlock(&adapter->qdio_stat_lock);
+ unsigned long long now, span;
+ int used;
+
+ now = get_tod_clock_monotonic();
+ span = (now - qdio->req_q_time) >> 12;
+ used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
+ qdio->req_q_util += used * span;
+ qdio->req_q_time = now;
}
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
- int queue_no, int first, int count,
+ int queue_no, int idx, int count,
unsigned long parm)
{
- struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
- struct zfcp_qdio_queue *queue = &adapter->req_q;
+ struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
if (unlikely(qdio_err)) {
- zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
- zfcp_qdio_handler_error(adapter, "qdireq1");
+ zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
return;
}
/* cleanup all SBALs being program-owned now */
- zfcp_qdio_zero_sbals(queue->sbal, first, count);
-
- zfcp_qdio_account(adapter);
- atomic_add(count, &queue->count);
- wake_up(&adapter->request_wq);
-}
-
-static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
- unsigned long req_id, int sbal_idx)
-{
- struct zfcp_fsf_req *fsf_req;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- fsf_req = zfcp_reqlist_find(adapter, req_id);
-
- if (!fsf_req)
- /*
- * Unknown request means that we have potentially memory
- * corruption and must stop the machine immediatly.
- */
- panic("error: unknown request id (%lx) on adapter %s.\n",
- req_id, dev_name(&adapter->ccw_device->dev));
-
- zfcp_reqlist_remove(adapter, fsf_req);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
- fsf_req->sbal_response = sbal_idx;
- fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
- zfcp_fsf_req_complete(fsf_req);
-}
-
-static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
-{
- struct zfcp_qdio_queue *queue = &adapter->resp_q;
- struct ccw_device *cdev = adapter->ccw_device;
- u8 count, start = queue->first;
- unsigned int retval;
-
- count = atomic_read(&queue->count) + processed;
+ zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
- retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
-
- if (unlikely(retval)) {
- atomic_set(&queue->count, count);
- /* FIXME: Recover this with an adapter reopen? */
- } else {
- queue->first += count;
- queue->first %= QDIO_MAX_BUFFERS_PER_Q;
- atomic_set(&queue->count, 0);
- }
+ spin_lock_irq(&qdio->stat_lock);
+ zfcp_qdio_account(qdio);
+ spin_unlock_irq(&qdio->stat_lock);
+ atomic_add(count, &qdio->req_q_free);
+ wake_up(&qdio->req_q_wq);
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
- int queue_no, int first, int count,
+ int queue_no, int idx, int count,
unsigned long parm)
{
- struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
- struct zfcp_qdio_queue *queue = &adapter->resp_q;
- struct qdio_buffer_element *sbale;
- int sbal_idx, sbale_idx, sbal_no;
+ struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+ struct zfcp_adapter *adapter = qdio->adapter;
+ int sbal_no, sbal_idx;
if (unlikely(qdio_err)) {
- zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
- zfcp_qdio_handler_error(adapter, "qdires1");
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
+ void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+ struct qdio_buffer_element *sbale;
+ u64 req_id;
+ u8 scount;
+
+ memset(pl, 0,
+ ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ sbale = qdio->res_q[idx]->element;
+ req_id = (u64) sbale->addr;
+ scount = min(sbale->scount + 1,
+ ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
+ /* incl. signaling SBAL */
+
+ for (sbal_no = 0; sbal_no < scount; sbal_no++) {
+ sbal_idx = (idx + sbal_no) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ pl[sbal_no] = qdio->res_q[sbal_idx];
+ }
+ zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
+ }
+ zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
return;
}
@@ -178,318 +135,377 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
* returned by QDIO layer
*/
for (sbal_no = 0; sbal_no < count; sbal_no++) {
- sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
-
+ sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
/* go through all SBALEs of SBAL */
- for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
- sbale_idx++) {
- sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
- zfcp_qdio_reqid_check(adapter,
- (unsigned long) sbale->addr,
- sbal_idx);
- if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
- break;
- };
-
- if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
- dev_warn(&adapter->ccw_device->dev,
- "A QDIO protocol error occurred, "
- "operations continue\n");
+ zfcp_fsf_reqid_check(qdio, sbal_idx);
}
/*
- * put range of SBALs back to response queue
- * (including SBALs which have already been free before)
+ * put SBALs back to response queue
*/
- zfcp_qdio_resp_put_back(adapter, count);
-}
-
-/**
- * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
- * @fsf_req: pointer to struct fsf_req
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
-{
- return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
-}
-
-/**
- * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
- * @fsf_req: pointer to struct fsf_req
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
-{
- return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
- req->sbale_curr);
-}
-
-static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
-{
- int count = atomic_read(&fsf_req->adapter->req_q.count);
- count = min(count, max_sbals);
- fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
- % QDIO_MAX_BUFFERS_PER_Q;
+ if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
/* set last entry flag in current SBALE of current SBAL */
- sbale = zfcp_qdio_sbale_curr(fsf_req);
- sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
/* don't exceed last allowed SBAL */
- if (fsf_req->sbal_last == fsf_req->sbal_limit)
+ if (q_req->sbal_last == q_req->sbal_limit)
return NULL;
/* set chaining flag in first SBALE of current SBAL */
- sbale = zfcp_qdio_sbale_req(fsf_req);
- sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
+ sbale = zfcp_qdio_sbale_req(qdio, q_req);
+ sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
/* calculate index of next SBAL */
- fsf_req->sbal_last++;
- fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+ q_req->sbal_last++;
+ q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
/* keep this requests number of SBALs up-to-date */
- fsf_req->sbal_number++;
+ q_req->sbal_number++;
+ BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
/* start at first SBALE of new SBAL */
- fsf_req->sbale_curr = 0;
+ q_req->sbale_curr = 0;
/* set storage-block type for new SBAL */
- sbale = zfcp_qdio_sbale_curr(fsf_req);
- sbale->flags |= sbtype;
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->sflags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
-{
- if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
- return zfcp_qdio_sbal_chain(fsf_req, sbtype);
- fsf_req->sbale_curr++;
- return zfcp_qdio_sbale_curr(fsf_req);
-}
-
-static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
- int first = fsf_req->sbal_first;
- int last = fsf_req->sbal_last;
- int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
- QDIO_MAX_BUFFERS_PER_Q + 1;
- zfcp_qdio_zero_sbals(sbal, first, count);
+ if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
+ return zfcp_qdio_sbal_chain(qdio, q_req);
+ q_req->sbale_curr++;
+ return zfcp_qdio_sbale_curr(qdio, q_req);
}
-static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
- unsigned int sbtype, void *start_addr,
- unsigned int total_length)
+/**
+ * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * @sg: scatter-gather list
+ * Returns: zero or -EINVAL on error
+ */
+int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ struct scatterlist *sg)
{
struct qdio_buffer_element *sbale;
- unsigned long remaining, length;
- void *addr;
- /* split segment up */
- for (addr = start_addr, remaining = total_length; remaining > 0;
- addr += length, remaining -= length) {
- sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
+ /* set storage-block type for this request */
+ sbale = zfcp_qdio_sbale_req(qdio, q_req);
+ sbale->sflags |= q_req->sbtype;
+
+ for (; sg; sg = sg_next(sg)) {
+ sbale = zfcp_qdio_sbale_next(qdio, q_req);
if (!sbale) {
- atomic_inc(&fsf_req->adapter->qdio_outb_full);
- zfcp_qdio_undo_sbals(fsf_req);
+ atomic_inc(&qdio->req_q_full);
+ zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+ q_req->sbal_number);
return -EINVAL;
}
-
- /* new piece must not exceed next page boundary */
- length = min(remaining,
- (PAGE_SIZE - ((unsigned long)addr &
- (PAGE_SIZE - 1))));
- sbale->addr = addr;
- sbale->length = length;
+ sbale->addr = sg_virt(sg);
+ sbale->length = sg->length;
}
return 0;
}
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+ if (atomic_read(&qdio->req_q_free) ||
+ !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return 1;
+ return 0;
+}
+
/**
- * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
- * @sg: scatter-gather list
- * @max_sbals: upper bound for number of SBALs to be used
- * Returns: number of bytes, or error (negativ)
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
*/
-int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
- struct scatterlist *sg, int max_sbals)
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
- struct qdio_buffer_element *sbale;
- int retval, bytes = 0;
+ long ret;
- /* figure out last allowed SBAL */
- zfcp_qdio_sbal_limit(fsf_req, max_sbals);
+ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
- /* set storage-block type for this request */
- sbale = zfcp_qdio_sbale_req(fsf_req);
- sbale->flags |= sbtype;
+ if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return -EIO;
- for (; sg; sg = sg_next(sg)) {
- retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
- sg->length);
- if (retval < 0)
- return retval;
- bytes += sg->length;
- }
+ if (ret > 0)
+ return 0;
- /* assume that no other SBALEs are to follow in the same SBAL */
- sbale = zfcp_qdio_sbale_curr(fsf_req);
- sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+ if (!ret) {
+ atomic_inc(&qdio->req_q_full);
+ /* assume hanging outbound queue, try queue recovery */
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+ }
- return bytes;
+ return -EIO;
}
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
* Returns: 0 on success, error otherwise
*/
-int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
+int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- struct zfcp_adapter *adapter = fsf_req->adapter;
- struct zfcp_qdio_queue *req_q = &adapter->req_q;
- int first = fsf_req->sbal_first;
- int count = fsf_req->sbal_number;
int retval;
- unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+ u8 sbal_number = q_req->sbal_number;
- zfcp_qdio_account(adapter);
+ spin_lock(&qdio->stat_lock);
+ zfcp_qdio_account(qdio);
+ spin_unlock(&qdio->stat_lock);
+
+ retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
+ q_req->sbal_first, sbal_number);
- retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count);
if (unlikely(retval)) {
- zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+ zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+ sbal_number);
return retval;
}
/* account for transferred buffers */
- atomic_sub(count, &req_q->count);
- req_q->first += count;
- req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
+ atomic_sub(sbal_number, &qdio->req_q_free);
+ qdio->req_q_idx += sbal_number;
+ qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
+
return 0;
}
+
+static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
+ struct zfcp_qdio *qdio)
+{
+ memset(id, 0, sizeof(*id));
+ id->cdev = qdio->adapter->ccw_device;
+ id->q_format = QDIO_ZFCP_QFMT;
+ memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
+ ASCEBC(id->adapter_name, 8);
+ id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
+ if (enable_multibuffer)
+ id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
+ id->no_input_qs = 1;
+ id->no_output_qs = 1;
+ id->input_handler = zfcp_qdio_int_resp;
+ id->output_handler = zfcp_qdio_int_req;
+ id->int_parm = (unsigned long) qdio;
+ id->input_sbal_addr_array = (void **) (qdio->res_q);
+ id->output_sbal_addr_array = (void **) (qdio->req_q);
+ id->scan_threshold =
+ QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
+}
+
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @adapter: pointer to struct zfcp_adapter
* Returns: -ENOMEM on memory allocation error or return value from
* qdio_allocate
*/
-int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
+static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
- struct qdio_initialize *init_data;
+ struct qdio_initialize init_data;
- if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
- zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
+ if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
+ zfcp_qdio_buffers_enqueue(qdio->res_q))
return -ENOMEM;
- init_data = &adapter->qdio_init_data;
-
- init_data->cdev = adapter->ccw_device;
- init_data->q_format = QDIO_ZFCP_QFMT;
- memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
- ASCEBC(init_data->adapter_name, 8);
- init_data->qib_param_field_format = 0;
- init_data->qib_param_field = NULL;
- init_data->input_slib_elements = NULL;
- init_data->output_slib_elements = NULL;
- init_data->no_input_qs = 1;
- init_data->no_output_qs = 1;
- init_data->input_handler = zfcp_qdio_int_resp;
- init_data->output_handler = zfcp_qdio_int_req;
- init_data->int_parm = (unsigned long) adapter;
- init_data->flags = QDIO_INBOUND_0COPY_SBALS |
- QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
- init_data->input_sbal_addr_array =
- (void **) (adapter->resp_q.sbal);
- init_data->output_sbal_addr_array =
- (void **) (adapter->req_q.sbal);
-
- return qdio_allocate(init_data);
+ zfcp_qdio_setup_init_data(&init_data, qdio);
+ init_waitqueue_head(&qdio->req_q_wq);
+
+ return qdio_allocate(&init_data);
}
/**
* zfcp_close_qdio - close qdio queues for an adapter
+ * @qdio: pointer to structure zfcp_qdio
*/
-void zfcp_qdio_close(struct zfcp_adapter *adapter)
+void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
- struct zfcp_qdio_queue *req_q;
- int first, count;
+ struct zfcp_adapter *adapter = qdio->adapter;
+ int idx, count;
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
- req_q = &adapter->req_q;
- spin_lock_bh(&adapter->req_q_lock);
+ spin_lock_irq(&qdio->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
- spin_unlock_bh(&adapter->req_q_lock);
+ spin_unlock_irq(&qdio->req_q_lock);
+
+ wake_up(&qdio->req_q_wq);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
- count = atomic_read(&req_q->count);
+ count = atomic_read(&qdio->req_q_free);
if (count < QDIO_MAX_BUFFERS_PER_Q) {
- first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
+ idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
count = QDIO_MAX_BUFFERS_PER_Q - count;
- zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+ zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
}
- req_q->first = 0;
- atomic_set(&req_q->count, 0);
- adapter->resp_q.first = 0;
- atomic_set(&adapter->resp_q.count, 0);
+ qdio->req_q_idx = 0;
+ atomic_set(&qdio->req_q_free, 0);
}
/**
* zfcp_qdio_open - prepare and initialize response queue
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, otherwise -EIO
*/
-int zfcp_qdio_open(struct zfcp_adapter *adapter)
+int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
struct qdio_buffer_element *sbale;
+ struct qdio_initialize init_data;
+ struct zfcp_adapter *adapter = qdio->adapter;
+ struct ccw_device *cdev = adapter->ccw_device;
+ struct qdio_ssqd_desc ssqd;
int cc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
- if (qdio_establish(&adapter->qdio_init_data))
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+ &qdio->adapter->status);
+
+ zfcp_qdio_setup_init_data(&init_data, qdio);
+
+ if (qdio_establish(&init_data))
goto failed_establish;
- if (qdio_activate(adapter->ccw_device))
+ if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
+ goto failed_qdio;
+
+ if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+ &qdio->adapter->status);
+
+ if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+ qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
+ } else {
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+ qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
+ }
+
+ qdio->max_sbale_per_req =
+ ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
+ - 2;
+ if (qdio_activate(cdev))
goto failed_qdio;
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
- sbale = &(adapter->resp_q.sbal[cc]->element[0]);
+ sbale = &(qdio->res_q[cc]->element[0]);
sbale->length = 0;
- sbale->flags = SBAL_FLAGS_LAST_ENTRY;
+ sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
+ sbale->sflags = 0;
sbale->addr = NULL;
}
- if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
- QDIO_MAX_BUFFERS_PER_Q))
+ if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
- /* set index of first avalable SBALS / number of available SBALS */
- adapter->req_q.first = 0;
- atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
+ /* set index of first available SBALS / number of available SBALS */
+ qdio->req_q_idx = 0;
+ atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+
+ if (adapter->scsi_host) {
+ adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
+ adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
+ }
return 0;
failed_qdio:
- qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
- dev_err(&adapter->ccw_device->dev,
+ dev_err(&cdev->dev,
"Setting up the QDIO connection to the FCP adapter failed\n");
return -EIO;
}
+
+void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
+{
+ int p;
+
+ if (!qdio)
+ return;
+
+ if (qdio->adapter->ccw_device)
+ qdio_free(qdio->adapter->ccw_device);
+
+ for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
+ free_page((unsigned long) qdio->req_q[p]);
+ free_page((unsigned long) qdio->res_q[p]);
+ }
+
+ kfree(qdio);
+}
+
+int zfcp_qdio_setup(struct zfcp_adapter *adapter)
+{
+ struct zfcp_qdio *qdio;
+
+ qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
+ if (!qdio)
+ return -ENOMEM;
+
+ qdio->adapter = adapter;
+
+ if (zfcp_qdio_allocate(qdio)) {
+ zfcp_qdio_destroy(qdio);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&qdio->req_q_lock);
+ spin_lock_init(&qdio->stat_lock);
+
+ adapter->qdio = qdio;
+ return 0;
+}
+
+/**
+ * zfcp_qdio_siosl - Trigger logging in FCP channel
+ * @adapter: The zfcp_adapter where to trigger logging
+ *
+ * Call the cio siosl function to trigger hardware logging. This
+ * wrapper function sets a flag to ensure hardware logging is only
+ * triggered once before going through qdio shutdown.
+ *
+ * The triggers are always run from qdio tasklet context, so no
+ * additional synchronization is necessary.
+ */
+void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
+{
+ int rc;
+
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
+ return;
+
+ rc = ccw_device_siosl(adapter->ccw_device);
+ if (!rc)
+ atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+ &adapter->status);
+}
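
The reworked zfcp_qdio_account() keeps raw monotonic TOD values: on s390 the TOD clock advances bit 51 once per microsecond, so shifting a TOD delta right by 12 yields a span in microseconds, and req_q_util accumulates used SBALs multiplied by microseconds. The standalone model below (user space, not driver code) shows how two snapshots of these counters give an average outbound queue occupancy.

        /*
         * Average request queue occupancy from two counter snapshots,
         * following the accounting in zfcp_qdio_account().
         * Build: cc -o util util.c && ./util
         */
        #include <stdio.h>
        #include <stdint.h>

        #define QDIO_MAX_BUFFERS_PER_Q  128

        struct sample {
                uint64_t tod;           /* raw TOD clock value */
                uint64_t req_q_util;    /* accumulated used * span(us) */
        };

        static double avg_occupancy(const struct sample *a,
                                    const struct sample *b)
        {
                /* TOD delta to microseconds: bit 51 ticks once per us */
                uint64_t span_us = (b->tod - a->tod) >> 12;

                if (!span_us)
                        return 0.0;
                return (double)(b->req_q_util - a->req_q_util) / span_us;
        }

        int main(void)
        {
                /* e.g. 40 of 128 SBALs busy on average over one second */
                struct sample t0 = { .tod = 0, .req_q_util = 0 };
                struct sample t1 = { .tod = 1000000ULL << 12,
                                     .req_q_util = 40ULL * 1000000 };

                printf("average outbound occupancy: %.1f of %d SBALs\n",
                       avg_occupancy(&t0, &t1), QDIO_MAX_BUFFERS_PER_Q);
                return 0;
        }
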
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 00000000000..497cd379b0d
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,271 @@
+/*
+ * zfcp device driver
+ *
+ * Header file for zfcp qdio interface
+ *
+ * Copyright IBM Corp. 2010
+ */
+
+#ifndef ZFCP_QDIO_H
+#define ZFCP_QDIO_H
+
+#include <asm/qdio.h>
+
+#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
+
+/* Max SBALS for chaining */
+#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
+
+/**
+ * struct zfcp_qdio - basic qdio data structure
+ * @res_q: response queue
+ * @req_q: request queue
+ * @req_q_idx: index of next free buffer
+ * @req_q_free: number of free buffers in queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock: lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this qdio structure
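+ * @max_sbale_per_sbal: maximum number of SBALEs per SBAL
+ * @max_sbale_per_req: maximum number of SBALEs per request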
+ */
+struct zfcp_qdio {
+ struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
+ struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
+ u8 req_q_idx;
+ atomic_t req_q_free;
+ spinlock_t stat_lock;
+ spinlock_t req_q_lock;
+ unsigned long long req_q_time;
+ u64 req_q_util;
+ atomic_t req_q_full;
+ wait_queue_head_t req_q_wq;
+ struct zfcp_adapter *adapter;
+ u16 max_sbale_per_sbal;
+ u16 max_sbale_per_req;
+};
+
+/**
+ * struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
+ * @sbal_number: number of sbals used by this request
+ * @sbal_first: first sbal for this request
+ * @sbal_last: last sbal for this request
+ * @sbal_limit: last possible sbal for this request
+ * @sbale_curr: current sbale at creation of this request
+ * @sbal_response: sbal used in interrupt
+ * @qdio_outb_usage: usage of outbound queue
+ */
+struct zfcp_qdio_req {
+ u8 sbtype;
+ u8 sbal_number;
+ u8 sbal_first;
+ u8 sbal_last;
+ u8 sbal_limit;
+ u8 sbale_curr;
+ u8 sbal_response;
+ u16 qdio_outb_usage;
+};
+
+/**
+ * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ return &qdio->req_q[q_req->sbal_last]->element[0];
+}
+
+/**
+ * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
+}
+
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue, the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ unsigned long req_id, u8 sbtype, void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+ int count = min(atomic_read(&qdio->req_q_free),
+ ZFCP_QDIO_MAX_SBALS_PER_REQ);
+
+ q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
+ q_req->sbal_number = 1;
+ q_req->sbtype = sbtype;
+ q_req->sbale_curr = 1;
+ q_req->sbal_limit = (q_req->sbal_first + count - 1)
+ % QDIO_MAX_BUFFERS_PER_Q;
+
+ sbale = zfcp_qdio_sbale_req(qdio, q_req);
+ sbale->addr = (void *) req_id;
+ sbale->eflags = 0;
+ sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
+
+ if (unlikely(!data))
+ return;
+ sbale++;
+ sbale->addr = data;
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ *
+ * This is only required for single sbal requests, calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
+ q_req->sbale_curr++;
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->addr = data;
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ * 0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+ return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
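+ * @qdio: pointer to struct zfcp_qdio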
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req)
+{
+ q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
+}
+
+/**
+ * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @max_sbals: maximum number of SBALs allowed
+ */
+static inline
+void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req, int max_sbals)
+{
+ int count = min(atomic_read(&qdio->req_q_free), max_sbals);
+
+ q_req->sbal_limit = (q_req->sbal_first + count - 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+}
+
+/**
+ * zfcp_qdio_set_data_div - set data division count
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @count: The data division count
+ */
+static inline
+void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req, u32 count)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = qdio->req_q[q_req->sbal_first]->element;
+ sbale->length = count;
+}
+
+/**
+ * zfcp_qdio_sbale_count - count sbales used
+ * @sg: pointer to struct scatterlist
+ */
+static inline
+unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
+{
+ unsigned int count = 0;
+
+ for (; sg; sg = sg_next(sg))
+ count++;
+
+ return count;
+}
+
+/**
+ * zfcp_qdio_real_bytes - count bytes used
+ * @sg: pointer to struct scatterlist
+ */
+static inline
+unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
+{
+ unsigned int real_bytes = 0;
+
+ for (; sg; sg = sg_next(sg))
+ real_bytes += sg->length;
+
+ return real_bytes;
+}
+
+/**
+ * zfcp_qdio_set_scount - set SBAL count value
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = qdio->req_q[q_req->sbal_first]->element;
+ sbale->scount = q_req->sbal_number - 1;
+}
+
+#endif /* ZFCP_QDIO_H */
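
Taken together, the inline helpers above cover the usual request-build sequence. The following is an illustrative sketch only, not part of the patch: it strings the helpers together for a request with one immediate data SBALE, assuming the caller already holds the queue lock as required by zfcp_qdio_req_init and assuming the generic qdio SBAL type flag SBAL_SFLAGS0_TYPE_WRITE; the payload names are hypothetical.

/* Illustrative sketch, not part of this patch: build a single-SBAL request. */
static void example_build_request(struct zfcp_qdio *qdio,
				  struct zfcp_qdio_req *q_req,
				  unsigned long req_id,
				  void *payload, u32 payload_len)
{
	/* first SBALE gets the request id and command/type flags,
	 * second SBALE (optional) gets the immediate payload */
	zfcp_qdio_req_init(qdio, q_req, req_id, SBAL_SFLAGS0_TYPE_WRITE,
			   payload, payload_len);

	/* further small buffers within the same SBAL would go here:
	 * zfcp_qdio_fill_next(qdio, q_req, more_data, more_len); */

	/* mark the current SBALE as the last entry before sending */
	zfcp_qdio_set_sbale_last(qdio, q_req);
}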
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 00000000000..7c2c6194dfc
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
+/*
+ * zfcp device driver
+ *
+ * Data structure and helper functions for tracking pending FSF
+ * requests.
+ *
+ * Copyright IBM Corp. 2009
+ */
+
+#ifndef ZFCP_REQLIST_H
+#define ZFCP_REQLIST_H
+
+/* number of hash buckets */
+#define ZFCP_REQ_LIST_BUCKETS 128
+
+/**
+ * struct zfcp_reqlist - Container for request list (reqlist)
+ * @lock: Spinlock for protecting the hash list
+ * @buckets: Array of hash buckets; each is a list of requests in that bucket
+ */
+struct zfcp_reqlist {
+ spinlock_t lock;
+ struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
+};
+
+static inline int zfcp_reqlist_hash(unsigned long req_id)
+{
+ return req_id % ZFCP_REQ_LIST_BUCKETS;
+}
+
+/**
+ * zfcp_reqlist_alloc - Allocate and initialize reqlist
+ *
+ * Returns pointer to allocated reqlist on success, or NULL on
+ * allocation failure.
+ */
+static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
+{
+ unsigned int i;
+ struct zfcp_reqlist *rl;
+
+ rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
+ if (!rl)
+ return NULL;
+
+ spin_lock_init(&rl->lock);
+
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ INIT_LIST_HEAD(&rl->buckets[i]);
+
+ return rl;
+}
+
+/**
+ * zfcp_reqlist_isempty - Check whether the request list is empty
+ * @rl: pointer to reqlist
+ *
+ * Returns: 1 if list is empty, 0 if not
+ */
+static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ if (!list_empty(&rl->buckets[i]))
+ return 0;
+ return 1;
+}
+
+/**
+ * zfcp_reqlist_free - Free allocated memory for reqlist
+ * @rl: The reqlist where to free memory
+ */
+static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
+{
+ /* sanity check */
+ BUG_ON(!zfcp_reqlist_isempty(rl));
+
+ kfree(rl);
+}
+
+static inline struct zfcp_fsf_req *
+_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ struct zfcp_fsf_req *req;
+ unsigned int i;
+
+ i = zfcp_reqlist_hash(req_id);
+ list_for_each_entry(req, &rl->buckets[i], list)
+ if (req->req_id == req_id)
+ return req;
+ return NULL;
+}
+
+/**
+ * zfcp_reqlist_find - Lookup FSF request by its request id
+ * @rl: The reqlist where to lookup the FSF request
+ * @req_id: The request id to look for
+ *
+ * Returns a pointer to the FSF request with the specified request id
+ * or NULL if there is no known FSF request with this id.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ unsigned long flags;
+ struct zfcp_fsf_req *req;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ req = _zfcp_reqlist_find(rl, req_id);
+ spin_unlock_irqrestore(&rl->lock, flags);
+
+ return req;
+}
+
+/**
+ * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
+ * @rl: reqlist where to search and remove entry
+ * @req_id: The request id of the request to look for
+ *
+ * This function tries to find the FSF request with the specified
+ * id and then removes it from the reqlist. The reqlist lock is held
+ * during both steps of the operation.
+ *
+ * Returns: Pointer to the FSF request if the request has been found,
+ * NULL if it has not been found.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ unsigned long flags;
+ struct zfcp_fsf_req *req;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ req = _zfcp_reqlist_find(rl, req_id);
+ if (req)
+ list_del(&req->list);
+ spin_unlock_irqrestore(&rl->lock, flags);
+
+ return req;
+}
+
+/**
+ * zfcp_reqlist_add - Add entry to reqlist
+ * @rl: reqlist where to add the entry
+ * @req: The entry to add
+ *
+ * The request id always increases. As an optimization, new requests
+ * are added here with list_add_tail at the end of the bucket lists
+ * while old requests are looked up starting at the beginning of the
+ * lists.
+ */
+static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
+ struct zfcp_fsf_req *req)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ i = zfcp_reqlist_hash(req->req_id);
+
+ spin_lock_irqsave(&rl->lock, flags);
+ list_add_tail(&req->list, &rl->buckets[i]);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+/**
+ * zfcp_reqlist_move - Move all entries from reqlist to simple list
+ * @rl: The zfcp_reqlist where to remove all entries
+ * @list: The list where to move all entries
+ */
+static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
+ struct list_head *list)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ list_splice_init(&rl->buckets[i], list);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+#endif /* ZFCP_REQLIST_H */
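
As a quick orientation for the new header, here is an illustrative round trip through the reqlist API, not part of the patch. It assumes a struct zfcp_fsf_req that provides the req_id and list members used above (as declared in zfcp_def.h) and leaves out the real send/completion machinery.

/* Illustrative sketch, not part of this patch: track one pending request. */
static int example_reqlist_roundtrip(struct zfcp_fsf_req *req)
{
	struct zfcp_reqlist *rl;

	rl = zfcp_reqlist_alloc();
	if (!rl)
		return -ENOMEM;

	/* issue path: remember the request under its id */
	zfcp_reqlist_add(rl, req);

	/* completion path: look the request up by id and unhook it */
	WARN_ON(zfcp_reqlist_find_rm(rl, req->req_id) != req);

	/* freeing is only legal once the list is empty again */
	zfcp_reqlist_free(rl);
	return 0;
}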
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e8fbeaeb5fb..7b353647cb9 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,38 +3,70 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corp. 2002, 2013
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_eh.h>
+#include <linux/atomic.h>
#include "zfcp_ext.h"
-#include <asm/atomic.h>
+#include "zfcp_dbf.h"
+#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
-/* Find start of Sense Information in FCP response unit*/
-char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
-{
- char *fcp_sns_info_ptr;
+static unsigned int default_depth = 32;
+module_param_named(queue_depth, default_depth, uint, 0600);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
+
+static bool enable_dif;
+module_param_named(dif, enable_dif, bool, 0400);
+MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
- fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1];
- if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
- fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len;
+static bool allow_lun_scan = 1;
+module_param(allow_lun_scan, bool, 0600);
+MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
- return fcp_sns_info_ptr;
+static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
+ int reason)
+{
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ scsi_track_queue_full(sdev, depth);
+ break;
+ case SCSI_QDEPTH_RAMP_UP:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return sdev->queue_depth;
}
-static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
+static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
- struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
- unit->device = NULL;
- zfcp_unit_put(unit);
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+ /* if previous slave_alloc returned early, there is nothing to do */
+ if (!zfcp_sdev->port)
+ return;
+
+ zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
+ put_device(&zfcp_sdev->port->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
- scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
+ scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
else
scsi_adjust_queue_depth(sdp, 0, 1);
return 0;
@@ -43,58 +75,51 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(scpnt, result);
- if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
- zfcp_scsi_dbf_event_result("fail", 4,
- (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
- scpnt, NULL);
- /* return directly */
+ zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
}
-static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
- void (*done) (struct scsi_cmnd *))
+static
+int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
{
- struct zfcp_unit *unit;
- struct zfcp_adapter *adapter;
- int status, scsi_result, ret;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
+ int status, scsi_result, ret;
/* reset the status for this request */
scpnt->result = 0;
scpnt->host_scribble = NULL;
- scpnt->scsi_done = done;
-
- /*
- * figure out adapter and target device
- * (stored there by zfcp_scsi_slave_alloc)
- */
- adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
- unit = scpnt->device->hostdata;
-
- BUG_ON(!adapter || (adapter != unit->port->adapter));
- BUG_ON(!scpnt->scsi_done);
-
- if (unlikely(!unit)) {
- zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
- return 0;
- }
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
- zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL);
+ zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
return 0;
}
- status = atomic_read(&unit->status);
- if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
- !(status & ZFCP_STATUS_COMMON_RUNNING))) {
+ status = atomic_read(&zfcp_sdev->status);
+ if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
+ !(atomic_read(&zfcp_sdev->port->status) &
+ ZFCP_STATUS_COMMON_ERP_FAILED)) {
+ /* only LUN access denied, but port is good
+ * not covered by FC transport, have to fail here */
zfcp_scsi_command_fail(scpnt, DID_ERROR);
- return 0;;
+ return 0;
+ }
+
+ if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
+ /* This could be either
+ * open LUN pending: this is temporary, will result in
+ * open LUN or ERP_FAILED, so retry command
+ * call to rport_delete pending: mimic retry from
+ * fc_remote_port_chkready until rport is BLOCKED
+ */
+ zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
+ return 0;
}
- ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
+ ret = zfcp_fsf_fcp_cmnd(scpnt);
if (unlikely(ret == -EBUSY))
return SCSI_MLQUEUE_DEVICE_BUSY;
else if (unlikely(ret < 0))
@@ -103,50 +128,43 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
return ret;
}
-static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
- int channel, unsigned int id,
- unsigned int lun)
+static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *) sdev->host->hostdata[0];
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port;
struct zfcp_unit *unit;
- int scsi_lun;
-
- list_for_each_entry(port, &adapter->port_list_head, list) {
- if (!port->rport || (id != port->rport->scsi_target_id))
- continue;
- list_for_each_entry(unit, &port->unit_list_head, list) {
- scsi_lun = scsilun_to_int(
- (struct scsi_lun *)&unit->fcp_lun);
- if (lun == scsi_lun)
- return unit;
- }
- }
+ int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
- return NULL;
-}
+ port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+ if (!port)
+ return -ENXIO;
-static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
-{
- struct zfcp_adapter *adapter;
- struct zfcp_unit *unit;
- unsigned long flags;
- int retval = -ENXIO;
-
- adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
- if (!adapter)
- goto out;
-
- read_lock_irqsave(&zfcp_data.config_lock, flags);
- unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
- if (unit) {
- sdp->hostdata = unit;
- unit->device = sdp;
- zfcp_unit_get(unit);
- retval = 0;
+ unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
+ if (unit)
+ put_device(&unit->dev);
+
+ if (!unit && !(allow_lun_scan && npiv)) {
+ put_device(&port->dev);
+ return -ENXIO;
}
- read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-out:
- return retval;
+
+ zfcp_sdev->port = port;
+ zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
+ zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
+ zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
+ zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
+ zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
+ zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
+ spin_lock_init(&zfcp_sdev->latencies.lock);
+
+ zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
+ zfcp_erp_wait(port->adapter);
+
+ return 0;
}
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -154,23 +172,20 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
struct Scsi_Host *scsi_host = scpnt->device->host;
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
- struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
- unsigned long old_req_id = (unsigned long) scpnt->host_scribble;
- int retval = SUCCESS;
+ unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
+ int retval = SUCCESS, ret;
int retry = 3;
+ char *dbf_tag;
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
- spin_lock(&adapter->req_list_lock);
- old_req = zfcp_reqlist_find(adapter, old_req_id);
- spin_unlock(&adapter->req_list_lock);
+ old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
- zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
- old_req_id);
+ zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
return FAILED; /* completion could be in progress */
}
old_req->data = NULL;
@@ -179,71 +194,76 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
write_unlock_irqrestore(&adapter->abort_lock, flags);
while (retry--) {
- abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit);
+ abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
if (abrt_req)
break;
zfcp_erp_wait(adapter);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret) {
+ zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
+ return ret;
+ }
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
- old_req_id);
+ zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
return SUCCESS;
}
}
- if (!abrt_req)
+ if (!abrt_req) {
+ zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
return FAILED;
+ }
- wait_event(abrt_req->completion_wq,
- abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+ wait_for_completion(&abrt_req->completion);
if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
- zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0);
+ dbf_tag = "abrt_ok";
else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
- zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0);
+ dbf_tag = "abrt_nn";
else {
- zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0);
+ dbf_tag = "abrt_fa";
retval = FAILED;
}
+ zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
zfcp_fsf_req_free(abrt_req);
return retval;
}
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
- struct zfcp_unit *unit = scpnt->device->hostdata;
- struct zfcp_adapter *adapter = unit->port->adapter;
- struct zfcp_fsf_req *fsf_req;
- int retval = SUCCESS;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+ struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+ struct zfcp_fsf_req *fsf_req = NULL;
+ int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
- fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags);
+ fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
if (fsf_req)
break;
zfcp_erp_wait(adapter);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
+
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit,
- scpnt);
+ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
return SUCCESS;
}
}
if (!fsf_req)
return FAILED;
- wait_event(fsf_req->completion_wq,
- fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+ wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
- retval = FAILED;
- } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
- zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
+ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
} else
- zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
+ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
zfcp_fsf_req_free(fsf_req);
return retval;
@@ -251,26 +271,64 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
- return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET);
+ return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
- return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET);
+ return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
- struct zfcp_unit *unit = scpnt->device->hostdata;
- struct zfcp_adapter *adapter = unit->port->adapter;
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+ struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+ int ret;
- zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
+ zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
return SUCCESS;
}
-int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+ .module = THIS_MODULE,
+ .name = "zfcp",
+ .queuecommand = zfcp_scsi_queuecommand,
+ .eh_abort_handler = zfcp_scsi_eh_abort_handler,
+ .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+ .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+ .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
+ .slave_alloc = zfcp_scsi_slave_alloc,
+ .slave_configure = zfcp_scsi_slave_configure,
+ .slave_destroy = zfcp_scsi_slave_destroy,
+ .change_queue_depth = zfcp_scsi_change_queue_depth,
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+ .sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
+ /* GCD, adjusted later */
+ .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
+ /* GCD, adjusted later */
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+ .shost_attrs = zfcp_sysfs_shost_attrs,
+ .sdev_attrs = zfcp_sysfs_sdev_attrs,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
@@ -279,7 +337,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
- adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+ adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
@@ -289,12 +347,12 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
}
/* tell the SCSI stack some characteristics of this adapter */
- adapter->scsi_host->max_id = 1;
- adapter->scsi_host->max_lun = 1;
+ adapter->scsi_host->max_id = 511;
+ adapter->scsi_host->max_lun = 0xFFFFFFFF;
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
- adapter->scsi_host->max_cmd_len = 255;
- adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
+ adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
+ adapter->scsi_host->transportt = zfcp_scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -306,7 +364,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
return 0;
}
-void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
@@ -315,18 +377,15 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
if (!shost)
return;
- read_lock_irq(&zfcp_data.config_lock);
- list_for_each_entry(port, &adapter->port_list_head, list)
- if (port->rport)
- port->rport = NULL;
+ read_lock_irq(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ port->rport = NULL;
+ read_unlock_irq(&adapter->port_list_lock);
- read_unlock_irq(&zfcp_data.config_lock);
fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
-
- return;
}
static struct fc_host_statistics*
@@ -338,7 +397,7 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
if (!fc_stats)
return NULL;
- adapter->fc_stats = fc_stats; /* freed in adater_dequeue */
+ adapter->fc_stats = fc_stats; /* freed in adapter_release */
}
memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
return adapter->fc_stats;
@@ -418,7 +477,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
if (!data)
return NULL;
- ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
+ ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret) {
kfree(data);
return NULL;
@@ -447,14 +506,14 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
if (!data)
return;
- ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
+ ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
kfree(adapter->stats_reset_data);
adapter->stats_reset_data = data; /* finally freed in
- adapter_dequeue */
+ adapter_release */
}
}
@@ -481,41 +540,27 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
}
/**
- * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport
- * @rport: The rport that is about to be deleted.
- */
-static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
-{
- struct zfcp_port *port;
-
- write_lock_irq(&zfcp_data.config_lock);
- port = rport->dd_data;
- if (port)
- port->rport = NULL;
- write_unlock_irq(&zfcp_data.config_lock);
-}
-
-/**
* zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
 * @rport: The FC rport where to terminate I/O
*
* Abort all pending SCSI commands for a port by closing the
- * port. Using a reopen for avoids a conflict with a shutdown
- * overwriting a reopen.
+ * port. Using a reopen avoids a conflict with a shutdown
+ * overwriting a reopen. The "forced" ensures that a disappeared port
+ * is not opened again as valid due to the cached plogi data in
+ * non-NPIV mode.
*/
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
struct zfcp_port *port;
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *)shost->hostdata[0];
- write_lock_irq(&zfcp_data.config_lock);
- port = rport->dd_data;
- if (port)
- zfcp_port_get(port);
- write_unlock_irq(&zfcp_data.config_lock);
+ port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (port) {
- zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
- zfcp_port_put(port);
+ zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
+ put_device(&port->dev);
}
}
@@ -524,6 +569,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
struct fc_rport_identifiers ids;
struct fc_rport *rport;
+ if (port->rport)
+ return;
+
ids.node_name = port->wwnn;
ids.port_name = port->wwpn;
ids.port_id = port->d_id;
@@ -537,44 +585,54 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
return;
}
- rport->dd_data = port;
rport->maxframe_size = port->maxframe_size;
rport->supported_classes = port->supported_classes;
port->rport = rport;
+ port->starget_id = rport->scsi_target_id;
+
+ zfcp_unit_queue_scsi_scan(port);
}
static void zfcp_scsi_rport_block(struct zfcp_port *port)
{
struct fc_rport *rport = port->rport;
- if (rport)
+ if (rport) {
fc_remote_port_delete(rport);
+ port->rport = NULL;
+ }
}
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
- zfcp_port_get(port);
+ get_device(&port->dev);
port->rport_task = RPORT_ADD;
- if (!queue_work(zfcp_data.work_queue, &port->rport_work))
- zfcp_port_put(port);
+ if (!queue_work(port->adapter->work_queue, &port->rport_work))
+ put_device(&port->dev);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
- zfcp_port_get(port);
+ get_device(&port->dev);
port->rport_task = RPORT_DEL;
- if (!queue_work(zfcp_data.work_queue, &port->rport_work))
- zfcp_port_put(port);
+ if (port->rport && queue_work(port->adapter->work_queue,
+ &port->rport_work))
+ return;
+
+ put_device(&port->dev);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
{
+ unsigned long flags;
struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list)
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list)
zfcp_scsi_schedule_rport_block(port);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
void zfcp_scsi_rport_work(struct work_struct *work)
@@ -592,25 +650,53 @@ void zfcp_scsi_rport_work(struct work_struct *work)
}
}
- zfcp_port_put(port);
+ put_device(&port->dev);
}
-
-void zfcp_scsi_scan(struct work_struct *work)
+/**
+ * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
+ * @adapter: The adapter where to configure DIF/DIX for the SCSI host
+ */
+void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
{
- struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
- scsi_work);
- struct fc_rport *rport;
-
- flush_work(&unit->port->rport_work);
- rport = unit->port->rport;
+ unsigned int mask = 0;
+ unsigned int data_div;
+ struct Scsi_Host *shost = adapter->scsi_host;
+
+ data_div = atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
+
+ if (enable_dif &&
+ adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
+ mask |= SHOST_DIF_TYPE1_PROTECTION;
+
+ if (enable_dif && data_div &&
+ adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
+ mask |= SHOST_DIX_TYPE1_PROTECTION;
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
+ shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
+ shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
+ shost->max_sectors = shost->sg_tablesize * 8;
+ }
- if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
- scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
- scsilun_to_int((struct scsi_lun *)
- &unit->fcp_lun), 0);
+ scsi_host_set_prot(shost, mask);
+}
- zfcp_unit_put(unit);
+/**
+ * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
+ * @scmd: The SCSI command to report the error for
+ * @ascq: The ASCQ to put in the sense buffer
+ *
+ * See the error handling in sd_done for the sense codes used here.
+ * Set DID_SOFT_ERROR to retry the request, if possible.
+ */
+void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
+{
+ scsi_build_sense_buffer(1, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x10, ascq);
+ set_driver_byte(scmd, DRIVER_SENSE);
+ scmd->result |= SAM_STAT_CHECK_CONDITION;
+ set_host_byte(scmd, DID_SOFT_ERROR);
}
struct fc_function_template zfcp_transport_functions = {
@@ -624,6 +710,7 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_port_name = 1,
.show_host_permanent_port_name = 1,
.show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
@@ -631,37 +718,16 @@ struct fc_function_template zfcp_transport_functions = {
.reset_fc_host_stats = zfcp_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_get_host_port_state,
- .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
+ .show_host_active_fc4s = 1,
+ .bsg_request = zfcp_fc_exec_bsg_job,
+ .bsg_timeout = zfcp_fc_timeout_bsg_job,
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
+ .show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
- .disable_target_scan = 1,
-};
-
-struct zfcp_data zfcp_data = {
- .scsi_host_template = {
- .name = "zfcp",
- .module = THIS_MODULE,
- .proc_name = "zfcp",
- .slave_alloc = zfcp_scsi_slave_alloc,
- .slave_configure = zfcp_scsi_slave_configure,
- .slave_destroy = zfcp_scsi_slave_destroy,
- .queuecommand = zfcp_scsi_queuecommand,
- .eh_abort_handler = zfcp_scsi_eh_abort_handler,
- .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
- .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
- .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
- .can_queue = 4096,
- .this_id = -1,
- .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
- .cmd_per_lun = 1,
- .use_clustering = 1,
- .sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
- .shost_attrs = zfcp_sysfs_shost_attrs,
- },
+ .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3e51e64d110..672b57219e1 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,12 +3,13 @@
*
* sysfs attributes.
*
- * Copyright IBM Corporation 2008
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/slab.h>
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -19,30 +20,53 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
- struct _feat_def *_feat = dev_get_drvdata(dev); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
- atomic_read(&adapter->status));
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
- (unsigned long long) adapter->peer_wwnn);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
- (unsigned long long) adapter->peer_wwpn);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
- adapter->peer_d_id);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
- adapter->hydra_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n",
- adapter->fsf_lic_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n",
- adapter->hardware_version);
-ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n",
- (atomic_read(&adapter->status) &
- ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
+ struct device_attribute *at,\
+ char *buf) \
+{ \
+ return sprintf(buf, _format, _value); \
+} \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
+ zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
+#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
+ struct device_attribute *at,\
+ char *buf) \
+{ \
+ struct ccw_device *cdev = to_ccwdev(dev); \
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
+ int i; \
+ \
+ if (!adapter) \
+ return -ENODEV; \
+ \
+ i = sprintf(buf, _format, _value); \
+ zfcp_ccw_adapter_put(adapter); \
+ return i; \
+} \
+static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
+ zfcp_sysfs_adapter_##_name##_show, NULL);
+
+ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
+ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
+ (unsigned long long) adapter->peer_wwnn);
+ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
+ (unsigned long long) adapter->peer_wwpn);
+ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
+ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
+ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
+ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
+ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
atomic_read(&port->status));
@@ -54,126 +78,205 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
- atomic_read(&unit->status));
+ zfcp_unit_sdev_status(unit));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
- (atomic_read(&unit->status) &
+ (zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
- (atomic_read(&unit->status) &
+ (zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
-ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
- (atomic_read(&unit->status) &
- ZFCP_STATUS_UNIT_SHARED) != 0);
-ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
- (atomic_read(&unit->status) &
- ZFCP_STATUS_UNIT_READONLY) != 0);
-
-#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \
-static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct _feat_def *_feat = dev_get_drvdata(dev); \
- \
- if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
- return sprintf(buf, "1\n"); \
- else \
- return sprintf(buf, "0\n"); \
-} \
-static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
- struct device_attribute *attr,\
- const char *buf, size_t count)\
-{ \
- struct _feat_def *_feat = dev_get_drvdata(dev); \
- unsigned long val; \
- int retval = 0; \
- \
- down(&zfcp_data.config_sema); \
- if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \
- retval = -EBUSY; \
- goto out; \
- } \
- \
- if (strict_strtoul(buf, 0, &val) || val != 0) { \
- retval = -EINVAL; \
- goto out; \
- } \
- \
- zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \
- ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
- zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \
- _reopen_id, NULL); \
- zfcp_erp_wait(_adapter); \
-out: \
- up(&zfcp_data.config_sema); \
- return retval ? retval : (ssize_t) count; \
-} \
-static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
- zfcp_sysfs_##_feat##_failed_show, \
- zfcp_sysfs_##_feat##_failed_store);
+ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
+
+static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ return sprintf(buf, "1\n");
+
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) || val != 0)
+ return -EINVAL;
+
+ zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
+ zfcp_erp_wait(port->adapter);
-ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
-ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
-ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
+ return count;
+}
+static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
+ zfcp_sysfs_port_failed_show,
+ zfcp_sysfs_port_failed_store);
+
+static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+ struct scsi_device *sdev;
+ unsigned int status, failed = 1;
+
+ sdev = zfcp_unit_sdev(unit);
+ if (sdev) {
+ status = atomic_read(&sdev_to_zfcp(sdev)->status);
+ failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+ scsi_device_put(sdev);
+ }
+
+ return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+ unsigned long val;
+ struct scsi_device *sdev;
+
+ if (kstrtoul(buf, 0, &val) || val != 0)
+ return -EINVAL;
+
+ sdev = zfcp_unit_sdev(unit);
+ if (sdev) {
+ zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "syufai2");
+ zfcp_erp_wait(unit->port->adapter);
+ } else
+ zfcp_unit_scsi_scan(unit);
+
+ return count;
+}
+static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
+ zfcp_sysfs_unit_failed_show,
+ zfcp_sysfs_unit_failed_store);
+
+static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ int i;
+
+ if (!adapter)
+ return -ENODEV;
+
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ i = sprintf(buf, "1\n");
+ else
+ i = sprintf(buf, "0\n");
+
+ zfcp_ccw_adapter_put(adapter);
+ return i;
+}
+
+static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ unsigned long val;
+ int retval = 0;
+
+ if (!adapter)
+ return -ENODEV;
+
+ if (kstrtoul(buf, 0, &val) || val != 0) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "syafai2");
+ zfcp_erp_wait(adapter);
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
+ zfcp_sysfs_adapter_failed_show,
+ zfcp_sysfs_adapter_failed_store);
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(dev);
- int ret;
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return -ENODEV;
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
- return -EBUSY;
+ /* sync the user-space-triggered with the kernel-triggered invocation of scan_work */
+ queue_work(adapter->work_queue, &adapter->scan_work);
+ flush_work(&adapter->scan_work);
+ zfcp_ccw_adapter_put(adapter);
- ret = zfcp_scan_ports(adapter);
- return ret ? ret : (ssize_t) count;
+ return (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_adapter *adapter = dev_get_drvdata(dev);
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
struct zfcp_port *port;
u64 wwpn;
- int retval = 0;
- LIST_HEAD(port_remove_lh);
+ int retval = -EINVAL;
- down(&zfcp_data.config_sema);
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
- retval = -EBUSY;
- goto out;
- }
+ if (!adapter)
+ return -ENODEV;
- if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
- retval = -EINVAL;
+ if (kstrtoull(buf, 0, (unsigned long long *) &wwpn))
goto out;
- }
- write_lock_irq(&zfcp_data.config_lock);
port = zfcp_get_port_by_wwpn(adapter, wwpn);
- if (port && (atomic_read(&port->refcount) == 0)) {
- zfcp_port_get(port);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
- list_move(&port->list, &port_remove_lh);
- } else
- port = NULL;
- write_unlock_irq(&zfcp_data.config_lock);
+ if (!port)
+ goto out;
+ else
+ retval = 0;
- if (!port) {
- retval = -ENXIO;
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) > 0) {
+ retval = -EBUSY;
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
goto out;
}
+ /* port is about to be removed, so no more unit_add */
+ atomic_set(&port->units, -1);
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
- zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
- zfcp_erp_wait(adapter);
- zfcp_port_put(port);
- zfcp_port_dequeue(port);
+ write_lock_irq(&adapter->port_list_lock);
+ list_del(&port->list);
+ write_unlock_irq(&adapter->port_list_lock);
+
+ put_device(&port->dev);
+
+ zfcp_erp_port_shutdown(port, 0, "syprs_1");
+ device_unregister(&port->dev);
out:
- up(&zfcp_data.config_sema);
+ zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
@@ -202,32 +305,18 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = dev_get_drvdata(dev);
- struct zfcp_unit *unit;
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
- int retval = -EINVAL;
-
- down(&zfcp_data.config_sema);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
- retval = -EBUSY;
- goto out;
- }
-
- if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
- goto out;
+ int retval;
- unit = zfcp_unit_enqueue(port, fcp_lun);
- if (IS_ERR(unit))
- goto out;
+ if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ return -EINVAL;
- retval = 0;
+ retval = zfcp_unit_add(port, fcp_lun);
+ if (retval)
+ return retval;
- zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
- zfcp_erp_wait(unit->port->adapter);
- zfcp_unit_put(unit);
-out:
- up(&zfcp_data.config_sema);
- return retval ? retval : (ssize_t) count;
+ return count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -235,55 +324,16 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = dev_get_drvdata(dev);
- struct zfcp_unit *unit;
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
- int retval = 0;
- LIST_HEAD(unit_remove_lh);
- down(&zfcp_data.config_sema);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
- retval = -EBUSY;
- goto out;
- }
+ if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ return -EINVAL;
- if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
- retval = -EINVAL;
- goto out;
- }
-
- write_lock_irq(&zfcp_data.config_lock);
- unit = zfcp_get_unit_by_lun(port, fcp_lun);
- if (unit) {
- write_unlock_irq(&zfcp_data.config_lock);
- /* wait for possible timeout during SCSI probe */
- flush_work(&unit->scsi_work);
- write_lock_irq(&zfcp_data.config_lock);
-
- if (atomic_read(&unit->refcount) == 0) {
- zfcp_unit_get(unit);
- atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
- &unit->status);
- list_move(&unit->list, &unit_remove_lh);
- } else {
- unit = NULL;
- }
- }
-
- write_unlock_irq(&zfcp_data.config_lock);
-
- if (!unit) {
- retval = -ENXIO;
- goto out;
- }
+ if (zfcp_unit_remove(port, fcp_lun))
+ return -EINVAL;
- zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
- zfcp_erp_wait(unit->port->adapter);
- zfcp_unit_put(unit);
- zfcp_unit_dequeue(unit);
-out:
- up(&zfcp_data.config_sema);
- return retval ? retval : (ssize_t) count;
+ return count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
@@ -296,13 +346,13 @@ static struct attribute *zfcp_port_attrs[] = {
&dev_attr_port_access_denied.attr,
NULL
};
-
-/**
- * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
- */
-struct attribute_group zfcp_sysfs_port_attrs = {
+static struct attribute_group zfcp_port_attr_group = {
.attrs = zfcp_port_attrs,
};
+const struct attribute_group *zfcp_port_attr_groups[] = {
+ &zfcp_port_attr_group,
+ NULL,
+};
static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_failed.attr,
@@ -313,10 +363,13 @@ static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_access_readonly.attr,
NULL
};
-
-struct attribute_group zfcp_sysfs_unit_attrs = {
+static struct attribute_group zfcp_unit_attr_group = {
.attrs = zfcp_unit_attrs,
};
+const struct attribute_group *zfcp_unit_attr_groups[] = {
+ &zfcp_unit_attr_group,
+ NULL,
+};
#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
static ssize_t \
@@ -324,9 +377,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) { \
struct scsi_device *sdev = to_scsi_device(dev); \
- struct zfcp_unit *unit = sdev->hostdata; \
- struct zfcp_latencies *lat = &unit->latencies; \
- struct zfcp_adapter *adapter = unit->port->adapter; \
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
+ struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
+ struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
\
spin_lock_bh(&lat->lock); \
@@ -355,8 +408,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
const char *buf, size_t count) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
- struct zfcp_unit *unit = sdev->hostdata; \
- struct zfcp_latencies *lat = &unit->latencies; \
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
+ struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
unsigned long flags; \
\
spin_lock_irqsave(&lat->lock, flags); \
@@ -384,19 +437,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct zfcp_unit *unit = sdev->hostdata; \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
+ struct zfcp_port *port = zfcp_sdev->port; \
\
return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
- dev_name(&unit->port->adapter->ccw_device->dev));
+ dev_name(&port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
- (unsigned long long) unit->port->wwpn);
-ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
- (unsigned long long) unit->fcp_lun);
+ (unsigned long long) port->wwpn);
+
+static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
+}
+static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
@@ -425,7 +487,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
if (!qtcb_port)
return -ENOMEM;
- retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
+ retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
if (!retval)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
@@ -451,7 +513,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
if (!qtcb_config)
return -ENOMEM;
- retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
+ retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
if (!retval)
*stat_inf = qtcb_config->stat_info;
@@ -492,11 +554,16 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
char *buf)
{
struct Scsi_Host *scsi_host = class_to_shost(dev);
- struct zfcp_adapter *adapter =
- (struct zfcp_adapter *) scsi_host->hostdata[0];
+ struct zfcp_qdio *qdio =
+ ((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
+ u64 util;
+
+ spin_lock_bh(&qdio->stat_lock);
+ util = qdio->req_q_util;
+ spin_unlock_bh(&qdio->stat_lock);
- return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
- (unsigned long long)adapter->req_q_util);
+ return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
+ (unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
new file mode 100644
index 00000000000..39f5446f721
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -0,0 +1,255 @@
+/*
+ * zfcp device driver
+ *
+ * Tracking of manually configured LUNs and helper functions to
+ * register the LUNs with the SCSI midlayer.
+ *
+ * Copyright IBM Corp. 2010
+ */
+
+#include "zfcp_def.h"
+#include "zfcp_ext.h"
+
+/**
+ * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
+ * @unit: The zfcp LUN/unit to register
+ *
+ * When the SCSI midlayer is not allowed to automatically scan and
+ * attach SCSI devices, zfcp has to register the individual devices with
+ * the SCSI midlayer.
+ */
+void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
+{
+ struct fc_rport *rport = unit->port->rport;
+ unsigned int lun;
+
+ lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+
+ if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
+ scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
+}
+
+static void zfcp_unit_scsi_scan_work(struct work_struct *work)
+{
+ struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
+ scsi_work);
+
+ zfcp_unit_scsi_scan(unit);
+ put_device(&unit->dev);
+}
+
+/**
+ * zfcp_unit_queue_scsi_scan - Register configured units on port
+ * @port: The zfcp_port where to register units
+ *
+ * After opening a port, all units configured on this port have to be
+ * registered with the SCSI midlayer. This function should be called
+ * after calling fc_remote_port_add, so that the fc_rport is already
+ * ONLINE and the call to scsi_scan_target runs the same way as the
+ * call in the FC transport class.
+ */
+void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
+{
+ struct zfcp_unit *unit;
+
+ read_lock_irq(&port->unit_list_lock);
+ list_for_each_entry(unit, &port->unit_list, list) {
+ get_device(&unit->dev);
+ if (scsi_queue_work(port->adapter->scsi_host,
+ &unit->scsi_work) <= 0)
+ put_device(&unit->dev);
+ }
+ read_unlock_irq(&port->unit_list_lock);
+}
+
+static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+ struct zfcp_unit *unit;
+
+ list_for_each_entry(unit, &port->unit_list, list)
+ if (unit->fcp_lun == fcp_lun) {
+ get_device(&unit->dev);
+ return unit;
+ }
+
+ return NULL;
+}
+
+/**
+ * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
+ * @port: zfcp_port where to look for the unit
+ * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
+ *
+ * If zfcp_unit is found, a reference is acquired that has to be
+ * released later.
+ *
+ * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
+ * with the specified FCP LUN.
+ */
+struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+ struct zfcp_unit *unit;
+
+ read_lock_irq(&port->unit_list_lock);
+ unit = _zfcp_unit_find(port, fcp_lun);
+ read_unlock_irq(&port->unit_list_lock);
+ return unit;
+}
+
+/**
+ * zfcp_unit_release - Decrement the port's unit counter and free the zfcp_unit.
+ * @dev: pointer to device in zfcp_unit
+ */
+static void zfcp_unit_release(struct device *dev)
+{
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+ atomic_dec(&unit->port->units);
+ kfree(unit);
+}
+
+/**
+ * zfcp_unit_add - add unit to unit list of a port
+ * @port: pointer to port where unit is added
+ * @fcp_lun: FCP LUN of unit to be added
+ * Returns: 0 on success
+ *
+ * Sets up some unit internal structures and creates sysfs entry.
+ */
+int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+{
+ struct zfcp_unit *unit;
+ int retval = 0;
+
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) == -1) {
+ /* port is already gone */
+ retval = -ENODEV;
+ goto out;
+ }
+
+ unit = zfcp_unit_find(port, fcp_lun);
+ if (unit) {
+ put_device(&unit->dev);
+ retval = -EEXIST;
+ goto out;
+ }
+
+ unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+ if (!unit) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ unit->port = port;
+ unit->fcp_lun = fcp_lun;
+ unit->dev.parent = &port->dev;
+ unit->dev.release = zfcp_unit_release;
+ unit->dev.groups = zfcp_unit_attr_groups;
+ INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
+
+ if (dev_set_name(&unit->dev, "0x%016llx",
+ (unsigned long long) fcp_lun)) {
+ kfree(unit);
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ if (device_register(&unit->dev)) {
+ put_device(&unit->dev);
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
+
+ write_lock_irq(&port->unit_list_lock);
+ list_add_tail(&unit->list, &port->unit_list);
+ write_unlock_irq(&port->unit_list_lock);
+
+ zfcp_unit_scsi_scan(unit);
+
+out:
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ return retval;
+}
+
+/**
+ * zfcp_unit_sdev - Return SCSI device for zfcp_unit
+ * @unit: The zfcp_unit where to get the SCSI device for
+ *
+ * Returns: scsi_device pointer on success, NULL if there is no SCSI
+ * device for this zfcp_unit
+ *
+ * On success, the caller also holds a reference to the SCSI device
+ * that must be released with scsi_device_put.
+ */
+struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
+{
+ struct Scsi_Host *shost;
+ struct zfcp_port *port;
+ unsigned int lun;
+
+ lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+ port = unit->port;
+ shost = port->adapter->scsi_host;
+ return scsi_device_lookup(shost, 0, port->starget_id, lun);
+}
+
+/**
+ * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
+ * @unit: The unit to lookup the SCSI device for
+ *
+ * Returns the zfcp LUN status field of the SCSI device if the SCSI device
+ * for the zfcp_unit exists, 0 otherwise.
+ */
+unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
+{
+ unsigned int status = 0;
+ struct scsi_device *sdev;
+ struct zfcp_scsi_dev *zfcp_sdev;
+
+ sdev = zfcp_unit_sdev(unit);
+ if (sdev) {
+ zfcp_sdev = sdev_to_zfcp(sdev);
+ status = atomic_read(&zfcp_sdev->status);
+ scsi_device_put(sdev);
+ }
+
+ return status;
+}
+
+/**
+ * zfcp_unit_remove - Remove entry from list of configured units
+ * @port: The port where to remove the unit from the configuration
+ * @fcp_lun: The 64 bit LUN of the unit to remove
+ *
+ * Returns: -EINVAL if a unit with the specified LUN does not exist,
+ * 0 on success.
+ */
+int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
+{
+ struct zfcp_unit *unit;
+ struct scsi_device *sdev;
+
+ write_lock_irq(&port->unit_list_lock);
+ unit = _zfcp_unit_find(port, fcp_lun);
+ if (unit)
+ list_del(&unit->list);
+ write_unlock_irq(&port->unit_list_lock);
+
+ if (!unit)
+ return -EINVAL;
+
+ sdev = zfcp_unit_sdev(unit);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+
+ put_device(&unit->dev);
+
+ device_unregister(&unit->dev);
+
+ return 0;
+}
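
To round off the new zfcp_unit interface, the following illustrative sketch (not part of the patch) mirrors what the unit_add and unit_remove sysfs stores above do for a manually configured LUN; the FCP LUN value is a hypothetical placeholder.

/* Illustrative sketch, not part of this patch: configure and drop one LUN. */
static int example_unit_config(struct zfcp_port *port)
{
	u64 fcp_lun = 0x4010400000000000ULL;	/* hypothetical LUN */
	struct zfcp_unit *unit;
	int ret;

	ret = zfcp_unit_add(port, fcp_lun);	/* register unit, trigger scan */
	if (ret)
		return ret;

	unit = zfcp_unit_find(port, fcp_lun);	/* takes a device reference */
	if (unit)
		put_device(&unit->dev);		/* drop it again when done */

	/* detach the SCSI device and unregister the sysfs entry */
	return zfcp_unit_remove(port, fcp_lun);
}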