Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/Makefile          |   4
-rw-r--r--  drivers/s390/cio/airq.c            | 334
-rw-r--r--  drivers/s390/cio/blacklist.c       |  50
-rw-r--r--  drivers/s390/cio/ccwgroup.c        | 564
-rw-r--r--  drivers/s390/cio/ccwreq.c          |  61
-rw-r--r--  drivers/s390/cio/chp.c             | 123
-rw-r--r--  drivers/s390/cio/chp.h             |  18
-rw-r--r--  drivers/s390/cio/chsc.c            | 713
-rw-r--r--  drivers/s390/cio/chsc.h            | 187
-rw-r--r--  drivers/s390/cio/chsc_sch.c        | 223
-rw-r--r--  drivers/s390/cio/cio.c             | 428
-rw-r--r--  drivers/s390/cio/cio.h             |  30
-rw-r--r--  drivers/s390/cio/cmf.c             |  16
-rw-r--r--  drivers/s390/cio/crw.c             |  32
-rw-r--r--  drivers/s390/cio/css.c             | 418
-rw-r--r--  drivers/s390/cio/css.h             |  23
-rw-r--r--  drivers/s390/cio/device.c          | 585
-rw-r--r--  drivers/s390/cio/device.h          |  23
-rw-r--r--  drivers/s390/cio/device_fsm.c      | 129
-rw-r--r--  drivers/s390/cio/device_id.c       |   2
-rw-r--r--  drivers/s390/cio/device_ops.c      |  99
-rw-r--r--  drivers/s390/cio/device_pgid.c     | 176
-rw-r--r--  drivers/s390/cio/device_status.c   |   5
-rw-r--r--  drivers/s390/cio/eadm_sch.c        | 418
-rw-r--r--  drivers/s390/cio/eadm_sch.h        |  22
-rw-r--r--  drivers/s390/cio/fcx.c             |   4
-rw-r--r--  drivers/s390/cio/idset.c           |  29
-rw-r--r--  drivers/s390/cio/idset.h           |   5
-rw-r--r--  drivers/s390/cio/io_sch.h          | 142
-rw-r--r--  drivers/s390/cio/ioasm.h           |  49
-rw-r--r--  drivers/s390/cio/itcw.c            |  64
-rw-r--r--  drivers/s390/cio/orb.h             |  91
-rw-r--r--  drivers/s390/cio/qdio.h            | 263
-rw-r--r--  drivers/s390/cio/qdio_debug.c      | 247
-rw-r--r--  drivers/s390/cio/qdio_debug.h      |  53
-rw-r--r--  drivers/s390/cio/qdio_main.c       | 963
-rw-r--r--  drivers/s390/cio/qdio_perf.c       | 147
-rw-r--r--  drivers/s390/cio/qdio_perf.h       |  61
-rw-r--r--  drivers/s390/cio/qdio_setup.c      | 201
-rw-r--r--  drivers/s390/cio/qdio_thinint.c    | 283
-rw-r--r--  drivers/s390/cio/scm.c             | 288
41 files changed, 4931 insertions, 2642 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index d033414f759..8c4a386e97f 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -8,7 +8,9 @@ ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
+obj-$(CONFIG_EADM_SCH) += eadm_sch.o
+obj-$(CONFIG_SCM_BUS) += scm.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
-qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 65d2e769dfa..00bfbee0af9 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/airq.c
* Support for adapter interruptions
*
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
@@ -10,142 +9,267 @@
*/
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
#include <linux/slab.h>
-#include <linux/rcupdate.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
+#include "ioasm.h"
-#define NR_AIRQS 32
-#define NR_AIRQS_PER_WORD sizeof(unsigned long)
-#define NR_AIRQ_WORDS (NR_AIRQS / NR_AIRQS_PER_WORD)
+static DEFINE_SPINLOCK(airq_lists_lock);
+static struct hlist_head airq_lists[MAX_ISC+1];
-union indicator_t {
- unsigned long word[NR_AIRQ_WORDS];
- unsigned char byte[NR_AIRQS];
-} __attribute__((packed));
+/**
+ * register_adapter_interrupt() - register adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ *
+ * Returns 0 on success, or -EINVAL / -ENOMEM on failure.
+ */
+int register_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
-struct airq_t {
- adapter_int_handler_t handler;
- void *drv_data;
-};
+ if (!airq->handler || airq->isc > MAX_ISC)
+ return -EINVAL;
+ if (!airq->lsi_ptr) {
+ airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
+ if (!airq->lsi_ptr)
+ return -ENOMEM;
+ airq->flags |= AIRQ_PTR_ALLOCATED;
+ }
+ if (!airq->lsi_mask)
+ airq->lsi_mask = 0xff;
+ snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ isc_register(airq->isc);
+ spin_lock(&airq_lists_lock);
+ hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
+ spin_unlock(&airq_lists_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_adapter_interrupt);
-static union indicator_t indicators[MAX_ISC+1];
-static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
+/**
+ * unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ */
+void unregister_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
-static int register_airq(struct airq_t *airq, u8 isc)
+ if (hlist_unhashed(&airq->list))
+ return;
+ snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ spin_lock(&airq_lists_lock);
+ hlist_del_rcu(&airq->list);
+ spin_unlock(&airq_lists_lock);
+ synchronize_rcu();
+ isc_unregister(airq->isc);
+ if (airq->flags & AIRQ_PTR_ALLOCATED) {
+ kfree(airq->lsi_ptr);
+ airq->lsi_ptr = NULL;
+ airq->flags &= ~AIRQ_PTR_ALLOCATED;
+ }
+}
+EXPORT_SYMBOL(unregister_adapter_interrupt);
+
+static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{
- int i;
+ struct tpi_info *tpi_info;
+ struct airq_struct *airq;
+ struct hlist_head *head;
+
+ __this_cpu_write(s390_idle.nohz_delay, 1);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ head = &airq_lists[tpi_info->isc];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(airq, head, list)
+ if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+ airq->handler(airq);
+ rcu_read_unlock();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction airq_interrupt = {
+ .name = "AIO",
+ .handler = do_airq_interrupt,
+};
- for (i = 0; i < NR_AIRQS; i++)
- if (!cmpxchg(&airqs[isc][i], NULL, airq))
- return i;
- return -ENOMEM;
+void __init init_airq_interrupts(void)
+{
+ irq_set_chip_and_handler(THIN_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
/**
- * s390_register_adapter_interrupt() - register adapter interrupt handler
- * @handler: adapter handler to be registered
- * @drv_data: driver data passed with each call to the handler
- * @isc: isc for which the handler should be called
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+ * @flags: allocation flags
*
- * Returns:
- * Pointer to the indicator to be used on success
- * ERR_PTR() if registration failed
+ * Returns a pointer to an interrupt vector structure, or NULL on failure.
*/
-void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
- void *drv_data, u8 isc)
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
{
- struct airq_t *airq;
- char dbf_txt[16];
- int ret;
-
- if (isc > MAX_ISC)
- return ERR_PTR(-EINVAL);
- airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
- if (!airq) {
- ret = -ENOMEM;
+ struct airq_iv *iv;
+ unsigned long size;
+
+ iv = kzalloc(sizeof(*iv), GFP_KERNEL);
+ if (!iv)
goto out;
+ iv->bits = bits;
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ iv->vector = kzalloc(size, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ if (flags & AIRQ_IV_ALLOC) {
+ iv->avail = kmalloc(size, GFP_KERNEL);
+ if (!iv->avail)
+ goto out_free;
+ memset(iv->avail, 0xff, size);
+ iv->end = 0;
+ } else
+ iv->end = bits;
+ if (flags & AIRQ_IV_BITLOCK) {
+ iv->bitlock = kzalloc(size, GFP_KERNEL);
+ if (!iv->bitlock)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_PTR) {
+ size = bits * sizeof(unsigned long);
+ iv->ptr = kzalloc(size, GFP_KERNEL);
+ if (!iv->ptr)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_DATA) {
+ size = bits * sizeof(unsigned int);
+ iv->data = kzalloc(size, GFP_KERNEL);
+ if (!iv->data)
+ goto out_free;
}
- airq->handler = handler;
- airq->drv_data = drv_data;
+ spin_lock_init(&iv->lock);
+ return iv;
- ret = register_airq(airq, isc);
+out_free:
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->avail);
+ kfree(iv->vector);
+ kfree(iv);
out:
- snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
- CIO_TRACE_EVENT(4, dbf_txt);
- if (ret < 0) {
- kfree(airq);
- return ERR_PTR(ret);
- } else
- return &indicators[isc].byte[ret];
+ return NULL;
}
-EXPORT_SYMBOL(s390_register_adapter_interrupt);
+EXPORT_SYMBOL(airq_iv_create);
/**
- * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
- * @ind: indicator for which the handler is to be unregistered
- * @isc: interruption subclass
+ * airq_iv_release - release an interrupt vector
+ * @iv: pointer to interrupt vector structure
*/
-void s390_unregister_adapter_interrupt(void *ind, u8 isc)
+void airq_iv_release(struct airq_iv *iv)
{
- struct airq_t *airq;
- char dbf_txt[16];
- int i;
-
- i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
- snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
- CIO_TRACE_EVENT(4, dbf_txt);
- indicators[isc].byte[i] = 0;
- airq = xchg(&airqs[isc][i], NULL);
- /*
- * Allow interrupts to complete. This will ensure that the airq handle
- * is no longer referenced by any interrupt handler.
- */
- synchronize_sched();
- kfree(airq);
+ kfree(iv->data);
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->vector);
+ kfree(iv->avail);
+ kfree(iv);
}
-EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
-
-#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
+EXPORT_SYMBOL(airq_iv_release);
-void do_adapter_IO(u8 isc)
+/**
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
+ * @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
+ *
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
+ */
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
- int w;
- int i;
- unsigned long word;
- struct airq_t *airq;
-
- /*
- * Access indicator array in word-sized chunks to minimize storage
- * fetch operations.
- */
- for (w = 0; w < NR_AIRQ_WORDS; w++) {
- word = indicators[isc].word[w];
- i = w * NR_AIRQS_PER_WORD;
- /*
- * Check bytes within word for active indicators.
- */
- while (word) {
- if (word & INDICATOR_MASK) {
- airq = airqs[isc][i];
- /* Make sure gcc reads from airqs only once. */
- barrier();
- if (likely(airq))
- airq->handler(&indicators[isc].byte[i],
- airq->drv_data);
- else
- /*
- * Reset ill-behaved indicator.
- */
- indicators[isc].byte[i] = 0;
- }
- word <<= 8;
- i++;
+ unsigned long bit, i, flags;
+
+ if (!iv->avail || num == 0)
+ return -1UL;
+ spin_lock_irqsave(&iv->lock, flags);
+ bit = find_first_bit_inv(iv->avail, iv->bits);
+ while (bit + num <= iv->bits) {
+ for (i = 1; i < num; i++)
+ if (!test_bit_inv(bit + i, iv->avail))
+ break;
+ if (i >= num) {
+ /* Found a suitable block of irqs */
+ for (i = 0; i < num; i++)
+ clear_bit_inv(bit + i, iv->avail);
+ if (bit + num >= iv->end)
+ iv->end = bit + num + 1;
+ break;
}
+ bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+ }
+ if (bit + num > iv->bits)
+ bit = -1UL;
+ spin_unlock_irqrestore(&iv->lock, flags);
+ return bit;
+}
+EXPORT_SYMBOL(airq_iv_alloc);
+
+/**
+ * airq_iv_free - free irq bits of an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
+ */
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
+{
+ unsigned long i, flags;
+
+ if (!iv->avail || num == 0)
+ return;
+ spin_lock_irqsave(&iv->lock, flags);
+ for (i = 0; i < num; i++) {
+ /* Clear (possibly left over) interrupt bit */
+ clear_bit_inv(bit + i, iv->vector);
+ /* Make the bit positions available again */
+ set_bit_inv(bit + i, iv->avail);
+ }
+ if (bit + num >= iv->end) {
+ /* Find new end of bit-field */
+ while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+ iv->end--;
}
+ spin_unlock_irqrestore(&iv->lock, flags);
+}
+EXPORT_SYMBOL(airq_iv_free);
+
+/**
+ * airq_iv_scan - scan interrupt vector for non-zero bits
+ * @iv: pointer to interrupt vector structure
+ * @start: bit number to start the search
+ * @end: bit number to end the search
+ *
+ * Returns the bit number of the next non-zero interrupt bit, or
+ * -1UL if the scan completed without finding any more non-zero bits.
+ */
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+ unsigned long end)
+{
+ unsigned long bit;
+
+ /* Find non-zero bit starting from 'start'. */
+ bit = find_next_bit_inv(iv->vector, end, start);
+ if (bit >= end)
+ return -1UL;
+ clear_bit_inv(bit, iv->vector);
+ return bit;
}
+EXPORT_SYMBOL(airq_iv_scan);
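As a quick orientation to the interfaces added above, the following is a minimal, hypothetical driver-side sketch of the airq_struct/airq_iv usage pattern. The my_* names, the ISC value and the vector size are illustrative assumptions and not part of this patch:

/*
 * Hypothetical airq consumer: one adapter interrupt source fanned
 * out over an interrupt vector with dynamically allocated bits.
 */
#include <linux/module.h>
#include <asm/airq.h>

static struct airq_iv *my_iv;

static void my_airq_handler(struct airq_struct *airq)
{
	unsigned long bit, start = 0;

	/* Pick up and clear every pending bit in the vector. */
	while ((bit = airq_iv_scan(my_iv, start, my_iv->end)) != -1UL) {
		start = bit + 1;
		/* dispatch the unit of work registered for 'bit' */
	}
}

static struct airq_struct my_airq = {
	.handler = my_airq_handler,
	.isc	 = 3,			/* assumed, otherwise unused ISC */
};

static int __init my_init(void)
{
	unsigned long bit;
	int rc;

	my_iv = airq_iv_create(256, AIRQ_IV_ALLOC);
	if (!my_iv)
		return -ENOMEM;
	bit = airq_iv_alloc(my_iv, 1);		/* -1UL when exhausted */
	if (bit == -1UL) {
		rc = -ENOSPC;
		goto out_iv;
	}
	rc = register_adapter_interrupt(&my_airq);
	if (rc)
		goto out_bit;
	return 0;
out_bit:
	airq_iv_free(my_iv, bit, 1);
out_iv:
	airq_iv_release(my_iv);
	return rc;
}

The handler is invoked from do_airq_interrupt() under rcu_read_lock() in interrupt context, so it must not sleep; anything heavier than bit dispatch belongs in a tasklet or work item.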
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 7eab9ab9f40..b3f791b2c1f 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,9 +1,7 @@
/*
- * drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
*
- * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 1999, 2013
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -14,14 +12,14 @@
#include <linux/init.h>
#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>
-#include <asm/cio.h>
#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ipl.h>
#include "blacklist.h"
#include "cio.h"
@@ -80,17 +78,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
int max_digit, int max_val)
{
int diff;
- unsigned int value;
diff = 0;
*val = 0;
- while (isxdigit(**cp) && (diff <= max_digit)) {
+ while (diff <= max_digit) {
+ int value = hex_to_bin(**cp);
- if (isdigit(**cp))
- value = **cp - '0';
- else
- value = tolower(**cp) - 'a' + 10;
+ if (value < 0)
+ break;
*val = *val * 16 + value;
(*cp)++;
diff++;
@@ -177,6 +173,29 @@ static int blacklist_parse_parameters(char *str, range_action action,
to_cssid = __MAX_CSSID;
to_ssid = __MAX_SSID;
to = __MAX_SUBCHANNEL;
+ } else if (strcmp(parm, "ipldev") == 0) {
+ if (ipl_info.type == IPL_TYPE_CCW) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.ccw.dev_id.ssid;
+ from = ipl_info.data.ccw.dev_id.devno;
+ } else if (ipl_info.type == IPL_TYPE_FCP ||
+ ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.fcp.dev_id.ssid;
+ from = ipl_info.data.fcp.dev_id.devno;
+ } else {
+ continue;
+ }
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ } else if (strcmp(parm, "condev") == 0) {
+ if (console_devno == -1)
+ continue;
+
+ from_cssid = to_cssid = 0;
+ from_ssid = to_ssid = 0;
+ from = to = console_devno;
} else {
rc = parse_busid(strsep(&parm, "-"), &from_cssid,
&from_ssid, &from, msgtrigger);
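With the two keywords added above, a blacklist can be specified without knowing the concrete bus ids of the boot devices. A typical (assumed) kernel command line that ignores everything except the IPL and console devices would be:

	cio_ignore=all,!ipldev,!condev

'ipldev' is resolved against ipl_info at parse time (CCW and FCP IPL types), 'condev' against console_devno; either keyword is skipped silently when it does not apply.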
@@ -241,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf)
parm = strsep(&buf, " ");
- if (strcmp("free", parm) == 0)
+ if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- else if (strcmp("add", parm) == 0)
+ css_schedule_eval_all_unreg(0);
+ } else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
- css_schedule_reprobe();
return rc;
}
@@ -338,10 +357,9 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
return -EINVAL;
if (user_len > 65536)
user_len = 65536;
- buf = vmalloc (user_len + 1); /* maybe better use the stack? */
+ buf = vzalloc(user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
- memset(buf, 0, user_len + 1);
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
rc = -EFAULT;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index a5a62f1f774..e443b0d0b23 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
/*
* bus driver for ccwgroup
*
- * Copyright IBM Corp. 2002, 2009
+ * Copyright IBM Corp. 2002, 2012
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -15,10 +15,13 @@
#include <linux/ctype.h>
#include <linux/dcache.h>
+#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
-#define CCW_BUS_ID_SIZE 20
+#include "device.h"
+
+#define CCW_BUS_ID_SIZE 10
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
@@ -27,68 +30,161 @@
* to devices that use multiple subchannels.
*/
-/* a device matches a driver if all its slave devices match the same
- * entry of the driver */
-static int
-ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
+static struct bus_type ccwgroup_bus_type;
+
+static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
+ int i;
+ char str[8];
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(drv);
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+ }
+}
- if (gdev->creator_id == gdrv->driver_id)
- return 1;
+/*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+ struct ccw_device *cdev;
+ int i;
- return 0;
+ for (i = 0; i < gdev->count; i++) {
+ cdev = gdev->cdev[i];
+ if (!cdev)
+ continue;
+ spin_lock_irq(cdev->ccwlock);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irq(cdev->ccwlock);
+ gdev->cdev[i] = NULL;
+ put_device(&cdev->dev);
+ }
}
-static int
-ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
+
+/**
+ * ccwgroup_set_online() - enable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the online state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
- /* TODO */
- return 0;
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_ONLINE)
+ goto out;
+ if (gdrv->set_online)
+ ret = gdrv->set_online(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_ONLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
}
+EXPORT_SYMBOL(ccwgroup_set_online);
-static struct bus_type ccwgroup_bus_type;
+/**
+ * ccwgroup_set_offline() - disable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the offline state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_OFFLINE)
+ goto out;
+ if (gdrv->set_offline)
+ ret = gdrv->set_offline(gdev);
+ if (ret)
+ goto out;
-static void
-__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+ gdev->state = CCWGROUP_OFFLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_offline);
+
+static ssize_t ccwgroup_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- int i;
- char str[8];
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ unsigned long value;
+ int ret;
- for (i = 0; i < gdev->count; i++) {
- sprintf(str, "cdev%d", i);
- sysfs_remove_link(&gdev->dev.kobj, str);
- sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+ device_lock(dev);
+ if (!dev->driver) {
+ ret = -EINVAL;
+ goto out;
}
-
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ goto out;
+
+ if (value == 1)
+ ret = ccwgroup_set_online(gdev);
+ else if (value == 0)
+ ret = ccwgroup_set_offline(gdev);
+ else
+ ret = -EINVAL;
+out:
+ device_unlock(dev);
+ return (ret == 0) ? count : ret;
+}
+
+static ssize_t ccwgroup_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int online;
+
+ online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
}
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentally created. Saves memory :)
*/
-static void ccwgroup_ungroup_callback(struct device *dev)
+static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
- device_unregister(dev);
+ device_unregister(&gdev->dev);
+ __ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
}
-static ssize_t
-ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t ccwgroup_ungroup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct ccwgroup_device *gdev;
- int rc;
-
- gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int rc = 0;
/* Prevent concurrent online/offline processing and ungrouping. */
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
@@ -97,49 +193,57 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const
rc = -EINVAL;
goto out;
}
- /* Note that we cannot unregister the device from one of its
- * attribute methods, so we have to use this roundabout approach.
- */
- rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+ if (device_remove_file_self(dev, attr))
+ ccwgroup_ungroup(gdev);
+ else
+ rc = -ENODEV;
out:
if (rc) {
- if (rc != -EAGAIN)
- /* Release onoff "lock" when ungrouping failed. */
- atomic_set(&gdev->onoff, 0);
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
}
-
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static struct attribute *ccwgroup_attrs[] = {
+ &dev_attr_online.attr,
+ &dev_attr_ungroup.attr,
+ NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+ .attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+ &ccwgroup_attr_group,
+ NULL,
+};
-static void
-ccwgroup_release (struct device *dev)
+static void ccwgroup_ungroup_workfn(struct work_struct *work)
{
- struct ccwgroup_device *gdev;
- int i;
+ struct ccwgroup_device *gdev =
+ container_of(work, struct ccwgroup_device, ungroup_work);
- gdev = to_ccwgroupdev(dev);
+ ccwgroup_ungroup(gdev);
+ put_device(&gdev->dev);
+}
- for (i = 0; i < gdev->count; i++) {
- if (gdev->cdev[i]) {
- if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
- dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
- put_device(&gdev->cdev[i]->dev);
- }
- }
- kfree(gdev);
+static void ccwgroup_release(struct device *dev)
+{
+ kfree(to_ccwgroupdev(dev));
}
-static int
-__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
char str[8];
int i, rc;
for (i = 0; i < gdev->count; i++) {
- rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
- "group_device");
+ rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
+ &gdev->dev.kobj, "group_device");
if (rc) {
for (--i; i >= 0; i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
@@ -149,8 +253,8 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
}
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
- rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
- str);
+ rc = sysfs_create_link(&gdev->dev.kobj,
+ &gdev->cdev[i]->dev.kobj, str);
if (rc) {
for (--i; i >= 0; i--) {
sprintf(str, "cdev%d", i);
@@ -165,9 +269,10 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
return 0;
}
-static int __get_next_bus_id(const char **buf, char *bus_id)
+static int __get_next_id(const char **buf, struct ccw_dev_id *id)
{
- int rc, len;
+ unsigned int cssid, ssid, devno;
+ int ret = 0, len;
char *start, *end;
start = (char *)*buf;
@@ -182,49 +287,40 @@ static int __get_next_bus_id(const char **buf, char *bus_id)
len = end - start + 1;
end++;
}
- if (len < CCW_BUS_ID_SIZE) {
- strlcpy(bus_id, start, len);
- rc = 0;
+ if (len <= CCW_BUS_ID_SIZE) {
+ if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+ ret = -EINVAL;
} else
- rc = -EINVAL;
- *buf = end;
- return rc;
-}
-
-static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
-{
- int cssid, ssid, devno;
+ ret = -EINVAL;
- /* Must be of form %x.%x.%04x */
- if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
- return 0;
- return 1;
+ if (!ret) {
+ id->ssid = ssid;
+ id->devno = devno;
+ }
+ *buf = end;
+ return ret;
}
/**
- * ccwgroup_create_from_string() - create and register a ccw group device
- * @root: parent device for the new device
- * @creator_id: identifier of creating driver
- * @cdrv: ccw driver of slave devices
+ * ccwgroup_create_dev() - create and register a ccw group device
+ * @parent: parent device for the new device
+ * @gdrv: driver for the new group device
* @num_devices: number of slave devices
* @buf: buffer containing comma separated bus ids of slave devices
*
- * Create and register a new ccw group device as a child of @root. Slave
- * devices are obtained from the list of bus ids given in @buf and must all
- * belong to @cdrv.
+ * Create and register a new ccw group device as a child of @parent. Slave
+ * devices are obtained from the list of bus ids given in @buf.
* Returns:
* %0 on success and an error code on failure.
* Context:
* non-atomic
*/
-int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
- struct ccw_driver *cdrv, int num_devices,
- const char *buf)
+int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
+ int num_devices, const char *buf)
{
struct ccwgroup_device *gdev;
+ struct ccw_dev_id dev_id;
int rc, i;
- char tmp_bus_id[CCW_BUS_ID_SIZE];
- const char *curr_buf;
gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
GFP_KERNEL);
@@ -234,77 +330,76 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
- gdev->creator_id = creator_id;
+ INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
gdev->count = num_devices;
gdev->dev.bus = &ccwgroup_bus_type;
- gdev->dev.parent = root;
+ gdev->dev.parent = parent;
gdev->dev.release = ccwgroup_release;
device_initialize(&gdev->dev);
- curr_buf = buf;
- for (i = 0; i < num_devices && curr_buf; i++) {
- rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
+ for (i = 0; i < num_devices && buf; i++) {
+ rc = __get_next_id(&buf, &dev_id);
if (rc != 0)
goto error;
- if (!__is_valid_bus_id(tmp_bus_id)) {
- rc = -EINVAL;
- goto error;
- }
- gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
+ gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
/*
* All devices have to be of the same type in
* order to be grouped.
*/
- if (!gdev->cdev[i]
- || gdev->cdev[i]->id.driver_info !=
+ if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
+ gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
+ gdev->cdev[i]->id.driver_info !=
gdev->cdev[0]->id.driver_info) {
rc = -EINVAL;
goto error;
}
/* Don't allow a device to belong to more than one group. */
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
rc = -EINVAL;
goto error;
}
dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
}
/* Check for sufficient number of bus ids. */
- if (i < num_devices && !curr_buf) {
+ if (i < num_devices) {
rc = -EINVAL;
goto error;
}
/* Check for trailing stuff. */
- if (i == num_devices && strlen(curr_buf) > 0) {
+ if (i == num_devices && strlen(buf) > 0) {
rc = -EINVAL;
goto error;
}
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
+ gdev->dev.groups = ccwgroup_attr_groups;
+ if (gdrv) {
+ gdev->dev.driver = &gdrv->driver;
+ rc = gdrv->setup ? gdrv->setup(gdev) : 0;
+ if (rc)
+ goto error;
+ }
rc = device_add(&gdev->dev);
if (rc)
goto error;
- get_device(&gdev->dev);
- rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
-
+ rc = __ccwgroup_create_symlinks(gdev);
if (rc) {
- device_unregister(&gdev->dev);
+ device_del(&gdev->dev);
goto error;
}
-
- rc = __ccwgroup_create_symlinks(gdev);
- if (!rc) {
- mutex_unlock(&gdev->reg_mutex);
- put_device(&gdev->dev);
- return 0;
- }
- device_remove_file(&gdev->dev, &dev_attr_ungroup);
- device_unregister(&gdev->dev);
+ mutex_unlock(&gdev->reg_mutex);
+ return 0;
error:
for (i = 0; i < num_devices; i++)
if (gdev->cdev[i]) {
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
put_device(&gdev->cdev[i]->dev);
gdev->cdev[i] = NULL;
}
@@ -312,10 +407,20 @@ error:
put_device(&gdev->dev);
return rc;
}
-EXPORT_SYMBOL(ccwgroup_create_from_string);
+EXPORT_SYMBOL(ccwgroup_create_dev);
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
- void *data);
+ void *data)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(data);
+
+ if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+ get_device(&gdev->dev);
+ schedule_work(&gdev->ungroup_work);
+ }
+
+ return NOTIFY_OK;
+}
static struct notifier_block ccwgroup_nb = {
.notifier_call = ccwgroup_notifier
@@ -347,138 +452,13 @@ module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
-static int
-ccwgroup_set_online(struct ccwgroup_device *gdev)
-{
- struct ccwgroup_driver *gdrv;
- int ret;
-
- if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
- return -EAGAIN;
- if (gdev->state == CCWGROUP_ONLINE) {
- ret = 0;
- goto out;
- }
- if (!gdev->dev.driver) {
- ret = -EINVAL;
- goto out;
- }
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
- if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0))
- goto out;
-
- gdev->state = CCWGROUP_ONLINE;
- out:
- atomic_set(&gdev->onoff, 0);
- return ret;
-}
-
-static int
-ccwgroup_set_offline(struct ccwgroup_device *gdev)
-{
- struct ccwgroup_driver *gdrv;
- int ret;
-
- if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
- return -EAGAIN;
- if (gdev->state == CCWGROUP_OFFLINE) {
- ret = 0;
- goto out;
- }
- if (!gdev->dev.driver) {
- ret = -EINVAL;
- goto out;
- }
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
- if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0))
- goto out;
-
- gdev->state = CCWGROUP_OFFLINE;
- out:
- atomic_set(&gdev->onoff, 0);
- return ret;
-}
-
-static ssize_t
-ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
- unsigned long value;
- int ret;
-
- if (!dev->driver)
- return -ENODEV;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
- if (!try_module_get(gdrv->owner))
- return -EINVAL;
-
- ret = strict_strtoul(buf, 0, &value);
- if (ret)
- goto out;
-
- if (value == 1)
- ret = ccwgroup_set_online(gdev);
- else if (value == 0)
- ret = ccwgroup_set_offline(gdev);
- else
- ret = -EINVAL;
-out:
- module_put(gdrv->owner);
- return (ret == 0) ? count : ret;
-}
-
-static ssize_t
-ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
-{
- int online;
-
- online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
-
- return sprintf(buf, online ? "1\n" : "0\n");
-}
-
-static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
-
-static int
-ccwgroup_probe (struct device *dev)
-{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- int ret;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
- if ((ret = device_create_file(dev, &dev_attr_online)))
- return ret;
-
- ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
- if (ret)
- device_remove_file(dev, &dev_attr_online);
-
- return ret;
-}
-
-static int
-ccwgroup_remove (struct device *dev)
+static int ccwgroup_remove(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- device_remove_file(dev, &dev_attr_online);
- device_remove_file(dev, &dev_attr_ungroup);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!dev->driver)
return 0;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
if (gdrv->remove)
gdrv->remove(gdev);
@@ -487,15 +467,11 @@ ccwgroup_remove (struct device *dev)
static void ccwgroup_shutdown(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!dev->driver)
return;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
if (gdrv->shutdown)
gdrv->shutdown(gdev);
}
@@ -560,7 +536,7 @@ static int ccwgroup_pm_restore(struct device *dev)
return gdrv->restore ? gdrv->restore(gdev) : 0;
}
-static struct dev_pm_ops ccwgroup_pm_ops = {
+static const struct dev_pm_ops ccwgroup_pm_ops = {
.prepare = ccwgroup_pm_prepare,
.complete = ccwgroup_pm_complete,
.freeze = ccwgroup_pm_freeze,
@@ -570,27 +546,11 @@ static struct dev_pm_ops ccwgroup_pm_ops = {
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
- .match = ccwgroup_bus_match,
- .uevent = ccwgroup_uevent,
- .probe = ccwgroup_probe,
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
.pm = &ccwgroup_pm_ops,
};
-
-static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- struct device *dev = data;
-
- if (action == BUS_NOTIFY_UNBIND_DRIVER)
- device_schedule_callback(dev, ccwgroup_ungroup_callback);
-
- return NOTIFY_OK;
-}
-
-
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
@@ -601,14 +561,12 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
- cdriver->driver.name = cdriver->name;
- cdriver->driver.owner = cdriver->owner;
return driver_register(&cdriver->driver);
}
+EXPORT_SYMBOL(ccwgroup_driver_register);
-static int
-__ccwgroup_match_all(struct device *dev, void *data)
+static int __ccwgroup_match_all(struct device *dev, void *data)
{
return 1;
}
@@ -624,20 +582,16 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */
- get_driver(&cdriver->driver);
while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
__ccwgroup_match_all))) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- mutex_lock(&gdev->reg_mutex);
- __ccwgroup_remove_symlinks(gdev);
- device_unregister(dev);
- mutex_unlock(&gdev->reg_mutex);
+ ccwgroup_ungroup(gdev);
put_device(dev);
}
- put_driver(&cdriver->driver);
driver_unregister(&cdriver->driver);
}
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
@@ -652,25 +606,7 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
-
-static struct ccwgroup_device *
-__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
-{
- struct ccwgroup_device *gdev;
-
- gdev = dev_get_drvdata(&cdev->dev);
- if (gdev) {
- if (get_device(&gdev->dev)) {
- mutex_lock(&gdev->reg_mutex);
- if (device_is_registered(&gdev->dev))
- return gdev;
- mutex_unlock(&gdev->reg_mutex);
- put_device(&gdev->dev);
- }
- return NULL;
- }
- return NULL;
-}
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
@@ -687,17 +623,19 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
- gdev = __ccwgroup_get_gdev_by_cdev(cdev);
- if (gdev) {
- __ccwgroup_remove_symlinks(gdev);
- device_unregister(&gdev->dev);
- mutex_unlock(&gdev->reg_mutex);
- put_device(&gdev->dev);
+ spin_lock_irq(cdev->ccwlock);
+ gdev = dev_get_drvdata(&cdev->dev);
+ if (!gdev) {
+ spin_unlock_irq(cdev->ccwlock);
+ return;
}
+ /* Get ccwgroup device reference for local processing. */
+ get_device(&gdev->dev);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Unregister group device. */
+ ccwgroup_ungroup(gdev);
+ /* Release ccwgroup device reference for local processing. */
+ put_device(&gdev->dev);
}
-
-MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ccwgroup_driver_register);
-EXPORT_SYMBOL(ccwgroup_driver_unregister);
-EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_LICENSE("GPL");
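For context, here is a hypothetical skeleton of a ccwgroup driver written against the reworked API above; the my_* names and the empty callback bodies are illustrative assumptions:

/*
 * Hypothetical ccwgroup driver skeleton. With this patch the driver
 * no longer creates the online/ungroup attributes itself; they come
 * with the group device via dev.groups.
 */
#include <linux/module.h>
#include <asm/ccwgroup.h>

static int my_setup(struct ccwgroup_device *gdev)
{
	/* called from ccwgroup_create_dev() before device_add() */
	return 0;
}

static int my_set_online(struct ccwgroup_device *gdev)
{
	/* bring the gdev->count slave devices in gdev->cdev[] online */
	return 0;
}

static int my_set_offline(struct ccwgroup_device *gdev)
{
	return 0;
}

static struct ccwgroup_driver my_gdrv = {
	.driver = {
		.name	= "my_group",
		.owner	= THIS_MODULE,
	},
	.setup		= my_setup,
	.set_online	= my_set_online,
	.set_offline	= my_set_offline,
};

static int __init my_init(void)
{
	return ccwgroup_driver_register(&my_gdrv);
}

A group device would then be created from a driver-provided sysfs attribute by calling something like ccwgroup_create_dev(parent, &my_gdrv, 2, "0.0.1000,0.0.1001"), where the bus ids are user-supplied and only examples here.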
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 9509e386093..07676c22d51 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -1,10 +1,13 @@
/*
* Handling of internal CCW device requests.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2011
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
@@ -38,9 +41,13 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
+ if (!req->singlepath) {
+ req->mask = 0;
+ goto out;
+ }
req->retries = req->maxretries;
- req->mask = lpm_adjust(req->mask >>= 1, req->lpm);
-
+ req->mask = lpm_adjust(req->mask >> 1, req->lpm);
+out:
return req->mask;
}
@@ -49,7 +56,6 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
*/
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
if (req->done)
@@ -57,7 +63,6 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
req->done = 1;
ccw_device_set_timeout(cdev, 0);
memset(&cdev->private->irb, 0, sizeof(struct irb));
- sch->lpm = sch->schib.pmcw.pam;
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
@@ -80,7 +85,6 @@ static void ccwreq_do(struct ccw_device *cdev)
continue;
}
/* Perform start function. */
- sch->lpm = 0xff;
memset(&cdev->private->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
@@ -116,8 +120,12 @@ void ccw_request_start(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- /* Try all paths twice to counter link flapping. */
- req->mask = 0x8080;
+ if (req->singlepath) {
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ } else
+ req->mask = req->lpm;
+
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask, req->lpm);
req->drc = 0;
@@ -162,6 +170,7 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
struct irb *irb = &cdev->private->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
+ enum uc_todo todo;
/* Perform BASIC SENSE if needed. */
if (ccw_device_accumulate_and_sense(cdev, lcirb))
@@ -181,6 +190,22 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
+ /* Ask the driver what to do */
+ if (cdev->drv && cdev->drv->uc_handler) {
+ todo = cdev->drv->uc_handler(cdev, lcirb);
+ CIO_TRACE_EVENT(2, "uc_response");
+ CIO_HEX_EVENT(2, &todo, sizeof(todo));
+ switch (todo) {
+ case UC_TODO_RETRY:
+ return IO_STATUS_ERROR;
+ case UC_TODO_RETRY_ON_NEW_PATH:
+ return IO_PATH_ERROR;
+ case UC_TODO_STOP:
+ return IO_REJECTED;
+ default:
+ return IO_STATUS_ERROR;
+ }
+ }
/* Assume that unexpected SENSE data implies an error. */
return IO_STATUS_ERROR;
}
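To illustrate the uc_handler hook consulted by ccwreq_status() above, a hypothetical driver callback; the sense-bit test and all names are assumptions:

/*
 * Hypothetical unit-check handler: map one (assumed) sense condition
 * to a path retry and let everything else be retried in place.
 */
static enum uc_todo my_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	if (irb->ecw[0] & SNS0_BUS_OUT_CHECK)
		return UC_TODO_RETRY_ON_NEW_PATH;
	return UC_TODO_RETRY;
}

The callback is wired up through the uc_handler member of struct ccw_driver and runs in interrupt context from the request handler, so it should only inspect the irb and return a uc_todo verdict.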
@@ -227,8 +252,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
*/
void ccw_request_handler(struct ccw_device *cdev)
{
+ struct irb *irb = &__get_cpu_var(cio_irb);
struct ccw_request *req = &cdev->private->req;
- struct irb *irb = (struct irb *) __LC_IRB;
enum io_status status;
int rc = -EOPNOTSUPP;
@@ -301,7 +326,21 @@ void ccw_request_timeout(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- int rc;
+ int rc = -ENODEV, chp;
+
+ if (cio_update_schib(sch))
+ goto err;
+
+ for (chp = 0; chp < 8; chp++) {
+ if ((0x80 >> chp) & sch->schib.pmcw.lpum)
+ pr_warning("%s: No interrupt was received within %lus "
+ "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+ dev_name(&cdev->dev), req->timeout / HZ,
+ scsw_cstat(&sch->schib.scsw),
+ scsw_dstat(&sch->schib.scsw),
+ sch->schid.cssid,
+ sch->schib.pmcw.chpid[chp]);
+ }
if (!ccwreq_next_path(cdev)) {
/* set the final return code for this request */
@@ -320,7 +359,7 @@ err:
* ccw_request_notoper - notoper handler for I/O request procedure
* @cdev: ccw device
*
- * Handle timeout during I/O request procedure.
+ * Handle notoper during I/O request procedure.
*/
void ccw_request_notoper(struct ccw_device *cdev)
{
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index c268a2e5b7c..d497aa05a72 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.c
- *
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -10,11 +8,14 @@
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/sched.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>
@@ -53,12 +54,6 @@ static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
static wait_queue_head_t cfg_wait_queue;
-/* Return channel_path struct for given chpid. */
-static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
-{
- return channel_subsystems[chpid.cssid]->chps[chpid.id];
-}
-
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
@@ -134,7 +129,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
/*
* Channel measurement related functions
*/
-static ssize_t chp_measurement_chars_read(struct kobject *kobj,
+static ssize_t chp_measurement_chars_read(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -181,7 +177,7 @@ static void chp_measurement_copy_block(struct cmg_entry *buf,
} while (reference_buf.values[0] != buf->values[0]);
}
-static ssize_t chp_measurement_read(struct kobject *kobj,
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -239,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
+ int status;
- if (!chp)
- return 0;
- return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
- sprintf(buf, "offline\n"));
+ mutex_lock(&chp->lock);
+ status = chp->state;
+ mutex_unlock(&chp->lock);
+
+ return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}
static ssize_t chp_status_write(struct device *dev,
@@ -259,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev,
if (!num_args)
return count;
- if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
+ if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+ mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 1);
- else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
+ mutex_unlock(&cp->lock);
+ } else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+ mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 0);
- else
+ mutex_unlock(&cp->lock);
+ } else
error = -EINVAL;
return error < 0 ? error : count;
-
}
static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
@@ -313,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
+ u8 type;
- if (!chp)
- return 0;
- return sprintf(buf, "%x\n", chp->desc.desc);
+ mutex_lock(&chp->lock);
+ type = chp->desc.desc;
+ mutex_unlock(&chp->lock);
+ return sprintf(buf, "%x\n", type);
}
static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -349,18 +352,57 @@ static ssize_t chp_shared_show(struct device *dev,
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
&dev_attr_cmg.attr,
&dev_attr_shared.attr,
+ &dev_attr_chid.attr,
+ &dev_attr_chid_external.attr,
NULL,
};
-
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
};
+static const struct attribute_group *chp_attr_groups[] = {
+ &chp_attr_group,
+ NULL,
+};
static void chp_release(struct device *dev)
{
@@ -371,6 +413,26 @@ static void chp_release(struct device *dev)
}
/**
+ * chp_update_desc - update channel-path description
+ * @chp: channel-path
+ *
+ * Update the channel-path description of the specified channel-path.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_update_desc(struct channel_path *chp)
+{
+ int rc;
+
+ rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
+ if (rc)
+ return rc;
+
+ rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+
+ return rc;
+}
+
+/**
* chp_new - register a new channel-path
* @chpid - channel-path ID
*
@@ -392,10 +454,12 @@ int chp_new(struct chp_id chpid)
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
+ chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
+ mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
- ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ ret = chp_update_desc(chp);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {
@@ -420,16 +484,10 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
- ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
- if (ret) {
- device_unregister(&chp->dev);
- goto out;
- }
mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
- sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
device_unregister(&chp->dev);
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
@@ -451,7 +509,7 @@ out:
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel-path ID. Return %NULL on error.
*/
-void *chp_get_chp_desc(struct chp_id chpid)
+struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid)
{
struct channel_path *chp;
struct channel_path_desc *desc;
@@ -462,7 +520,10 @@ void *chp_get_chp_desc(struct chp_id chpid)
desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
if (!desc)
return NULL;
+
+ mutex_lock(&chp->lock);
memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+ mutex_unlock(&chp->lock);
return desc;
}
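The new return type makes the descriptor copy explicit at the call site; a hypothetical caller now reads:

/* Hypothetical caller of chp_get_chp_desc(); my_chp_type is assumed. */
static u8 my_chp_type(struct chp_id chpid)
{
	struct channel_path_desc *desc;
	u8 type = 0;

	desc = chp_get_chp_desc(chpid);	/* kmalloc'd copy or NULL */
	if (desc) {
		type = desc->desc;	/* path type, cf. chp_type_show() */
		kfree(desc);
	}
	return type;
}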
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 26c3d224617..4efd5b867cc 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.h
- *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2010
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
@@ -10,6 +8,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <asm/chpid.h>
#include "chsc.h"
#include "css.h"
@@ -40,22 +39,31 @@ static inline int chp_test_bit(u8 *bitmap, int num)
struct channel_path {
+ struct device dev;
struct chp_id chpid;
+ struct mutex lock; /* Serialize access to below members. */
int state;
struct channel_path_desc desc;
+ struct channel_path_desc_fmt1 desc_fmt1;
/* Channel-measurement related stuff: */
int cmg;
int shared;
void *cmg_chars;
- struct device dev;
};
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+ return channel_subsystems[chpid.cssid]->chps[chpid.id];
+}
+
int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
-void *chp_get_chp_desc(struct chp_id chpid);
+struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
+int chp_update_desc(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ecd3e56764..e3bf885f4a6 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -15,11 +14,13 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/pci.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
+#include <asm/isc.h>
#include "css.h"
#include "cio.h"
@@ -29,6 +30,8 @@
#include "chsc.h"
static void *sei_page;
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
/**
* chsc_error_from_response() - convert a chsc response to an error
@@ -47,9 +50,16 @@ int chsc_error_from_response(int response)
case 0x0007:
case 0x0008:
case 0x000a:
+ case 0x0104:
return -EINVAL;
case 0x0004:
return -EOPNOTSUPP;
+ case 0x000b:
+ case 0x0107: /* "Channel busy" for the op 0x003d */
+ return -EBUSY;
+ case 0x0100:
+ case 0x0102:
+ return -ENOMEM;
default:
return -EIO;
}
@@ -82,17 +92,15 @@ struct chsc_ssd_area {
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
- unsigned long page;
struct chsc_ssd_area *ssd_area;
int ccode;
int ret;
int i;
int mask;
- page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
- return -ENOMEM;
- ssd_area = (struct chsc_ssd_area *) page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ ssd_area = chsc_page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
ssd_area->ssid = schid.ssid;
@@ -103,25 +111,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
/* Check response. */
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out_free;
+ goto out;
}
ret = chsc_error_from_response(ssd_area->response.code);
if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
- goto out_free;
+ goto out;
}
if (!ssd_area->sch_valid) {
ret = -ENODEV;
- goto out_free;
+ goto out;
}
/* Copy data */
ret = 0;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
(ssd_area->st != SUBCHANNEL_TYPE_MSG))
- goto out_free;
+ goto out;
ssd->path_mask = ssd_area->path_mask;
ssd->fla_valid_mask = ssd_area->fla_valid_mask;
for (i = 0; i < 8; i++) {
@@ -133,11 +141,70 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
if (ssd_area->fla_valid_mask & mask)
ssd->fla[i] = ssd_area->fla[i];
}
-out_free:
- free_page(page);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
+/**
+ * chsc_ssqd() - store subchannel QDIO data (SSQD)
+ * @schid: id of the subchannel on which SSQD is performed
+ * @ssqd: request and response block for SSQD
+ *
+ * Returns 0 on success.
+ */
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
+{
+ memset(ssqd, 0, sizeof(*ssqd));
+ ssqd->request.length = 0x0010;
+ ssqd->request.code = 0x0024;
+ ssqd->first_sch = schid.sch_no;
+ ssqd->last_sch = schid.sch_no;
+ ssqd->ssid = schid.ssid;
+
+ if (chsc(ssqd))
+ return -EIO;
+
+ return chsc_error_from_response(ssqd->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_ssqd);
+
+/**
+ * chsc_sadc() - set adapter device controls (SADC)
+ * @schid: id of the subchannel on which SADC is performed
+ * @scssc: request and response block for SADC
+ * @summary_indicator_addr: summary indicator address
+ * @subchannel_indicator_addr: subchannel indicator address
+ *
+ * Returns 0 on success.
+ */
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+{
+ memset(scssc, 0, sizeof(*scssc));
+ scssc->request.length = 0x0fe0;
+ scssc->request.code = 0x0021;
+ scssc->operation_code = 0;
+
+ scssc->summary_indicator_addr = summary_indicator_addr;
+ scssc->subchannel_indicator_addr = subchannel_indicator_addr;
+
+ scssc->ks = PAGE_DEFAULT_KEY >> 4;
+ scssc->kc = PAGE_DEFAULT_KEY >> 4;
+ scssc->isc = QDIO_AIRQ_ISC;
+ scssc->schid = schid;
+
+ /* enable the time delay disablement facility */
+ if (css_general_characteristics.aif_tdd)
+ scssc->word_with_d_bit = 0x10000000;
+
+ if (chsc(scssc))
+ return -EIO;
+
+ return chsc_error_from_response(scssc->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_sadc);
+
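A minimal sketch of a chsc_ssqd() caller, under the usual convention that chsc request blocks occupy a single zeroed page below 2G; the names and the response-field access are assumptions:

/* Hypothetical chsc_ssqd() caller. */
static int my_read_ssqd(struct subchannel_id schid,
			struct qdio_ssqd_desc *desc)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	ssqd = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!ssqd)
		return -ENOMEM;
	rc = chsc_ssqd(schid, ssqd);
	if (rc == 0)
		*desc = ssqd->qdio_ssqd;	/* assumed response layout */
	free_page((unsigned long)ssqd);
	return rc;
}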
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -171,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid)
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
-static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- /*
- * We don't know the device yet, but since a path
- * may be available now to the device we'll have
- * to do recognition again.
- * Since we don't have any idea about which chpid
- * that beast may be on we'll have to do a stsch
- * on all devices, grr...
- */
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
-
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -221,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link)
* The more information we have (info), the less scanning
* will we have to do.
*/
- for_each_subchannel_staged(__s390_process_res_acc,
- s390_process_res_acc_new_sch, link);
+ for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+ css_schedule_reprobe();
}
static int
@@ -255,26 +302,46 @@ __get_chpid_from_lir(void *data)
return (u16) (lir->indesc[0]&0x000000ff);
}
-struct chsc_sei_area {
- struct chsc_header request;
+struct chsc_sei_nt0_area {
+ u8 flags;
+ u8 vf; /* validity flags */
+ u8 rs; /* reporting source */
+ u8 cc; /* content code */
+ u16 fla; /* full link address */
+ u16 rsid; /* reporting source id */
u32 reserved1;
u32 reserved2;
- u32 reserved3;
- struct chsc_header response;
- u32 reserved4;
- u8 flags;
- u8 vf; /* validity flags */
- u8 rs; /* reporting source */
- u8 cc; /* content code */
- u16 fla; /* full link address */
- u16 rsid; /* reporting source id */
- u32 reserved5;
- u32 reserved6;
- u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
/* ccdf has to be big enough for a link-incident record */
-} __attribute__ ((packed));
-
-static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+ u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+ u8 flags; /* p and v bit */
+ u8 reserved1;
+ u8 reserved2;
+ u8 cc; /* content code */
+ u32 reserved3[13];
+ u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0 (1ULL << 63)
+#define CHSC_SEI_NT2 (1ULL << 61)
+
+struct chsc_sei {
+ struct chsc_header request;
+ u32 reserved1;
+ u64 ntsm; /* notification type mask */
+ struct chsc_header response;
+ u32 :24;
+ u8 nt;
+ union {
+ struct chsc_sei_nt0_area nt0_area;
+ struct chsc_sei_nt2_area nt2_area;
+ u8 nt_area[PAGE_SIZE - 24];
+ } u;
+} __packed;
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
struct chp_id chpid;
int id;
@@ -293,7 +360,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
@@ -325,13 +392,43 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
s390_process_res_acc(&link);
}
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+ u8 *data;
+ int num;
+
+ CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+ if (sei_area->rs != 0)
+ return;
+ data = sei_area->ccdf;
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data, num))
+ continue;
+ chpid.id = num;
+
+ CIO_CRW_EVENT(4, "Update information for channel path "
+ "%x.%02x\n", chpid.cssid, chpid.id);
+ chp = chpid_to_chp(chpid);
+ if (!chp) {
+ chp_new(chpid);
+ continue;
+ }
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
+}
+
struct chp_config_data {
u8 map[32];
u8 op;
u8 pc;
};
-static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
@@ -363,34 +460,139 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
- /* Check if we might have lost some information. */
- if (sei_area->flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm change notification\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_update_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: updating change notification"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm available information\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_process_availability_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: process availability information"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
+{
+ switch (sei_area->cc) {
+ case 1:
+ zpci_event_error(sei_area->ccdf);
+ break;
+ case 2:
+ zpci_event_availability(sei_area->ccdf);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
+ sei_area->cc);
+ break;
}
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
/* which kind of information was stored? */
switch (sei_area->cc) {
	case 1: /* link incident */
chsc_process_sei_link_incident(sei_area);
break;
- case 2: /* i/o resource accessibiliy */
+ case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
+ case 7: /* channel-path-availability information */
+ chsc_process_sei_chp_avail(sei_area);
+ break;
case 8: /* channel-path-configuration notification */
chsc_process_sei_chp_config(sei_area);
break;
+ case 12: /* scm change notification */
+ chsc_process_sei_scm_change(sei_area);
+ break;
+ case 14: /* scm available notification */
+ chsc_process_sei_scm_avail(sei_area);
+ break;
default: /* other stuff */
- CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
break;
}
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
}
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+{
+ static int ntsm_unsupported;
+
+ while (true) {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code != 0x0001) {
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
+ break;
+ }
+
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
+}
+
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei_area *sei_area;
+ struct chsc_sei *sei = sei_page;
if (overflow) {
css_schedule_eval_all();
@@ -400,29 +602,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
- if (!sei_page)
- return;
- /* Access to sei_page is serialized through machine check handler
- * thread, so no need for locking. */
- sei_area = sei_page;
CIO_TRACE_EVENT(2, "prcss");
- do {
- memset(sei_area, 0, sizeof(*sei_area));
- sei_area->request.length = 0x0010;
- sei_area->request.code = 0x000e;
- if (chsc(sei_area))
- break;
-
- if (sei_area->response.code == 0x0001) {
- CIO_CRW_EVENT(4, "chsc: sei successful\n");
- chsc_process_sei(sei_area);
- } else {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei_area->response.code);
- break;
- }
- } while (sei_area->flags & 0x80);
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
@@ -440,6 +622,7 @@ void chsc_chp_online(struct chp_id chpid)
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
+ css_schedule_reprobe();
}
}
@@ -474,19 +657,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
return 0;
}
-static int
-__s390_vary_chpid_on(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
@@ -494,22 +664,22 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
- struct chp_link link;
+ struct channel_path *chp = chpid_to_chp(chpid);
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* Redo PathVerification on the devices the chpid connects to
*/
-
- if (on)
+ if (on) {
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
- else
+ NULL, &chpid);
+ css_schedule_reprobe();
+ } else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
@@ -549,7 +719,7 @@ cleanup:
return ret;
}
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
struct {
struct chsc_header request;
@@ -570,19 +740,23 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
} __attribute__ ((packed)) *secm_area;
int ret, ccode;
- secm_area = page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ secm_area = chsc_page;
secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016;
- secm_area->key = PAGE_DEFAULT_KEY;
+ secm_area->key = PAGE_DEFAULT_KEY >> 4;
secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
secm_area->operation_code = enable ? 0 : 1;
ccode = chsc(secm_area);
- if (ccode > 0)
- return (ccode == 3) ? -ENODEV : -EBUSY;
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
switch (secm_area->response.code) {
case 0x0102:
@@ -595,37 +769,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
- void *secm_area;
int ret;
- secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!secm_area)
- return -ENOMEM;
-
if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!css->cub_addr1 || !css->cub_addr2) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
- free_page((unsigned long)secm_area);
return -ENOMEM;
}
}
- ret = __chsc_do_secm(css, enable, secm_area);
+ ret = __chsc_do_secm(css, enable);
if (!ret) {
css->cm_enabled = enable;
if (css->cm_enabled) {
ret = chsc_add_cmg_attr(css);
if (ret) {
- memset(secm_area, 0, PAGE_SIZE);
- __chsc_do_secm(css, 0, secm_area);
+ __chsc_do_secm(css, 0);
css->cm_enabled = 0;
}
} else
@@ -635,44 +804,24 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
- free_page((unsigned long)secm_area);
return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
- int c, int m,
- struct chsc_response_struct *resp)
+ int c, int m, void *page)
{
+ struct chsc_scpd *scpd_area;
int ccode, ret;
- struct {
- struct chsc_header request;
- u32 : 2;
- u32 m : 1;
- u32 c : 1;
- u32 fmt : 4;
- u32 cssid : 8;
- u32 : 4;
- u32 rfmt : 4;
- u32 first_chpid : 8;
- u32 : 24;
- u32 last_chpid : 8;
- u32 zeroes1;
- struct chsc_header response;
- u8 data[PAGE_SIZE - 20];
- } __attribute__ ((packed)) *scpd_area;
-
if ((rfmt == 1) && !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
- scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scpd_area)
- return -ENOMEM;
+ memset(page, 0, PAGE_SIZE);
+ scpd_area = page;
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
-
scpd_area->cssid = chpid.cssid;
scpd_area->first_chpid = chpid.id;
scpd_area->last_chpid = chpid.id;
@@ -682,20 +831,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
scpd_area->rfmt = rfmt;
ccode = chsc(scpd_area);
- if (ccode > 0) {
- ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out;
- }
+ if (ccode > 0)
+ return (ccode == 3) ? -ENODEV : -EBUSY;
ret = chsc_error_from_response(scpd_area->response.code);
- if (ret == 0)
- /* Success. */
- memcpy(resp, &scpd_area->response, scpd_area->response.length);
- else
+ if (ret)
CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
-out:
- free_page((unsigned long)scpd_area);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
@@ -704,17 +846,39 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
int ret;
- chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
- if (!chsc_resp)
- return -ENOMEM;
- ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
+ if (ret)
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc)
+{
+ struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
if (ret)
- goto out_free;
- memcpy(desc, &chsc_resp->data, chsc_resp->length);
-out_free:
- kfree(chsc_resp);
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
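Both descriptor helpers copy the result into caller-provided storage via the shared chsc_page, so callers need no special allocation; a hedged sketch (wrapper name hypothetical):

static int example_get_fmt1(struct chp_id chpid, u16 *mdc)
{
        struct channel_path_desc_fmt1 desc;
        int rc;

        /* e.g. pick up the mdc field of the format-1 description. */
        rc = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
        if (!rc)
                *mdc = desc.mdc;
        return rc;
}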
@@ -722,33 +886,22 @@ static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
- switch (chp->cmg) {
- case 2:
- case 3:
- chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
- GFP_KERNEL);
- if (chp->cmg_chars) {
- int i, mask;
- struct cmg_chars *cmg_chars;
-
- cmg_chars = chp->cmg_chars;
- for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
- mask = 0x80 >> (i + 3);
- if (cmcv & mask)
- cmg_chars->values[i] = chars->values[i];
- else
- cmg_chars->values[i] = 0;
- }
- }
- break;
- default:
- /* No cmg-dependent data. */
- break;
+ struct cmg_chars *cmg_chars;
+ int i, mask;
+
+ cmg_chars = chp->cmg_chars;
+ for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+ mask = 0x80 >> (i + 3);
+ if (cmcv & mask)
+ cmg_chars->values[i] = chars->values[i];
+ else
+ cmg_chars->values[i] = 0;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
+ struct cmg_chars *cmg_chars;
int ccode, ret;
struct {
@@ -772,13 +925,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
- scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scmc_area)
+ chp->cmg_chars = NULL;
+ cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+ if (!cmg_chars)
return -ENOMEM;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
-
scmc_area->first_chpid = chp->chpid.id;
scmc_area->last_chpid = chp->chpid.id;
@@ -789,52 +945,63 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
}
ret = chsc_error_from_response(scmc_area->response.code);
- if (ret == 0) {
- /* Success. */
- if (!scmc_area->not_valid) {
- chp->cmg = scmc_area->cmg;
- chp->shared = scmc_area->shared;
- chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
- (struct cmg_chars *)
- &scmc_area->data);
- } else {
- chp->cmg = -1;
- chp->shared = -1;
- }
- } else {
+ if (ret) {
CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
+ goto out;
+ }
+ if (scmc_area->not_valid) {
+ chp->cmg = -1;
+ chp->shared = -1;
+ goto out;
}
+ chp->cmg = scmc_area->cmg;
+ chp->shared = scmc_area->shared;
+ if (chp->cmg != 2 && chp->cmg != 3) {
+ /* No cmg-dependent data. */
+ goto out;
+ }
+ chp->cmg_chars = cmg_chars;
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+ (struct cmg_chars *) &scmc_area->data);
out:
- free_page((unsigned long)scmc_area);
+ spin_unlock_irq(&chsc_page_lock);
+ if (!chp->cmg_chars)
+ kfree(cmg_chars);
+
return ret;
}
-int __init chsc_alloc_sei_area(void)
+int __init chsc_init(void)
{
int ret;
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sei_page) {
- CIO_MSG_EVENT(0, "Can't allocate page for processing of "
- "chsc machine checks!\n");
- return -ENOMEM;
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sei_page || !chsc_page) {
+ ret = -ENOMEM;
+ goto out_err;
}
ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
- kfree(sei_page);
+ goto out_err;
+ return ret;
+out_err:
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
return ret;
}
-void __init chsc_free_sei_area(void)
+void __init chsc_init_cleanup(void)
{
crw_unregister_handler(CRW_RSC_CSS);
- kfree(sei_page);
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
}
-int __init
-chsc_enable_facility(int operation_code)
+int chsc_enable_facility(int operation_code)
{
+ unsigned long flags;
int ret;
struct {
struct chsc_header request;
@@ -851,9 +1018,9 @@ chsc_enable_facility(int operation_code)
u32 reserved6:24;
} __attribute__ ((packed)) *sda_area;
- sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!sda_area)
- return -ENOMEM;
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sda_area = chsc_page;
sda_area->request.length = 0x0400;
sda_area->request.code = 0x0031;
sda_area->operation_code = operation_code;
@@ -874,8 +1041,8 @@ chsc_enable_facility(int operation_code)
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
operation_code, sda_area->response.code);
- out:
- free_page((unsigned long)sda_area);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -894,13 +1061,12 @@ chsc_determine_css_characteristics(void)
struct chsc_header response;
u32 reserved4;
u32 general_char[510];
- u32 chsc_char[518];
+ u32 chsc_char[508];
} __attribute__ ((packed)) *scsc_area;
- scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scsc_area)
- return -ENOMEM;
-
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scsc_area = chsc_page;
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
@@ -920,7 +1086,7 @@ chsc_determine_css_characteristics(void)
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
- free_page ((unsigned long) scsc_area);
+ spin_unlock_irq(&chsc_page_lock);
return result;
}
@@ -975,3 +1141,110 @@ int chsc_sstpi(void *page, void *result, size_t size)
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
+int chsc_siosl(struct subchannel_id schid)
+{
+ struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+ } __attribute__ ((packed)) *siosl_area;
+ unsigned long flags;
+ int ccode;
+ int rc;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ siosl_area = chsc_page;
+ siosl_area->request.length = 0x0010;
+ siosl_area->request.code = 0x0046;
+ siosl_area->word1 = 0x80000000;
+ siosl_area->sid = schid;
+
+ ccode = chsc(siosl_area);
+ if (ccode > 0) {
+ if (ccode == 3)
+ rc = -ENODEV;
+ else
+ rc = -EBUSY;
+ CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+ schid.ssid, schid.sch_no, ccode);
+ goto out;
+ }
+ rc = chsc_error_from_response(siosl_area->response.code);
+ if (rc)
+ CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ siosl_area->response.code);
+ else
+ CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
+
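A hedged caller sketch for chsc_siosl(); since the shared chsc_page is used internally, the caller passes only the subchannel id. Walking from a ccw device to its subchannel via the parent pointer is shown as one plausible call site:

static int example_siosl(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);

        /* Ask the channel subsystem to log state for this subchannel. */
        return chsc_siosl(sch->schid);
}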
+/**
+ * chsc_scm_info() - store SCM information (SSI)
+ * @scm_area: request and response block for SSI
+ * @token: continuation token
+ *
+ * Returns 0 on success.
+ */
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
+{
+ int ccode, ret;
+
+ memset(scm_area, 0, sizeof(*scm_area));
+ scm_area->request.length = 0x0020;
+ scm_area->request.code = 0x004C;
+ scm_area->reqtok = token;
+
+ ccode = chsc(scm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(scm_area->response.code);
+ if (ret != 0)
+ CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
+ scm_area->response.code);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scm_info);
+
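The reqtok/restok pair implements a continuation protocol: when one response block cannot hold the whole SCM address list, restok comes back non-zero and is fed into the next request. A hedged sketch (the wrapper, the callback, and the end-of-entries test are assumptions):

static int example_scm_walk(void (*cb)(struct sale *sale))
{
        struct chsc_scm_info *scm_info;
        u64 token = 0;
        int ret, i;

        scm_info = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scm_info)
                return -ENOMEM;
        do {
                ret = chsc_scm_info(scm_info, token);
                if (ret)
                        break;
                for (i = 0; i < ARRAY_SIZE(scm_info->scmal); i++) {
                        /* Assumption: an all-zero address marks the
                         * end of the valid entries in this block. */
                        if (!scm_info->scmal[i].sa)
                                break;
                        cb(&scm_info->scmal[i]);
                }
                token = scm_info->restok; /* 0 once the list is complete */
        } while (token);
        free_page((unsigned long)scm_info);
        return ret;
}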
+/**
+ * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * @schid: id of the subchannel on which PNSO is performed
+ * @brinfo_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc)
+{
+ memset(brinfo_area, 0, sizeof(*brinfo_area));
+ brinfo_area->request.length = 0x0030;
+ brinfo_area->request.code = 0x003d; /* network-subchannel operation */
+ brinfo_area->m = schid.m;
+ brinfo_area->ssid = schid.ssid;
+ brinfo_area->sch = schid.sch_no;
+ brinfo_area->cssid = schid.cssid;
+ brinfo_area->oc = 0; /* Store-network-bridging-information list */
+ brinfo_area->resume_token = resume_token;
+ brinfo_area->n = (cnc != 0);
+ if (chsc(brinfo_area))
+ return -EIO;
+ return chsc_error_from_response(brinfo_area->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
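The resume token plays the same role for PNSO: a zeroed token starts a new operation, and a token copied back from the response's naihdr continues it. A hedged multiblock sketch (how "more data" is signalled is an assumption here; per the kernel-doc above, the area is a zeroed GFP_KERNEL page):

static int example_brinfo(struct subchannel_id schid, int cnc,
                          void (*cb)(struct chsc_pnso_area *area))
{
        struct chsc_brinfo_resume_token token = { 0, 0 };
        struct chsc_pnso_area *area;
        int rc;

        area = (void *)get_zeroed_page(GFP_KERNEL);
        if (!area)
                return -ENOMEM;
        do {
                rc = chsc_pnso_brinfo(schid, area, token, cnc);
                if (rc)
                        break;
                cb(area);                       /* consume this block */
                /* Assumption: a non-zero resume token in the response
                 * header means more entries are available. */
                token = area->naihdr.resume_token;
        } while (token.t1 || token.t2);
        free_page((unsigned long)area);
        return rc;
}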
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 37aa611d4ac..76c9b50700b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -3,17 +3,14 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <asm/css_chars.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/schid.h>
+#include <asm/qdio.h>
#define CHSC_SDA_OC_MSS 0x2
-struct chsc_header {
- u16 length;
- u16 code;
-} __attribute__ ((packed));
-
#define NR_MEASUREMENT_CHARS 5
struct cmg_chars {
u32 values[NR_MEASUREMENT_CHARS];
@@ -24,15 +21,22 @@ struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
} __attribute__ ((packed));
-struct channel_path_desc {
+struct channel_path_desc_fmt1 {
u8 flags;
u8 lsn;
u8 desc;
u8 chpid;
- u8 swla;
- u8 zeroes;
- u8 chla;
+ u32:24;
u8 chpp;
+ u32 unused[2];
+ u16 chid;
+ u32:16;
+ u16 mdc;
+ u16:13;
+ u8 r:1;
+ u8 s:1;
+ u8 f:1;
+ u32 zeros[2];
} __attribute__ ((packed));
struct channel_path;
@@ -46,7 +50,9 @@ struct css_chsc_char {
u32 : 20;
u32 scssc : 1; /* bit 107 */
u32 scsscf : 1; /* bit 108 */
- u32 : 19;
+ u32:7;
+ u32 pnso:1; /* bit 116 */
+ u32:11;
}__attribute__((packed));
extern struct css_chsc_char css_chsc_characteristics;
@@ -57,27 +63,176 @@ struct chsc_ssd_info {
struct chp_id chpid[8];
u16 fla[8];
};
+
+struct chsc_ssqd_area {
+ struct chsc_header request;
+ u16:10;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 first_sch;
+ u16:16;
+ u16 last_sch;
+ u32:32;
+ struct chsc_header response;
+ u32:32;
+ struct qdio_ssqd_desc qdio_ssqd;
+} __packed;
+
+struct chsc_scssc_area {
+ struct chsc_header request;
+ u16 operation_code;
+ u16:16;
+ u32:32;
+ u32:32;
+ u64 summary_indicator_addr;
+ u64 subchannel_indicator_addr;
+ u32 ks:4;
+ u32 kc:4;
+ u32:21;
+ u32 isc:3;
+ u32 word_with_d_bit;
+ u32:32;
+ struct subchannel_id schid;
+ u32 reserved[1004];
+ struct chsc_header response;
+ u32:32;
+} __packed;
+
+struct chsc_scpd {
+ struct chsc_header request;
+ u32:2;
+ u32 m:1;
+ u32 c:1;
+ u32 fmt:4;
+ u32 cssid:8;
+ u32:4;
+ u32 rfmt:4;
+ u32 first_chpid:8;
+ u32:24;
+ u32 last_chpid:8;
+ u32 zeroes1;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+} __attribute__ ((packed));
+
+
extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
-extern int chsc_alloc_sei_area(void);
-extern void chsc_free_sei_area(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
- int c, int m,
- struct chsc_response_struct *resp);
+ int c, int m, void *page);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc);
void chsc_chp_online(struct chp_id chpid);
void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
-
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr);
int chsc_error_from_response(int response);
+int chsc_siosl(struct subchannel_id schid);
+
+/* Functions and definitions to query storage-class memory. */
+struct sale {
+ u64 sa;
+ u32 p:4;
+ u32 op_state:4;
+ u32 data_state:4;
+ u32 rank:4;
+ u32 r:1;
+ u32:7;
+ u32 rid:8;
+ u32:32;
+} __packed;
+
+struct chsc_scm_info {
+ struct chsc_header request;
+ u32:32;
+ u64 reqtok;
+ u32 reserved1[4];
+ struct chsc_header response;
+ u64:56;
+ u8 rq;
+ u32 mbc;
+ u64 msa;
+ u16 is;
+ u16 mmc;
+ u32 mci;
+ u64 nr_scm_ini;
+ u64 nr_scm_unini;
+ u32 reserved2[10];
+ u64 restok;
+ struct sale scmal[248];
+} __packed;
+
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+
+struct chsc_brinfo_resume_token {
+ u64 t1;
+ u64 t2;
+} __packed;
+
+struct chsc_brinfo_naihdr {
+ struct chsc_brinfo_resume_token resume_token;
+ u32:32;
+ u32 instance;
+ u32:24;
+ u8 naids;
+ u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+ struct chsc_header request;
+ u8:2;
+ u8 m:1;
+ u8:5;
+ u8:2;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 sch;
+ u8:8;
+ u8 cssid;
+ u16:16;
+ u8 oc;
+ u32:24;
+ struct chsc_brinfo_resume_token resume_token;
+ u32 n:1;
+ u32:31;
+ u32 reserved[3];
+ struct chsc_header response;
+ u32:32;
+ struct chsc_brinfo_naihdr naihdr;
+ union {
+ struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
+ struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
+ struct qdio_brinfo_entry_l2 l2[0];
+ } entries;
+} __packed;
+
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc);
+
+#ifdef CONFIG_SCM_BUS
+int scm_update_information(void);
+int scm_process_availability_information(void);
+#else /* CONFIG_SCM_BUS */
+static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
+#endif /* CONFIG_SCM_BUS */
+
+
#endif
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index cc5144b6f9d..3d22d2a4ce1 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,17 +1,21 @@
/*
* Driver for s390 chsc subchannels
*
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2011
*
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*
*/
+#include <linux/slab.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <linux/kernel_stat.h>
+#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
@@ -25,6 +29,10 @@
static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;
+static struct chsc_request *on_close_request;
+static struct chsc_async_area *on_close_chsc_area;
+static DEFINE_MUTEX(on_close_mutex);
+
#define CHSC_MSG(imp, args...) do { \
debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \
} while (0)
@@ -48,12 +56,14 @@ MODULE_LICENSE("GPL");
static void chsc_subchannel_irq(struct subchannel *sch)
{
- struct chsc_private *private = sch->private;
+ struct chsc_private *private = dev_get_drvdata(&sch->dev);
struct chsc_request *request = private->request;
- struct irb *irb = (struct irb *)__LC_IRB;
+ struct irb *irb = &__get_cpu_var(cio_irb);
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
+ inc_irq_stat(IRQIO_CSC);
+
/* Copy irb to provided request and set done. */
if (!request) {
CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
@@ -78,13 +88,14 @@ static int chsc_subchannel_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
+ dev_set_drvdata(&sch->dev, private);
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret) {
CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
+ dev_set_drvdata(&sch->dev, NULL);
kfree(private);
} else {
- sch->private = private;
if (dev_get_uevent_suppress(&sch->dev)) {
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
@@ -98,8 +109,8 @@ static int chsc_subchannel_remove(struct subchannel *sch)
struct chsc_private *private;
cio_disable_subchannel(sch);
- private = sch->private;
- sch->private = NULL;
+ private = dev_get_drvdata(&sch->dev);
+ dev_set_drvdata(&sch->dev, NULL);
if (private->request) {
complete(&private->request->completion);
put_device(&sch->dev);
@@ -122,7 +133,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch)
* since we don't have a way to clear the subchannel and
* cannot disable it with a request running.
*/
- cc = stsch(sch->schid, &schib);
+ cc = stsch_err(sch->schid, &schib);
if (!cc && scsw_stctl(&schib.scsw))
return -EAGAIN;
return 0;
@@ -145,7 +156,10 @@ static struct css_device_id chsc_subchannel_ids[] = {
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
static struct css_driver chsc_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "chsc_subchannel",
+ },
.subchannel_type = chsc_subchannel_ids,
.irq = chsc_subchannel_irq,
.probe = chsc_subchannel_probe,
@@ -155,13 +169,11 @@ static struct css_driver chsc_subchannel_driver = {
.freeze = chsc_subchannel_freeze,
.thaw = chsc_subchannel_restore,
.restore = chsc_subchannel_restore,
- .name = "chsc_subchannel",
};
static int __init chsc_init_dbfs(void)
{
- chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
- 16 * sizeof(long));
+ chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
if (!chsc_debug_msg_id)
goto out;
debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
@@ -236,10 +248,10 @@ static int chsc_async(struct chsc_async_area *chsc_area,
int ret = -ENODEV;
char dbf[10];
- chsc_area->header.key = PAGE_DEFAULT_KEY;
+ chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
- private = sch->private;
+ private = dev_get_drvdata(&sch->dev);
if (private->request) {
spin_unlock(sch->lock);
ret = -EBUSY;
@@ -249,7 +261,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
CHSC_LOG(2, "schid");
CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
cc = chsc(chsc_area);
- sprintf(dbf, "cc:%d", cc);
+ snprintf(dbf, sizeof(dbf), "cc:%d", cc);
CHSC_LOG(2, dbf);
switch (cc) {
case 0:
@@ -278,11 +290,11 @@ static int chsc_async(struct chsc_async_area *chsc_area,
return ret;
}
-static void chsc_log_command(struct chsc_async_area *chsc_area)
+static void chsc_log_command(void *chsc_area)
{
char dbf[10];
- sprintf(dbf, "CHSC:%x", chsc_area->header.code);
+ snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
CHSC_LOG(0, dbf);
CHSC_LOG_HEX(0, chsc_area, 32);
}
@@ -346,13 +358,106 @@ static int chsc_ioctl_start(void __user *user_area)
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
ret = -EFAULT;
out_free:
- sprintf(dbf, "ret:%d", ret);
+ snprintf(dbf, sizeof(dbf), "ret:%d", ret);
CHSC_LOG(0, dbf);
kfree(request);
free_page((unsigned long)chsc_area);
return ret;
}
+static int chsc_ioctl_on_close_set(void __user *user_area)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (on_close_chsc_area) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
+ if (!on_close_request) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ if (!on_close_chsc_area) {
+ ret = -ENOMEM;
+ goto out_free_request;
+ }
+ if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_chsc;
+ }
+ ret = 0;
+ goto out_unlock;
+
+out_free_chsc:
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+out_free_request:
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
+static int chsc_ioctl_on_close_remove(void)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+ ret = 0;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
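Seen from user space, the two ioctls arm and disarm a single request that chsc_release() issues when the device is finally closed. A hedged sketch (the device path and uapi header are assumptions; error handling trimmed):

/* Hypothetical user-space sketch; area points to a page-sized
 * chsc_async_area as defined in the uapi <asm/chsc.h>. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/chsc.h>

void example_on_close(struct chsc_async_area *area)
{
        int fd = open("/dev/chsc", O_RDWR);

        if (fd < 0)
                return;
        ioctl(fd, CHSC_ON_CLOSE_SET, area);     /* arm */
        /* ioctl(fd, CHSC_ON_CLOSE_REMOVE) would disarm it again. */
        close(fd);      /* the armed request is issued here */
}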
+static int chsc_ioctl_start_sync(void __user *user_area)
+{
+ struct chsc_sync_area *chsc_area;
+ int ret, ccode;
+
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!chsc_area)
+ return -ENOMEM;
+ if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ if (chsc_area->header.code & 0x4000) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ chsc_log_command(chsc_area);
+ ccode = chsc(chsc_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ free_page((unsigned long)chsc_area);
+ return ret;
+}
+
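For comparison, the synchronous path is a single round trip: the same page carries the request in and the full response out, and command codes with bit 0x4000 set (the asynchronous ones) are rejected with -EINVAL. A hedged user-space sketch:

/* Hypothetical user-space sketch; page must be page-sized and hold a
 * valid synchronous CHSC request on entry. */
int example_start_sync(int fd, void *page)
{
        return ioctl(fd, CHSC_START_SYNC, page);
}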
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
struct chsc_chp_cd *cd;
@@ -686,25 +791,31 @@ out_free:
static int chsc_ioctl_chpd(void __user *user_chpd)
{
+ struct chsc_scpd *scpd_area;
struct chsc_cpd_info *chpd;
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
- if (!chpd)
- return -ENOMEM;
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scpd_area || !chpd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
ret = -EFAULT;
goto out_free;
}
ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
chpd->rfmt, chpd->c, chpd->m,
- &chpd->chpdb);
+ scpd_area);
if (ret)
goto out_free;
+ memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
ret = -EFAULT;
out_free:
kfree(chpd);
+ free_page((unsigned long)scpd_area);
return ret;
}
@@ -770,33 +881,89 @@ out_free:
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
+ void __user *argp;
+
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
switch (cmd) {
case CHSC_START:
- return chsc_ioctl_start((void __user *)arg);
+ return chsc_ioctl_start(argp);
+ case CHSC_START_SYNC:
+ return chsc_ioctl_start_sync(argp);
case CHSC_INFO_CHANNEL_PATH:
- return chsc_ioctl_info_channel_path((void __user *)arg);
+ return chsc_ioctl_info_channel_path(argp);
case CHSC_INFO_CU:
- return chsc_ioctl_info_cu((void __user *)arg);
+ return chsc_ioctl_info_cu(argp);
case CHSC_INFO_SCH_CU:
- return chsc_ioctl_info_sch_cu((void __user *)arg);
+ return chsc_ioctl_info_sch_cu(argp);
case CHSC_INFO_CI:
- return chsc_ioctl_conf_info((void __user *)arg);
+ return chsc_ioctl_conf_info(argp);
case CHSC_INFO_CCL:
- return chsc_ioctl_conf_comp_list((void __user *)arg);
+ return chsc_ioctl_conf_comp_list(argp);
case CHSC_INFO_CPD:
- return chsc_ioctl_chpd((void __user *)arg);
+ return chsc_ioctl_chpd(argp);
case CHSC_INFO_DCAL:
- return chsc_ioctl_dcal((void __user *)arg);
+ return chsc_ioctl_dcal(argp);
+ case CHSC_ON_CLOSE_SET:
+ return chsc_ioctl_on_close_set(argp);
+ case CHSC_ON_CLOSE_REMOVE:
+ return chsc_ioctl_on_close_remove();
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
}
+static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
+
+static int chsc_open(struct inode *inode, struct file *file)
+{
+ if (!atomic_dec_and_test(&chsc_ready_for_use)) {
+ atomic_inc(&chsc_ready_for_use);
+ return -EBUSY;
+ }
+ return nonseekable_open(inode, file);
+}
+
+static int chsc_release(struct inode *inode, struct file *filp)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area)
+ goto out_unlock;
+ init_completion(&on_close_request->completion);
+ CHSC_LOG(0, "on_close");
+ chsc_log_command(on_close_chsc_area);
+ spin_lock_irq(&chsc_lock);
+ ret = chsc_async(on_close_chsc_area, on_close_request);
+ spin_unlock_irq(&chsc_lock);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&on_close_request->completion);
+ ret = chsc_examine_irb(on_close_request);
+ }
+ snprintf(dbf, sizeof(dbf), "relret:%d", ret);
+ CHSC_LOG(0, dbf);
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ atomic_inc(&chsc_ready_for_use);
+ return 0;
+}
+
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
+ .open = chsc_open,
+ .release = chsc_release,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
+ .llseek = no_llseek,
};
static struct miscdevice chsc_misc_device = {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 126f240715a..2905d8b0ec9 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2008
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -19,6 +18,7 @@
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -29,7 +29,7 @@
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
@@ -46,6 +46,9 @@ debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;
+DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb);
+EXPORT_PER_CPU_SYMBOL(cio_irb);
+
/*
* Function: cio_debug_init
* Initializes three debug logs for common I/O:
@@ -55,7 +58,7 @@ debug_info_t *cio_debug_crw_id;
*/
static int __init cio_debug_init(void)
{
- cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
+ cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
if (!cio_debug_msg_id)
goto out_unregister;
debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
@@ -65,7 +68,7 @@ static int __init cio_debug_init(void)
goto out_unregister;
debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
debug_set_level(cio_debug_trace_id, 2);
- cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
+ cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
if (!cio_debug_crw_id)
goto out_unregister;
debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
@@ -84,29 +87,14 @@ out_unregister:
arch_initcall (cio_debug_init);
-int
-cio_set_options (struct subchannel *sch, int flags)
+int cio_set_options(struct subchannel *sch, int flags)
{
- sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
- sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
- sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
- return 0;
-}
+ struct io_subchannel_private *priv = to_io_private(sch);
-/* FIXME: who wants to use this? */
-int
-cio_get_options (struct subchannel *sch)
-{
- int flags;
-
- flags = 0;
- if (sch->options.suspend)
- flags |= DOIO_ALLOW_SUSPEND;
- if (sch->options.prefetch)
- flags |= DOIO_DENY_PREFETCH;
- if (sch->options.inter)
- flags |= DOIO_SUPPRESS_INTER;
- return flags;
+ priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+ priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+ priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+ return 0;
}
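The flag-to-bit mapping is unchanged by the move into io_subchannel_private; a hedged sketch of a call site (driver context assumed):

/* Hypothetical caller sketch: influence how cio_start_key() builds
 * the ORB for subsequent I/O on this subchannel. */
static void example_options(struct subchannel *sch)
{
        cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_SUPPRESS_INTER);
}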
static int
@@ -139,21 +127,21 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ union orb *orb = &priv->orb;
int ccode;
- union orb *orb;
CIO_TRACE_EVENT(5, "stIO");
CIO_TRACE_EVENT(5, dev_name(&sch->dev));
- orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
/* sch is always under 2G. */
orb->cmd.intparm = (u32)(addr_t)sch;
orb->cmd.fmt = 1;
- orb->cmd.pfch = sch->options.prefetch == 0;
- orb->cmd.spnd = sch->options.suspend;
- orb->cmd.ssic = sch->options.suspend && sch->options.inter;
+ orb->cmd.pfch = priv->options.prefetch == 0;
+ orb->cmd.spnd = priv->options.suspend;
+ orb->cmd.ssic = priv->options.suspend && priv->options.inter;
orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
/*
@@ -358,10 +346,11 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
*/
int cio_commit_config(struct subchannel *sch)
{
- struct schib schib;
int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
for (retry = 0; retry < 5; retry++) {
@@ -372,7 +361,7 @@ int cio_commit_config(struct subchannel *sch)
return ccode;
switch (ccode) {
case 0: /* successful */
- if (stsch(sch->schid, &schib) ||
+ if (stsch_err(sch->schid, &schib) ||
!css_sch_is_valid(&schib))
return -ENODEV;
if (cio_check_config(sch, &schib)) {
@@ -383,7 +372,10 @@ int cio_commit_config(struct subchannel *sch)
ret = -EAGAIN;
break;
case 1: /* status pending */
- return -EBUSY;
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
@@ -404,7 +396,7 @@ int cio_update_schib(struct subchannel *sch)
{
struct schib schib;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
memcpy(&sch->schib, &schib, sizeof(schib));
@@ -419,7 +411,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "ensch");
@@ -434,20 +425,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
- for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
ret = cio_commit_config(sch);
- if (ret == -EIO) {
- /*
- * Got a program check in msch. Try without
- * the concurrent sense bit the next time.
- */
- sch->config.csense = 0;
- } else if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
}
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
@@ -460,7 +445,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "dissch");
@@ -472,30 +456,13 @@ int cio_disable_subchannel(struct subchannel *sch)
return -ENODEV;
sch->config.ena = 0;
+ ret = cio_commit_config(sch);
- for (retry = 0; retry < 3; retry++) {
- ret = cio_commit_config(sch);
- if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
- }
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
-int cio_create_sch_lock(struct subchannel *sch)
-{
- sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
- if (!sch->lock)
- return -ENOMEM;
- spin_lock_init(sch->lock);
- return 0;
-}
-
static int cio_check_devno_blacklisted(struct subchannel *sch)
{
if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
@@ -552,32 +519,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
sprintf(dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT(4, dbf_txt);
- /* Nuke all fields. */
- memset(sch, 0, sizeof(struct subchannel));
-
- sch->schid = schid;
- if (cio_is_console(schid)) {
- sch->lock = cio_get_console_lock();
- } else {
- err = cio_create_sch_lock(sch);
- if (err)
- goto out;
- }
- mutex_init(&sch->reg_mutex);
-
/*
* The first subchannel that is not-operational (ccode==3)
- * indicates that there aren't any more devices available.
+ * indicates that there aren't any more devices available.
* If stsch gets an exception, it means the current subchannel set
- * is not valid.
+ * is not valid.
*/
- ccode = stsch_err (schid, &sch->schib);
+ ccode = stsch_err(schid, &sch->schib);
if (ccode) {
err = (ccode == 3) ? -ENXIO : ccode;
goto out;
}
- /* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
+ sch->schid = schid;
switch (sch->st) {
case SUBCHANNEL_TYPE_IO:
@@ -594,267 +548,179 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
- return 0;
out:
- if (!cio_is_console(schid))
- kfree(sch->lock);
- sch->lock = NULL;
return err;
}
/*
- * do_IRQ() handles all normal I/O device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
+ * do_cio_interrupt() handles all normal I/O device IRQs
*/
-void __irq_entry do_IRQ(struct pt_regs *regs)
+static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
- struct pt_regs *old_regs;
-
- old_regs = set_irq_regs(regs);
- s390_idle_check();
- irq_enter();
- __get_cpu_var(s390_idle).nohz_delay = 1;
- if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
- /* Serve timer interrupts first. */
- clock_comparator_work();
- /*
- * Get interrupt information from lowcore
- */
- tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
- irb = (struct irb *) __LC_IRB;
- do {
- kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
- /*
- * Non I/O-subchannel thin interrupts are processed differently
- */
- if (tpi_info->adapter_IO == 1 &&
- tpi_info->int_type == IO_INTERRUPT_TYPE) {
- do_adapter_IO(tpi_info->isc);
- continue;
- }
- sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
- if (!sch) {
- /* Clear pending interrupt condition. */
- tsch(tpi_info->schid, irb);
- continue;
- }
- spin_lock(sch->lock);
- /* Store interrupt response block to lowcore. */
- if (tsch(tpi_info->schid, irb) == 0) {
- /* Keep subchannel information word up to date. */
- memcpy (&sch->schib.scsw, &irb->scsw,
- sizeof (irb->scsw));
- /* Call interrupt handler if there is one. */
- if (sch->driver && sch->driver->irq)
- sch->driver->irq(sch);
- }
- spin_unlock(sch->lock);
- /*
- * Are more interrupts pending?
- * If so, the tpi instruction will update the lowcore
- * to hold the info for the next interrupt.
- * We don't do this for VM because a tpi drops the cpu
- * out of the sie which costs more cycles than it saves.
- */
- } while (!MACHINE_IS_VM && tpi (NULL) != 0);
- irq_exit();
- set_irq_regs(old_regs);
+
+ __this_cpu_write(s390_idle.nohz_delay, 1);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ irb = &__get_cpu_var(cio_irb);
+ sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
+ if (!sch) {
+ /* Clear pending interrupt condition. */
+ inc_irq_stat(IRQIO_CIO);
+ tsch(tpi_info->schid, irb);
+ return IRQ_HANDLED;
+ }
+ spin_lock(sch->lock);
+ /* Store interrupt response block to lowcore. */
+ if (tsch(tpi_info->schid, irb) == 0) {
+ /* Keep subchannel information word up to date. */
+ memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
+ /* Call interrupt handler if there is one. */
+ if (sch->driver && sch->driver->irq)
+ sch->driver->irq(sch);
+ else
+ inc_irq_stat(IRQIO_CIO);
+ } else
+ inc_irq_stat(IRQIO_CIO);
+ spin_unlock(sch->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction io_interrupt = {
+ .name = "IO",
+ .handler = do_cio_interrupt,
+};
+
+void __init init_cio_interrupts(void)
+{
+ irq_set_chip_and_handler(IO_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(IO_INTERRUPT, &io_interrupt);
}
#ifdef CONFIG_CCW_CONSOLE
-static struct subchannel console_subchannel;
-static struct io_subchannel_private console_priv;
-static int console_subchannel_in_use;
+static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
/*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
+ * Use cio_tsch to update the subchannel status and call the interrupt handler
+ * if status had been pending. Called with the subchannel's lock held.
*/
-static int cio_tpi(void)
+void cio_tsch(struct subchannel *sch)
{
- struct tpi_info *tpi_info;
- struct subchannel *sch;
struct irb *irb;
int irq_context;
- tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
- if (tpi(NULL) != 1)
- return 0;
- irb = (struct irb *) __LC_IRB;
+ irb = &__get_cpu_var(cio_irb);
/* Store interrupt response block to lowcore. */
- if (tsch(tpi_info->schid, irb) != 0)
+ if (tsch(sch->schid, irb) != 0)
/* Not status pending or not operational. */
- return 1;
- sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
- if (!sch)
- return 1;
+ return;
+ memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+ /* Call interrupt handler with updated status. */
irq_context = in_interrupt();
- if (!irq_context)
+ if (!irq_context) {
local_bh_disable();
- irq_enter();
- spin_lock(sch->lock);
- memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+ irq_enter();
+ }
+ kstat_incr_irq_this_cpu(IO_INTERRUPT);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
- spin_unlock(sch->lock);
- irq_exit();
- if (!irq_context)
+ else
+ inc_irq_stat(IRQIO_CIO);
+ if (!irq_context) {
+ irq_exit();
_local_bh_enable();
- return 1;
-}
-
-void *cio_get_console_priv(void)
-{
- return &console_priv;
+ }
}
-/*
- * busy wait for the next interrupt on the console
- */
-void wait_cons_dev(void)
- __releases(console_subchannel.lock)
- __acquires(console_subchannel.lock)
+static int cio_test_for_console(struct subchannel_id schid, void *data)
{
- unsigned long cr6 __attribute__ ((aligned (8)));
- unsigned long save_cr6 __attribute__ ((aligned (8)));
-
- /*
- * before entering the spinlock we may already have
- * processed the interrupt on a different CPU...
- */
- if (!console_subchannel_in_use)
- return;
-
- /* disable all but the console isc */
- __ctl_store (save_cr6, 6, 6);
- cr6 = 1UL << (31 - CONSOLE_ISC);
- __ctl_load (cr6, 6, 6);
-
- do {
- spin_unlock(console_subchannel.lock);
- if (!cio_tpi())
- cpu_relax();
- spin_lock(console_subchannel.lock);
- } while (console_subchannel.schib.scsw.cmd.actl != 0);
- /*
- * restore previous isc value
- */
- __ctl_load (save_cr6, 6, 6);
-}
+ struct schib schib;
-static int
-cio_test_for_console(struct subchannel_id schid, void *data)
-{
- if (stsch_err(schid, &console_subchannel.schib) != 0)
+ if (stsch_err(schid, &schib) != 0)
return -ENXIO;
- if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
- console_subchannel.schib.pmcw.dnv &&
- (console_subchannel.schib.pmcw.dev == console_devno)) {
+ if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ (schib.pmcw.dev == console_devno)) {
console_irq = schid.sch_no;
return 1; /* found */
}
return 0;
}
-
-static int
-cio_get_console_sch_no(void)
+static int cio_get_console_sch_no(void)
{
struct subchannel_id schid;
-
+ struct schib schib;
+
init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
- if (stsch(schid, &console_subchannel.schib) != 0 ||
- (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
- !console_subchannel.schib.pmcw.dnv)
+ if (stsch_err(schid, &schib) != 0 ||
+ (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
return -1;
- console_devno = console_subchannel.schib.pmcw.dev;
+ console_devno = schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
for_each_subchannel(cio_test_for_console, NULL);
- if (console_irq == -1)
- return -1;
- } else {
- /* unlike in 2.4, we cannot autoprobe here, since
- * the channel subsystem is not fully initialized.
- * With some luck, the HWC console can take over */
- return -1;
}
return console_irq;
}
-struct subchannel *
-cio_probe_console(void)
+struct subchannel *cio_probe_console(void)
{
- int sch_no, ret;
struct subchannel_id schid;
+ struct subchannel *sch;
+ int sch_no, ret;
- if (xchg(&console_subchannel_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
- console_subchannel_in_use = 0;
pr_warning("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
- memset(&console_subchannel, 0, sizeof(struct subchannel));
init_subchannel_id(&schid);
schid.sch_no = sch_no;
- ret = cio_validate_subchannel(&console_subchannel, schid);
- if (ret) {
- console_subchannel_in_use = 0;
- return ERR_PTR(-ENODEV);
- }
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return sch;
- /*
- * enable console I/O-interrupt subclass
- */
+ lockdep_set_class(sch->lock, &console_sch_key);
isc_register(CONSOLE_ISC);
- console_subchannel.config.isc = CONSOLE_ISC;
- console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
- ret = cio_commit_config(&console_subchannel);
+ sch->config.isc = CONSOLE_ISC;
+ sch->config.intparm = (u32)(addr_t)sch;
+ ret = cio_commit_config(sch);
if (ret) {
isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
+ put_device(&sch->dev);
return ERR_PTR(ret);
}
- return &console_subchannel;
+ console_sch = sch;
+ return sch;
}
-void
-cio_release_console(void)
-{
- console_subchannel.config.intparm = 0;
- cio_commit_config(&console_subchannel);
- isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
-}
-
-/* Bah... hack to catch console special sausages. */
-int
-cio_is_console(struct subchannel_id schid)
+int cio_is_console(struct subchannel_id schid)
{
- if (!console_subchannel_in_use)
+ if (!console_sch)
return 0;
- return schid_equal(&schid, &console_subchannel.schid);
+ return schid_equal(&schid, &console_sch->schid);
}
-struct subchannel *
-cio_get_console_subchannel(void)
+void cio_register_early_subchannels(void)
{
- if (!console_subchannel_in_use)
- return NULL;
- return &console_subchannel;
+ int ret;
+
+ if (!console_sch)
+ return;
+
+ ret = css_register_subchannel(console_sch);
+ if (ret)
+ put_device(&console_sch->dev);
}
+#endif /* CONFIG_CCW_CONSOLE */
-#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
@@ -863,10 +729,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
cc = 0;
for (retry=0;retry<3;retry++) {
schib->pmcw.ena = 0;
- cc = msch(schid, schib);
+ cc = msch_err(schid, schib);
if (cc)
return (cc==3?-ENODEV:-EBUSY);
- if (stsch(schid, schib) || !css_sch_is_valid(schib))
+ if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
return -ENODEV;
if (!schib->pmcw.ena)
return 0;
@@ -885,7 +751,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
struct tpi_info ti;
if (tpi(&ti)) {
- tsch(ti.schid, (struct irb *)__LC_IRB);
+ tsch(ti.schid, &__get_cpu_var(cio_irb));
if (schid_equal(&ti.schid, &schid))
return 0;
}
@@ -913,7 +779,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr)
pgm_check_occured = 0;
s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
- rc = stsch(schid, addr);
+ rc = stsch_err(schid, addr);
s390_base_pgm_handler_fn = NULL;
/* The program check handler could have changed pgm_check_occured. */
@@ -950,7 +816,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
/* No default clear strategy */
break;
}
- stsch(schid, &schib);
+ stsch_err(schid, &schib);
__disable_subchannel_easy(schid, &schib);
}
out:
@@ -1003,9 +869,9 @@ static void css_reset(void)
atomic_inc(&chpid_reset_count);
}
/* Wait for machine check for all channel paths. */
- timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
while (atomic_read(&chpid_reset_count) != 0) {
- if (get_clock() > timeout)
+ if (get_tod_clock_fast() > timeout)
break;
cpu_relax();
}
@@ -1070,9 +936,9 @@ extern void do_reipl_asm(__u32 schid);
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
- struct subchannel_id schid;
+ struct subchannel_id uninitialized_var(schid);
- s390_reset_system();
+ s390_reset_system(NULL, NULL);
if (reipl_find_schid(devid, &schid) != 0)
panic("IPL Device not found\n");
do_reipl_asm(*((__u32*)&schid));
@@ -1083,10 +949,10 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
struct subchannel_id schid;
struct schib schib;
- schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
+ schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
if (!schid.one)
return -ENODEV;
- if (stsch(schid, &schib))
+ if (stsch_err(schid, &schib))
return -ENODEV;
if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
return -ENODEV;
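
The css_reset() hunk above bounds its busy-wait with a TOD-clock deadline. A minimal sketch of that pattern, assuming the usual s390 TOD format (bit 51 ticks once per microsecond, so a microsecond count shifted left by 12 yields TOD-clock units); the helper name and done() callback are hypothetical:

#include <linux/errno.h>
#include <asm/processor.h>	/* cpu_relax() */
#include <asm/timex.h>		/* get_tod_clock_fast() */

/* Hypothetical helper: spin until done() holds or @usecs elapse. */
static int spin_until(int (*done)(void), unsigned long usecs)
{
	unsigned long long timeout;

	/* Microseconds << 12 aligns the value to TOD-clock bit 51. */
	timeout = get_tod_clock_fast() + (usecs << 12);
	while (!done()) {
		if (get_tod_clock_fast() > timeout)
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}
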
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index bf7f80f5a33..a01376ae174 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todos with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
@@ -84,13 +89,6 @@ struct subchannel {
SUBCHANNEL_TYPE_MSG = 2,
SUBCHANNEL_TYPE_ADM = 3,
} st; /* subchannel type */
-
- struct {
- unsigned int suspend:1; /* allow suspend */
- unsigned int prefetch:1;/* deny prefetch */
- unsigned int inter:1; /* suppress intermediate interrupts */
- } __attribute__ ((packed)) options;
-
__u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */
__u8 opm; /* operational path mask */
@@ -99,13 +97,12 @@ struct subchannel {
struct chsc_ssd_info ssd_info; /* subchannel description */
struct device dev; /* entry in device tree */
struct css_driver *driver;
- void *private; /* private per subchannel type data */
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
} __attribute__ ((aligned(8)));
-#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
+DECLARE_PER_CPU(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
@@ -120,30 +117,21 @@ extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
-extern int cio_get_options (struct subchannel *);
extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
-int cio_create_sch_lock(struct subchannel *);
-void do_adapter_IO(u8 isc);
-void do_IRQ(struct pt_regs *);
-
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
-extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
-extern struct subchannel *cio_get_console_subchannel(void);
-extern spinlock_t * cio_get_console_lock(void);
-extern void *cio_get_console_priv(void);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
-#define cio_get_console_subchannel() NULL
-#define cio_get_console_lock() NULL
-#define cio_get_console_priv() NULL
+static inline void cio_register_early_subchannels(void) {}
#endif
#endif
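
The per-CPU interrupt response block declared above replaces the old lowcore IRB. A sketch, in the spirit of the new cio_tsch() helper, of how interrupt-handling code might drain it (the function name is hypothetical; it assumes the caller runs with interrupts disabled so the per-CPU pointer stays stable):

#include <linux/percpu.h>
#include <linux/string.h>
#include "cio.h"	/* DECLARE_PER_CPU(struct irb, cio_irb) */
#include "ioasm.h"	/* tsch() */

/* Sketch: collect pending status for @sch; call with interrupts disabled. */
static void example_collect_irb(struct subchannel *sch)
{
	struct irb *irb = &__get_cpu_var(cio_irb);

	if (tsch(sch->schid, irb) != 0)
		return;	/* not status pending or not operational */
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
}
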
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2985eb43948..23054f8fa9f 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/cmf.c
- *
* Linux on zSeries Channel Measurement Facility support
*
- * Copyright 2000,2006 IBM Corporation
+ * Copyright IBM Corp. 2000, 2006
*
* Authors: Arnd Bergmann <arndb@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -35,7 +33,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/timex.h> /* get_clock() */
+#include <linux/timex.h> /* get_tod_clock() */
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -98,7 +96,7 @@ enum cmb_format {
* enum cmb_format.
*/
static int format = CMF_AUTODETECT;
-module_param(format, bool, 0444);
+module_param(format, bint, 0444);
/**
* struct cmb_operations - functions to use depending on cmb_format
@@ -328,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
memcpy(cmb_data->last_block, hw_block, cmb_data->size);
memcpy(reference_buf, hw_block, cmb_data->size);
} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
- cmb_data->last_update = get_clock();
+ cmb_data->last_update = get_tod_clock();
kfree(reference_buf);
return 0;
}
@@ -430,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
cmb_data->last_update = 0;
}
- cdev->private->cmb_start_time = get_clock();
+ cdev->private->cmb_start_time = get_tod_clock();
spin_unlock_irq(cdev->ccwlock);
}
@@ -1184,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev,
int ret;
unsigned long val;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
@@ -1341,7 +1339,7 @@ module_init(init_cmf);
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
- "Copyright 2003 IBM Corporation\n");
+ "Copyright IBM Corp. 2003\n");
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
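
cmb_enable_store() above moves from the deprecated strict_strtoul() to kstrtoul(). A minimal sketch of the parsing pattern in a sysfs store handler (the handler name and the final action are hypothetical):

#include <linux/kernel.h>	/* kstrtoul() */

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);	/* base 16, as cmb_enable uses */
	if (ret)
		return ret;		/* -EINVAL or -ERANGE from kstrtoul */
	if (val != 0 && val != 1)
		return -EINVAL;
	/* ... act on val ... */
	return count;
}
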
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e7..0f8a25f98b1 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -1,22 +1,24 @@
/*
* Channel report handling code
*
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
-#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
+#include <linux/wait.h>
#include <asm/crw.h>
+#include <asm/ctl_reg.h>
-static struct semaphore crw_semaphore;
static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
/**
* crw_register_handler() - register a channel report word handler
@@ -59,12 +61,14 @@ void crw_unregister_handler(int rsc)
static int crw_collect_info(void *unused)
{
struct crw crw[2];
- int ccode;
+ int ccode, signal;
unsigned int chain;
- int ignore;
repeat:
- ignore = down_interruptible(&crw_semaphore);
+ signal = wait_event_interruptible(crw_handler_wait_q,
+ atomic_read(&crw_nr_req) > 0);
+ if (unlikely(signal))
+ atomic_inc(&crw_nr_req);
chain = 0;
while (1) {
crw_handler_t handler;
@@ -122,25 +126,23 @@ repeat:
/* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0;
}
+ if (atomic_dec_and_test(&crw_nr_req))
+ wake_up(&crw_handler_wait_q);
goto repeat;
return 0;
}
void crw_handle_channel_report(void)
{
- up(&crw_semaphore);
+ atomic_inc(&crw_nr_req);
+ wake_up(&crw_handler_wait_q);
}
-/*
- * Separate initcall needed for semaphore initialization since
- * crw_handle_channel_report might be called before crw_machine_check_init.
- */
-static int __init crw_init_semaphore(void)
+void crw_wait_for_channel_report(void)
{
- init_MUTEX_LOCKED(&crw_semaphore);
- return 0;
+ crw_handle_channel_report();
+ wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}
-pure_initcall(crw_init_semaphore);
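
The crw.c rework trades the semaphore for an atomic request counter plus a wait queue, which is what lets the new crw_wait_for_channel_report() block until the counter drains to zero. A hedged sketch of the pattern with hypothetical names (the real loop also handles signals by bumping the counter, omitted here for brevity):

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(req_wq);

static void post_request(void)		/* cf. crw_handle_channel_report() */
{
	atomic_inc(&nr_req);
	wake_up(&req_wq);
}

static void process_one_request(void)	/* cf. the crw_collect_info() loop */
{
	wait_event(req_wq, atomic_read(&nr_req) > 0);
	/* ... handle the work ... */
	if (atomic_dec_and_test(&nr_req))
		wake_up(&req_wq);	/* unblocks "settle"-style waiters */
}

static void wait_until_idle(void)	/* cf. crw_wait_for_channel_report() */
{
	post_request();			/* force at least one pass */
	wait_event(req_wq, atomic_read(&nr_req) == 0);
}
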
/*
* Machine checks for the channel subsystem must be enabled
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92ff88ac110..0268e5fd59b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
/*
* driver for channel subsystem
*
- * Copyright IBM Corp. 2002, 2009
+ * Copyright IBM Corp. 2002, 2010
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
+#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -34,6 +35,7 @@ int css_init_done = 0;
int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -67,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
struct cb_data *cb = data;
int rc = 0;
- idset_sch_del(cb->set, sch->schid);
+ if (cb->set)
+ idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
@@ -113,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
+ if (fn_known && !fn_unknown) {
+ /* Skip idset allocation in case of known-only loop. */
+ cb.set = NULL;
+ return bus_for_each_dev(&css_bus_type, NULL, &cb,
+ call_fn_known_sch);
+ }
+
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
@@ -135,37 +145,53 @@ out:
static void css_sch_todo(struct work_struct *work);
-static struct subchannel *
-css_alloc_subchannel(struct subchannel_id schid)
+static int css_sch_create_locks(struct subchannel *sch)
+{
+ sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
+
+ spin_lock_init(sch->lock);
+ mutex_init(&sch->reg_mutex);
+
+ return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
+ kfree(sch->lock);
+ kfree(sch);
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
- sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
- if (sch == NULL)
+ sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+ if (!sch)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel (sch, schid);
- if (ret < 0) {
- kfree(sch);
- return ERR_PTR(ret);
- }
+
+ ret = cio_validate_subchannel(sch, schid);
+ if (ret < 0)
+ goto err;
+
+ ret = css_sch_create_locks(sch);
+ if (ret)
+ goto err;
+
INIT_WORK(&sch->todo_work, css_sch_todo);
+ sch->dev.release = &css_subchannel_release;
+ device_initialize(&sch->dev);
return sch;
-}
-
-static void
-css_subchannel_release(struct device *dev)
-{
- struct subchannel *sch;
- sch = to_subchannel(dev);
- if (!cio_is_console(sch->schid)) {
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- kfree(sch->lock);
- kfree(sch);
- }
+err:
+ kfree(sch);
+ return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
@@ -175,7 +201,7 @@ static int css_sch_device_register(struct subchannel *sch)
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
- ret = device_register(&sch->dev);
+ ret = device_add(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
@@ -193,51 +219,6 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(slow_path_wq, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -271,16 +252,11 @@ void css_update_ssd_info(struct subchannel *sch)
{
int ret;
- if (cio_is_console(sch->schid)) {
- /* Console is initialized too early for functions requiring
- * memory allocation. */
+ ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+ if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- } else {
- ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
- if (ret)
- ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- ssd_register_chpids(&sch->ssd_info);
- }
+
+ ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -318,14 +294,13 @@ static const struct attribute_group *default_subch_attr_groups[] = {
NULL,
};
-static int css_register_subchannel(struct subchannel *sch)
+int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
- sch->dev.release = &css_subchannel_release;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
@@ -357,23 +332,19 @@ static int css_register_subchannel(struct subchannel *sch)
return ret;
}
-int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
{
- int ret;
struct subchannel *sch;
+ int ret;
+
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return PTR_ERR(sch);
- if (cio_is_console(schid))
- sch = cio_get_console_subchannel();
- else {
- sch = css_alloc_subchannel(schid);
- if (IS_ERR(sch))
- return PTR_ERR(sch);
- }
ret = css_register_subchannel(sch);
- if (ret) {
- if (!cio_is_console(schid))
- put_device(&sch->dev);
- }
+ if (ret)
+ put_device(&sch->dev);
+
return ret;
}
@@ -420,7 +391,11 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
/* Will be done on the slow path. */
return -EAGAIN;
}
- if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
+ if (stsch_err(schid, &schib)) {
+ /* Subchannel is not provided. */
+ return -ENXIO;
+ }
+ if (!css_sch_is_valid(&schib)) {
/* Unusable - ignore. */
return 0;
}
@@ -464,6 +439,66 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(css_sched_sch_todo);
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
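A sketch of a hypothetical css_sched_sch_todo() call site; as the kernel-doc above requires, the caller holds the subchannel lock, and a pending higher-priority todo silently wins:

#include <linux/spinlock.h>

static void example_request_eval(struct subchannel *sch)
{
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	/* No-op if SCH_TODO_UNREG is already pending: higher values win. */
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irqrestore(sch->lock, flags);
}
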
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
@@ -519,10 +554,16 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
case -ENOMEM:
case -EIO:
/* These should abort looping */
+ spin_lock_irq(&slow_subchannel_lock);
+ idset_sch_del_subseq(slow_subchannel_set, schid);
+ spin_unlock_irq(&slow_subchannel_lock);
break;
default:
rc = 0;
}
+ /* Allow scheduling here since the containing loop might
+ * take a while. */
+ cond_resched();
}
return rc;
}
@@ -542,8 +583,8 @@ static void css_slow_path_func(struct work_struct *unused)
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
+struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
@@ -552,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
- queue_work(slow_path_wq, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -563,7 +604,7 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(slow_path_wq, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -576,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data)
return 0;
}
-void css_schedule_eval_all_unreg(void)
+void css_schedule_eval_all_unreg(unsigned long delay)
{
unsigned long flags;
struct idset *unreg_set;
@@ -594,20 +635,21 @@ void css_schedule_eval_all_unreg(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(slow_path_wq, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, delay);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
- flush_workqueue(slow_path_wq);
+ flush_workqueue(cio_work_q);
}
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
- css_schedule_eval_all_unreg();
+ /* Schedule with a delay to allow merging of subsequent calls. */
+ css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
@@ -617,6 +659,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct subchannel_id mchk_schid;
+ struct subchannel *sch;
if (overflow) {
css_schedule_eval_all();
@@ -634,8 +677,15 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = crw0->rsid;
if (crw1)
- mchk_schid.ssid = (crw1->rsid >> 8) & 3;
+ mchk_schid.ssid = (crw1->rsid >> 4) & 3;
+ if (crw0->erc == CRW_ERC_PMOD) {
+ sch = get_subchannel_by_schid(mchk_schid);
+ if (sch) {
+ css_update_ssd_info(sch);
+ put_device(&sch->dev);
+ }
+ }
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
@@ -647,6 +697,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
+ struct cpuid cpu_id;
+
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
@@ -657,8 +709,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
- css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
+ get_cpu_id(&cpu_id);
+ css->global_pgid.cpu_id = cpu_id.ident;
+ css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@@ -701,7 +754,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
int ret;
unsigned long val;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
mutex_lock(&css->mutex);
@@ -737,7 +790,7 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
- ret = cio_create_sch_lock(css->pseudo_subchannel);
+ ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
@@ -747,7 +800,7 @@ static int __init setup_css(int nr)
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
- tod_high = (u32) (get_clock() >> 32);
+ tod_high = (u32) (get_tod_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
}
@@ -786,7 +839,6 @@ static struct notifier_block css_reboot_notifier = {
static int css_power_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
- void *secm_area;
int ret, i;
switch (event) {
@@ -802,15 +854,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- secm_area = (void *)get_zeroed_page(GFP_KERNEL |
- GFP_DMA);
- if (secm_area) {
- if (__chsc_do_secm(css, 0, secm_area))
- ret = NOTIFY_BAD;
- free_page((unsigned long)secm_area);
- } else
- ret = NOTIFY_BAD;
-
+ ret = __chsc_do_secm(css, 0);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
break;
@@ -826,15 +871,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- secm_area = (void *)get_zeroed_page(GFP_KERNEL |
- GFP_DMA);
- if (secm_area) {
- if (__chsc_do_secm(css, 1, secm_area))
- ret = NOTIFY_BAD;
- free_page((unsigned long)secm_area);
- } else
- ret = NOTIFY_BAD;
-
+ ret = __chsc_do_secm(css, 1);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
/* search for subchannels, which appeared during hibernation */
@@ -852,32 +890,23 @@ static struct notifier_block css_power_notifier = {
/*
* Now that the driver core is running, we can setup our channel subsystem.
- * The struct subchannel's are created during probing (except for the
- * static console subchannel).
+ * The struct subchannel's are created during probing.
*/
static int __init css_bus_init(void)
{
int ret, i;
- ret = chsc_determine_css_characteristics();
- if (ret == -ENOMEM)
- goto out;
-
- ret = chsc_alloc_sei_area();
+ ret = chsc_init();
if (ret)
- goto out;
+ return ret;
+ chsc_determine_css_characteristics();
/* Try to enable MSS. */
ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
- switch (ret) {
- case 0: /* Success. */
- max_ssid = __MAX_SSID;
- break;
- case -ENOMEM:
- goto out;
- default:
+ if (ret)
max_ssid = 0;
- }
+ else /* Success. */
+ max_ssid = __MAX_SSID;
ret = slow_subchannel_init();
if (ret)
@@ -957,9 +986,9 @@ out_unregister:
}
bus_unregister(&css_bus_type);
out:
- crw_unregister_handler(CRW_RSC_CSS);
- chsc_free_sei_area();
+ crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
pr_alert("The CSS device driver initialization failed with "
"errno=%d\n", ret);
return ret;
@@ -979,9 +1008,9 @@ static void __init css_bus_cleanup(void)
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
- crw_unregister_handler(CRW_RSC_CSS);
- chsc_free_sei_area();
+ crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
isc_unregister(IO_SCH_ISC);
}
@@ -992,12 +1021,21 @@ static int __init channel_subsystem_init(void)
ret = css_bus_init();
if (ret)
return ret;
-
+ cio_work_q = create_singlethread_workqueue("cio");
+ if (!cio_work_q) {
+ ret = -ENOMEM;
+ goto out_bus;
+ }
ret = io_subchannel_init();
if (ret)
- css_bus_cleanup();
+ goto out_wq;
return ret;
+out_wq:
+ destroy_workqueue(cio_work_q);
+out_bus:
+ css_bus_cleanup();
+ return ret;
}
subsys_initcall(channel_subsystem_init);
@@ -1006,25 +1044,85 @@ static int css_settle(struct device_driver *drv, void *unused)
struct css_driver *cssdrv = to_cssdriver(drv);
if (cssdrv->settle)
- cssdrv->settle();
+ return cssdrv->settle();
return 0;
}
+int css_complete_work(void)
+{
+ int ret;
+
+ /* Wait for the evaluation of subchannels to finish. */
+ ret = wait_event_interruptible(css_eval_wq,
+ atomic_read(&css_eval_scheduled) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ /* Wait for the subchannel type specific initialization to finish */
+ return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+
/*
* Wait for the initialization of devices to finish, to make sure we are
* done with our setup if the search for the root device starts.
*/
static int __init channel_subsystem_init_sync(void)
{
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
- /* Wait for the evaluation of subchannels to finish. */
- wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
- /* Wait for the subchannel type specific initialization to finish */
- return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+ css_complete_work();
+ return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
+void channel_subsystem_reinit(void)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+
+ chsc_enable_facility(CHSC_SDA_OC_MSS);
+ chp_id_for_each(&chpid) {
+ chp = chpid_to_chp(chpid);
+ if (chp)
+ chp_update_desc(chp);
+ }
+}
+
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ /* Handle pending CRW's. */
+ crw_wait_for_channel_report();
+ ret = css_complete_work();
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+ .open = nonseekable_open,
+ .write = cio_settle_write,
+ .llseek = no_llseek,
+};
+
+static int __init cio_settle_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("cio_settle", S_IWUSR, NULL,
+ &cio_settle_proc_fops);
+ if (!entry)
+ return -ENOMEM;
+ return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
+
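The new /proc/cio_settle file gives user space a way to block until pending channel reports are handled and subchannel evaluation has finished; the write only returns once css_complete_work() completes. A minimal user-space sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/cio_settle");
		return 1;
	}
	/* Any write blocks until CRWs are handled and evaluation settles. */
	if (write(fd, "1", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}
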
int sch_is_pseudo_sch(struct subchannel *sch)
{
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
@@ -1142,13 +1240,14 @@ static int css_pm_restore(struct device *dev)
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
+ css_update_ssd_info(sch);
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
return drv->restore ? drv->restore(sch) : 0;
}
-static struct dev_pm_ops css_pm_ops = {
+static const struct dev_pm_ops css_pm_ops = {
.prepare = css_pm_prepare,
.complete = css_pm_complete,
.freeze = css_pm_freeze,
@@ -1156,7 +1255,7 @@ static struct dev_pm_ops css_pm_ops = {
.restore = css_pm_restore,
};
-struct bus_type css_bus_type = {
+static struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
@@ -1175,9 +1274,7 @@ struct bus_type css_bus_type = {
*/
int css_driver_register(struct css_driver *cdrv)
{
- cdrv->drv.name = cdrv->name;
cdrv->drv.bus = &css_bus_type;
- cdrv->drv.owner = cdrv->owner;
return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
@@ -1195,4 +1292,3 @@ void css_driver_unregister(struct css_driver *cdrv)
EXPORT_SYMBOL_GPL(css_driver_unregister);
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index fe84b92cde6..2c9107e2025 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -63,7 +63,6 @@ struct subchannel;
struct chp_link;
/**
* struct css_driver - device driver for subchannels
- * @owner: owning module
* @subchannel_type: subchannel type supported by this driver
* @drv: embedded device driver structure
* @irq: called on interrupts
@@ -78,10 +77,8 @@ struct chp_link;
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @settle: wait for asynchronous work to finish
- * @name: name of the device driver
*/
struct css_driver {
- struct module *owner;
struct css_device_id *subchannel_type;
struct device_driver drv;
void (*irq)(struct subchannel *);
@@ -95,22 +92,17 @@ struct css_driver {
int (*freeze)(struct subchannel *);
int (*thaw) (struct subchannel *);
int (*restore)(struct subchannel *);
- void (*settle)(void);
- const char *name;
+ int (*settle)(void);
};
#define to_cssdriver(n) container_of(n, struct css_driver, drv)
-/*
- * all css_drivers have the css_bus_type
- */
-extern struct bus_type css_bus_type;
-
extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
-extern int css_probe_device(struct subchannel_id);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
@@ -118,12 +110,8 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
-#define __MAX_SUBCHANNEL 65535
-#define __MAX_SSID 3
-
struct channel_subsystem {
u8 cssid;
int valid;
@@ -140,18 +128,19 @@ struct channel_subsystem {
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
-extern struct bus_type css_bus_type;
extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
+int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
struct schib;
int css_sch_is_valid(struct schib *);
-extern struct workqueue_struct *slow_path_wq;
+extern struct workqueue_struct *cio_work_q;
void css_wait_for_slow_path(void);
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 9fecfb4223a..dfef5e63cb7 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/device.c
* bus driver for ccw devices
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
@@ -20,7 +19,9 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <linux/delay.h>
#include <linux/timer.h>
+#include <linux/kernel_stat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -36,12 +37,17 @@
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
+#include "chsc.h"
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };
+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and
@@ -126,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-struct bus_type ccw_bus_type;
-
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
@@ -136,9 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
static void recovery_func(unsigned long data);
-struct workqueue_struct *ccw_device_work;
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -159,17 +160,24 @@ static int io_subchannel_prepare(struct subchannel *sch)
return 0;
}
-static void io_subchannel_settle(void)
+static int io_subchannel_settle(void)
{
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
- flush_workqueue(ccw_device_work);
+ int ret;
+
+ ret = wait_event_interruptible(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ return 0;
}
static struct css_driver io_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "io_subchannel",
+ },
.subchannel_type = io_subchannel_ids,
- .name = "io_subchannel",
.irq = io_subchannel_irq,
.sch_event = io_subchannel_sch_event,
.chp_event = io_subchannel_chp_event,
@@ -184,31 +192,14 @@ int __init io_subchannel_init(void)
{
int ret;
- init_waitqueue_head(&ccw_device_init_wq);
- atomic_set(&ccw_device_init_count, 0);
setup_timer(&recovery_timer, recovery_func, 0);
-
- ccw_device_work = create_singlethread_workqueue("cio");
- if (!ccw_device_work)
- return -ENOMEM;
- slow_path_wq = create_singlethread_workqueue("kslowcrw");
- if (!slow_path_wq) {
- ret = -ENOMEM;
- goto out_err;
- }
- if ((ret = bus_register (&ccw_bus_type)))
- goto out_err;
-
+ ret = bus_register(&ccw_bus_type);
+ if (ret)
+ return ret;
ret = css_driver_register(&io_subchannel_driver);
if (ret)
- goto out_err;
+ bus_unregister(&ccw_bus_type);
- return 0;
-out_err:
- if (ccw_device_work)
- destroy_workqueue(ccw_device_work);
- if (slow_path_wq)
- destroy_workqueue(slow_path_wq);
return ret;
}
@@ -342,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev)
if (ret != 0)
return ret;
}
- cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
sch = to_subchannel(cdev->dev.parent);
+ cdev->online = 0;
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -455,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev)
ret = cdev->drv->set_online(cdev);
if (ret)
goto rollback;
+
+ spin_lock_irq(cdev->ccwlock);
cdev->online = 1;
+ spin_unlock_irq(cdev->ccwlock);
return 0;
rollback:
@@ -496,9 +490,11 @@ static int online_store_handle_offline(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
spin_unlock_irq(cdev->ccwlock);
- } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ return 0;
+ }
+ if (cdev->drv && cdev->drv->set_offline)
return ccw_device_set_offline(cdev);
- return 0;
+ return -EINVAL;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
@@ -515,8 +511,8 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
return -EAGAIN;
}
if (cdev->drv && cdev->drv->set_online)
- ccw_device_set_online(cdev);
- return 0;
+ return ccw_device_set_online(cdev);
+ return -EINVAL;
}
static int online_store_handle_online(struct ccw_device *cdev, int force)
@@ -546,15 +542,19 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
int force, ret;
unsigned long i;
- if (!dev_fsm_final_state(cdev) &&
- cdev->private->state != DEV_STATE_DISCONNECTED)
- return -EAGAIN;
+ /* Prevent conflict between multiple on-/offline processing requests. */
if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
-
- if (cdev->drv && !try_module_get(cdev->drv->owner)) {
- atomic_set(&cdev->private->onoff, 0);
- return -EINVAL;
+ /* Prevent conflict between internal I/Os and on-/offline processing. */
+ if (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ /* Prevent conflict between pending work and on-/offline processing. */
+ if (work_pending(&cdev->private->todo_work)) {
+ ret = -EAGAIN;
+ goto out;
}
if (!strncmp(buf, "force\n", count)) {
force = 1;
@@ -562,10 +562,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
ret = 0;
} else {
force = 0;
- ret = strict_strtoul(buf, 16, &i);
+ ret = kstrtoul(buf, 16, &i);
}
if (ret)
goto out;
+
+ device_lock(dev);
switch (i) {
case 0:
ret = online_store_handle_offline(cdev);
@@ -576,9 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
default:
ret = -EINVAL;
}
+ device_unlock(dev);
+
out:
- if (cdev->drv)
- module_put(cdev->drv->owner);
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
@@ -608,6 +610,33 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
}
}
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int rc;
+
+ rc = chsc_siosl(sch->schid);
+ if (rc < 0) {
+ pr_warning("Logging for subchannel 0.%x.%04x failed with "
+ "errno=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ return rc;
+ }
+ pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return count;
+}
+
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%02x\n", sch->vpm);
+}
+
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -615,10 +644,14 @@ static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
+ &dev_attr_logging.attr,
+ &dev_attr_vpm.attr,
NULL,
};
@@ -645,18 +678,11 @@ static const struct attribute_group *ccwdev_attr_groups[] = {
NULL,
};
-/* this is a simple abstraction for device_register that sets the
- * correct bus type and adds the bus specific files */
-static int ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_add(struct ccw_device *cdev)
{
struct device *dev = &cdev->dev;
- int ret;
dev->bus = &ccw_bus_type;
- ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- if (ret)
- return ret;
return device_add(dev);
}
@@ -668,7 +694,17 @@ static int match_dev_id(struct device *dev, void *data)
return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}
-static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
+/**
+ * get_ccwdev_by_dev_id() - obtain device from a ccw device id
+ * @dev_id: id of the device to be searched
+ *
+ * This function searches all devices attached to the ccw bus for a device
+ * matching @dev_id.
+ * Returns:
+ * If a device is found its reference count is increased and returned;
+ * else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
struct device *dev;
@@ -676,6 +712,7 @@ static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
return dev ? to_ccwdev(dev) : NULL;
}
+EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
@@ -720,21 +757,46 @@ static void ccw_device_todo(struct work_struct *work);
static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
- cdev->private->cdev = cdev;
- atomic_set(&cdev->private->onoff, 0);
+ struct ccw_device_private *priv = cdev->private;
+ int ret;
+
+ priv->cdev = cdev;
+ priv->int_class = IRQIO_CIO;
+ priv->state = DEV_STATE_NOT_OPER;
+ priv->dev_id.devno = sch->schib.pmcw.dev;
+ priv->dev_id.ssid = sch->schid.ssid;
+ priv->schid = sch->schid;
+
+ INIT_WORK(&priv->todo_work, ccw_device_todo);
+ INIT_LIST_HEAD(&priv->cmb_list);
+ init_waitqueue_head(&priv->wait_q);
+ init_timer(&priv->timer);
+
+ atomic_set(&priv->onoff, 0);
+ cdev->ccwlock = sch->lock;
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
- INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
+ ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret)
+ goto out_put;
if (!get_device(&sch->dev)) {
- /* Release reference from device_initialize(). */
- put_device(&cdev->dev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put;
}
- cdev->private->flags.initialized = 1;
+ priv->flags.initialized = 1;
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
return 0;
+
+out_put:
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ return ret;
}
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
@@ -774,7 +836,7 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
static void io_subchannel_register(struct ccw_device *cdev)
{
struct subchannel *sch;
- int ret;
+ int ret, adjust_init_count = 1;
unsigned long flags;
sch = to_subchannel(cdev->dev.parent);
@@ -803,6 +865,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
}
+ adjust_init_count = 0;
goto out;
}
/*
@@ -812,7 +875,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
/* make it known to the system */
- ret = ccw_device_register(cdev);
+ ret = ccw_device_add(cdev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
@@ -828,7 +891,7 @@ out:
cdev->private->flags.recog_done = 1;
wake_up(&cdev->private->wait_q);
out_err:
- if (atomic_dec_and_test(&ccw_device_init_count))
+ if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
@@ -877,26 +940,11 @@ io_subchannel_recog_done(struct ccw_device *cdev)
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
- struct ccw_device_private *priv;
-
- cdev->ccwlock = sch->lock;
-
- /* Init private data. */
- priv = cdev->private;
- priv->dev_id.devno = sch->schib.pmcw.dev;
- priv->dev_id.ssid = sch->schid.ssid;
- priv->schid = sch->schid;
- priv->state = DEV_STATE_NOT_OPER;
- INIT_LIST_HEAD(&priv->cmb_list);
- init_waitqueue_head(&priv->wait_q);
- init_timer(&priv->timer);
-
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
}
@@ -983,6 +1031,8 @@ static void io_subchannel_irq(struct subchannel *sch)
CIO_TRACE_EVENT(6, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+ else
+ inc_irq_stat(IRQIO_CIO);
}
void io_subchannel_init_config(struct subchannel *sch)
@@ -1015,6 +1065,7 @@ static void io_subchannel_init_fields(struct subchannel *sch)
*/
static int io_subchannel_probe(struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
int rc;
@@ -1034,19 +1085,14 @@ static int io_subchannel_probe(struct subchannel *sch)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev = sch_get_cdev(sch);
- cdev->dev.groups = ccwdev_attr_groups;
- device_initialize(&cdev->dev);
- cdev->private->flags.initialized = 1;
- ccw_device_register(cdev);
- /*
- * Check if the device is already online. If it is
- * the reference count needs to be corrected since we
- * didn't obtain a reference in ccw_device_set_online.
- */
- if (cdev->private->state != DEV_STATE_NOT_OPER &&
- cdev->private->state != DEV_STATE_OFFLINE &&
- cdev->private->state != DEV_STATE_BOXED)
- get_device(&cdev->dev);
+ rc = ccw_device_add(cdev);
+ if (rc) {
+ /* Release online reference. */
+ put_device(&cdev->dev);
+ goto out_schedule;
+ }
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
return 0;
}
io_subchannel_init_fields(sch);
@@ -1058,10 +1104,11 @@ static int io_subchannel_probe(struct subchannel *sch)
if (rc)
goto out_schedule;
/* Allocate I/O subchannel private data. */
- sch->private = kzalloc(sizeof(struct io_subchannel_private),
- GFP_KERNEL | GFP_DMA);
- if (!sch->private)
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv)
goto out_schedule;
+
+ set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
@@ -1075,6 +1122,7 @@ out_schedule:
static int
io_subchannel_remove (struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv = to_io_private(sch);
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
@@ -1084,11 +1132,12 @@ io_subchannel_remove (struct subchannel *sch)
/* Set ccw device to not operational and drop reference. */
spin_lock_irq(cdev->ccwlock);
sch_set_cdev(sch, NULL);
+ set_io_private(sch, NULL);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irq(cdev->ccwlock);
ccw_device_unregister(cdev);
out_free:
- kfree(sch->private);
+ kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
}
@@ -1132,6 +1181,7 @@ err:
static int io_subchannel_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
+ struct ccw_device *cdev = sch_get_cdev(sch);
int mask;
mask = chp_ssd_get_mask(&sch->ssd_info, link);
@@ -1141,22 +1191,30 @@ static int io_subchannel_chp_event(struct subchannel *sch,
case CHP_VARY_OFF:
sch->opm &= ~mask;
sch->lpm &= ~mask;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_VARY_ON:
sch->opm |= mask;
sch->lpm |= mask;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
case CHP_OFFLINE:
if (cio_update_schib(sch))
return -ENODEV;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_ONLINE:
if (cio_update_schib(sch))
return -ENODEV;
sch->lpm |= mask & sch->opm;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
}
@@ -1181,6 +1239,7 @@ static void io_subchannel_quiesce(struct subchannel *sch)
cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
while (ret == -EBUSY) {
cdev->private->state = DEV_STATE_QUIESCE;
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, HZ/10);
@@ -1280,10 +1339,12 @@ static int purge_fn(struct device *dev, void *data)
spin_lock_irq(cdev->ccwlock);
if (is_blacklisted(id->ssid, id->devno) &&
- (cdev->private->state == DEV_STATE_OFFLINE)) {
+ (cdev->private->state == DEV_STATE_OFFLINE) &&
+ (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
id->devno);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ atomic_set(&cdev->private->onoff, 0);
}
spin_unlock_irq(cdev->ccwlock);
/* Abort loop in case of pending signal. */
@@ -1348,7 +1409,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
/* Not operational. */
if (!cdev)
return IO_SCH_UNREG;
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG;
return IO_SCH_ORPH_UNREG;
}
@@ -1356,19 +1417,21 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
if (!cdev)
return IO_SCH_ATTACH;
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG_ATTACH;
return IO_SCH_ORPH_ATTACH;
}
if ((sch->schib.pmcw.pam & sch->opm) == 0) {
- if (!ccw_device_notify(cdev, CIO_NO_PATH))
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
return IO_SCH_UNREG;
return IO_SCH_DISC;
}
if (device_is_disconnected(cdev))
return IO_SCH_REPROBE;
- if (cdev->online)
+ if (cdev->online && !cdev->private->flags.resuming)
return IO_SCH_VERIFY;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return IO_SCH_UNREG_ATTACH;
return IO_SCH_NOP;
}
@@ -1424,7 +1487,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
break;
case IO_SCH_UNREG_ATTACH:
case IO_SCH_UNREG:
- if (cdev)
+ if (!cdev)
+ break;
+ if (cdev->private->state == DEV_STATE_SENSE_ID) {
+ /*
+ * Note: delayed work triggered by this event
+ * and repeated calls to sch_event are synchronized
+ * by the above check for work_pending(cdev).
+ */
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ } else
ccw_device_set_notoper(cdev);
break;
case IO_SCH_NOP:
@@ -1447,6 +1519,14 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
goto out;
break;
case IO_SCH_UNREG_ATTACH:
+ spin_lock_irqsave(sch->lock, flags);
+ if (cdev->private->flags.resuming) {
+ /* Device will be handled later. */
+ rc = 0;
+ goto out_unlock;
+ }
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irqrestore(sch->lock, flags);
/* Unregister ccw device. */
ccw_device_unregister(cdev);
break;
@@ -1457,7 +1537,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
switch (action) {
case IO_SCH_ORPH_UNREG:
case IO_SCH_UNREG:
- css_sch_device_unregister(sch);
+ if (!cdev || !cdev->private->flags.resuming)
+ css_sch_device_unregister(sch);
break;
case IO_SCH_ORPH_ATTACH:
case IO_SCH_UNREG_ATTACH:
@@ -1492,86 +1573,121 @@ out:
return rc;
}
-#ifdef CONFIG_CCW_CONSOLE
-static struct ccw_device console_cdev;
-static struct ccw_device_private console_private;
-static int console_cdev_in_use;
-
-static DEFINE_SPINLOCK(ccw_console_lock);
-
-spinlock_t * cio_get_console_lock(void)
+static void ccw_device_set_int_class(struct ccw_device *cdev)
{
- return &ccw_console_lock;
+ struct ccw_driver *cdrv = cdev->drv;
+
+ /* Note: we interpret class 0 in this context as an uninitialized
+ * field since it translates to a non-I/O interrupt class. */
+ if (cdrv->int_class != 0)
+ cdev->private->int_class = cdrv->int_class;
+ else
+ cdev->private->int_class = IRQIO_CIO;
}
-static int ccw_device_console_enable(struct ccw_device *cdev,
- struct subchannel *sch)
+#ifdef CONFIG_CCW_CONSOLE
+int __init ccw_device_enable_console(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
int rc;
- /* Attach subchannel private data. */
- sch->private = cio_get_console_priv();
- memset(sch->private, 0, sizeof(struct io_subchannel_private));
+ if (!cdev->drv || !cdev->handler)
+ return -EINVAL;
+
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
return rc;
sch->driver = &io_subchannel_driver;
- /* Initialize the ccw_device structure. */
- cdev->dev.parent= &sch->dev;
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- rc = -EIO;
- if (cdev->private->state != DEV_STATE_OFFLINE)
+ ccw_device_wait_idle(cdev);
+
+ /* Hold on to an extra reference while device is online. */
+ get_device(&cdev->dev);
+ rc = ccw_device_online(cdev);
+ if (rc)
goto out_unlock;
- ccw_device_online(cdev);
+
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- if (cdev->private->state != DEV_STATE_ONLINE)
- goto out_unlock;
- rc = 0;
+ ccw_device_wait_idle(cdev);
+
+ if (cdev->private->state == DEV_STATE_ONLINE)
+ cdev->online = 1;
+ else
+ rc = -EIO;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
+ if (rc) /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
return rc;
}
-struct ccw_device *
-ccw_device_probe_console(void)
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
struct subchannel *sch;
- int ret;
- if (xchg(&console_cdev_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch = cio_probe_console();
- if (IS_ERR(sch)) {
- console_cdev_in_use = 0;
- return (void *) sch;
+ if (IS_ERR(sch))
+ return ERR_CAST(sch);
+
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv) {
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
- memset(&console_cdev, 0, sizeof(struct ccw_device));
- memset(&console_private, 0, sizeof(struct ccw_device_private));
- console_cdev.private = &console_private;
- console_private.cdev = &console_cdev;
- ret = ccw_device_console_enable(&console_cdev, sch);
- if (ret) {
- cio_release_console();
- console_cdev_in_use = 0;
- return ERR_PTR(ret);
+ set_io_private(sch, io_priv);
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ put_device(&sch->dev);
+ kfree(io_priv);
+ return cdev;
+ }
+ cdev->drv = drv;
+ ccw_device_set_int_class(cdev);
+ return cdev;
+}
+
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct io_subchannel_private *io_priv = to_io_private(sch);
+
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with device lock being held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ while (1) {
+ cio_tsch(sch);
+ if (sch->schib.scsw.cmd.actl == 0)
+ break;
+ udelay_simple(100);
}
- console_cdev.online = 1;
- return &console_cdev;
}
static int ccw_device_pm_restore(struct device *dev);
-int ccw_device_force_console(void)
+int ccw_device_force_console(struct ccw_device *cdev)
{
- if (!console_cdev_in_use)
- return -ENODEV;
- return ccw_device_pm_restore(&console_cdev.dev);
+ return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif
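
The reworked console path replaces the static console_cdev with a create/enable/destroy API. A hedged sketch of how an early console driver might use it (the driver structure, handler, and init function are hypothetical; in the tree the actual users are the 3215/3270 console drivers):

static struct ccw_driver example_console_driver;	/* hypothetical */

static void example_irq_handler(struct ccw_device *cdev,
				unsigned long intparm, struct irb *irb)
{
	/* ... console I/O completion handling ... */
}

static int __init example_console_init(void)
{
	struct ccw_device *cdev;
	int rc;

	cdev = ccw_device_create_console(&example_console_driver);
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);
	cdev->handler = example_irq_handler;	/* required before enable */
	rc = ccw_device_enable_console(cdev);
	if (rc) {
		ccw_device_destroy_console(cdev);
		return rc;
	}
	return 0;
}
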
@@ -1605,15 +1721,9 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
const char *bus_id)
{
struct device *dev;
- struct device_driver *drv;
- drv = get_driver(&cdrv->driver);
- if (!drv)
- return NULL;
-
- dev = driver_find_device(drv, NULL, (void *)bus_id,
+ dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
__ccwdev_check_busid);
- put_driver(drv);
return dev ? to_ccwdev(dev) : NULL;
}
@@ -1636,19 +1746,18 @@ ccw_device_probe (struct device *dev)
int ret;
cdev->drv = cdrv; /* to let the driver call _set_online */
-
+ ccw_device_set_int_class(cdev);
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
-
if (ret) {
cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
return ret;
}
return 0;
}
-static int
-ccw_device_remove (struct device *dev)
+static int ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
@@ -1656,9 +1765,10 @@ ccw_device_remove (struct device *dev)
if (cdrv->remove)
cdrv->remove(cdev);
+
+ spin_lock_irq(cdev->ccwlock);
if (cdev->online) {
cdev->online = 0;
- spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
@@ -1671,9 +1781,12 @@ ccw_device_remove (struct device *dev)
cdev->private->dev_id.devno);
/* Give up reference obtained in ccw_device_set_online(). */
put_device(&cdev->dev);
+ spin_lock_irq(cdev->ccwlock);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
+ spin_unlock_irq(cdev->ccwlock);
return 0;
}
@@ -1778,26 +1891,43 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
- if (cio_is_console(sch->schid))
- goto out;
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid)) {
+ cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ goto out_unlock;
+ }
/*
* While we were sleeping, devices may have gone or become
* available again. Kick re-detection.
*/
- spin_lock_irq(sch->lock);
cdev->private->flags.resuming = 1;
+ cdev->private->path_new_mask = LPM_ANYPATH;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ spin_unlock_irq(sch->lock);
+ css_wait_for_slow_path();
+
+ /* cdev may have been moved to a different subchannel. */
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_OFFLINE)
+ goto out_unlock;
+
ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED);
-out:
+ spin_lock_irq(sch->lock);
+
+out_unlock:
cdev->private->flags.resuming = 0;
+ spin_unlock_irq(sch->lock);
}
static int resume_handle_boxed(struct ccw_device *cdev)
{
cdev->private->state = DEV_STATE_BOXED;
- if (ccw_device_notify(cdev, CIO_BOXED))
+ if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
return 0;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
@@ -1806,7 +1936,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
static int resume_handle_disc(struct ccw_device *cdev)
{
cdev->private->state = DEV_STATE_DISCONNECTED;
- if (ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
return 0;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
@@ -1815,40 +1945,31 @@ static int resume_handle_disc(struct ccw_device *cdev)
static int ccw_device_pm_restore(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
- int ret = 0, cm_enabled;
+ struct subchannel *sch;
+ int ret = 0;
__ccw_device_pm_restore(cdev);
+ sch = to_subchannel(cdev->dev.parent);
spin_lock_irq(sch->lock);
- if (cio_is_console(sch->schid)) {
- cio_enable_subchannel(sch, (u32)(addr_t)sch);
- spin_unlock_irq(sch->lock);
+ if (cio_is_console(sch->schid))
goto out_restore;
- }
- cdev->private->flags.donotify = 0;
+
/* check recognition results */
switch (cdev->private->state) {
case DEV_STATE_OFFLINE:
+ case DEV_STATE_ONLINE:
+ cdev->private->flags.donotify = 0;
break;
case DEV_STATE_BOXED:
ret = resume_handle_boxed(cdev);
- spin_unlock_irq(sch->lock);
if (ret)
- goto out;
+ goto out_unlock;
goto out_restore;
- case DEV_STATE_DISCONNECTED:
- goto out_disc_unlock;
default:
- goto out_unreg_unlock;
- }
- /* check if the device id has changed */
- if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
- "changed from %04x to %04x)\n",
- sch->schid.ssid, sch->schid.sch_no,
- cdev->private->dev_id.devno,
- sch->schib.pmcw.dev);
- goto out_unreg_unlock;
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
}
/* check if the device type has changed */
if (!ccw_device_test_sense_data(cdev)) {
@@ -1857,24 +1978,30 @@ static int ccw_device_pm_restore(struct device *dev)
ret = -ENODEV;
goto out_unlock;
}
- if (!cdev->online) {
- ret = 0;
+ if (!cdev->online)
goto out_unlock;
- }
- ret = ccw_device_online(cdev);
- if (ret)
- goto out_disc_unlock;
- cm_enabled = cdev->private->cmb != NULL;
+ if (ccw_device_online(cdev)) {
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
spin_unlock_irq(sch->lock);
-
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- if (cdev->private->state != DEV_STATE_ONLINE) {
- spin_lock_irq(sch->lock);
- goto out_disc_unlock;
+ spin_lock_irq(sch->lock);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ ret = -ENODEV;
+ goto out_unlock;
}
- if (cm_enabled) {
+
+ /* reenable cmf, if needed */
+ if (cdev->private->cmb) {
+ spin_unlock_irq(sch->lock);
ret = ccw_set_cmf(cdev, 1);
+ spin_lock_irq(sch->lock);
if (ret) {
CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
"(rc=%d)\n", cdev->private->dev_id.ssid,
@@ -1884,27 +2011,17 @@ static int ccw_device_pm_restore(struct device *dev)
}
out_restore:
+ spin_unlock_irq(sch->lock);
if (cdev->online && cdev->drv && cdev->drv->restore)
ret = cdev->drv->restore(cdev);
-out:
return ret;
-out_disc_unlock:
- ret = resume_handle_disc(cdev);
- spin_unlock_irq(sch->lock);
- if (ret)
- return ret;
- goto out_restore;
-
-out_unreg_unlock:
- ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
- ret = -ENODEV;
out_unlock:
spin_unlock_irq(sch->lock);
return ret;
}
-static struct dev_pm_ops ccw_pm_ops = {
+static const struct dev_pm_ops ccw_pm_ops = {
.prepare = ccw_device_pm_prepare,
.complete = ccw_device_pm_complete,
.freeze = ccw_device_pm_freeze,
@@ -1912,7 +2029,7 @@ static struct dev_pm_ops ccw_pm_ops = {
.restore = ccw_device_pm_restore,
};
-struct bus_type ccw_bus_type = {
+static struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
@@ -1935,8 +2052,6 @@ int ccw_driver_register(struct ccw_driver *cdriver)
struct device_driver *drv = &cdriver->driver;
drv->bus = &ccw_bus_type;
- drv->name = cdriver->name;
- drv->owner = cdriver->owner;
return driver_register(drv);
}
@@ -1952,16 +2067,6 @@ void ccw_driver_unregister(struct ccw_driver *cdriver)
driver_unregister(&cdriver->driver);
}
-/* Helper func for qdio. */
-struct subchannel_id
-ccw_device_get_subchannel_id(struct ccw_device *cdev)
-{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- return sch->schid;
-}
-
static void ccw_device_todo(struct work_struct *work)
{
struct ccw_device_private *priv;
@@ -2027,18 +2132,30 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
/* Get workqueue ref. */
if (!get_device(&cdev->dev))
return;
- if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+ if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
/* Already queued, release workqueue ref. */
put_device(&cdev->dev);
}
}
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ return chsc_siosl(sch->schid);
+}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
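
A hedged example of how a driver might use this from an error path; the surrounding function is hypothetical, only ccw_device_siosl() comes from the interface above:

	/* Hypothetical fatal-error path in a ccw device driver. */
	static void my_handle_fatal_error(struct ccw_device *cdev)
	{
		int rc = ccw_device_siosl(cdev);	/* request logging */

		if (rc)
			dev_warn(&cdev->dev,
				 "could not start logging (rc=%d)\n", rc);
	}
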
+
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
-EXPORT_SYMBOL(ccw_bus_type);
-EXPORT_SYMBOL(ccw_device_work);
-EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index bcfe13e4263..8d1d2987317 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -2,9 +2,10 @@
#define S390_DEVICE_H
#include <asm/ccwdev.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
-
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
#include "io_sch.h"
/*
@@ -56,7 +57,16 @@ extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
- dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
+ int state = cdev->private->state;
+
+ if (dev_event == DEV_EVENT_INTERRUPT) {
+ if (state == DEV_STATE_ONLINE)
+ inc_irq_stat(cdev->private->int_class);
+ else if (state != DEV_STATE_CMFCHANGE &&
+ state != DEV_STATE_CMFUPDATE)
+ inc_irq_stat(IRQIO_CIO);
+ }
+ dev_jumptable[state][dev_event](cdev, dev_event);
}
/*
@@ -71,9 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED);
}
-extern struct workqueue_struct *ccw_device_work;
-extern wait_queue_head_t ccw_device_init_wq;
-extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);
void io_subchannel_recog_done(struct ccw_device *cdev);
@@ -91,6 +98,7 @@ int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -131,10 +139,7 @@ int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
-/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
-extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
-extern struct bus_type ccw_bus_type;
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ae760658a13..0bc902b3cd8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/device_fsm.c
* finite state machine for device handling
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
@@ -45,10 +44,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
private = to_io_private(sch);
orb = &private->orb;
- cc = stsch(sch->schid, &schib);
+ cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
- "device information:\n", get_clock());
+ "device information:\n", get_tod_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
@@ -174,7 +173,10 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
ret = cio_clear (sch);
return (ret == 0) ? -EBUSY : ret;
}
- panic("Can't stop i/o on subchannel.\n");
+ /* Function was unsuccessful */
+ CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
+ return -EIO;
}
void ccw_device_update_sense_data(struct ccw_device *cdev)
@@ -313,23 +315,49 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
}
}
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occurred
+ * @event: event that occurred
+ *
+ * Returns:
+ * -%EINVAL if the device is offline or has no driver.
+ * -%EOPNOTSUPP if the device's driver has no notifier registered.
+ * %NOTIFY_OK if the driver wants to keep the device.
+ * %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
+ int ret = -EINVAL;
+
if (!cdev->drv)
- return 0;
+ goto out;
if (!cdev->online)
- return 0;
+ goto out;
CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
event);
- return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
+ if (!cdev->drv->notify) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (cdev->drv->notify(cdev, event))
+ ret = NOTIFY_OK;
+ else
+ ret = NOTIFY_BAD;
+out:
+ return ret;
}
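
Seen from the driver side of this contract, a notify callback returns non-zero to keep the device (mapped to %NOTIFY_OK above) and zero to drop it (%NOTIFY_BAD). A minimal sketch of such a callback, with a made-up policy:

	/* Illustrative ccw_driver.notify implementation. */
	static int my_notify(struct ccw_device *cdev, int event)
	{
		switch (event) {
		case CIO_GONE:
		case CIO_NO_PATH:
			return 1;	/* keep the device, wait for reconnect */
		case CIO_BOXED:
			return 0;	/* give up on boxed devices */
		default:
			return 1;
		}
	}
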
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
- if (ccw_device_notify(cdev, CIO_OPER)) {
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Reenable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
+ /* Save indication for new paths. */
+ cdev->private->path_new_mask = sch->vpm;
return;
}
/* Driver doesn't want device back. */
@@ -361,14 +389,15 @@ ccw_device_done(struct ccw_device *cdev, int state)
case DEV_STATE_BOXED:
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
- if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
+ if (cdev->online &&
+ ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
@@ -378,9 +407,10 @@ ccw_device_done(struct ccw_device *cdev, int state)
CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
"%04x\n", cdev->private->dev_id.devno,
sch->schid.sch_no);
- if (!ccw_device_notify(cdev, CIO_NO_PATH))
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
+ cdev->private->state = DEV_STATE_NOT_OPER;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
- else
+ } else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
break;
@@ -439,8 +469,52 @@ static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
}
}
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void ccw_device_report_path_events(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int path_event[8];
+ int chp, mask;
+
+ for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
+ path_event[chp] = PE_NONE;
+ if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
+ path_event[chp] |= PE_PATH_GONE;
+ if (mask & cdev->private->path_new_mask & sch->vpm)
+ path_event[chp] |= PE_PATH_AVAILABLE;
+ if (mask & cdev->private->pgid_reset_mask & sch->vpm)
+ path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
+ }
+ if (cdev->online && cdev->drv->path_event)
+ cdev->drv->path_event(cdev, path_event);
+}
+
+static void ccw_device_reset_path_events(struct ccw_device *cdev)
+{
+ cdev->private->path_gone_mask = 0;
+ cdev->private->path_new_mask = 0;
+ cdev->private->pgid_reset_mask = 0;
+}
+
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -463,18 +537,15 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
&cdev->private->irb);
memset(&cdev->private->irb, 0, sizeof(struct irb));
}
+ ccw_device_report_path_events(cdev);
break;
case -ETIME:
case -EUSERS:
@@ -493,6 +564,7 @@ callback:
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
+ ccw_device_reset_path_events(cdev);
}
/*
@@ -586,7 +658,7 @@ ccw_device_offline(struct ccw_device *cdev)
static void ccw_device_generic_notoper(struct ccw_device *cdev,
enum dev_event dev_event)
{
- if (!ccw_device_notify(cdev, CIO_GONE))
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
@@ -630,7 +702,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
(scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
- * to the device driver. Can't do path verfication now,
+ * to the device driver. Can't do path verification now,
* delay until final status was delivered.
*/
cdev->private->flags.doverify = 1;
@@ -667,7 +739,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct irb *irb;
int is_cmd;
- irb = (struct irb *) __LC_IRB;
+ irb = &__get_cpu_var(cio_irb);
is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */
if (!scsw_is_solicited(&irb->scsw)) {
@@ -711,13 +783,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
ccw_device_set_timeout(cdev, 0);
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
- if (ret == -ENODEV)
+ if (ret)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
else if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
@@ -732,7 +805,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
- irb = (struct irb *) __LC_IRB;
+ irb = &__get_cpu_var(cio_irb);
/* Check for unsolicited interrupt. */
if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
@@ -781,9 +854,6 @@ call_handler:
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
ccw_device_set_timeout(cdev, 0);
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
@@ -814,6 +884,7 @@ void ccw_device_kill_io(struct ccw_device *cdev)
{
int ret;
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 78a0b43862c..d4fa30541a3 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,7 +1,7 @@
/*
* CCW device SENSE ID I/O handling.
*
- * Copyright IBM Corp. 2002,2009
+ * Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 6da84543dfe..f3c417943da 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -418,12 +418,9 @@ int ccw_device_resume(struct ccw_device *cdev)
int
ccw_device_call_handler(struct ccw_device *cdev)
{
- struct subchannel *sch;
unsigned int stctl;
int ending_status;
- sch = to_subchannel(cdev->dev.parent);
-
/*
* we allow for the device action handler if:
* - we received ending status
@@ -566,14 +563,23 @@ out_unlock:
return rc;
}
-void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
+/**
+ * ccw_device_get_chp_desc - return newly allocated channel-path descriptor
+ * @cdev: device to obtain the descriptor for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel path. Return %NULL on error.
+ */
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
+ int chp_idx)
{
struct subchannel *sch;
struct chp_id chpid;
sch = to_subchannel(cdev->dev.parent);
chp_id_init(&chpid);
- chpid.id = sch->schib.pmcw.chpid[chp_no];
+ chpid.id = sch->schib.pmcw.chpid[chp_idx];
return chp_get_chp_desc(chpid);
}
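
Because the descriptor is a freshly allocated copy, the caller owns it and must kfree() it when done. A sketch under that assumption (the fields printed are from the format-0 descriptor):

	/* Look at the descriptor of a device's first channel path. */
	static void my_show_first_chp(struct ccw_device *cdev)
	{
		struct channel_path_desc *desc;

		desc = ccw_device_get_chp_desc(cdev, 0);
		if (!desc)
			return;
		dev_info(&cdev->dev, "chpid %02x, type %02x\n",
			 desc->chpid, desc->desc);
		kfree(desc);	/* caller owns the copy */
	}
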
@@ -608,11 +614,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -687,6 +703,52 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
/**
+ * ccw_device_get_mdc - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-byte blocks that all paths support at least
+ * for a transport command. Return values <= 0 indicate failures.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_path *chp;
+ struct chp_id chpid;
+ int mdc = 0, i;
+
+ /* Adjust requested path mask to exclude unusable paths. */
+ if (mask)
+ mask &= sch->lpm;
+ else
+ mask = sch->lpm;
+
+ chp_id_init(&chpid);
+ for (i = 0; i < 8; i++) {
+ if (!(mask & (0x80 >> i)))
+ continue;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ continue;
+
+ mutex_lock(&chp->lock);
+ if (!chp->desc_fmt1.f) {
+ mutex_unlock(&chp->lock);
+ return 0;
+ }
+ if (!chp->desc_fmt1.r)
+ mdc = 1;
+ mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+ chp->desc_fmt1.mdc;
+ mutex_unlock(&chp->lock);
+ }
+
+ return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
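
One plausible use, assuming a transport-mode driver that wants to cap its request size: multiply the accumulated count by 64K and clamp to a driver limit. The cap below is a made-up constant:

	#define MY_HARD_LIMIT	(1U << 24)	/* hypothetical per-driver cap */

	/* Upper bound for a single transport-mode transfer, in bytes. */
	static unsigned int my_max_bytes(struct ccw_device *cdev)
	{
		int mdc = ccw_device_get_mdc(cdev, 0);	/* 0: use all paths */

		if (mdc <= 0)
			return 0;	/* no usable common limit */
		return min_t(unsigned int, mdc * 65536U, MY_HARD_LIMIT);
	}
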
+
+/**
* ccw_device_tm_intrg - perform interrogate function
* @cdev: ccw device on which to perform the interrogate function
*
@@ -708,14 +770,18 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
-// FIXME: these have to go:
-
-int
-_ccw_device_get_subchannel_number(struct ccw_device *cdev)
+/**
+ * ccw_device_get_schid - obtain a subchannel id
+ * @cdev: device to obtain the id for
+ * @schid: where to fill in the values
+ */
+void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
- return cdev->private->schid.sch_no;
-}
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ *schid = sch->schid;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_schid);
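
This replaces the removed _ccw_device_get_subchannel_number(): callers such as qdio now obtain the full subchannel id rather than just the number. A trivial sketch:

	static void my_log_schid(struct ccw_device *cdev)
	{
		struct subchannel_id schid;

		ccw_device_get_schid(cdev, &schid);
		pr_debug("device sits on subchannel 0.%x.%04x\n",
			 schid.ssid, schid.sch_no);
	}
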
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -730,5 +796,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
-EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index aad188e43b4..37ada05e82a 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,7 +1,7 @@
/*
* CCW device PGID and path verification I/O handling.
*
- * Copyright IBM Corp. 2002,2009
+ * Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -23,6 +23,8 @@
#define PGID_RETRIES 256
#define PGID_TIMEOUT (10 * HZ)
+static void verify_start(struct ccw_device *cdev);
+
/*
* Process path verification data and report result.
*/
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- /* Adjust lpm. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
nop_build_cp(cdev);
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
sch->vpm |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
nop_do(cdev);
return;
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
req->cp = cp;
}
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ if (rc) {
+ /* We don't know the path groups' state. Abort. */
+ verify_done(cdev, rc);
+ return;
+ }
+ /*
+ * Path groups have been reset. Restart path verification but
+ * leave paths in path_noirq_mask out.
+ */
+ cdev->private->flags.pgid_unknown = 0;
+ verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leaving unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+ id->ssid, id->devno, cdev->private->pgid_valid_mask,
+ cdev->private->path_noirq_mask);
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam;
+ req->callback = pgid_wipeout_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
/*
* Perform establish/resign SET PGID on a single path.
*/
@@ -142,7 +196,7 @@ static void spid_do(struct ccw_device *cdev)
u8 fn;
/* Use next available path that is not already in correct state. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
+ req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
if (!req->lpm)
goto out_nopath;
/* Channel program setup. */
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
return;
out_nopath:
+ if (cdev->private->flags.pgid_unknown) {
+ /* At least one SPID could be partially done. */
+ pgid_wipeout_start(cdev);
+ return;
+ }
verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
-static void verify_start(struct ccw_device *cdev);
-
/*
* Process SET PGID request result for a single path.
*/
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
case 0:
sch->vpm |= req->lpm & sch->opm;
break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
break;
case -EOPNOTSUPP:
if (cdev->private->flags.mpath) {
@@ -208,10 +270,22 @@ static void spid_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
+ req->singlepath = 1;
req->callback = spid_callback;
spid_do(cdev);
}
+static int pgid_is_reset(struct pgid *p)
+{
+ char *c;
+
+ for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
+ if (*c != 0)
+ return 0;
+ }
+ return 1;
+}
+
static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
return memcmp((char *) p1 + 1, (char *) p2 + 1,
@@ -222,7 +296,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
* Determine pathgroup state from PGID data.
*/
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
- int *mismatch, int *reserved, int *reset)
+ int *mismatch, u8 *reserved, u8 *reset)
{
struct pgid *pgid = &cdev->private->pgid[0];
struct pgid *first = NULL;
@@ -236,10 +310,9 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
- *reserved = 1;
- if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
- /* A PGID was reset. */
- *reset = 1;
+ *reserved |= lpm;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
continue;
}
if (!first) {
@@ -254,15 +327,15 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
*p = first;
}
-static u8 pgid_to_vpm(struct ccw_device *cdev)
+static u8 pgid_to_donepm(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int i;
int lpm;
- u8 vpm = 0;
+ u8 donepm = 0;
- /* Set VPM bits for paths which are already in the target state. */
+ /* Set bits for paths which are already in the target state. */
for (i = 0; i < 8; i++) {
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
@@ -282,10 +355,10 @@ static u8 pgid_to_vpm(struct ccw_device *cdev)
if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
continue;
}
- vpm |= lpm;
+ donepm |= lpm;
}
- return vpm;
+ return donepm;
}
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
@@ -305,29 +378,38 @@ static void snid_done(struct ccw_device *cdev, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int mismatch = 0;
- int reserved = 0;
- int reset = 0;
+ u8 reserved = 0;
+ u8 reset = 0;
+ u8 donepm;
if (rc)
goto out;
pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
- if (reserved)
+ if (reserved == cdev->private->pgid_valid_mask)
rc = -EUSERS;
else if (mismatch)
rc = -EOPNOTSUPP;
else {
- sch->vpm = pgid_to_vpm(cdev);
+ donepm = pgid_to_donepm(cdev);
+ sch->vpm = donepm & sch->opm;
+ cdev->private->pgid_reset_mask |= reset;
+ cdev->private->pgid_todo_mask &=
+ ~(donepm | cdev->private->path_noirq_mask);
pgid_fill(cdev, pgid);
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
- "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
- cdev->private->pgid_valid_mask, sch->vpm, mismatch,
- reserved, reset);
+ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
case 0:
+ if (cdev->private->flags.pgid_unknown) {
+ pgid_wipeout_start(cdev);
+ return;
+ }
/* Anything left to do? */
- if (sch->vpm == sch->schib.pmcw.pam) {
+ if (cdev->private->pgid_todo_mask == 0) {
verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
return;
}
@@ -369,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
+ int ret;
- /* Adjust lpm if paths are not set in pam. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
snid_build_cp(cdev);
@@ -379,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
return;
out_nopath:
- snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+ if (cdev->private->pgid_valid_mask)
+ ret = 0;
+ else if (cdev->private->path_noirq_mask)
+ ret = -ETIME;
+ else
+ ret = -EACCES;
+ snid_done(cdev, ret);
}
/*
@@ -389,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
cdev->private->pgid_valid_mask |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
snid_do(cdev);
return;
@@ -411,11 +511,20 @@ static void verify_start(struct ccw_device *cdev)
struct ccw_dev_id *devid = &cdev->private->dev_id;
sch->vpm = 0;
+ sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
+ req->singlepath = 1;
if (cdev->private->flags.pgroup) {
CIO_TRACE_EVENT(4, "snid");
CIO_HEX_EVENT(4, devid, sizeof(*devid));
@@ -444,9 +553,6 @@ void ccw_device_verify_start(struct ccw_device *cdev)
{
CIO_TRACE_EVENT(4, "vrfy");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
- /* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
- cdev->private->pgid_valid_mask = 0;
/*
* Initialize pathgroup and multipath state with target values.
* They may change in the course of path verification.
@@ -454,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
cdev->private->flags.pgroup = cdev->private->options.pgroup;
cdev->private->flags.mpath = cdev->private->options.mpath;
cdev->private->flags.doverify = 0;
+ cdev->private->path_noirq_mask = 0;
verify_start(cdev);
}
@@ -500,6 +607,7 @@ void ccw_device_disband_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->singlepath = 1;
req->callback = disband_callback;
fn = SPID_FUNC_DISBAND;
if (cdev->private->flags.mpath)
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 66d8066ef22..15b56a15db1 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -1,8 +1,5 @@
/*
- * drivers/s390/cio/device_status.c
- *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 2002
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
new file mode 100644
index 00000000000..c4f7bf3e24c
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.c
@@ -0,0 +1,418 @@
+/*
+ * Driver for s390 eadm subchannels
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/isc.h>
+#include <asm/cio.h>
+#include <asm/scsw.h>
+#include <asm/eadm.h>
+
+#include "eadm_sch.h"
+#include "ioasm.h"
+#include "cio.h"
+#include "css.h"
+#include "orb.h"
+
+MODULE_DESCRIPTION("driver for s390 eadm subchannels");
+MODULE_LICENSE("GPL");
+
+#define EADM_TIMEOUT (5 * HZ)
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(eadm_list);
+
+static debug_info_t *eadm_debug;
+
+#define EADM_LOG(imp, txt) do { \
+ debug_text_event(eadm_debug, imp, txt); \
+ } while (0)
+
+static void EADM_LOG_HEX(int level, void *data, int length)
+{
+ if (!debug_level_enabled(eadm_debug, level))
+ return;
+ while (length > 0) {
+ debug_event(eadm_debug, level, data, length);
+ length -= eadm_debug->buf_size;
+ data += eadm_debug->buf_size;
+ }
+}
+
+static void orb_init(union orb *orb)
+{
+ memset(orb, 0, sizeof(union orb));
+ orb->eadm.compat1 = 1;
+ orb->eadm.compat2 = 1;
+ orb->eadm.fmt = 1;
+ orb->eadm.x = 1;
+}
+
+static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
+{
+ union orb *orb = &get_eadm_private(sch)->orb;
+ int cc;
+
+ orb_init(orb);
+ orb->eadm.aob = (u32)__pa(aob);
+ orb->eadm.intparm = (u32)(addr_t)sch;
+ orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
+
+ EADM_LOG(6, "start");
+ EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
+
+ cc = ssch(sch->schid, orb);
+ switch (cc) {
+ case 0:
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
+ break;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ case 3: /* not operational */
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int eadm_subchannel_clear(struct subchannel *sch)
+{
+ int cc;
+
+ cc = csch(sch->schid);
+ if (cc)
+ return -ENODEV;
+
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
+ return 0;
+}
+
+static void eadm_subchannel_timeout(unsigned long data)
+{
+ struct subchannel *sch = (struct subchannel *) data;
+
+ spin_lock_irq(sch->lock);
+ EADM_LOG(1, "timeout");
+ EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
+ if (eadm_subchannel_clear(sch))
+ EADM_LOG(0, "clear failed");
+ spin_unlock_irq(sch->lock);
+}
+
+static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ if (expires == 0) {
+ del_timer(&private->timer);
+ return;
+ }
+ if (timer_pending(&private->timer)) {
+ if (mod_timer(&private->timer, jiffies + expires))
+ return;
+ }
+ private->timer.function = eadm_subchannel_timeout;
+ private->timer.data = (unsigned long) sch;
+ private->timer.expires = jiffies + expires;
+ add_timer(&private->timer);
+}
+
+static void eadm_subchannel_irq(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
+ struct irb *irb = &__get_cpu_var(cio_irb);
+ int error = 0;
+
+ EADM_LOG(6, "irq");
+ EADM_LOG_HEX(6, irb, sizeof(*irb));
+
+ inc_irq_stat(IRQIO_ADM);
+
+ if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
+ && scsw->eswf == 1 && irb->esw.eadm.erw.r)
+ error = -EIO;
+
+ if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
+ error = -ETIMEDOUT;
+
+ eadm_subchannel_set_timeout(sch, 0);
+
+ if (private->state != EADM_BUSY) {
+ EADM_LOG(1, "irq unsol");
+ EADM_LOG_HEX(1, irb, sizeof(*irb));
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ return;
+ }
+ scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
+ private->state = EADM_IDLE;
+
+ if (private->completion)
+ complete(private->completion);
+}
+
+static struct subchannel *eadm_get_idle_sch(void)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(private, &eadm_list, head) {
+ sch = private->sch;
+ spin_lock(sch->lock);
+ if (private->state == EADM_IDLE) {
+ private->state = EADM_BUSY;
+ list_move_tail(&private->head, &eadm_list);
+ spin_unlock(sch->lock);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return sch;
+ }
+ spin_unlock(sch->lock);
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return NULL;
+}
+
+int eadm_start_aob(struct aob *aob)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+ int ret;
+
+ sch = eadm_get_idle_sch();
+ if (!sch)
+ return -EBUSY;
+
+ spin_lock_irqsave(sch->lock, flags);
+ eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
+ ret = eadm_subchannel_start(sch, aob);
+ if (!ret)
+ goto out_unlock;
+
+ /* Handle start subchannel failure. */
+ eadm_subchannel_set_timeout(sch, 0);
+ private = get_eadm_private(sch);
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eadm_start_aob);
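
The intended caller is the SCM layer: it builds an AOB, submits it here, and receives completion through scm_irq_handler() from the interrupt handler above. A hedged sketch of a submit path; the requeue helper is an assumption, not part of the interface:

	static void my_requeue_for_later(struct aob *aob);	/* driver-specific */

	static void my_submit_aob(struct aob *aob)
	{
		int rc = eadm_start_aob(aob);

		if (rc == -EBUSY)
			my_requeue_for_later(aob);	/* all subchannels busy */
		else if (rc)
			scm_irq_handler(aob, rc);	/* fail the request */
	}
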
+
+static int eadm_subchannel_probe(struct subchannel *sch)
+{
+ struct eadm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&private->head);
+ init_timer(&private->timer);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, private);
+ private->state = EADM_IDLE;
+ private->sch = sch;
+ sch->isc = EADM_SCH_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret) {
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ kfree(private);
+ goto out;
+ }
+ spin_unlock_irq(sch->lock);
+
+ spin_lock_irq(&list_lock);
+ list_add(&private->head, &eadm_list);
+ spin_unlock_irq(&list_lock);
+
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+out:
+ return ret;
+}
+
+static void eadm_quiesce(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ if (private->state != EADM_BUSY)
+ goto disable;
+
+ if (eadm_subchannel_clear(sch))
+ goto disable;
+
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ wait_for_completion_io(&completion);
+
+ spin_lock_irq(sch->lock);
+ private->completion = NULL;
+
+disable:
+ eadm_subchannel_set_timeout(sch, 0);
+ do {
+ ret = cio_disable_subchannel(sch);
+ } while (ret == -EBUSY);
+
+ spin_unlock_irq(sch->lock);
+}
+
+static int eadm_subchannel_remove(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ spin_lock_irq(&list_lock);
+ list_del(&private->head);
+ spin_unlock_irq(&list_lock);
+
+ eadm_quiesce(sch);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+
+ kfree(private);
+
+ return 0;
+}
+
+static void eadm_subchannel_shutdown(struct subchannel *sch)
+{
+ eadm_quiesce(sch);
+}
+
+static int eadm_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int eadm_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+/**
+ * eadm_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ struct eadm_private *private;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+
+ if (cio_update_schib(sch)) {
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ goto out_unlock;
+ }
+ private = get_eadm_private(sch);
+ if (private->state == EADM_NOT_OPER)
+ private->state = EADM_IDLE;
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+
+static struct css_device_id eadm_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
+
+static struct css_driver eadm_subchannel_driver = {
+ .drv = {
+ .name = "eadm_subchannel",
+ .owner = THIS_MODULE,
+ },
+ .subchannel_type = eadm_subchannel_ids,
+ .irq = eadm_subchannel_irq,
+ .probe = eadm_subchannel_probe,
+ .remove = eadm_subchannel_remove,
+ .shutdown = eadm_subchannel_shutdown,
+ .sch_event = eadm_subchannel_sch_event,
+ .freeze = eadm_subchannel_freeze,
+ .thaw = eadm_subchannel_restore,
+ .restore = eadm_subchannel_restore,
+};
+
+static int __init eadm_sch_init(void)
+{
+ int ret;
+
+ if (!css_general_characteristics.eadm)
+ return -ENXIO;
+
+ eadm_debug = debug_register("eadm_log", 16, 1, 16);
+ if (!eadm_debug)
+ return -ENOMEM;
+
+ debug_register_view(eadm_debug, &debug_hex_ascii_view);
+ debug_set_level(eadm_debug, 2);
+
+ isc_register(EADM_SCH_ISC);
+ ret = css_driver_register(&eadm_subchannel_driver);
+ if (ret)
+ goto cleanup;
+
+ return ret;
+
+cleanup:
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+ return ret;
+}
+
+static void __exit eadm_sch_exit(void)
+{
+ css_driver_unregister(&eadm_subchannel_driver);
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+}
+module_init(eadm_sch_init);
+module_exit(eadm_sch_exit);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
new file mode 100644
index 00000000000..9664e4653f9
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.h
@@ -0,0 +1,22 @@
+#ifndef EADM_SCH_H
+#define EADM_SCH_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include "orb.h"
+
+struct eadm_private {
+ union orb orb;
+ enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct completion *completion;
+ struct subchannel *sch;
+ struct timer_list timer;
+ struct list_head head;
+} __aligned(8);
+
+#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
+#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
+
+#endif
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 61677dfbdc9..ca5e9bb9d45 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -163,7 +163,7 @@ void tcw_finalize(struct tcw *tcw, int num_tidaws)
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
- memset(tcat, 0, sizeof(tcat));
+ memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tccb_init);
*/
void tsb_init(struct tsb *tsb)
{
- memset(tsb, 0, sizeof(tsb));
+ memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 4d10981c7cc..5a999084a22 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/cio/idset.c
- *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "idset.h"
#include "css.h"
@@ -18,7 +17,7 @@ struct idset {
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
- return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+ return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}
static struct idset *idset_new(int num_ssid, int num_id)
@@ -91,6 +90,14 @@ void idset_sch_del(struct idset *set, struct subchannel_id schid)
idset_del(set, schid.ssid, schid.sch_no);
}
+/* Clear ids starting from @schid up to the end of the subchannel set. */
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
+{
+ int pos = schid.ssid * set->num_id + schid.sch_no;
+
+ bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
+}
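
A quick worked example of the index math, assuming one full subchannel set of 0x10000 ids:

	/* schid = 0.1.0100:  pos = 1 * 0x10000 + 0x0100
	 * bitmap_clear() then wipes 0x10000 - 0x0100 = 0xff00 bits,
	 * i.e. everything from 1.0100 to the end of set 1.
	 */
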
+
int idset_sch_contains(struct idset *set, struct subchannel_id schid)
{
return idset_contains(set, schid.ssid, schid.sch_no);
@@ -113,20 +120,12 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
int idset_is_empty(struct idset *set)
{
- int bitnum;
-
- bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
- if (bitnum >= set->num_ssid * set->num_id)
- return 1;
- return 0;
+ return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
}
void idset_add_set(struct idset *to, struct idset *from)
{
- unsigned long i, len;
+ int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
- len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
- __BITOPS_WORDS(from->num_ssid * from->num_id));
- for (i = 0; i < len ; i++)
- to->bitmap[i] |= from->bitmap[i];
+ bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 7543da4529f..06d3bc01bb0 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/idset.h
- *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
@@ -19,6 +17,7 @@ void idset_fill(struct idset *set);
struct idset *idset_sch_new(void);
void idset_sch_add(struct idset *set, struct subchannel_id id);
void idset_sch_del(struct idset *set, struct subchannel_id id);
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
int idset_sch_contains(struct idset *set, struct subchannel_id id);
int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
int idset_is_empty(struct idset *set);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index d72ae4c93af..b108f4a5c7d 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -4,69 +4,38 @@
#include <linux/types.h>
#include <asm/schid.h>
#include <asm/ccwdev.h>
+#include <asm/irq.h>
#include "css.h"
-
-/*
- * command-mode operation request block
- */
-struct cmd_orb {
- u32 intparm; /* interruption parameter */
- u32 key : 4; /* flags, like key, suspend control, etc. */
- u32 spnd : 1; /* suspend control */
- u32 res1 : 1; /* reserved */
- u32 mod : 1; /* modification control */
- u32 sync : 1; /* synchronize control */
- u32 fmt : 1; /* format control */
- u32 pfch : 1; /* prefetch control */
- u32 isic : 1; /* initial-status-interruption control */
- u32 alcc : 1; /* address-limit-checking control */
- u32 ssic : 1; /* suppress-suspended-interr. control */
- u32 res2 : 1; /* reserved */
- u32 c64 : 1; /* IDAW/QDIO 64 bit control */
- u32 i2k : 1; /* IDAW 2/4kB block size control */
- u32 lpm : 8; /* logical path mask */
- u32 ils : 1; /* incorrect length */
- u32 zero : 6; /* reserved zeros */
- u32 orbx : 1; /* ORB extension control */
- u32 cpa; /* channel program address */
-} __attribute__ ((packed, aligned(4)));
-
-/*
- * transport-mode operation request block
- */
-struct tm_orb {
- u32 intparm;
- u32 key:4;
- u32 :9;
- u32 b:1;
- u32 :2;
- u32 lpm:8;
- u32 :7;
- u32 x:1;
- u32 tcw;
- u32 prio:8;
- u32 :8;
- u32 rsvpgm:8;
- u32 :8;
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
-} __attribute__ ((packed, aligned(4)));
-
-union orb {
- struct cmd_orb cmd;
- struct tm_orb tm;
-} __attribute__ ((packed, aligned(4)));
+#include "orb.h"
struct io_subchannel_private {
union orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
-} __attribute__ ((aligned(8)));
+ struct ccw_device *cdev;/* pointer to the child ccw device */
+ struct {
+ unsigned int suspend:1; /* allow suspend */
+ unsigned int prefetch:1;/* deny prefetch */
+ unsigned int inter:1; /* suppress intermediate interrupts */
+ } __packed options;
+} __aligned(8);
-#define to_io_private(n) ((struct io_subchannel_private *)n->private)
-#define sch_get_cdev(n) (dev_get_drvdata(&n->dev))
-#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c))
+#define to_io_private(n) ((struct io_subchannel_private *) \
+ dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
+
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ if (priv)
+ priv->cdev = cdev;
+}
#define MAX_CIWS 8
@@ -92,11 +61,12 @@ enum io_status {
* @filter: optional callback to adjust request status based on IRB data
* @callback: final callback
* @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
* @mask: current path mask
* @retries: current number of retries
* @drc: delayed return code
- * @cancel: non-zero if request was cancelled
- * @done: non-zero if request was finished
*/
struct ccw_request {
struct ccw1 *cp;
@@ -108,12 +78,13 @@ struct ccw_request {
enum io_status);
void (*callback)(struct ccw_device *, void *, int);
void *data;
+ unsigned int singlepath:1;
/* These fields are used internally. */
+ unsigned int cancel:1;
+ unsigned int done:1;
u16 mask;
u16 retries;
int drc;
- int cancel:1;
- int done:1;
} __attribute__((packed));
/*
@@ -140,6 +111,9 @@ enum cdev_todo {
CDEV_TODO_UNREG_EVAL,
};
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -149,7 +123,15 @@ struct ccw_device_private {
struct subchannel_id schid; /* subchannel number */
struct ccw_request req; /* internal I/O request */
int iretry;
- u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
+ u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_noirq_mask; /* mask of paths for which no irq was
+ received */
+ u8 path_notoper_mask; /* mask of paths which were found
+ not operable */
+ u8 path_gone_mask; /* mask of paths that became unavailable */
+ u8 path_new_mask; /* mask of paths that became available */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
@@ -163,10 +145,11 @@ struct ccw_device_private {
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int fake_irb:2; /* deliver faked irb */
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
+ unsigned int pgid_unknown:1;/* unknown pgid state */
unsigned int initialized:1; /* set if initial reference held */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
@@ -183,25 +166,9 @@ struct ccw_device_private {
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
+ enum interruption_class int_class;
};
-static inline int ssch(struct subchannel_id schid, union orb *addr)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode = -EIO;
-
- asm volatile(
- " ssch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (ccode)
- : "d" (reg1), "a" (addr), "m" (*addr)
- : "cc", "memory");
- return ccode;
-}
-
static inline int rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
@@ -217,21 +184,6 @@ static inline int rsch(struct subchannel_id schid)
return ccode;
}
-static inline int csch(struct subchannel_id schid)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode;
-
- asm volatile(
- " csch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc");
- return ccode;
-}
-
static inline int hsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 75926279263..4d80fc67a06 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -3,6 +3,8 @@
#include <asm/chpid.h>
#include <asm/schid.h>
+#include "orb.h"
+#include "cio.h"
/*
* TPI info structure
@@ -23,21 +25,6 @@ struct tpi_info {
* Some S390 specific IO instructions as inline
*/
-static inline int stsch(struct subchannel_id schid, struct schib *addr)
-{
- register struct subchannel_id reg1 asm ("1") = schid;
- int ccode;
-
- asm volatile(
- " stsch 0(%3)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode), "=m" (*addr)
- : "d" (reg1), "a" (addr)
- : "cc");
- return ccode;
-}
-
static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
@@ -102,6 +89,38 @@ static inline int tsch(struct subchannel_id schid, struct irb *addr)
return ccode;
}
+static inline int ssch(struct subchannel_id schid, union orb *addr)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " ssch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc", "memory");
+ return ccode;
+}
+
+static inline int csch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " csch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
static inline int tpi(struct tpi_info *addr)
{
int ccode;
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index 17da9ab932e..358ee16d10a 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -42,7 +42,7 @@
* size_t size;
*
* size = itcw_calc_size(1, 2, 0);
- * buffer = kmalloc(size, GFP_DMA);
+ * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
* if (!buffer)
* return -ENOMEM;
* itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
@@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw);
size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
{
size_t len;
+ int cross_count;
/* Main data. */
len = sizeof(struct itcw);
@@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
/* TSB */ sizeof(struct tsb) +
/* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
}
+
/* Maximum required alignment padding. */
len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
- /* Maximum padding for structures that may not cross 4k boundary. */
- if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
- len += max(max_tidaws, intrg_max_tidaws) *
- sizeof(struct tidaw) - 1;
+
+ /* TIDAW lists may not cross a 4k boundary. To cross a
+ * boundary we need to add a TTIC TIDAW. We need to reserve
+ * one additional TIDAW for a TTIC that we may need to add due
+ * to the placement of the data chunk in memory, and a further
+ * TIDAW for each page boundary that the TIDAW list may cross
+ * due to its own size.
+ */
+ if (max_tidaws) {
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
+ if (intrg_max_tidaws) {
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
return len;
}
EXPORT_SYMBOL(itcw_calc_size);
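
To make the new padding rule concrete, a worked example (a sketch assuming 16-byte struct tidaw and 4 KiB pages; 300 is an arbitrary illustrative count):

	/*
	 * 300 data tidaws occupy 300 * 16 = 4800 bytes, so the list itself
	 * can cross at most one page boundary, plus one TTIC for the
	 * placement of the list in memory:
	 *   cross_count = 1 + ((300 * 16 - 1) >> PAGE_SHIFT)
	 *               = 1 + (4799 >> 12) = 2
	 * itcw_calc_size(0, 300, 0) therefore reserves two extra TTIC
	 * tidaws (32 bytes) on top of the base length and alignment padding.
	 */
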
@@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
void *chunk;
addr_t start;
addr_t end;
+ int cross_count;
/* Check for 2G limit. */
start = (addr_t) buffer;
@@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
if (IS_ERR(chunk))
return chunk;
itcw = chunk;
- itcw->max_tidaws = max_tidaws;
- itcw->intrg_max_tidaws = intrg_max_tidaws;
+ /* allow for TTIC tidaws that may be needed to cross a page boundary */
+ cross_count = 0;
+ if (max_tidaws)
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->max_tidaws = max_tidaws + cross_count;
+ cross_count = 0;
+ if (intrg_max_tidaws)
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
/* Main TCW. */
chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
if (IS_ERR(chunk))
@@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
/* Data TIDAL. */
if (max_tidaws > 0) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
- max_tidaws, 16, 1);
+ itcw->max_tidaws, 16, 0);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->tcw, chunk, 1);
@@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
/* Interrogate data TIDAL. */
if (intrg && (intrg_max_tidaws > 0)) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
- intrg_max_tidaws, 16, 1);
+ itcw->intrg_max_tidaws, 16, 0);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->intrg_tcw, chunk, 1);
@@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw);
* the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
* available space.
*
- * Note: the tidaw-list is assumed to be contiguous with no ttics. The
- * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
*/
struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
{
+ struct tidaw *following;
+
if (itcw->num_tidaws >= itcw->max_tidaws)
return ERR_PTR(-ENOSPC);
+ /*
+ * Is the tidaw, which follows the one we are about to fill, on the next
+ * page? Then we have to insert a TTIC tidaw first, that points to the
+ * tidaw on the new page.
+ */
+ following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+ + itcw->num_tidaws + 1;
+ if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+ tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+ TIDAW_FLAGS_TTIC, following, 0);
+ if (itcw->num_tidaws >= itcw->max_tidaws)
+ return ERR_PTR(-ENOSPC);
+ }
return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
}
EXPORT_SYMBOL(itcw_add_tidaw);
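
Because the TTIC insertion now happens inside itcw_add_tidaw(), a driver's fill loop can stay oblivious to page boundaries. A hedged usage sketch (chunk[] and nr_chunks are hypothetical driver data, not from this patch):

	struct tidaw *tidaw;
	int i;

	for (i = 0; i < nr_chunks; i++) {
		tidaw = itcw_add_tidaw(itcw, 0, chunk[i].addr, chunk[i].len);
		if (IS_ERR(tidaw))
			return PTR_ERR(tidaw);
	}
	itcw_finalize(itcw);
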
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 00000000000..7a640530e7f
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,91 @@
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+ u32 intparm; /* interruption parameter */
+ u32 key:4; /* flags, like key, suspend control, etc. */
+ u32 spnd:1; /* suspend control */
+ u32 res1:1; /* reserved */
+ u32 mod:1; /* modification control */
+ u32 sync:1; /* synchronize control */
+ u32 fmt:1; /* format control */
+ u32 pfch:1; /* prefetch control */
+ u32 isic:1; /* initial-status-interruption control */
+ u32 alcc:1; /* address-limit-checking control */
+ u32 ssic:1; /* suppress-suspended-interr. control */
+ u32 res2:1; /* reserved */
+ u32 c64:1; /* IDAW/QDIO 64 bit control */
+ u32 i2k:1; /* IDAW 2/4kB block size control */
+ u32 lpm:8; /* logical path mask */
+ u32 ils:1; /* incorrect length */
+ u32 zero:6; /* reserved zeros */
+ u32 orbx:1; /* ORB extension control */
+ u32 cpa; /* channel program address */
+} __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:9;
+ u32 b:1;
+ u32:2;
+ u32 lpm:8;
+ u32:7;
+ u32 x:1;
+ u32 tcw;
+ u32 prio:8;
+ u32:8;
+ u32 rsvpgm:8;
+ u32:8;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+/*
+ * eadm operation request block
+ */
+struct eadm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:4;
+ u32 compat1:1;
+ u32 compat2:1;
+ u32:21;
+ u32 x:1;
+ u32 aob;
+ u32 css_prio:8;
+ u32:8;
+ u32 scm_prio:8;
+ u32:8;
+ u32:29;
+ u32 fmt:3;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+union orb {
+ struct cmd_orb cmd;
+ struct tm_orb tm;
+ struct eadm_orb eadm;
+} __packed __aligned(4);
+
+#endif /* S390_ORB_H */
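
For orientation, a minimal sketch of how the new union is used on the command-mode path (field values follow the conventions in cio.c; intparm, ccw_chain and schid are assumed inputs):

	union orb orb;
	int ccode;

	memset(&orb, 0, sizeof(orb));
	orb.cmd.intparm = intparm;		/* echoed back in the IRB */
	orb.cmd.fmt = 1;			/* format-1 CCWs */
	orb.cmd.key = PAGE_DEFAULT_KEY >> 4;
	orb.cmd.cpa = (u32) __pa(ccw_chain);	/* channel program address */
	ccode = ssch(schid, &orb);

The eadm variant is filled analogously by the new eadm_sch driver, with the AOB address taking the place of the channel program.
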
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ff7748a9199..a563e4c0059 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/cio/qdio.h
- *
- * Copyright 2000,2009 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@@ -13,16 +11,10 @@
#include <asm/debug.h>
#include "chsc.h"
-#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
-#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
-
-/*
- * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
- * till next initiative to give transmitted skbs back to the stack is too long.
- * Therefore polling is started in case of multicast queue is filled more
- * than 50 percent.
- */
-#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */
+#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
+#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
@@ -42,6 +34,7 @@ enum qdio_irq_states {
#define SLSB_STATE_NOT_INIT 0x0
#define SLSB_STATE_EMPTY 0x1
#define SLSB_STATE_PRIMED 0x2
+#define SLSB_STATE_PENDING 0x3
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
#define SLSB_TYPE_INPUT 0x0
@@ -65,6 +58,8 @@ enum qdio_irq_states {
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define SLSB_P_OUTPUT_EMPTY \
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */
#define SLSB_CU_OUTPUT_PRIMED \
(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
#define SLSB_P_OUTPUT_HALTED \
@@ -82,14 +77,12 @@ enum qdio_irq_states {
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
-/* qdio adapter-characteristics-1 flag */
-#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
-#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
-#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
-#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
-#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
-#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
-#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
+/* SIGA flags */
+#define QDIO_SIGA_WRITE 0x00
+#define QDIO_SIGA_READ 0x01
+#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEQ 0x04
+#define QDIO_SIGA_QEBSM_FLAG 0x80
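
These replace the magic numbers that were previously hard-coded in qdio_main.c: the low bits select the SIGA function (write, read, sync, plus the new WRITEQ variant used for asynchronous output with an AOB), and QDIO_SIGA_QEBSM_FLAG is ORed in when the subchannel is addressed by its QEBSM token rather than the schid; see the reworked do_siga_*() helpers in qdio_main.c below.
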
#ifdef CONFIG_64BIT
static inline int do_sqbs(u64 token, unsigned char state, int queue,
@@ -142,97 +135,102 @@ struct siga_flag {
u8 input:1;
u8 output:1;
u8 sync:1;
- u8 no_sync_ti:1;
- u8 no_sync_out_ti:1;
- u8 no_sync_out_pci:1;
- u8:2;
+ u8 sync_after_ai:1;
+ u8 sync_out_after_pci:1;
+ u8:3;
} __attribute__ ((packed));
-struct chsc_ssqd_area {
- struct chsc_header request;
- u16:10;
- u8 ssid:2;
- u8 fmt:4;
- u16 first_sch;
- u16:16;
- u16 last_sch;
- u32:32;
- struct chsc_header response;
- u32:32;
- struct qdio_ssqd_desc qdio_ssqd;
-} __attribute__ ((packed));
+struct qdio_dev_perf_stat {
+ unsigned int adapter_int;
+ unsigned int qdio_int;
+ unsigned int pci_request_int;
+
+ unsigned int tasklet_inbound;
+ unsigned int tasklet_inbound_resched;
+ unsigned int tasklet_inbound_resched2;
+ unsigned int tasklet_outbound;
+
+ unsigned int siga_read;
+ unsigned int siga_write;
+ unsigned int siga_sync;
+
+ unsigned int inbound_call;
+ unsigned int inbound_handler;
+ unsigned int stop_polling;
+ unsigned int inbound_queue_full;
+ unsigned int outbound_call;
+ unsigned int outbound_handler;
+ unsigned int outbound_queue_full;
+ unsigned int fast_requeue;
+ unsigned int target_full;
+ unsigned int eqbs;
+ unsigned int eqbs_partial;
+ unsigned int sqbs;
+ unsigned int sqbs_partial;
+ unsigned int int_discarded;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+ /*
+ * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
+	 * Since at most 127 SBALs are scanned per run, the entry for 128 is
+	 * reused to count "queue full", i.e. all 127 SBALs found.
+ */
+ unsigned int nr_sbals[8];
+ unsigned int nr_sbal_error;
+ unsigned int nr_sbal_nop;
+ unsigned int nr_sbal_total;
+};
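
For reference, the bucket mapping implemented by account_sbals() in qdio_main.c further down:

	/* count == 127 (QDIO_MAX_BUFFERS_MASK) -> nr_sbals[7], "queue full"
	 * any other count                      -> nr_sbals[ilog2(count)]
	 * e.g. count 1 -> [0], 3 -> [1], 100 -> [6]
	 */
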
-struct scssc_area {
- struct chsc_header request;
- u16 operation_code;
- u16:16;
- u32:32;
- u32:32;
- u64 summary_indicator_addr;
- u64 subchannel_indicator_addr;
- u32 ks:4;
- u32 kc:4;
- u32:21;
- u32 isc:3;
- u32 word_with_d_bit;
- u32:32;
- struct subchannel_id schid;
- u32 reserved[1004];
- struct chsc_header response;
- u32:32;
-} __attribute__ ((packed));
+enum qdio_queue_irq_states {
+ QDIO_QUEUE_IRQS_DISABLED,
+};
struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
-
/* first ACK'ed buffer */
int ack_start;
-
/* how much sbals are acknowledged with qebsm */
int ack_count;
-
/* last time of noticing incoming data */
u64 timestamp;
+ /* upper-layer polling flag */
+ unsigned long queue_irq_state;
+ /* callback to start upper-layer polling */
+ void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
struct qdio_output_q {
/* PCIs are enabled for the queue */
int pci_out_enabled;
-
- /* IQDIO: output multiple buffers (enhanced SIGA) */
- int use_enh_siga;
-
+ /* cq: use asynchronous output buffers */
+ int use_cq;
+	/* cq: aobs used for a particular SBAL */
+ struct qaob **aobs;
+ /* cq: sbal state related to asynchronous operation */
+ struct qdio_outbuf_state *sbal_state;
/* timer to check for more outbound work */
struct timer_list timer;
+ /* used SBALs before tasklet schedule */
+ int scan_threshold;
};
+/*
+ * Note on cache alignment: slsb and the write-mostly data are grouped at the
+ * beginning; sbal[] is read-only, starts on a new cacheline and is followed
+ * by the read-mostly members.
+ */
struct qdio_q {
struct slsb slsb;
+
union {
struct qdio_input_q in;
struct qdio_output_q out;
} u;
- /* queue number */
- int nr;
-
- /* bitmask of queue number */
- int mask;
-
- /* input or output queue */
- int is_input_q;
-
- /* list of thinint input queues */
- struct list_head entry;
-
- /* upper-layer program handler */
- qdio_handler_t (*handler);
-
/*
* inbound: next buffer the program should check for
- * outbound: next buffer to check for having been processed
- * by the card
+ * outbound: next buffer to check if adapter processed it
*/
int first_to_check;
@@ -245,21 +243,38 @@ struct qdio_q {
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
- struct qdio_irq *irq_ptr;
- struct dentry *debugfs_q;
- struct tasklet_struct tasklet;
-
/* error condition during a data transfer */
unsigned int qdio_error;
- struct sl *sl;
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+ /* last scan of the queue */
+ u64 timestamp;
+
+ struct tasklet_struct tasklet;
+ struct qdio_queue_perf_stat q_stats;
+
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+ /* queue number */
+ int nr;
+
+ /* bitmask of queue number */
+ int mask;
+
+ /* input or output queue */
+ int is_input_q;
+
+ /* list of thinint input queues */
+ struct list_head entry;
+
+ /* upper-layer program handler */
+ qdio_handler_t (*handler);
+
+ struct dentry *debugfs_q;
+ struct qdio_irq *irq_ptr;
+ struct sl *sl;
/*
- * Warning: Leave this member at the end so it won't be cleared in
- * qdio_fill_qs. A page is allocated under this pointer and used for
- * slib and sl. slib is 2048 bytes big and sl points to offset
- * PAGE_SIZE / 2.
+ * A page is allocated under this pointer and used for slib and sl.
+ * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
*/
struct slib *slib;
} __attribute__ ((aligned(256)));
@@ -269,6 +284,7 @@ struct qdio_irq {
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
struct dentry *debugfs_dev;
+ struct dentry *debugfs_perf;
unsigned long int_parm;
struct subchannel_id schid;
@@ -286,13 +302,10 @@ struct qdio_irq {
struct ciw aqueue;
struct qdio_ssqd_desc ssqd_desc;
-
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
- /*
- * Warning: Leave these members together at the end so they won't be
- * cleared in qdio_setup_irq.
- */
+ int perf_stat_enabled;
+
struct qdr *qdr;
unsigned long chsc_page;
@@ -301,6 +314,7 @@ struct qdio_irq {
debug_info_t *debug_area;
struct mutex setup_mutex;
+ struct qdio_dev_perf_stat perf_stat;
};
/* helper functions */
@@ -311,6 +325,21 @@ struct qdio_irq {
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa)
+#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)
+
+#define qperf_inc(__q, __attr) \
+({ \
+ struct qdio_irq *qdev = (__q)->irq_ptr; \
+ if (qdev->perf_stat_enabled) \
+ (qdev->perf_stat.__attr)++; \
+})
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+ q->q_stats.nr_sbal_error += count;
+ q->q_stats.nr_sbal_total += count;
+}
+
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
@@ -318,30 +347,24 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
-static inline unsigned long long get_usecs(void)
-{
- return monotonic_clock() >> 12;
-}
-
#define pci_out_supported(q) \
(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
-#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
-#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
-#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
-
-#define for_each_input_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->input_qs[0]; \
- i < irq_ptr->nr_input_qs; \
- q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->output_qs[0]; \
- i < irq_ptr->nr_output_qs; \
- q = irq_ptr->output_qs[++i])
+#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
@@ -352,6 +375,13 @@ static inline unsigned long long get_usecs(void)
#define sub_buf(bufnr, dec) \
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
+#define queue_irqs_enabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
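
These bits back the new upper-layer polling mode: the PCI interrupt handler sets QDIO_QUEUE_IRQS_DISABLED and invokes queue_start_poll() instead of scheduling the inbound tasklet, and the driver later re-enables interrupts through the public API added alongside this mode. A sketch of the driver side (the loop structure is illustrative):

	int start, error, n;

	do {
		n = qdio_get_next_buffers(cdev, 0, &start, &error);
		/* process n inbound buffers beginning at index start */
	} while (n > 0);

	/* done polling; qdio_start_irq() clears QDIO_QUEUE_IRQS_DISABLED
	 * and returns 1 if new data raced in, in which case polling must
	 * be resumed */
	if (qdio_start_irq(cdev, 0))
		/* reschedule polling */;
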
+
+extern u64 last_ai_time;
+
/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
@@ -363,6 +393,8 @@ int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
+void clear_nonshared_ind(struct qdio_irq *);
+int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
@@ -384,6 +416,9 @@ int qdio_setup_create_sysfs(struct ccw_device *cdev);
void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 76769978285..f1f3baa8e6e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,12 +1,13 @@
/*
- * drivers/s390/cio/qdio_debug.c
- *
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
@@ -16,11 +17,51 @@ debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
#define QDIO_DEBUGFS_NAME_LEN 10
+#define QDIO_DBF_NAME_LEN 20
+
+struct qdio_dbf_entry {
+ char dbf_name[QDIO_DBF_NAME_LEN];
+ debug_info_t *dbf_info;
+ struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+ struct qdio_dbf_entry *entry;
+ debug_info_t *rc = NULL;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+ if (strcmp(entry->dbf_name, name) == 0) {
+ rc = entry->dbf_info;
+ break;
+ }
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+ return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+ struct qdio_dbf_entry *entry, *tmp;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+ list_del(&entry->dbf_list);
+ debug_unregister(entry->dbf_info);
+ kfree(entry);
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+}
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr)
{
- char text[20];
+ char text[QDIO_DBF_NAME_LEN];
+ struct qdio_dbf_entry *new_entry;
DBF_EVENT("qfmt:%1d", init_data->q_format);
DBF_HEX(init_data->adapter_name, 8);
@@ -33,17 +74,39 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
DBF_HEX(&init_data->input_handler, sizeof(void *));
DBF_HEX(&init_data->output_handler, sizeof(void *));
DBF_HEX(&init_data->int_parm, sizeof(long));
- DBF_HEX(&init_data->flags, sizeof(long));
DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
/* allocate trace view for the interface */
- snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
- irq_ptr->debug_area = debug_register(text, 2, 1, 16);
- debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
- debug_set_level(irq_ptr->debug_area, DBF_WARN);
- DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+ dev_name(&init_data->cdev->dev));
+ irq_ptr->debug_area = qdio_get_dbf_entry(text);
+ if (irq_ptr->debug_area)
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+ else {
+ irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+ if (!irq_ptr->debug_area)
+ return -ENOMEM;
+ if (debug_register_view(irq_ptr->debug_area,
+ &debug_hex_ascii_view)) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ debug_set_level(irq_ptr->debug_area, DBF_WARN);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+ if (!new_entry) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ new_entry->dbf_info = irq_ptr->debug_area;
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_add(&new_entry->dbf_list, &qdio_dbf_list);
+ mutex_unlock(&qdio_dbf_list_mutex);
+ }
+ return 0;
}
static int qstat_show(struct seq_file *m, void *v)
@@ -55,14 +118,21 @@ static int qstat_show(struct seq_file *m, void *v)
if (!q)
return 0;
- seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
- seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
- seq_printf(m, "ftc: %d\n", q->first_to_check);
- seq_printf(m, "last_move: %d\n", q->last_move);
- seq_printf(m, "polling: %d\n", q->u.in.polling);
- seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
- seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
- seq_printf(m, "slsb buffer states:\n");
+ seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
+ q->timestamp, last_ai_time);
+ seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
+ atomic_read(&q->nr_buf_used),
+ q->first_to_check, q->last_move);
+ if (q->is_input_q) {
+ seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+ q->u.in.polling, q->u.in.ack_start,
+ q->u.in.ack_count);
+ seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
+ *(u32 *)q->irq_ptr->dsci,
+ test_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state));
+ }
+ seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
@@ -72,6 +142,9 @@ static int qstat_show(struct seq_file *m, void *v)
case SLSB_P_OUTPUT_NOT_INIT:
seq_printf(m, "N");
break;
+ case SLSB_P_OUTPUT_PENDING:
+ seq_printf(m, "P");
+ break;
case SLSB_P_INPUT_PRIMED:
case SLSB_CU_OUTPUT_PRIMED:
seq_printf(m, "+");
@@ -99,42 +172,132 @@ static int qstat_show(struct seq_file *m, void *v)
}
seq_printf(m, "\n");
seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
+
+ seq_printf(m, "\nSBAL statistics:");
+ if (!q->irq_ptr->perf_stat_enabled) {
+ seq_printf(m, " disabled\n");
+ return 0;
+ }
+
+ seq_printf(m, "\n1 2.. 4.. 8.. "
+ "16.. 32.. 64.. 127\n");
+ for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+ seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+ seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
+ q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+ q->q_stats.nr_sbal_total);
return 0;
}
-static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
+static int qstat_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qstat_show,
+ file_inode(filp)->i_private);
+}
+
+static const struct file_operations debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qstat_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static char *qperf_names[] = {
+ "Assumed adapter interrupts",
+ "QDIO interrupts",
+ "Requested PCIs",
+ "Inbound tasklet runs",
+ "Inbound tasklet resched",
+ "Inbound tasklet resched2",
+ "Outbound tasklet runs",
+ "SIGA read",
+ "SIGA write",
+ "SIGA sync",
+ "Inbound calls",
+ "Inbound handler",
+ "Inbound stop_polling",
+ "Inbound queue full",
+ "Outbound calls",
+ "Outbound handler",
+ "Outbound queue full",
+ "Outbound fast_requeue",
+ "Outbound target_full",
+ "QEBSM eqbs",
+ "QEBSM eqbs partial",
+ "QEBSM sqbs",
+ "QEBSM sqbs partial",
+ "Discarded interrupts"
+};
+
+static int qperf_show(struct seq_file *m, void *v)
+{
+ struct qdio_irq *irq_ptr = m->private;
+ unsigned int *stat;
+ int i;
+
+ if (!irq_ptr)
+ return 0;
+ if (!irq_ptr->perf_stat_enabled) {
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ stat = (unsigned int *)&irq_ptr->perf_stat;
+
+ for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+ seq_printf(m, "%26s:\t%u\n",
+ qperf_names[i], *(stat + i));
+ return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
- struct qdio_q *q = seq->private;
+ struct qdio_irq *irq_ptr = seq->private;
+ struct qdio_q *q;
+ unsigned long val;
+ int ret, i;
- if (!q)
+ if (!irq_ptr)
return 0;
- if (q->is_input_q)
- xchg(q->irq_ptr->dsci, 1);
- local_bh_disable();
- tasklet_schedule(&q->tasklet);
- local_bh_enable();
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ irq_ptr->perf_stat_enabled = 0;
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+ for_each_input_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ for_each_output_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ break;
+ case 1:
+ irq_ptr->perf_stat_enabled = 1;
+ break;
+ }
return count;
}
-static int qstat_seq_open(struct inode *inode, struct file *filp)
+static int qperf_seq_open(struct inode *inode, struct file *filp)
{
- return single_open(filp, qstat_show,
- filp->f_path.dentry->d_inode->i_private);
+ return single_open(filp, qperf_show,
+ file_inode(filp)->i_private);
}
-static const struct file_operations debugfs_fops = {
+static const struct file_operations debugfs_perf_fops = {
.owner = THIS_MODULE,
- .open = qstat_seq_open,
+ .open = qperf_seq_open,
.read = seq_read,
- .write = qstat_seq_write,
+ .write = qperf_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
-static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
+static void setup_debugfs_entry(struct qdio_q *q)
{
char name[QDIO_DEBUGFS_NAME_LEN];
@@ -156,13 +319,21 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
debugfs_root);
if (IS_ERR(irq_ptr->debugfs_dev))
irq_ptr->debugfs_dev = NULL;
+
+ irq_ptr->debugfs_perf = debugfs_create_file("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr,
+ &debugfs_perf_fops);
+ if (IS_ERR(irq_ptr->debugfs_perf))
+ irq_ptr->debugfs_perf = NULL;
+
for_each_input_queue(irq_ptr, q, i)
- setup_debugfs_entry(q, cdev);
+ setup_debugfs_entry(q);
for_each_output_queue(irq_ptr, q, i)
- setup_debugfs_entry(q, cdev);
+ setup_debugfs_entry(q);
}
-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
@@ -171,6 +342,7 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
debugfs_remove(q->debugfs_q);
for_each_output_queue(irq_ptr, q, i)
debugfs_remove(q->debugfs_q);
+ debugfs_remove(irq_ptr->debugfs_perf);
debugfs_remove(irq_ptr->debugfs_dev);
}
@@ -192,6 +364,7 @@ int __init qdio_debug_init(void)
void qdio_debug_exit(void)
{
+ qdio_clear_dbf_list();
debugfs_remove(debugfs_root);
if (qdio_dbf_setup)
debug_unregister(qdio_dbf_setup);
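
With the per-device "statistics" debugfs file in place, writing 1 to it (handled by qperf_seq_write() above) enables collection, writing 0 disables it and zeroes both the per-device and per-queue counters, and reading it back prints one line per qperf_names[] entry.
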
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 5d70bd162ae..f33ce857761 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/qdio_debug.h
- *
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
@@ -18,12 +16,6 @@
extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_error;
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -39,10 +31,14 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
} while (0)
-#define DBF_HEX(addr, len) \
- do { \
- debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \
- } while (0)
+static inline void DBF_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
+ len -= qdio_dbf_setup->buf_size;
+ addr += qdio_dbf_setup->buf_size;
+ }
+}
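
Note the behavioural fix hidden in this conversion: debug_event() stores at most buf_size bytes per trace record, so the loop splits an oversized dump across several records by advancing addr, whereas the old macro silently truncated everything past buf_size. DBF_ERROR_HEX and DBF_DEV_HEX below get the same treatment.
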
#define DBF_ERROR(text...) \
do { \
@@ -51,32 +47,39 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
} while (0)
-#define DBF_ERROR_HEX(addr, len) \
- do { \
- debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \
- } while (0)
-
+static inline void DBF_ERROR_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
+ len -= qdio_dbf_error->buf_size;
+ addr += qdio_dbf_error->buf_size;
+ }
+}
#define DBF_DEV_EVENT(level, device, text...) \
do { \
char debug_buffer[QDIO_DBF_LEN]; \
- if (qdio_dbf_passes(device->debug_area, level)) { \
+ if (debug_level_enabled(device->debug_area, level)) { \
snprintf(debug_buffer, QDIO_DBF_LEN, text); \
debug_text_event(device->debug_area, level, debug_buffer); \
} \
} while (0)
-#define DBF_DEV_HEX(level, device, addr, len) \
- do { \
- debug_event(device->debug_area, level, (void*)(addr), len); \
- } while (0)
+static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
+ int len, int level)
+{
+ while (len > 0) {
+ debug_event(dev->debug_area, level, addr, len);
+ len -= dev->debug_area->buf_size;
+ addr += dev->debug_area->buf_size;
+ }
+}
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
int qdio_debug_init(void);
void qdio_debug_exit(void);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4be6e84b959..848e3b64ea6 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/qdio_main.c
- *
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
- * Copyright 2000,2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
* 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -13,27 +11,30 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
+#include <asm/ipl.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
-static inline int do_siga_sync(struct subchannel_id schid,
- unsigned int out_mask, unsigned int in_mask)
+static inline int do_siga_sync(unsigned long schid,
+ unsigned int out_mask, unsigned int in_mask,
+ unsigned int fc)
{
- register unsigned long __fc asm ("0") = 2;
- register struct subchannel_id __schid asm ("1") = schid;
+ register unsigned long __fc asm ("0") = fc;
+ register unsigned long __schid asm ("1") = schid;
register unsigned long out asm ("2") = out_mask;
register unsigned long in asm ("3") = in_mask;
int cc;
@@ -47,10 +48,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
return cc;
}
-static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
+static inline int do_siga_input(unsigned long schid, unsigned int mask,
+ unsigned int fc)
{
- register unsigned long __fc asm ("0") = 1;
- register struct subchannel_id __schid asm ("1") = schid;
+ register unsigned long __fc asm ("0") = fc;
+ register unsigned long __schid asm ("1") = schid;
register unsigned long __mask asm ("2") = mask;
int cc;
@@ -59,7 +61,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
+ : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
return cc;
}
@@ -70,26 +72,27 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
* @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
* @fc: function code to perform
*
- * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
+ * Returns condition code.
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
- unsigned int *bb, unsigned int fc)
+ unsigned int *bb, unsigned int fc,
+ unsigned long aob)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
- int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
+ register unsigned long __aob asm("3") = aob;
+ int cc;
asm volatile(
" siga 0\n"
- "0: ipm %0\n"
+ " ipm %0\n"
" srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
- : : "cc", "memory");
- *bb = ((unsigned int) __fc) >> 31;
+ : "=d" (cc), "+d" (__fc), "+d" (__aob)
+ : "d" (__schid), "d" (__mask)
+ : "cc");
+ *bb = __fc >> 31;
return cc;
}
@@ -98,9 +101,12 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
/* all done or next buffer state different */
if (ccq == 0 || ccq == 32)
return 0;
- /* not all buffers processed */
- if (ccq == 96 || ccq == 97)
+ /* no buffer processed */
+ if (ccq == 97)
return 1;
+ /* not all buffers processed */
+ if (ccq == 96)
+ return 2;
/* notify devices immediately */
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
return -EIO;
@@ -120,13 +126,10 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
+ int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
- int tmp_count = count, tmp_start = start;
- int nr = q->nr;
- int rc;
- BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
+ qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
@@ -134,29 +137,33 @@ again:
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
auto_ack);
rc = qdio_check_ccq(q, ccq);
-
- /* At least one buffer was processed, return and extract the remaining
- * buffers later.
- */
- if ((ccq == 96) && (count != tmp_count)) {
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
- return (count - tmp_count);
- }
+ if (!rc)
+ return count - tmp_count;
if (rc == 1) {
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
goto again;
}
- if (rc < 0) {
- DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev,
- QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
- return 0;
+ if (rc == 2) {
+ qperf_inc(q, eqbs_partial);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ tmp_count);
+ /*
+ * Retry once, if that fails bail out and process the
+ * extracted buffers before trying again.
+ */
+ if (!retried++)
+ goto again;
+ else
+ return count - tmp_count;
}
- return count - tmp_count;
+
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
}
/**
@@ -180,50 +187,51 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
if (!count)
return 0;
-
- BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
+ qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
- if (rc == 1) {
+ if (!rc) {
+ WARN_ON_ONCE(tmp_count);
+ return count - tmp_count;
+ }
+
+ if (rc == 1 || rc == 2) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
+ qperf_inc(q, sqbs_partial);
goto again;
}
- if (rc < 0) {
- DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev,
- QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
- return 0;
- }
- WARN_ON(tmp_count);
- return count - tmp_count;
+
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
- int auto_ack)
+ int auto_ack, int merge_pending)
{
unsigned char __state = 0;
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
for (i = 0; i < count; i++) {
- if (!__state)
+ if (!__state) {
__state = q->slsb.val[bufnr];
- else if (q->slsb.val[bufnr] != __state)
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+ } else if (merge_pending) {
+ if ((q->slsb.val[bufnr] & __state) != __state)
+ break;
+ } else if (q->slsb.val[bufnr] != __state)
break;
bufnr = next_buf(bufnr);
}
@@ -234,7 +242,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, int auto_ack)
{
- return get_buf_states(q, bufnr, state, 1, auto_ack);
+ return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -243,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
{
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
@@ -263,7 +268,7 @@ static inline int set_buf_state(struct qdio_q *q, int bufnr,
}
/* set slsb states to initial state */
-void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
@@ -279,18 +284,22 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_SYNC;
int cc;
- if (!need_siga_sync(q))
- return 0;
-
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_sync);
+ qperf_inc(q, siga_sync);
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
- cc = do_siga_sync(q->irq_ptr->schid, output, input);
- if (cc)
+ cc = do_siga_sync(schid, output, input, fc);
+ if (unlikely(cc))
DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
- return cc;
+ return (cc) ? -EIO : 0;
}
static inline int qdio_siga_sync_q(struct qdio_q *q)
@@ -301,79 +310,86 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
-static inline int qdio_siga_sync_out(struct qdio_q *q)
-{
- return qdio_siga_sync(q, ~0U, 0);
-}
-
-static inline int qdio_siga_sync_all(struct qdio_q *q)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+ unsigned long aob)
{
- return qdio_siga_sync(q, ~0U, ~0U);
-}
-
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
-{
- unsigned long schid;
- unsigned int fc = 0;
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
- int cc;
+ int retries = 0, cc;
+ unsigned long laob = 0;
- if (q->u.out.use_enh_siga)
- fc = 3;
+ if (q->u.out.use_cq && aob != 0) {
+ fc = QDIO_SIGA_WRITEQ;
+ laob = aob;
+ }
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
- fc |= 0x80;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
}
- else
- schid = *((u32 *)&q->irq_ptr->schid);
-
again:
- cc = do_siga_output(schid, q->mask, busy_bit, fc);
+ WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+ (aob && fc != QDIO_SIGA_WRITEQ));
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
/* hipersocket busy condition */
- if (*busy_bit) {
- WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
+ if (unlikely(*busy_bit)) {
+ retries++;
if (!start_time) {
- start_time = get_usecs();
+ start_time = get_tod_clock_fast();
goto again;
}
- if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
+ if (retries) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+ "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+ }
return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_READ;
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_in);
+ qperf_inc(q, siga_read);
- cc = do_siga_input(q->irq_ptr->schid, q->mask);
- if (cc)
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+
+ cc = do_siga_input(schid, q->mask, fc);
+ if (unlikely(cc))
DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
- return cc;
+ return (cc) ? -EIO : 0;
}
-static inline void qdio_sync_after_thinint(struct qdio_q *q)
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
{
- if (pci_out_supported(q)) {
- if (need_siga_sync_thinint(q))
- qdio_siga_sync_all(q);
- else if (need_siga_sync_out_thinint(q))
- qdio_siga_sync_out(q);
- } else
+ /* PCI capable outbound queues will also be scanned so sync them too */
+ if (pci_out_supported(q))
+ qdio_siga_sync_all(q);
+ else
qdio_siga_sync_q(q);
}
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state)
{
- qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0);
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+ return get_buf_states(q, bufnr, state, 1, 0, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
@@ -382,7 +398,7 @@ static inline void qdio_stop_polling(struct qdio_q *q)
return;
q->u.in.polling = 0;
- qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+ qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
if (is_qebsm(q)) {
@@ -393,25 +409,48 @@ static inline void qdio_stop_polling(struct qdio_q *q)
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
-static void announce_buffer_error(struct qdio_q *q, int count)
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
+{
+ int pos;
+
+ q->q_stats.nr_sbal_total += count;
+ if (count == QDIO_MAX_BUFFERS_MASK) {
+ q->q_stats.nr_sbals[7]++;
+ return;
+ }
+ pos = ilog2(count);
+ q->q_stats.nr_sbals[pos]++;
+}
+
+static void process_buffer_error(struct qdio_q *q, int count)
{
- q->qdio_error |= QDIO_ERROR_SLSB_STATE;
+ unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
+ SLSB_P_OUTPUT_NOT_INIT;
+
+ q->qdio_error = QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
if ((!q->is_input_q &&
- (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
- qdio_perf_stat_inc(&perf_stats.outbound_target_full);
+ (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
+ qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
- return;
+ goto set;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].flags & 0xff,
- q->sbal[q->first_to_check]->element[15].flags & 0xff);
+ q->sbal[q->first_to_check]->element[14].sflags,
+ q->sbal[q->first_to_check]->element[15].sflags);
+
+set:
+ /*
+	 * Further interrupts may be avoided as long as the error is present,
+	 * so change the buffer state immediately to avoid starvation.
+ */
+ set_buf_states(q, q->first_to_check, state, count);
}
static inline void inbound_primed(struct qdio_q *q, int count)
@@ -462,7 +501,9 @@ static inline void inbound_primed(struct qdio_q *q, int count)
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
+
+ q->timestamp = get_tod_clock_fast();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -478,7 +519,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
	 * No siga-sync needed here: either a PCI interrupt or the handling
	 * after a thin interrupt has already synced the queues.
*/
- count = get_buf_states(q, q->first_to_check, &state, count, 1);
+ count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
if (!count)
goto out;
@@ -486,21 +527,27 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- atomic_sub(count, &q->nr_buf_used);
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
break;
case SLSB_P_INPUT_ERROR:
- announce_buffer_error(q, count);
- /* process the buffer, the upper layer will take care of it */
+ process_buffer_error(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
@@ -512,10 +559,10 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
bufnr = get_inbound_buffer_frontier(q);
- if ((bufnr != q->last_move) || q->qdio_error) {
+ if (bufnr != q->last_move) {
q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
- q->u.in.timestamp = get_usecs();
+ if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+ q->u.in.timestamp = get_tod_clock();
return 1;
} else
return 0;
@@ -528,10 +575,11 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
if (!atomic_read(&q->nr_buf_used))
return 1;
- qdio_siga_sync_q(q);
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0);
- if (state == SLSB_P_INPUT_PRIMED)
+ if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
return 0;
@@ -546,7 +594,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -554,6 +602,60 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
return 0;
}
+static inline int contains_aobs(struct qdio_q *q)
+{
+ return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+ unsigned char state = 0;
+ int j, b = start;
+
+ if (!contains_aobs(q))
+ return;
+
+ for (j = 0; j < count; ++j) {
+ get_buf_state(q, b, &state, 0);
+ if (state == SLSB_P_OUTPUT_PENDING) {
+ struct qaob *aob = q->u.out.aobs[b];
+ if (aob == NULL)
+ continue;
+
+ q->u.out.sbal_state[b].flags |=
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
+ q->u.out.aobs[b] = NULL;
+ } else if (state == SLSB_P_OUTPUT_EMPTY) {
+ q->u.out.sbal_state[b].aob = NULL;
+ }
+ b = next_buf(b);
+ }
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+ int bufnr)
+{
+ unsigned long phys_aob = 0;
+
+ if (!q->use_cq)
+ goto out;
+
+ if (!q->aobs[bufnr]) {
+ struct qaob *aob = qdio_allocate_aob();
+ q->aobs[bufnr] = aob;
+ }
+ if (q->aobs[bufnr]) {
+ q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+ q->sbal_state[bufnr].aob = q->aobs[bufnr];
+ q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+ phys_aob = virt_to_phys(q->aobs[bufnr]);
+ WARN_ON_ONCE(phys_aob & 0xFF);
+ }
+
+out:
+ return phys_aob;
+}
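
Taken together, the asynchronous-completion flow works like this: the output path fetches a 256-byte-aligned AOB via qdio_aob_for_buffer() and hands its physical address to SIGA-w (the QDIO_SIGA_WRITEQ case above). If the adapter defers the buffer, its SLSB state later reads SLSB_P_OUTPUT_PENDING, and qdio_handle_aobs() marks the buffer's sbal_state with QDIO_OUTBUF_STATE_FLAG_PENDING, telling the upper layer that completion will arrive via the completion queue instead of the usual EMPTY transition.
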
+
static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
@@ -566,11 +668,15 @@ static void qdio_kick_handler(struct qdio_q *q)
count = sub_buf(end, start);
if (q->is_input_q) {
- qdio_perf_stat_inc(&perf_stats.inbound_handler);
+ qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
- } else
+ } else {
+ qperf_inc(q, outbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
start, count);
+ }
+
+ qdio_handle_aobs(q, start, count);
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
@@ -582,24 +688,32 @@ static void qdio_kick_handler(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
-again:
+ qperf_inc(q, tasklet_inbound);
+
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
- goto again;
+ qperf_inc(q, tasklet_inbound_resched);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
+ }
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q))
- goto again;
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
}
void qdio_inbound_processing(unsigned long data)
@@ -611,11 +725,16 @@ void qdio_inbound_processing(unsigned long data)
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
- if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
- (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
- qdio_siga_sync_q(q);
+ q->timestamp = get_tod_clock_fast();
+
+ if (need_siga_sync(q))
+ if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+ !pci_out_supported(q)) ||
+ (queue_type(q) == QDIO_IQDIO_QFMT &&
+ multicast_outbound(q)))
+ qdio_siga_sync_q(q);
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -623,38 +742,47 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
-
if (q->first_to_check == stop)
- return q->first_to_check;
+ goto out;
- count = get_buf_states(q, q->first_to_check, &state, count, 0);
+ count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
if (!count)
- return q->first_to_check;
+ goto out;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+ "out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+
break;
case SLSB_P_OUTPUT_ERROR:
- announce_buffer_error(q, count);
- /* process the buffer, the upper layer will take care of it */
+ process_buffer_error(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+ q->nr);
break;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
+
+out:
return q->first_to_check;
}
@@ -670,7 +798,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
bufnr = get_outbound_buffer_frontier(q);
- if ((bufnr != q->last_move) || q->qdio_error) {
+ if (bufnr != q->last_move) {
q->last_move = bufnr;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
return 1;
@@ -678,40 +806,52 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
return 0;
}
-static int qdio_kick_outbound_q(struct qdio_q *q)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
+ int retries = 0, cc;
unsigned int busy_bit;
- int cc;
if (!need_siga_out(q))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_out);
+retry:
+ qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit);
+ cc = qdio_siga_output(q, &busy_bit, aob);
switch (cc) {
case 0:
break;
case 2:
if (busy_bit) {
- DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
- cc |= QDIO_ERROR_SIGA_BUSY;
- } else
+ while (++retries < QDIO_BUSY_BIT_RETRIES) {
+ mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+ goto retry;
+ }
+ DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
+ cc = -EBUSY;
+ } else {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
+ cc = -ENOBUFS;
+ }
break;
case 1:
case 3:
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+ cc = -EIO;
break;
}
+ if (retries) {
+ DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+ DBF_ERROR("count:%u", retries);
+ }
return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
- BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+ qperf_inc(q, tasklet_outbound);
+ WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_handler(q);
@@ -720,30 +860,19 @@ static void __qdio_outbound_processing(struct qdio_q *q)
if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
goto sched;
- /* bail out for HiperSockets unicast queues */
- if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
- return;
-
- if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
- (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
- goto sched;
-
if (q->u.out.pci_out_enabled)
return;
/*
* Now we know that queue type is either qeth without pci enabled
- * or HiperSockets multicast. Make sure buffer switch from PRIMED to
- * EMPTY is noticed and outbound_handler is called after some time.
+ * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
+ * is noticed and outbound_handler is called after some time.
*/
if (qdio_outbound_q_done(q))
del_timer(&q->u.out.timer);
- else {
- if (!timer_pending(&q->u.out.timer)) {
+ else
+ if (!timer_pending(&q->u.out.timer))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
- qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
- }
- }
return;
sched:
@@ -783,8 +912,9 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
- qdio_sync_after_thinint(q);
+ qperf_inc(q, tasklet_inbound);
+ if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+ qdio_sync_queues(q);
/*
* The interrupt could be caused by a PCI request. Check the
@@ -798,7 +928,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
qdio_kick_handler(q);
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ qperf_inc(q, tasklet_inbound_resched);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
return;
@@ -811,7 +941,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
* resetting the ACK state.
*/
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ qperf_inc(q, tasklet_inbound_resched2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
@@ -850,10 +980,20 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
- qdio_perf_stat_inc(&perf_stats.pci_int);
-
- for_each_input_queue(irq_ptr, q, i)
- tasklet_schedule(&q->tasklet);
+ for_each_input_queue(irq_ptr, q, i) {
+ if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in progress */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
+ tasklet_schedule(&q->tasklet);
+ }
+ }
if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
@@ -861,10 +1001,8 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
for_each_output_queue(irq_ptr, q, i) {
if (qdio_outbound_q_done(q))
continue;
-
- if (!siga_syncs_out_pci(q))
+ if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
qdio_siga_sync_q(q);
-
tasklet_schedule(&q->tasklet);
}
}
@@ -874,6 +1012,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
+ int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
@@ -887,10 +1026,17 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
dump_stack();
goto no_handler;
}
- q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, irq_ptr->int_parm);
+
+ count = sub_buf(q->first_to_check, q->first_to_kick);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
+ q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+ /*
+	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
+ * Therefore we call the LGR detection function here.
+ */
+ lgr_info_log();
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
@@ -922,24 +1068,19 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int cstat, dstat;
- qdio_perf_stat_inc(&perf_stats.qdio_int);
-
if (!intparm || !irq_ptr) {
DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
return;
}
+ if (irq_ptr->perf_stat_enabled)
+ irq_ptr->perf_stat.qdio_int++;
+
if (IS_ERR(irb)) {
- switch (PTR_ERR(irb)) {
- case -EIO:
- DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- wake_up(&cdev->private->wait_q);
- return;
- default:
- WARN_ON(1);
- return;
- }
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ wake_up(&cdev->private->wait_q);
+ return;
}
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
@@ -962,8 +1103,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_handle_activate_check(cdev, intparm, cstat,
dstat);
break;
+ case QDIO_IRQ_STATE_STOPPED:
+ break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
@@ -988,30 +1131,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-/**
- * qdio_cleanup - shutdown queues and free data structures
- * @cdev: associated ccw device
- * @how: use halt or clear to shutdown
- *
- * This function calls qdio_shutdown() for @cdev with method @how.
- * and qdio_free(). The qdio_free() return value is ignored since
- * !irq_ptr is already checked.
- */
-int qdio_cleanup(struct ccw_device *cdev, int how)
-{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- int rc;
-
- if (!irq_ptr)
- return -ENODEV;
-
- rc = qdio_shutdown(cdev, how);
-
- qdio_free(cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_cleanup);
-
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1041,7 +1160,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
- BUG_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
@@ -1062,7 +1181,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
tiqdio_remove_input_queues(irq_ptr);
qdio_shutdown_queues(cdev);
- qdio_shutdown_debug_entries(irq_ptr, cdev);
+ qdio_shutdown_debug_entries(irq_ptr);
/* cleanup subchannel */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
@@ -1114,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
return -ENODEV;
DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
mutex_lock(&irq_ptr->setup_mutex);
- if (irq_ptr->debug_area != NULL) {
- debug_unregister(irq_ptr->debug_area);
- irq_ptr->debug_area = NULL;
- }
+ irq_ptr->debug_area = NULL;
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1129,28 +1246,6 @@ int qdio_free(struct ccw_device *cdev)
EXPORT_SYMBOL_GPL(qdio_free);
/**
- * qdio_initialize - allocate and establish queues for a qdio subchannel
- * @init_data: initialization data
- *
- * This function first allocates queues via qdio_allocate() and on success
- * establishes them via qdio_establish().
- */
-int qdio_initialize(struct qdio_initialize *init_data)
-{
- int rc;
-
- rc = qdio_allocate(init_data);
- if (rc)
- return rc;
-
- rc = qdio_establish(init_data);
- if (rc)
- qdio_free(init_data->cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_initialize);
-
-/**
* qdio_allocate - allocate qdio queues and associated data
* @init_data: initialization data
*/
@@ -1178,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
goto out_err;
mutex_init(&irq_ptr->setup_mutex);
- qdio_allocate_dbf(init_data, irq_ptr);
+ if (qdio_allocate_dbf(init_data, irq_ptr))
+ goto out_rel;
/*
* Allocate a page for the chsc calls in qdio_establish.
@@ -1194,7 +1290,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto out_rel;
- WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
@@ -1210,6 +1305,26 @@ out_err:
}
EXPORT_SYMBOL_GPL(qdio_allocate);
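+
+/*
+ * Caller sketch (illustrative only, names made up): with the
+ * qdio_initialize() wrapper removed above, users now open-code the
+ * two-step bring-up that the wrapper used to perform.
+ */
+static int example_bring_up(struct qdio_initialize *init_data)
+{
+	int rc;
+
+	rc = qdio_allocate(init_data);
+	if (rc)
+		return rc;
+	rc = qdio_establish(init_data);
+	if (rc)
+		qdio_free(init_data->cdev);
+	return rc;
+}
+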
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q = irq_ptr->input_qs[0];
+ int i, use_cq = 0;
+
+ if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+ use_cq = 1;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (use_cq) {
+ if (qdio_enable_async_operation(&q->u.out) < 0) {
+ use_cq = 0;
+ continue;
+ }
+ } else
+ qdio_disable_async_operation(&q->u.out);
+ }
+ DBF_EVENT("use_cq:%d", use_cq);
+}
+
/**
* qdio_establish - establish queues on a qdio subchannel
* @init_data: initialization data
@@ -1273,8 +1388,8 @@ int qdio_establish(struct qdio_initialize *init_data)
}
qdio_setup_ssqd_info(irq_ptr);
- DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
- DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
+
+ qdio_detect_hsicq(irq_ptr);
/* qebsm is now setup if available, initialize buffer states */
qdio_init_buf_states(irq_ptr);
@@ -1380,7 +1495,9 @@ static inline int buf_in_between(int bufnr, int start, int count)
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- int used, diff;
+ int diff;
+
+ qperf_inc(q, inbound_call);
if (!q->u.in.polling)
goto set;
@@ -1411,16 +1528,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
-
- used = atomic_add_return(count, &q->nr_buf_used) - count;
- BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
-
- /* no need to signal as long as the adapter had free buffers */
- if (used)
- return 0;
+ atomic_add(count, &q->nr_buf_used);
if (need_siga_in(q))
return qdio_siga_input(q);
+
return 0;
}
@@ -1434,59 +1546,50 @@ set:
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- unsigned char state;
+ unsigned char state = 0;
int used, rc = 0;
- qdio_perf_stat_inc(&perf_stats.outbound_handler);
+ qperf_inc(q, outbound_call);
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
- BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
- if (callflags & QDIO_FLAG_PCI_OUT)
+ if (used == QDIO_MAX_BUFFERS_PER_Q)
+ qperf_inc(q, outbound_queue_full);
+
+ if (callflags & QDIO_FLAG_PCI_OUT) {
q->u.out.pci_out_enabled = 1;
- else
+ qperf_inc(q, pci_request_int);
+ } else
q->u.out.pci_out_enabled = 0;
if (queue_type(q) == QDIO_IQDIO_QFMT) {
- if (multicast_outbound(q))
- rc = qdio_kick_outbound_q(q);
- else
- if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
- (count > 1) &&
- (count <= q->irq_ptr->ssqd_desc.mmwc)) {
- /* exploit enhanced SIGA */
- q->u.out.use_enh_siga = 1;
- rc = qdio_kick_outbound_q(q);
- } else {
- /*
- * One siga-w per buffer required for unicast
- * HiperSockets.
- */
- q->u.out.use_enh_siga = 0;
- while (count--) {
- rc = qdio_kick_outbound_q(q);
- if (rc)
- goto out;
- }
- }
- goto out;
- }
+ unsigned long phys_aob = 0;
- if (need_siga_sync(q)) {
- qdio_siga_sync_q(q);
- goto out;
+ /* One SIGA-W per buffer required for unicast HSI */
+ WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+ rc = qdio_kick_outbound_q(q, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
+ } else {
+ /* try to fast requeue buffers */
+ get_buf_state(q, prev_buf(bufnr), &state, 0);
+ if (state != SLSB_CU_OUTPUT_PRIMED)
+ rc = qdio_kick_outbound_q(q, 0);
+ else
+ qperf_inc(q, fast_requeue);
}
- /* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state, 0);
- if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q);
+ /* in case of SIGA errors we must process the error immediately */
+ if (used >= q->u.out.scan_threshold || rc)
+ tasklet_schedule(&q->tasklet);
else
- qdio_perf_stat_inc(&perf_stats.fast_requeue);
-
-out:
- tasklet_schedule(&q->tasklet);
+ /* free the SBALs in case of no further traffic */
+ if (!timer_pending(&q->u.out.timer))
+ mod_timer(&q->u.out.timer, jiffies + HZ);
return rc;
}
@@ -1514,8 +1617,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
"do%02x b:%02x c:%02x", callflags, bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
- return -EBUSY;
-
+ return -EIO;
+ if (!count)
+ return 0;
if (callflags & QDIO_FLAG_SYNC_INPUT)
return handle_inbound(irq_ptr->input_qs[q_nr],
callflags, bufnr, count);
@@ -1526,35 +1630,241 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
}
EXPORT_SYMBOL_GPL(do_QDIO);
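+
+/*
+ * Caller sketch (illustrative only, names made up): returning one
+ * processed inbound buffer to the adapter and priming one outbound
+ * buffer with a PCI interrupt request, both on queue 0.
+ */
+static int example_requeue(struct ccw_device *cdev, int in_buf, int out_buf)
+{
+	int rc;
+
+	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, in_buf, 1);
+	if (rc)
+		return rc;
+	return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
+		       0, out_buf, 1);
+}
+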
+/**
+ * qdio_start_irq - enable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - success
+ * 1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ clear_nonshared_ind(irq_ptr);
+ qdio_stop_polling(q);
+ clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (test_nonshared_ind(irq_ptr))
+ goto rescan;
+ if (!qdio_inbound_q_done(q))
+ goto rescan;
+ return 0;
+
+rescan:
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ * < 0 - error
+ * = 0 - no new buffers found
+ * > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+ int *error)
+{
+ struct qdio_q *q;
+ int start, end;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ /*
+ * Cannot rely on automatic sync after interrupt since queues may
+ * also be examined without interrupt.
+ */
+ if (need_siga_sync(q))
+ qdio_sync_queues(q);
+
+ /* check the PCI capable outbound queues. */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return 0;
+
+ /* Note: upper-layer MUST stop processing immediately here ... */
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return -EIO;
+
+ start = q->first_to_kick;
+ end = q->first_to_check;
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_kick = end;
+ q->qdio_error = 0;
+ return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - interrupts were already disabled
+ * 1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
+
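+/*
+ * Polling sketch (illustrative only, names made up): the intended
+ * interplay of the interfaces above. qdio_start_irq() re-enables
+ * interrupt delivery, qdio_get_next_buffers() drains buffers in polled
+ * mode, and qdio_stop_irq() would switch delivery back off while the
+ * caller wants to stay polling.
+ */
+static void example_poll(struct ccw_device *cdev, int nr)
+{
+	int bufnr, error, rc;
+
+	do {
+		while ((rc = qdio_get_next_buffers(cdev, nr, &bufnr,
+						   &error)) > 0) {
+			/* process rc buffers starting at bufnr, honor error */
+		}
+		if (rc < 0)
+			return;	/* device gone or in error state */
+		/* no new data: re-enable interrupts; rc 1 from
+		 * qdio_start_irq() means data arrived meanwhile */
+	} while (qdio_start_irq(cdev, nr) == 1);
+}
+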
+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid: Subchannel ID.
+ * @cnc: Boolean Change-Notification Control
+ * @response: Response code will be stored at this address
+ * @cb: Callback function will be executed for each element
+ * of the address list
+ * @priv: Pointer passed through unmodified to the callback function
+ * @type: Type of the address entry passed to the callback
+ * @entry: Entry containing the address of the specified type
+ *
+ * Performs "Store-network-bridging-information list" operation and calls
+ * the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv)
+{
+ struct chsc_pnso_area *rr;
+ int rc;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+ if (rc != 0 && rc != -EBUSY)
+ goto out;
+ if (rr->response.code != 1) {
+ rc = -EIO;
+ continue;
+ } else
+ rc = 0;
+
+ if (cb == NULL)
+ continue;
+
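+		/* naids gives the size of one address entry; the payload
+		 * after the chsc and naihdr headers is an array of them */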
+ size = rr->naihdr.naids;
+ elems = (rr->response.length -
+ sizeof(struct chsc_header) -
+ sizeof(struct chsc_brinfo_naihdr)) /
+ size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+			/* Inform the caller that they need to scrap
+			 * the data that was already reported via cb */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ switch (size) {
+ case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+ (*cb)(priv, l3_ipv6_addr,
+ &rr->entries.l3_ipv6[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+ (*cb)(priv, l3_ipv4_addr,
+ &rr->entries.l3_ipv4[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l2):
+ (*cb)(priv, l2_addr_lnid,
+ &rr->entries.l2[i]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ rc = -EIO;
+ goto out;
+ }
+ } while (rr->response.code == 0x0107 || /* channel busy */
+ (rr->response.code == 1 && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+ (*response) = rr->response.code;
+
+out:
+ free_page((unsigned long)rr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
+
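+/*
+ * Callback sketch (illustrative only, names made up): counting the L2
+ * entries reported by qdio_pnso_brinfo(); priv carries a caller-owned
+ * counter, invoked e.g. as
+ * qdio_pnso_brinfo(schid, 1, &resp, example_brinfo_cb, &nr).
+ */
+static void example_brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
+			      void *entry)
+{
+	unsigned long *nr_l2 = priv;
+
+	if (type == l2_addr_lnid)
+		(*nr_l2)++;
+}
+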
static int __init init_QDIO(void)
{
int rc;
- rc = qdio_setup_init();
+ rc = qdio_debug_init();
if (rc)
return rc;
+ rc = qdio_setup_init();
+ if (rc)
+ goto out_debug;
rc = tiqdio_allocate_memory();
if (rc)
goto out_cache;
- rc = qdio_debug_init();
- if (rc)
- goto out_ti;
- rc = qdio_setup_perf_stats();
- if (rc)
- goto out_debug;
rc = tiqdio_register_thinints();
if (rc)
- goto out_perf;
+ goto out_ti;
return 0;
-out_perf:
- qdio_remove_perf_stats();
-out_debug:
- qdio_debug_exit();
out_ti:
tiqdio_free_memory();
out_cache:
qdio_setup_exit();
+out_debug:
+ qdio_debug_exit();
return rc;
}
@@ -1562,9 +1872,8 @@ static void __exit exit_QDIO(void)
{
tiqdio_unregister_thinints();
tiqdio_free_memory();
- qdio_remove_perf_stats();
- qdio_debug_exit();
qdio_setup_exit();
+ qdio_debug_exit();
}
module_init(init_QDIO);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
deleted file mode 100644
index 968e3c7c263..00000000000
--- a/drivers/s390/cio/qdio_perf.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.c
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/ccwdev.h>
-
-#include "cio.h"
-#include "css.h"
-#include "device.h"
-#include "ioasm.h"
-#include "chsc.h"
-#include "qdio_debug.h"
-#include "qdio_perf.h"
-
-int qdio_performance_stats;
-struct qdio_perf_stats perf_stats;
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *qdio_perf_pde;
-#endif
-
-/*
- * procfs functions
- */
-static int qdio_perf_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.qdio_int));
- seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.pci_int));
- seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.thin_int));
- seq_printf(m, "\n");
- seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_inbound));
- seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_outbound));
- seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.tasklet_thinint),
- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
- seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound),
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
- seq_printf(m, "\n");
- seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_in));
- seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_out));
- seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_sync));
- seq_printf(m, "\n");
- seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.inbound_handler));
- seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_handler));
- seq_printf(m, "\n");
- seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
- (long)atomic_long_read(&perf_stats.fast_requeue));
- seq_printf(m, "Number of outbound target full condition\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_target_full));
- seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
- seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_stop_polling));
- seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
- seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
- seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
- seq_printf(m, "\n");
- return 0;
-}
-static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, qdio_perf_proc_show, NULL);
-}
-
-static const struct file_operations qdio_perf_proc_fops = {
- .owner = THIS_MODULE,
- .open = qdio_perf_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/*
- * sysfs functions
- */
-static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
-}
-
-static ssize_t qdio_perf_stats_store(struct bus_type *bus,
- const char *buf, size_t count)
-{
- unsigned long i;
-
- if (strict_strtoul(buf, 16, &i) != 0)
- return -EINVAL;
- if ((i != 0) && (i != 1))
- return -EINVAL;
- if (i == qdio_performance_stats)
- return count;
-
- qdio_performance_stats = i;
- /* reset performance statistics */
- if (i == 0)
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- return count;
-}
-
-static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
- qdio_perf_stats_store);
-
-int __init qdio_setup_perf_stats(void)
-{
- int rc;
-
- rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
- if (rc)
- return rc;
-
-#ifdef CONFIG_PROC_FS
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
- NULL, &qdio_perf_proc_fops);
-#endif
- return 0;
-}
-
-void qdio_remove_perf_stats(void)
-{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("qdio_perf", NULL);
-#endif
- bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
-}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
deleted file mode 100644
index ff4504ce1e3..00000000000
--- a/drivers/s390/cio/qdio_perf.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.h
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#ifndef QDIO_PERF_H
-#define QDIO_PERF_H
-
-#include <linux/types.h>
-#include <asm/atomic.h>
-
-struct qdio_perf_stats {
- /* interrupt handler calls */
- atomic_long_t qdio_int;
- atomic_long_t pci_int;
- atomic_long_t thin_int;
-
- /* tasklet runs */
- atomic_long_t tasklet_inbound;
- atomic_long_t tasklet_outbound;
- atomic_long_t tasklet_thinint;
- atomic_long_t tasklet_thinint_loop;
- atomic_long_t thinint_inbound;
- atomic_long_t thinint_inbound_loop;
- atomic_long_t thinint_inbound_loop2;
-
- /* signal adapter calls */
- atomic_long_t siga_out;
- atomic_long_t siga_in;
- atomic_long_t siga_sync;
-
- /* misc */
- atomic_long_t inbound_handler;
- atomic_long_t outbound_handler;
- atomic_long_t fast_requeue;
- atomic_long_t outbound_target_full;
-
- /* for debugging */
- atomic_long_t debug_tl_out_timer;
- atomic_long_t debug_stop_polling;
- atomic_long_t debug_eqbs_all;
- atomic_long_t debug_eqbs_incomplete;
- atomic_long_t debug_sqbs_all;
- atomic_long_t debug_sqbs_incomplete;
-};
-
-extern struct qdio_perf_stats perf_stats;
-extern int qdio_performance_stats;
-
-static inline void qdio_perf_stat_inc(atomic_long_t *count)
-{
- if (qdio_performance_stats)
- atomic_long_inc(count);
-}
-
-int qdio_setup_perf_stats(void);
-void qdio_remove_perf_stats(void);
-
-#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 18d54fc21ce..f5f4a91fab4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -1,13 +1,12 @@
/*
- * driver/s390/cio/qdio_setup.c
- *
* qdio queue initialization
*
- * Copyright (C) IBM Corp. 2008
+ * Copyright IBM Corp. 2008
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/qdio.h>
#include "cio.h"
@@ -19,6 +18,19 @@
#include "qdio_debug.h"
static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob(void)
+{
+ return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+
+void qdio_release_aob(struct qaob *aob)
+{
+ kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);
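+
+/*
+ * Usage sketch (illustrative only, names made up): one AOB per
+ * outstanding asynchronous request, released again on completion.
+ */
+static struct qaob *example_prepare_aob(void)
+{
+	struct qaob *aob = qdio_allocate_aob();
+
+	if (!aob)
+		return NULL;	/* GFP_ATOMIC pool may be exhausted */
+	/* stash per-request bookkeeping in aob->user0/user1 as needed */
+	return aob;
+}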
/*
* qebsm is only available under 64bit but the adapter sets the feature
@@ -48,7 +60,6 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
if (!irq_ptr)
return;
- WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
irq_ptr->qib.pfmt = qib_param_field_format;
if (qib_param_field)
memcpy(irq_ptr->qib.parm, qib_param_field,
@@ -82,14 +93,12 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
- WARN_ON((unsigned long)q & 0xff);
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
return -ENOMEM;
}
- WARN_ON((unsigned long)q->slib & 0x7ff);
irq_ptr_qs[i] = q;
}
return 0;
@@ -109,10 +118,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
- /* must be cleared by every qdio_establish */
- memset(q, 0, ((char *)&q->slib) - ((char *)q));
- memset(q->slib, 0, PAGE_SIZE);
+ struct slib *slib = q->slib;
+ /* queue must be cleared for qdio_establish */
+ memset(q, 0, sizeof(*q));
+ memset(slib, 0, PAGE_SIZE);
+ q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
@@ -129,10 +140,8 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sbal[j] = *sbals_array++;
- WARN_ON((unsigned long)q->sbal[j] & 0xff);
- }
/* fill in slib */
if (i > 0) {
@@ -147,11 +156,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = (unsigned long)q->sbal[j];
-
- DBF_EVENT("sl-slsb-sbal");
- DBF_HEX(q->sl, sizeof(void *));
- DBF_HEX(&q->slsb, sizeof(void *));
- DBF_HEX(q->sbal, sizeof(void *));
}
static void setup_queues(struct qdio_irq *irq_ptr,
@@ -160,29 +164,39 @@ static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_q *q;
void **input_sbal_array = qdio_init->input_sbal_addr_array;
void **output_sbal_array = qdio_init->output_sbal_addr_array;
+ struct qdio_outbuf_state *output_sbal_state_array =
+ qdio_init->output_sbal_state_array;
int i;
for_each_input_queue(irq_ptr, q, i) {
- DBF_EVENT("in-q:%1d", i);
+ DBF_EVENT("inq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+ qdio_init->queue_start_poll_array[i] : NULL;
+
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
- if (is_thinint_irq(irq_ptr))
+ if (is_thinint_irq(irq_ptr)) {
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
(unsigned long) q);
- else
+ } else {
tasklet_init(&q->tasklet, qdio_inbound_processing,
(unsigned long) q);
+ }
}
for_each_output_queue(irq_ptr, q, i) {
DBF_EVENT("outq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+ q->u.out.sbal_state = output_sbal_state_array;
+ output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
q->is_input_q = 0;
+ q->u.out.scan_threshold = qdio_init->scan_threshold;
setup_storage_lists(q, irq_ptr, output_sbal_array, i);
output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
@@ -201,14 +215,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
irq_ptr->siga_flag.output = 1;
if (qdioac & AC1_SIGA_SYNC_NEEDED)
irq_ptr->siga_flag.sync = 1;
- if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
- irq_ptr->siga_flag.no_sync_ti = 1;
- if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
- irq_ptr->siga_flag.no_sync_out_pci = 1;
-
- if (irq_ptr->siga_flag.no_sync_out_pci &&
- irq_ptr->siga_flag.no_sync_ti)
- irq_ptr->siga_flag.no_sync_out_ti = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+ irq_ptr->siga_flag.sync_after_ai = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+ irq_ptr->siga_flag.sync_out_after_pci = 1;
}
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
@@ -244,40 +254,31 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
int rc;
DBF_EVENT("getssqd:%4x", schid->sch_no);
- if (irq_ptr != NULL)
- ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
- else
+ if (!irq_ptr) {
ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
- memset(ssqd, 0, PAGE_SIZE);
-
- ssqd->request = (struct chsc_header) {
- .length = 0x0010,
- .code = 0x0024,
- };
- ssqd->first_sch = schid->sch_no;
- ssqd->last_sch = schid->sch_no;
- ssqd->ssid = schid->ssid;
-
- if (chsc(ssqd))
- return -EIO;
- rc = chsc_error_from_response(ssqd->response.code);
+ if (!ssqd)
+ return -ENOMEM;
+ } else {
+ ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+ }
+
+ rc = chsc_ssqd(*schid, ssqd);
if (rc)
- return rc;
+ goto out;
if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
(ssqd->qdio_ssqd.sch != schid->sch_no))
- return -EINVAL;
-
- if (irq_ptr != NULL)
- memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
- sizeof(struct qdio_ssqd_desc));
- else {
- memcpy(data, &ssqd->qdio_ssqd,
- sizeof(struct qdio_ssqd_desc));
+ rc = -EINVAL;
+
+ if (!rc)
+ memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
+
+out:
+ if (!irq_ptr)
free_page((unsigned long)ssqd);
- }
- return 0;
+
+ return rc;
}
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
@@ -285,7 +286,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
unsigned char qdioac;
int rc;
- rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
+ rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
if (rc) {
DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%x", rc);
@@ -297,7 +298,8 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
process_ac_flags(irq_ptr, qdioac);
- DBF_EVENT("qdioac:%4x", qdioac);
+ DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
+ DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
void qdio_release_memory(struct qdio_irq *irq_ptr)
@@ -319,6 +321,19 @@ void qdio_release_memory(struct qdio_irq *irq_ptr)
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->output_qs[i];
if (q) {
+ if (q->u.out.use_cq) {
+ int n;
+
+ for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+ struct qaob *aob = q->u.out.aobs[n];
+ if (aob) {
+ qdio_release_aob(aob);
+ q->u.out.aobs[n] = NULL;
+ }
+ }
+
+ qdio_disable_async_operation(&q->u.out);
+ }
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
@@ -341,10 +356,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->qdf0[i + nr].slsba =
(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
- irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}
static void setup_qdr(struct qdio_irq *irq_ptr,
@@ -353,12 +368,13 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
int i;
irq_ptr->qdr->qfmt = qdio_init->q_format;
+ irq_ptr->qdr->ac = qdio_init->qdr_ac;
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
- irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@@ -374,6 +390,8 @@ static void setup_qib(struct qdio_irq *irq_ptr,
if (qebsm_possible())
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+ irq_ptr->qib.rflags |= init_data->qib_rflags;
+
irq_ptr->qib.qfmt = init_data->q_format;
if (init_data->no_input_qs)
irq_ptr->qib.isliba =
@@ -390,16 +408,23 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
int rc;
- memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
+ memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+ memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+ memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+ memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+ irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
+ irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+
/* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
-
- irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->cdev = init_data->cdev;
+ ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
@@ -446,7 +471,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
- "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n",
+ "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
dev_name(&cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -458,29 +483,65 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
(irq_ptr->siga_flag.input) ? "R" : " ",
(irq_ptr->siga_flag.output) ? "W" : " ",
(irq_ptr->siga_flag.sync) ? "S" : " ",
- (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ",
- (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ",
- (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
+ (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+ (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
printk(KERN_INFO "%s", s);
}
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+ outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
+ GFP_ATOMIC);
+ if (!outq->aobs) {
+ outq->use_cq = 0;
+ return -ENOMEM;
+ }
+ outq->use_cq = 1;
+ return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+ kfree(q->aobs);
+ q->aobs = NULL;
+ q->use_cq = 0;
+}
+
int __init qdio_setup_init(void)
{
+ int rc;
+
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
+ qdio_aob_cache = kmem_cache_create("qdio_aob",
+ sizeof(struct qaob),
+ sizeof(struct qaob),
+ 0,
+ NULL);
+ if (!qdio_aob_cache) {
+ rc = -ENOMEM;
+ goto free_qdio_q_cache;
+ }
+
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
(css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
- return 0;
+ rc = 0;
+out:
+ return rc;
+free_qdio_q_cache:
+ kmem_cache_destroy(qdio_q_cache);
+ goto out;
}
void qdio_setup_exit(void)
{
+ kmem_cache_destroy(qdio_aob_cache);
kmem_cache_destroy(qdio_q_cache);
}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 981a77ea7ee..5d06253c2a7 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,15 +1,13 @@
/*
- * linux/drivers/s390/cio/thinint_qdio.c
- *
- * thin interrupt support for qdio
- *
- * Copyright 2000-2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/io.h>
-#include <asm/atomic.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
@@ -19,7 +17,6 @@
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
/*
* Restriction: only 63 iqdio subchannels can have their own indicator,
@@ -29,33 +26,27 @@
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND 63
-/* list of thin interrupt input queues */
-static LIST_HEAD(tiq_list);
-DEFINE_MUTEX(tiq_list_lock);
-
-/* adapter local summary indicator */
-static unsigned char *tiqdio_alsi;
-
/* device state change indicators */
struct indicator_t {
u32 ind; /* u32 because of compare-and-swap performance */
atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
-static struct indicator_t *q_indicators;
-static int css_qdio_omit_svs;
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+static DEFINE_MUTEX(tiq_list_lock);
-static inline unsigned long do_clear_global_summary(void)
-{
- register unsigned long __fn asm("1") = 3;
- register unsigned long __tmp asm("2");
- register unsigned long __time asm("3");
-
- asm volatile(
- " .insn rre,0xb2650000,2,0"
- : "+d" (__fn), "=d" (__tmp), "=d" (__time));
- return __time;
-}
+/* Adapter interrupt definitions */
+static void tiqdio_thinint_handler(struct airq_struct *airq);
+
+static struct airq_struct tiqdio_airq = {
+ .handler = tiqdio_thinint_handler,
+ .isc = QDIO_AIRQ_ISC,
+};
+
+static struct indicator_t *q_indicators;
+
+u64 last_ai_time;
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
@@ -86,147 +77,167 @@ static void put_indicator(u32 *addr)
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
- int i;
-
- /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
- if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
- css_qdio_omit_svs = 1;
-
mutex_lock(&tiq_list_lock);
- for_each_input_queue(irq_ptr, q, i)
- list_add_rcu(&q->entry, &tiq_list);
+ list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1);
+ xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
- int i;
- for (i = 0; i < irq_ptr->nr_input_qs; i++) {
- q = irq_ptr->input_qs[i];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
- continue;
+ q = irq_ptr->input_qs[0];
+ /* if establish triggered an error */
+ if (!q || !q->entry.prev || !q->entry.next)
+ return;
- mutex_lock(&tiq_list_lock);
- list_del_rcu(&q->entry);
- mutex_unlock(&tiq_list_lock);
- synchronize_rcu();
- }
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
}
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
- return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+ return irq_ptr->nr_input_qs > 1;
}
-/**
- * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
- */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
+static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
+ return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
- qdio_perf_stat_inc(&perf_stats.thin_int);
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+ return references_shared_dsci(irq_ptr) ||
+ has_multiple_inq_on_dsci(irq_ptr);
+}
- /*
- * SVS only when needed: issue SVS to benefit from iqdio interrupt
- * avoidance (SVS clears adapter interrupt suppression overwrite)
- */
- if (!css_qdio_omit_svs)
- do_clear_global_summary();
+void clear_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+ if (shared_ind(irq_ptr))
+ return;
+ xchg(irq_ptr->dsci, 0);
+}
- /*
- * reset local summary indicator (tiqdio_alsi) to stop adapter
- * interrupts for now
- */
- xchg((u8 *)ind, 0);
+int test_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+ if (shared_ind(irq_ptr))
+ return 0;
+ if (*irq_ptr->dsci)
+ return 1;
+ else
+ return 0;
+}
- /* protect tiq_list entries, only changed in activate or shutdown */
- rcu_read_lock();
+static inline u32 clear_shared_ind(void)
+{
+ if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
+ return 0;
+ return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
+}
- /* check for work on all inbound thinint queues */
- list_for_each_entry_rcu(q, &tiq_list, entry)
- /* only process queues from changed sets */
- if (*q->irq_ptr->dsci) {
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+ struct qdio_q *q;
+ int i;
- /* only clear it if the indicator is non-shared */
+ for_each_input_queue(irq, q, i) {
+ if (!references_shared_dsci(irq) &&
+ has_multiple_inq_on_dsci(irq))
+ xchg(q->irq_ptr->dsci, 0);
+
+ if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in progress */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+
+ /* avoid dsci clear here, done after processing */
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
if (!shared_ind(q->irq_ptr))
xchg(q->irq_ptr->dsci, 0);
+
/*
- * don't call inbound processing directly since
- * that could starve other thinint queues
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
*/
tasklet_schedule(&q->tasklet);
}
+ }
+}
- rcu_read_unlock();
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @alsi: pointer to adapter local summary indicator
+ * @data: NULL
+ */
+static void tiqdio_thinint_handler(struct airq_struct *airq)
+{
+ u32 si_used = clear_shared_ind();
+ struct qdio_q *q;
+
+ last_ai_time = S390_lowcore.int_clock;
+ inc_irq_stat(IRQIO_QAI);
- /*
- * if we used the shared indicator clear it now after all queues
- * were processed
- */
- if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
- xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
+ /* protect tiq_list entries, only changed in activate or shutdown */
+ rcu_read_lock();
+
+ /* check for work on all inbound thinint queues */
+ list_for_each_entry_rcu(q, &tiq_list, entry) {
+ struct qdio_irq *irq;
- /* prevent racing */
- if (*tiqdio_alsi)
- xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
+ /* only process queues from changed sets */
+ irq = q->irq_ptr;
+ if (unlikely(references_shared_dsci(irq))) {
+ if (!si_used)
+ continue;
+ } else if (!*irq->dsci)
+ continue;
+
+ tiqdio_call_inq_handlers(irq);
+
+ qperf_inc(q, adapter_int);
}
+ rcu_read_unlock();
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
- struct scssc_area *scssc_area;
+ struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
+ u64 summary_indicator_addr, subchannel_indicator_addr;
int rc;
- scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
- memset(scssc_area, 0, PAGE_SIZE);
-
if (reset) {
- scssc_area->summary_indicator_addr = 0;
- scssc_area->subchannel_indicator_addr = 0;
+ summary_indicator_addr = 0;
+ subchannel_indicator_addr = 0;
} else {
- scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
- scssc_area->subchannel_indicator_addr =
- virt_to_phys(irq_ptr->dsci);
+ summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
+ subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
}
- scssc_area->request = (struct chsc_header) {
- .length = 0x0fe0,
- .code = 0x0021,
- };
- scssc_area->operation_code = 0;
- scssc_area->ks = PAGE_DEFAULT_KEY;
- scssc_area->kc = PAGE_DEFAULT_KEY;
- scssc_area->isc = QDIO_AIRQ_ISC;
- scssc_area->schid = irq_ptr->schid;
-
- /* enable the time delay disablement facility */
- if (css_general_characteristics.aif_tdd)
- scssc_area->word_with_d_bit = 0x10000000;
-
- rc = chsc(scssc_area);
- if (rc)
- return -EIO;
-
- rc = chsc_error_from_response(scssc_area->response.code);
+ rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+ subchannel_indicator_addr);
if (rc) {
DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
- scssc_area->response.code);
- DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
- return rc;
+ scssc->response.code);
+ goto out;
}
DBF_EVENT("setscind");
- DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
- DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
- return 0;
+ DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
+ DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
+out:
+ return rc;
}
/* allocate non-shared indicators and shared indicator */
@@ -246,14 +257,12 @@ void tiqdio_free_memory(void)
int __init tiqdio_register_thinints(void)
{
- isc_register(QDIO_AIRQ_ISC);
- tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
- NULL, QDIO_AIRQ_ISC);
- if (IS_ERR(tiqdio_alsi)) {
- DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
- tiqdio_alsi = NULL;
- isc_unregister(QDIO_AIRQ_ISC);
- return -ENOMEM;
+ int rc;
+
+ rc = register_adapter_interrupt(&tiqdio_airq);
+ if (rc) {
+ DBF_EVENT("RTI:%x", rc);
+ return rc;
}
return 0;
}
@@ -262,12 +271,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
-
- /* Check for aif time delay disablement. If installed,
- * omit SVS even under LPAR
- */
- if (css_general_characteristics.aif_tdd)
- css_qdio_omit_svs = 1;
return set_subchannel_ind(irq_ptr, 0);
}
@@ -285,16 +288,12 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
return;
/* reset adapter interrupt indicators */
- put_indicator(irq_ptr->dsci);
set_subchannel_ind(irq_ptr, 1);
+ put_indicator(irq_ptr->dsci);
}
void __exit tiqdio_unregister_thinints(void)
{
WARN_ON(!list_empty(&tiq_list));
-
- if (tiqdio_alsi) {
- s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
- isc_unregister(QDIO_AIRQ_ISC);
- }
+ unregister_adapter_interrupt(&tiqdio_airq);
}
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
new file mode 100644
index 00000000000..15268edc54a
--- /dev/null
+++ b/drivers/s390/cio/scm.c
@@ -0,0 +1,288 @@
+/*
+ * Recognize and maintain s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/eadm.h>
+#include "chsc.h"
+
+static struct device *scm_root;
+
+#define to_scm_dev(n) container_of(n, struct scm_device, dev)
+#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
+
+static int scmdev_probe(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
+}
+
+static int scmdev_remove(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+}
+
+static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=scm:scmdev");
+}
+
+static struct bus_type scm_bus_type = {
+ .name = "scm",
+ .probe = scmdev_probe,
+ .remove = scmdev_remove,
+ .uevent = scmdev_uevent,
+};
+
+/**
+ * scm_driver_register() - register a scm driver
+ * @scmdrv: driver to be registered
+ */
+int scm_driver_register(struct scm_driver *scmdrv)
+{
+ struct device_driver *drv = &scmdrv->drv;
+
+ drv->bus = &scm_bus_type;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_register);
+
+/**
+ * scm_driver_unregister() - deregister a scm driver
+ * @scmdrv: driver to be deregistered
+ */
+void scm_driver_unregister(struct scm_driver *scmdrv)
+{
+ driver_unregister(&scmdrv->drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_unregister);
+
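+/*
+ * Registration sketch (illustrative only, names made up): the minimal
+ * driver an SCM user would put on this bus.
+ */
+static int example_scm_probe(struct scm_device *scmdev)
+{
+	dev_info(&scmdev->dev, "probed increment at %016llx\n",
+		 (unsigned long long) scmdev->address);
+	return 0;
+}
+
+static int example_scm_remove(struct scm_device *scmdev)
+{
+	return 0;
+}
+
+static struct scm_driver example_scm_driver = {
+	.drv = {
+		.name	= "example_scm",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= example_scm_probe,
+	.remove	= example_scm_remove,
+};
+
+/* module init/exit would pair scm_driver_register(&example_scm_driver)
+ * with scm_driver_unregister(&example_scm_driver) */
+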
+void scm_irq_handler(struct aob *aob, int error)
+{
+ struct aob_rq_header *aobrq = (void *) aob->request.data;
+ struct scm_device *scmdev = aobrq->scmdev;
+ struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
+
+ scmdrv->handler(scmdev, aobrq->data, error);
+}
+EXPORT_SYMBOL_GPL(scm_irq_handler);
+
+#define scm_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scm_device *scmdev = to_scm_dev(dev); \
+ int ret; \
+ \
+ device_lock(dev); \
+ ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
+ device_unlock(dev); \
+ \
+ return ret; \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+scm_attr(persistence);
+scm_attr(oper_state);
+scm_attr(data_state);
+scm_attr(rank);
+scm_attr(release);
+scm_attr(res_id);
+
+static struct attribute *scmdev_attrs[] = {
+ &dev_attr_persistence.attr,
+ &dev_attr_oper_state.attr,
+ &dev_attr_data_state.attr,
+ &dev_attr_rank.attr,
+ &dev_attr_release.attr,
+ &dev_attr_res_id.attr,
+ NULL,
+};
+
+static struct attribute_group scmdev_attr_group = {
+ .attrs = scmdev_attrs,
+};
+
+static const struct attribute_group *scmdev_attr_groups[] = {
+ &scmdev_attr_group,
+ NULL,
+};
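+
+/*
+ * Resulting sysfs layout (sketch): each SCM increment appears on the bus
+ * named after its 16-digit hex starting address, e.g.
+ *
+ *	/sys/bus/scm/devices/0000000000000000/oper_state
+ *	/sys/bus/scm/devices/0000000000000000/persistence
+ *
+ * All attributes above are read-only and serialized via device_lock().
+ */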
+
+static void scmdev_release(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ kfree(scmdev);
+}
+
+static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
+ unsigned int size, unsigned int max_blk_count)
+{
+ dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
+ scmdev->nr_max_block = max_blk_count;
+ scmdev->address = sale->sa;
+ scmdev->size = 1UL << size;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.persistence = sale->p;
+ scmdev->attrs.oper_state = sale->op_state;
+ scmdev->attrs.data_state = sale->data_state;
+ scmdev->attrs.release = sale->r;
+ scmdev->attrs.res_id = sale->rid;
+ scmdev->dev.parent = scm_root;
+ scmdev->dev.bus = &scm_bus_type;
+ scmdev->dev.release = scmdev_release;
+ scmdev->dev.groups = scmdev_attr_groups;
+}
+
+/*
+ * Check for state-changes, notify the driver and userspace.
+ */
+static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
+{
+ struct scm_driver *scmdrv;
+ bool changed;
+
+ device_lock(&scmdev->dev);
+ changed = scmdev->attrs.rank != sale->rank ||
+ scmdev->attrs.oper_state != sale->op_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.oper_state = sale->op_state;
+ if (!scmdev->dev.driver)
+ goto out;
+ scmdrv = to_scm_drv(scmdev->dev.driver);
+ if (changed && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_CHANGE);
+out:
+ device_unlock(&scmdev->dev);
+ if (changed)
+ kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int check_address(struct device *dev, void *data)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct sale *sale = data;
+
+ return scmdev->address == sale->sa;
+}
+
+static struct scm_device *scmdev_find(struct sale *sale)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
+
+ return dev ? to_scm_dev(dev) : NULL;
+}
+
+static int scm_add(struct chsc_scm_info *scm_info, size_t num)
+{
+ struct sale *sale, *scmal = scm_info->scmal;
+ struct scm_device *scmdev;
+ int ret;
+
+ for (sale = scmal; sale < scmal + num; sale++) {
+ scmdev = scmdev_find(sale);
+ if (scmdev) {
+ scmdev_update(scmdev, sale);
+ /* Release reference from scm_find(). */
+ put_device(&scmdev->dev);
+ continue;
+ }
+ scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
+ if (!scmdev)
+			return -ENOMEM;
+ scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
+ ret = device_register(&scmdev->dev);
+ if (ret) {
+ /* Release reference from device_initialize(). */
+ put_device(&scmdev->dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int scm_update_information(void)
+{
+ struct chsc_scm_info *scm_info;
+ u64 token = 0;
+ size_t num;
+ int ret;
+
+ scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!scm_info)
+ return -ENOMEM;
+
+ do {
+ ret = chsc_scm_info(scm_info, token);
+ if (ret)
+ break;
+
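+		/* response.length counts from the response header through
+		 * the last SALE; stripping the header span leaves the array */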
+ num = (scm_info->response.length -
+ (offsetof(struct chsc_scm_info, scmal) -
+ offsetof(struct chsc_scm_info, response))
+ ) / sizeof(struct sale);
+
+ ret = scm_add(scm_info, num);
+ if (ret)
+ break;
+
+ token = scm_info->restok;
+ } while (token);
+
+ free_page((unsigned long)scm_info);
+
+ return ret;
+}
+
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ if (dev->driver && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_AVAIL);
+
+ return 0;
+}
+
+int scm_process_availability_information(void)
+{
+ return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
+static int __init scm_init(void)
+{
+ int ret;
+
+ ret = bus_register(&scm_bus_type);
+ if (ret)
+ return ret;
+
+ scm_root = root_device_register("scm");
+ if (IS_ERR(scm_root)) {
+ bus_unregister(&scm_bus_type);
+ return PTR_ERR(scm_root);
+ }
+
+ scm_update_information();
+ return 0;
+}
+subsys_initcall_sync(scm_init);