Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/Makefile         |    8
-rw-r--r--  drivers/s390/cio/airq.c           |  334
-rw-r--r--  drivers/s390/cio/blacklist.c      |   63
-rw-r--r--  drivers/s390/cio/ccwgroup.c       |  604
-rw-r--r--  drivers/s390/cio/ccwreq.c         |  367
-rw-r--r--  drivers/s390/cio/chp.c            |  128
-rw-r--r--  drivers/s390/cio/chp.h            |   18
-rw-r--r--  drivers/s390/cio/chsc.c           |  714
-rw-r--r--  drivers/s390/cio/chsc.h           |  210
-rw-r--r--  drivers/s390/cio/chsc_sch.c       |  251
-rw-r--r--  drivers/s390/cio/cio.c            |  483
-rw-r--r--  drivers/s390/cio/cio.h            |   42
-rw-r--r--  drivers/s390/cio/cmf.c            |   23
-rw-r--r--  drivers/s390/cio/crw.c            |   32
-rw-r--r--  drivers/s390/cio/css.c            |  684
-rw-r--r--  drivers/s390/cio/css.h            |   37
-rw-r--r--  drivers/s390/cio/device.c         | 1806
-rw-r--r--  drivers/s390/cio/device.h         |   53
-rw-r--r--  drivers/s390/cio/device_fsm.c     |  643
-rw-r--r--  drivers/s390/cio/device_id.c      |  375
-rw-r--r--  drivers/s390/cio/device_ops.c     |  291
-rw-r--r--  drivers/s390/cio/device_pgid.c    | 1045
-rw-r--r--  drivers/s390/cio/device_status.c  |    8
-rw-r--r--  drivers/s390/cio/eadm_sch.c       |  418
-rw-r--r--  drivers/s390/cio/eadm_sch.h       |   22
-rw-r--r--  drivers/s390/cio/fcx.c            |    4
-rw-r--r--  drivers/s390/cio/idset.c          |   29
-rw-r--r--  drivers/s390/cio/idset.h          |    7
-rw-r--r--  drivers/s390/cio/io_sch.h         |  206
-rw-r--r--  drivers/s390/cio/ioasm.h          |   49
-rw-r--r--  drivers/s390/cio/itcw.c           |   64
-rw-r--r--  drivers/s390/cio/orb.h            |   91
-rw-r--r--  drivers/s390/cio/qdio.h           |  274
-rw-r--r--  drivers/s390/cio/qdio_debug.c     |  301
-rw-r--r--  drivers/s390/cio/qdio_debug.h     |   53
-rw-r--r--  drivers/s390/cio/qdio_main.c      | 1157
-rw-r--r--  drivers/s390/cio/qdio_perf.c      |  159
-rw-r--r--  drivers/s390/cio/qdio_perf.h      |   59
-rw-r--r--  drivers/s390/cio/qdio_setup.c     |  201
-rw-r--r--  drivers/s390/cio/qdio_thinint.c   |  337
-rw-r--r--  drivers/s390/cio/scm.c            |  288
-rw-r--r--  drivers/s390/cio/scsw.c           |  843
42 files changed, 7280 insertions, 5501 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index adb3dd30152..8c4a386e97f 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,13 +2,15 @@
# Makefile for the S/390 common i/o drivers
#
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
- fcx.o itcw.o crw.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
+ fcx.o itcw.o crw.o ccwreq.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
+obj-$(CONFIG_EADM_SCH) += eadm_sch.o
+obj-$(CONFIG_SCM_BUS) += scm.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
-qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 65d2e769dfa..00bfbee0af9 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/airq.c
* Support for adapter interruptions
*
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
@@ -10,142 +9,267 @@
*/
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
#include <linux/slab.h>
-#include <linux/rcupdate.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
+#include "ioasm.h"
-#define NR_AIRQS 32
-#define NR_AIRQS_PER_WORD sizeof(unsigned long)
-#define NR_AIRQ_WORDS (NR_AIRQS / NR_AIRQS_PER_WORD)
+static DEFINE_SPINLOCK(airq_lists_lock);
+static struct hlist_head airq_lists[MAX_ISC+1];
-union indicator_t {
- unsigned long word[NR_AIRQ_WORDS];
- unsigned char byte[NR_AIRQS];
-} __attribute__((packed));
+/**
+ * register_adapter_interrupt() - register adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ *
+ * Returns 0 on success, -EINVAL on invalid input or -ENOMEM on allocation failure.
+ */
+int register_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
-struct airq_t {
- adapter_int_handler_t handler;
- void *drv_data;
-};
+ if (!airq->handler || airq->isc > MAX_ISC)
+ return -EINVAL;
+ if (!airq->lsi_ptr) {
+ airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
+ if (!airq->lsi_ptr)
+ return -ENOMEM;
+ airq->flags |= AIRQ_PTR_ALLOCATED;
+ }
+ if (!airq->lsi_mask)
+ airq->lsi_mask = 0xff;
+ snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ isc_register(airq->isc);
+ spin_lock(&airq_lists_lock);
+ hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
+ spin_unlock(&airq_lists_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_adapter_interrupt);
-static union indicator_t indicators[MAX_ISC+1];
-static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
+/**
+ * unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ */
+void unregister_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
-static int register_airq(struct airq_t *airq, u8 isc)
+ if (hlist_unhashed(&airq->list))
+ return;
+ snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ spin_lock(&airq_lists_lock);
+ hlist_del_rcu(&airq->list);
+ spin_unlock(&airq_lists_lock);
+ synchronize_rcu();
+ isc_unregister(airq->isc);
+ if (airq->flags & AIRQ_PTR_ALLOCATED) {
+ kfree(airq->lsi_ptr);
+ airq->lsi_ptr = NULL;
+ airq->flags &= ~AIRQ_PTR_ALLOCATED;
+ }
+}
+EXPORT_SYMBOL(unregister_adapter_interrupt);
+
+static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{
- int i;
+ struct tpi_info *tpi_info;
+ struct airq_struct *airq;
+ struct hlist_head *head;
+
+ __this_cpu_write(s390_idle.nohz_delay, 1);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ head = &airq_lists[tpi_info->isc];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(airq, head, list)
+ if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+ airq->handler(airq);
+ rcu_read_unlock();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction airq_interrupt = {
+ .name = "AIO",
+ .handler = do_airq_interrupt,
+};
- for (i = 0; i < NR_AIRQS; i++)
- if (!cmpxchg(&airqs[isc][i], NULL, airq))
- return i;
- return -ENOMEM;
+void __init init_airq_interrupts(void)
+{
+ irq_set_chip_and_handler(THIN_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
/**
- * s390_register_adapter_interrupt() - register adapter interrupt handler
- * @handler: adapter handler to be registered
- * @drv_data: driver data passed with each call to the handler
- * @isc: isc for which the handler should be called
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+ * @flags: allocation flags
*
- * Returns:
- * Pointer to the indicator to be used on success
- * ERR_PTR() if registration failed
+ * Returns a pointer to an interrupt vector structure
*/
-void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
- void *drv_data, u8 isc)
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
{
- struct airq_t *airq;
- char dbf_txt[16];
- int ret;
-
- if (isc > MAX_ISC)
- return ERR_PTR(-EINVAL);
- airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
- if (!airq) {
- ret = -ENOMEM;
+ struct airq_iv *iv;
+ unsigned long size;
+
+ iv = kzalloc(sizeof(*iv), GFP_KERNEL);
+ if (!iv)
goto out;
+ iv->bits = bits;
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ iv->vector = kzalloc(size, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ if (flags & AIRQ_IV_ALLOC) {
+ iv->avail = kmalloc(size, GFP_KERNEL);
+ if (!iv->avail)
+ goto out_free;
+ memset(iv->avail, 0xff, size);
+ iv->end = 0;
+ } else
+ iv->end = bits;
+ if (flags & AIRQ_IV_BITLOCK) {
+ iv->bitlock = kzalloc(size, GFP_KERNEL);
+ if (!iv->bitlock)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_PTR) {
+ size = bits * sizeof(unsigned long);
+ iv->ptr = kzalloc(size, GFP_KERNEL);
+ if (!iv->ptr)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_DATA) {
+ size = bits * sizeof(unsigned int);
+ iv->data = kzalloc(size, GFP_KERNEL);
+ if (!iv->data)
+ goto out_free;
}
- airq->handler = handler;
- airq->drv_data = drv_data;
+ spin_lock_init(&iv->lock);
+ return iv;
- ret = register_airq(airq, isc);
+out_free:
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->avail);
+ kfree(iv->vector);
+ kfree(iv);
out:
- snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
- CIO_TRACE_EVENT(4, dbf_txt);
- if (ret < 0) {
- kfree(airq);
- return ERR_PTR(ret);
- } else
- return &indicators[isc].byte[ret];
+ return NULL;
}
-EXPORT_SYMBOL(s390_register_adapter_interrupt);
+EXPORT_SYMBOL(airq_iv_create);
/**
- * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
- * @ind: indicator for which the handler is to be unregistered
- * @isc: interruption subclass
+ * airq_iv_release - release an interrupt vector
+ * @iv: pointer to interrupt vector structure
*/
-void s390_unregister_adapter_interrupt(void *ind, u8 isc)
+void airq_iv_release(struct airq_iv *iv)
{
- struct airq_t *airq;
- char dbf_txt[16];
- int i;
-
- i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
- snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
- CIO_TRACE_EVENT(4, dbf_txt);
- indicators[isc].byte[i] = 0;
- airq = xchg(&airqs[isc][i], NULL);
- /*
- * Allow interrupts to complete. This will ensure that the airq handle
- * is no longer referenced by any interrupt handler.
- */
- synchronize_sched();
- kfree(airq);
+ kfree(iv->data);
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->vector);
+ kfree(iv->avail);
+ kfree(iv);
}
-EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
-
-#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
+EXPORT_SYMBOL(airq_iv_release);
-void do_adapter_IO(u8 isc)
+/**
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
+ * @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
+ *
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
+ */
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
- int w;
- int i;
- unsigned long word;
- struct airq_t *airq;
-
- /*
- * Access indicator array in word-sized chunks to minimize storage
- * fetch operations.
- */
- for (w = 0; w < NR_AIRQ_WORDS; w++) {
- word = indicators[isc].word[w];
- i = w * NR_AIRQS_PER_WORD;
- /*
- * Check bytes within word for active indicators.
- */
- while (word) {
- if (word & INDICATOR_MASK) {
- airq = airqs[isc][i];
- /* Make sure gcc reads from airqs only once. */
- barrier();
- if (likely(airq))
- airq->handler(&indicators[isc].byte[i],
- airq->drv_data);
- else
- /*
- * Reset ill-behaved indicator.
- */
- indicators[isc].byte[i] = 0;
- }
- word <<= 8;
- i++;
+ unsigned long bit, i, flags;
+
+ if (!iv->avail || num == 0)
+ return -1UL;
+ spin_lock_irqsave(&iv->lock, flags);
+ bit = find_first_bit_inv(iv->avail, iv->bits);
+ while (bit + num <= iv->bits) {
+ for (i = 1; i < num; i++)
+ if (!test_bit_inv(bit + i, iv->avail))
+ break;
+ if (i >= num) {
+ /* Found a suitable block of irqs */
+ for (i = 0; i < num; i++)
+ clear_bit_inv(bit + i, iv->avail);
+ if (bit + num >= iv->end)
+ iv->end = bit + num + 1;
+ break;
}
+ bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+ }
+ if (bit + num > iv->bits)
+ bit = -1UL;
+ spin_unlock_irqrestore(&iv->lock, flags);
+ return bit;
+}
+EXPORT_SYMBOL(airq_iv_alloc);
+
+/**
+ * airq_iv_free - free irq bits of an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
+ */
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
+{
+ unsigned long i, flags;
+
+ if (!iv->avail || num == 0)
+ return;
+ spin_lock_irqsave(&iv->lock, flags);
+ for (i = 0; i < num; i++) {
+ /* Clear (possibly left over) interrupt bit */
+ clear_bit_inv(bit + i, iv->vector);
+ /* Make the bit positions available again */
+ set_bit_inv(bit + i, iv->avail);
+ }
+ if (bit + num >= iv->end) {
+ /* Find new end of bit-field */
+ while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+ iv->end--;
}
+ spin_unlock_irqrestore(&iv->lock, flags);
+}
+EXPORT_SYMBOL(airq_iv_free);
+
+/**
+ * airq_iv_scan - scan interrupt vector for non-zero bits
+ * @iv: pointer to interrupt vector structure
+ * @start: bit number to start the search
+ * @end: bit number to end the search
+ *
+ * Returns the bit number of the next non-zero interrupt bit, or
+ * -1UL if the scan completed without finding any more non-zero bits.
+ */
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+ unsigned long end)
+{
+ unsigned long bit;
+
+ /* Find the next non-zero bit starting from 'start'. */
+ bit = find_next_bit_inv(iv->vector, end, start);
+ if (bit >= end)
+ return -1UL;
+ clear_bit_inv(bit, iv->vector);
+ return bit;
}
+EXPORT_SYMBOL(airq_iv_scan);
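
The rework above replaces the fixed indicator array with per-ISC hlists of
airq_struct handlers plus the new airq_iv bit-vector helpers. A minimal usage
sketch, not part of the patch (the handler body, the IO_SCH_ISC choice and the
vector size of 64 bits are assumptions for illustration):

	#include <asm/airq.h>
	#include <asm/isc.h>

	static struct airq_iv *my_iv;

	/* Called whenever *my_airq.lsi_ptr & my_airq.lsi_mask is non-zero. */
	static void my_airq_handler(struct airq_struct *airq)
	{
		unsigned long bit;

		/* airq_iv_scan() clears each pending bit as it returns it. */
		for (bit = airq_iv_scan(my_iv, 0, my_iv->bits); bit != -1UL;
		     bit = airq_iv_scan(my_iv, bit + 1, my_iv->bits))
			; /* dispatch the work queued behind 'bit' here */
	}

	static struct airq_struct my_airq = {
		.handler = my_airq_handler,
		.isc = IO_SCH_ISC,	/* assumed; any ISC <= MAX_ISC works */
	};

	static int __init my_init(void)
	{
		int rc;

		/* lsi_ptr is left NULL, so the summary-indicator byte is
		 * allocated by register_adapter_interrupt() itself. */
		rc = register_adapter_interrupt(&my_airq);
		if (rc)
			return rc;
		/* 64 interrupt bits plus an allocation map for airq_iv_alloc(). */
		my_iv = airq_iv_create(64, AIRQ_IV_ALLOC);
		if (!my_iv) {
			unregister_adapter_interrupt(&my_airq);
			return -ENOMEM;
		}
		return 0;
	}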
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 6565f027791..b3f791b2c1f 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,9 +1,7 @@
/*
- * drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
*
- * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 1999, 2013
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -14,14 +12,14 @@
#include <linux/init.h>
#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>
-#include <asm/cio.h>
#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ipl.h>
#include "blacklist.h"
#include "cio.h"
@@ -80,17 +78,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
int max_digit, int max_val)
{
int diff;
- unsigned int value;
diff = 0;
*val = 0;
- while (isxdigit(**cp) && (diff <= max_digit)) {
+ while (diff <= max_digit) {
+ int value = hex_to_bin(**cp);
- if (isdigit(**cp))
- value = **cp - '0';
- else
- value = tolower(**cp) - 'a' + 10;
+ if (value < 0)
+ break;
*val = *val * 16 + value;
(*cp)++;
diff++;
@@ -177,6 +173,29 @@ static int blacklist_parse_parameters(char *str, range_action action,
to_cssid = __MAX_CSSID;
to_ssid = __MAX_SSID;
to = __MAX_SUBCHANNEL;
+ } else if (strcmp(parm, "ipldev") == 0) {
+ if (ipl_info.type == IPL_TYPE_CCW) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.ccw.dev_id.ssid;
+ from = ipl_info.data.ccw.dev_id.devno;
+ } else if (ipl_info.type == IPL_TYPE_FCP ||
+ ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.fcp.dev_id.ssid;
+ from = ipl_info.data.fcp.dev_id.devno;
+ } else {
+ continue;
+ }
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ } else if (strcmp(parm, "condev") == 0) {
+ if (console_devno == -1)
+ continue;
+
+ from_cssid = to_cssid = 0;
+ from_ssid = to_ssid = 0;
+ from = to = console_devno;
} else {
rc = parse_busid(strsep(&parm, "-"), &from_cssid,
&from_ssid, &from, msgtrigger);
@@ -241,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf)
parm = strsep(&buf, " ");
- if (strcmp("free", parm) == 0)
+ if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- else if (strcmp("add", parm) == 0)
+ css_schedule_eval_all_unreg(0);
+ } else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
- css_schedule_reprobe();
return rc;
}
@@ -265,13 +284,11 @@ struct ccwdev_iter {
static void *
cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
{
- struct ccwdev_iter *iter;
+ struct ccwdev_iter *iter = s->private;
if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
- iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL);
- if (!iter)
- return ERR_PTR(-ENOMEM);
+ memset(iter, 0, sizeof(*iter));
iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
return iter;
@@ -280,8 +297,6 @@ cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
static void
cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
{
- if (!IS_ERR(it))
- kfree(it);
}
static void *
@@ -342,10 +357,9 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
return -EINVAL;
if (user_len > 65536)
user_len = 65536;
- buf = vmalloc (user_len + 1); /* maybe better use the stack? */
+ buf = vzalloc(user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
- memset(buf, 0, user_len + 1);
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
rc = -EFAULT;
@@ -378,14 +392,15 @@ static const struct seq_operations cio_ignore_proc_seq_ops = {
static int
cio_ignore_proc_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &cio_ignore_proc_seq_ops);
+ return seq_open_private(file, &cio_ignore_proc_seq_ops,
+ sizeof(struct ccwdev_iter));
}
static const struct file_operations cio_ignore_proc_fops = {
.open = cio_ignore_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_private,
.write = cio_ignore_write,
};
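
With the hunks above, the two new keywords are accepted wherever a device
specification may appear, i.e. in the cio_ignore= kernel parameter and in
writes to /proc/cio_ignore. A typical invocation (usage assumed from the
parsing code; "!" frees a blacklisted range):

	cio_ignore=all,!ipldev,!condev

This blacklists all devices except the IPL device and, if present, the
console device.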
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 22ce765d537..e443b0d0b23 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Arnd Bergmann (arndb@de.ibm.com)
- * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Copyright IBM Corp. 2002, 2012
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/errno.h>
@@ -16,10 +15,13 @@
#include <linux/ctype.h>
#include <linux/dcache.h>
+#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
-#define CCW_BUS_ID_SIZE 20
+#include "device.h"
+
+#define CCW_BUS_ID_SIZE 10
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
@@ -28,68 +30,161 @@
* to devices that use multiple subchannels.
*/
-/* a device matches a driver if all its slave devices match the same
- * entry of the driver */
-static int
-ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
+static struct bus_type ccwgroup_bus_type;
+
+static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
+ int i;
+ char str[8];
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(drv);
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+ }
+}
- if (gdev->creator_id == gdrv->driver_id)
- return 1;
+/*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+ struct ccw_device *cdev;
+ int i;
- return 0;
+ for (i = 0; i < gdev->count; i++) {
+ cdev = gdev->cdev[i];
+ if (!cdev)
+ continue;
+ spin_lock_irq(cdev->ccwlock);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irq(cdev->ccwlock);
+ gdev->cdev[i] = NULL;
+ put_device(&cdev->dev);
+ }
}
-static int
-ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
+
+/**
+ * ccwgroup_set_online() - enable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the online state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
- /* TODO */
- return 0;
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_ONLINE)
+ goto out;
+ if (gdrv->set_online)
+ ret = gdrv->set_online(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_ONLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
}
+EXPORT_SYMBOL(ccwgroup_set_online);
-static struct bus_type ccwgroup_bus_type;
+/**
+ * ccwgroup_set_offline() - disable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the offline state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_OFFLINE)
+ goto out;
+ if (gdrv->set_offline)
+ ret = gdrv->set_offline(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_OFFLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_offline);
-static void
-__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+static ssize_t ccwgroup_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- int i;
- char str[8];
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ unsigned long value;
+ int ret;
- for (i = 0; i < gdev->count; i++) {
- sprintf(str, "cdev%d", i);
- sysfs_remove_link(&gdev->dev.kobj, str);
- sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+ device_lock(dev);
+ if (!dev->driver) {
+ ret = -EINVAL;
+ goto out;
}
-
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ goto out;
+
+ if (value == 1)
+ ret = ccwgroup_set_online(gdev);
+ else if (value == 0)
+ ret = ccwgroup_set_offline(gdev);
+ else
+ ret = -EINVAL;
+out:
+ device_unlock(dev);
+ return (ret == 0) ? count : ret;
+}
+
+static ssize_t ccwgroup_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int online;
+
+ online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
}
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentally created. Saves memory :)
*/
-static void ccwgroup_ungroup_callback(struct device *dev)
+static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
- device_unregister(dev);
+ device_unregister(&gdev->dev);
+ __ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
}
-static ssize_t
-ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t ccwgroup_ungroup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct ccwgroup_device *gdev;
- int rc;
-
- gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int rc = 0;
/* Prevent concurrent online/offline processing and ungrouping. */
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
@@ -98,49 +193,57 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const
rc = -EINVAL;
goto out;
}
- /* Note that we cannot unregister the device from one of its
- * attribute methods, so we have to use this roundabout approach.
- */
- rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+ if (device_remove_file_self(dev, attr))
+ ccwgroup_ungroup(gdev);
+ else
+ rc = -ENODEV;
out:
if (rc) {
- if (rc != -EAGAIN)
- /* Release onoff "lock" when ungrouping failed. */
- atomic_set(&gdev->onoff, 0);
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
}
-
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static struct attribute *ccwgroup_attrs[] = {
+ &dev_attr_online.attr,
+ &dev_attr_ungroup.attr,
+ NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+ .attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+ &ccwgroup_attr_group,
+ NULL,
+};
-static void
-ccwgroup_release (struct device *dev)
+static void ccwgroup_ungroup_workfn(struct work_struct *work)
{
- struct ccwgroup_device *gdev;
- int i;
+ struct ccwgroup_device *gdev =
+ container_of(work, struct ccwgroup_device, ungroup_work);
- gdev = to_ccwgroupdev(dev);
+ ccwgroup_ungroup(gdev);
+ put_device(&gdev->dev);
+}
- for (i = 0; i < gdev->count; i++) {
- if (gdev->cdev[i]) {
- if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
- dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
- put_device(&gdev->cdev[i]->dev);
- }
- }
- kfree(gdev);
+static void ccwgroup_release(struct device *dev)
+{
+ kfree(to_ccwgroupdev(dev));
}
-static int
-__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
char str[8];
int i, rc;
for (i = 0; i < gdev->count; i++) {
- rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
- "group_device");
+ rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
+ &gdev->dev.kobj, "group_device");
if (rc) {
for (--i; i >= 0; i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
@@ -150,8 +253,8 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
}
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
- rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
- str);
+ rc = sysfs_create_link(&gdev->dev.kobj,
+ &gdev->cdev[i]->dev.kobj, str);
if (rc) {
for (--i; i >= 0; i--) {
sprintf(str, "cdev%d", i);
@@ -166,9 +269,10 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
return 0;
}
-static int __get_next_bus_id(const char **buf, char *bus_id)
+static int __get_next_id(const char **buf, struct ccw_dev_id *id)
{
- int rc, len;
+ unsigned int cssid, ssid, devno;
+ int ret = 0, len;
char *start, *end;
start = (char *)*buf;
@@ -183,49 +287,40 @@ static int __get_next_bus_id(const char **buf, char *bus_id)
len = end - start + 1;
end++;
}
- if (len < CCW_BUS_ID_SIZE) {
- strlcpy(bus_id, start, len);
- rc = 0;
+ if (len <= CCW_BUS_ID_SIZE) {
+ if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+ ret = -EINVAL;
} else
- rc = -EINVAL;
- *buf = end;
- return rc;
-}
-
-static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
-{
- int cssid, ssid, devno;
+ ret = -EINVAL;
- /* Must be of form %x.%x.%04x */
- if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
- return 0;
- return 1;
+ if (!ret) {
+ id->ssid = ssid;
+ id->devno = devno;
+ }
+ *buf = end;
+ return ret;
}
/**
- * ccwgroup_create_from_string() - create and register a ccw group device
- * @root: parent device for the new device
- * @creator_id: identifier of creating driver
- * @cdrv: ccw driver of slave devices
+ * ccwgroup_create_dev() - create and register a ccw group device
+ * @parent: parent device for the new device
+ * @gdrv: driver for the new group device
* @num_devices: number of slave devices
* @buf: buffer containing comma separated bus ids of slave devices
*
- * Create and register a new ccw group device as a child of @root. Slave
- * devices are obtained from the list of bus ids given in @buf and must all
- * belong to @cdrv.
+ * Create and register a new ccw group device as a child of @parent. Slave
+ * devices are obtained from the list of bus ids given in @buf.
* Returns:
* %0 on success and an error code on failure.
* Context:
* non-atomic
*/
-int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
- struct ccw_driver *cdrv, int num_devices,
- const char *buf)
+int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
+ int num_devices, const char *buf)
{
struct ccwgroup_device *gdev;
+ struct ccw_dev_id dev_id;
int rc, i;
- char tmp_bus_id[CCW_BUS_ID_SIZE];
- const char *curr_buf;
gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
GFP_KERNEL);
@@ -235,77 +330,76 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
- gdev->creator_id = creator_id;
+ INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
gdev->count = num_devices;
gdev->dev.bus = &ccwgroup_bus_type;
- gdev->dev.parent = root;
+ gdev->dev.parent = parent;
gdev->dev.release = ccwgroup_release;
device_initialize(&gdev->dev);
- curr_buf = buf;
- for (i = 0; i < num_devices && curr_buf; i++) {
- rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
+ for (i = 0; i < num_devices && buf; i++) {
+ rc = __get_next_id(&buf, &dev_id);
if (rc != 0)
goto error;
- if (!__is_valid_bus_id(tmp_bus_id)) {
- rc = -EINVAL;
- goto error;
- }
- gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
+ gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
/*
* All devices have to be of the same type in
* order to be grouped.
*/
- if (!gdev->cdev[i]
- || gdev->cdev[i]->id.driver_info !=
+ if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
+ gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
+ gdev->cdev[i]->id.driver_info !=
gdev->cdev[0]->id.driver_info) {
rc = -EINVAL;
goto error;
}
/* Don't allow a device to belong to more than one group. */
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
rc = -EINVAL;
goto error;
}
dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
}
/* Check for sufficient number of bus ids. */
- if (i < num_devices && !curr_buf) {
+ if (i < num_devices) {
rc = -EINVAL;
goto error;
}
/* Check for trailing stuff. */
- if (i == num_devices && strlen(curr_buf) > 0) {
+ if (i == num_devices && strlen(buf) > 0) {
rc = -EINVAL;
goto error;
}
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
+ gdev->dev.groups = ccwgroup_attr_groups;
+ if (gdrv) {
+ gdev->dev.driver = &gdrv->driver;
+ rc = gdrv->setup ? gdrv->setup(gdev) : 0;
+ if (rc)
+ goto error;
+ }
rc = device_add(&gdev->dev);
if (rc)
goto error;
- get_device(&gdev->dev);
- rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
-
+ rc = __ccwgroup_create_symlinks(gdev);
if (rc) {
- device_unregister(&gdev->dev);
+ device_del(&gdev->dev);
goto error;
}
-
- rc = __ccwgroup_create_symlinks(gdev);
- if (!rc) {
- mutex_unlock(&gdev->reg_mutex);
- put_device(&gdev->dev);
- return 0;
- }
- device_remove_file(&gdev->dev, &dev_attr_ungroup);
- device_unregister(&gdev->dev);
+ mutex_unlock(&gdev->reg_mutex);
+ return 0;
error:
for (i = 0; i < num_devices; i++)
if (gdev->cdev[i]) {
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
put_device(&gdev->cdev[i]->dev);
gdev->cdev[i] = NULL;
}
@@ -313,10 +407,20 @@ error:
put_device(&gdev->dev);
return rc;
}
-EXPORT_SYMBOL(ccwgroup_create_from_string);
+EXPORT_SYMBOL(ccwgroup_create_dev);
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
- void *data);
+ void *data)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(data);
+
+ if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+ get_device(&gdev->dev);
+ schedule_work(&gdev->ungroup_work);
+ }
+
+ return NOTIFY_OK;
+}
static struct notifier_block ccwgroup_nb = {
.notifier_call = ccwgroup_notifier
@@ -348,181 +452,105 @@ module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
-static int
-ccwgroup_set_online(struct ccwgroup_device *gdev)
+static int ccwgroup_remove(struct device *dev)
{
- struct ccwgroup_driver *gdrv;
- int ret;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
- if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
- return -EAGAIN;
- if (gdev->state == CCWGROUP_ONLINE) {
- ret = 0;
- goto out;
- }
- if (!gdev->dev.driver) {
- ret = -EINVAL;
- goto out;
- }
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
- if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0))
- goto out;
+ if (!dev->driver)
+ return 0;
+ if (gdrv->remove)
+ gdrv->remove(gdev);
- gdev->state = CCWGROUP_ONLINE;
- out:
- atomic_set(&gdev->onoff, 0);
- return ret;
+ return 0;
}
-static int
-ccwgroup_set_offline(struct ccwgroup_device *gdev)
+static void ccwgroup_shutdown(struct device *dev)
{
- struct ccwgroup_driver *gdrv;
- int ret;
-
- if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
- return -EAGAIN;
- if (gdev->state == CCWGROUP_OFFLINE) {
- ret = 0;
- goto out;
- }
- if (!gdev->dev.driver) {
- ret = -EINVAL;
- goto out;
- }
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
- if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0))
- goto out;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
- gdev->state = CCWGROUP_OFFLINE;
- out:
- atomic_set(&gdev->onoff, 0);
- return ret;
+ if (!dev->driver)
+ return;
+ if (gdrv->shutdown)
+ gdrv->shutdown(gdev);
}
-static ssize_t
-ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static int ccwgroup_pm_prepare(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
- unsigned long value;
- int ret;
-
- if (!dev->driver)
- return -ENODEV;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- if (!try_module_get(gdrv->owner))
- return -EINVAL;
+ /* Fail while device is being set online/offline. */
+ if (atomic_read(&gdev->onoff))
+ return -EAGAIN;
- ret = strict_strtoul(buf, 0, &value);
- if (ret)
- goto out;
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
- if (value == 1)
- ret = ccwgroup_set_online(gdev);
- else if (value == 0)
- ret = ccwgroup_set_offline(gdev);
- else
- ret = -EINVAL;
-out:
- module_put(gdrv->owner);
- return (ret == 0) ? count : ret;
+ return gdrv->prepare ? gdrv->prepare(gdev) : 0;
}
-static ssize_t
-ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
+static void ccwgroup_pm_complete(struct device *dev)
{
- int online;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
- online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return;
- return sprintf(buf, online ? "1\n" : "0\n");
+ if (gdrv->complete)
+ gdrv->complete(gdev);
}
-static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
-
-static int
-ccwgroup_probe (struct device *dev)
+static int ccwgroup_pm_freeze(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- int ret;
-
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
- if ((ret = device_create_file(dev, &dev_attr_online)))
- return ret;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
- if (ret)
- device_remove_file(dev, &dev_attr_online);
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
- return ret;
+ return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}
-static int
-ccwgroup_remove (struct device *dev)
+static int ccwgroup_pm_thaw(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- device_remove_file(dev, &dev_attr_online);
- device_remove_file(dev, &dev_attr_ungroup);
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- if (!dev->driver)
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
-
- if (gdrv->remove)
- gdrv->remove(gdev);
-
- return 0;
+ return gdrv->thaw ? gdrv->thaw(gdev) : 0;
}
-static void ccwgroup_shutdown(struct device *dev)
+static int ccwgroup_pm_restore(struct device *dev)
{
- struct ccwgroup_device *gdev;
- struct ccwgroup_driver *gdrv;
-
- if (!dev->driver)
- return;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- gdev = to_ccwgroupdev(dev);
- gdrv = to_ccwgroupdrv(dev->driver);
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
- if (gdrv->shutdown)
- gdrv->shutdown(gdev);
+ return gdrv->restore ? gdrv->restore(gdev) : 0;
}
+static const struct dev_pm_ops ccwgroup_pm_ops = {
+ .prepare = ccwgroup_pm_prepare,
+ .complete = ccwgroup_pm_complete,
+ .freeze = ccwgroup_pm_freeze,
+ .thaw = ccwgroup_pm_thaw,
+ .restore = ccwgroup_pm_restore,
+};
+
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
- .match = ccwgroup_bus_match,
- .uevent = ccwgroup_uevent,
- .probe = ccwgroup_probe,
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
+ .pm = &ccwgroup_pm_ops,
};
-
-static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- struct device *dev = data;
-
- if (action == BUS_NOTIFY_UNBIND_DRIVER)
- device_schedule_callback(dev, ccwgroup_ungroup_callback);
-
- return NOTIFY_OK;
-}
-
-
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
@@ -533,14 +561,12 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
- cdriver->driver.name = cdriver->name;
- cdriver->driver.owner = cdriver->owner;
return driver_register(&cdriver->driver);
}
+EXPORT_SYMBOL(ccwgroup_driver_register);
-static int
-__ccwgroup_match_all(struct device *dev, void *data)
+static int __ccwgroup_match_all(struct device *dev, void *data)
{
return 1;
}
@@ -556,20 +582,16 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */
- get_driver(&cdriver->driver);
while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
__ccwgroup_match_all))) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- mutex_lock(&gdev->reg_mutex);
- __ccwgroup_remove_symlinks(gdev);
- device_unregister(dev);
- mutex_unlock(&gdev->reg_mutex);
+ ccwgroup_ungroup(gdev);
put_device(dev);
}
- put_driver(&cdriver->driver);
driver_unregister(&cdriver->driver);
}
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
@@ -584,25 +606,7 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
-
-static struct ccwgroup_device *
-__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
-{
- struct ccwgroup_device *gdev;
-
- gdev = dev_get_drvdata(&cdev->dev);
- if (gdev) {
- if (get_device(&gdev->dev)) {
- mutex_lock(&gdev->reg_mutex);
- if (device_is_registered(&gdev->dev))
- return gdev;
- mutex_unlock(&gdev->reg_mutex);
- put_device(&gdev->dev);
- }
- return NULL;
- }
- return NULL;
-}
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
@@ -619,17 +623,19 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
- gdev = __ccwgroup_get_gdev_by_cdev(cdev);
- if (gdev) {
- __ccwgroup_remove_symlinks(gdev);
- device_unregister(&gdev->dev);
- mutex_unlock(&gdev->reg_mutex);
- put_device(&gdev->dev);
+ spin_lock_irq(cdev->ccwlock);
+ gdev = dev_get_drvdata(&cdev->dev);
+ if (!gdev) {
+ spin_unlock_irq(cdev->ccwlock);
+ return;
}
+ /* Get ccwgroup device reference for local processing. */
+ get_device(&gdev->dev);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Unregister group device. */
+ ccwgroup_ungroup(gdev);
+ /* Release ccwgroup device reference for local processing. */
+ put_device(&gdev->dev);
}
-
-MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ccwgroup_driver_register);
-EXPORT_SYMBOL(ccwgroup_driver_unregister);
-EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_LICENSE("GPL");
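
Grouping is now driven through ccwgroup_create_dev() instead of the removed
creator-id matching. A sketch of a driver-level "group" attribute feeding a
comma-separated bus-id list to the new interface; my_root_dev and
my_ccwgroup_driver are assumptions set up elsewhere (qeth and ctcm contain
the real callers):

	#include <linux/device.h>
	#include <asm/ccwgroup.h>

	static struct device *my_root_dev;		  /* assumed */
	static struct ccwgroup_driver my_ccwgroup_driver; /* assumed */

	static ssize_t group_store(struct device_driver *ddrv,
				   const char *buf, size_t count)
	{
		/* buf holds e.g. "0.0.f500,0.0.f501,0.0.f502" */
		int rc = ccwgroup_create_dev(my_root_dev,
					     &my_ccwgroup_driver, 3, buf);

		return rc ? rc : count;
	}
	static DRIVER_ATTR(group, 0200, NULL, group_store);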
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 00000000000..07676c22d51
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,367 @@
+/*
+ * Handling of internal CCW device requests.
+ *
+ * Copyright IBM Corp. 2009, 2011
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
+ */
+int lpm_adjust(int lpm, int mask)
+{
+ while (lpm && ((lpm & mask) == 0))
+ lpm >>= 1;
+ return lpm;
+}
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (!req->singlepath) {
+ req->mask = 0;
+ goto out;
+ }
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask >> 1, req->lpm);
+out:
+ return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->done)
+ return;
+ req->done = 1;
+ ccw_device_set_timeout(cdev, 0);
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ if (rc && rc != -ENODEV && req->drc)
+ rc = req->drc;
+ req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw1 *cp = req->cp;
+ int rc = -EACCES;
+
+ while (req->mask) {
+ if (req->retries-- == 0) {
+ /* Retries exhausted, try next path. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Perform start function. */
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ rc = cio_start(sch, cp, (u8) req->mask);
+ if (rc == 0) {
+ /* I/O started successfully. */
+ ccw_device_set_timeout(cdev, req->timeout);
+ return;
+ }
+ if (rc == -ENODEV) {
+ /* Permanent device error. */
+ break;
+ }
+ if (rc == -EACCES) {
+ /* Permanent path error. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Temporary improper status. */
+ rc = cio_clear(sch);
+ if (rc)
+ break;
+ return;
+ }
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->private->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->singlepath) {
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ } else
+ req->mask = req->lpm;
+
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask, req->lpm);
+ req->drc = 0;
+ req->done = 0;
+ req->cancel = 0;
+ if (!req->mask)
+ goto out_nopath;
+ ccwreq_do(cdev);
+ return;
+
+out_nopath:
+ ccwreq_stop(cdev, -EACCES);
+}
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->private->req. Return non-zero
+ * if the request has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc;
+
+ if (req->done)
+ return 1;
+ req->cancel = 1;
+ rc = cio_clear(sch);
+ if (rc)
+ ccwreq_stop(cdev, rc);
+ return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+ struct irb *irb = &cdev->private->irb;
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ enum uc_todo todo;
+
+ /* Perform BASIC SENSE if needed. */
+ if (ccw_device_accumulate_and_sense(cdev, lcirb))
+ return IO_RUNNING;
+ /* Check for halt/clear interrupt. */
+ if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ return IO_KILLED;
+ /* Check for path error. */
+ if (scsw->cc == 3 || scsw->pno)
+ return IO_PATH_ERROR;
+ /* Handle BASIC SENSE data. */
+ if (irb->esw.esw0.erw.cons) {
+ CIO_TRACE_EVENT(2, "sensedata");
+ CIO_HEX_EVENT(2, &cdev->private->dev_id,
+ sizeof(struct ccw_dev_id));
+ CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ /* Check for command reject. */
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ return IO_REJECTED;
+ /* Ask the driver what to do */
+ if (cdev->drv && cdev->drv->uc_handler) {
+ todo = cdev->drv->uc_handler(cdev, lcirb);
+ CIO_TRACE_EVENT(2, "uc_response");
+ CIO_HEX_EVENT(2, &todo, sizeof(todo));
+ switch (todo) {
+ case UC_TODO_RETRY:
+ return IO_STATUS_ERROR;
+ case UC_TODO_RETRY_ON_NEW_PATH:
+ return IO_PATH_ERROR;
+ case UC_TODO_STOP:
+ return IO_REJECTED;
+ default:
+ return IO_STATUS_ERROR;
+ }
+ }
+ /* Assume that unexpected SENSE data implies an error. */
+ return IO_STATUS_ERROR;
+ }
+ /* Check for channel errors. */
+ if (scsw->cstat != 0)
+ return IO_STATUS_ERROR;
+ /* Check for device errors. */
+ if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return IO_STATUS_ERROR;
+ /* Check for final state. */
+ if (!(scsw->dstat & DEV_STAT_DEV_END))
+ return IO_RUNNING;
+ /* Check for other improper status. */
+ if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+ return IO_STATUS_ERROR;
+ return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct {
+ struct ccw_dev_id dev_id;
+ u16 retries;
+ u8 lpm;
+ u8 status;
+ } __attribute__ ((packed)) data;
+ data.dev_id = cdev->private->dev_id;
+ data.retries = req->retries;
+ data.lpm = (u8) req->mask;
+ data.status = (u8) status;
+ CIO_TRACE_EVENT(2, "reqstat");
+ CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure.
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+ struct irb *irb = &__get_cpu_var(cio_irb);
+ struct ccw_request *req = &cdev->private->req;
+ enum io_status status;
+ int rc = -EOPNOTSUPP;
+
+ /* Check status of I/O request. */
+ status = ccwreq_status(cdev, irb);
+ if (req->filter)
+ status = req->filter(cdev, req->data, irb, status);
+ if (status != IO_RUNNING)
+ ccw_device_set_timeout(cdev, 0);
+ if (status != IO_DONE && status != IO_RUNNING)
+ ccwreq_log_status(cdev, status);
+ switch (status) {
+ case IO_DONE:
+ break;
+ case IO_RUNNING:
+ return;
+ case IO_REJECTED:
+ goto err;
+ case IO_PATH_ERROR:
+ goto out_next_path;
+ case IO_STATUS_ERROR:
+ goto out_restart;
+ case IO_KILLED:
+ /* Check if request was cancelled on purpose. */
+ if (req->cancel) {
+ rc = -EIO;
+ goto err;
+ }
+ goto out_restart;
+ }
+ /* Check back with request initiator. */
+ if (!req->check)
+ goto out;
+ switch (req->check(cdev, req->data)) {
+ case 0:
+ break;
+ case -EAGAIN:
+ goto out_restart;
+ case -EACCES:
+ goto out_next_path;
+ default:
+ goto err;
+ }
+out:
+ ccwreq_stop(cdev, 0);
+ return;
+
+out_next_path:
+ /* Try next path and restart I/O. */
+ if (!ccwreq_next_path(cdev)) {
+ rc = -EACCES;
+ goto err;
+ }
+out_restart:
+ /* Restart. */
+ ccwreq_do(cdev);
+ return;
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc = -ENODEV, chp;
+
+ if (cio_update_schib(sch))
+ goto err;
+
+ for (chp = 0; chp < 8; chp++) {
+ if ((0x80 >> chp) & sch->schib.pmcw.lpum)
+ pr_warning("%s: No interrupt was received within %lus "
+ "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+ dev_name(&cdev->dev), req->timeout / HZ,
+ scsw_cstat(&sch->schib.scsw),
+ scsw_dstat(&sch->schib.scsw),
+ sch->schid.cssid,
+ sch->schib.pmcw.chpid[chp]);
+ }
+
+ if (!ccwreq_next_path(cdev)) {
+ /* set the final return code for this request */
+ req->drc = -ETIME;
+ }
+ rc = cio_clear(sch);
+ if (rc)
+ goto err;
+ return;
+
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle notoper during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+ ccwreq_stop(cdev, -ENODEV);
+}
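
The request machinery is internal to the cio layer; device_id.c and
device_pgid.c are its intended users. A condensed sketch of the calling
convention, with the scratch CCW area (iccws) and the callback assumed for
illustration (field names as in struct ccw_request above):

	static void nop_callback(struct ccw_device *cdev, void *data, int rc)
	{
		/* rc is 0 on success or the final -errno from ccwreq_stop() */
	}

	static void start_nop(struct ccw_device *cdev)
	{
		struct ccw_request *req = &cdev->private->req;
		struct ccw1 *cp = cdev->private->iccws; /* assumed scratch CCWs */

		cp->cmd_code = CCW_CMD_NOOP;
		cp->cda = 0;
		cp->count = 0;
		cp->flags = CCW_FLAG_SLI;

		memset(req, 0, sizeof(*req));
		req->cp = cp;
		req->timeout = 5 * HZ;
		req->maxretries = 3;
		req->lpm = 0x80;	/* try the first path only */
		req->singlepath = 1;
		req->callback = nop_callback;
		ccw_request_start(cdev);
	}

Interrupts, timeouts and device-gone events for the started request are then
routed to ccw_request_handler(), ccw_request_timeout() and
ccw_request_notoper() by the device state machine.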
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 3e5f304ad88..d497aa05a72 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.c
- *
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -10,11 +8,14 @@
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/sched.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>
@@ -53,19 +54,13 @@ static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
static wait_queue_head_t cfg_wait_queue;
-/* Return channel_path struct for given chpid. */
-static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
-{
- return channel_subsystems[chpid.cssid]->chps[chpid.id];
-}
-
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
chpid_to_chp(chpid)->state = onoff;
}
-/* On succes return 0 if channel-path is varied offline, 1 if it is varied
+/* On success return 0 if channel-path is varied offline, 1 if it is varied
* online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
@@ -134,7 +129,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
/*
* Channel measurement related functions
*/
-static ssize_t chp_measurement_chars_read(struct kobject *kobj,
+static ssize_t chp_measurement_chars_read(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -181,7 +177,7 @@ static void chp_measurement_copy_block(struct cmg_entry *buf,
} while (reference_buf.values[0] != buf->values[0]);
}
-static ssize_t chp_measurement_read(struct kobject *kobj,
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -239,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
+ int status;
- if (!chp)
- return 0;
- return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
- sprintf(buf, "offline\n"));
+ mutex_lock(&chp->lock);
+ status = chp->state;
+ mutex_unlock(&chp->lock);
+
+ return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}
static ssize_t chp_status_write(struct device *dev,
@@ -259,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev,
if (!num_args)
return count;
- if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
+ if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+ mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 1);
- else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
+ mutex_unlock(&cp->lock);
+ } else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+ mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 0);
- else
+ mutex_unlock(&cp->lock);
+ } else
error = -EINVAL;
return error < 0 ? error : count;
-
}
static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
@@ -313,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
+ u8 type;
- if (!chp)
- return 0;
- return sprintf(buf, "%x\n", chp->desc.desc);
+ mutex_lock(&chp->lock);
+ type = chp->desc.desc;
+ mutex_unlock(&chp->lock);
+ return sprintf(buf, "%x\n", type);
}
static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -349,18 +352,57 @@ static ssize_t chp_shared_show(struct device *dev,
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
&dev_attr_cmg.attr,
&dev_attr_shared.attr,
+ &dev_attr_chid.attr,
+ &dev_attr_chid_external.attr,
NULL,
};
-
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
};
+static const struct attribute_group *chp_attr_groups[] = {
+ &chp_attr_group,
+ NULL,
+};
static void chp_release(struct device *dev)
{
@@ -371,6 +413,26 @@ static void chp_release(struct device *dev)
}
/**
+ * chp_update_desc - update channel-path description
+ * @chp - channel-path
+ *
+ * Update the channel-path description of the specified channel-path.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_update_desc(struct channel_path *chp)
+{
+ int rc;
+
+ rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
+ if (rc)
+ return rc;
+
+ rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+
+ return rc;
+}
+
+/**
* chp_new - register a new channel-path
* @chpid - channel-path ID
*
@@ -392,11 +454,12 @@ int chp_new(struct chp_id chpid)
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
+ chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
- dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
+ mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
- ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+ ret = chp_update_desc(chp);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {
@@ -411,24 +474,20 @@ int chp_new(struct chp_id chpid)
} else {
chp->cmg = -1;
}
+ dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* make it known to the system */
ret = device_register(&chp->dev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
chpid.cssid, chpid.id, ret);
- goto out_free;
- }
- ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
- if (ret) {
- device_unregister(&chp->dev);
+ put_device(&chp->dev);
goto out;
}
mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
- sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
device_unregister(&chp->dev);
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
@@ -450,7 +509,7 @@ out:
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel-path ID. Return %NULL on error.
*/
-void *chp_get_chp_desc(struct chp_id chpid)
+struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid)
{
struct channel_path *chp;
struct channel_path_desc *desc;
@@ -461,7 +520,10 @@ void *chp_get_chp_desc(struct chp_id chpid)
desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
if (!desc)
return NULL;
+
+ mutex_lock(&chp->lock);
memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+ mutex_unlock(&chp->lock);
return desc;
}
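
The copy returned by chp_get_chp_desc() is kmalloc'ed and owned by the caller, so it must be released with kfree(). A minimal usage sketch (example_use_desc() is a hypothetical name, not part of the patch):

static void example_use_desc(struct chp_id chpid)
{
	struct channel_path_desc *desc = chp_get_chp_desc(chpid);

	if (!desc)
		return;
	/* ... inspect desc->flags, desc->desc, desc->chpid ... */
	kfree(desc);
}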
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 26c3d224617..4efd5b867cc 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.h
- *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2010
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
@@ -10,6 +8,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <asm/chpid.h>
#include "chsc.h"
#include "css.h"
@@ -40,22 +39,31 @@ static inline int chp_test_bit(u8 *bitmap, int num)
struct channel_path {
+ struct device dev;
struct chp_id chpid;
+ struct mutex lock; /* Serialize access to below members. */
int state;
struct channel_path_desc desc;
+ struct channel_path_desc_fmt1 desc_fmt1;
/* Channel-measurement related stuff: */
int cmg;
int shared;
void *cmg_chars;
- struct device dev;
};
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+ return channel_subsystems[chpid.cssid]->chps[chpid.id];
+}
+
int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
-void *chp_get_chp_desc(struct chp_id chpid);
+struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
+int chp_update_desc(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 883f16f96f2..e3bf885f4a6 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -15,11 +14,13 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/pci.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
+#include <asm/isc.h>
#include "css.h"
#include "cio.h"
@@ -29,6 +30,8 @@
#include "chsc.h"
static void *sei_page;
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
/**
* chsc_error_from_response() - convert a chsc response to an error
@@ -47,9 +50,16 @@ int chsc_error_from_response(int response)
case 0x0007:
case 0x0008:
case 0x000a:
+ case 0x0104:
return -EINVAL;
case 0x0004:
return -EOPNOTSUPP;
+ case 0x000b:
+ case 0x0107: /* "Channel busy" for the op 0x003d */
+ return -EBUSY;
+ case 0x0100:
+ case 0x0102:
+ return -ENOMEM;
default:
return -EIO;
}
@@ -82,17 +92,15 @@ struct chsc_ssd_area {
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
- unsigned long page;
struct chsc_ssd_area *ssd_area;
int ccode;
int ret;
int i;
int mask;
- page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
- return -ENOMEM;
- ssd_area = (struct chsc_ssd_area *) page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ ssd_area = chsc_page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
ssd_area->ssid = schid.ssid;
@@ -103,25 +111,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
/* Check response. */
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out_free;
+ goto out;
}
ret = chsc_error_from_response(ssd_area->response.code);
if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
- goto out_free;
+ goto out;
}
if (!ssd_area->sch_valid) {
ret = -ENODEV;
- goto out_free;
+ goto out;
}
/* Copy data */
ret = 0;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
(ssd_area->st != SUBCHANNEL_TYPE_MSG))
- goto out_free;
+ goto out;
ssd->path_mask = ssd_area->path_mask;
ssd->fla_valid_mask = ssd_area->fla_valid_mask;
for (i = 0; i < 8; i++) {
@@ -133,11 +141,70 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
if (ssd_area->fla_valid_mask & mask)
ssd->fla[i] = ssd_area->fla[i];
}
-out_free:
- free_page(page);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
+/**
+ * chsc_ssqd() - store subchannel QDIO data (SSQD)
+ * @schid: id of the subchannel on which SSQD is performed
+ * @ssqd: request and response block for SSQD
+ *
+ * Returns 0 on success.
+ */
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
+{
+ memset(ssqd, 0, sizeof(*ssqd));
+ ssqd->request.length = 0x0010;
+ ssqd->request.code = 0x0024;
+ ssqd->first_sch = schid.sch_no;
+ ssqd->last_sch = schid.sch_no;
+ ssqd->ssid = schid.ssid;
+
+ if (chsc(ssqd))
+ return -EIO;
+
+ return chsc_error_from_response(ssqd->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_ssqd);
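
chsc_ssqd() leaves allocation to the caller; the driver consistently uses zeroed GFP_DMA pages for CHSC request blocks, so a caller sketch might look like this (example_get_ssqd() is a hypothetical name, not part of the patch):

static int example_get_ssqd(struct subchannel_id schid,
			    struct qdio_ssqd_desc *desc)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	ssqd = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!ssqd)
		return -ENOMEM;
	rc = chsc_ssqd(schid, ssqd);
	if (!rc)
		memcpy(desc, &ssqd->qdio_ssqd, sizeof(*desc));
	free_page((unsigned long)ssqd);
	return rc;
}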
+
+/**
+ * chsc_sadc() - set adapter device controls (SADC)
+ * @schid: id of the subchannel on which SADC is performed
+ * @scssc: request and response block for SADC
+ * @summary_indicator_addr: summary indicator address
+ * @subchannel_indicator_addr: subchannel indicator address
+ *
+ * Returns 0 on success.
+ */
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+{
+ memset(scssc, 0, sizeof(*scssc));
+ scssc->request.length = 0x0fe0;
+ scssc->request.code = 0x0021;
+ scssc->operation_code = 0;
+
+ scssc->summary_indicator_addr = summary_indicator_addr;
+ scssc->subchannel_indicator_addr = subchannel_indicator_addr;
+
+ scssc->ks = PAGE_DEFAULT_KEY >> 4;
+ scssc->kc = PAGE_DEFAULT_KEY >> 4;
+ scssc->isc = QDIO_AIRQ_ISC;
+ scssc->schid = schid;
+
+ /* enable the time delay disablement facility */
+ if (css_general_characteristics.aif_tdd)
+ scssc->word_with_d_bit = 0x10000000;
+
+ if (chsc(scssc))
+ return -EIO;
+
+ return chsc_error_from_response(scssc->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_sadc);
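
chsc_sadc() follows the same convention; the intended caller is presumably qdio's thin-interrupt setup, which installs its summary and subchannel indicator addresses. A hedged sketch (example_set_indicators() is a hypothetical name; the indicator addresses are assumed to be prepared by the caller):

static int example_set_indicators(struct subchannel_id schid,
				  u64 summary_addr, u64 subch_ind_addr)
{
	struct chsc_scssc_area *scssc;
	int rc;

	scssc = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scssc)
		return -ENOMEM;
	rc = chsc_sadc(schid, scssc, summary_addr, subch_ind_addr);
	free_page((unsigned long)scssc);
	return rc;
}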
+
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -171,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid)
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
-static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- /*
- * We don't know the device yet, but since a path
- * may be available now to the device we'll have
- * to do recognition again.
- * Since we don't have any idea about which chpid
- * that beast may be on we'll have to do a stsch
- * on all devices, grr...
- */
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
-
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -221,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link)
* The more information we have (info), the less scanning
* we will have to do.
*/
- for_each_subchannel_staged(__s390_process_res_acc,
- s390_process_res_acc_new_sch, link);
+ for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+ css_schedule_reprobe();
}
static int
@@ -255,26 +302,46 @@ __get_chpid_from_lir(void *data)
return (u16) (lir->indesc[0]&0x000000ff);
}
-struct chsc_sei_area {
- struct chsc_header request;
+struct chsc_sei_nt0_area {
+ u8 flags;
+ u8 vf; /* validity flags */
+ u8 rs; /* reporting source */
+ u8 cc; /* content code */
+ u16 fla; /* full link address */
+ u16 rsid; /* reporting source id */
u32 reserved1;
u32 reserved2;
- u32 reserved3;
- struct chsc_header response;
- u32 reserved4;
- u8 flags;
- u8 vf; /* validity flags */
- u8 rs; /* reporting source */
- u8 cc; /* content code */
- u16 fla; /* full link address */
- u16 rsid; /* reporting source id */
- u32 reserved5;
- u32 reserved6;
- u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
/* ccdf has to be big enough for a link-incident record */
-} __attribute__ ((packed));
-
-static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+ u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+ u8 flags; /* p and v bit */
+ u8 reserved1;
+ u8 reserved2;
+ u8 cc; /* content code */
+ u32 reserved3[13];
+ u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0 (1ULL << 63)
+#define CHSC_SEI_NT2 (1ULL << 61)
+
+struct chsc_sei {
+ struct chsc_header request;
+ u32 reserved1;
+ u64 ntsm; /* notification type mask */
+ struct chsc_header response;
+ u32 :24;
+ u8 nt;
+ union {
+ struct chsc_sei_nt0_area nt0_area;
+ struct chsc_sei_nt2_area nt2_area;
+ u8 nt_area[PAGE_SIZE - 24];
+ } u;
+} __packed;
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
struct chp_id chpid;
int id;
@@ -293,7 +360,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
@@ -325,13 +392,43 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
s390_process_res_acc(&link);
}
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+ u8 *data;
+ int num;
+
+ CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+ if (sei_area->rs != 0)
+ return;
+ data = sei_area->ccdf;
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data, num))
+ continue;
+ chpid.id = num;
+
+ CIO_CRW_EVENT(4, "Update information for channel path "
+ "%x.%02x\n", chpid.cssid, chpid.id);
+ chp = chpid_to_chp(chpid);
+ if (!chp) {
+ chp_new(chpid);
+ continue;
+ }
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
+}
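
Here the ccdf acts as a 256-bit map with one bit per CHPID, most significant bit first, which is what chp_test_bit() in chp.h decodes. A sketch of the assumed bit arithmetic (example_chp_test_bit() is a hypothetical mirror, not part of the patch):

static inline int example_chp_test_bit(u8 *bitmap, int num)
{
	int byte = num >> 3;		/* eight CHPIDs per byte */
	int mask = 0x80 >> (num & 7);	/* bit 0 is the MSB */

	return (bitmap[byte] & mask) != 0;
}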
+
struct chp_config_data {
u8 map[32];
u8 op;
u8 pc;
};
-static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
@@ -363,34 +460,139 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
- /* Check if we might have lost some information. */
- if (sei_area->flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm change notification\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_update_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: updating change notification"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm available information\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_process_availability_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: process availability information"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
+{
+ switch (sei_area->cc) {
+ case 1:
+ zpci_event_error(sei_area->ccdf);
+ break;
+ case 2:
+ zpci_event_availability(sei_area->ccdf);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
+ sei_area->cc);
+ break;
}
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
/* which kind of information was stored? */
switch (sei_area->cc) {
case 1: /* link incident*/
chsc_process_sei_link_incident(sei_area);
break;
- case 2: /* i/o resource accessibiliy */
+ case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
+ case 7: /* channel-path-availability information */
+ chsc_process_sei_chp_avail(sei_area);
+ break;
case 8: /* channel-path-configuration notification */
chsc_process_sei_chp_config(sei_area);
break;
+ case 12: /* scm change notification */
+ chsc_process_sei_scm_change(sei_area);
+ break;
+ case 14: /* scm available notification */
+ chsc_process_sei_scm_avail(sei_area);
+ break;
default: /* other stuff */
- CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
break;
}
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
+}
+
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+{
+ static int ntsm_unsupported;
+
+ while (true) {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code != 0x0001) {
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
+ break;
+ }
+
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
}
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through the machine check handler
+ * thread, so no locking is needed.
+ */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei_area *sei_area;
+ struct chsc_sei *sei = sei_page;
if (overflow) {
css_schedule_eval_all();
@@ -400,29 +602,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
- if (!sei_page)
- return;
- /* Access to sei_page is serialized through machine check handler
- * thread, so no need for locking. */
- sei_area = sei_page;
CIO_TRACE_EVENT(2, "prcss");
- do {
- memset(sei_area, 0, sizeof(*sei_area));
- sei_area->request.length = 0x0010;
- sei_area->request.code = 0x000e;
- if (chsc(sei_area))
- break;
-
- if (sei_area->response.code == 0x0001) {
- CIO_CRW_EVENT(4, "chsc: sei successful\n");
- chsc_process_sei(sei_area);
- } else {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei_area->response.code);
- break;
- }
- } while (sei_area->flags & 0x80);
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
@@ -440,6 +622,7 @@ void chsc_chp_online(struct chp_id chpid)
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
+ css_schedule_reprobe();
}
}
@@ -474,19 +657,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
return 0;
}
-static int
-__s390_vary_chpid_on(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
* @chpid: channel-path ID
@@ -494,22 +664,22 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
- struct chp_link link;
+ struct channel_path *chp = chpid_to_chp(chpid);
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* Redo PathVerification on the devices the chpid connects to
*/
-
- if (on)
+ if (on) {
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
- else
+ NULL, &chpid);
+ css_schedule_reprobe();
+ } else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
@@ -549,8 +719,7 @@ cleanup:
return ret;
}
-static int
-__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
struct {
struct chsc_header request;
@@ -571,19 +740,23 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
} __attribute__ ((packed)) *secm_area;
int ret, ccode;
- secm_area = page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ secm_area = chsc_page;
secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016;
- secm_area->key = PAGE_DEFAULT_KEY;
+ secm_area->key = PAGE_DEFAULT_KEY >> 4;
secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
secm_area->operation_code = enable ? 0 : 1;
ccode = chsc(secm_area);
- if (ccode > 0)
- return (ccode == 3) ? -ENODEV : -EBUSY;
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
switch (secm_area->response.code) {
case 0x0102:
@@ -596,37 +769,32 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
- void *secm_area;
int ret;
- secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!secm_area)
- return -ENOMEM;
-
if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!css->cub_addr1 || !css->cub_addr2) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
- free_page((unsigned long)secm_area);
return -ENOMEM;
}
}
- ret = __chsc_do_secm(css, enable, secm_area);
+ ret = __chsc_do_secm(css, enable);
if (!ret) {
css->cm_enabled = enable;
if (css->cm_enabled) {
ret = chsc_add_cmg_attr(css);
if (ret) {
- memset(secm_area, 0, PAGE_SIZE);
- __chsc_do_secm(css, 0, secm_area);
+ __chsc_do_secm(css, 0);
css->cm_enabled = 0;
}
} else
@@ -636,44 +804,24 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
- free_page((unsigned long)secm_area);
return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
- int c, int m,
- struct chsc_response_struct *resp)
+ int c, int m, void *page)
{
+ struct chsc_scpd *scpd_area;
int ccode, ret;
- struct {
- struct chsc_header request;
- u32 : 2;
- u32 m : 1;
- u32 c : 1;
- u32 fmt : 4;
- u32 cssid : 8;
- u32 : 4;
- u32 rfmt : 4;
- u32 first_chpid : 8;
- u32 : 24;
- u32 last_chpid : 8;
- u32 zeroes1;
- struct chsc_header response;
- u8 data[PAGE_SIZE - 20];
- } __attribute__ ((packed)) *scpd_area;
-
if ((rfmt == 1) && !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
- scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scpd_area)
- return -ENOMEM;
+ memset(page, 0, PAGE_SIZE);
+ scpd_area = page;
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
-
scpd_area->cssid = chpid.cssid;
scpd_area->first_chpid = chpid.id;
scpd_area->last_chpid = chpid.id;
@@ -683,20 +831,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
scpd_area->rfmt = rfmt;
ccode = chsc(scpd_area);
- if (ccode > 0) {
- ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out;
- }
+ if (ccode > 0)
+ return (ccode == 3) ? -ENODEV : -EBUSY;
ret = chsc_error_from_response(scpd_area->response.code);
- if (ret == 0)
- /* Success. */
- memcpy(resp, &scpd_area->response, scpd_area->response.length);
- else
+ if (ret)
CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
-out:
- free_page((unsigned long)scpd_area);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
@@ -705,17 +846,39 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
int ret;
- chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
- if (!chsc_resp)
- return -ENOMEM;
- ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
+ if (ret)
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc)
+{
+ struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
if (ret)
- goto out_free;
- memcpy(desc, &chsc_resp->data, chsc_resp->length);
-out_free:
- kfree(chsc_resp);
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -723,33 +886,22 @@ static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
- switch (chp->cmg) {
- case 2:
- case 3:
- chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
- GFP_KERNEL);
- if (chp->cmg_chars) {
- int i, mask;
- struct cmg_chars *cmg_chars;
-
- cmg_chars = chp->cmg_chars;
- for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
- mask = 0x80 >> (i + 3);
- if (cmcv & mask)
- cmg_chars->values[i] = chars->values[i];
- else
- cmg_chars->values[i] = 0;
- }
- }
- break;
- default:
- /* No cmg-dependent data. */
- break;
+ struct cmg_chars *cmg_chars;
+ int i, mask;
+
+ cmg_chars = chp->cmg_chars;
+ for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+ mask = 0x80 >> (i + 3);
+ if (cmcv & mask)
+ cmg_chars->values[i] = chars->values[i];
+ else
+ cmg_chars->values[i] = 0;
}
}
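
As a worked example of the mask above: for i = 0..4, 0x80 >> (i + 3) yields 0x10, 0x08, 0x04, 0x02 and 0x01, so the low five bits of cmcv select which of the NR_MEASUREMENT_CHARS values are valid; cmcv = 0x14, say, keeps values[0] and values[2] and zeroes the rest.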
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
+ struct cmg_chars *cmg_chars;
int ccode, ret;
struct {
@@ -773,13 +925,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
- scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scmc_area)
+ chp->cmg_chars = NULL;
+ cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+ if (!cmg_chars)
return -ENOMEM;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
-
scmc_area->first_chpid = chp->chpid.id;
scmc_area->last_chpid = chp->chpid.id;
@@ -790,52 +945,63 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
}
ret = chsc_error_from_response(scmc_area->response.code);
- if (ret == 0) {
- /* Success. */
- if (!scmc_area->not_valid) {
- chp->cmg = scmc_area->cmg;
- chp->shared = scmc_area->shared;
- chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
- (struct cmg_chars *)
- &scmc_area->data);
- } else {
- chp->cmg = -1;
- chp->shared = -1;
- }
- } else {
+ if (ret) {
CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
+ goto out;
+ }
+ if (scmc_area->not_valid) {
+ chp->cmg = -1;
+ chp->shared = -1;
+ goto out;
}
+ chp->cmg = scmc_area->cmg;
+ chp->shared = scmc_area->shared;
+ if (chp->cmg != 2 && chp->cmg != 3) {
+ /* No cmg-dependent data. */
+ goto out;
+ }
+ chp->cmg_chars = cmg_chars;
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+ (struct cmg_chars *) &scmc_area->data);
out:
- free_page((unsigned long)scmc_area);
+ spin_unlock_irq(&chsc_page_lock);
+ if (!chp->cmg_chars)
+ kfree(cmg_chars);
+
return ret;
}
-int __init chsc_alloc_sei_area(void)
+int __init chsc_init(void)
{
int ret;
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sei_page) {
- CIO_MSG_EVENT(0, "Can't allocate page for processing of "
- "chsc machine checks!\n");
- return -ENOMEM;
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sei_page || !chsc_page) {
+ ret = -ENOMEM;
+ goto out_err;
}
ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
- kfree(sei_page);
+ goto out_err;
+ return ret;
+out_err:
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
return ret;
}
-void __init chsc_free_sei_area(void)
+void __init chsc_init_cleanup(void)
{
crw_unregister_handler(CRW_RSC_CSS);
- kfree(sei_page);
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
}
-int __init
-chsc_enable_facility(int operation_code)
+int chsc_enable_facility(int operation_code)
{
+ unsigned long flags;
int ret;
struct {
struct chsc_header request;
@@ -852,9 +1018,9 @@ chsc_enable_facility(int operation_code)
u32 reserved6:24;
} __attribute__ ((packed)) *sda_area;
- sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!sda_area)
- return -ENOMEM;
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sda_area = chsc_page;
sda_area->request.length = 0x0400;
sda_area->request.code = 0x0031;
sda_area->operation_code = operation_code;
@@ -875,8 +1041,8 @@ chsc_enable_facility(int operation_code)
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
operation_code, sda_area->response.code);
- out:
- free_page((unsigned long)sda_area);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -895,13 +1061,12 @@ chsc_determine_css_characteristics(void)
struct chsc_header response;
u32 reserved4;
u32 general_char[510];
- u32 chsc_char[518];
+ u32 chsc_char[508];
} __attribute__ ((packed)) *scsc_area;
- scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scsc_area)
- return -ENOMEM;
-
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scsc_area = chsc_page;
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
@@ -921,7 +1086,7 @@ chsc_determine_css_characteristics(void)
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
- free_page ((unsigned long) scsc_area);
+ spin_unlock_irq(&chsc_page_lock);
return result;
}
@@ -976,3 +1141,110 @@ int chsc_sstpi(void *page, void *result, size_t size)
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
+int chsc_siosl(struct subchannel_id schid)
+{
+ struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+ } __attribute__ ((packed)) *siosl_area;
+ unsigned long flags;
+ int ccode;
+ int rc;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ siosl_area = chsc_page;
+ siosl_area->request.length = 0x0010;
+ siosl_area->request.code = 0x0046;
+ siosl_area->word1 = 0x80000000;
+ siosl_area->sid = schid;
+
+ ccode = chsc(siosl_area);
+ if (ccode > 0) {
+ if (ccode == 3)
+ rc = -ENODEV;
+ else
+ rc = -EBUSY;
+ CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+ schid.ssid, schid.sch_no, ccode);
+ goto out;
+ }
+ rc = chsc_error_from_response(siosl_area->response.code);
+ if (rc)
+ CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ siosl_area->response.code);
+ else
+ CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
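
Since chsc_siosl() needs only the subchannel id, a driver that wants the channel subsystem to capture logs for a misbehaving device can stay this small (a sketch; example_log_subchannel() is a hypothetical name, not part of the patch):

static void example_log_subchannel(struct subchannel *sch)
{
	if (chsc_siosl(sch->schid))
		CIO_MSG_EVENT(2, "siosl request failed\n");
}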
+
+/**
+ * chsc_scm_info() - store SCM information (SSI)
+ * @scm_area: request and response block for SSI
+ * @token: continuation token
+ *
+ * Returns 0 on success.
+ */
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
+{
+ int ccode, ret;
+
+ memset(scm_area, 0, sizeof(*scm_area));
+ scm_area->request.length = 0x0020;
+ scm_area->request.code = 0x004C;
+ scm_area->reqtok = token;
+
+ ccode = chsc(scm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(scm_area->response.code);
+ if (ret != 0)
+ CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
+ scm_area->response.code);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scm_info);
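
The response carries a continuation token in restok; a caller presumably iterates until it comes back zero, feeding the token back in. A hedged sketch (example_walk_scm() is a hypothetical name; scm_area is a caller-owned zeroed GFP_DMA page):

static int example_walk_scm(struct chsc_scm_info *scm_area)
{
	u64 token = 0;
	int ret;

	do {
		ret = chsc_scm_info(scm_area, token);
		if (ret)
			return ret;
		/* ... consume scm_area->scmal[] entries here ... */
		token = scm_area->restok;
	} while (token);

	return 0;
}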
+
+/**
+ * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * @schid: id of the subchannel on which PNSO is performed
+ * @brinfo_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc)
+{
+ memset(brinfo_area, 0, sizeof(*brinfo_area));
+ brinfo_area->request.length = 0x0030;
+ brinfo_area->request.code = 0x003d; /* network-subchannel operation */
+ brinfo_area->m = schid.m;
+ brinfo_area->ssid = schid.ssid;
+ brinfo_area->sch = schid.sch_no;
+ brinfo_area->cssid = schid.cssid;
+ brinfo_area->oc = 0; /* Store-network-bridging-information list */
+ brinfo_area->resume_token = resume_token;
+ brinfo_area->n = (cnc != 0);
+ if (chsc(brinfo_area))
+ return -EIO;
+ return chsc_error_from_response(brinfo_area->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
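
Multiblock responses resume via naihdr.resume_token; per the 0x0107 mapping added to chsc_error_from_response() above, -EBUSY signals that more data can be fetched with the returned token. A hedged sketch (example_walk_brinfo() is a hypothetical name, not part of the patch):

static int example_walk_brinfo(struct subchannel_id schid,
			       struct chsc_pnso_area *brinfo_area)
{
	struct chsc_brinfo_resume_token token = {};
	int rc;

	do {
		rc = chsc_pnso_brinfo(schid, brinfo_area, token, 0);
		if (rc && rc != -EBUSY)
			return rc;
		/* ... consume brinfo_area->entries here ... */
		token = brinfo_area->naihdr.resume_token;
	} while (rc == -EBUSY);	/* 0x0107: more data, resume with token */

	return 0;
}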
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index ba59bceace9..76c9b50700b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -3,17 +3,14 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <asm/css_chars.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/schid.h>
+#include <asm/qdio.h>
#define CHSC_SDA_OC_MSS 0x2
-struct chsc_header {
- u16 length;
- u16 code;
-} __attribute__ ((packed));
-
#define NR_MEASUREMENT_CHARS 5
struct cmg_chars {
u32 values[NR_MEASUREMENT_CHARS];
@@ -24,42 +21,26 @@ struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
} __attribute__ ((packed));
-struct channel_path_desc {
+struct channel_path_desc_fmt1 {
u8 flags;
u8 lsn;
u8 desc;
u8 chpid;
- u8 swla;
- u8 zeroes;
- u8 chla;
+ u32:24;
u8 chpp;
+ u32 unused[2];
+ u16 chid;
+ u32:16;
+ u16 mdc;
+ u16:13;
+ u8 r:1;
+ u8 s:1;
+ u8 f:1;
+ u32 zeros[2];
} __attribute__ ((packed));
struct channel_path;
-struct css_general_char {
- u64 : 12;
- u32 dynio : 1; /* bit 12 */
- u32 : 28;
- u32 aif : 1; /* bit 41 */
- u32 : 3;
- u32 mcss : 1; /* bit 45 */
- u32 fcs : 1; /* bit 46 */
- u32 : 1;
- u32 ext_mb : 1; /* bit 48 */
- u32 : 7;
- u32 aif_tdd : 1; /* bit 56 */
- u32 : 1;
- u32 qebsm : 1; /* bit 58 */
- u32 : 8;
- u32 aif_osa : 1; /* bit 67 */
- u32 : 14;
- u32 cib : 1; /* bit 82 */
- u32 : 5;
- u32 fcx : 1; /* bit 88 */
- u32 : 7;
-}__attribute__((packed));
-
struct css_chsc_char {
u64 res;
u64 : 20;
@@ -69,10 +50,11 @@ struct css_chsc_char {
u32 : 20;
u32 scssc : 1; /* bit 107 */
u32 scsscf : 1; /* bit 108 */
- u32 : 19;
+ u32:7;
+ u32 pnso:1; /* bit 116 */
+ u32:11;
}__attribute__((packed));
-extern struct css_general_char css_general_characteristics;
extern struct css_chsc_char css_chsc_characteristics;
struct chsc_ssd_info {
@@ -81,26 +63,176 @@ struct chsc_ssd_info {
struct chp_id chpid[8];
u16 fla[8];
};
+
+struct chsc_ssqd_area {
+ struct chsc_header request;
+ u16:10;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 first_sch;
+ u16:16;
+ u16 last_sch;
+ u32:32;
+ struct chsc_header response;
+ u32:32;
+ struct qdio_ssqd_desc qdio_ssqd;
+} __packed;
+
+struct chsc_scssc_area {
+ struct chsc_header request;
+ u16 operation_code;
+ u16:16;
+ u32:32;
+ u32:32;
+ u64 summary_indicator_addr;
+ u64 subchannel_indicator_addr;
+ u32 ks:4;
+ u32 kc:4;
+ u32:21;
+ u32 isc:3;
+ u32 word_with_d_bit;
+ u32:32;
+ struct subchannel_id schid;
+ u32 reserved[1004];
+ struct chsc_header response;
+ u32:32;
+} __packed;
+
+struct chsc_scpd {
+ struct chsc_header request;
+ u32:2;
+ u32 m:1;
+ u32 c:1;
+ u32 fmt:4;
+ u32 cssid:8;
+ u32:4;
+ u32 rfmt:4;
+ u32 first_chpid:8;
+ u32:24;
+ u32 last_chpid:8;
+ u32 zeroes1;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+} __attribute__ ((packed));
+
+
extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
-extern int chsc_alloc_sei_area(void);
-extern void chsc_free_sei_area(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
- int c, int m,
- struct chsc_response_struct *resp);
+ int c, int m, void *page);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc);
void chsc_chp_online(struct chp_id chpid);
void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
-
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr);
int chsc_error_from_response(int response);
+int chsc_siosl(struct subchannel_id schid);
+
+/* Functions and definitions to query storage-class memory. */
+struct sale {
+ u64 sa;
+ u32 p:4;
+ u32 op_state:4;
+ u32 data_state:4;
+ u32 rank:4;
+ u32 r:1;
+ u32:7;
+ u32 rid:8;
+ u32:32;
+} __packed;
+
+struct chsc_scm_info {
+ struct chsc_header request;
+ u32:32;
+ u64 reqtok;
+ u32 reserved1[4];
+ struct chsc_header response;
+ u64:56;
+ u8 rq;
+ u32 mbc;
+ u64 msa;
+ u16 is;
+ u16 mmc;
+ u32 mci;
+ u64 nr_scm_ini;
+ u64 nr_scm_unini;
+ u32 reserved2[10];
+ u64 restok;
+ struct sale scmal[248];
+} __packed;
+
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+
+struct chsc_brinfo_resume_token {
+ u64 t1;
+ u64 t2;
+} __packed;
+
+struct chsc_brinfo_naihdr {
+ struct chsc_brinfo_resume_token resume_token;
+ u32:32;
+ u32 instance;
+ u32:24;
+ u8 naids;
+ u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+ struct chsc_header request;
+ u8:2;
+ u8 m:1;
+ u8:5;
+ u8:2;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 sch;
+ u8:8;
+ u8 cssid;
+ u16:16;
+ u8 oc;
+ u32:24;
+ struct chsc_brinfo_resume_token resume_token;
+ u32 n:1;
+ u32:31;
+ u32 reserved[3];
+ struct chsc_header response;
+ u32:32;
+ struct chsc_brinfo_naihdr naihdr;
+ union {
+ struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
+ struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
+ struct qdio_brinfo_entry_l2 l2[0];
+ } entries;
+} __packed;
+
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc);
+
+#ifdef CONFIG_SCM_BUS
+int scm_update_information(void);
+int scm_process_availability_information(void);
+#else /* CONFIG_SCM_BUS */
+static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
+#endif /* CONFIG_SCM_BUS */
+
+
#endif
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 93eca1731b8..3d22d2a4ce1 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,16 +1,21 @@
/*
* Driver for s390 chsc subchannels
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2011
+ *
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*
*/
+#include <linux/slab.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <linux/kernel_stat.h>
+#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
@@ -24,6 +29,10 @@
static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;
+static struct chsc_request *on_close_request;
+static struct chsc_async_area *on_close_chsc_area;
+static DEFINE_MUTEX(on_close_mutex);
+
#define CHSC_MSG(imp, args...) do { \
debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \
} while (0)
@@ -47,12 +56,14 @@ MODULE_LICENSE("GPL");
static void chsc_subchannel_irq(struct subchannel *sch)
{
- struct chsc_private *private = sch->private;
+ struct chsc_private *private = dev_get_drvdata(&sch->dev);
struct chsc_request *request = private->request;
- struct irb *irb = (struct irb *)__LC_IRB;
+ struct irb *irb = &__get_cpu_var(cio_irb);
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
+ inc_irq_stat(IRQIO_CSC);
+
/* Copy irb to provided request and set done. */
if (!request) {
CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
@@ -77,13 +88,14 @@ static int chsc_subchannel_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
+ dev_set_drvdata(&sch->dev, private);
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret) {
CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
+ dev_set_drvdata(&sch->dev, NULL);
kfree(private);
} else {
- sch->private = private;
if (dev_get_uevent_suppress(&sch->dev)) {
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
@@ -97,8 +109,8 @@ static int chsc_subchannel_remove(struct subchannel *sch)
struct chsc_private *private;
cio_disable_subchannel(sch);
- private = sch->private;
- sch->private = NULL;
+ private = dev_get_drvdata(&sch->dev);
+ dev_set_drvdata(&sch->dev, NULL);
if (private->request) {
complete(&private->request->completion);
put_device(&sch->dev);
@@ -112,6 +124,31 @@ static void chsc_subchannel_shutdown(struct subchannel *sch)
cio_disable_subchannel(sch);
}
+static int chsc_subchannel_prepare(struct subchannel *sch)
+{
+ int cc;
+ struct schib schib;
+ /*
+ * Don't allow suspend while the subchannel is not idle
+ * since we don't have a way to clear the subchannel and
+ * cannot disable it with a request running.
+ */
+ cc = stsch_err(sch->schid, &schib);
+ if (!cc && scsw_stctl(&schib.scsw))
+ return -EAGAIN;
+ return 0;
+}
+
+static int chsc_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
static struct css_device_id chsc_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
{ /* end of list */ },
@@ -119,19 +156,24 @@ static struct css_device_id chsc_subchannel_ids[] = {
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
static struct css_driver chsc_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "chsc_subchannel",
+ },
.subchannel_type = chsc_subchannel_ids,
.irq = chsc_subchannel_irq,
.probe = chsc_subchannel_probe,
.remove = chsc_subchannel_remove,
.shutdown = chsc_subchannel_shutdown,
- .name = "chsc_subchannel",
+ .prepare = chsc_subchannel_prepare,
+ .freeze = chsc_subchannel_freeze,
+ .thaw = chsc_subchannel_restore,
+ .restore = chsc_subchannel_restore,
};
static int __init chsc_init_dbfs(void)
{
- chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
- 16 * sizeof(long));
+ chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
if (!chsc_debug_msg_id)
goto out;
debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
@@ -206,10 +248,10 @@ static int chsc_async(struct chsc_async_area *chsc_area,
int ret = -ENODEV;
char dbf[10];
- chsc_area->header.key = PAGE_DEFAULT_KEY;
+ chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
- private = sch->private;
+ private = dev_get_drvdata(&sch->dev);
if (private->request) {
spin_unlock(sch->lock);
ret = -EBUSY;
@@ -219,7 +261,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
CHSC_LOG(2, "schid");
CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
cc = chsc(chsc_area);
- sprintf(dbf, "cc:%d", cc);
+ snprintf(dbf, sizeof(dbf), "cc:%d", cc);
CHSC_LOG(2, dbf);
switch (cc) {
case 0:
@@ -248,11 +290,11 @@ static int chsc_async(struct chsc_async_area *chsc_area,
return ret;
}
-static void chsc_log_command(struct chsc_async_area *chsc_area)
+static void chsc_log_command(void *chsc_area)
{
char dbf[10];
- sprintf(dbf, "CHSC:%x", chsc_area->header.code);
+ snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
CHSC_LOG(0, dbf);
CHSC_LOG_HEX(0, chsc_area, 32);
}
@@ -316,13 +358,106 @@ static int chsc_ioctl_start(void __user *user_area)
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
ret = -EFAULT;
out_free:
- sprintf(dbf, "ret:%d", ret);
+ snprintf(dbf, sizeof(dbf), "ret:%d", ret);
CHSC_LOG(0, dbf);
kfree(request);
free_page((unsigned long)chsc_area);
return ret;
}
+static int chsc_ioctl_on_close_set(void __user *user_area)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (on_close_chsc_area) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
+ if (!on_close_request) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ if (!on_close_chsc_area) {
+ ret = -ENOMEM;
+ goto out_free_request;
+ }
+ if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_chsc;
+ }
+ ret = 0;
+ goto out_unlock;
+
+out_free_chsc:
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+out_free_request:
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
+static int chsc_ioctl_on_close_remove(void)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+ ret = 0;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
+static int chsc_ioctl_start_sync(void __user *user_area)
+{
+ struct chsc_sync_area *chsc_area;
+ int ret, ccode;
+
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!chsc_area)
+ return -ENOMEM;
+ if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ if (chsc_area->header.code & 0x4000) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ chsc_log_command(chsc_area);
+ ccode = chsc(chsc_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ free_page((unsigned long)chsc_area);
+ return ret;
+}
+
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
struct chsc_chp_cd *cd;
@@ -656,25 +791,31 @@ out_free:
static int chsc_ioctl_chpd(void __user *user_chpd)
{
+ struct chsc_scpd *scpd_area;
struct chsc_cpd_info *chpd;
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
- if (!chpd)
- return -ENOMEM;
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scpd_area || !chpd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
ret = -EFAULT;
goto out_free;
}
ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
chpd->rfmt, chpd->c, chpd->m,
- &chpd->chpdb);
+ scpd_area);
if (ret)
goto out_free;
+ memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
ret = -EFAULT;
out_free:
kfree(chpd);
+ free_page((unsigned long)scpd_area);
return ret;
}
@@ -740,33 +881,89 @@ out_free:
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
+ void __user *argp;
+
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
switch (cmd) {
case CHSC_START:
- return chsc_ioctl_start((void __user *)arg);
+ return chsc_ioctl_start(argp);
+ case CHSC_START_SYNC:
+ return chsc_ioctl_start_sync(argp);
case CHSC_INFO_CHANNEL_PATH:
- return chsc_ioctl_info_channel_path((void __user *)arg);
+ return chsc_ioctl_info_channel_path(argp);
case CHSC_INFO_CU:
- return chsc_ioctl_info_cu((void __user *)arg);
+ return chsc_ioctl_info_cu(argp);
case CHSC_INFO_SCH_CU:
- return chsc_ioctl_info_sch_cu((void __user *)arg);
+ return chsc_ioctl_info_sch_cu(argp);
case CHSC_INFO_CI:
- return chsc_ioctl_conf_info((void __user *)arg);
+ return chsc_ioctl_conf_info(argp);
case CHSC_INFO_CCL:
- return chsc_ioctl_conf_comp_list((void __user *)arg);
+ return chsc_ioctl_conf_comp_list(argp);
case CHSC_INFO_CPD:
- return chsc_ioctl_chpd((void __user *)arg);
+ return chsc_ioctl_chpd(argp);
case CHSC_INFO_DCAL:
- return chsc_ioctl_dcal((void __user *)arg);
+ return chsc_ioctl_dcal(argp);
+ case CHSC_ON_CLOSE_SET:
+ return chsc_ioctl_on_close_set(argp);
+ case CHSC_ON_CLOSE_REMOVE:
+ return chsc_ioctl_on_close_remove();
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
}
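
From user space the flow is: open /dev/chsc (exclusive, see chsc_open() below), issue CHSC_START_SYNC or CHSC_START with a page-sized request block, optionally arm an on-close request, then close. A hedged sketch, assuming the ioctl numbers come from the uapi <asm/chsc.h> (example_user_sync_chsc() is a hypothetical name, not part of the patch):

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/chsc.h>

static int example_user_sync_chsc(void)
{
	void *area;
	int fd, rc;

	fd = open("/dev/chsc", O_RDWR);	/* exclusive: EBUSY if in use */
	if (fd < 0)
		return -1;
	area = calloc(1, 4096);		/* request + response block */
	if (!area) {
		close(fd);
		return -1;
	}
	/* ... fill in a synchronous CHSC request header in "area" ... */
	rc = ioctl(fd, CHSC_START_SYNC, area);
	free(area);
	close(fd);			/* also runs any armed on-close request */
	return rc;
}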
+static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
+
+static int chsc_open(struct inode *inode, struct file *file)
+{
+ if (!atomic_dec_and_test(&chsc_ready_for_use)) {
+ atomic_inc(&chsc_ready_for_use);
+ return -EBUSY;
+ }
+ return nonseekable_open(inode, file);
+}
+
+static int chsc_release(struct inode *inode, struct file *filp)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area)
+ goto out_unlock;
+ init_completion(&on_close_request->completion);
+ CHSC_LOG(0, "on_close");
+ chsc_log_command(on_close_chsc_area);
+ spin_lock_irq(&chsc_lock);
+ ret = chsc_async(on_close_chsc_area, on_close_request);
+ spin_unlock_irq(&chsc_lock);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&on_close_request->completion);
+ ret = chsc_examine_irb(on_close_request);
+ }
+ snprintf(dbf, sizeof(dbf), "relret:%d", ret);
+ CHSC_LOG(0, dbf);
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ atomic_inc(&chsc_ready_for_use);
+ return 0;
+}
+
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
+ .open = chsc_open,
+ .release = chsc_release,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
+ .llseek = no_llseek,
};
static struct miscdevice chsc_misc_device = {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 2aebb982304..2905d8b0ec9 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2008
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -12,12 +11,14 @@
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -28,7 +29,7 @@
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
-#include <asm/cpu.h>
+#include <linux/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
@@ -45,6 +46,9 @@ debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;
+DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb);
+EXPORT_PER_CPU_SYMBOL(cio_irb);
+
/*
* Function: cio_debug_init
* Initializes three debug logs for common I/O:
@@ -54,7 +58,7 @@ debug_info_t *cio_debug_crw_id;
*/
static int __init cio_debug_init(void)
{
- cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
+ cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
if (!cio_debug_msg_id)
goto out_unregister;
debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
@@ -64,7 +68,7 @@ static int __init cio_debug_init(void)
goto out_unregister;
debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
debug_set_level(cio_debug_trace_id, 2);
- cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
+ cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
if (!cio_debug_crw_id)
goto out_unregister;
debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
@@ -83,29 +87,14 @@ out_unregister:
arch_initcall (cio_debug_init);
-int
-cio_set_options (struct subchannel *sch, int flags)
+int cio_set_options(struct subchannel *sch, int flags)
{
- sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
- sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
- sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
- return 0;
-}
+ struct io_subchannel_private *priv = to_io_private(sch);
-/* FIXME: who wants to use this? */
-int
-cio_get_options (struct subchannel *sch)
-{
- int flags;
-
- flags = 0;
- if (sch->options.suspend)
- flags |= DOIO_ALLOW_SUSPEND;
- if (sch->options.prefetch)
- flags |= DOIO_DENY_PREFETCH;
- if (sch->options.inter)
- flags |= DOIO_SUPPRESS_INTER;
- return flags;
+ priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+ priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+ priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+ return 0;
}
static int
@@ -138,22 +127,21 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
- char dbf_txt[15];
+ struct io_subchannel_private *priv = to_io_private(sch);
+ union orb *orb = &priv->orb;
int ccode;
- union orb *orb;
- CIO_TRACE_EVENT(4, "stIO");
- CIO_TRACE_EVENT(4, dev_name(&sch->dev));
+ CIO_TRACE_EVENT(5, "stIO");
+ CIO_TRACE_EVENT(5, dev_name(&sch->dev));
- orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
/* sch is always under 2G. */
orb->cmd.intparm = (u32)(addr_t)sch;
orb->cmd.fmt = 1;
- orb->cmd.pfch = sch->options.prefetch == 0;
- orb->cmd.spnd = sch->options.suspend;
- orb->cmd.ssic = sch->options.suspend && sch->options.inter;
+ orb->cmd.pfch = priv->options.prefetch == 0;
+ orb->cmd.spnd = priv->options.suspend;
+ orb->cmd.ssic = priv->options.suspend && priv->options.inter;
orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
/*
@@ -168,8 +156,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
ccode = ssch(sch->schid, orb);
/* process condition code */
- sprintf(dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT(4, dbf_txt);
+ CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -200,16 +187,14 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
int
cio_resume (struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
- CIO_TRACE_EVENT (4, "resIO");
+ CIO_TRACE_EVENT(4, "resIO");
CIO_TRACE_EVENT(4, dev_name(&sch->dev));
ccode = rsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (4, dbf_txt);
+ CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -234,13 +219,12 @@ cio_resume (struct subchannel *sch)
int
cio_halt(struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "haltIO");
+ CIO_TRACE_EVENT(2, "haltIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@@ -248,8 +232,7 @@ cio_halt(struct subchannel *sch)
*/
ccode = hsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -269,13 +252,12 @@ cio_halt(struct subchannel *sch)
int
cio_clear(struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "clearIO");
+ CIO_TRACE_EVENT(2, "clearIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@@ -283,8 +265,7 @@ cio_clear(struct subchannel *sch)
*/
ccode = csch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -305,19 +286,17 @@ cio_clear(struct subchannel *sch)
int
cio_cancel (struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "cancelIO");
+ CIO_TRACE_EVENT(2, "cancelIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccode = xsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0: /* success */
@@ -367,10 +346,11 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
*/
int cio_commit_config(struct subchannel *sch)
{
- struct schib schib;
int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
for (retry = 0; retry < 5; retry++) {
@@ -381,7 +361,7 @@ int cio_commit_config(struct subchannel *sch)
return ccode;
switch (ccode) {
case 0: /* successful */
- if (stsch(sch->schid, &schib) ||
+ if (stsch_err(sch->schid, &schib) ||
!css_sch_is_valid(&schib))
return -ENODEV;
if (cio_check_config(sch, &schib)) {
@@ -392,7 +372,10 @@ int cio_commit_config(struct subchannel *sch)
ret = -EAGAIN;
break;
case 1: /* status pending */
- return -EBUSY;
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
@@ -413,7 +396,7 @@ int cio_update_schib(struct subchannel *sch)
{
struct schib schib;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
memcpy(&sch->schib, &schib, sizeof(schib));
@@ -428,11 +411,9 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- char dbf_txt[15];
- int retry;
int ret;
- CIO_TRACE_EVENT (2, "ensch");
+ CIO_TRACE_EVENT(2, "ensch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@@ -444,23 +425,16 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
- for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
ret = cio_commit_config(sch);
- if (ret == -EIO) {
- /*
- * Got a program check in msch. Try without
- * the concurrent sense bit the next time.
- */
- sch->config.csense = 0;
- } else if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
}
- sprintf (dbf_txt, "ret:%d", ret);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
@@ -471,11 +445,9 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- char dbf_txt[15];
- int retry;
int ret;
- CIO_TRACE_EVENT (2, "dissch");
+ CIO_TRACE_EVENT(2, "dissch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@@ -484,31 +456,13 @@ int cio_disable_subchannel(struct subchannel *sch)
return -ENODEV;
sch->config.ena = 0;
+ ret = cio_commit_config(sch);
- for (retry = 0; retry < 3; retry++) {
- ret = cio_commit_config(sch);
- if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
- }
- sprintf (dbf_txt, "ret:%d", ret);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
-int cio_create_sch_lock(struct subchannel *sch)
-{
- sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
- if (!sch->lock)
- return -ENOMEM;
- spin_lock_init(sch->lock);
- return 0;
-}
-
static int cio_check_devno_blacklisted(struct subchannel *sch)
{
if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
@@ -565,37 +519,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
sprintf(dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT(4, dbf_txt);
- /* Nuke all fields. */
- memset(sch, 0, sizeof(struct subchannel));
-
- sch->schid = schid;
- if (cio_is_console(schid)) {
- sch->lock = cio_get_console_lock();
- } else {
- err = cio_create_sch_lock(sch);
- if (err)
- goto out;
- }
- mutex_init(&sch->reg_mutex);
- /* Set a name for the subchannel */
- if (cio_is_console(schid))
- sch->dev.init_name = cio_get_console_sch_name(schid);
- else
- dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
-
/*
* The first subchannel that is not-operational (ccode==3)
- * indicates that there aren't any more devices available.
+ * indicates that there aren't any more devices available.
* If stsch gets an exception, it means the current subchannel set
- * is not valid.
+ * is not valid.
*/
- ccode = stsch_err (schid, &sch->schib);
+ ccode = stsch_err(schid, &sch->schib);
if (ccode) {
err = (ccode == 3) ? -ENXIO : ccode;
goto out;
}
- /* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
+ sch->schid = schid;
switch (sch->st) {
case SUBCHANNEL_TYPE_IO:
@@ -612,274 +548,179 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
- return 0;
out:
- if (!cio_is_console(schid))
- kfree(sch->lock);
- sch->lock = NULL;
return err;
}
/*
- * do_IRQ() handles all normal I/O device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
+ * do_cio_interrupt() handles all normal I/O device IRQs
*/
-void
-do_IRQ (struct pt_regs *regs)
+static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
- struct pt_regs *old_regs;
-
- old_regs = set_irq_regs(regs);
- s390_idle_check();
- irq_enter();
- if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
- /* Serve timer interrupts first. */
- clock_comparator_work();
- /*
- * Get interrupt information from lowcore
- */
- tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
- irb = (struct irb *) __LC_IRB;
- do {
- kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
- /*
- * Non I/O-subchannel thin interrupts are processed differently
- */
- if (tpi_info->adapter_IO == 1 &&
- tpi_info->int_type == IO_INTERRUPT_TYPE) {
- do_adapter_IO(tpi_info->isc);
- continue;
- }
- sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
- if (!sch) {
- /* Clear pending interrupt condition. */
- tsch(tpi_info->schid, irb);
- continue;
- }
- spin_lock(sch->lock);
- /* Store interrupt response block to lowcore. */
- if (tsch(tpi_info->schid, irb) == 0) {
- /* Keep subchannel information word up to date. */
- memcpy (&sch->schib.scsw, &irb->scsw,
- sizeof (irb->scsw));
- /* Call interrupt handler if there is one. */
- if (sch->driver && sch->driver->irq)
- sch->driver->irq(sch);
- }
- spin_unlock(sch->lock);
- /*
- * Are more interrupts pending?
- * If so, the tpi instruction will update the lowcore
- * to hold the info for the next interrupt.
- * We don't do this for VM because a tpi drops the cpu
- * out of the sie which costs more cycles than it saves.
- */
- } while (!MACHINE_IS_VM && tpi (NULL) != 0);
- irq_exit();
- set_irq_regs(old_regs);
+
+ __this_cpu_write(s390_idle.nohz_delay, 1);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ irb = &__get_cpu_var(cio_irb);
+ sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
+ if (!sch) {
+ /* Clear pending interrupt condition. */
+ inc_irq_stat(IRQIO_CIO);
+ tsch(tpi_info->schid, irb);
+ return IRQ_HANDLED;
+ }
+ spin_lock(sch->lock);
+ /* Store interrupt response block to lowcore. */
+ if (tsch(tpi_info->schid, irb) == 0) {
+ /* Keep subchannel information word up to date. */
+ memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
+ /* Call interrupt handler if there is one. */
+ if (sch->driver && sch->driver->irq)
+ sch->driver->irq(sch);
+ else
+ inc_irq_stat(IRQIO_CIO);
+ } else
+ inc_irq_stat(IRQIO_CIO);
+ spin_unlock(sch->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction io_interrupt = {
+ .name = "IO",
+ .handler = do_cio_interrupt,
+};
+
+void __init init_cio_interrupts(void)
+{
+ irq_set_chip_and_handler(IO_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(IO_INTERRUPT, &io_interrupt);
}
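
/*
 * A sketch of the resulting interrupt path, assuming the tpi information
 * is delivered in the int_code field of the interrupt's pt_regs (as the
 * cast above relies on):
 *
 *	I/O interrupt
 *	  -> generic IRQ layer (IO_INTERRUPT, handle_percpu_irq)
 *	    -> do_cio_interrupt()
 *	         tpi info read from get_irq_regs()->int_code
 *	         IRB stored into the per-cpu cio_irb buffer via tsch()
 *	         sch->driver->irq() invoked under sch->lock
 */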
#ifdef CONFIG_CCW_CONSOLE
-static struct subchannel console_subchannel;
-static char console_sch_name[10] = "0.x.xxxx";
-static struct io_subchannel_private console_priv;
-static int console_subchannel_in_use;
+static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
/*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
+ * Use cio_tsch to update the subchannel status and call the interrupt handler
+ * if a status was pending. Must be called with the subchannel's lock held.
*/
-static int cio_tpi(void)
+void cio_tsch(struct subchannel *sch)
{
- struct tpi_info *tpi_info;
- struct subchannel *sch;
struct irb *irb;
int irq_context;
- tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
- if (tpi(NULL) != 1)
- return 0;
- irb = (struct irb *) __LC_IRB;
+ irb = &__get_cpu_var(cio_irb);
/* Store interrupt response block to lowcore. */
- if (tsch(tpi_info->schid, irb) != 0)
+ if (tsch(sch->schid, irb) != 0)
/* Not status pending or not operational. */
- return 1;
- sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
- if (!sch)
- return 1;
+ return;
+ memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+ /* Call interrupt handler with updated status. */
irq_context = in_interrupt();
- if (!irq_context)
+ if (!irq_context) {
local_bh_disable();
- irq_enter();
- spin_lock(sch->lock);
- memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+ irq_enter();
+ }
+ kstat_incr_irq_this_cpu(IO_INTERRUPT);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
- spin_unlock(sch->lock);
- irq_exit();
- if (!irq_context)
+ else
+ inc_irq_stat(IRQIO_CIO);
+ if (!irq_context) {
+ irq_exit();
_local_bh_enable();
- return 1;
-}
-
-void *cio_get_console_priv(void)
-{
- return &console_priv;
+ }
}
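
/*
 * A hedged usage sketch, not part of this patch: poll a busy subchannel
 * until its activity control field clears, calling cio_tsch() with the
 * lock held as required. The helper name and the 100us delay are
 * hypothetical.
 */
static void example_wait_for_idle(struct subchannel *sch)
{
	spin_lock_irq(sch->lock);
	while (sch->schib.scsw.cmd.actl != 0) {
		cio_tsch(sch);	/* consume pending status, run the handler */
		spin_unlock_irq(sch->lock);
		udelay(100);	/* give the channel subsystem some time */
		spin_lock_irq(sch->lock);
	}
	spin_unlock_irq(sch->lock);
}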
-/*
- * busy wait for the next interrupt on the console
- */
-void wait_cons_dev(void)
- __releases(console_subchannel.lock)
- __acquires(console_subchannel.lock)
+static int cio_test_for_console(struct subchannel_id schid, void *data)
{
- unsigned long cr6 __attribute__ ((aligned (8)));
- unsigned long save_cr6 __attribute__ ((aligned (8)));
-
- /*
- * before entering the spinlock we may already have
- * processed the interrupt on a different CPU...
- */
- if (!console_subchannel_in_use)
- return;
-
- /* disable all but the console isc */
- __ctl_store (save_cr6, 6, 6);
- cr6 = 1UL << (31 - CONSOLE_ISC);
- __ctl_load (cr6, 6, 6);
-
- do {
- spin_unlock(console_subchannel.lock);
- if (!cio_tpi())
- cpu_relax();
- spin_lock(console_subchannel.lock);
- } while (console_subchannel.schib.scsw.cmd.actl != 0);
- /*
- * restore previous isc value
- */
- __ctl_load (save_cr6, 6, 6);
-}
+ struct schib schib;
-static int
-cio_test_for_console(struct subchannel_id schid, void *data)
-{
- if (stsch_err(schid, &console_subchannel.schib) != 0)
+ if (stsch_err(schid, &schib) != 0)
return -ENXIO;
- if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
- console_subchannel.schib.pmcw.dnv &&
- (console_subchannel.schib.pmcw.dev == console_devno)) {
+ if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ (schib.pmcw.dev == console_devno)) {
console_irq = schid.sch_no;
return 1; /* found */
}
return 0;
}
-
-static int
-cio_get_console_sch_no(void)
+static int cio_get_console_sch_no(void)
{
struct subchannel_id schid;
-
+ struct schib schib;
+
init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
- if (stsch(schid, &console_subchannel.schib) != 0 ||
- (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
- !console_subchannel.schib.pmcw.dnv)
+ if (stsch_err(schid, &schib) != 0 ||
+ (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
return -1;
- console_devno = console_subchannel.schib.pmcw.dev;
+ console_devno = schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
for_each_subchannel(cio_test_for_console, NULL);
- if (console_irq == -1)
- return -1;
- } else {
- /* unlike in 2.4, we cannot autoprobe here, since
- * the channel subsystem is not fully initialized.
- * With some luck, the HWC console can take over */
- return -1;
}
return console_irq;
}
-struct subchannel *
-cio_probe_console(void)
+struct subchannel *cio_probe_console(void)
{
- int sch_no, ret;
struct subchannel_id schid;
+ struct subchannel *sch;
+ int sch_no, ret;
- if (xchg(&console_subchannel_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
- console_subchannel_in_use = 0;
pr_warning("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
- memset(&console_subchannel, 0, sizeof(struct subchannel));
init_subchannel_id(&schid);
schid.sch_no = sch_no;
- ret = cio_validate_subchannel(&console_subchannel, schid);
- if (ret) {
- console_subchannel_in_use = 0;
- return ERR_PTR(-ENODEV);
- }
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return sch;
- /*
- * enable console I/O-interrupt subclass
- */
+ lockdep_set_class(sch->lock, &console_sch_key);
isc_register(CONSOLE_ISC);
- console_subchannel.config.isc = CONSOLE_ISC;
- console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
- ret = cio_commit_config(&console_subchannel);
+ sch->config.isc = CONSOLE_ISC;
+ sch->config.intparm = (u32)(addr_t)sch;
+ ret = cio_commit_config(sch);
if (ret) {
isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
+ put_device(&sch->dev);
return ERR_PTR(ret);
}
- return &console_subchannel;
+ console_sch = sch;
+ return sch;
}
-void
-cio_release_console(void)
-{
- console_subchannel.config.intparm = 0;
- cio_commit_config(&console_subchannel);
- isc_unregister(CONSOLE_ISC);
- console_subchannel_in_use = 0;
-}
-
-/* Bah... hack to catch console special sausages. */
-int
-cio_is_console(struct subchannel_id schid)
+int cio_is_console(struct subchannel_id schid)
{
- if (!console_subchannel_in_use)
+ if (!console_sch)
return 0;
- return schid_equal(&schid, &console_subchannel.schid);
+ return schid_equal(&schid, &console_sch->schid);
}
-struct subchannel *
-cio_get_console_subchannel(void)
+void cio_register_early_subchannels(void)
{
- if (!console_subchannel_in_use)
- return NULL;
- return &console_subchannel;
-}
+ int ret;
-const char *cio_get_console_sch_name(struct subchannel_id schid)
-{
- snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
- return (const char *)console_sch_name;
+ if (!console_sch)
+ return;
+
+ ret = css_register_subchannel(console_sch);
+ if (ret)
+ put_device(&console_sch->dev);
}
+#endif /* CONFIG_CCW_CONSOLE */
-#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
@@ -888,10 +729,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
cc = 0;
for (retry=0;retry<3;retry++) {
schib->pmcw.ena = 0;
- cc = msch(schid, schib);
+ cc = msch_err(schid, schib);
if (cc)
return (cc==3?-ENODEV:-EBUSY);
- if (stsch(schid, schib) || !css_sch_is_valid(schib))
+ if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
return -ENODEV;
if (!schib->pmcw.ena)
return 0;
@@ -910,7 +751,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
struct tpi_info ti;
if (tpi(&ti)) {
- tsch(ti.schid, (struct irb *)__LC_IRB);
+ tsch(ti.schid, &__get_cpu_var(cio_irb));
if (schid_equal(&ti.schid, &schid))
return 0;
}
@@ -938,7 +779,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr)
pgm_check_occured = 0;
s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
- rc = stsch(schid, addr);
+ rc = stsch_err(schid, addr);
s390_base_pgm_handler_fn = NULL;
/* The program check handler could have changed pgm_check_occured. */
@@ -975,7 +816,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
/* No default clear strategy */
break;
}
- stsch(schid, &schib);
+ stsch_err(schid, &schib);
__disable_subchannel_easy(schid, &schib);
}
out:
@@ -1028,9 +869,9 @@ static void css_reset(void)
atomic_inc(&chpid_reset_count);
}
/* Wait for machine check for all channel paths. */
- timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
while (atomic_read(&chpid_reset_count) != 0) {
- if (get_clock() > timeout)
+ if (get_tod_clock_fast() > timeout)
break;
cpu_relax();
}
@@ -1095,9 +936,9 @@ extern void do_reipl_asm(__u32 schid);
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
- struct subchannel_id schid;
+ struct subchannel_id uninitialized_var(schid);
- s390_reset_system();
+ s390_reset_system(NULL, NULL);
if (reipl_find_schid(devid, &schid) != 0)
panic("IPL Device not found\n");
do_reipl_asm(*((__u32*)&schid));
@@ -1108,10 +949,10 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
struct subchannel_id schid;
struct schib schib;
- schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
+ schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
if (!schid.one)
return -ENODEV;
- if (stsch(schid, &schib))
+ if (stsch_err(schid, &schib))
return -ENODEV;
if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
return -ENODEV;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 5150fba742a..a01376ae174 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,16 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
+enum sch_todo {
+ SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
+ SCH_TODO_UNREG,
+};
+
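/*
 * For example: if SCH_TODO_EVAL is pending when an unregister is requested,
 * css_sched_sch_todo(sch, SCH_TODO_UNREG) upgrades the pending todo to
 * SCH_TODO_UNREG, while the reverse request is ignored because the pending
 * value is already the higher one (see css_sched_sch_todo() in css.c).
 */
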
/* subchannel data structure used by I/O subroutines */
struct subchannel {
struct subchannel_id schid;
@@ -79,13 +89,6 @@ struct subchannel {
SUBCHANNEL_TYPE_MSG = 2,
SUBCHANNEL_TYPE_ADM = 3,
} st; /* subchannel type */
-
- struct {
- unsigned int suspend:1; /* allow suspend */
- unsigned int prefetch:1;/* deny prefetch */
- unsigned int inter:1; /* suppress intermediate interrupts */
- } __attribute__ ((packed)) options;
-
__u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */
__u8 opm; /* operational path mask */
@@ -94,12 +97,12 @@ struct subchannel {
struct chsc_ssd_info ssd_info; /* subchannel description */
struct device dev; /* entry in device tree */
struct css_driver *driver;
- void *private; /* private per subchannel type data */
- struct work_struct work;
+ enum sch_todo todo;
+ struct work_struct todo_work;
struct schib_config config;
} __attribute__ ((aligned(8)));
-#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
+DECLARE_PER_CPU(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
@@ -114,34 +117,21 @@ extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
-extern int cio_get_options (struct subchannel *);
extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
-int cio_create_sch_lock(struct subchannel *);
-void do_adapter_IO(u8 isc);
-void do_IRQ(struct pt_regs *);
-
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
-extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
-extern struct subchannel *cio_get_console_subchannel(void);
-extern spinlock_t * cio_get_console_lock(void);
-extern void *cio_get_console_priv(void);
-extern const char *cio_get_console_sch_name(struct subchannel_id schid);
-extern const char *cio_get_console_cdev_name(struct subchannel *sch);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
-#define cio_get_console_subchannel() NULL
-#define cio_get_console_lock() NULL
-#define cio_get_console_priv() NULL
-#define cio_get_console_sch_name(schid) NULL
-#define cio_get_console_cdev_name(sch) NULL
+static inline void cio_register_early_subchannels(void) {}
#endif
#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index dc98b2c6386..23054f8fa9f 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/cmf.c
- *
* Linux on zSeries Channel Measurement Facility support
*
- * Copyright 2000,2006 IBM Corporation
+ * Copyright IBM Corp. 2000, 2006
*
* Authors: Arnd Bergmann <arndb@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -35,7 +33,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/timex.h> /* get_clock() */
+#include <linux/timex.h> /* get_tod_clock() */
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -98,7 +96,7 @@ enum cmb_format {
* enum cmb_format.
*/
static int format = CMF_AUTODETECT;
-module_param(format, bool, 0444);
+module_param(format, bint, 0444);
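
/*
 * Note: "bint" (boolean-or-int) keeps accepting the integer values of
 * enum cmb_format (e.g. CMF_AUTODETECT == -1) that a plain "bool"
 * parameter would reject.
 */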
/**
* struct cmb_operations - functions to use depending on cmb_format
@@ -328,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
memcpy(cmb_data->last_block, hw_block, cmb_data->size);
memcpy(reference_buf, hw_block, cmb_data->size);
} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
- cmb_data->last_update = get_clock();
+ cmb_data->last_update = get_tod_clock();
kfree(reference_buf);
return 0;
}
@@ -430,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
cmb_data->last_update = 0;
}
- cdev->private->cmb_start_time = get_clock();
+ cdev->private->cmb_start_time = get_tod_clock();
spin_unlock_irq(cdev->ccwlock);
}
@@ -462,7 +460,7 @@ static struct cmb_area cmb_area = {
* block of memory, which can not be moved as long as any channel
* is active. Therefore, a maximum number of subchannels needs to
* be defined somewhere. This is a module parameter, defaulting to
- * a resonable value of 1024, or 32 kb of memory.
+ * a reasonable value of 1024, or 32 kb of memory.
* Current kernels don't allow kmalloc with more than 128kb, so the
* maximum is 4096.
*/
@@ -1184,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev,
int ret;
unsigned long val;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
@@ -1204,6 +1202,11 @@ static ssize_t cmb_enable_store(struct device *dev,
DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
+int ccw_set_cmf(struct ccw_device *cdev, int enable)
+{
+ return cmbops->set(cdev, enable ? 2 : 0);
+}
+
/**
* enable_cmf() - switch on the channel measurement for a specific device
* @cdev: The ccw device to be enabled
@@ -1336,7 +1339,7 @@ module_init(init_cmf);
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
- "Copyright 2003 IBM Corporation\n");
+ "Copyright IBM Corp. 2003\n");
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e7..0f8a25f98b1 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -1,22 +1,24 @@
/*
* Channel report handling code
*
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
-#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
+#include <linux/wait.h>
#include <asm/crw.h>
+#include <asm/ctl_reg.h>
-static struct semaphore crw_semaphore;
static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
/**
* crw_register_handler() - register a channel report word handler
@@ -59,12 +61,14 @@ void crw_unregister_handler(int rsc)
static int crw_collect_info(void *unused)
{
struct crw crw[2];
- int ccode;
+ int ccode, signal;
unsigned int chain;
- int ignore;
repeat:
- ignore = down_interruptible(&crw_semaphore);
+ signal = wait_event_interruptible(crw_handler_wait_q,
+ atomic_read(&crw_nr_req) > 0);
+ if (unlikely(signal))
+ atomic_inc(&crw_nr_req);
chain = 0;
while (1) {
crw_handler_t handler;
@@ -122,25 +126,23 @@ repeat:
/* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0;
}
+ if (atomic_dec_and_test(&crw_nr_req))
+ wake_up(&crw_handler_wait_q);
goto repeat;
return 0;
}
void crw_handle_channel_report(void)
{
- up(&crw_semaphore);
+ atomic_inc(&crw_nr_req);
+ wake_up(&crw_handler_wait_q);
}
-/*
- * Separate initcall needed for semaphore initialization since
- * crw_handle_channel_report might be called before crw_machine_check_init.
- */
-static int __init crw_init_semaphore(void)
+void crw_wait_for_channel_report(void)
{
- init_MUTEX_LOCKED(&crw_semaphore);
- return 0;
+ crw_handle_channel_report();
+ wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}
-pure_initcall(crw_init_semaphore);
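
/*
 * A sketch of the resulting handshake, assuming the machine check handler
 * is the only producer:
 *
 *	producer (crw_handle_channel_report)   consumer (crw_collect_info)
 *	  atomic_inc(&crw_nr_req);               wait_event(... nr_req > 0);
 *	  wake_up(&crw_handler_wait_q);          ... drain pending CRWs ...
 *	                                         if (atomic_dec_and_test(..))
 *	                                                 wake_up(...);
 *
 * crw_wait_for_channel_report() rides on the same counter: it increments
 * it once and then sleeps until the consumer has drained everything.
 */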
/*
* Machine checks for the channel subsystem must be enabled
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 0085d890179..0268e5fd59b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,10 +1,10 @@
/*
- * drivers/s390/cio/css.c
- * driver for channel subsystem
+ * driver for channel subsystem
*
- * Copyright IBM Corp. 2002,2008
- * Author(s): Arnd Bergmann (arndb@de.ibm.com)
- * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Copyright IBM Corp. 2002, 2010
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#define KMSG_COMPONENT "cio"
@@ -17,6 +17,8 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -30,10 +32,10 @@
#include "chp.h"
int css_init_done = 0;
-static int need_reprobe = 0;
-static int max_ssid = 0;
+int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -67,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
struct cb_data *cb = data;
int rc = 0;
- idset_sch_del(cb->set, sch->schid);
+ if (cb->set)
+ idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
@@ -113,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
+ if (fn_known && !fn_unknown) {
+ /* Skip idset allocation in case of known-only loop. */
+ cb.set = NULL;
+ return bus_for_each_dev(&css_bus_type, NULL, &cb,
+ call_fn_known_sch);
+ }
+
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
@@ -133,45 +143,55 @@ out:
return rc;
}
-static struct subchannel *
-css_alloc_subchannel(struct subchannel_id schid)
+static void css_sch_todo(struct work_struct *work);
+
+static int css_sch_create_locks(struct subchannel *sch)
{
- struct subchannel *sch;
- int ret;
+ sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
- sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
- if (sch == NULL)
- return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel (sch, schid);
- if (ret < 0) {
- kfree(sch);
- return ERR_PTR(ret);
- }
- return sch;
+ spin_lock_init(sch->lock);
+ mutex_init(&sch->reg_mutex);
+
+ return 0;
}
-static void
-css_free_subchannel(struct subchannel *sch)
+static void css_subchannel_release(struct device *dev)
{
- if (sch) {
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- kfree(sch->lock);
- kfree(sch);
- }
+ struct subchannel *sch = to_subchannel(dev);
+
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
+ kfree(sch->lock);
+ kfree(sch);
}
-static void
-css_subchannel_release(struct device *dev)
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
+ int ret;
- sch = to_subchannel(dev);
- if (!cio_is_console(sch->schid)) {
- kfree(sch->lock);
- kfree(sch);
- }
+ sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+ if (!sch)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cio_validate_subchannel(sch, schid);
+ if (ret < 0)
+ goto err;
+
+ ret = css_sch_create_locks(sch);
+ if (ret)
+ goto err;
+
+ INIT_WORK(&sch->todo_work, css_sch_todo);
+ sch->dev.release = &css_subchannel_release;
+ device_initialize(&sch->dev);
+ return sch;
+
+err:
+ kfree(sch);
+ return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
@@ -179,7 +199,9 @@ static int css_sch_device_register(struct subchannel *sch)
int ret;
mutex_lock(&sch->reg_mutex);
- ret = device_register(&sch->dev);
+ dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
+ sch->schid.sch_no);
+ ret = device_add(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
@@ -230,16 +252,11 @@ void css_update_ssd_info(struct subchannel *sch)
{
int ret;
- if (cio_is_console(sch->schid)) {
- /* Console is initialized too early for functions requiring
- * memory allocation. */
+ ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+ if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- } else {
- ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
- if (ret)
- ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
- ssd_register_chpids(&sch->ssd_info);
- }
+
+ ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -272,19 +289,18 @@ static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
-static struct attribute_group *default_subch_attr_groups[] = {
+static const struct attribute_group *default_subch_attr_groups[] = {
&subch_attr_group,
NULL,
};
-static int css_register_subchannel(struct subchannel *sch)
+int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
- sch->dev.release = &css_subchannel_release;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
@@ -316,17 +332,19 @@ static int css_register_subchannel(struct subchannel *sch)
return ret;
}
-int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
{
- int ret;
struct subchannel *sch;
+ int ret;
sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return PTR_ERR(sch);
+
ret = css_register_subchannel(sch);
if (ret)
- css_free_subchannel(sch);
+ put_device(&sch->dev);
+
return ret;
}
@@ -373,12 +391,16 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
/* Will be done on the slow path. */
return -EAGAIN;
}
- if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
+ if (stsch_err(schid, &schib)) {
+ /* Subchannel is not provided. */
+ return -ENXIO;
+ }
+ if (!css_sch_is_valid(&schib)) {
/* Unusable - ignore. */
return 0;
}
- CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
- "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
+ CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
+ schid.sch_no);
return css_probe_device(schid);
}
@@ -395,6 +417,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
"Got subchannel machine check but "
"no sch_event handler provided.\n");
}
+ if (ret != 0 && ret != -EAGAIN) {
+ CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ }
return ret;
}
@@ -413,12 +439,76 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: operation to be scheduled
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(css_sched_sch_todo);
+
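/*
 * A hedged usage sketch: a caller defers unregistration out of the
 * current (possibly atomic) context like this, with flags being a
 * local unsigned long:
 *
 *	spin_lock_irqsave(sch->lock, flags);
 *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
 *	spin_unlock_irqrestore(sch->lock, flags);
 */
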
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
spin_lock_init(&slow_subchannel_lock);
+ atomic_set(&css_eval_scheduled, 0);
+ init_waitqueue_head(&css_eval_wq);
slow_subchannel_set = idset_sch_new();
if (!slow_subchannel_set) {
CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
@@ -464,23 +554,37 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
case -ENOMEM:
case -EIO:
/* These should abort looping */
+ spin_lock_irq(&slow_subchannel_lock);
+ idset_sch_del_subseq(slow_subchannel_set, schid);
+ spin_unlock_irq(&slow_subchannel_lock);
break;
default:
rc = 0;
}
+ /* Allow scheduling here since the containing loop might
+ * take a while. */
+ cond_resched();
}
return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
+ unsigned long flags;
+
CIO_TRACE_EVENT(4, "slowpath");
for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
NULL);
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ if (idset_is_empty(slow_subchannel_set)) {
+ atomic_set(&css_eval_scheduled, 0);
+ wake_up(&css_eval_wq);
+ }
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
+struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
@@ -488,7 +592,8 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
- queue_work(slow_path_wq, &slow_path_work);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -498,80 +603,54 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
- queue_work(slow_path_wq, &slow_path_work);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-void css_wait_for_slow_path(void)
-{
- flush_workqueue(slow_path_wq);
-}
-
-/* Reprobe subchannel if unregistered. */
-static int reprobe_subchannel(struct subchannel_id schid, void *data)
+static int __unset_registered(struct device *dev, void *data)
{
- int ret;
-
- CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
- schid.ssid, schid.sch_no);
- if (need_reprobe)
- return -EAGAIN;
-
- ret = css_probe_device(schid);
- switch (ret) {
- case 0:
- break;
- case -ENXIO:
- case -ENOMEM:
- case -EIO:
- /* These should abort looping */
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
+ struct idset *set = data;
+ struct subchannel *sch = to_subchannel(dev);
-static void reprobe_after_idle(struct work_struct *unused)
-{
- /* Make sure initial subchannel scan is done. */
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
- if (need_reprobe)
- css_schedule_reprobe();
+ idset_sch_del(set, sch->schid);
+ return 0;
}
-static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
-
-/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(struct work_struct *unused)
+void css_schedule_eval_all_unreg(unsigned long delay)
{
- int ret;
-
- CIO_MSG_EVENT(4, "reprobe start\n");
+ unsigned long flags;
+ struct idset *unreg_set;
- /* Make sure initial subchannel scan is done. */
- if (atomic_read(&ccw_device_init_count) != 0) {
- queue_work(ccw_device_work, &reprobe_idle_work);
+ /* Find unregistered subchannels. */
+ unreg_set = idset_sch_new();
+ if (!unreg_set) {
+ /* Fallback. */
+ css_schedule_eval_all();
return;
}
- need_reprobe = 0;
- ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
-
- CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
- need_reprobe);
+ idset_fill(unreg_set);
+ bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+ /* Apply to slow_subchannel_set. */
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_add_set(slow_subchannel_set, unreg_set);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, delay);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+ idset_free(unreg_set);
}
-static DECLARE_WORK(css_reprobe_work, reprobe_all);
+void css_wait_for_slow_path(void)
+{
+ flush_workqueue(cio_work_q);
+}
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
- need_reprobe = 1;
- queue_work(slow_path_wq, &css_reprobe_work);
+ /* Schedule with a delay to allow merging of subsequent calls. */
+ css_schedule_eval_all_unreg(1 * HZ);
}
-
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
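
/*
 * The one-second delay means that a burst of events which each end up in
 * css_schedule_reprobe() collapses into a single evaluation pass:
 * queue_delayed_work() does nothing while the work is still pending.
 */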
/*
@@ -580,6 +659,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct subchannel_id mchk_schid;
+ struct subchannel *sch;
if (overflow) {
css_schedule_eval_all();
@@ -597,8 +677,15 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = crw0->rsid;
if (crw1)
- mchk_schid.ssid = (crw1->rsid >> 8) & 3;
+ mchk_schid.ssid = (crw1->rsid >> 4) & 3;
+ if (crw0->erc == CRW_ERC_PMOD) {
+ sch = get_subchannel_by_schid(mchk_schid);
+ if (sch) {
+ css_update_ssd_info(sch);
+ put_device(&sch->dev);
+ }
+ }
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
@@ -607,49 +694,11 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
css_evaluate_subchannel(mchk_schid, 0);
}
-static int __init
-__init_channel_subsystem(struct subchannel_id schid, void *data)
-{
- struct subchannel *sch;
- int ret;
-
- if (cio_is_console(schid))
- sch = cio_get_console_subchannel();
- else {
- sch = css_alloc_subchannel(schid);
- if (IS_ERR(sch))
- ret = PTR_ERR(sch);
- else
- ret = 0;
- switch (ret) {
- case 0:
- break;
- case -ENOMEM:
- panic("Out of memory in init_channel_subsystem\n");
- /* -ENXIO: no more subchannels. */
- case -ENXIO:
- return ret;
- /* -EIO: this subchannel set not supported. */
- case -EIO:
- return ret;
- default:
- return 0;
- }
- }
- /*
- * We register ALL valid subchannels in ioinfo, even those
- * that have been present before init_channel_subsystem.
- * These subchannels can't have been registered yet (kmalloc
- * not working) so we do it now. This is true e.g. for the
- * console subchannel.
- */
- css_register_subchannel(sch);
- return 0;
-}
-
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
+ struct cpuid cpu_id;
+
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
@@ -660,8 +709,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
- css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+ get_cpu_id(&cpu_id);
+ css->global_pgid.cpu_id = cpu_id.ident;
+ css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@@ -704,7 +754,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
int ret;
unsigned long val;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
mutex_lock(&css->mutex);
@@ -739,7 +789,8 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
- ret = cio_create_sch_lock(css->pseudo_subchannel);
+ mutex_init(&css->pseudo_subchannel->reg_mutex);
+ ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
@@ -749,7 +800,7 @@ static int __init setup_css(int nr)
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
- tod_high = (u32) (get_clock() >> 32);
+ tod_high = (u32) (get_tod_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
}
@@ -780,22 +831,82 @@ static struct notifier_block css_reboot_notifier = {
};
/*
+ * Since the css devices are neither on a bus nor have a class
+ * nor have a special device type, we cannot stop/restart channel
+ * path measurements via the normal suspend/resume callbacks, but have
+ * to use notifiers.
+ */
+static int css_power_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ int ret, i;
+
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ ret = __chsc_do_secm(css, 0);
+ ret = notifier_from_errno(ret);
+ mutex_unlock(&css->mutex);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ ret = __chsc_do_secm(css, 1);
+ ret = notifier_from_errno(ret);
+ mutex_unlock(&css->mutex);
+ }
+ /* search for subchannels that appeared during hibernation */
+ css_schedule_reprobe();
+ break;
+ default:
+ ret = NOTIFY_DONE;
+ }
+ return ret;
+
+}
+static struct notifier_block css_power_notifier = {
+ .notifier_call = css_power_event,
+};
+
+/*
* Now that the driver core is running, we can setup our channel subsystem.
- * The struct subchannel's are created during probing (except for the
- * static console subchannel).
+ * The struct subchannel instances are created during probing.
*/
-static int __init
-init_channel_subsystem (void)
+static int __init css_bus_init(void)
{
int ret, i;
- ret = chsc_determine_css_characteristics();
- if (ret == -ENOMEM)
- goto out; /* No need to continue. */
+ ret = chsc_init();
+ if (ret)
+ return ret;
- ret = chsc_alloc_sei_area();
+ chsc_determine_css_characteristics();
+ /* Try to enable MSS. */
+ ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
if (ret)
- goto out;
+ max_ssid = 0;
+ else /* Success. */
+ max_ssid = __MAX_SSID;
ret = slow_subchannel_init();
if (ret)
@@ -808,17 +919,6 @@ init_channel_subsystem (void)
if ((ret = bus_register(&css_bus_type)))
goto out;
- /* Try to enable MSS. */
- ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
- switch (ret) {
- case 0: /* Success. */
- max_ssid = __MAX_SSID;
- break;
- case -ENOMEM:
- goto out_bus;
- default:
- max_ssid = 0;
- }
/* Setup css structure. */
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
@@ -846,18 +946,24 @@ init_channel_subsystem (void)
goto out_device;
}
ret = device_register(&css->pseudo_subchannel->dev);
- if (ret)
+ if (ret) {
+ put_device(&css->pseudo_subchannel->dev);
goto out_file;
+ }
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
goto out_unregister;
+ ret = register_pm_notifier(&css_power_notifier);
+ if (ret) {
+ unregister_reboot_notifier(&css_reboot_notifier);
+ goto out_unregister;
+ }
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
- for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
out_file:
if (css_chsc_characteristics.secm)
@@ -878,17 +984,145 @@ out_unregister:
&dev_attr_cm_enable);
device_unregister(&css->device);
}
-out_bus:
bus_unregister(&css_bus_type);
out:
- crw_unregister_handler(CRW_RSC_CSS);
- chsc_free_sei_area();
- kfree(slow_subchannel_set);
+ crw_unregister_handler(CRW_RSC_SCH);
+ idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
pr_alert("The CSS device driver initialization failed with "
"errno=%d\n", ret);
return ret;
}
+static void __init css_bus_cleanup(void)
+{
+ struct channel_subsystem *css;
+ int i;
+
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ css = channel_subsystems[i];
+ device_unregister(&css->pseudo_subchannel->dev);
+ css->pseudo_subchannel = NULL;
+ if (css_chsc_characteristics.secm)
+ device_remove_file(&css->device, &dev_attr_cm_enable);
+ device_unregister(&css->device);
+ }
+ bus_unregister(&css_bus_type);
+ crw_unregister_handler(CRW_RSC_SCH);
+ idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
+ isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+ int ret;
+
+ ret = css_bus_init();
+ if (ret)
+ return ret;
+ cio_work_q = create_singlethread_workqueue("cio");
+ if (!cio_work_q) {
+ ret = -ENOMEM;
+ goto out_bus;
+ }
+ ret = io_subchannel_init();
+ if (ret)
+ goto out_wq;
+
+ return ret;
+out_wq:
+ destroy_workqueue(cio_work_q);
+out_bus:
+ css_bus_cleanup();
+ return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+ struct css_driver *cssdrv = to_cssdriver(drv);
+
+ if (cssdrv->settle)
+ return cssdrv->settle();
+ return 0;
+}
+
+int css_complete_work(void)
+{
+ int ret;
+
+ /* Wait for the evaluation of subchannels to finish. */
+ ret = wait_event_interruptible(css_eval_wq,
+ atomic_read(&css_eval_scheduled) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ /* Wait for the subchannel type specific initialization to finish */
+ return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup before the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
+ /* Start initial subchannel evaluation. */
+ css_schedule_eval_all();
+ css_complete_work();
+ return 0;
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
+void channel_subsystem_reinit(void)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+
+ chsc_enable_facility(CHSC_SDA_OC_MSS);
+ chp_id_for_each(&chpid) {
+ chp = chpid_to_chp(chpid);
+ if (chp)
+ chp_update_desc(chp);
+ }
+}
+
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ /* Handle pending CRW's. */
+ crw_wait_for_channel_report();
+ ret = css_complete_work();
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+ .open = nonseekable_open,
+ .write = cio_settle_write,
+ .llseek = no_llseek,
+};
+
+static int __init cio_settle_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("cio_settle", S_IWUSR, NULL,
+ &cio_settle_proc_fops);
+ if (!entry)
+ return -ENOMEM;
+ return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
+
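/*
 * A hedged userspace example, not kernel code: trigger the settle and
 * block until all pending channel reports and subchannel evaluations have
 * been processed. Functionally equivalent to "echo 1 > /proc/cio_settle".
 */
#include <fcntl.h>
#include <unistd.h>

static int cio_settle_sync(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "1", 1);	/* blocks in cio_settle_write() */
	close(fd);
	return n == 1 ? 0 : -1;
}
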
int sch_is_pseudo_sch(struct subchannel *sch)
{
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
@@ -953,13 +1187,82 @@ static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-struct bus_type css_bus_type = {
+static int css_pm_prepare(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (mutex_is_locked(&sch->reg_mutex))
+ return -EAGAIN;
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ /* Notify drivers that they may not register children. */
+ return drv->prepare ? drv->prepare(sch) : 0;
+}
+
+static void css_pm_complete(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return;
+ drv = to_cssdriver(sch->dev.driver);
+ if (drv->complete)
+ drv->complete(sch);
+}
+
+static int css_pm_freeze(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->freeze ? drv->freeze(sch) : 0;
+}
+
+static int css_pm_thaw(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->thaw ? drv->thaw(sch) : 0;
+}
+
+static int css_pm_restore(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ css_update_ssd_info(sch);
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->restore ? drv->restore(sch) : 0;
+}
+
+static const struct dev_pm_ops css_pm_ops = {
+ .prepare = css_pm_prepare,
+ .complete = css_pm_complete,
+ .freeze = css_pm_freeze,
+ .thaw = css_pm_thaw,
+ .restore = css_pm_restore,
+};
+
+static struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
.remove = css_remove,
.shutdown = css_shutdown,
.uevent = css_uevent,
+ .pm = &css_pm_ops,
};
/**
@@ -971,9 +1274,7 @@ struct bus_type css_bus_type = {
*/
int css_driver_register(struct css_driver *cdrv)
{
- cdrv->drv.name = cdrv->name;
cdrv->drv.bus = &css_bus_type;
- cdrv->drv.owner = cdrv->owner;
return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
@@ -990,7 +1291,4 @@ void css_driver_unregister(struct css_driver *cdrv)
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
-subsys_initcall(init_channel_subsystem);
-
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 57ebf120f82..2c9107e2025 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
#include <asm/chpid.h>
#include <asm/schid.h>
+#include "cio.h"
+
/*
* path grouping stuff
*/
@@ -61,7 +63,6 @@ struct subchannel;
struct chp_link;
/**
* struct css_driver - device driver for subchannels
- * @owner: owning module
* @subchannel_type: subchannel type supported by this driver
* @drv: embedded device driver structure
* @irq: called on interrupts
@@ -70,10 +71,14 @@ struct chp_link;
* @probe: function called on probe
* @remove: function called on remove
* @shutdown: called at device shutdown
- * @name: name of the device driver
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
*/
struct css_driver {
- struct module *owner;
struct css_device_id *subchannel_type;
struct device_driver drv;
void (*irq)(struct subchannel *);
@@ -82,33 +87,31 @@ struct css_driver {
int (*probe)(struct subchannel *);
int (*remove)(struct subchannel *);
void (*shutdown)(struct subchannel *);
- const char *name;
+ int (*prepare) (struct subchannel *);
+ void (*complete) (struct subchannel *);
+ int (*freeze)(struct subchannel *);
+ int (*thaw) (struct subchannel *);
+ int (*restore)(struct subchannel *);
+ int (*settle)(void);
};
#define to_cssdriver(n) container_of(n, struct css_driver, drv)
-/*
- * all css_drivers have the css_bus_type
- */
-extern struct bus_type css_bus_type;
-
extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
-extern int css_probe_device(struct subchannel_id);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
+extern int max_ssid;
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
-#define __MAX_SUBCHANNEL 65535
-#define __MAX_SSID 3
-
struct channel_subsystem {
u8 cssid;
int valid;
@@ -125,17 +128,19 @@ struct channel_subsystem {
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
-extern struct bus_type css_bus_type;
extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
+int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
struct schib;
int css_sch_is_valid(struct schib *);
-extern struct workqueue_struct *slow_path_wq;
+extern struct workqueue_struct *cio_work_q;
void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 35441fa16be..dfef5e63cb7 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,12 +1,15 @@
/*
- * drivers/s390/cio/device.c
* bus driver for ccw devices
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -16,7 +19,9 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <linux/delay.h>
#include <linux/timer.h>
+#include <linux/kernel_stat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -32,12 +37,17 @@
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
+#include "chsc.h"
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };
+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and
@@ -122,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-struct bus_type ccw_bus_type;
-
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
@@ -131,6 +139,7 @@ static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
+static void recovery_func(unsigned long data);
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -138,70 +147,62 @@ static struct css_device_id io_subchannel_ids[] = {
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);
+static int io_subchannel_prepare(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ /*
+ * Don't allow suspend while a ccw device registration
+ * is still outstanding.
+ */
+ cdev = sch_get_cdev(sch);
+ if (cdev && !device_is_registered(&cdev->dev))
+ return -EAGAIN;
+ return 0;
+}
+
+static int io_subchannel_settle(void)
+{
+ int ret;
+
+ ret = wait_event_interruptible(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ return 0;
+}
+
static struct css_driver io_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "io_subchannel",
+ },
.subchannel_type = io_subchannel_ids,
- .name = "io_subchannel",
.irq = io_subchannel_irq,
.sch_event = io_subchannel_sch_event,
.chp_event = io_subchannel_chp_event,
.probe = io_subchannel_probe,
.remove = io_subchannel_remove,
.shutdown = io_subchannel_shutdown,
+ .prepare = io_subchannel_prepare,
+ .settle = io_subchannel_settle,
};
-struct workqueue_struct *ccw_device_work;
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;
-
-static void recovery_func(unsigned long data);
-
-static int __init
-init_ccw_bus_type (void)
+int __init io_subchannel_init(void)
{
int ret;
- init_waitqueue_head(&ccw_device_init_wq);
- atomic_set(&ccw_device_init_count, 0);
setup_timer(&recovery_timer, recovery_func, 0);
-
- ccw_device_work = create_singlethread_workqueue("cio");
- if (!ccw_device_work)
- return -ENOMEM; /* FIXME: better errno ? */
- slow_path_wq = create_singlethread_workqueue("kslowcrw");
- if (!slow_path_wq) {
- ret = -ENOMEM; /* FIXME: better errno ? */
- goto out_err;
- }
- if ((ret = bus_register (&ccw_bus_type)))
- goto out_err;
-
+ ret = bus_register(&ccw_bus_type);
+ if (ret)
+ return ret;
ret = css_driver_register(&io_subchannel_driver);
if (ret)
- goto out_err;
+ bus_unregister(&ccw_bus_type);
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
- flush_workqueue(ccw_device_work);
- return 0;
-out_err:
- if (ccw_device_work)
- destroy_workqueue(ccw_device_work);
- if (slow_path_wq)
- destroy_workqueue(slow_path_wq);
return ret;
}
-static void __exit
-cleanup_ccw_bus_type (void)
-{
- css_driver_unregister(&io_subchannel_driver);
- bus_unregister(&ccw_bus_type);
- destroy_workqueue(ccw_device_work);
-}
-
-subsys_initcall(init_ccw_bus_type);
-module_exit(cleanup_ccw_bus_type);
/************************ device handling **************************/
@@ -293,51 +294,18 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
- if (test_and_clear_bit(1, &cdev->private->registered))
+ if (device_is_registered(&cdev->dev)) {
+ /* Undo device_add(). */
device_del(&cdev->dev);
+ }
+ if (cdev->private->flags.initialized) {
+ cdev->private->flags.initialized = 0;
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ }
}
-static void ccw_device_remove_orphan_cb(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- ccw_device_unregister(cdev);
- put_device(&cdev->dev);
- /* Release cdev reference for workqueue processing. */
- put_device(&cdev->dev);
-}
-
-static void
-ccw_device_remove_disconnected(struct ccw_device *cdev)
-{
- unsigned long flags;
-
- /*
- * Forced offline in disconnected state means
- * 'throw away device'.
- */
- /* Get cdev reference for workqueue processing. */
- if (!get_device(&cdev->dev))
- return;
- if (ccw_device_is_orphan(cdev)) {
- /*
- * Deregister ccw device.
- * Unfortunately, we cannot do this directly from the
- * attribute method.
- */
- spin_lock_irqsave(cdev->ccwlock, flags);
- cdev->private->state = DEV_STATE_NOT_OPER;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_remove_orphan_cb);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- } else
- /* Deregister subchannel, which will kill the ccw device. */
- ccw_device_schedule_sch_unregister(cdev);
-}
+static void io_subchannel_quiesce(struct subchannel *);
/**
* ccw_device_set_offline() - disable a ccw device for I/O
@@ -352,7 +320,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
*/
int ccw_device_set_offline(struct ccw_device *cdev)
{
- int ret;
+ struct subchannel *sch;
+ int ret, state;
if (!cdev)
return -ENODEV;
@@ -364,32 +333,54 @@ int ccw_device_set_offline(struct ccw_device *cdev)
if (ret != 0)
return ret;
}
- cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
- ret = ccw_device_offline(cdev);
- if (ret == -ENODEV) {
- if (cdev->private->state != DEV_STATE_NOT_OPER) {
- cdev->private->state = DEV_STATE_OFFLINE;
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- }
+ sch = to_subchannel(cdev->dev.parent);
+ cdev->online = 0;
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
- /* Give up reference from ccw_device_set_online(). */
- put_device(&cdev->dev);
- return ret;
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
}
- spin_unlock_irq(cdev->ccwlock);
- if (ret == 0) {
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- /* Give up reference from ccw_device_set_online(). */
- put_device(&cdev->dev);
- } else {
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
- "device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
+ do {
+ ret = ccw_device_offline(cdev);
+ if (!ret)
+ break;
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+ "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
- cdev->online = 1;
+ if (ret != -EBUSY)
+ goto error;
+ state = cdev->private->state;
+ spin_unlock_irq(cdev->ccwlock);
+ io_subchannel_quiesce(sch);
+ spin_lock_irq(cdev->ccwlock);
+ cdev->private->state = state;
+ } while (ret == -EBUSY);
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Inform the user if set offline failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: The device entered boxed state while "
+ "being set offline\n", dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: The device stopped operating while "
+ "being set offline\n", dev_name(&cdev->dev));
}
- return ret;
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return 0;
+
+error:
+ cdev->private->state = DEV_STATE_OFFLINE;
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return -ENODEV;
}
/**
@@ -407,6 +398,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
int ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
+ int ret2;
if (!cdev)
return -ENODEV;
@@ -430,53 +422,88 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- if (cdev->private->state != DEV_STATE_ONLINE) {
+ spin_lock_irq(cdev->ccwlock);
+ /* Check if online processing was successful */
+ if ((cdev->private->state != DEV_STATE_ONLINE) &&
+ (cdev->private->state != DEV_STATE_W4SENSE)) {
+ spin_unlock_irq(cdev->ccwlock);
+ /* Inform the user that set online failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is boxed\n",
+ dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is not operational\n",
+ dev_name(&cdev->dev));
+ }
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return -ENODEV;
}
- if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
- cdev->online = 1;
- return 0;
- }
+ spin_unlock_irq(cdev->ccwlock);
+ if (cdev->drv->set_online)
+ ret = cdev->drv->set_online(cdev);
+ if (ret)
+ goto rollback;
+
spin_lock_irq(cdev->ccwlock);
- ret = ccw_device_offline(cdev);
+ cdev->online = 1;
+ spin_unlock_irq(cdev->ccwlock);
+ return 0;
+
+rollback:
+ spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
+ }
+ ret2 = ccw_device_offline(cdev);
+ if (ret2)
+ goto error;
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+
+error:
+ CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret2, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ cdev->private->state = DEV_STATE_OFFLINE;
spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
- "device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
- return (ret == 0) ? -ENODEV : ret;
+ return ret;
}
static int online_store_handle_offline(struct ccw_device *cdev)
{
- if (cdev->private->state == DEV_STATE_DISCONNECTED)
- ccw_device_remove_disconnected(cdev);
- else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+ spin_unlock_irq(cdev->ccwlock);
+ return 0;
+ }
+ if (cdev->drv && cdev->drv->set_offline)
return ccw_device_set_offline(cdev);
- return 0;
+ return -EINVAL;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
{
- int ret;
-
/* Do device recognition, if needed. */
if (cdev->private->state == DEV_STATE_BOXED) {
- ret = ccw_device_recognition(cdev);
- if (ret) {
- CIO_MSG_EVENT(0, "Couldn't start recognition "
- "for device 0.%x.%04x (ret=%d)\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- return ret;
- }
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q,
cdev->private->flags.recog_done);
if (cdev->private->state != DEV_STATE_OFFLINE)
@@ -484,8 +511,8 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
return -EAGAIN;
}
if (cdev->drv && cdev->drv->set_online)
- ccw_device_set_online(cdev);
- return 0;
+ return ccw_device_set_online(cdev);
+ return -EINVAL;
}
static int online_store_handle_online(struct ccw_device *cdev, int force)
@@ -515,16 +542,19 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
int force, ret;
unsigned long i;
- if ((cdev->private->state != DEV_STATE_OFFLINE &&
- cdev->private->state != DEV_STATE_ONLINE &&
- cdev->private->state != DEV_STATE_BOXED &&
- cdev->private->state != DEV_STATE_DISCONNECTED) ||
- atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+ /* Prevent conflict between multiple on-/offline processing requests. */
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
-
- if (cdev->drv && !try_module_get(cdev->drv->owner)) {
- atomic_set(&cdev->private->onoff, 0);
- return -EINVAL;
+ /* Prevent conflict between internal I/Os and on-/offline processing. */
+ if (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ /* Prevent conflict between pending work and on-/offline processing. */
+ if (work_pending(&cdev->private->todo_work)) {
+ ret = -EAGAIN;
+ goto out;
}
if (!strncmp(buf, "force\n", count)) {
force = 1;
@@ -532,10 +562,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
ret = 0;
} else {
force = 0;
- ret = strict_strtoul(buf, 16, &i);
+ ret = kstrtoul(buf, 16, &i);
}
if (ret)
goto out;
+
+ device_lock(dev);
switch (i) {
case 0:
ret = online_store_handle_offline(cdev);
@@ -546,9 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
default:
ret = -EINVAL;
}
+ device_unlock(dev);
+
out:
- if (cdev->drv)
- module_put(cdev->drv->owner);
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
@@ -578,6 +610,33 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
}
}
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int rc;
+
+ rc = chsc_siosl(sch->schid);
+ if (rc < 0) {
+ pr_warning("Logging for subchannel 0.%x.%04x failed with "
+ "errno=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ return rc;
+ }
+ pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return count;
+}
+
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%02x\n", sch->vpm);
+}
+
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -585,10 +644,14 @@ static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
+ &dev_attr_logging.attr,
+ &dev_attr_vpm.attr,
NULL,
};
@@ -610,94 +673,52 @@ static struct attribute_group ccwdev_attr_group = {
.attrs = ccwdev_attrs,
};
-static struct attribute_group *ccwdev_attr_groups[] = {
+static const struct attribute_group *ccwdev_attr_groups[] = {
&ccwdev_attr_group,
NULL,
};
-/* this is a simple abstraction for device_register that sets the
- * correct bus type and adds the bus specific files */
-static int ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_add(struct ccw_device *cdev)
{
struct device *dev = &cdev->dev;
- int ret;
dev->bus = &ccw_bus_type;
-
- if ((ret = device_add(dev)))
- return ret;
-
- set_bit(1, &cdev->private->registered);
- return ret;
+ return device_add(dev);
}
-struct match_data {
- struct ccw_dev_id dev_id;
- struct ccw_device * sibling;
-};
-
-static int
-match_devno(struct device * dev, void * data)
+static int match_dev_id(struct device *dev, void *data)
{
- struct match_data * d = data;
- struct ccw_device * cdev;
-
- cdev = to_ccwdev(dev);
- if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
- !ccw_device_is_orphan(cdev) &&
- ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
- (cdev != d->sibling))
- return 1;
- return 0;
-}
-
-static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
- struct ccw_device *sibling)
-{
- struct device *dev;
- struct match_data data;
-
- data.dev_id = *dev_id;
- data.sibling = sibling;
- dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
-
- return dev ? to_ccwdev(dev) : NULL;
-}
-
-static int match_orphan(struct device *dev, void *data)
-{
- struct ccw_dev_id *dev_id;
- struct ccw_device *cdev;
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_dev_id *dev_id = data;
- dev_id = data;
- cdev = to_ccwdev(dev);
return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}
-static struct ccw_device *
-get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
- struct ccw_dev_id *dev_id)
+/**
+ * get_ccwdev_by_dev_id() - obtain device from a ccw device id
+ * @dev_id: id of the device to be searched for
+ *
+ * This function searches all devices attached to the ccw bus for a device
+ * matching @dev_id.
+ * Returns:
+ * If a device is found, its reference count is increased and the device
+ * is returned; otherwise %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
struct device *dev;
- dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
- match_orphan);
+ dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
return dev ? to_ccwdev(dev) : NULL;
}
+EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
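(Editorial usage sketch, not part of this patch: a caller of get_ccwdev_by_dev_id() owns the reference taken by the lookup and must drop it with put_device() when done. The helper name and device number below are hypothetical.)

static int example_lookup(u16 devno)
{
	struct ccw_dev_id id = { .ssid = 0, .devno = devno };	/* hypothetical id */
	struct ccw_device *cdev;

	cdev = get_ccwdev_by_dev_id(&id);
	if (!cdev)
		return -ENODEV;
	/* ... use cdev here, taking cdev->ccwlock where required ... */
	put_device(&cdev->dev);	/* release reference from get_ccwdev_by_dev_id() */
	return 0;
}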
-void ccw_device_do_unbind_bind(struct work_struct *work)
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct subchannel *sch;
int ret;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- sch = to_subchannel(cdev->dev.parent);
-
- if (test_bit(1, &cdev->private->registered)) {
+ if (device_is_registered(&cdev->dev)) {
device_release_driver(&cdev->dev);
ret = device_attach(&cdev->dev);
WARN_ON(ret == -ENODEV);
@@ -731,23 +752,51 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
return ERR_PTR(-ENOMEM);
}
+static void ccw_device_todo(struct work_struct *work);
+
static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
- cdev->private->cdev = cdev;
- atomic_set(&cdev->private->onoff, 0);
+ struct ccw_device_private *priv = cdev->private;
+ int ret;
+
+ priv->cdev = cdev;
+ priv->int_class = IRQIO_CIO;
+ priv->state = DEV_STATE_NOT_OPER;
+ priv->dev_id.devno = sch->schib.pmcw.dev;
+ priv->dev_id.ssid = sch->schid.ssid;
+ priv->schid = sch->schid;
+
+ INIT_WORK(&priv->todo_work, ccw_device_todo);
+ INIT_LIST_HEAD(&priv->cmb_list);
+ init_waitqueue_head(&priv->wait_q);
+ init_timer(&priv->timer);
+
+ atomic_set(&priv->onoff, 0);
+ cdev->ccwlock = sch->lock;
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
- INIT_WORK(&cdev->private->kick_work, NULL);
cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
+ ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret)
+ goto out_put;
if (!get_device(&sch->dev)) {
- /* Release reference from device_initialize(). */
- put_device(&cdev->dev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put;
}
+ priv->flags.initialized = 1;
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
return 0;
+
+out_put:
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ return ret;
}
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
@@ -758,84 +807,13 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
cdev = io_subchannel_allocate_dev(sch);
if (!IS_ERR(cdev)) {
ret = io_subchannel_initialize_dev(sch, cdev);
- if (ret) {
- kfree(cdev);
+ if (ret)
cdev = ERR_PTR(ret);
- }
}
return cdev;
}
-static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
-
-static void sch_attach_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- css_update_ssd_info(sch);
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
- cdev->private->schid = sch->schid;
- cdev->ccwlock = sch->lock;
- ccw_device_trigger_reprobe(cdev);
- spin_unlock_irq(sch->lock);
-}
-
-static void sch_attach_disconnected_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- struct subchannel *other_sch;
- int ret;
-
- /* Get reference for new parent. */
- if (!get_device(&sch->dev))
- return;
- other_sch = to_subchannel(cdev->dev.parent);
- /* Note: device_move() changes cdev->dev.parent */
- ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
- "(ret=%d)!\n", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Put reference for new parent. */
- put_device(&sch->dev);
- return;
- }
- sch_set_cdev(other_sch, NULL);
- /* No need to keep a subchannel without ccw device around. */
- css_sch_device_unregister(other_sch);
- sch_attach_device(sch, cdev);
- /* Put reference for old parent. */
- put_device(&other_sch->dev);
-}
-
-static void sch_attach_orphaned_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- int ret;
- struct subchannel *pseudo_sch;
-
- /* Get reference for new parent. */
- if (!get_device(&sch->dev))
- return;
- pseudo_sch = to_subchannel(cdev->dev.parent);
- /*
- * Try to move the ccw device to its new subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
- ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
- "failed (ret=%d)!\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Put reference for new parent. */
- put_device(&sch->dev);
- return;
- }
- sch_attach_device(sch, cdev);
- /* Put reference on pseudo subchannel. */
- put_device(&pseudo_sch->dev);
-}
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
@@ -848,100 +826,19 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
css_sch_device_unregister(sch);
return;
}
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
- spin_unlock_irq(sch->lock);
/* Start recognition for the new ccw device. */
- if (io_subchannel_recog(cdev, sch)) {
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, NULL);
- spin_unlock_irq(sch->lock);
- css_sch_device_unregister(sch);
- /* Put reference from io_subchannel_create_ccwdev(). */
- put_device(&sch->dev);
- /* Give up initial reference. */
- put_device(&cdev->dev);
- }
-}
-
-
-void ccw_device_move_to_orphanage(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct ccw_device *replacing_cdev;
- struct subchannel *sch;
- int ret;
- struct channel_subsystem *css;
- struct ccw_dev_id dev_id;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- sch = to_subchannel(cdev->dev.parent);
- css = to_css(sch->dev.parent);
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
-
- /* Increase refcount for pseudo subchannel. */
- get_device(&css->pseudo_subchannel->dev);
- /*
- * Move the orphaned ccw device to the orphanage so the replacing
- * ccw device can take its place on the subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
- ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
- DPM_ORDER_NONE);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
- "(ret=%d)!\n", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Decrease refcount for pseudo subchannel again. */
- put_device(&css->pseudo_subchannel->dev);
- return;
- }
- cdev->ccwlock = css->pseudo_subchannel->lock;
- /*
- * Search for the replacing ccw device
- * - among the disconnected devices
- * - in the orphanage
- */
- replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
- if (replacing_cdev) {
- sch_attach_disconnected_device(sch, replacing_cdev);
- /* Release reference from get_disc_ccwdev_by_dev_id() */
- put_device(&replacing_cdev->dev);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
- return;
- }
- replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
- if (replacing_cdev) {
- sch_attach_orphaned_device(sch, replacing_cdev);
- /* Release reference from get_orphaned_ccwdev_by_dev_id() */
- put_device(&replacing_cdev->dev);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
- return;
- }
- sch_create_and_recog_new_device(sch);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
+ io_subchannel_recog(cdev, sch);
}
/*
* Register recognized device.
*/
-static void
-io_subchannel_register(struct work_struct *work)
+static void io_subchannel_register(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
struct subchannel *sch;
- int ret;
+ int ret, adjust_init_count = 1;
unsigned long flags;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
/*
* Check if subchannel is still registered. It may have become
@@ -968,6 +865,7 @@ io_subchannel_register(struct work_struct *work)
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
}
+ adjust_init_count = 0;
goto out;
}
/*
@@ -977,7 +875,7 @@ io_subchannel_register(struct work_struct *work)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
/* make it known to the system */
- ret = ccw_device_register(cdev);
+ ret = ccw_device_add(cdev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
@@ -993,41 +891,23 @@ out:
cdev->private->flags.recog_done = 1;
wake_up(&cdev->private->wait_q);
out_err:
- /* Release reference for workqueue processing. */
- put_device(&cdev->dev);
- if (atomic_dec_and_test(&ccw_device_init_count))
+ if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
-static void ccw_device_call_sch_unregister(struct work_struct *work)
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
struct subchannel *sch;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
/* Get subchannel reference for local processing. */
if (!get_device(cdev->dev.parent))
return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- /* Release cdev reference for workqueue processing.*/
- put_device(&cdev->dev);
/* Release subchannel reference for local processing. */
put_device(&sch->dev);
}
-void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
-{
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(slow_path_wq, &cdev->private->kick_work);
-}
-
/*
* subchannel recognition done. Called from the state machine.
*/
@@ -1044,9 +924,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
/* Remove device found not operational. */
- if (!get_device(&cdev->dev))
- break;
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
break;
@@ -1055,102 +933,92 @@ io_subchannel_recog_done(struct ccw_device *cdev)
* We can't register the device in interrupt context so
* we schedule a work item.
*/
- if (!get_device(&cdev->dev))
- break;
- PREPARE_WORK(&cdev->private->kick_work,
- io_subchannel_register);
- queue_work(slow_path_wq, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
break;
}
}
-static int
-io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
- int rc;
- struct ccw_device_private *priv;
-
- sch_set_cdev(sch, cdev);
- cdev->ccwlock = sch->lock;
-
- /* Init private data. */
- priv = cdev->private;
- priv->dev_id.devno = sch->schib.pmcw.dev;
- priv->dev_id.ssid = sch->schid.ssid;
- priv->schid = sch->schid;
- priv->state = DEV_STATE_NOT_OPER;
- INIT_LIST_HEAD(&priv->cmb_list);
- init_waitqueue_head(&priv->wait_q);
- init_timer(&priv->timer);
-
- /* Set an initial name for the device. */
- if (cio_is_console(sch->schid))
- cdev->dev.init_name = cio_get_console_cdev_name(sch);
- else
- dev_set_name(&cdev->dev, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
-
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
spin_lock_irq(sch->lock);
- rc = ccw_device_recognition(cdev);
+ ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
- if (rc) {
- if (atomic_dec_and_test(&ccw_device_init_count))
- wake_up(&ccw_device_init_wq);
- }
- return rc;
}
-static void ccw_device_move_to_sch(struct work_struct *work)
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+ struct subchannel *sch)
{
- struct ccw_device_private *priv;
- int rc;
- struct subchannel *sch;
- struct ccw_device *cdev;
- struct subchannel *former_parent;
+ struct subchannel *old_sch;
+ int rc, old_enabled = 0;
- priv = container_of(work, struct ccw_device_private, kick_work);
- sch = priv->sch;
- cdev = priv->cdev;
- former_parent = to_subchannel(cdev->dev.parent);
- /* Get reference for new parent. */
+ old_sch = to_subchannel(cdev->dev.parent);
+ /* Obtain child reference for new parent. */
if (!get_device(&sch->dev))
- return;
+ return -ENODEV;
+
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ old_enabled = old_sch->schib.pmcw.ena;
+ rc = 0;
+ if (old_enabled)
+ rc = cio_disable_subchannel(old_sch);
+ spin_unlock_irq(old_sch->lock);
+ if (rc == -EBUSY) {
+ /* Release child reference for new parent. */
+ put_device(&sch->dev);
+ return rc;
+ }
+ }
+
mutex_lock(&sch->reg_mutex);
- /*
- * Try to move the ccw device to its new subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
mutex_unlock(&sch->reg_mutex);
if (rc) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
- "0.%x.%04x failed (ret=%d)!\n",
+ CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, rc);
- css_sch_device_unregister(sch);
- /* Put reference for new parent again. */
+ sch->schib.pmcw.dev, rc);
+ if (old_enabled) {
+ /* Try to reenable the old subchannel. */
+ spin_lock_irq(old_sch->lock);
+ cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+ spin_unlock_irq(old_sch->lock);
+ }
+ /* Release child reference for new parent. */
put_device(&sch->dev);
- goto out;
+ return rc;
}
- if (!sch_is_pseudo_sch(former_parent)) {
- spin_lock_irq(former_parent->lock);
- sch_set_cdev(former_parent, NULL);
- spin_unlock_irq(former_parent->lock);
- css_sch_device_unregister(former_parent);
- /* Reset intparm to zeroes. */
- former_parent->config.intparm = 0;
- cio_commit_config(former_parent);
+ /* Clean up old subchannel. */
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ sch_set_cdev(old_sch, NULL);
+ spin_unlock_irq(old_sch->lock);
+ css_schedule_eval(old_sch->schid);
}
- sch_attach_device(sch, cdev);
-out:
- /* Put reference for old parent. */
- put_device(&former_parent->dev);
- put_device(&cdev->dev);
+ /* Release child reference for old parent. */
+ put_device(&old_sch->dev);
+ /* Initialize new subchannel. */
+ spin_lock_irq(sch->lock);
+ cdev->private->schid = sch->schid;
+ cdev->ccwlock = sch->lock;
+ if (!sch_is_pseudo_sch(sch))
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
+ if (!sch_is_pseudo_sch(sch))
+ css_update_ssd_info(sch);
+ return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_subsystem *css = to_css(sch->dev.parent);
+
+ return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
static void io_subchannel_irq(struct subchannel *sch)
@@ -1159,19 +1027,18 @@ static void io_subchannel_irq(struct subchannel *sch)
cdev = sch_get_cdev(sch);
- CIO_TRACE_EVENT(3, "IRQ");
- CIO_TRACE_EVENT(3, dev_name(&sch->dev));
+ CIO_TRACE_EVENT(6, "IRQ");
+ CIO_TRACE_EVENT(6, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+ else
+ inc_irq_stat(IRQIO_CIO);
}
void io_subchannel_init_config(struct subchannel *sch)
{
memset(&sch->config, 0, sizeof(sch->config));
sch->config.csense = 1;
- /* Use subchannel mp mode when there is more than 1 installed CHPID. */
- if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
- sch->config.mp = 1;
}
static void io_subchannel_init_fields(struct subchannel *sch)
@@ -1192,39 +1059,17 @@ static void io_subchannel_init_fields(struct subchannel *sch)
io_subchannel_init_config(sch);
}
-static void io_subchannel_do_unreg(struct work_struct *work)
-{
- struct subchannel *sch;
-
- sch = container_of(work, struct subchannel, work);
- css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- put_device(&sch->dev);
-}
-
-/* Schedule unregister if we have no cdev. */
-static void io_subchannel_schedule_removal(struct subchannel *sch)
-{
- get_device(&sch->dev);
- INIT_WORK(&sch->work, io_subchannel_do_unreg);
- queue_work(slow_path_wq, &sch->work);
-}
-
/*
* Note: We always return 0 so that we bind to the device even on error.
* This is needed so that our remove function is called on unregister.
*/
static int io_subchannel_probe(struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
int rc;
- unsigned long flags;
- struct ccw_dev_id dev_id;
- cdev = sch_get_cdev(sch);
- if (cdev) {
+ if (cio_is_console(sch->schid)) {
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group);
if (rc)
@@ -1233,25 +1078,21 @@ static int io_subchannel_probe(struct subchannel *sch)
"0.%x.%04x (rc=%d)\n",
sch->schid.ssid, sch->schid.sch_no, rc);
/*
- * This subchannel already has an associated ccw_device.
+ * The console subchannel already has an associated ccw_device.
* Throw the delayed uevent for the subchannel, register
- * the ccw_device and exit. This happens for all early
- * devices, e.g. the console.
+ * the ccw_device and exit.
*/
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
- cdev->dev.groups = ccwdev_attr_groups;
- device_initialize(&cdev->dev);
- ccw_device_register(cdev);
- /*
- * Check if the device is already online. If it is
- * the reference count needs to be corrected since we
- * didn't obtain a reference in ccw_device_set_online.
- */
- if (cdev->private->state != DEV_STATE_NOT_OPER &&
- cdev->private->state != DEV_STATE_OFFLINE &&
- cdev->private->state != DEV_STATE_BOXED)
- get_device(&cdev->dev);
+ cdev = sch_get_cdev(sch);
+ rc = ccw_device_add(cdev);
+ if (rc) {
+ /* Release online reference. */
+ put_device(&cdev->dev);
+ goto out_schedule;
+ }
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
return 0;
}
io_subchannel_init_fields(sch);
@@ -1263,81 +1104,44 @@ static int io_subchannel_probe(struct subchannel *sch)
if (rc)
goto out_schedule;
/* Allocate I/O subchannel private data. */
- sch->private = kzalloc(sizeof(struct io_subchannel_private),
- GFP_KERNEL | GFP_DMA);
- if (!sch->private)
- goto out_err;
- /*
- * First check if a fitting device may be found amongst the
- * disconnected devices or in the orphanage.
- */
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
- cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
- if (!cdev)
- cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
- &dev_id);
- if (cdev) {
- /*
- * Schedule moving the device until when we have a registered
- * subchannel to move to and succeed the probe. We can
- * unregister later again, when the probe is through.
- */
- cdev->private->sch = sch;
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_move_to_sch);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- return 0;
- }
- cdev = io_subchannel_create_ccwdev(sch);
- if (IS_ERR(cdev))
- goto out_err;
- rc = io_subchannel_recog(cdev, sch);
- if (rc) {
- spin_lock_irqsave(sch->lock, flags);
- io_subchannel_recog_done(cdev);
- spin_unlock_irqrestore(sch->lock, flags);
- }
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv)
+ goto out_schedule;
+
+ set_io_private(sch, io_priv);
+ css_schedule_eval(sch->schid);
return 0;
-out_err:
- kfree(sch->private);
- sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+
out_schedule:
- io_subchannel_schedule_removal(sch);
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ spin_unlock_irq(sch->lock);
return 0;
}
static int
io_subchannel_remove (struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv = to_io_private(sch);
struct ccw_device *cdev;
- unsigned long flags;
cdev = sch_get_cdev(sch);
if (!cdev)
- return 0;
+ goto out_free;
+ io_subchannel_quiesce(sch);
/* Set ccw device to not operational and drop reference. */
- spin_lock_irqsave(cdev->ccwlock, flags);
+ spin_lock_irq(cdev->ccwlock);
sch_set_cdev(sch, NULL);
+ set_io_private(sch, NULL);
cdev->private->state = DEV_STATE_NOT_OPER;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ spin_unlock_irq(cdev->ccwlock);
ccw_device_unregister(cdev);
- put_device(&cdev->dev);
- kfree(sch->private);
+out_free:
+ kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
}
-static int io_subchannel_notify(struct subchannel *sch, int event)
-{
- struct ccw_device *cdev;
-
- cdev = sch_get_cdev(sch);
- if (!cdev)
- return 0;
- return ccw_device_notify(cdev, event);
-}
-
static void io_subchannel_verify(struct subchannel *sch)
{
struct ccw_device *cdev;
@@ -1347,36 +1151,6 @@ static void io_subchannel_verify(struct subchannel *sch)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
- if (cio_update_schib(sch))
- return 0;
- if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
- return 1;
- return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- if (cio_clear(sch)) {
- /* Recheck device in case clear failed. */
- sch->lpm = 0;
- if (cdev->online)
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
- else
- css_schedule_eval(sch->schid);
- return;
- }
- cdev->private->state = DEV_STATE_CLEAR_VERIFY;
- /* Request retry of internal operation. */
- cdev->private->flags.intretry = 1;
- /* Call handler. */
- if (cdev->handler)
- cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-EIO));
-}
-
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
struct ccw_device *cdev;
@@ -1384,23 +1158,30 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
cdev = sch_get_cdev(sch);
if (!cdev)
return;
- if (check_for_io_on_path(sch, mask)) {
- if (cdev->private->state == DEV_STATE_ONLINE)
- ccw_device_kill_io(cdev);
- else {
- terminate_internal_io(sch, cdev);
- /* Re-start path verification. */
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
- }
- } else
- /* trigger path verification. */
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ if (cio_update_schib(sch))
+ goto err;
+ /* Check for I/O on path. */
+ if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+ goto out;
+ if (cdev->private->state == DEV_STATE_ONLINE) {
+ ccw_device_kill_io(cdev);
+ goto out;
+ }
+ if (cio_clear(sch))
+ goto err;
+out:
+ /* Trigger path verification. */
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return;
+err:
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
static int io_subchannel_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
+ struct ccw_device *cdev = sch_get_cdev(sch);
int mask;
mask = chp_ssd_get_mask(&sch->ssd_info, link);
@@ -1410,68 +1191,72 @@ static int io_subchannel_chp_event(struct subchannel *sch,
case CHP_VARY_OFF:
sch->opm &= ~mask;
sch->lpm &= ~mask;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_VARY_ON:
sch->opm |= mask;
sch->lpm |= mask;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
case CHP_OFFLINE:
if (cio_update_schib(sch))
return -ENODEV;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_ONLINE:
if (cio_update_schib(sch))
return -ENODEV;
sch->lpm |= mask & sch->opm;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
}
return 0;
}
-static void
-io_subchannel_shutdown(struct subchannel *sch)
+static void io_subchannel_quiesce(struct subchannel *sch)
{
struct ccw_device *cdev;
int ret;
+ spin_lock_irq(sch->lock);
cdev = sch_get_cdev(sch);
-
if (cio_is_console(sch->schid))
- return;
+ goto out_unlock;
if (!sch->schib.pmcw.ena)
- /* Nothing to do. */
- return;
+ goto out_unlock;
ret = cio_disable_subchannel(sch);
if (ret != -EBUSY)
- /* Subchannel is disabled, we're done. */
- return;
- cdev->private->state = DEV_STATE_QUIESCE;
+ goto out_unlock;
if (cdev->handler)
- cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-EIO));
- ret = ccw_device_cancel_halt_clear(cdev);
- if (ret == -EBUSY) {
- ccw_device_set_timeout(cdev, HZ/10);
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+ while (ret == -EBUSY) {
+ cdev->private->state = DEV_STATE_QUIESCE;
+ cdev->private->iretry = 255;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q,
+ cdev->private->state != DEV_STATE_QUIESCE);
+ spin_lock_irq(sch->lock);
+ }
+ ret = cio_disable_subchannel(sch);
}
- cio_disable_subchannel(sch);
+out_unlock:
+ spin_unlock_irq(sch->lock);
}
-static int io_subchannel_get_status(struct subchannel *sch)
+static void io_subchannel_shutdown(struct subchannel *sch)
{
- struct schib schib;
-
- if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
- return CIO_GONE;
- if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
- return CIO_REVALIDATE;
- if (!sch->lpm)
- return CIO_NO_PATH;
- return CIO_OPER;
+ io_subchannel_quiesce(sch);
}
static int device_is_disconnected(struct ccw_device *cdev)
@@ -1550,22 +1335,18 @@ static void ccw_device_schedule_recovery(void)
static int purge_fn(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
- struct ccw_device_private *priv = cdev->private;
- int unreg;
+ struct ccw_dev_id *id = &cdev->private->dev_id;
spin_lock_irq(cdev->ccwlock);
- unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
- (priv->state == DEV_STATE_OFFLINE);
+ if (is_blacklisted(id->ssid, id->devno) &&
+ (cdev->private->state == DEV_STATE_OFFLINE) &&
+ (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
+ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+ id->devno);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ atomic_set(&cdev->private->onoff, 0);
+ }
spin_unlock_irq(cdev->ccwlock);
- if (!unreg)
- goto out;
- if (!get_device(&cdev->dev))
- goto out;
- CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
- priv->dev_id.devno);
- ccw_device_schedule_sch_unregister(cdev);
-
-out:
/* Abort loop in case of pending signal. */
if (signal_pending(current))
return -EINTR;
@@ -1585,7 +1366,7 @@ int ccw_purge_blacklisted(void)
return 0;
}
-static void device_set_disconnected(struct ccw_device *cdev)
+void ccw_device_set_disconnected(struct ccw_device *cdev)
{
if (!cdev)
return;
@@ -1607,181 +1388,308 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
cdev->private->state = DEV_STATE_NOT_OPER;
}
-static int io_subchannel_sch_event(struct subchannel *sch, int slow)
+enum io_sch_action {
+ IO_SCH_UNREG,
+ IO_SCH_ORPH_UNREG,
+ IO_SCH_ATTACH,
+ IO_SCH_UNREG_ATTACH,
+ IO_SCH_ORPH_ATTACH,
+ IO_SCH_REPROBE,
+ IO_SCH_VERIFY,
+ IO_SCH_DISC,
+ IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
{
- int event, ret, disc;
- unsigned long flags;
- enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
struct ccw_device *cdev;
- spin_lock_irqsave(sch->lock, flags);
cdev = sch_get_cdev(sch);
- disc = device_is_disconnected(cdev);
- if (disc && slow) {
- /* Disconnected devices are evaluated directly only.*/
- spin_unlock_irqrestore(sch->lock, flags);
- return 0;
+ if (cio_update_schib(sch)) {
+ /* Not operational. */
+ if (!cdev)
+ return IO_SCH_UNREG;
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ return IO_SCH_UNREG;
+ return IO_SCH_ORPH_UNREG;
}
- /* No interrupt after machine check - kill pending timers. */
- if (cdev)
- ccw_device_set_timeout(cdev, 0);
- if (!disc && !slow) {
- /* Non-disconnected devices are evaluated on the slow path. */
- spin_unlock_irqrestore(sch->lock, flags);
- return -EAGAIN;
+ /* Operational. */
+ if (!cdev)
+ return IO_SCH_ATTACH;
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_ORPH_ATTACH;
}
- event = io_subchannel_get_status(sch);
- CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
- sch->schid.ssid, sch->schid.sch_no, event,
- disc ? "disconnected" : "normal",
- slow ? "slow" : "fast");
- /* Analyze subchannel status. */
- action = NONE;
- switch (event) {
- case CIO_NO_PATH:
- if (disc) {
- /* Check if paths have become available. */
- action = REPROBE;
- break;
- }
- /* fall through */
- case CIO_GONE:
- /* Ask driver what to do with device. */
- if (io_subchannel_notify(sch, event))
- action = DISC;
- else
- action = UNREGISTER;
+ if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
+ return IO_SCH_UNREG;
+ return IO_SCH_DISC;
+ }
+ if (device_is_disconnected(cdev))
+ return IO_SCH_REPROBE;
+ if (cdev->online && !cdev->private->flags.resuming)
+ return IO_SCH_VERIFY;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ unsigned long flags;
+ struct ccw_device *cdev;
+ struct ccw_dev_id dev_id;
+ enum io_sch_action action;
+ int rc = -EAGAIN;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+ cdev = sch_get_cdev(sch);
+ if (cdev && work_pending(&cdev->private->todo_work))
+ goto out_unlock;
+ action = sch_get_action(sch);
+ CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, process,
+ action);
+ /* Perform immediate actions while holding the lock. */
+ switch (action) {
+ case IO_SCH_REPROBE:
+ /* Trigger device recognition. */
+ ccw_device_trigger_reprobe(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_VERIFY:
+ /* Trigger path verification. */
+ io_subchannel_verify(sch);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_DISC:
+ ccw_device_set_disconnected(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ ccw_device_set_disconnected(cdev);
break;
- case CIO_REVALIDATE:
- /* Device will be removed, so no notify necessary. */
- if (disc)
- /* Reprobe because immediate unregister might block. */
- action = REPROBE;
- else
- action = UNREGISTER_PROBE;
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_UNREG:
+ if (!cdev)
+ break;
+ if (cdev->private->state == DEV_STATE_SENSE_ID) {
+ /*
+ * Note: delayed work triggered by this event
+			 * and repeated calls to sch_event are synchronized
+			 * by the above work_pending() check on todo_work.
+ */
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ } else
+ ccw_device_set_notoper(cdev);
break;
- case CIO_OPER:
- if (disc)
- /* Get device operational again. */
- action = REPROBE;
+ case IO_SCH_NOP:
+ rc = 0;
+ goto out_unlock;
+ default:
break;
}
- /* Perform action. */
- ret = 0;
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* All other actions require process context. */
+ if (!process)
+ goto out;
+ /* Handle attached ccw device. */
switch (action) {
- case UNREGISTER:
- case UNREGISTER_PROBE:
- ccw_device_set_notoper(cdev);
- /* Unregister device (will use subchannel lock). */
- spin_unlock_irqrestore(sch->lock, flags);
- css_sch_device_unregister(sch);
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ /* Move ccw device to orphanage. */
+ rc = ccw_device_move_to_orph(cdev);
+ if (rc)
+ goto out;
+ break;
+ case IO_SCH_UNREG_ATTACH:
spin_lock_irqsave(sch->lock, flags);
-
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
+ if (cdev->private->flags.resuming) {
+ /* Device will be handled later. */
+ rc = 0;
+ goto out_unlock;
+ }
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Unregister ccw device. */
+ ccw_device_unregister(cdev);
break;
- case REPROBE:
- ccw_device_trigger_reprobe(cdev);
+ default:
break;
- case DISC:
- device_set_disconnected(cdev);
+ }
+ /* Handle subchannel. */
+ switch (action) {
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_UNREG:
+ if (!cdev || !cdev->private->flags.resuming)
+ css_sch_device_unregister(sch);
+ break;
+ case IO_SCH_ORPH_ATTACH:
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_ATTACH:
+ dev_id.ssid = sch->schid.ssid;
+ dev_id.devno = sch->schib.pmcw.dev;
+ cdev = get_ccwdev_by_dev_id(&dev_id);
+ if (!cdev) {
+ sch_create_and_recog_new_device(sch);
+ break;
+ }
+ rc = ccw_device_move_to_sch(cdev, sch);
+ if (rc) {
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
+ goto out;
+ }
+ spin_lock_irqsave(sch->lock, flags);
+ ccw_device_trigger_reprobe(cdev);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
break;
default:
break;
}
- spin_unlock_irqrestore(sch->lock, flags);
- /* Probe if necessary. */
- if (action == UNREGISTER_PROBE)
- ret = css_probe_device(sch->schid);
+ return 0;
- return ret;
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+out:
+ return rc;
}
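(Editorial worked example, not part of this patch, tracing one event through the function above.)

/*
 * Example: a machine check leaves the subchannel inoperative while a
 * ccw device is still attached. cio_update_schib() fails in
 * sch_get_action(); if the driver's notify callback accepts CIO_GONE
 * (NOTIFY_OK), the action is IO_SCH_ORPH_UNREG: the device is marked
 * disconnected under the lock and, once the function runs in process
 * context, moved to the orphanage while the subchannel is
 * unregistered. If the driver declines, IO_SCH_UNREG removes the
 * subchannel and with it the device.
 */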
-#ifdef CONFIG_CCW_CONSOLE
-static struct ccw_device console_cdev;
-static char console_cdev_name[10] = "0.x.xxxx";
-static struct ccw_device_private console_private;
-static int console_cdev_in_use;
-
-static DEFINE_SPINLOCK(ccw_console_lock);
-
-spinlock_t * cio_get_console_lock(void)
+static void ccw_device_set_int_class(struct ccw_device *cdev)
{
- return &ccw_console_lock;
+ struct ccw_driver *cdrv = cdev->drv;
+
+ /* Note: we interpret class 0 in this context as an uninitialized
+ * field since it translates to a non-I/O interrupt class. */
+ if (cdrv->int_class != 0)
+ cdev->private->int_class = cdrv->int_class;
+ else
+ cdev->private->int_class = IRQIO_CIO;
}
-static int ccw_device_console_enable(struct ccw_device *cdev,
- struct subchannel *sch)
+#ifdef CONFIG_CCW_CONSOLE
+int __init ccw_device_enable_console(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
int rc;
- /* Attach subchannel private data. */
- sch->private = cio_get_console_priv();
- memset(sch->private, 0, sizeof(struct io_subchannel_private));
+ if (!cdev->drv || !cdev->handler)
+ return -EINVAL;
+
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
return rc;
sch->driver = &io_subchannel_driver;
- /* Initialize the ccw_device structure. */
- cdev->dev.parent= &sch->dev;
- rc = io_subchannel_recog(cdev, sch);
- if (rc)
- return rc;
-
+ io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- rc = -EIO;
- if (cdev->private->state != DEV_STATE_OFFLINE)
+ ccw_device_wait_idle(cdev);
+
+ /* Hold on to an extra reference while device is online. */
+ get_device(&cdev->dev);
+ rc = ccw_device_online(cdev);
+ if (rc)
goto out_unlock;
- ccw_device_online(cdev);
+
while (!dev_fsm_final_state(cdev))
- wait_cons_dev();
- if (cdev->private->state != DEV_STATE_ONLINE)
- goto out_unlock;
- rc = 0;
+ ccw_device_wait_idle(cdev);
+
+ if (cdev->private->state == DEV_STATE_ONLINE)
+ cdev->online = 1;
+ else
+ rc = -EIO;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
- return 0;
+ if (rc) /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return rc;
}
-struct ccw_device *
-ccw_device_probe_console(void)
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
struct subchannel *sch;
- int ret;
- if (xchg(&console_cdev_in_use, 1) != 0)
- return ERR_PTR(-EBUSY);
sch = cio_probe_console();
- if (IS_ERR(sch)) {
- console_cdev_in_use = 0;
- return (void *) sch;
+ if (IS_ERR(sch))
+ return ERR_CAST(sch);
+
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv) {
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
- memset(&console_cdev, 0, sizeof(struct ccw_device));
- memset(&console_private, 0, sizeof(struct ccw_device_private));
- console_cdev.private = &console_private;
- console_private.cdev = &console_cdev;
- ret = ccw_device_console_enable(&console_cdev, sch);
- if (ret) {
- cio_release_console();
- console_cdev_in_use = 0;
- return ERR_PTR(ret);
+ set_io_private(sch, io_priv);
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ put_device(&sch->dev);
+ kfree(io_priv);
+ return cdev;
}
- console_cdev.online = 1;
- return &console_cdev;
+ cdev->drv = drv;
+ ccw_device_set_int_class(cdev);
+ return cdev;
}
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct io_subchannel_private *io_priv = to_io_private(sch);
-const char *cio_get_console_cdev_name(struct subchannel *sch)
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with the device lock held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ while (1) {
+ cio_tsch(sch);
+ if (sch->schib.scsw.cmd.actl == 0)
+ break;
+ udelay_simple(100);
+ }
+}
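(Editorial usage sketch, mirroring the console bring-up path earlier in this patch: recognition is driven to completion by polling, since interrupts are not yet available. Not part of the patch.)

	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);	/* poll until actl == 0 */
	spin_unlock_irq(cdev->ccwlock);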
+
+static int ccw_device_pm_restore(struct device *dev);
+
+int ccw_device_force_console(struct ccw_device *cdev)
{
- snprintf(console_cdev_name, 10, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
- return (const char *)console_cdev_name;
+ return ccw_device_pm_restore(&cdev->dev);
}
+EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif
/*
@@ -1813,15 +1721,9 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
const char *bus_id)
{
struct device *dev;
- struct device_driver *drv;
- drv = get_driver(&cdrv->driver);
- if (!drv)
- return NULL;
-
- dev = driver_find_device(drv, NULL, (void *)bus_id,
+ dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
__ccwdev_check_busid);
- put_driver(drv);
return dev ? to_ccwdev(dev) : NULL;
}
@@ -1844,19 +1746,18 @@ ccw_device_probe (struct device *dev)
int ret;
cdev->drv = cdrv; /* to let the driver call _set_online */
-
+ ccw_device_set_int_class(cdev);
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
-
if (ret) {
cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
return ret;
}
return 0;
}
-static int
-ccw_device_remove (struct device *dev)
+static int ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
@@ -1864,9 +1765,10 @@ ccw_device_remove (struct device *dev)
if (cdrv->remove)
cdrv->remove(cdev);
+
+ spin_lock_irq(cdev->ccwlock);
if (cdev->online) {
cdev->online = 0;
- spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
@@ -1879,9 +1781,12 @@ ccw_device_remove (struct device *dev)
cdev->private->dev_id.devno);
/* Give up reference obtained in ccw_device_set_online(). */
put_device(&cdev->dev);
+ spin_lock_irq(cdev->ccwlock);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
+ spin_unlock_irq(cdev->ccwlock);
return 0;
}
@@ -1895,13 +1800,243 @@ static void ccw_device_shutdown(struct device *dev)
disable_cmf(cdev);
}
-struct bus_type ccw_bus_type = {
+static int ccw_device_pm_prepare(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (work_pending(&cdev->private->todo_work))
+ return -EAGAIN;
+ /* Fail while device is being set online/offline. */
+ if (atomic_read(&cdev->private->onoff))
+ return -EAGAIN;
+
+ if (cdev->online && cdev->drv && cdev->drv->prepare)
+ return cdev->drv->prepare(cdev);
+
+ return 0;
+}
+
+static void ccw_device_pm_complete(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (cdev->online && cdev->drv && cdev->drv->complete)
+ cdev->drv->complete(cdev);
+}
+
+static int ccw_device_pm_freeze(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+ /* Fail suspend while device is in transitional state. */
+ if (!dev_fsm_final_state(cdev))
+ return -EAGAIN;
+ if (!cdev->online)
+ return 0;
+ if (cdev->drv && cdev->drv->freeze) {
+ ret = cdev->drv->freeze(cdev);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irq(sch->lock);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (cm_enabled) {
+ /* Keep the channel subsystem from writing measurement data to memory. */
+ ret = ccw_set_cmf(cdev, 0);
+ if (ret)
+ return ret;
+ }
+ /* From here on, disallow device driver I/O. */
+ spin_lock_irq(sch->lock);
+ ret = cio_disable_subchannel(sch);
+ spin_unlock_irq(sch->lock);
+
+ return ret;
+}
+
+static int ccw_device_pm_thaw(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+ if (!cdev->online)
+ return 0;
+
+ spin_lock_irq(sch->lock);
+ /* Allow device driver I/O again. */
+ ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (ret)
+ return ret;
+
+ if (cm_enabled) {
+ ret = ccw_set_cmf(cdev, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (cdev->drv && cdev->drv->thaw)
+ ret = cdev->drv->thaw(cdev);
+
+ return ret;
+}
+
+static void __ccw_device_pm_restore(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid)) {
+ cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ goto out_unlock;
+ }
+ /*
+ * While we were sleeping, devices may have gone or become
+ * available again. Kick re-detection.
+ */
+ cdev->private->flags.resuming = 1;
+ cdev->private->path_new_mask = LPM_ANYPATH;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ spin_unlock_irq(sch->lock);
+ css_wait_for_slow_path();
+
+ /* cdev may have been moved to a different subchannel. */
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_OFFLINE)
+ goto out_unlock;
+
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED);
+ spin_lock_irq(sch->lock);
+
+out_unlock:
+ cdev->private->flags.resuming = 0;
+ spin_unlock_irq(sch->lock);
+}
+
+static int resume_handle_boxed(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_BOXED;
+ if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
+ return 0;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ return -ENODEV;
+}
+
+static int resume_handle_disc(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
+ return 0;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ return -ENODEV;
+}
+
+static int ccw_device_pm_restore(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
+ int ret = 0;
+
+ __ccw_device_pm_restore(cdev);
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid))
+ goto out_restore;
+
+ /* check recognition results */
+ switch (cdev->private->state) {
+ case DEV_STATE_OFFLINE:
+ case DEV_STATE_ONLINE:
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_BOXED:
+ ret = resume_handle_boxed(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ default:
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
+ /* check if the device type has changed */
+ if (!ccw_device_test_sense_data(cdev)) {
+ ccw_device_update_sense_data(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ if (!cdev->online)
+ goto out_unlock;
+
+ if (ccw_device_online(cdev)) {
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(sch->lock);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* reenable cmf, if needed */
+ if (cdev->private->cmb) {
+ spin_unlock_irq(sch->lock);
+ ret = ccw_set_cmf(cdev, 1);
+ spin_lock_irq(sch->lock);
+ if (ret) {
+ CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
+ "(rc=%d)\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ ret = 0;
+ }
+ }
+
+out_restore:
+ spin_unlock_irq(sch->lock);
+ if (cdev->online && cdev->drv && cdev->drv->restore)
+ ret = cdev->drv->restore(cdev);
+ return ret;
+
+out_unlock:
+ spin_unlock_irq(sch->lock);
+ return ret;
+}
+
+static const struct dev_pm_ops ccw_pm_ops = {
+ .prepare = ccw_device_pm_prepare,
+ .complete = ccw_device_pm_complete,
+ .freeze = ccw_device_pm_freeze,
+ .thaw = ccw_device_pm_thaw,
+ .restore = ccw_device_pm_restore,
+};
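The freeze/thaw/restore entries above forward hibernation events to the
ccw bus, which disables and re-enables the subchannel and then calls the
corresponding optional callbacks in struct ccw_driver. A minimal sketch of
a driver wiring these hooks, assuming a hypothetical "foo" driver (the
names and empty bodies are illustrative, not part of this patch):

	/* Sketch only: hypothetical "foo" driver using the new PM hooks. */
	#include <asm/ccwdev.h>

	static int foo_freeze(struct ccw_device *cdev)
	{
		/* Quiesce driver-initiated I/O before the image is written. */
		return 0;
	}

	static int foo_thaw(struct ccw_device *cdev)
	{
		/* Resume normal operation after the image was written. */
		return 0;
	}

	static int foo_restore(struct ccw_device *cdev)
	{
		/* Reinitialize the device after resume from hibernation. */
		return 0;
	}

	static struct ccw_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
		},
		.freeze	 = foo_freeze,
		.thaw	 = foo_thaw,
		.restore = foo_restore,
	};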
+
+static struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
.probe = ccw_device_probe,
.remove = ccw_device_remove,
.shutdown = ccw_device_shutdown,
+ .pm = &ccw_pm_ops,
};
/**
@@ -1917,8 +2052,6 @@ int ccw_driver_register(struct ccw_driver *cdriver)
struct device_driver *drv = &cdriver->driver;
drv->bus = &ccw_bus_type;
- drv->name = cdriver->name;
- drv->owner = cdriver->owner;
return driver_register(drv);
}
@@ -1934,15 +2067,91 @@ void ccw_driver_unregister(struct ccw_driver *cdriver)
driver_unregister(&cdriver->driver);
}
-/* Helper func for qdio. */
-struct subchannel_id
-ccw_device_get_subchannel_id(struct ccw_device *cdev)
+static void ccw_device_todo(struct work_struct *work)
{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
struct subchannel *sch;
+ enum cdev_todo todo;
+ priv = container_of(work, struct ccw_device_private, todo_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
- return sch->schid;
+ /* Find out todo. */
+ spin_lock_irq(cdev->ccwlock);
+ todo = priv->todo;
+ priv->todo = CDEV_TODO_NOTHING;
+ CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+ priv->dev_id.ssid, priv->dev_id.devno, todo);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Perform todo. */
+ switch (todo) {
+ case CDEV_TODO_ENABLE_CMF:
+ cmf_reenable(cdev);
+ break;
+ case CDEV_TODO_REBIND:
+ ccw_device_do_unbind_bind(cdev);
+ break;
+ case CDEV_TODO_REGISTER:
+ io_subchannel_register(cdev);
+ break;
+ case CDEV_TODO_UNREG_EVAL:
+ if (!sch_is_pseudo_sch(sch))
+ css_schedule_eval(sch->schid);
+ /* fall-through */
+ case CDEV_TODO_UNREG:
+ if (sch_is_pseudo_sch(sch))
+ ccw_device_unregister(cdev);
+ else
+ ccw_device_call_sch_unregister(cdev);
+ break;
+ default:
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+ CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ todo);
+ if (cdev->private->todo >= todo)
+ return;
+ cdev->private->todo = todo;
+ /* Get workqueue ref. */
+ if (!get_device(&cdev->dev))
+ return;
+ if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&cdev->dev);
+ }
+}
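Since enum cdev_todo is ordered by priority, a pending higher-priority
operation suppresses later lower-priority requests; for example, once
CDEV_TODO_UNREG is set, a subsequent CDEV_TODO_REBIND is ignored. A
minimal sketch of the calling convention, assuming the enum ordering
NOTHING < ENABLE_CMF < REBIND < REGISTER < UNREG_EVAL < UNREG implied by
the priority check above:

	/* Sketch: callers hold the ccwdev lock, per the function comment. */
	spin_lock_irq(cdev->ccwlock);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
	spin_unlock_irq(cdev->ccwlock);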
+
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ return chsc_siosl(sch->schid);
}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
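A driver would typically call this from an error path to capture
model-dependent log data. A minimal usage sketch (the surrounding
error-path context is assumed):

	/* Sketch: request logging after an unexpected device error. */
	if (ccw_device_siosl(cdev))
		dev_warn(&cdev->dev, "logging request failed\n");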
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
@@ -1950,6 +2159,3 @@ EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
-EXPORT_SYMBOL(ccw_bus_type);
-EXPORT_SYMBOL(ccw_device_work);
-EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index f1cbbd94ad4..8d1d2987317 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -2,9 +2,10 @@
#define S390_DEVICE_H
#include <asm/ccwdev.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
-
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
#include "io_sch.h"
/*
@@ -21,7 +22,6 @@ enum dev_state {
DEV_STATE_DISBAND_PGID,
DEV_STATE_BOXED,
/* states to wait for i/o completion before doing something */
- DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
DEV_STATE_QUIESCE,
/* special states for devices gone not operational */
@@ -29,6 +29,7 @@ enum dev_state {
DEV_STATE_DISCONNECTED_SENSE_ID,
DEV_STATE_CMFCHANGE,
DEV_STATE_CMFUPDATE,
+ DEV_STATE_STEAL_LOCK,
/* last element! */
NR_DEV_STATES
};
@@ -56,7 +57,16 @@ extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
- dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
+ int state = cdev->private->state;
+
+ if (dev_event == DEV_EVENT_INTERRUPT) {
+ if (state == DEV_STATE_ONLINE)
+ inc_irq_stat(cdev->private->int_class);
+ else if (state != DEV_STATE_CMFCHANGE &&
+ state != DEV_STATE_CMFUPDATE)
+ inc_irq_stat(IRQIO_CIO);
+ }
+ dev_jumptable[state][dev_event](cdev, dev_event);
}
/*
@@ -71,24 +81,24 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED);
}
-extern struct workqueue_struct *ccw_device_work;
-extern wait_queue_head_t ccw_device_init_wq;
-extern atomic_t ccw_device_init_count;
+int __init io_subchannel_init(void);
void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);
int ccw_device_cancel_halt_clear(struct ccw_device *);
-void ccw_device_do_unbind_bind(struct work_struct *);
-void ccw_device_move_to_orphanage(struct work_struct *);
int ccw_device_is_orphan(struct ccw_device *);
-int ccw_device_recognition(struct ccw_device *);
+void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
+void ccw_device_update_sense_data(struct ccw_device *);
+int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -96,24 +106,28 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);
+/* Function prototype for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
-void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
void ccw_device_sense_id_done(struct ccw_device *, int);
/* Function prototypes for path grouping stuff. */
-void ccw_device_sense_pgid_start(struct ccw_device *);
-void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
-void ccw_device_sense_pgid_done(struct ccw_device *, int);
-
void ccw_device_verify_start(struct ccw_device *);
-void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
void ccw_device_verify_done(struct ccw_device *, int);
void ccw_device_disband_start(struct ccw_device *);
-void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
void ccw_device_disband_done(struct ccw_device *, int);
+void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
+void ccw_device_stlck_done(struct ccw_device *, void *, int);
+
int ccw_device_call_handler(struct ccw_device *);
int ccw_device_stlck(struct ccw_device *);
@@ -122,16 +136,15 @@ int ccw_device_stlck(struct ccw_device *);
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
+void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
-/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
-extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
-extern struct bus_type ccw_bus_type;
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
+int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e4604926156..0bc902b3cd8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/device_fsm.c
* finite state machine for device handling
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
@@ -45,10 +44,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
private = to_io_private(sch);
orb = &private->orb;
- cc = stsch(sch->schid, &schib);
+ cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
- "device information:\n", get_clock());
+ "device information:\n", get_tod_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
@@ -174,32 +173,27 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
ret = cio_clear (sch);
return (ret == 0) ? -EBUSY : ret;
}
- panic("Can't stop i/o on subchannel.\n");
+ /* All attempts to stop I/O were unsuccessful; give up. */
+ CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
+ return -EIO;
}
-static int
-ccw_device_handle_oper(struct ccw_device *cdev)
+void ccw_device_update_sense_data(struct ccw_device *cdev)
{
- struct subchannel *sch;
+ memset(&cdev->id, 0, sizeof(cdev->id));
+ cdev->id.cu_type = cdev->private->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->senseid.dev_model;
+}
- sch = to_subchannel(cdev->dev.parent);
- cdev->private->flags.recog_done = 1;
- /*
- * Check if cu type and device type still match. If
- * not, it is certainly another device and we have to
- * de- and re-register.
- */
- if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
- cdev->id.cu_model != cdev->private->senseid.cu_model ||
- cdev->id.dev_type != cdev->private->senseid.dev_type ||
- cdev->id.dev_model != cdev->private->senseid.dev_model) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- return 0;
- }
- cdev->private->flags.donotify = 1;
- return 1;
+int ccw_device_test_sense_data(struct ccw_device *cdev)
+{
+ return cdev->id.cu_type == cdev->private->senseid.cu_type &&
+ cdev->id.cu_model == cdev->private->senseid.cu_model &&
+ cdev->id.dev_type == cdev->private->senseid.dev_type &&
+ cdev->id.dev_model == cdev->private->senseid.dev_model;
}
/*
@@ -233,12 +227,12 @@ static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
struct subchannel *sch;
- int notify, old_lpm, same_dev;
+ int old_lpm;
sch = to_subchannel(cdev->dev.parent);
- ccw_device_set_timeout(cdev, 0);
- cio_disable_subchannel(sch);
+ if (cio_disable_subchannel(sch))
+ state = DEV_STATE_NOT_OPER;
/*
* Now that we tried recognition, we have performed device selection
* through ssch() and the path information is up to date.
@@ -263,49 +257,32 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
wake_up(&cdev->private->wait_q);
return;
}
- notify = 0;
- same_dev = 0; /* Keep the compiler quiet... */
+ if (cdev->private->flags.resuming) {
+ cdev->private->state = state;
+ cdev->private->flags.recog_done = 1;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
switch (state) {
case DEV_STATE_NOT_OPER:
- CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
- "subchannel 0.%x.%04x\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
- if (cdev->online) {
- same_dev = ccw_device_handle_oper(cdev);
- notify = 1;
+ if (!cdev->online) {
+ ccw_device_update_sense_data(cdev);
+ break;
}
- /* fill out sense information */
- memset(&cdev->id, 0, sizeof(cdev->id));
- cdev->id.cu_type = cdev->private->senseid.cu_type;
- cdev->id.cu_model = cdev->private->senseid.cu_model;
- cdev->id.dev_type = cdev->private->senseid.dev_type;
- cdev->id.dev_model = cdev->private->senseid.dev_model;
- if (notify) {
- cdev->private->state = DEV_STATE_OFFLINE;
- if (same_dev) {
- /* Get device online again. */
- ccw_device_online(cdev);
- wake_up(&cdev->private->wait_q);
- }
- return;
+ cdev->private->state = DEV_STATE_OFFLINE;
+ cdev->private->flags.recog_done = 1;
+ if (ccw_device_test_sense_data(cdev)) {
+ cdev->private->flags.donotify = 1;
+ ccw_device_online(cdev);
+ wake_up(&cdev->private->wait_q);
+ } else {
+ ccw_device_update_sense_data(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
- /* Issue device info message. */
- CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
- "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
- "%04X/%02X\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- cdev->id.cu_type, cdev->id.cu_model,
- cdev->id.dev_type, cdev->id.dev_model);
- break;
+ return;
case DEV_STATE_BOXED:
- CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
- " subchannel 0.%x.%04x\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
if (cdev->id.cu_type != 0) { /* device was recognized before */
cdev->private->flags.recog_done = 1;
cdev->private->state = DEV_STATE_BOXED;
@@ -338,40 +315,54 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
}
}
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occurred
+ * @event: event that occurred
+ *
+ * Returns:
+ * -%EINVAL if the device is offline or has no driver.
+ * -%EOPNOTSUPP if the device's driver has no notifier registered.
+ * %NOTIFY_OK if the driver wants to keep the device.
+ * %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
+ int ret = -EINVAL;
+
if (!cdev->drv)
- return 0;
+ goto out;
if (!cdev->online)
- return 0;
+ goto out;
CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
event);
- return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
-}
-
-static void cmf_reenable_delayed(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- cmf_reenable(cdev);
+ if (!cdev->drv->notify) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (cdev->drv->notify(cdev, event))
+ ret = NOTIFY_OK;
+ else
+ ret = NOTIFY_BAD;
+out:
+ return ret;
}
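On the driver side, the notify callback returns nonzero to keep the
device and zero to drop it, which the code above maps to NOTIFY_OK and
NOTIFY_BAD. A minimal sketch of a hypothetical notifier (the policy shown
is illustrative, not mandated by this patch):

	/* Sketch: hypothetical driver notifier for cio events. */
	#include <asm/ccwdev.h>

	static int foo_notify(struct ccw_device *cdev, int event)
	{
		switch (event) {
		case CIO_GONE:
		case CIO_NO_PATH:
			return 1;	/* keep device, wait for reconnect */
		case CIO_BOXED:
			return 0;	/* give the device up */
		default:
			return 1;
		}
	}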
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
- if (ccw_device_notify(cdev, CIO_OPER)) {
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Reenable channel measurements, if needed. */
- PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
+ /* Save indication for new paths. */
+ cdev->private->path_new_mask = sch->vpm;
return;
}
/* Driver doesn't want device back. */
ccw_device_set_notoper(cdev);
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
/*
@@ -394,12 +385,37 @@ ccw_device_done(struct ccw_device *cdev, int state)
cdev->private->state = state;
- if (state == DEV_STATE_BOXED) {
+ switch (state) {
+ case DEV_STATE_BOXED:
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
- if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
- ccw_device_schedule_sch_unregister(cdev);
+ if (cdev->online &&
+ ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_NOT_OPER:
+ CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ else
+ ccw_device_set_disconnected(cdev);
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_DISCONNECTED:
+ CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
+ "%04x\n", cdev->private->dev_id.devno,
+ sch->schid.sch_no);
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ } else
+ ccw_device_set_disconnected(cdev);
+ cdev->private->flags.donotify = 0;
+ break;
+ default:
+ break;
}
if (cdev->private->flags.donotify) {
@@ -409,110 +425,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
wake_up(&cdev->private->wait_q);
}
-static int cmp_pgid(struct pgid *p1, struct pgid *p2)
-{
- char *c1;
- char *c2;
-
- c1 = (char *)p1;
- c2 = (char *)p2;
-
- return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
-}
-
-static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
-{
- int i;
- int last;
-
- last = 0;
- for (i = 0; i < 8; i++) {
- if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
- /* No PGID yet */
- continue;
- if (cdev->private->pgid[last].inf.ps.state1 ==
- SNID_STATE1_RESET) {
- /* First non-zero PGID */
- last = i;
- continue;
- }
- if (cmp_pgid(&cdev->private->pgid[i],
- &cdev->private->pgid[last]) == 0)
- /* Non-conflicting PGIDs */
- continue;
-
- /* PGID mismatch, can't pathgroup. */
- CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
- "0.%x.%04x, can't pathgroup\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- cdev->private->options.pgroup = 0;
- return;
- }
- if (cdev->private->pgid[last].inf.ps.state1 ==
- SNID_STATE1_RESET)
- /* No previous pgid found */
- memcpy(&cdev->private->pgid[0],
- &channel_subsystems[0]->global_pgid,
- sizeof(struct pgid));
- else
- /* Use existing pgid */
- memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
- sizeof(struct pgid));
-}
-
-/*
- * Function called from device_pgid.c after sense path ground has completed.
- */
-void
-ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
-{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- switch (err) {
- case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
- cdev->private->options.pgroup = 0;
- break;
- case 0: /* success */
- case -EACCES: /* partial success, some paths not operational */
- /* Check if all pgids are equal or 0. */
- __ccw_device_get_common_pgid(cdev);
- break;
- case -ETIME: /* Sense path group id stopped by timeout. */
- case -EUSERS: /* device is reserved for someone else. */
- ccw_device_done(cdev, DEV_STATE_BOXED);
- return;
- default:
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- return;
- }
- /* Start Path Group verification. */
- cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
- ccw_device_verify_start(cdev);
-}
-
/*
* Start device recognition.
*/
-int
-ccw_device_recognition(struct ccw_device *cdev)
+void ccw_device_recognition(struct ccw_device *cdev)
{
- struct subchannel *sch;
- int ret;
-
- if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
- (cdev->private->state != DEV_STATE_BOXED))
- return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
- ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
- if (ret != 0)
- /* Couldn't enable the subchannel for i/o. Sick device. */
- return ret;
-
- /* After 60s the device recognition is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
/*
* We used to start here with a sense pgid to find out whether a device
@@ -524,87 +442,129 @@ ccw_device_recognition(struct ccw_device *cdev)
*/
cdev->private->flags.recog_done = 0;
cdev->private->state = DEV_STATE_SENSE_ID;
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ return;
+ }
ccw_device_sense_id_start(cdev);
- return 0;
}
/*
- * Handle timeout in device recognition.
+ * Handle events for states that use the ccw request infrastructure.
*/
-static void
-ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
- int ret;
-
- ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+ switch (e) {
+ case DEV_EVENT_NOTOPER:
+ ccw_request_notoper(cdev);
break;
- case -ENODEV:
- ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ case DEV_EVENT_INTERRUPT:
+ ccw_request_handler(cdev);
+ break;
+ case DEV_EVENT_TIMEOUT:
+ ccw_request_timeout(cdev);
break;
default:
- ccw_device_set_timeout(cdev, 3*HZ);
+ break;
}
}
+static void ccw_device_report_path_events(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int path_event[8];
+ int chp, mask;
+
+ for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
+ path_event[chp] = PE_NONE;
+ if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
+ path_event[chp] |= PE_PATH_GONE;
+ if (mask & cdev->private->path_new_mask & sch->vpm)
+ path_event[chp] |= PE_PATH_AVAILABLE;
+ if (mask & cdev->private->pgid_reset_mask & sch->vpm)
+ path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
+ }
+ if (cdev->online && cdev->drv->path_event)
+ cdev->drv->path_event(cdev, path_event);
+}
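The resulting array carries one event mask per channel path, with index 0
corresponding to the 0x80 bit of the path mask. A minimal sketch of a
hypothetical consumer on the driver side (the reactions in the comments
are illustrative):

	/* Sketch: hypothetical path_event callback. */
	#include <asm/ccwdev.h>

	static void foo_path_event(struct ccw_device *cdev, int *event)
	{
		int chp;

		for (chp = 0; chp < 8; chp++) {
			if (event[chp] & PE_PATH_GONE)
				;	/* stop using this path */
			if (event[chp] & PE_PATH_AVAILABLE)
				;	/* path may be used again */
			if (event[chp] & PE_PATHGROUP_ESTABLISHED)
				;	/* path group was rebuilt */
		}
	}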
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void ccw_device_reset_path_events(struct ccw_device *cdev)
+{
+ cdev->private->path_gone_mask = 0;
+ cdev->private->path_new_mask = 0;
+ cdev->private->pgid_reset_mask = 0;
+}
+
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/* Update schib - pom may have changed. */
if (cio_update_schib(sch)) {
- cdev->private->flags.donotify = 0;
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- return;
+ err = -ENODEV;
+ goto callback;
}
/* Update lpm with verified path mask. */
sch->lpm = sch->vpm;
/* Repeat path verification? */
if (cdev->private->flags.doverify) {
- cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
return;
}
+callback:
switch (err) {
- case -EOPNOTSUPP: /* path grouping not supported, just set online. */
- cdev->private->options.pgroup = 0;
case 0:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
&cdev->private->irb);
memset(&cdev->private->irb, 0, sizeof(struct irb));
}
+ ccw_device_report_path_events(cdev);
break;
case -ETIME:
+ case -EUSERS:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
+ case -EACCES:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+ break;
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
- if (cdev->online) {
- ccw_device_set_timeout(cdev, 0);
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- } else
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
+ ccw_device_reset_path_events(cdev);
}
/*
@@ -627,17 +587,9 @@ ccw_device_online(struct ccw_device *cdev)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return ret;
}
- /* Do we want to do path grouping? */
- if (!cdev->private->options.pgroup) {
- /* Start initial path verification. */
- cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
- ccw_device_verify_start(cdev);
- return 0;
- }
- /* Do a SensePGID first. */
- cdev->private->state = DEV_STATE_SENSE_PGID;
- ccw_device_sense_pgid_start(cdev);
+ /* Start initial path verification. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
return 0;
}
@@ -653,7 +605,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
break;
default:
cdev->private->flags.donotify = 0;
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
@@ -674,6 +625,10 @@ ccw_device_offline(struct ccw_device *cdev)
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
return 0;
}
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ return 0;
+ }
if (ccw_device_is_orphan(cdev)) {
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
@@ -686,7 +641,7 @@ ccw_device_offline(struct ccw_device *cdev)
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
/* Are we doing path grouping? */
- if (!cdev->private->options.pgroup) {
+ if (!cdev->private->flags.pgroup) {
/* No, set state offline immediately. */
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
@@ -698,45 +653,25 @@ ccw_device_offline(struct ccw_device *cdev)
}
/*
- * Handle timeout in device online/offline process.
- */
-static void
-ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
-{
- int ret;
-
- ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- ccw_device_done(cdev, DEV_STATE_BOXED);
- break;
- case -ENODEV:
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- break;
- default:
- ccw_device_set_timeout(cdev, 3*HZ);
- }
-}
-
-/*
- * Handle not oper event in device recognition.
+ * Handle not operational event in non-special state.
*/
-static void
-ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_generic_notoper(struct ccw_device *cdev,
+ enum dev_event dev_event)
{
- ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ else
+ ccw_device_set_disconnected(cdev);
}
/*
- * Handle not operational event in non-special state.
+ * Handle path verification event in offline state.
*/
-static void ccw_device_generic_notoper(struct ccw_device *cdev,
- enum dev_event dev_event)
+static void ccw_device_offline_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
{
- struct subchannel *sch;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
- ccw_device_set_notoper(cdev);
- sch = to_subchannel(cdev->dev.parent);
css_schedule_eval(sch->schid);
}
@@ -767,7 +702,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
(scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
- * to the device driver. Can't do path verfication now,
+ * to the device driver. Can't do path verification now,
* delay until final status was delivered.
*/
cdev->private->flags.doverify = 1;
@@ -775,11 +710,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
}
/* Device is idle, we can do the path verification. */
cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
}
/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (cdev->online) {
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ else
+ ccw_device_online_verify(cdev, dev_event);
+ } else
+ css_schedule_eval(sch->schid);
+}
+
+/*
* Got an interrupt for a normal io (state online).
*/
static void
@@ -788,7 +739,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct irb *irb;
int is_cmd;
- irb = (struct irb *) __LC_IRB;
+ irb = &__get_cpu_var(cio_irb);
is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */
if (!scsw_is_solicited(&irb->scsw)) {
@@ -832,13 +783,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
ccw_device_set_timeout(cdev, 0);
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
- if (ret == -ENODEV)
+ if (ret)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
else if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
@@ -853,7 +805,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
- irb = (struct irb *) __LC_IRB;
+ irb = &__get_cpu_var(cio_irb);
/* Check for unsolicited interrupt. */
if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
@@ -877,12 +829,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
*/
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
- /* Retry Basic Sense if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- ccw_device_do_sense(cdev, irb);
- return;
- }
cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
@@ -897,6 +843,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
}
call_handler:
cdev->private->state = DEV_STATE_ONLINE;
+ /* In case sensing interfered with setting the device online */
+ wake_up(&cdev->private->wait_q);
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
@@ -904,26 +852,8 @@ call_handler:
}
static void
-ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct irb *irb;
-
- irb = (struct irb *) __LC_IRB;
- /* Accumulate status. We don't do basic sense. */
- ccw_device_accumulate_irb(cdev, irb);
- /* Remember to clear irb to avoid residuals. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- /* Try to start delayed device verification. */
- ccw_device_online_verify(cdev, 0);
- /* Note: Don't call handler for cio initiated clear! */
-}
-
-static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
ccw_device_set_timeout(cdev, 0);
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
@@ -954,6 +884,7 @@ void ccw_device_kill_io(struct ccw_device *cdev)
{
int ret;
+ cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
@@ -975,32 +906,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
}
static void
-ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct irb *irb;
-
- switch (dev_event) {
- case DEV_EVENT_INTERRUPT:
- irb = (struct irb *) __LC_IRB;
- /* Check for unsolicited interrupt. */
- if ((scsw_stctl(&irb->scsw) ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
- (!scsw_cc(&irb->scsw)))
- /* FIXME: we should restart stlck here, but this
- * is extremely unlikely ... */
- goto out_wakeup;
-
- ccw_device_accumulate_irb(cdev, irb);
- /* We don't care about basic sense etc. */
- break;
- default: /* timeout */
- break;
- }
-out_wakeup:
- wake_up(&cdev->private->wait_q);
-}
-
-static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
@@ -1009,10 +914,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
/* Couldn't enable the subchannel for i/o. Sick device. */
return;
-
- /* After 60s the device recognition is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
-
cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
ccw_device_sense_id_start(cdev);
}
@@ -1043,22 +944,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
/* We should also update ssd info, but this has to wait. */
/* Check if this is another device which appeared on the same sch. */
- if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_move_to_orphanage);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- } else
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+ css_schedule_eval(sch->schid);
+ else
ccw_device_start_id(cdev, 0);
}
-static void
-ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+ enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/*
- * An interrupt in state offline means a previous disable was not
+ * An interrupt in a disabled state means a previous disable was not
* successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
@@ -1084,10 +983,7 @@ static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
- if (dev_event == DEV_EVENT_NOTOPER)
- cdev->private->state = DEV_STATE_NOT_OPER;
- else
- cdev->private->state = DEV_STATE_OFFLINE;
+ cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
}
@@ -1097,17 +993,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- cdev->private->state = DEV_STATE_OFFLINE;
- wake_up(&cdev->private->wait_q);
- break;
- case -ENODEV:
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ } else {
cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
- break;
- default:
- ccw_device_set_timeout(cdev, HZ/10);
}
}
@@ -1121,50 +1011,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
}
/*
- * Bug operation action.
- */
-static void
-ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
-{
- CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
- "0.%x.%04x\n", cdev->private->state, dev_event,
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- BUG();
-}
-
-/*
* device statemachine
*/
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_NOT_OPER] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
- [DEV_EVENT_INTERRUPT] = ccw_device_bug,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_ID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_OFFLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
@@ -1180,24 +1057,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_DISBAND_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_BOXED] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
- [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
- },
- /* states to wait for i/o completion before doing something */
- [DEV_STATE_CLEAR_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
+ [DEV_EVENT_INTERRUPT] = ccw_device_nop,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
},
+ /* states to wait for i/o completion before doing something */
[DEV_STATE_TIMEOUT_KILL] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
@@ -1214,13 +1085,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_DISCONNECTED] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
- [DEV_EVENT_TIMEOUT] = ccw_device_bug,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_start_id,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_CMFCHANGE] = {
@@ -1235,6 +1106,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
},
+ [DEV_STATE_STEAL_LOCK] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
};
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 1bdaa614e34..d4fa30541a3 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,40 +1,39 @@
/*
- * drivers/s390/cio/device_id.c
+ * CCW device SENSE ID I/O handling.
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Sense ID functions.
+ * Copyright IBM Corp. 2002, 2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
-
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
#include <asm/ccwdev.h>
-#include <asm/delay.h>
+#include <asm/setup.h>
#include <asm/cio.h>
-#include <asm/lowcore.h>
#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
-#include "css.h"
#include "device.h"
-#include "ioasm.h"
#include "io_sch.h"
+#define SENSE_ID_RETRIES 256
+#define SENSE_ID_TIMEOUT (10 * HZ)
+#define SENSE_ID_MIN_LEN 4
+#define SENSE_ID_BASIC_LEN 7
+
/**
- * vm_vdev_to_cu_type - Convert vm virtual device into control unit type
- * for certain devices.
- * @class: virtual device class
- * @type: virtual device type
+ * diag210_to_senseid - convert diag 0x210 data to sense id information
+ * @senseid: sense id
+ * @diag: diag 0x210 data
*
- * Returns control unit type if a match was made or %0xffff otherwise.
+ * Return 0 on success, non-zero otherwise.
*/
-static int vm_vdev_to_cu_type(int class, int type)
+static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
{
static struct {
int class, type, cu_type;
@@ -71,253 +70,153 @@ static int vm_vdev_to_cu_type(int class, int type)
};
int i;
- for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
- if (class == vm_devices[i].class && type == vm_devices[i].type)
- return vm_devices[i].cu_type;
+ /* Special case for osa devices. */
+ if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
+ senseid->cu_type = 0x3088;
+ senseid->cu_model = 0x60;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
+ if (diag->vrdcvcla == vm_devices[i].class &&
+ diag->vrdcvtyp == vm_devices[i].type) {
+ senseid->cu_type = vm_devices[i].cu_type;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ }
- return 0xffff;
+ return -ENODEV;
}
/**
- * diag_get_dev_info - retrieve device information via DIAG X'210'
- * @devno: device number
- * @ps: pointer to sense ID data area
+ * diag_get_dev_info - retrieve device information via diag 0x210
+ * @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.
*/
-static int diag_get_dev_info(u16 devno, struct senseid *ps)
+static int diag210_get_dev_info(struct ccw_device *cdev)
{
+ struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
struct diag210 diag_data;
- int ccode;
-
- CIO_TRACE_EVENT (4, "VMvdinf");
-
- diag_data = (struct diag210) {
- .vrdcdvno = devno,
- .vrdclen = sizeof (diag_data),
- };
-
- ccode = diag210 (&diag_data);
- if ((ccode == 0) || (ccode == 2)) {
- ps->reserved = 0xff;
-
- /* Special case for osa devices. */
- if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) {
- ps->cu_type = 0x3088;
- ps->cu_model = 0x60;
- return 0;
- }
- ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla,
- diag_data.vrdcvtyp);
- if (ps->cu_type != 0xffff)
- return 0;
- }
-
- CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
- "vdev class : %02X, vdev type : %04X \n ... "
- "rdev class : %02X, rdev type : %04X, "
- "rdev model: %02X\n",
- devno, ccode,
- diag_data.vrdcvcla, diag_data.vrdcvtyp,
- diag_data.vrdcrccl, diag_data.vrdccrty,
- diag_data.vrdccrmd);
-
+ int rc;
+
+ if (dev_id->ssid != 0)
+ return -ENODEV;
+ memset(&diag_data, 0, sizeof(diag_data));
+ diag_data.vrdcdvno = dev_id->devno;
+ diag_data.vrdclen = sizeof(diag_data);
+ rc = diag210(&diag_data);
+ CIO_TRACE_EVENT(4, "diag210");
+ CIO_HEX_EVENT(4, &rc, sizeof(rc));
+ CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
+ if (rc != 0 && rc != 2)
+ goto err_failed;
+ if (diag210_to_senseid(senseid, &diag_data))
+ goto err_unknown;
+ return 0;
+
+err_unknown:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
+ dev_id->ssid, dev_id->devno);
+ return -ENODEV;
+err_failed:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
+ dev_id->ssid, dev_id->devno, rc);
return -ENODEV;
}
/*
- * Start Sense ID helper function.
- * Try to obtain the 'control unit'/'device type' information
- * associated with the subchannel.
+ * Initialize SENSE ID data.
*/
-static int
-__ccw_device_sense_id_start(struct ccw_device *cdev)
-{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Setup sense channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_SENSE_ID;
- ccw->cda = (__u32) __pa (&cdev->private->senseid);
- ccw->count = sizeof (struct senseid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try on every path. */
- ret = -ENODEV;
- while (cdev->private->imask != 0) {
- cdev->private->senseid.cu_type = 0xFFFF;
- if ((sch->opm & cdev->private->imask) != 0 &&
- cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if (ret != -EACCES)
- return ret;
- }
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- }
- return ret;
-}
-
-void
-ccw_device_sense_id_start(struct ccw_device *cdev)
+static void snsid_init(struct ccw_device *cdev)
{
- int ret;
-
- memset (&cdev->private->senseid, 0, sizeof (struct senseid));
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- ret = __ccw_device_sense_id_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_id_done(cdev, ret);
+ cdev->private->flags.esid = 0;
+ memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+ cdev->private->senseid.cu_type = 0xffff;
}
/*
- * Called from interrupt context to check if a valid answer
- * to Sense ID was received.
+ * Check for complete SENSE ID data.
*/
-static int
-ccw_device_check_sense_id(struct ccw_device *cdev)
+static int snsid_check(struct ccw_device *cdev, void *data)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
-
- /* Check the error cases. */
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Sense ID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
- }
- return -ETIME;
- }
- if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
- /*
- * if the device doesn't support the SenseID
- * command further retries wouldn't help ...
- * NB: We don't check here for intervention required like we
- * did before, because tape devices with no tape inserted
- * may present this status *in conjunction with* the
- * sense id information. So, for intervention required,
- * we use the "whack it until it talks" strategy...
- */
- CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
- "0.%x.%04x reports cmd reject\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no);
+ struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ int len = sizeof(struct senseid) - scsw->count;
+
+ /* Check for incomplete SENSE ID data. */
+ if (len < SENSE_ID_MIN_LEN)
+ goto out_restart;
+ if (cdev->private->senseid.cu_type == 0xffff)
+ goto out_restart;
+ /* Check for incompatible SENSE ID data. */
+ if (cdev->private->senseid.reserved != 0xff)
return -EOPNOTSUPP;
- }
- if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
- "lpum %02X, cnt %02d, sns :"
- " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.sublog.lpum,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- u8 lpm;
+ /* Check for extended-identification information. */
+ if (len > SENSE_ID_BASIC_LEN)
+ cdev->private->flags.esid = 1;
+ return 0;
- lpm = to_io_private(sch)->orb.cmd.lpm;
- if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
- CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
- "on subchannel 0.%x.%04x is "
- "'not operational'\n", lpm,
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
- return -EACCES;
- }
-
- /* Did we get a proper answer ? */
- if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
- cdev->private->senseid.reserved == 0xFF) {
- if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
- cdev->private->flags.esid = 1;
- return 0; /* Success */
- }
-
- /* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
- "subchannel 0.%x.%04x returns status %02X%02X\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no,
- irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
+out_restart:
+ snsid_init(cdev);
return -EAGAIN;
}
/*
- * Got interrupt for Sense ID.
+ * Process SENSE ID request result.
*/
-void
-ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = (struct irb *) __LC_IRB;
- /* Retry sense id, if needed. */
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
- ret = __ccw_device_sense_id_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_id_done(cdev, ret);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
+ int vm = 0;
+
+ if (rc && MACHINE_IS_VM) {
+ /* Try diag 0x210 fallback on z/VM. */
+ snsid_init(cdev);
+ if (diag210_get_dev_info(cdev) == 0) {
+ rc = 0;
+ vm = 1;
}
- return;
}
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- ret = ccw_device_check_sense_id(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
- case 0: /* Sense id succeeded. */
- case -ETIME: /* Sense id stopped by timeout. */
- ccw_device_sense_id_done(cdev, ret);
- break;
- case -EACCES: /* channel is not operational. */
- sch->lpm &= ~cdev->private->imask;
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- /* fall through. */
- case -EAGAIN: /* try again. */
- ret = __ccw_device_sense_id_start(cdev);
- if (ret == 0 || ret == -EBUSY)
- break;
- /* fall through. */
- default: /* Sense ID failed. Try asking VM. */
- if (MACHINE_IS_VM)
- ret = diag_get_dev_info(cdev->private->dev_id.devno,
- &cdev->private->senseid);
- else
- /*
- * If we can't couldn't identify the device type we
- * consider the device "not operational".
- */
- ret = -ENODEV;
+ CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+ "%04x/%02x%s\n", id->ssid, id->devno, rc,
+ senseid->cu_type, senseid->cu_model, senseid->dev_type,
+ senseid->dev_model, vm ? " (diag210)" : "");
+ ccw_device_sense_id_done(cdev, rc);
+}
- ccw_device_sense_id_done(cdev, ret);
- break;
- }
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ CIO_TRACE_EVENT(4, "snsid");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Data setup. */
+ snsid_init(cdev);
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_ID;
+ cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->count = sizeof(struct senseid);
+ cp->flags = CCW_FLAG_SLI;
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->cp = cp;
+ req->timeout = SENSE_ID_TIMEOUT;
+ req->maxretries = SENSE_ID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->check = snsid_check;
+ req->callback = snsid_callback;
+ ccw_request_start(cdev);
}
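The function above is the template for internal operations built on the
new ccw request infrastructure: set up a channel program, fill struct
ccw_request with timeout, retry count, path mask and the check/callback
hooks, then start the request. A minimal sketch of the same shape for
some other internal operation (the "op" names, the NOP command and the
5 second timeout are illustrative assumptions):

	/* Sketch: generic shape of an internal I/O via ccw_request_start.
	 * Internal to drivers/s390/cio. */
	#include "cio.h"
	#include "device.h"
	#include "io_sch.h"

	static int op_check(struct ccw_device *cdev, void *data)
	{
		return 0;	/* 0 = done, -EAGAIN = restart request */
	}

	static void op_callback(struct ccw_device *cdev, void *data, int rc)
	{
		/* Evaluate rc and continue the state machine. */
	}

	static void op_start(struct ccw_device *cdev)
	{
		struct subchannel *sch = to_subchannel(cdev->dev.parent);
		struct ccw_request *req = &cdev->private->req;
		struct ccw1 *cp = cdev->private->iccws;

		/* Channel program setup. */
		cp->cmd_code = CCW_CMD_NOOP;
		cp->cda = 0;
		cp->count = 0;
		cp->flags = CCW_FLAG_SLI;
		/* Request setup. */
		memset(req, 0, sizeof(*req));
		req->cp = cp;
		req->timeout = 5 * HZ;
		req->maxretries = 5;
		req->lpm = sch->schib.pmcw.pam & sch->opm;
		req->check = op_check;
		req->callback = op_callback;
		ccw_request_start(cdev);
	}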
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 151754d5474..f3c417943da 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,10 +1,8 @@
/*
- * drivers/s390/cio/device_ops.c
+ * Copyright IBM Corp. 2002, 2009
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -13,6 +11,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/completion.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
@@ -48,6 +47,7 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
@@ -76,6 +76,7 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
@@ -92,9 +93,34 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+ cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}
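With CCWDEV_DO_MULTIPATH added to the option mask, a driver opts into
multipathing the same way it opts into path grouping. A minimal sketch,
typically placed in the driver's probe routine:

	/* Sketch: enabling the new multipath option next to path grouping. */
	rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
					  CCWDEV_DO_MULTIPATH);
	if (rc)
		return rc;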
/**
+ * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+ return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+ return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
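Together, the two predicates let a driver check at runtime which mode the
device actually ended up in after path verification. A minimal sketch:

	/* Sketch: runtime check for grouped, multipath-capable operation. */
	if (ccw_device_is_pathgroup(cdev) && ccw_device_is_multipath(cdev))
		;	/* requests may be started on any grouped path */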
+
+/**
* ccw_device_clear() - terminate I/O request processing
* @cdev: target ccw device
* @intparm: interruption parameter; value is only used if no I/O is
@@ -114,16 +140,17 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
struct subchannel *sch;
int ret;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+
ret = cio_clear(sch);
if (ret == 0)
cdev->private->intparm = intparm;
@@ -161,18 +188,17 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
struct subchannel *sch;
int ret;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY ||
- cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
@@ -187,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -339,16 +365,17 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
struct subchannel *sch;
int ret;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+
ret = cio_halt(sch);
if (ret == 0)
cdev->private->intparm = intparm;
@@ -372,11 +399,11 @@ int ccw_device_resume(struct ccw_device *cdev)
{
struct subchannel *sch;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
@@ -391,12 +418,9 @@ int ccw_device_resume(struct ccw_device *cdev)
int
ccw_device_call_handler(struct ccw_device *cdev)
{
- struct subchannel *sch;
unsigned int stctl;
int ending_status;
- sch = to_subchannel(cdev->dev.parent);
-
/*
	 * we allow for the device action handler if:
* - we received ending status
@@ -471,91 +495,91 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
struct subchannel *sch;
- sch = to_subchannel(cdev->dev.parent);
- if (!sch)
+ if (!cdev->dev.parent)
return 0;
- else
- return sch->lpm;
+
+ sch = to_subchannel(cdev->dev.parent);
+ return sch->lpm;
}
-/*
- * Try to break the lock on a boxed device.
- */
-int
-ccw_device_stlck(struct ccw_device *cdev)
-{
- void *buf, *buf2;
- unsigned long flags;
- struct subchannel *sch;
- int ret;
+struct stlck_data {
+ struct completion done;
+ int rc;
+};
- if (!cdev)
- return -ENODEV;
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+ struct stlck_data *sdata = data;
- if (cdev->drv && !cdev->private->options.force)
- return -EINVAL;
+ sdata->rc = rc;
+ complete(&sdata->done);
+}
- sch = to_subchannel(cdev->dev.parent);
-
- CIO_TRACE_EVENT(2, "stl lock");
- CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct stlck_data data;
+ u8 *buffer;
+ int rc;
- buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
- if (!buf2) {
- kfree(buf);
- return -ENOMEM;
+ /* Check if steal lock operation is valid for this device. */
+ if (cdev->drv) {
+ if (!cdev->private->options.force)
+ return -EINVAL;
}
- spin_lock_irqsave(sch->lock, flags);
- ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
- if (ret)
- goto out_unlock;
- /*
- * Setup ccw. We chain an unconditional reserve and a release so we
- * only break the lock.
- */
- cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
- cdev->private->iccws[0].cda = (__u32) __pa(buf);
- cdev->private->iccws[0].count = 32;
- cdev->private->iccws[0].flags = CCW_FLAG_CC;
- cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
- cdev->private->iccws[1].cda = (__u32) __pa(buf2);
- cdev->private->iccws[1].count = 32;
- cdev->private->iccws[1].flags = 0;
- ret = cio_start(sch, cdev->private->iccws, 0);
- if (ret) {
- cio_disable_subchannel(sch); //FIXME: return code?
+ buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ init_completion(&data.done);
+ data.rc = -EIO;
+ spin_lock_irq(sch->lock);
+ rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+ if (rc)
goto out_unlock;
+ /* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
+ ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+ spin_unlock_irq(sch->lock);
+ /* Wait for operation to finish. */
+ if (wait_for_completion_interruptible(&data.done)) {
+ /* Got a signal. */
+ spin_lock_irq(sch->lock);
+ ccw_request_cancel(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_for_completion(&data.done);
}
- cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- spin_unlock_irqrestore(sch->lock, flags);
- wait_event(cdev->private->wait_q,
- cdev->private->irb.scsw.cmd.actl == 0);
- spin_lock_irqsave(sch->lock, flags);
- cio_disable_subchannel(sch); //FIXME: return code?
- if ((cdev->private->irb.scsw.cmd.dstat !=
- (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
- (cdev->private->irb.scsw.cmd.cstat != 0))
- ret = -EIO;
- /* Clear irb. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ rc = data.rc;
+ /* Check results. */
+ spin_lock_irq(sch->lock);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_BOXED;
out_unlock:
- kfree(buf);
- kfree(buf2);
- spin_unlock_irqrestore(sch->lock, flags);
- return ret;
+ spin_unlock_irq(sch->lock);
+ kfree(buffer);
+
+ return rc;
}
-void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
+/**
+ * ccw_device_get_chp_desc - return newly allocated channel-path descriptor
+ * @cdev: device to obtain the descriptor for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel path. Return %NULL on error.
+ */
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
+ int chp_idx)
{
struct subchannel *sch;
struct chp_id chpid;
sch = to_subchannel(cdev->dev.parent);
chp_id_init(&chpid);
- chpid.id = sch->schib.pmcw.chpid[chp_no];
+ chpid.id = sch->schib.pmcw.chpid[chp_idx];
return chp_get_chp_desc(chpid);
}
@@ -588,11 +612,23 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
int rc;
sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -667,6 +703,52 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
/**
+ * ccw_device_get_mdc - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-byte blocks that all paths at least support
+ * for a transport command. Return values <= 0 indicate failure.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_path *chp;
+ struct chp_id chpid;
+ int mdc = 0, i;
+
+	/* Adjust requested path mask to exclude varied-off paths. */
+ if (mask)
+ mask &= sch->lpm;
+ else
+ mask = sch->lpm;
+
+ chp_id_init(&chpid);
+ for (i = 0; i < 8; i++) {
+ if (!(mask & (0x80 >> i)))
+ continue;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ continue;
+
+ mutex_lock(&chp->lock);
+ if (!chp->desc_fmt1.f) {
+ mutex_unlock(&chp->lock);
+ return 0;
+ }
+ if (!chp->desc_fmt1.r)
+ mdc = 1;
+ mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+ chp->desc_fmt1.mdc;
+ mutex_unlock(&chp->lock);
+ }
+
+ return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
+
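
A hypothetical caller converting the accumulated mdc value into a byte limit; the 65536 factor follows from the 64K-block unit documented above:

    #include <linux/types.h>

    static size_t example_max_transport_bytes(struct ccw_device *cdev)
    {
            int mdc = ccw_device_get_mdc(cdev, 0); /* 0: use all usable paths */

            if (mdc <= 0)
                    return 0;                /* unknown or unsupported */
            return (size_t)mdc * 65536;      /* mdc counts 64K-byte blocks */
    }
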
+/**
* ccw_device_tm_intrg - perform interrogate function
* @cdev: ccw device on which to perform the interrogate function
*
@@ -677,6 +759,8 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
if (!scsw_is_tm(&sch->schib.scsw) ||
@@ -686,14 +770,18 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
-// FIXME: these have to go:
-
-int
-_ccw_device_get_subchannel_number(struct ccw_device *cdev)
+/**
+ * ccw_device_get_schid - obtain a subchannel id
+ * @cdev: device to obtain the id for
+ * @schid: where to fill in the values
+ */
+void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
- return cdev->private->schid.sch_no;
-}
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ *schid = sch->schid;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_schid);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -708,5 +796,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
-EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
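
The removed _ccw_device_get_subchannel_number() returned only the raw subchannel number; a short sketch (function name hypothetical) of its replacement, which also carries the subchannel-set ID:

    #include <linux/printk.h>
    #include <asm/schid.h>

    static void example_log_schid(struct ccw_device *cdev)
    {
            struct subchannel_id schid;

            ccw_device_get_schid(cdev, &schid);
            /* ssid and sch_no together identify the subchannel. */
            pr_info("subchannel 0.%x.%04x\n", schid.ssid, schid.sch_no);
    }
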
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index fc5ca1dd52b..37ada05e82a 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,594 +1,669 @@
/*
- * drivers/s390/cio/device_pgid.c
+ * CCW device PGID and path verification I/O handling.
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Path Group ID functions.
+ * Copyright IBM Corp. 2002, 2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
-#include <linux/init.h>
-
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
-#include <asm/delay.h>
-#include <asm/lowcore.h>
#include "cio.h"
#include "cio_debug.h"
-#include "css.h"
#include "device.h"
-#include "ioasm.h"
#include "io_sch.h"
+#define PGID_RETRIES 256
+#define PGID_TIMEOUT (10 * HZ)
+
+static void verify_start(struct ccw_device *cdev);
+
/*
- * Helper function called from interrupt context to decide whether an
- * operation should be tried again.
+ * Process path verification data and report result.
*/
-static int __ccw_device_should_retry(union scsw *scsw)
+static void verify_done(struct ccw_device *cdev, int rc)
{
- /* CC is only valid if start function bit is set. */
- if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
- return 1;
- /* No more activity. For sense and set PGID we stubbornly try again. */
- if (!scsw->cmd.actl)
- return 1;
- return 0;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ int mpath = cdev->private->flags.mpath;
+ int pgroup = cdev->private->flags.pgroup;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ if (sch->config.mp != mpath) {
+ sch->config.mp = mpath;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+ "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+ sch->vpm);
+ ccw_device_verify_done(cdev, rc);
}
/*
- * Start Sense Path Group ID helper function. Used in ccw_device_recog
- * and ccw_device_sense_pgid.
+ * Create channel program to perform a NOOP.
*/
-static int
-__ccw_device_sense_pgid_start(struct ccw_device *cdev)
+static void nop_build_cp(struct ccw_device *cdev)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
- int i;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Return if we already checked on all paths. */
- if (cdev->private->imask == 0)
- return (sch->lpm == 0) ? -ENODEV : -EACCES;
- i = 8 - ffs(cdev->private->imask);
-
- /* Setup sense path group id channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_SENSE_PGID;
- ccw->count = sizeof (struct pgid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- /* Try on every path. */
- ret = -ENODEV;
- while (cdev->private->imask != 0) {
- /* Try every path multiple times. */
- ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if (ret != -EACCES)
- return ret;
- CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not "
- "operational'\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
-
- }
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- i++;
- }
-
- return ret;
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp->cmd_code = CCW_CMD_NOOP;
+ cp->cda = 0;
+ cp->count = 0;
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
}
-void
-ccw_device_sense_pgid_start(struct ccw_device *cdev)
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
{
- int ret;
-
- /* Set a timeout of 60s */
- ccw_device_set_timeout(cdev, 60*HZ);
-
- cdev->private->state = DEV_STATE_SENSE_PGID;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid));
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+ ~cdev->private->path_noirq_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ nop_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
/*
- * Called from interrupt context to check if a valid answer
- * to Sense Path Group ID was received.
+ * Adjust NOOP I/O status.
*/
-static int
-__ccw_device_check_sense_pgid(struct ccw_device *cdev)
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+ struct irb *irb, enum io_status status)
{
- struct subchannel *sch;
- struct irb *irb;
- int i;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Sense PGID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
- }
- return -ETIME;
- }
- if (irb->esw.esw0.erw.cons &&
- (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
- /*
- * If the device doesn't support the Sense Path Group ID
- * command further retries wouldn't help ...
- */
- return -EOPNOTSUPP;
- }
- if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
- "lpum %02X, cnt %02d, sns : "
- "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.sublog.lpum,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- u8 lpm;
-
- lpm = to_io_private(sch)->orb.cmd.lpm;
- CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, lpm);
- return -EACCES;
- }
- i = 8 - ffs(cdev->private->imask);
- if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
- "is reserved by someone else\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no);
- return -EUSERS;
- }
- return 0;
+ /* Only subchannel status might indicate a path error. */
+ if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+ return IO_DONE;
+ return status;
}
/*
- * Got interrupt for Sense Path Group ID.
+ * Process NOOP request result for a single path.
*/
-void
-ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
-
- irb = (struct irb *) __LC_IRB;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw)) {
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
- }
- return;
- }
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- ret = __ccw_device_check_sense_pgid(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
- case -EOPNOTSUPP: /* Sense Path Group ID not supported */
- ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
- break;
- case -ETIME: /* Sense path group id stopped by timeout. */
- ccw_device_sense_pgid_done(cdev, -ETIME);
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm;
break;
- case -EACCES: /* channel is not operational. */
- sch->lpm &= ~cdev->private->imask;
- /* Fall through. */
- case 0: /* Sense Path Group ID successful. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- /* Fall through. */
- case -EAGAIN: /* Try again. */
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret != 0 && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
+ case -ETIME:
+ cdev->private->path_noirq_mask |= req->lpm;
break;
- case -EUSERS: /* device is reserved for someone else. */
- ccw_device_sense_pgid_done(cdev, -EUSERS);
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
break;
+ default:
+ goto err;
}
+ /* Continue on the next path. */
+ req->lpm >>= 1;
+ nop_do(cdev);
+ return;
+
+err:
+ verify_done(cdev, rc);
}
/*
- * Path Group ID helper function.
+ * Create channel program to perform SET PGID on a single path.
*/
-static int
-__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+ struct pgid *pgid = &cdev->private->pgid[i];
+
+ pgid->inf.fc = fn;
+ cp->cmd_code = CCW_CMD_SET_PGID;
+ cp->cda = (u32) (addr_t) pgid;
+ cp->count = sizeof(*pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
- /* Setup sense path group id channel program. */
- cdev->private->pgid[0].inf.fc = func;
- ccw = cdev->private->iccws;
- if (cdev->private->flags.pgid_single)
- cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH;
- else
- cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
- ccw->cmd_code = CCW_CMD_SET_PGID;
- ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
- ccw->count = sizeof (struct pgid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try multiple times. */
- ret = -EACCES;
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if ((ret == 0) || (ret == -EBUSY))
- return ret;
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ if (rc) {
+ /* We don't know the path groups' state. Abort. */
+ verify_done(cdev, rc);
+ return;
}
- /* PGID command failed on this path. */
- CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return ret;
+ /*
+ * Path groups have been reset. Restart path verification but
+ * leave paths in path_noirq_mask out.
+ */
+ cdev->private->flags.pgid_unknown = 0;
+ verify_start(cdev);
}
/*
- * Helper function to send a nop ccw down a path.
+ * Reset pathgroups and restart path verification, leaving unusable paths out.
*/
-static int __ccw_device_do_nop(struct ccw_device *cdev)
+static void pgid_wipeout_start(struct ccw_device *cdev)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+ id->ssid, id->devno, cdev->private->pgid_valid_mask,
+ cdev->private->path_noirq_mask);
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam;
+ req->callback = pgid_wipeout_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
- sch = to_subchannel(cdev->dev.parent);
-
- /* Setup nop channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_NOOP;
- ccw->cda = 0;
- ccw->count = 0;
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try multiple times. */
- ret = -EACCES;
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if ((ret == 0) || (ret == -EBUSY))
- return ret;
+/*
+ * Perform establish/resign SET PGID on a single path.
+ */
+static void spid_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ /* Use next available path that is not already in correct state. */
+ req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ /* Channel program setup. */
+ if (req->lpm & sch->opm)
+ fn = SPID_FUNC_ESTABLISH;
+ else
+ fn = SPID_FUNC_RESIGN;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ if (cdev->private->flags.pgid_unknown) {
+ /* At least one SPID could be partially done. */
+ pgid_wipeout_start(cdev);
+ return;
}
- /* nop command failed on this path. */
- CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return ret;
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
-
/*
- * Called from interrupt context to check if a valid answer
- * to Set Path Group ID was received.
+ * Process SET PGID request result for a single path.
*/
-static int
-__ccw_device_check_pgid(struct ccw_device *cdev)
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Set PGID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm & sch->opm;
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ case -EOPNOTSUPP:
+ if (cdev->private->flags.mpath) {
+ /* Try without multipathing. */
+ cdev->private->flags.mpath = 0;
+ goto out_restart;
}
- return -ETIME;
+ /* Try without pathgrouping. */
+ cdev->private->flags.pgroup = 0;
+ goto out_restart;
+ default:
+ goto err;
}
- if (irb->esw.esw0.erw.cons) {
- if (irb->ecw[0] & SNS0_CMD_REJECT)
- return -EOPNOTSUPP;
- /* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
- "cnt %02d, "
- "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return -EACCES;
+ req->lpm >>= 1;
+ spid_do(cdev);
+ return;
+
+out_restart:
+ verify_start(cdev);
+ return;
+err:
+ verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->singlepath = 1;
+ req->callback = spid_callback;
+ spid_do(cdev);
+}
+
+static int pgid_is_reset(struct pgid *p)
+{
+ char *c;
+
+ for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
+ if (*c != 0)
+ return 0;
}
- return 0;
+ return 1;
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+ return memcmp((char *) p1 + 1, (char *) p2 + 1,
+ sizeof(struct pgid) - 1);
}
/*
- * Called from interrupt context to check the path status after a nop has
- * been send.
+ * Determine pathgroup state from PGID data.
*/
-static int __ccw_device_check_nop(struct ccw_device *cdev)
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ int *mismatch, u8 *reserved, u8 *reset)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry NOP if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
+ struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *first = NULL;
+ int lpm;
+ int i;
+
+ *mismatch = 0;
+ *reserved = 0;
+ *reset = 0;
+ for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+ *reserved |= lpm;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
+ continue;
}
- return -ETIME;
- }
- if (irb->scsw.cmd.cc == 3) {
- CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return -EACCES;
+ if (!first) {
+ first = pgid;
+ continue;
+ }
+ if (pgid_cmp(pgid, first) != 0)
+ *mismatch = 1;
}
- return 0;
+ if (!first)
+ first = &channel_subsystems[0]->global_pgid;
+ *p = first;
}
-static void
-__ccw_device_verify_start(struct ccw_device *cdev)
+static u8 pgid_to_donepm(struct ccw_device *cdev)
{
- struct subchannel *sch;
- __u8 func;
- int ret;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int i;
+ int lpm;
+ u8 donepm = 0;
- sch = to_subchannel(cdev->dev.parent);
- /* Repeat for all paths. */
- for (; cdev->private->imask; cdev->private->imask >>= 1,
- cdev->private->iretry = 5) {
- if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
- /* Path not available, try next. */
+ /* Set bits for paths which are already in the target state. */
+ for (i = 0; i < 8; i++) {
+ lpm = 0x80 >> i;
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
- if (cdev->private->options.pgroup) {
- if (sch->opm & cdev->private->imask)
- func = SPID_FUNC_ESTABLISH;
- else
- func = SPID_FUNC_RESIGN;
- ret = __ccw_device_do_pgid(cdev, func);
- } else
- ret = __ccw_device_do_nop(cdev);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if (ret == 0 || ret == -EBUSY)
+ pgid = &cdev->private->pgid[i];
+ if (sch->opm & lpm) {
+ if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+ continue;
+ } else {
+ if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+ continue;
+ }
+ if (cdev->private->flags.mpath) {
+ if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+ continue;
+ } else {
+ if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+ continue;
+ }
+ donepm |= lpm;
+ }
+
+ return donepm;
+}
+
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+}
+
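
All per-path loops in this file rely on one convention: index i of the pam/chpid/pgid arrays corresponds to mask bit 0x80 >> i, and req->lpm is walked from the most-significant bit down via req->lpm >>= 1. A standalone sketch of the two conversions (helper names are illustrative):

    #include <linux/bitops.h>       /* ffs() */

    static inline u8 path_index_to_mask(int i)      /* i in 0..7 */
    {
            return 0x80 >> i;
    }

    static inline int path_mask_to_index(u8 lpm)    /* single-bit mask */
    {
            return 8 - ffs(lpm);    /* 0x80 -> 0, 0x01 -> 7 */
    }
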
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+ u8 reserved = 0;
+ u8 reset = 0;
+ u8 donepm;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+ if (reserved == cdev->private->pgid_valid_mask)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+ else {
+ donepm = pgid_to_donepm(cdev);
+ sch->vpm = donepm & sch->opm;
+ cdev->private->pgid_reset_mask |= reset;
+ cdev->private->pgid_todo_mask &=
+ ~(donepm | cdev->private->path_noirq_mask);
+ pgid_fill(cdev, pgid);
+ }
+out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+ switch (rc) {
+ case 0:
+ if (cdev->private->flags.pgid_unknown) {
+ pgid_wipeout_start(cdev);
+ return;
+ }
+ /* Anything left to do? */
+ if (cdev->private->pgid_todo_mask == 0) {
+ verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
return;
- /* Permanent path failure, try next. */
+ }
+ /* Perform path-grouping. */
+ spid_start(cdev);
+ break;
+ case -EOPNOTSUPP:
+ /* Path-grouping not supported. */
+ cdev->private->flags.pgroup = 0;
+ cdev->private->flags.mpath = 0;
+ verify_start(cdev);
+ break;
+ default:
+ verify_done(cdev, rc);
}
- /* Done with all paths. */
- ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
}
-
+
/*
- * Got interrupt for Set Path Group ID.
+ * Create channel program to perform a SENSE PGID on a single path.
*/
-void
-ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void snid_build_cp(struct ccw_device *cdev)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_PGID;
+ cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->count = sizeof(struct pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
- irb = (struct irb *) __LC_IRB;
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int ret;
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw))
- __ccw_device_verify_start(cdev);
- return;
- }
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- if (cdev->private->options.pgroup)
- ret = __ccw_device_check_pgid(cdev);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+ ~cdev->private->path_noirq_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ snid_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ if (cdev->private->pgid_valid_mask)
+ ret = 0;
+ else if (cdev->private->path_noirq_mask)
+ ret = -ETIME;
else
- ret = __ccw_device_check_nop(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ ret = -EACCES;
+ snid_done(cdev, ret);
+}
+
+/*
+ * Process SENSE PGID request result for single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
- switch (ret) {
- /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
+ switch (rc) {
case 0:
- /* Path verification ccw finished successfully, update lpm. */
- sch->vpm |= sch->opm & cdev->private->imask;
- /* Go on with next path. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_verify_start(cdev);
+ cdev->private->pgid_valid_mask |= req->lpm;
break;
- case -EOPNOTSUPP:
- /*
- * One of those strange devices which claim to be able
- * to do multipathing but not for Set Path Group ID.
- */
- if (cdev->private->flags.pgid_single)
- cdev->private->options.pgroup = 0;
- else
- cdev->private->flags.pgid_single = 1;
- /* Retry */
- sch->vpm = 0;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- /* fall through. */
- case -EAGAIN: /* Try again. */
- __ccw_device_verify_start(cdev);
- break;
- case -ETIME: /* Set path group id stopped by timeout. */
- ccw_device_verify_done(cdev, -ETIME);
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
break;
- case -EACCES: /* channel is not operational. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_verify_start(cdev);
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
break;
+ default:
+ goto err;
}
+ /* Continue on the next path. */
+ req->lpm >>= 1;
+ snid_do(cdev);
+ return;
+
+err:
+ snid_done(cdev, rc);
}
-void
-ccw_device_verify_start(struct ccw_device *cdev)
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw_dev_id *devid = &cdev->private->dev_id;
- cdev->private->flags.pgid_single = 0;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
-
- /* Start with empty vpm. */
sch->vpm = 0;
-
- /* Get current pam. */
- if (cio_update_schib(sch)) {
- ccw_device_verify_done(cdev, -ENODEV);
- return;
+ sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->singlepath = 1;
+ if (cdev->private->flags.pgroup) {
+ CIO_TRACE_EVENT(4, "snid");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->callback = snid_callback;
+ snid_do(cdev);
+ } else {
+ CIO_TRACE_EVENT(4, "nop");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->filter = nop_filter;
+ req->callback = nop_callback;
+ nop_do(cdev);
}
- /* After 60s path verification is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
- __ccw_device_verify_start(cdev);
}
-static void
-__ccw_device_disband_start(struct ccw_device *cdev)
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
{
- struct subchannel *sch;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- while (cdev->private->imask != 0) {
- if (sch->lpm & cdev->private->imask) {
- ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
- if (ret == 0)
- return;
- }
- cdev->private->iretry = 5;
- cdev->private->imask >>= 1;
- }
- ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+ CIO_TRACE_EVENT(4, "vrfy");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /*
+ * Initialize pathgroup and multipath state with target values.
+ * They may change in the course of path verification.
+ */
+ cdev->private->flags.pgroup = cdev->private->options.pgroup;
+ cdev->private->flags.mpath = cdev->private->options.mpath;
+ cdev->private->flags.doverify = 0;
+ cdev->private->path_noirq_mask = 0;
+ verify_start(cdev);
}
/*
- * Got interrupt for Unset Path Group ID.
+ * Process disband SET PGID request result.
*/
-void
-ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ cdev->private->flags.mpath = 0;
+ if (sch->config.mp) {
+ sch->config.mp = 0;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+ rc);
+ ccw_device_disband_done(cdev, rc);
+}
- irb = (struct irb *) __LC_IRB;
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_TRACE_EVENT(4, "disb");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->singlepath = 1;
+ req->callback = disband_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw))
- __ccw_device_disband_start(cdev);
- return;
- }
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- ret = __ccw_device_check_pgid(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
- case 0: /* disband successful. */
- ccw_device_disband_done(cdev, ret);
- break;
- case -EOPNOTSUPP:
- /*
- * One of those strange devices which claim to be able
- * to do multipathing but not for Unset Path Group ID.
- */
- cdev->private->flags.pgid_single = 1;
- /* fall through. */
- case -EAGAIN: /* Try again. */
- __ccw_device_disband_start(cdev);
- break;
- case -ETIME: /* Set path group id stopped by timeout. */
- ccw_device_disband_done(cdev, -ETIME);
- break;
- case -EACCES: /* channel is not operational. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_disband_start(cdev);
- break;
- }
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp[0].cmd_code = CCW_CMD_STLCK;
+ cp[0].cda = (u32) (addr_t) buf1;
+ cp[0].count = 32;
+ cp[0].flags = CCW_FLAG_CC;
+ cp[1].cmd_code = CCW_CMD_RELEASE;
+ cp[1].cda = (u32) (addr_t) buf2;
+ cp[1].count = 32;
+ cp[1].flags = 0;
+ req->cp = cp;
}
-void
-ccw_device_disband_start(struct ccw_device *cdev)
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
- /* After 60s disbanding is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
+ ccw_device_stlck_done(cdev, data, rc);
+}
- cdev->private->flags.pgid_single = 0;
- cdev->private->iretry = 5;
- cdev->private->imask = 0x80;
- __ccw_device_disband_start(cdev);
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+ void *buf2)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ CIO_TRACE_EVENT(4, "stlck");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->data = data;
+ req->callback = stlck_callback;
+ stlck_build_cp(cdev, buf1, buf2);
+ ccw_request_start(cdev);
}
+
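
verify_start(), pgid_wipeout_start(), ccw_device_disband_start() and ccw_device_stlck_start() all share one request-setup idiom; a condensed sketch of that shape (the helper itself is illustrative, the field names are those of struct ccw_request):

    #include <linux/string.h>

    static void example_request_setup(struct ccw_device *cdev, u8 lpm,
                                      void (*cb)(struct ccw_device *, void *,
                                                 int))
    {
            struct ccw_request *req = &cdev->private->req;

            memset(req, 0, sizeof(*req));   /* always start from a clean slate */
            req->timeout = PGID_TIMEOUT;    /* 10s per started I/O */
            req->maxretries = PGID_RETRIES; /* up to 256 retries per path */
            req->lpm = lpm;                 /* path mask to try (first) */
            req->callback = cb;             /* reports the final rc */
            /* callers then build req->cp and invoke ccw_request_start() */
    }
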
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 5814dbee241..15b56a15db1 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -1,8 +1,5 @@
/*
- * drivers/s390/cio/device_status.c
- *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 2002
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
@@ -336,9 +333,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
-
rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
new file mode 100644
index 00000000000..c4f7bf3e24c
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.c
@@ -0,0 +1,418 @@
+/*
+ * Driver for s390 eadm subchannels
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/isc.h>
+#include <asm/cio.h>
+#include <asm/scsw.h>
+#include <asm/eadm.h>
+
+#include "eadm_sch.h"
+#include "ioasm.h"
+#include "cio.h"
+#include "css.h"
+#include "orb.h"
+
+MODULE_DESCRIPTION("driver for s390 eadm subchannels");
+MODULE_LICENSE("GPL");
+
+#define EADM_TIMEOUT (5 * HZ)
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(eadm_list);
+
+static debug_info_t *eadm_debug;
+
+#define EADM_LOG(imp, txt) do { \
+ debug_text_event(eadm_debug, imp, txt); \
+ } while (0)
+
+static void EADM_LOG_HEX(int level, void *data, int length)
+{
+ if (!debug_level_enabled(eadm_debug, level))
+ return;
+ while (length > 0) {
+ debug_event(eadm_debug, level, data, length);
+ length -= eadm_debug->buf_size;
+ data += eadm_debug->buf_size;
+ }
+}
+
+static void orb_init(union orb *orb)
+{
+ memset(orb, 0, sizeof(union orb));
+ orb->eadm.compat1 = 1;
+ orb->eadm.compat2 = 1;
+ orb->eadm.fmt = 1;
+ orb->eadm.x = 1;
+}
+
+static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
+{
+ union orb *orb = &get_eadm_private(sch)->orb;
+ int cc;
+
+ orb_init(orb);
+ orb->eadm.aob = (u32)__pa(aob);
+ orb->eadm.intparm = (u32)(addr_t)sch;
+ orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
+
+ EADM_LOG(6, "start");
+ EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
+
+ cc = ssch(sch->schid, orb);
+ switch (cc) {
+ case 0:
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
+ break;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ case 3: /* not operational */
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int eadm_subchannel_clear(struct subchannel *sch)
+{
+ int cc;
+
+ cc = csch(sch->schid);
+ if (cc)
+ return -ENODEV;
+
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
+ return 0;
+}
+
+static void eadm_subchannel_timeout(unsigned long data)
+{
+ struct subchannel *sch = (struct subchannel *) data;
+
+ spin_lock_irq(sch->lock);
+ EADM_LOG(1, "timeout");
+ EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
+ if (eadm_subchannel_clear(sch))
+ EADM_LOG(0, "clear failed");
+ spin_unlock_irq(sch->lock);
+}
+
+static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ if (expires == 0) {
+ del_timer(&private->timer);
+ return;
+ }
+ if (timer_pending(&private->timer)) {
+ if (mod_timer(&private->timer, jiffies + expires))
+ return;
+ }
+ private->timer.function = eadm_subchannel_timeout;
+ private->timer.data = (unsigned long) sch;
+ private->timer.expires = jiffies + expires;
+ add_timer(&private->timer);
+}
+
+static void eadm_subchannel_irq(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
+ struct irb *irb = &__get_cpu_var(cio_irb);
+ int error = 0;
+
+ EADM_LOG(6, "irq");
+ EADM_LOG_HEX(6, irb, sizeof(*irb));
+
+ inc_irq_stat(IRQIO_ADM);
+
+ if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
+ && scsw->eswf == 1 && irb->esw.eadm.erw.r)
+ error = -EIO;
+
+ if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
+ error = -ETIMEDOUT;
+
+ eadm_subchannel_set_timeout(sch, 0);
+
+ if (private->state != EADM_BUSY) {
+ EADM_LOG(1, "irq unsol");
+ EADM_LOG_HEX(1, irb, sizeof(*irb));
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ return;
+ }
+ scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
+ private->state = EADM_IDLE;
+
+ if (private->completion)
+ complete(private->completion);
+}
+
+static struct subchannel *eadm_get_idle_sch(void)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(private, &eadm_list, head) {
+ sch = private->sch;
+ spin_lock(sch->lock);
+ if (private->state == EADM_IDLE) {
+ private->state = EADM_BUSY;
+ list_move_tail(&private->head, &eadm_list);
+ spin_unlock(sch->lock);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return sch;
+ }
+ spin_unlock(sch->lock);
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return NULL;
+}
+
+int eadm_start_aob(struct aob *aob)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+ int ret;
+
+ sch = eadm_get_idle_sch();
+ if (!sch)
+ return -EBUSY;
+
+ spin_lock_irqsave(sch->lock, flags);
+ eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
+ ret = eadm_subchannel_start(sch, aob);
+ if (!ret)
+ goto out_unlock;
+
+ /* Handle start subchannel failure. */
+ eadm_subchannel_set_timeout(sch, 0);
+ private = get_eadm_private(sch);
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eadm_start_aob);
+
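
A hypothetical caller, in the style of the SCM layer that consumes this interface, showing the expected contract: on success the request completes asynchronously through scm_irq_handler(), while -EBUSY means no eadm subchannel was idle and the AOB should be requeued:

    static int example_submit_aob(struct aob *aob)
    {
            int rc;

            rc = eadm_start_aob(aob);
            if (rc == -EBUSY)
                    return rc;      /* requeue and retry later */
            /* rc == 0: completion arrives via scm_irq_handler() */
            return rc;
    }
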
+static int eadm_subchannel_probe(struct subchannel *sch)
+{
+ struct eadm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&private->head);
+ init_timer(&private->timer);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, private);
+ private->state = EADM_IDLE;
+ private->sch = sch;
+ sch->isc = EADM_SCH_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret) {
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ kfree(private);
+ goto out;
+ }
+ spin_unlock_irq(sch->lock);
+
+ spin_lock_irq(&list_lock);
+ list_add(&private->head, &eadm_list);
+ spin_unlock_irq(&list_lock);
+
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+out:
+ return ret;
+}
+
+static void eadm_quiesce(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ if (private->state != EADM_BUSY)
+ goto disable;
+
+ if (eadm_subchannel_clear(sch))
+ goto disable;
+
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ wait_for_completion_io(&completion);
+
+ spin_lock_irq(sch->lock);
+ private->completion = NULL;
+
+disable:
+ eadm_subchannel_set_timeout(sch, 0);
+ do {
+ ret = cio_disable_subchannel(sch);
+ } while (ret == -EBUSY);
+
+ spin_unlock_irq(sch->lock);
+}
+
+static int eadm_subchannel_remove(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ spin_lock_irq(&list_lock);
+ list_del(&private->head);
+ spin_unlock_irq(&list_lock);
+
+ eadm_quiesce(sch);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+
+ kfree(private);
+
+ return 0;
+}
+
+static void eadm_subchannel_shutdown(struct subchannel *sch)
+{
+ eadm_quiesce(sch);
+}
+
+static int eadm_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int eadm_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+/**
+ * eadm_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ struct eadm_private *private;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+
+ if (cio_update_schib(sch)) {
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ goto out_unlock;
+ }
+ private = get_eadm_private(sch);
+ if (private->state == EADM_NOT_OPER)
+ private->state = EADM_IDLE;
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+
+static struct css_device_id eadm_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
+
+static struct css_driver eadm_subchannel_driver = {
+ .drv = {
+ .name = "eadm_subchannel",
+ .owner = THIS_MODULE,
+ },
+ .subchannel_type = eadm_subchannel_ids,
+ .irq = eadm_subchannel_irq,
+ .probe = eadm_subchannel_probe,
+ .remove = eadm_subchannel_remove,
+ .shutdown = eadm_subchannel_shutdown,
+ .sch_event = eadm_subchannel_sch_event,
+ .freeze = eadm_subchannel_freeze,
+ .thaw = eadm_subchannel_restore,
+ .restore = eadm_subchannel_restore,
+};
+
+static int __init eadm_sch_init(void)
+{
+ int ret;
+
+ if (!css_general_characteristics.eadm)
+ return -ENXIO;
+
+ eadm_debug = debug_register("eadm_log", 16, 1, 16);
+ if (!eadm_debug)
+ return -ENOMEM;
+
+ debug_register_view(eadm_debug, &debug_hex_ascii_view);
+ debug_set_level(eadm_debug, 2);
+
+ isc_register(EADM_SCH_ISC);
+ ret = css_driver_register(&eadm_subchannel_driver);
+ if (ret)
+ goto cleanup;
+
+ return ret;
+
+cleanup:
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+ return ret;
+}
+
+static void __exit eadm_sch_exit(void)
+{
+ css_driver_unregister(&eadm_subchannel_driver);
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+}
+module_init(eadm_sch_init);
+module_exit(eadm_sch_exit);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
new file mode 100644
index 00000000000..9664e4653f9
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.h
@@ -0,0 +1,22 @@
+#ifndef EADM_SCH_H
+#define EADM_SCH_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include "orb.h"
+
+struct eadm_private {
+ union orb orb;
+ enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct completion *completion;
+ struct subchannel *sch;
+ struct timer_list timer;
+ struct list_head head;
+} __aligned(8);
+
+#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
+#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
+
+#endif
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 61677dfbdc9..ca5e9bb9d45 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -163,7 +163,7 @@ void tcw_finalize(struct tcw *tcw, int num_tidaws)
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
- memset(tcat, 0, sizeof(tcat));
+ memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tccb_init);
*/
void tsb_init(struct tsb *tsb)
{
- memset(tsb, 0, sizeof(tsb));
+ memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
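
The two one-character fixes above correct a classic C pitfall: sizeof applied to the pointer instead of the pointed-to object. A minimal illustration with a made-up type:

    #include <linux/string.h>

    struct tsb_like { unsigned char data[64]; };

    static void clear_wrong(struct tsb_like *p)
    {
            memset(p, 0, sizeof(p));        /* BUG: pointer size, 4 or 8 bytes */
    }

    static void clear_right(struct tsb_like *p)
    {
            memset(p, 0, sizeof(*p));       /* correct: all 64 bytes */
    }
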
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index cf8f24a4b5e..5a999084a22 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/cio/idset.c
- *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "idset.h"
#include "css.h"
@@ -18,7 +17,7 @@ struct idset {
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
- return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+ return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}
static struct idset *idset_new(int num_ssid, int num_id)
@@ -78,7 +77,7 @@ static inline int idset_get_first(struct idset *set, int *ssid, int *id)
struct idset *idset_sch_new(void)
{
- return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+ return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
}
void idset_sch_add(struct idset *set, struct subchannel_id schid)
@@ -91,6 +90,14 @@ void idset_sch_del(struct idset *set, struct subchannel_id schid)
idset_del(set, schid.ssid, schid.sch_no);
}
+/* Clear ids starting from @schid up to end of subchannel set. */
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
+{
+ int pos = schid.ssid * set->num_id + schid.sch_no;
+
+ bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
+}
+
int idset_sch_contains(struct idset *set, struct subchannel_id schid)
{
return idset_contains(set, schid.ssid, schid.sch_no);
@@ -110,3 +117,15 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
}
return rc;
}
+
+int idset_is_empty(struct idset *set)
+{
+ return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+ int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
+
+ bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
+}
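
The new helpers treat the bitmap as ssid-major, which is what the position arithmetic in idset_sch_del_subseq() relies on; a sketch of the index mapping, assuming the struct idset layout above (helper name is illustrative):

    /* Bit position of (ssid, id) in the idset bitmap (ssid-major layout). */
    static inline int example_idset_pos(int num_id, int ssid, int id)
    {
            return ssid * num_id + id;
    }

    /*
     * idset_sch_del_subseq() thus clears num_id - sch_no bits starting at
     * this position: the tail of the current subchannel set only.
     */
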
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 528065cb502..06d3bc01bb0 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/idset.h
- *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
@@ -19,7 +17,10 @@ void idset_fill(struct idset *set);
struct idset *idset_sch_new(void);
void idset_sch_add(struct idset *set, struct subchannel_id id);
void idset_sch_del(struct idset *set, struct subchannel_id id);
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
int idset_sch_contains(struct idset *set, struct subchannel_id id);
int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index c4f3e7c9a85..b108f4a5c7d 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,73 +1,93 @@
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H
+#include <linux/types.h>
#include <asm/schid.h>
-
-/*
- * command-mode operation request block
- */
-struct cmd_orb {
- u32 intparm; /* interruption parameter */
- u32 key : 4; /* flags, like key, suspend control, etc. */
- u32 spnd : 1; /* suspend control */
- u32 res1 : 1; /* reserved */
- u32 mod : 1; /* modification control */
- u32 sync : 1; /* synchronize control */
- u32 fmt : 1; /* format control */
- u32 pfch : 1; /* prefetch control */
- u32 isic : 1; /* initial-status-interruption control */
- u32 alcc : 1; /* address-limit-checking control */
- u32 ssic : 1; /* suppress-suspended-interr. control */
- u32 res2 : 1; /* reserved */
- u32 c64 : 1; /* IDAW/QDIO 64 bit control */
- u32 i2k : 1; /* IDAW 2/4kB block size control */
- u32 lpm : 8; /* logical path mask */
- u32 ils : 1; /* incorrect length */
- u32 zero : 6; /* reserved zeros */
- u32 orbx : 1; /* ORB extension control */
- u32 cpa; /* channel program address */
-} __attribute__ ((packed, aligned(4)));
-
-/*
- * transport-mode operation request block
- */
-struct tm_orb {
- u32 intparm;
- u32 key:4;
- u32 :9;
- u32 b:1;
- u32 :2;
- u32 lpm:8;
- u32 :7;
- u32 x:1;
- u32 tcw;
- u32 prio:8;
- u32 :8;
- u32 rsvpgm:8;
- u32 :8;
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
-} __attribute__ ((packed, aligned(4)));
-
-union orb {
- struct cmd_orb cmd;
- struct tm_orb tm;
-} __attribute__ ((packed, aligned(4)));
+#include <asm/ccwdev.h>
+#include <asm/irq.h>
+#include "css.h"
+#include "orb.h"
struct io_subchannel_private {
union orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
-} __attribute__ ((aligned(8)));
+ struct ccw_device *cdev;/* pointer to the child ccw device */
+ struct {
+ unsigned int suspend:1; /* allow suspend */
+ unsigned int prefetch:1;/* deny prefetch */
+ unsigned int inter:1; /* suppress intermediate interrupts */
+ } __packed options;
+} __aligned(8);
+
+#define to_io_private(n) ((struct io_subchannel_private *) \
+ dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
-#define to_io_private(n) ((struct io_subchannel_private *)n->private)
-#define sch_get_cdev(n) (dev_get_drvdata(&n->dev))
-#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c))
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ if (priv)
+ priv->cdev = cdev;
+}
#define MAX_CIWS 8
/*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+ IO_DONE,
+ IO_RUNNING,
+ IO_STATUS_ERROR,
+ IO_PATH_ERROR,
+ IO_REJECTED,
+ IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ */
+struct ccw_request {
+ struct ccw1 *cp;
+ unsigned long timeout;
+ u16 maxretries;
+ u8 lpm;
+ int (*check)(struct ccw_device *, void *);
+ enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+ enum io_status);
+ void (*callback)(struct ccw_device *, void *, int);
+ void *data;
+ unsigned int singlepath:1;
+ /* These fields are used internally. */
+ unsigned int cancel:1;
+ unsigned int done:1;
+ u16 mask;
+ u16 retries;
+ int drc;
+} __attribute__((packed));
+
+/*
* sense-id response buffer layout
*/
struct senseid {
@@ -82,31 +102,55 @@ struct senseid {
struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
} __attribute__ ((packed, aligned(4)));
+enum cdev_todo {
+ CDEV_TODO_NOTHING,
+ CDEV_TODO_ENABLE_CMF,
+ CDEV_TODO_REBIND,
+ CDEV_TODO_REGISTER,
+ CDEV_TODO_UNREG,
+ CDEV_TODO_UNREG_EVAL,
+};
+
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
int state; /* device state */
atomic_t onoff;
- unsigned long registered;
struct ccw_dev_id dev_id; /* device id */
struct subchannel_id schid; /* subchannel number */
- u8 imask; /* lpm mask for SNID/SID/SPGID */
- int iretry; /* retry counter SNID/SID/SPGID */
+ struct ccw_request req; /* internal I/O request */
+ int iretry;
+ u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
+ u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_noirq_mask; /* mask of paths for which no irq was
+ received */
+ u8 path_notoper_mask; /* mask of paths which were found
+ not operable */
+	u8 path_gone_mask;	/* mask of paths that became unavailable */
+	u8 path_new_mask;	/* mask of paths that became available */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
unsigned int pgroup:1; /* do path grouping */
unsigned int force:1; /* allow forced online */
+ unsigned int mpath:1; /* do multipathing */
} __attribute__ ((packed)) options;
struct {
- unsigned int pgid_single:1; /* use single path for Set PGID */
unsigned int esid:1; /* Ext. SenseID supported by HW */
unsigned int dosense:1; /* delayed SENSE required */
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
- unsigned int intretry:1; /* retry internal operation */
+ unsigned int fake_irb:2; /* deliver faked irb */
+ unsigned int resuming:1; /* recognition while resume */
+ unsigned int pgroup:1; /* pathgroup is set up */
+ unsigned int mpath:1; /* multipathing is set up */
+ unsigned int pgid_unknown:1;/* unknown pgid state */
+ unsigned int initialized:1; /* set if initial reference held */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
@@ -114,32 +158,17 @@ struct ccw_device_private {
struct senseid senseid; /* SenseID info */
struct pgid pgid[8]; /* path group IDs per chpid*/
struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
- struct work_struct kick_work;
+ struct work_struct todo_work;
+ enum cdev_todo todo;
wait_queue_head_t wait_q;
struct timer_list timer;
void *cmb; /* measurement information */
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
+ enum interruption_class int_class;
};
-static inline int ssch(struct subchannel_id schid, union orb *addr)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode = -EIO;
-
- asm volatile(
- " ssch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (ccode)
- : "d" (reg1), "a" (addr), "m" (*addr)
- : "cc", "memory");
- return ccode;
-}
-
static inline int rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
@@ -155,21 +184,6 @@ static inline int rsch(struct subchannel_id schid)
return ccode;
}
-static inline int csch(struct subchannel_id schid)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode;
-
- asm volatile(
- " csch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc");
- return ccode;
-}
-
static inline int hsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
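
struct ccw_request drives the new ccwreq.c engine: @filter can adjust the raw I/O status per IRB, @check decides whether a result is final, and @callback receives the delayed return code collected in @drc. A hedged kernel-side sketch of how the internal users populate it (the timeout, retry count and path mask are illustrative values, not driver defaults):

	/* a minimal sketch, assuming cp points to a prepared channel program */
	static void snid_done(struct ccw_device *cdev, void *data, int rc)
	{
		/* rc is req->drc, accumulated across paths and retries */
	}

	static void start_internal_io(struct ccw_device *cdev, struct ccw1 *cp)
	{
		struct ccw_request *req = &cdev->private->req;

		memset(req, 0, sizeof(*req));
		req->cp		= cp;
		req->timeout	= 5 * HZ;	/* per started I/O */
		req->maxretries	= 3;		/* per path */
		req->lpm	= 0x80;		/* try the first path */
		req->singlepath	= 1;		/* one path per start I/O */
		req->callback	= snid_done;
		/* then hand the request off to the engine in ccwreq.c */
	}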
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 75926279263..4d80fc67a06 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -3,6 +3,8 @@
#include <asm/chpid.h>
#include <asm/schid.h>
+#include "orb.h"
+#include "cio.h"
/*
* TPI info structure
@@ -23,21 +25,6 @@ struct tpi_info {
* Some S390 specific IO instructions as inline
*/
-static inline int stsch(struct subchannel_id schid, struct schib *addr)
-{
- register struct subchannel_id reg1 asm ("1") = schid;
- int ccode;
-
- asm volatile(
- " stsch 0(%3)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode), "=m" (*addr)
- : "d" (reg1), "a" (addr)
- : "cc");
- return ccode;
-}
-
static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
@@ -102,6 +89,38 @@ static inline int tsch(struct subchannel_id schid, struct irb *addr)
return ccode;
}
+static inline int ssch(struct subchannel_id schid, union orb *addr)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " ssch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc", "memory");
+ return ccode;
+}
+
+static inline int csch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " csch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
static inline int tpi(struct tpi_info *addr)
{
int ccode;
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index 17da9ab932e..358ee16d10a 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -42,7 +42,7 @@
* size_t size;
*
* size = itcw_calc_size(1, 2, 0);
- * buffer = kmalloc(size, GFP_DMA);
+ * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
* if (!buffer)
* return -ENOMEM;
* itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
@@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw);
size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
{
size_t len;
+ int cross_count;
/* Main data. */
len = sizeof(struct itcw);
@@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
/* TSB */ sizeof(struct tsb) +
/* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
}
+
/* Maximum required alignment padding. */
len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
- /* Maximum padding for structures that may not cross 4k boundary. */
- if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
- len += max(max_tidaws, intrg_max_tidaws) *
- sizeof(struct tidaw) - 1;
+
+ /* TIDAW lists may not cross a 4k boundary. To cross a
+ * boundary we need to add a TTIC TIDAW. We need to reserve
+ * one additional TIDAW for a TTIC that we may need to add due
+ * to the placement of the data chunk in memory, and a further
+ * TIDAW for each page boundary that the TIDAW list may cross
+	 * due to its own size.
+ */
+ if (max_tidaws) {
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
+ if (intrg_max_tidaws) {
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
return len;
}
EXPORT_SYMBOL(itcw_calc_size);
@@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
void *chunk;
addr_t start;
addr_t end;
+ int cross_count;
/* Check for 2G limit. */
start = (addr_t) buffer;
@@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
if (IS_ERR(chunk))
return chunk;
itcw = chunk;
- itcw->max_tidaws = max_tidaws;
- itcw->intrg_max_tidaws = intrg_max_tidaws;
+ /* allow for TTIC tidaws that may be needed to cross a page boundary */
+ cross_count = 0;
+ if (max_tidaws)
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->max_tidaws = max_tidaws + cross_count;
+ cross_count = 0;
+ if (intrg_max_tidaws)
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
/* Main TCW. */
chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
if (IS_ERR(chunk))
@@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
/* Data TIDAL. */
if (max_tidaws > 0) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
- max_tidaws, 16, 1);
+ itcw->max_tidaws, 16, 0);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->tcw, chunk, 1);
@@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
/* Interrogate data TIDAL. */
if (intrg && (intrg_max_tidaws > 0)) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
- intrg_max_tidaws, 16, 1);
+ itcw->intrg_max_tidaws, 16, 0);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->intrg_tcw, chunk, 1);
@@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw);
* the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
* available space.
*
- * Note: the tidaw-list is assumed to be contiguous with no ttics. The
- * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
*/
struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
{
+ struct tidaw *following;
+
if (itcw->num_tidaws >= itcw->max_tidaws)
return ERR_PTR(-ENOSPC);
+ /*
+	 * If the tidaw that follows the one we are about to fill starts on
+	 * the next page, we must first insert a TTIC tidaw that points to
+	 * the tidaw on the new page.
+ */
+ following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+ + itcw->num_tidaws + 1;
+ if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+ tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+ TIDAW_FLAGS_TTIC, following, 0);
+ if (itcw->num_tidaws >= itcw->max_tidaws)
+ return ERR_PTR(-ENOSPC);
+ }
return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
}
EXPORT_SYMBOL(itcw_add_tidaw);
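
The reservation formula above is worth a worked check: assuming 16-byte TIDAWs and 4 KiB pages (illustrative values; the sizes are machine-defined), cross_count = 1 + ((n * 16 - 1) >> 12) reserves one TTIC for the initial placement of the list plus one per page boundary the list can cross on its own:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define TIDAW_SIZE	16	/* assumed sizeof(struct tidaw) */

	int main(void)
	{
		int n;

		for (n = 64; n <= 512; n *= 2) {
			int cross = 1 + ((n * TIDAW_SIZE - 1) >> PAGE_SHIFT);
			printf("max_tidaws=%3d -> reserve %d TTIC tidaw(s)\n",
			       n, cross);
		}
		/* 64..256 TIDAWs fit in one 4 KiB page -> reserve 1;
		 * 512 TIDAWs span a page boundary -> reserve 2 */
		return 0;
	}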
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 00000000000..7a640530e7f
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,91 @@
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+ u32 intparm; /* interruption parameter */
+ u32 key:4; /* flags, like key, suspend control, etc. */
+ u32 spnd:1; /* suspend control */
+ u32 res1:1; /* reserved */
+ u32 mod:1; /* modification control */
+ u32 sync:1; /* synchronize control */
+ u32 fmt:1; /* format control */
+ u32 pfch:1; /* prefetch control */
+ u32 isic:1; /* initial-status-interruption control */
+ u32 alcc:1; /* address-limit-checking control */
+ u32 ssic:1; /* suppress-suspended-interr. control */
+ u32 res2:1; /* reserved */
+ u32 c64:1; /* IDAW/QDIO 64 bit control */
+ u32 i2k:1; /* IDAW 2/4kB block size control */
+ u32 lpm:8; /* logical path mask */
+ u32 ils:1; /* incorrect length */
+ u32 zero:6; /* reserved zeros */
+ u32 orbx:1; /* ORB extension control */
+ u32 cpa; /* channel program address */
+} __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:9;
+ u32 b:1;
+ u32:2;
+ u32 lpm:8;
+ u32:7;
+ u32 x:1;
+ u32 tcw;
+ u32 prio:8;
+ u32:8;
+ u32 rsvpgm:8;
+ u32:8;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+/*
+ * EADM operation request block
+ */
+struct eadm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:4;
+ u32 compat1:1;
+ u32 compat2:1;
+ u32:21;
+ u32 x:1;
+ u32 aob;
+ u32 css_prio:8;
+ u32:8;
+ u32 scm_prio:8;
+ u32:8;
+ u32:29;
+ u32 fmt:3;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+union orb {
+ struct cmd_orb cmd;
+ struct tm_orb tm;
+ struct eadm_orb eadm;
+} __packed __aligned(4);
+
+#endif /* S390_ORB_H */
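
Counting the words above, cmd_orb occupies 3 words (12 bytes) while tm_orb and eadm_orb occupy 8 words (32 bytes) each, so union orb should be 32 bytes. A hedged compile-time check for code that includes orb.h (_Static_assert shown for brevity; BUILD_BUG_ON would be the in-tree idiom):

	/* a minimal sketch; the sizes follow from the layouts above */
	_Static_assert(sizeof(struct cmd_orb) == 12, "cmd ORB: 3 words");
	_Static_assert(sizeof(struct tm_orb) == 32, "tm ORB: 8 words");
	_Static_assert(sizeof(union orb) == 32, "union = largest member");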
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb811438..a563e4c0059 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/cio/qdio.h
- *
- * Copyright 2000,2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@@ -13,16 +11,10 @@
#include <asm/debug.h>
#include "chsc.h"
-#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
-#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
-
-/*
- * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
- * till next initiative to give transmitted skbs back to the stack is too long.
- * Therefore polling is started in case of multicast queue is filled more
- * than 50 percent.
- */
-#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */
+#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
+#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
@@ -42,6 +34,7 @@ enum qdio_irq_states {
#define SLSB_STATE_NOT_INIT 0x0
#define SLSB_STATE_EMPTY 0x1
#define SLSB_STATE_PRIMED 0x2
+#define SLSB_STATE_PENDING 0x3
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
#define SLSB_TYPE_INPUT 0x0
@@ -65,6 +58,8 @@ enum qdio_irq_states {
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define SLSB_P_OUTPUT_EMPTY \
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */
#define SLSB_CU_OUTPUT_PRIMED \
(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
#define SLSB_P_OUTPUT_HALTED \
@@ -82,14 +77,12 @@ enum qdio_irq_states {
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
-/* qdio adapter-characteristics-1 flag */
-#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
-#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
-#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
-#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
-#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
-#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
-#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
+/* SIGA flags */
+#define QDIO_SIGA_WRITE 0x00
+#define QDIO_SIGA_READ 0x01
+#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEQ 0x04
+#define QDIO_SIGA_QEBSM_FLAG 0x80
#ifdef CONFIG_64BIT
static inline int do_sqbs(u64 token, unsigned char state, int queue,
@@ -142,97 +135,102 @@ struct siga_flag {
u8 input:1;
u8 output:1;
u8 sync:1;
- u8 no_sync_ti:1;
- u8 no_sync_out_ti:1;
- u8 no_sync_out_pci:1;
- u8:2;
+ u8 sync_after_ai:1;
+ u8 sync_out_after_pci:1;
+ u8:3;
} __attribute__ ((packed));
-struct chsc_ssqd_area {
- struct chsc_header request;
- u16:10;
- u8 ssid:2;
- u8 fmt:4;
- u16 first_sch;
- u16:16;
- u16 last_sch;
- u32:32;
- struct chsc_header response;
- u32:32;
- struct qdio_ssqd_desc qdio_ssqd;
-} __attribute__ ((packed));
+struct qdio_dev_perf_stat {
+ unsigned int adapter_int;
+ unsigned int qdio_int;
+ unsigned int pci_request_int;
+
+ unsigned int tasklet_inbound;
+ unsigned int tasklet_inbound_resched;
+ unsigned int tasklet_inbound_resched2;
+ unsigned int tasklet_outbound;
+
+ unsigned int siga_read;
+ unsigned int siga_write;
+ unsigned int siga_sync;
+
+ unsigned int inbound_call;
+ unsigned int inbound_handler;
+ unsigned int stop_polling;
+ unsigned int inbound_queue_full;
+ unsigned int outbound_call;
+ unsigned int outbound_handler;
+ unsigned int outbound_queue_full;
+ unsigned int fast_requeue;
+ unsigned int target_full;
+ unsigned int eqbs;
+ unsigned int eqbs_partial;
+ unsigned int sqbs;
+ unsigned int sqbs_partial;
+ unsigned int int_discarded;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+ /*
+ * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
+	 * Since at most 127 SBALs are scanned, reuse the entry for 128 as
+	 * "queue full", i.e. 127 SBALs found.
+ */
+ unsigned int nr_sbals[8];
+ unsigned int nr_sbal_error;
+ unsigned int nr_sbal_nop;
+ unsigned int nr_sbal_total;
+};
-struct scssc_area {
- struct chsc_header request;
- u16 operation_code;
- u16:16;
- u32:32;
- u32:32;
- u64 summary_indicator_addr;
- u64 subchannel_indicator_addr;
- u32 ks:4;
- u32 kc:4;
- u32:21;
- u32 isc:3;
- u32 word_with_d_bit;
- u32:32;
- struct subchannel_id schid;
- u32 reserved[1004];
- struct chsc_header response;
- u32:32;
-} __attribute__ ((packed));
+enum qdio_queue_irq_states {
+ QDIO_QUEUE_IRQS_DISABLED,
+};
struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
-
/* first ACK'ed buffer */
int ack_start;
-
/* how much sbals are acknowledged with qebsm */
int ack_count;
-
/* last time of noticing incoming data */
u64 timestamp;
+ /* upper-layer polling flag */
+ unsigned long queue_irq_state;
+ /* callback to start upper-layer polling */
+ void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
struct qdio_output_q {
/* PCIs are enabled for the queue */
int pci_out_enabled;
-
- /* IQDIO: output multiple buffers (enhanced SIGA) */
- int use_enh_siga;
-
+ /* cq: use asynchronous output buffers */
+ int use_cq;
+	/* cq: aobs used for a particular SBAL */
+ struct qaob **aobs;
+ /* cq: sbal state related to asynchronous operation */
+ struct qdio_outbuf_state *sbal_state;
/* timer to check for more outbound work */
struct timer_list timer;
+ /* used SBALs before tasklet schedule */
+ int scan_threshold;
};
+/*
+ * Note on cache alignment: slsb and write-mostly data are grouped at the
+ * beginning; sbal[] is read-only, starts on a new cacheline and is
+ * followed by the read-mostly members.
+ */
struct qdio_q {
struct slsb slsb;
+
union {
struct qdio_input_q in;
struct qdio_output_q out;
} u;
- /* queue number */
- int nr;
-
- /* bitmask of queue number */
- int mask;
-
- /* input or output queue */
- int is_input_q;
-
- /* list of thinint input queues */
- struct list_head entry;
-
- /* upper-layer program handler */
- qdio_handler_t (*handler);
-
/*
* inbound: next buffer the program should check for
- * outbound: next buffer to check for having been processed
- * by the card
+ * outbound: next buffer to check if adapter processed it
*/
int first_to_check;
@@ -245,20 +243,38 @@ struct qdio_q {
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
- struct qdio_irq *irq_ptr;
- struct tasklet_struct tasklet;
-
/* error condition during a data transfer */
unsigned int qdio_error;
- struct sl *sl;
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+ /* last scan of the queue */
+ u64 timestamp;
+
+ struct tasklet_struct tasklet;
+ struct qdio_queue_perf_stat q_stats;
+
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+ /* queue number */
+ int nr;
+
+ /* bitmask of queue number */
+ int mask;
+
+ /* input or output queue */
+ int is_input_q;
+
+ /* list of thinint input queues */
+ struct list_head entry;
+
+ /* upper-layer program handler */
+ qdio_handler_t (*handler);
+ struct dentry *debugfs_q;
+ struct qdio_irq *irq_ptr;
+ struct sl *sl;
/*
- * Warning: Leave this member at the end so it won't be cleared in
- * qdio_fill_qs. A page is allocated under this pointer and used for
- * slib and sl. slib is 2048 bytes big and sl points to offset
- * PAGE_SIZE / 2.
+ * A page is allocated under this pointer and used for slib and sl.
+	 * slib is 2048 bytes in size and sl points to offset PAGE_SIZE / 2.
*/
struct slib *slib;
} __attribute__ ((aligned(256)));
@@ -267,6 +283,8 @@ struct qdio_irq {
struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
+ struct dentry *debugfs_dev;
+ struct dentry *debugfs_perf;
unsigned long int_parm;
struct subchannel_id schid;
@@ -284,13 +302,10 @@ struct qdio_irq {
struct ciw aqueue;
struct qdio_ssqd_desc ssqd_desc;
-
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
- /*
- * Warning: Leave these members together at the end so they won't be
- * cleared in qdio_setup_irq.
- */
+ int perf_stat_enabled;
+
struct qdr *qdr;
unsigned long chsc_page;
@@ -299,6 +314,7 @@ struct qdio_irq {
debug_info_t *debug_area;
struct mutex setup_mutex;
+ struct qdio_dev_perf_stat perf_stat;
};
/* helper functions */
@@ -309,6 +325,21 @@ struct qdio_irq {
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa)
+#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)
+
+#define qperf_inc(__q, __attr) \
+({ \
+ struct qdio_irq *qdev = (__q)->irq_ptr; \
+ if (qdev->perf_stat_enabled) \
+ (qdev->perf_stat.__attr)++; \
+})
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+ q->q_stats.nr_sbal_error += count;
+ q->q_stats.nr_sbal_total += count;
+}
+
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
@@ -316,30 +347,24 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
-static inline unsigned long long get_usecs(void)
-{
- return monotonic_clock() >> 12;
-}
-
#define pci_out_supported(q) \
(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
-#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
-#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
-#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
-
-#define for_each_input_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->input_qs[0]; \
- i < irq_ptr->nr_input_qs; \
- q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->output_qs[0]; \
- i < irq_ptr->nr_output_qs; \
- q = irq_ptr->output_qs[++i])
+#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
@@ -350,16 +375,14 @@ static inline unsigned long long get_usecs(void)
#define sub_buf(bufnr, dec) \
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
-/* prototypes for thin interrupt */
-void qdio_sync_after_thinint(struct qdio_q *q);
-int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
- int auto_ack);
-void qdio_check_outbound_after_thinint(struct qdio_q *q);
-int qdio_inbound_q_moved(struct qdio_q *q);
-void qdio_kick_handler(struct qdio_q *q);
-void qdio_stop_polling(struct qdio_q *q);
-int qdio_siga_sync_q(struct qdio_q *q);
+#define queue_irqs_enabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
+extern u64 last_ai_time;
+
+/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -370,6 +393,8 @@ int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
+void clear_nonshared_ind(struct qdio_irq *);
+int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
@@ -391,5 +416,10 @@ int qdio_setup_create_sysfs(struct ccw_device *cdev);
void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state);
#endif /* _CIO_QDIO_H */
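
The rewritten for_each_*_queue macros use a GCC statement expression so the queue pointer is loaded only while the index is still in range; the old form evaluated input_qs[++i] one extra time after the last pass, before the bound check could stop it. A hedged standalone sketch of the pattern:

	#include <stdio.h>

	/* a minimal sketch of the bounded-iteration idiom (GNU C) */
	#define for_each_item(arr, n, p, i) \
		for (i = 0; i < (n) && ({ (p) = (arr)[i]; 1; }); (i)++)

	int main(void)
	{
		const char *qs[] = { "in0", "in1", "in2" };
		const char *q;
		int i;

		for_each_item(qs, 3, q, i)
			printf("%d: %s\n", i, q);	/* qs[3] is never read */
		return 0;
	}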
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b34f86..f1f3baa8e6e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,14 +1,13 @@
/*
- * drivers/s390/cio/qdio_debug.c
- *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <asm/qdio.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
@@ -17,15 +16,52 @@ debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
-#define MAX_DEBUGFS_QUEUES 32
-static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
-static DEFINE_MUTEX(debugfs_mutex);
-#define QDIO_DEBUGFS_NAME_LEN 40
+#define QDIO_DEBUGFS_NAME_LEN 10
+#define QDIO_DBF_NAME_LEN 20
+
+struct qdio_dbf_entry {
+ char dbf_name[QDIO_DBF_NAME_LEN];
+ debug_info_t *dbf_info;
+ struct list_head dbf_list;
+};
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+ struct qdio_dbf_entry *entry;
+ debug_info_t *rc = NULL;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+ if (strcmp(entry->dbf_name, name) == 0) {
+ rc = entry->dbf_info;
+ break;
+ }
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+ return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+ struct qdio_dbf_entry *entry, *tmp;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+ list_del(&entry->dbf_list);
+ debug_unregister(entry->dbf_info);
+ kfree(entry);
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr)
{
- char text[20];
+ char text[QDIO_DBF_NAME_LEN];
+ struct qdio_dbf_entry *new_entry;
DBF_EVENT("qfmt:%1d", init_data->q_format);
DBF_HEX(init_data->adapter_name, 8);
@@ -38,17 +74,39 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
DBF_HEX(&init_data->input_handler, sizeof(void *));
DBF_HEX(&init_data->output_handler, sizeof(void *));
DBF_HEX(&init_data->int_parm, sizeof(long));
- DBF_HEX(&init_data->flags, sizeof(long));
DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
/* allocate trace view for the interface */
- snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
- irq_ptr->debug_area = debug_register(text, 2, 1, 16);
- debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
- debug_set_level(irq_ptr->debug_area, DBF_WARN);
- DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+ dev_name(&init_data->cdev->dev));
+ irq_ptr->debug_area = qdio_get_dbf_entry(text);
+ if (irq_ptr->debug_area)
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+ else {
+ irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+ if (!irq_ptr->debug_area)
+ return -ENOMEM;
+ if (debug_register_view(irq_ptr->debug_area,
+ &debug_hex_ascii_view)) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ debug_set_level(irq_ptr->debug_area, DBF_WARN);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+ if (!new_entry) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ new_entry->dbf_info = irq_ptr->debug_area;
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_add(&new_entry->dbf_list, &qdio_dbf_list);
+ mutex_unlock(&qdio_dbf_list_mutex);
+ }
+ return 0;
}
static int qstat_show(struct seq_file *m, void *v)
@@ -60,24 +118,33 @@ static int qstat_show(struct seq_file *m, void *v)
if (!q)
return 0;
- seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
- seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
- seq_printf(m, "ftc: %d\n", q->first_to_check);
- seq_printf(m, "last_move: %d\n", q->last_move);
- seq_printf(m, "polling: %d\n", q->u.in.polling);
- seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
- seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
- seq_printf(m, "slsb buffer states:\n");
+ seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
+ q->timestamp, last_ai_time);
+ seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
+ atomic_read(&q->nr_buf_used),
+ q->first_to_check, q->last_move);
+ if (q->is_input_q) {
+ seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+ q->u.in.polling, q->u.in.ack_start,
+ q->u.in.ack_count);
+ seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
+ *(u32 *)q->irq_ptr->dsci,
+ test_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state));
+ }
+ seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
- qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
- get_buf_state(q, i, &state, 0);
+ debug_get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
seq_printf(m, "N");
break;
+ case SLSB_P_OUTPUT_PENDING:
+ seq_printf(m, "P");
+ break;
case SLSB_P_INPUT_PRIMED:
case SLSB_CU_OUTPUT_PRIMED:
seq_printf(m, "+");
@@ -105,73 +172,142 @@ static int qstat_show(struct seq_file *m, void *v)
}
seq_printf(m, "\n");
seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
- return 0;
-}
-
-static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
- size_t count, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct qdio_q *q = seq->private;
- if (!q)
+ seq_printf(m, "\nSBAL statistics:");
+ if (!q->irq_ptr->perf_stat_enabled) {
+ seq_printf(m, " disabled\n");
return 0;
+ }
- if (q->is_input_q)
- xchg(q->irq_ptr->dsci, 1);
- local_bh_disable();
- tasklet_schedule(&q->tasklet);
- local_bh_enable();
- return count;
+ seq_printf(m, "\n1 2.. 4.. 8.. "
+ "16.. 32.. 64.. 127\n");
+ for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+ seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+ seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
+ q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+ q->q_stats.nr_sbal_total);
+ return 0;
}
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qstat_show,
- filp->f_path.dentry->d_inode->i_private);
+ file_inode(filp)->i_private);
}
-static void remove_debugfs_entry(struct qdio_q *q)
+static const struct file_operations debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qstat_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static char *qperf_names[] = {
+ "Assumed adapter interrupts",
+ "QDIO interrupts",
+ "Requested PCIs",
+ "Inbound tasklet runs",
+ "Inbound tasklet resched",
+ "Inbound tasklet resched2",
+ "Outbound tasklet runs",
+ "SIGA read",
+ "SIGA write",
+ "SIGA sync",
+ "Inbound calls",
+ "Inbound handler",
+ "Inbound stop_polling",
+ "Inbound queue full",
+ "Outbound calls",
+ "Outbound handler",
+ "Outbound queue full",
+ "Outbound fast_requeue",
+ "Outbound target_full",
+ "QEBSM eqbs",
+ "QEBSM eqbs partial",
+ "QEBSM sqbs",
+ "QEBSM sqbs partial",
+ "Discarded interrupts"
+};
+
+static int qperf_show(struct seq_file *m, void *v)
{
+ struct qdio_irq *irq_ptr = m->private;
+ unsigned int *stat;
int i;
- for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
- if (!debugfs_queues[i])
- continue;
- if (debugfs_queues[i]->d_inode->i_private == q) {
- debugfs_remove(debugfs_queues[i]);
- debugfs_queues[i] = NULL;
- }
+ if (!irq_ptr)
+ return 0;
+ if (!irq_ptr->perf_stat_enabled) {
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ stat = (unsigned int *)&irq_ptr->perf_stat;
+
+ for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+ seq_printf(m, "%26s:\t%u\n",
+ qperf_names[i], *(stat + i));
+ return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct qdio_irq *irq_ptr = seq->private;
+ struct qdio_q *q;
+ unsigned long val;
+ int ret, i;
+
+ if (!irq_ptr)
+ return 0;
+
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ irq_ptr->perf_stat_enabled = 0;
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+ for_each_input_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ for_each_output_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ break;
+ case 1:
+ irq_ptr->perf_stat_enabled = 1;
+ break;
}
+ return count;
+}
+
+static int qperf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qperf_show,
+ file_inode(filp)->i_private);
}
-static struct file_operations debugfs_fops = {
+static const struct file_operations debugfs_perf_fops = {
.owner = THIS_MODULE,
- .open = qstat_seq_open,
+ .open = qperf_seq_open,
.read = seq_read,
- .write = qstat_seq_write,
+ .write = qperf_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
-static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
+static void setup_debugfs_entry(struct qdio_q *q)
{
- int i = 0;
char name[QDIO_DEBUGFS_NAME_LEN];
- while (debugfs_queues[i] != NULL) {
- i++;
- if (i >= MAX_DEBUGFS_QUEUES)
- return;
- }
- snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
- dev_name(&cdev->dev),
+ snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
- debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
- debugfs_root, q, &debugfs_fops);
- if (IS_ERR(debugfs_queues[i]))
- debugfs_queues[i] = NULL;
+ q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+ q->irq_ptr->debugfs_dev, q, &debugfs_fops);
+ if (IS_ERR(q->debugfs_q))
+ q->debugfs_q = NULL;
}
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -179,30 +315,40 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
struct qdio_q *q;
int i;
- mutex_lock(&debugfs_mutex);
+ irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+ debugfs_root);
+ if (IS_ERR(irq_ptr->debugfs_dev))
+ irq_ptr->debugfs_dev = NULL;
+
+ irq_ptr->debugfs_perf = debugfs_create_file("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr,
+ &debugfs_perf_fops);
+ if (IS_ERR(irq_ptr->debugfs_perf))
+ irq_ptr->debugfs_perf = NULL;
+
for_each_input_queue(irq_ptr, q, i)
- setup_debugfs_entry(q, cdev);
+ setup_debugfs_entry(q);
for_each_output_queue(irq_ptr, q, i)
- setup_debugfs_entry(q, cdev);
- mutex_unlock(&debugfs_mutex);
+ setup_debugfs_entry(q);
}
-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
- mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
- remove_debugfs_entry(q);
+ debugfs_remove(q->debugfs_q);
for_each_output_queue(irq_ptr, q, i)
- remove_debugfs_entry(q);
- mutex_unlock(&debugfs_mutex);
+ debugfs_remove(q->debugfs_q);
+ debugfs_remove(irq_ptr->debugfs_perf);
+ debugfs_remove(irq_ptr->debugfs_dev);
}
int __init qdio_debug_init(void)
{
- debugfs_root = debugfs_create_dir("qdio_queues", NULL);
+ debugfs_root = debugfs_create_dir("qdio", NULL);
qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
@@ -218,6 +364,7 @@ int __init qdio_debug_init(void)
void qdio_debug_exit(void)
{
+ qdio_clear_dbf_list();
debugfs_remove(debugfs_root);
if (qdio_dbf_setup)
debug_unregister(qdio_dbf_setup);
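
qperf_seq_write above shows the standard pattern for a writable seq_file node: parse the user buffer with kstrtoul_from_user(), act on the value, and return count to consume the write. A hedged minimal sketch with a hypothetical device type (not this driver's structures):

	struct my_dev {				/* hypothetical */
		int stats_enabled;
		struct { unsigned int ints; } stats;
	};

	static ssize_t stats_write(struct file *file, const char __user *ubuf,
				   size_t count, loff_t *off)
	{
		struct seq_file *seq = file->private_data;
		struct my_dev *dev = seq->private;
		unsigned long val;
		int ret;

		ret = kstrtoul_from_user(ubuf, count, 10, &val);
		if (ret)
			return ret;
		dev->stats_enabled = (val == 1);	/* "1" enables */
		if (val == 0)				/* "0" disables and resets */
			memset(&dev->stats, 0, sizeof(dev->stats));
		return count;
	}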
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 5d70bd162ae..f33ce857761 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/qdio_debug.h
- *
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
@@ -18,12 +16,6 @@
extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_error;
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
- return (level <= dbf_grp->level);
-}
-
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
@@ -39,10 +31,14 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
} while (0)
-#define DBF_HEX(addr, len) \
- do { \
- debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \
- } while (0)
+static inline void DBF_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
+ len -= qdio_dbf_setup->buf_size;
+ addr += qdio_dbf_setup->buf_size;
+ }
+}
#define DBF_ERROR(text...) \
do { \
@@ -51,32 +47,39 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
} while (0)
-#define DBF_ERROR_HEX(addr, len) \
- do { \
- debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \
- } while (0)
-
+static inline void DBF_ERROR_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
+ len -= qdio_dbf_error->buf_size;
+ addr += qdio_dbf_error->buf_size;
+ }
+}
#define DBF_DEV_EVENT(level, device, text...) \
do { \
char debug_buffer[QDIO_DBF_LEN]; \
- if (qdio_dbf_passes(device->debug_area, level)) { \
+ if (debug_level_enabled(device->debug_area, level)) { \
snprintf(debug_buffer, QDIO_DBF_LEN, text); \
debug_text_event(device->debug_area, level, debug_buffer); \
} \
} while (0)
-#define DBF_DEV_HEX(level, device, addr, len) \
- do { \
- debug_event(device->debug_area, level, (void*)(addr), len); \
- } while (0)
+static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
+ int len, int level)
+{
+ while (len > 0) {
+ debug_event(dev->debug_area, level, addr, len);
+ len -= dev->debug_area->buf_size;
+ addr += dev->debug_area->buf_size;
+ }
+}
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
int qdio_debug_init(void);
void qdio_debug_exit(void);
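
The DBF_HEX conversion from macro to inline function matters because debug_event() stores at most buf_size bytes per trace entry; the loop splits an oversized dump across consecutive entries instead of silently truncating it. A hedged standalone sketch of the chunking loop:

	/* a minimal sketch; record() stands in for debug_event() */
	static void hex_dump_chunked(void (*record)(void *addr, int len),
				     void *addr, int len, int buf_size)
	{
		while (len > 0) {
			record(addr, len);	/* stores min(len, buf_size) */
			len -= buf_size;
			addr += buf_size;	/* GNU C: void * steps in bytes */
		}
	}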
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index accd957454e..848e3b64ea6 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/qdio_main.c
- *
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
- * Copyright 2000,2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
* 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -13,27 +11,30 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
+#include <asm/ipl.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
-static inline int do_siga_sync(struct subchannel_id schid,
- unsigned int out_mask, unsigned int in_mask)
+static inline int do_siga_sync(unsigned long schid,
+ unsigned int out_mask, unsigned int in_mask,
+ unsigned int fc)
{
- register unsigned long __fc asm ("0") = 2;
- register struct subchannel_id __schid asm ("1") = schid;
+ register unsigned long __fc asm ("0") = fc;
+ register unsigned long __schid asm ("1") = schid;
register unsigned long out asm ("2") = out_mask;
register unsigned long in asm ("3") = in_mask;
int cc;
@@ -47,10 +48,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
return cc;
}
-static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
+static inline int do_siga_input(unsigned long schid, unsigned int mask,
+ unsigned int fc)
{
- register unsigned long __fc asm ("0") = 1;
- register struct subchannel_id __schid asm ("1") = schid;
+ register unsigned long __fc asm ("0") = fc;
+ register unsigned long __schid asm ("1") = schid;
register unsigned long __mask asm ("2") = mask;
int cc;
@@ -59,7 +61,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
+ : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
return cc;
}
@@ -70,26 +72,27 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
* @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
* @fc: function code to perform
*
- * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
+ * Returns condition code.
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
- unsigned int *bb, unsigned int fc)
+ unsigned int *bb, unsigned int fc,
+ unsigned long aob)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
- int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
+ register unsigned long __aob asm("3") = aob;
+ int cc;
asm volatile(
" siga 0\n"
- "0: ipm %0\n"
+ " ipm %0\n"
" srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
- : : "cc", "memory");
- *bb = ((unsigned int) __fc) >> 31;
+ : "=d" (cc), "+d" (__fc), "+d" (__aob)
+ : "d" (__schid), "d" (__mask)
+ : "cc");
+ *bb = __fc >> 31;
return cc;
}
@@ -98,9 +101,12 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
/* all done or next buffer state different */
if (ccq == 0 || ccq == 32)
return 0;
- /* not all buffers processed */
- if (ccq == 96 || ccq == 97)
+ /* no buffer processed */
+ if (ccq == 97)
return 1;
+ /* not all buffers processed */
+ if (ccq == 96)
+ return 2;
/* notify devices immediately */
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
return -EIO;
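
After this change qdio_check_ccq distinguishes three retryable outcomes instead of two: ccq 0/32 mean done, 97 means no buffer was processed yet (plain retry), and 96 means a partial extract. A hedged sketch of the triage:

	#include <errno.h>

	/* a minimal sketch of the condition-code-qualifier mapping above */
	enum ccq_verdict { CCQ_DONE = 0, CCQ_AGAIN = 1, CCQ_PARTIAL = 2 };

	static int classify_ccq(unsigned int ccq)
	{
		if (ccq == 0 || ccq == 32)
			return CCQ_DONE;	/* all done / next state differs */
		if (ccq == 97)
			return CCQ_AGAIN;	/* no buffer processed */
		if (ccq == 96)
			return CCQ_PARTIAL;	/* some buffers processed */
		return -EIO;			/* anything else is fatal */
	}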
@@ -120,13 +126,10 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
+ int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
- int tmp_count = count, tmp_start = start;
- int nr = q->nr;
- int rc;
- BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
+ qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
@@ -134,29 +137,33 @@ again:
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
auto_ack);
rc = qdio_check_ccq(q, ccq);
-
- /* At least one buffer was processed, return and extract the remaining
- * buffers later.
- */
- if ((ccq == 96) && (count != tmp_count)) {
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
- return (count - tmp_count);
- }
+ if (!rc)
+ return count - tmp_count;
if (rc == 1) {
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
goto again;
}
- if (rc < 0) {
- DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev,
- QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
- return 0;
+ if (rc == 2) {
+ qperf_inc(q, eqbs_partial);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ tmp_count);
+ /*
+ * Retry once, if that fails bail out and process the
+ * extracted buffers before trying again.
+ */
+ if (!retried++)
+ goto again;
+ else
+ return count - tmp_count;
}
- return count - tmp_count;
+
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
}
/**
@@ -180,50 +187,51 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
if (!count)
return 0;
-
- BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
+ qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
- if (rc == 1) {
+ if (!rc) {
+ WARN_ON_ONCE(tmp_count);
+ return count - tmp_count;
+ }
+
+ if (rc == 1 || rc == 2) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
+ qperf_inc(q, sqbs_partial);
goto again;
}
- if (rc < 0) {
- DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
- DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
- q->handler(q->irq_ptr->cdev,
- QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
- return 0;
- }
- WARN_ON(tmp_count);
- return count - tmp_count;
+
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
- int auto_ack)
+ int auto_ack, int merge_pending)
{
unsigned char __state = 0;
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
for (i = 0; i < count; i++) {
- if (!__state)
+ if (!__state) {
__state = q->slsb.val[bufnr];
- else if (q->slsb.val[bufnr] != __state)
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+ } else if (merge_pending) {
+ if ((q->slsb.val[bufnr] & __state) != __state)
+ break;
+ } else if (q->slsb.val[bufnr] != __state)
break;
bufnr = next_buf(bufnr);
}
@@ -231,10 +239,10 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
return i;
}
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
- unsigned char *state, int auto_ack)
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, int auto_ack)
{
- return get_buf_states(q, bufnr, state, 1, auto_ack);
+ return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -243,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
{
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
@@ -263,7 +268,7 @@ static inline int set_buf_state(struct qdio_q *q, int bufnr,
}
/* set slsb states to initial state */
-void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
@@ -276,24 +281,28 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
QDIO_MAX_BUFFERS_PER_Q);
}
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_SYNC;
int cc;
- if (!need_siga_sync(q))
- return 0;
-
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_sync);
+ qperf_inc(q, siga_sync);
- cc = do_siga_sync(q->irq_ptr->schid, output, input);
- if (cc)
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+
+ cc = do_siga_sync(schid, output, input, fc);
+ if (unlikely(cc))
DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
- return cc;
+ return (cc) ? -EIO : 0;
}
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
return qdio_siga_sync(q, 0, q->mask);
@@ -301,82 +310,95 @@ inline int qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
-static inline int qdio_siga_sync_out(struct qdio_q *q)
-{
- return qdio_siga_sync(q, ~0U, 0);
-}
-
-static inline int qdio_siga_sync_all(struct qdio_q *q)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+ unsigned long aob)
{
- return qdio_siga_sync(q, ~0U, ~0U);
-}
-
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
-{
- unsigned long schid;
- unsigned int fc = 0;
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
- int cc;
+ int retries = 0, cc;
+ unsigned long laob = 0;
- if (q->u.out.use_enh_siga)
- fc = 3;
+ if (q->u.out.use_cq && aob != 0) {
+ fc = QDIO_SIGA_WRITEQ;
+ laob = aob;
+ }
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
- fc |= 0x80;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
}
- else
- schid = *((u32 *)&q->irq_ptr->schid);
-
again:
- cc = do_siga_output(schid, q->mask, busy_bit, fc);
+ WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+ (aob && fc != QDIO_SIGA_WRITEQ));
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
/* hipersocket busy condition */
- if (*busy_bit) {
- WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
+ if (unlikely(*busy_bit)) {
+ retries++;
if (!start_time) {
- start_time = get_usecs();
+ start_time = get_tod_clock_fast();
goto again;
}
- if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
+ if (retries) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+ "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+ }
return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_READ;
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_in);
+ qperf_inc(q, siga_read);
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
- cc = do_siga_input(q->irq_ptr->schid, q->mask);
- if (cc)
+ cc = do_siga_input(schid, q->mask, fc);
+ if (unlikely(cc))
DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
- return cc;
+ return (cc) ? -EIO : 0;
}
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
{
- if (pci_out_supported(q)) {
- if (need_siga_sync_thinint(q))
- qdio_siga_sync_all(q);
- else if (need_siga_sync_out_thinint(q))
- qdio_siga_sync_out(q);
- } else
+	/* PCI-capable outbound queues will also be scanned, so sync them too */
+ if (pci_out_supported(q))
+ qdio_siga_sync_all(q);
+ else
+ qdio_siga_sync_q(q);
+}
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ if (need_siga_sync(q))
qdio_siga_sync_q(q);
+ return get_buf_states(q, bufnr, state, 1, 0, 0);
}
-inline void qdio_stop_polling(struct qdio_q *q)
+static inline void qdio_stop_polling(struct qdio_q *q)
{
if (!q->u.in.polling)
return;
q->u.in.polling = 0;
- qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+ qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
if (is_qebsm(q)) {
@@ -387,32 +409,55 @@ inline void qdio_stop_polling(struct qdio_q *q)
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
-static void announce_buffer_error(struct qdio_q *q, int count)
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
+{
+ int pos;
+
+ q->q_stats.nr_sbal_total += count;
+ if (count == QDIO_MAX_BUFFERS_MASK) {
+ q->q_stats.nr_sbals[7]++;
+ return;
+ }
+ pos = ilog2(count);
+ q->q_stats.nr_sbals[pos]++;
+}
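/*
 * The ilog2() bucketing above builds a small histogram of SBALs
 * handled per queue scan: count 1 lands in nr_sbals[0], 2-3 in
 * nr_sbals[1], ..., 64-126 in nr_sbals[6], while a full scan of
 * QDIO_MAX_BUFFERS_MASK (127) buffers is special-cased into
 * nr_sbals[7].
 */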
+
+static void process_buffer_error(struct qdio_q *q, int count)
{
- q->qdio_error |= QDIO_ERROR_SLSB_STATE;
+ unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
+ SLSB_P_OUTPUT_NOT_INIT;
+
+ q->qdio_error = QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
if ((!q->is_input_q &&
- (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
- qdio_perf_stat_inc(&perf_stats.outbound_target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
+ (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
+ qperf_inc(q, target_full);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
- return;
+ goto set;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].flags & 0xff,
- q->sbal[q->first_to_check]->element[15].flags & 0xff);
+ q->sbal[q->first_to_check]->element[14].sflags,
+ q->sbal[q->first_to_check]->element[15].sflags);
+
+set:
+ /*
+ * Interrupts may be avoided as long as the error is present
+ * so change the buffer state immediately to avoid starvation.
+ */
+ set_buf_states(q, q->first_to_check, state, count);
}
static inline void inbound_primed(struct qdio_q *q, int count)
{
int new;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
@@ -449,19 +494,16 @@ static inline void inbound_primed(struct qdio_q *q, int count)
count--;
if (!count)
return;
-
- /*
- * Need to change all PRIMED buffers to NOT_INIT, otherwise
- * we're loosing initiative in the thinint code.
- */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
- count);
+ /* need to change ALL buffers to get more interrupts */
+ set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
+
+ q->timestamp = get_tod_clock_fast();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -470,107 +512,151 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /*
- * No siga sync here, as a PCI or we after a thin interrupt
- * will sync the queues.
- */
-
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
goto out;
- count = get_buf_states(q, q->first_to_check, &state, count, 1);
+	/*
+	 * No siga sync here: a PCI interrupt or the thin-interrupt
+	 * handler has already synced the queues.
+	 */
+ count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
if (!count)
goto out;
switch (state) {
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
- /*
- * No siga-sync needed for non-qebsm here, as the inbound queue
- * will be synced on the next siga-r, resp.
- * tiqdio_is_inbound_q_done will do the siga-sync.
- */
q->first_to_check = add_buf(q->first_to_check, count);
- atomic_sub(count, &q->nr_buf_used);
- goto check_next;
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+ break;
case SLSB_P_INPUT_ERROR:
- announce_buffer_error(q, count);
- /* process the buffer, the upper layer will take care of it */
+ process_buffer_error(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
}
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
{
int bufnr;
bufnr = get_inbound_buffer_frontier(q);
- if ((bufnr != q->last_move) || q->qdio_error) {
+ if (bufnr != q->last_move) {
q->last_move = bufnr;
- if (!need_siga_sync(q) && !pci_out_supported(q))
- q->u.in.timestamp = get_usecs();
-
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
+ if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+ q->u.in.timestamp = get_tod_clock();
return 1;
} else
return 0;
}
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
- /*
- * We need that one for synchronization with the adapter, as it
- * does a kind of PCI avoidance.
- */
- qdio_siga_sync_q(q);
-
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0);
- if (state == SLSB_P_INPUT_PRIMED)
- /* we got something to do */
+
+ if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
+ /* more work coming */
return 0;
- /* on VM, we don't poll, so the q is always done here */
- if (need_siga_sync(q) || pci_out_supported(q))
+ if (is_thinint_irq(q->irq_ptr))
+ return 1;
+
+ /* don't poll under z/VM */
+ if (MACHINE_IS_VM)
return 1;
/*
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
+ if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
- } else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
- q->first_to_check);
+ } else
return 0;
+}
+
+static inline int contains_aobs(struct qdio_q *q)
+{
+ return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+ unsigned char state = 0;
+ int j, b = start;
+
+ if (!contains_aobs(q))
+ return;
+
+ for (j = 0; j < count; ++j) {
+ get_buf_state(q, b, &state, 0);
+ if (state == SLSB_P_OUTPUT_PENDING) {
+ struct qaob *aob = q->u.out.aobs[b];
+ if (aob == NULL)
+ continue;
+
+ q->u.out.sbal_state[b].flags |=
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
+ q->u.out.aobs[b] = NULL;
+ } else if (state == SLSB_P_OUTPUT_EMPTY) {
+ q->u.out.sbal_state[b].aob = NULL;
+ }
+ b = next_buf(b);
+ }
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+ int bufnr)
+{
+ unsigned long phys_aob = 0;
+
+ if (!q->use_cq)
+ goto out;
+
+ if (!q->aobs[bufnr]) {
+ struct qaob *aob = qdio_allocate_aob();
+ q->aobs[bufnr] = aob;
}
+ if (q->aobs[bufnr]) {
+ q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+ q->sbal_state[bufnr].aob = q->aobs[bufnr];
+ q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+ phys_aob = virt_to_phys(q->aobs[bufnr]);
+ WARN_ON_ONCE(phys_aob & 0xFF);
+ }
+
+out:
+ return phys_aob;
}
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
int end = q->first_to_check;
@@ -582,13 +668,16 @@ void qdio_kick_handler(struct qdio_q *q)
count = sub_buf(end, start);
if (q->is_input_q) {
- qdio_perf_stat_inc(&perf_stats.inbound_handler);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
+ qperf_inc(q, inbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
} else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
+ qperf_inc(q, outbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+ start, count);
}
+ qdio_handle_aobs(q, start, count);
+
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
@@ -599,27 +688,34 @@ void qdio_kick_handler(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
-again:
+ qperf_inc(q, tasklet_inbound);
+
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
- goto again;
+ qperf_inc(q, tasklet_inbound_resched);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
+ }
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q))
- goto again;
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
}
-/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
@@ -629,11 +725,16 @@ void qdio_inbound_processing(unsigned long data)
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
- if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
- (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
- qdio_siga_sync_q(q);
+ q->timestamp = get_tod_clock_fast();
+
+ if (need_siga_sync(q))
+ if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+ !pci_out_supported(q)) ||
+ (queue_type(q) == QDIO_IQDIO_QFMT &&
+ multicast_outbound(q)))
+ qdio_siga_sync_q(q);
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -641,49 +742,47 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
-
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
- return q->first_to_check;
+ goto out;
- count = get_buf_states(q, q->first_to_check, &state, count, 0);
+ count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
if (!count)
- return q->first_to_check;
+ goto out;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+ "out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
- /*
- * We fetch all buffer states at once. get_buf_states may
- * return count < stop. For QEBSM we do not loop.
- */
- if (is_qebsm(q))
- break;
- goto check_next;
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+
+ break;
case SLSB_P_OUTPUT_ERROR:
- announce_buffer_error(q, count);
- /* process the buffer, the upper layer will take care of it */
+ process_buffer_error(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+ q->nr);
break;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
+
+out:
return q->first_to_check;
}
@@ -699,7 +798,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
bufnr = get_outbound_buffer_frontier(q);
- if ((bufnr != q->last_move) || q->qdio_error) {
+ if (bufnr != q->last_move) {
q->last_move = bufnr;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
return 1;
@@ -707,40 +806,52 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
return 0;
}
-static int qdio_kick_outbound_q(struct qdio_q *q)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
+ int retries = 0, cc;
unsigned int busy_bit;
- int cc;
if (!need_siga_out(q))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_out);
+retry:
+ qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit);
+ cc = qdio_siga_output(q, &busy_bit, aob);
switch (cc) {
case 0:
break;
case 2:
if (busy_bit) {
- DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
- cc |= QDIO_ERROR_SIGA_BUSY;
- } else
+ while (++retries < QDIO_BUSY_BIT_RETRIES) {
+ mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+ goto retry;
+ }
+ DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
+ cc = -EBUSY;
+ } else {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
+ cc = -ENOBUFS;
+ }
break;
case 1:
case 3:
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+ cc = -EIO;
break;
}
+ if (retries) {
+ DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+ DBF_ERROR("count:%u", retries);
+ }
return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
- BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+ qperf_inc(q, tasklet_outbound);
+ WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_handler(q);
@@ -749,30 +860,19 @@ static void __qdio_outbound_processing(struct qdio_q *q)
if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
goto sched;
- /* bail out for HiperSockets unicast queues */
- if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
- return;
-
- if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
- (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
- goto sched;
-
if (q->u.out.pci_out_enabled)
return;
/*
* Now we know that queue type is either qeth without pci enabled
- * or HiperSockets multicast. Make sure buffer switch from PRIMED to
- * EMPTY is noticed and outbound_handler is called after some time.
+ * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
+ * is noticed and outbound_handler is called after some time.
*/
if (qdio_outbound_q_done(q))
del_timer(&q->u.out.timer);
- else {
- if (!timer_pending(&q->u.out.timer)) {
+ else
+ if (!timer_pending(&q->u.out.timer))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
- qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
- }
- }
return;
sched:
@@ -797,8 +897,7 @@ void qdio_outbound_timer(unsigned long data)
tasklet_schedule(&q->tasklet);
}
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
struct qdio_q *out;
int i;
@@ -811,6 +910,49 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
tasklet_schedule(&out->tasklet);
}
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+ qperf_inc(q, tasklet_inbound);
+ if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+ qdio_sync_queues(q);
+
+ /*
+ * The interrupt could be caused by a PCI request. Check the
+ * PCI capable outbound queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_handler(q);
+
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __tiqdio_inbound_processing(q);
+}
+
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
@@ -838,10 +980,20 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
- qdio_perf_stat_inc(&perf_stats.pci_int);
-
- for_each_input_queue(irq_ptr, q, i)
- tasklet_schedule(&q->tasklet);
+ for_each_input_queue(irq_ptr, q, i) {
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
+ tasklet_schedule(&q->tasklet);
+ }
+ }
if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
@@ -849,10 +1001,8 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
for_each_output_queue(irq_ptr, q, i) {
if (qdio_outbound_q_done(q))
continue;
-
- if (!siga_syncs_out_pci(q))
+ if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
qdio_siga_sync_q(q);
-
tasklet_schedule(&q->tasklet);
}
}
@@ -862,6 +1012,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
+ int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
@@ -875,48 +1026,39 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
dump_stack();
goto no_handler;
}
- q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, irq_ptr->int_parm);
+
+ count = sub_buf(q->first_to_check, q->first_to_kick);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
+ q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+ /*
+	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
+ * Therefore we call the LGR detection function here.
+ */
+ lgr_info_log();
}
-static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
- int dstat)
+static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+ int dstat)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
- DBF_ERROR("EQ:ck con");
- goto error;
- }
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
- if (!(dstat & DEV_STAT_DEV_END)) {
- DBF_ERROR("EQ:no dev");
+ if (cstat)
goto error;
- }
-
- if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
- DBF_ERROR("EQ: bad io");
+ if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
- }
- return 0;
+ if (!(dstat & DEV_STAT_DEV_END))
+ goto error;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+ return;
+
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
-
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- return 1;
-}
-
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
- int dstat)
-{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
- if (!qdio_establish_check_errors(cdev, cstat, dstat))
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
/* qdio interrupt handler */
@@ -926,27 +1068,21 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int cstat, dstat;
- qdio_perf_stat_inc(&perf_stats.qdio_int);
-
if (!intparm || !irq_ptr) {
DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
return;
}
+ if (irq_ptr->perf_stat_enabled)
+ irq_ptr->perf_stat.qdio_int++;
+
if (IS_ERR(irb)) {
- switch (PTR_ERR(irb)) {
- case -EIO:
- DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- wake_up(&cdev->private->wait_q);
- return;
- default:
- WARN_ON(1);
- return;
- }
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ wake_up(&cdev->private->wait_q);
+ return;
}
qdio_irq_check_sense(irq_ptr, irb);
-
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
@@ -954,24 +1090,23 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
case QDIO_IRQ_STATE_INACTIVE:
qdio_establish_handle_irq(cdev, cstat, dstat);
break;
-
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
break;
-
case QDIO_IRQ_STATE_ESTABLISHED:
case QDIO_IRQ_STATE_ACTIVE:
if (cstat & SCHN_STAT_PCI) {
qdio_int_handler_pci(irq_ptr);
return;
}
- if ((cstat & ~SCHN_STAT_PCI) || dstat) {
+ if (cstat || dstat)
qdio_handle_activate_check(cdev, intparm, cstat,
dstat);
- break;
- }
+ break;
+ case QDIO_IRQ_STATE_STOPPED:
+ break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
@@ -996,30 +1131,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-/**
- * qdio_cleanup - shutdown queues and free data structures
- * @cdev: associated ccw device
- * @how: use halt or clear to shutdown
- *
- * This function calls qdio_shutdown() for @cdev with method @how.
- * and qdio_free(). The qdio_free() return value is ignored since
- * !irq_ptr is already checked.
- */
-int qdio_cleanup(struct ccw_device *cdev, int how)
-{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- int rc;
-
- if (!irq_ptr)
- return -ENODEV;
-
- rc = qdio_shutdown(cdev, how);
-
- qdio_free(cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_cleanup);
-
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1049,7 +1160,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
- BUG_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
@@ -1070,7 +1181,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
tiqdio_remove_input_queues(irq_ptr);
qdio_shutdown_queues(cdev);
- qdio_shutdown_debug_entries(irq_ptr, cdev);
+ qdio_shutdown_debug_entries(irq_ptr);
/* cleanup subchannel */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
@@ -1122,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
return -ENODEV;
DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
mutex_lock(&irq_ptr->setup_mutex);
- if (irq_ptr->debug_area != NULL) {
- debug_unregister(irq_ptr->debug_area);
- irq_ptr->debug_area = NULL;
- }
+ irq_ptr->debug_area = NULL;
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1137,28 +1246,6 @@ int qdio_free(struct ccw_device *cdev)
EXPORT_SYMBOL_GPL(qdio_free);
/**
- * qdio_initialize - allocate and establish queues for a qdio subchannel
- * @init_data: initialization data
- *
- * This function first allocates queues via qdio_allocate() and on success
- * establishes them via qdio_establish().
- */
-int qdio_initialize(struct qdio_initialize *init_data)
-{
- int rc;
-
- rc = qdio_allocate(init_data);
- if (rc)
- return rc;
-
- rc = qdio_establish(init_data);
- if (rc)
- qdio_free(init_data->cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_initialize);
-
-/**
* qdio_allocate - allocate qdio queues and associated data
* @init_data: initialization data
*/
@@ -1186,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
goto out_err;
mutex_init(&irq_ptr->setup_mutex);
- qdio_allocate_dbf(init_data, irq_ptr);
+ if (qdio_allocate_dbf(init_data, irq_ptr))
+ goto out_rel;
/*
* Allocate a page for the chsc calls in qdio_establish.
@@ -1202,7 +1290,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto out_rel;
- WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
@@ -1218,6 +1305,26 @@ out_err:
}
EXPORT_SYMBOL_GPL(qdio_allocate);
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q = irq_ptr->input_qs[0];
+ int i, use_cq = 0;
+
+ if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+ use_cq = 1;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (use_cq) {
+ if (qdio_enable_async_operation(&q->u.out) < 0) {
+ use_cq = 0;
+ continue;
+ }
+ } else
+ qdio_disable_async_operation(&q->u.out);
+ }
+ DBF_EVENT("use_cq:%d", use_cq);
+}
+
/**
* qdio_establish - establish queues on a qdio subchannel
* @init_data: initialization data
@@ -1281,8 +1388,8 @@ int qdio_establish(struct qdio_initialize *init_data)
}
qdio_setup_ssqd_info(irq_ptr);
- DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
- DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
+
+ qdio_detect_hsicq(irq_ptr);
/* qebsm is now setup if available, initialize buffer states */
qdio_init_buf_states(irq_ptr);
@@ -1388,7 +1495,9 @@ static inline int buf_in_between(int bufnr, int start, int count)
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- int used, diff;
+ int diff;
+
+ qperf_inc(q, inbound_call);
if (!q->u.in.polling)
goto set;
@@ -1419,16 +1528,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
-
- used = atomic_add_return(count, &q->nr_buf_used) - count;
- BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
-
- /* no need to signal as long as the adapter had free buffers */
- if (used)
- return 0;
+ atomic_add(count, &q->nr_buf_used);
if (need_siga_in(q))
return qdio_siga_input(q);
+
return 0;
}
@@ -1442,60 +1546,50 @@ set:
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- unsigned char state;
+ unsigned char state = 0;
int used, rc = 0;
- qdio_perf_stat_inc(&perf_stats.outbound_handler);
+ qperf_inc(q, outbound_call);
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
- BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
- if (callflags & QDIO_FLAG_PCI_OUT)
+ if (used == QDIO_MAX_BUFFERS_PER_Q)
+ qperf_inc(q, outbound_queue_full);
+
+ if (callflags & QDIO_FLAG_PCI_OUT) {
q->u.out.pci_out_enabled = 1;
- else
+ qperf_inc(q, pci_request_int);
+ } else
q->u.out.pci_out_enabled = 0;
if (queue_type(q) == QDIO_IQDIO_QFMT) {
- if (multicast_outbound(q))
- rc = qdio_kick_outbound_q(q);
- else
- if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
- (count > 1) &&
- (count <= q->irq_ptr->ssqd_desc.mmwc)) {
- /* exploit enhanced SIGA */
- q->u.out.use_enh_siga = 1;
- rc = qdio_kick_outbound_q(q);
- } else {
- /*
- * One siga-w per buffer required for unicast
- * HiperSockets.
- */
- q->u.out.use_enh_siga = 0;
- while (count--) {
- rc = qdio_kick_outbound_q(q);
- if (rc)
- goto out;
- }
- }
- goto out;
- }
+ unsigned long phys_aob = 0;
- if (need_siga_sync(q)) {
- qdio_siga_sync_q(q);
- goto out;
- }
+ /* One SIGA-W per buffer required for unicast HSI */
+ WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
- /* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state, 0);
- if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q);
- else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
- qdio_perf_stat_inc(&perf_stats.fast_requeue);
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+ rc = qdio_kick_outbound_q(q, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
+ } else {
+ /* try to fast requeue buffers */
+ get_buf_state(q, prev_buf(bufnr), &state, 0);
+ if (state != SLSB_CU_OUTPUT_PRIMED)
+ rc = qdio_kick_outbound_q(q, 0);
+ else
+ qperf_inc(q, fast_requeue);
}
-out:
- tasklet_schedule(&q->tasklet);
+
+ /* in case of SIGA errors we must process the error immediately */
+ if (used >= q->u.out.scan_threshold || rc)
+ tasklet_schedule(&q->tasklet);
+ else
+ /* free the SBALs in case of no further traffic */
+ if (!timer_pending(&q->u.out.timer))
+ mod_timer(&q->u.out.timer, jiffies + HZ);
return rc;
}
@@ -1508,32 +1602,24 @@ out:
* @count: how many buffers to process
*/
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count)
+ int q_nr, unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr;
- if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
- (count > QDIO_MAX_BUFFERS_PER_Q) ||
- (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- if (!count)
- return 0;
-
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- if (callflags & QDIO_FLAG_SYNC_INPUT)
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
- else
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+ "do%02x b:%02x c:%02x", callflags, bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
- return -EBUSY;
-
+ return -EIO;
+ if (!count)
+ return 0;
if (callflags & QDIO_FLAG_SYNC_INPUT)
return handle_inbound(irq_ptr->input_qs[q_nr],
callflags, bufnr, count);
@@ -1544,35 +1630,241 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
}
EXPORT_SYMBOL_GPL(do_QDIO);
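/*
 * Usage sketch for do_QDIO() (illustrative; queue number 0 and the
 * surrounding driver context are assumptions, not from this patch):
 * hand one processed inbound buffer back to the adapter, or submit
 * one outbound buffer and request a PCI completion interrupt.
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, 1);
 */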
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - success
+ * 1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ clear_nonshared_ind(irq_ptr);
+ qdio_stop_polling(q);
+ clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (test_nonshared_ind(irq_ptr))
+ goto rescan;
+ if (!qdio_inbound_q_done(q))
+ goto rescan;
+ return 0;
+
+rescan:
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ * < 0 - error
+ * = 0 - no new buffers found
+ * > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+ int *error)
+{
+ struct qdio_q *q;
+ int start, end;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ /*
+ * Cannot rely on automatic sync after interrupt since queues may
+ * also be examined without interrupt.
+ */
+ if (need_siga_sync(q))
+ qdio_sync_queues(q);
+
+ /* check the PCI capable outbound queues. */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return 0;
+
+ /* Note: upper-layer MUST stop processing immediately here ... */
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return -EIO;
+
+ start = q->first_to_kick;
+ end = q->first_to_check;
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_kick = end;
+ q->qdio_error = 0;
+ return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - interrupts were already disabled
+ * 1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
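/*
 * qdio_start_irq(), qdio_get_next_buffers() and qdio_stop_irq() form
 * the interrupt-avoidance (polling) interface. A rough poll-loop
 * sketch, modelled on how qeth consumes it; queue number 0 and
 * my_process() are assumptions:
 *
 *	int my_poll(struct ccw_device *cdev, int budget)
 *	{
 *		int bufnr, error, done = 0, n;
 *
 *		while (done < budget) {
 *			n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *			if (n < 0)
 *				return n;
 *			if (n == 0) {
 *				if (qdio_start_irq(cdev, 0) == 1)
 *					continue;	(data raced in)
 *				break;		(interrupts re-armed)
 *			}
 *			my_process(bufnr, n, error);
 *			done += n;
 *		}
 *		return done;
 *	}
 *
 * qdio_stop_irq() would be called when the driver wants to keep
 * interrupts disabled outside of this loop, e.g. while tearing down.
 */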
+
+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid: Subchannel ID.
+ * @cnc: Boolean Change-Notification Control
+ * @response: Response code will be stored at this address
+ * @cb: Callback function executed for each element of the address
+ *		list; it receives @priv, the type of the address entry
+ *		and the entry containing the address of that type
+ * @priv: Pointer passed from the caller to qdio_pnso_brinfo() and
+ *		forwarded unchanged to each invocation of @cb
+ *
+ * Performs "Store-network-bridging-information list" operation and calls
+ * the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv)
+{
+ struct chsc_pnso_area *rr;
+ int rc;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+ if (rc != 0 && rc != -EBUSY)
+ goto out;
+ if (rr->response.code != 1) {
+ rc = -EIO;
+ continue;
+ } else
+ rc = 0;
+
+ if (cb == NULL)
+ continue;
+
+ size = rr->naihdr.naids;
+ elems = (rr->response.length -
+ sizeof(struct chsc_header) -
+ sizeof(struct chsc_brinfo_naihdr)) /
+ size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+			/*
+			 * Inform the caller that it needs to scrap the
+			 * data that was already reported via cb.
+			 */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ switch (size) {
+ case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+ (*cb)(priv, l3_ipv6_addr,
+ &rr->entries.l3_ipv6[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+ (*cb)(priv, l3_ipv4_addr,
+ &rr->entries.l3_ipv4[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l2):
+ (*cb)(priv, l2_addr_lnid,
+ &rr->entries.l2[i]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ rc = -EIO;
+ goto out;
+ }
+ } while (rr->response.code == 0x0107 || /* channel busy */
+ (rr->response.code == 1 && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+ (*response) = rr->response.code;
+
+out:
+ free_page((unsigned long)rr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
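/*
 * Callback sketch for qdio_pnso_brinfo() (hypothetical, for
 * illustration): count the layer-2 entries of the bridge address
 * list. The entry types are the ones dispatched in the switch above.
 *
 *	static void my_brinfo_cb(void *priv,
 *				 enum qdio_brinfo_entry_type type,
 *				 void *entry)
 *	{
 *		if (type == l2_addr_lnid)
 *			(*(unsigned long *)priv)++;
 *	}
 *
 *	u16 response;
 *	unsigned long count = 0;
 *	int rc = qdio_pnso_brinfo(schid, 1, &response,
 *				  my_brinfo_cb, &count);
 */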
+
static int __init init_QDIO(void)
{
int rc;
- rc = qdio_setup_init();
+ rc = qdio_debug_init();
if (rc)
return rc;
+ rc = qdio_setup_init();
+ if (rc)
+ goto out_debug;
rc = tiqdio_allocate_memory();
if (rc)
goto out_cache;
- rc = qdio_debug_init();
- if (rc)
- goto out_ti;
- rc = qdio_setup_perf_stats();
- if (rc)
- goto out_debug;
rc = tiqdio_register_thinints();
if (rc)
- goto out_perf;
+ goto out_ti;
return 0;
-out_perf:
- qdio_remove_perf_stats();
-out_debug:
- qdio_debug_exit();
out_ti:
tiqdio_free_memory();
out_cache:
qdio_setup_exit();
+out_debug:
+ qdio_debug_exit();
return rc;
}
@@ -1580,9 +1872,8 @@ static void __exit exit_QDIO(void)
{
tiqdio_unregister_thinints();
tiqdio_free_memory();
- qdio_remove_perf_stats();
- qdio_debug_exit();
qdio_setup_exit();
+ qdio_debug_exit();
}
module_init(init_QDIO);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
deleted file mode 100644
index 136d0f0b1e9..00000000000
--- a/drivers/s390/cio/qdio_perf.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.c
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/ccwdev.h>
-
-#include "cio.h"
-#include "css.h"
-#include "device.h"
-#include "ioasm.h"
-#include "chsc.h"
-#include "qdio_debug.h"
-#include "qdio_perf.h"
-
-int qdio_performance_stats;
-struct qdio_perf_stats perf_stats;
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *qdio_perf_pde;
-#endif
-
-inline void qdio_perf_stat_inc(atomic_long_t *count)
-{
- if (qdio_performance_stats)
- atomic_long_inc(count);
-}
-
-inline void qdio_perf_stat_dec(atomic_long_t *count)
-{
- if (qdio_performance_stats)
- atomic_long_dec(count);
-}
-
-/*
- * procfs functions
- */
-static int qdio_perf_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.qdio_int));
- seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.pci_int));
- seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.thin_int));
- seq_printf(m, "\n");
- seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_inbound));
- seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_outbound));
- seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.tasklet_thinint),
- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
- seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound),
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
- seq_printf(m, "\n");
- seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_in));
- seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_out));
- seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_sync));
- seq_printf(m, "\n");
- seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.inbound_handler));
- seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_handler));
- seq_printf(m, "\n");
- seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
- (long)atomic_long_read(&perf_stats.fast_requeue));
- seq_printf(m, "Number of outbound target full condition\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_target_full));
- seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
- seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_stop_polling));
- seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
- seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
- seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
- seq_printf(m, "\n");
- return 0;
-}
-static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, qdio_perf_proc_show, NULL);
-}
-
-static struct file_operations qdio_perf_proc_fops = {
- .owner = THIS_MODULE,
- .open = qdio_perf_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/*
- * sysfs functions
- */
-static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
-}
-
-static ssize_t qdio_perf_stats_store(struct bus_type *bus,
- const char *buf, size_t count)
-{
- unsigned long i;
-
- if (strict_strtoul(buf, 16, &i) != 0)
- return -EINVAL;
- if ((i != 0) && (i != 1))
- return -EINVAL;
- if (i == qdio_performance_stats)
- return count;
-
- qdio_performance_stats = i;
- /* reset performance statistics */
- if (i == 0)
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- return count;
-}
-
-static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
- qdio_perf_stats_store);
-
-int __init qdio_setup_perf_stats(void)
-{
- int rc;
-
- rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
- if (rc)
- return rc;
-
-#ifdef CONFIG_PROC_FS
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
- NULL, &qdio_perf_proc_fops);
-#endif
- return 0;
-}
-
-void qdio_remove_perf_stats(void)
-{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("qdio_perf", NULL);
-#endif
- bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
-}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
deleted file mode 100644
index 7821ac4fa51..00000000000
--- a/drivers/s390/cio/qdio_perf.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.h
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#ifndef QDIO_PERF_H
-#define QDIO_PERF_H
-
-#include <linux/types.h>
-#include <linux/device.h>
-#include <asm/atomic.h>
-
-struct qdio_perf_stats {
- /* interrupt handler calls */
- atomic_long_t qdio_int;
- atomic_long_t pci_int;
- atomic_long_t thin_int;
-
- /* tasklet runs */
- atomic_long_t tasklet_inbound;
- atomic_long_t tasklet_outbound;
- atomic_long_t tasklet_thinint;
- atomic_long_t tasklet_thinint_loop;
- atomic_long_t thinint_inbound;
- atomic_long_t thinint_inbound_loop;
- atomic_long_t thinint_inbound_loop2;
-
- /* signal adapter calls */
- atomic_long_t siga_out;
- atomic_long_t siga_in;
- atomic_long_t siga_sync;
-
- /* misc */
- atomic_long_t inbound_handler;
- atomic_long_t outbound_handler;
- atomic_long_t fast_requeue;
- atomic_long_t outbound_target_full;
-
- /* for debugging */
- atomic_long_t debug_tl_out_timer;
- atomic_long_t debug_stop_polling;
- atomic_long_t debug_eqbs_all;
- atomic_long_t debug_eqbs_incomplete;
- atomic_long_t debug_sqbs_all;
- atomic_long_t debug_sqbs_incomplete;
-};
-
-extern struct qdio_perf_stats perf_stats;
-extern int qdio_performance_stats;
-
-int qdio_setup_perf_stats(void);
-void qdio_remove_perf_stats(void);
-
-extern void qdio_perf_stat_inc(atomic_long_t *count);
-extern void qdio_perf_stat_dec(atomic_long_t *count);
-
-#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 18d54fc21ce..f5f4a91fab4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -1,13 +1,12 @@
/*
- * driver/s390/cio/qdio_setup.c
- *
* qdio queue initialization
*
- * Copyright (C) IBM Corp. 2008
+ * Copyright IBM Corp. 2008
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/qdio.h>
#include "cio.h"
@@ -19,6 +18,19 @@
#include "qdio_debug.h"
static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob(void)
+{
+ return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+
+void qdio_release_aob(struct qaob *aob)
+{
+ kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);
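/*
 * The two helpers pair up around an asynchronous completion window;
 * a minimal sketch (the surrounding driver flow is an assumption):
 *
 *	struct qaob *aob = qdio_allocate_aob();
 *
 *	if (!aob)
 *		return -ENOMEM;	(or fall back to sync completion)
 *	... submit the buffer, wait until the outbound handler reports
 *	    the QDIO_OUTBUF_STATE_FLAG_PENDING buffer as completed ...
 *	qdio_release_aob(aob);
 */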
/*
* qebsm is only available under 64bit but the adapter sets the feature
@@ -48,7 +60,6 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
if (!irq_ptr)
return;
- WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
irq_ptr->qib.pfmt = qib_param_field_format;
if (qib_param_field)
memcpy(irq_ptr->qib.parm, qib_param_field,
@@ -82,14 +93,12 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
- WARN_ON((unsigned long)q & 0xff);
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
return -ENOMEM;
}
- WARN_ON((unsigned long)q->slib & 0x7ff);
irq_ptr_qs[i] = q;
}
return 0;
@@ -109,10 +118,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
- /* must be cleared by every qdio_establish */
- memset(q, 0, ((char *)&q->slib) - ((char *)q));
- memset(q->slib, 0, PAGE_SIZE);
+ struct slib *slib = q->slib;
+ /* queue must be cleared for qdio_establish */
+ memset(q, 0, sizeof(*q));
+ memset(slib, 0, PAGE_SIZE);
+ q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
@@ -129,10 +140,8 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sbal[j] = *sbals_array++;
- WARN_ON((unsigned long)q->sbal[j] & 0xff);
- }
/* fill in slib */
if (i > 0) {
@@ -147,11 +156,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = (unsigned long)q->sbal[j];
-
- DBF_EVENT("sl-slsb-sbal");
- DBF_HEX(q->sl, sizeof(void *));
- DBF_HEX(&q->slsb, sizeof(void *));
- DBF_HEX(q->sbal, sizeof(void *));
}
static void setup_queues(struct qdio_irq *irq_ptr,
@@ -160,29 +164,39 @@ static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_q *q;
void **input_sbal_array = qdio_init->input_sbal_addr_array;
void **output_sbal_array = qdio_init->output_sbal_addr_array;
+ struct qdio_outbuf_state *output_sbal_state_array =
+ qdio_init->output_sbal_state_array;
int i;
for_each_input_queue(irq_ptr, q, i) {
- DBF_EVENT("in-q:%1d", i);
+ DBF_EVENT("inq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+ qdio_init->queue_start_poll_array[i] : NULL;
+
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
- if (is_thinint_irq(irq_ptr))
+ if (is_thinint_irq(irq_ptr)) {
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
(unsigned long) q);
- else
+ } else {
tasklet_init(&q->tasklet, qdio_inbound_processing,
(unsigned long) q);
+ }
}
for_each_output_queue(irq_ptr, q, i) {
DBF_EVENT("outq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+ q->u.out.sbal_state = output_sbal_state_array;
+ output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
q->is_input_q = 0;
+ q->u.out.scan_threshold = qdio_init->scan_threshold;
setup_storage_lists(q, irq_ptr, output_sbal_array, i);
output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
@@ -201,14 +215,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
irq_ptr->siga_flag.output = 1;
if (qdioac & AC1_SIGA_SYNC_NEEDED)
irq_ptr->siga_flag.sync = 1;
- if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
- irq_ptr->siga_flag.no_sync_ti = 1;
- if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
- irq_ptr->siga_flag.no_sync_out_pci = 1;
-
- if (irq_ptr->siga_flag.no_sync_out_pci &&
- irq_ptr->siga_flag.no_sync_ti)
- irq_ptr->siga_flag.no_sync_out_ti = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+ irq_ptr->siga_flag.sync_after_ai = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+ irq_ptr->siga_flag.sync_out_after_pci = 1;
}
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
@@ -244,40 +254,31 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
int rc;
DBF_EVENT("getssqd:%4x", schid->sch_no);
- if (irq_ptr != NULL)
- ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
- else
+ if (!irq_ptr) {
ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
- memset(ssqd, 0, PAGE_SIZE);
-
- ssqd->request = (struct chsc_header) {
- .length = 0x0010,
- .code = 0x0024,
- };
- ssqd->first_sch = schid->sch_no;
- ssqd->last_sch = schid->sch_no;
- ssqd->ssid = schid->ssid;
-
- if (chsc(ssqd))
- return -EIO;
- rc = chsc_error_from_response(ssqd->response.code);
+ if (!ssqd)
+ return -ENOMEM;
+ } else {
+ ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+ }
+
+ rc = chsc_ssqd(*schid, ssqd);
if (rc)
- return rc;
+ goto out;
if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
(ssqd->qdio_ssqd.sch != schid->sch_no))
- return -EINVAL;
-
- if (irq_ptr != NULL)
- memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
- sizeof(struct qdio_ssqd_desc));
- else {
- memcpy(data, &ssqd->qdio_ssqd,
- sizeof(struct qdio_ssqd_desc));
+ rc = -EINVAL;
+
+ if (!rc)
+ memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
+
+out:
+ if (!irq_ptr)
free_page((unsigned long)ssqd);
- }
- return 0;
+
+ return rc;
}
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
@@ -285,7 +286,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
unsigned char qdioac;
int rc;
- rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
+ rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
if (rc) {
DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%x", rc);
@@ -297,7 +298,8 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
process_ac_flags(irq_ptr, qdioac);
- DBF_EVENT("qdioac:%4x", qdioac);
+ DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
+ DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
void qdio_release_memory(struct qdio_irq *irq_ptr)
@@ -319,6 +321,19 @@ void qdio_release_memory(struct qdio_irq *irq_ptr)
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->output_qs[i];
if (q) {
+ if (q->u.out.use_cq) {
+ int n;
+
+ for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+ struct qaob *aob = q->u.out.aobs[n];
+ if (aob) {
+ qdio_release_aob(aob);
+ q->u.out.aobs[n] = NULL;
+ }
+ }
+
+ qdio_disable_async_operation(&q->u.out);
+ }
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
@@ -341,10 +356,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->qdf0[i + nr].slsba =
(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
- irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
- irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}
static void setup_qdr(struct qdio_irq *irq_ptr,
@@ -353,12 +368,13 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
int i;
irq_ptr->qdr->qfmt = qdio_init->q_format;
+ irq_ptr->qdr->ac = qdio_init->qdr_ac;
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
- irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@@ -374,6 +390,8 @@ static void setup_qib(struct qdio_irq *irq_ptr,
if (qebsm_possible())
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+ irq_ptr->qib.rflags |= init_data->qib_rflags;
+
irq_ptr->qib.qfmt = init_data->q_format;
if (init_data->no_input_qs)
irq_ptr->qib.isliba =
@@ -390,16 +408,23 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
int rc;
- memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
+ memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+ memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+ memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+ memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+ irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
+ irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+
/* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
-
- irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->cdev = init_data->cdev;
+ ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
@@ -446,7 +471,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
- "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n",
+ "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
dev_name(&cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -458,29 +483,65 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
(irq_ptr->siga_flag.input) ? "R" : " ",
(irq_ptr->siga_flag.output) ? "W" : " ",
(irq_ptr->siga_flag.sync) ? "S" : " ",
- (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ",
- (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ",
- (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
+ (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+ (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
printk(KERN_INFO "%s", s);
}
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+ outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
+ GFP_ATOMIC);
+ if (!outq->aobs) {
+ outq->use_cq = 0;
+ return -ENOMEM;
+ }
+ outq->use_cq = 1;
+ return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+ kfree(q->aobs);
+ q->aobs = NULL;
+ q->use_cq = 0;
+}
+
int __init qdio_setup_init(void)
{
+ int rc;
+
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
+ qdio_aob_cache = kmem_cache_create("qdio_aob",
+ sizeof(struct qaob),
+ sizeof(struct qaob),
+ 0,
+ NULL);
+ if (!qdio_aob_cache) {
+ rc = -ENOMEM;
+ goto free_qdio_q_cache;
+ }
+
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
(css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
- return 0;
+ rc = 0;
+out:
+ return rc;
+free_qdio_q_cache:
+ kmem_cache_destroy(qdio_q_cache);
+ goto out;
}
void qdio_setup_exit(void)
{
+ kmem_cache_destroy(qdio_aob_cache);
kmem_cache_destroy(qdio_q_cache);
}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d011a78..5d06253c2a7 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,15 +1,13 @@
/*
- * linux/drivers/s390/cio/thinint_qdio.c
- *
- * thin interrupt support for qdio
- *
- * Copyright 2000-2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/io.h>
-#include <asm/atomic.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
@@ -19,7 +17,6 @@
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
/*
* Restriction: only 63 iqdio subchannels would have its own indicator,
@@ -29,36 +26,27 @@
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND 63
-/* list of thin interrupt input queues */
-static LIST_HEAD(tiq_list);
-DEFINE_MUTEX(tiq_list_lock);
-
-/* adapter local summary indicator */
-static unsigned char *tiqdio_alsi;
-
/* device state change indicators */
struct indicator_t {
u32 ind; /* u32 because of compare-and-swap performance */
atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
-static struct indicator_t *q_indicators;
-static void tiqdio_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+static DEFINE_MUTEX(tiq_list_lock);
+
+/* Adapter interrupt definitions */
+static void tiqdio_thinint_handler(struct airq_struct *airq);
-static int css_qdio_omit_svs;
+static struct airq_struct tiqdio_airq = {
+ .handler = tiqdio_thinint_handler,
+ .isc = QDIO_AIRQ_ISC,
+};
-static inline unsigned long do_clear_global_summary(void)
-{
- register unsigned long __fn asm("1") = 3;
- register unsigned long __tmp asm("2");
- register unsigned long __time asm("3");
-
- asm volatile(
- " .insn rre,0xb2650000,2,0"
- : "+d" (__fn), "=d" (__tmp), "=d" (__time));
- return __time;
-}
+static struct indicator_t *q_indicators;
+
+u64 last_ai_time;
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
@@ -89,223 +77,167 @@ static void put_indicator(u32 *addr)
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
- int i;
-
- /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
- if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
- css_qdio_omit_svs = 1;
-
mutex_lock(&tiq_list_lock);
- for_each_input_queue(irq_ptr, q, i)
- list_add_rcu(&q->entry, &tiq_list);
+ list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1);
+ xchg(irq_ptr->dsci, 1 << 7);
}
-/*
- * we cannot stop the tiqdio tasklet here since it is for all
- * thinint qdio devices and it must run as long as there is a
- * thinint device left
- */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
- int i;
- for (i = 0; i < irq_ptr->nr_input_qs; i++) {
- q = irq_ptr->input_qs[i];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
- continue;
+ q = irq_ptr->input_qs[0];
+ /* if establish triggered an error */
+ if (!q || !q->entry.prev || !q->entry.next)
+ return;
- mutex_lock(&tiq_list_lock);
- list_del_rcu(&q->entry);
- mutex_unlock(&tiq_list_lock);
- synchronize_rcu();
- }
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
}
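tiq_list is traversed under RCU by the interrupt handler, which is why removal pairs list_del_rcu() with synchronize_rcu() before the queue may go away. The reader side (tiqdio_thinint_handler below) has this shape:

	rcu_read_lock();
	list_for_each_entry_rcu(q, &tiq_list, entry)
		handle(q);	/* hypothetical per-queue work; must not sleep */
	rcu_read_unlock();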
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
- unsigned char state = 0;
-
- if (!atomic_read(&q->nr_buf_used))
- return 1;
-
- qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
-
- if (state == SLSB_P_INPUT_PRIMED)
- /* more work coming */
- return 0;
- return 1;
+ return irq_ptr->nr_input_qs > 1;
}
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
-static void __tiqdio_inbound_processing(struct qdio_q *q)
+static inline int shared_ind(struct qdio_irq *irq_ptr)
{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
- qdio_sync_after_thinint(q);
-
- /*
- * Maybe we have work on our outbound queues... at least
- * we have to check the PCI capable queues.
- */
- qdio_check_outbound_after_thinint(q);
+ return references_shared_dsci(irq_ptr) ||
+ has_multiple_inq_on_dsci(irq_ptr);
+}
- if (!qdio_inbound_q_moved(q))
+void clear_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
return;
-
- qdio_kick_handler(q);
-
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
-
- qdio_stop_polling(q);
- /*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
- */
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
+ if (shared_ind(irq_ptr))
+ return;
+ xchg(irq_ptr->dsci, 0);
}
-void tiqdio_inbound_processing(unsigned long data)
+int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q = (struct qdio_q *)data;
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+ if (shared_ind(irq_ptr))
+ return 0;
+ if (*irq_ptr->dsci)
+ return 1;
+ else
+ return 0;
+}
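clear_nonshared_ind() and test_nonshared_ind() give a driver running in polling mode (queue_start_poll) direct control over the device state change indicator; a hedged usage sketch:

	/* Hypothetical re-enable path of a polling driver: reset the
	 * indicator, then look again to close the race with a
	 * concurrent adapter interrupt. */
	clear_nonshared_ind(irq_ptr);
	if (test_nonshared_ind(irq_ptr))
		schedule_rescan(irq_ptr);	/* hypothetical helper */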
- __tiqdio_inbound_processing(q);
+static inline u32 clear_shared_ind(void)
+{
+ if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
+ return 0;
+ return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}
-/* check for work on all inbound thinint queues */
-static void tiqdio_tasklet_fn(unsigned long data)
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
struct qdio_q *q;
+ int i;
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
-again:
-
- /* protect tiq_list entries, only changed in activate or shutdown */
- rcu_read_lock();
-
- list_for_each_entry_rcu(q, &tiq_list, entry)
- /* only process queues from changed sets */
- if (*q->irq_ptr->dsci) {
-
- /* only clear it if the indicator is non-shared */
+ for_each_input_queue(irq, q, i) {
+ if (!references_shared_dsci(irq) &&
+ has_multiple_inq_on_dsci(irq))
+ xchg(q->irq_ptr->dsci, 0);
+
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+
+ /* avoid dsci clear here, done after processing */
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
if (!shared_ind(q->irq_ptr))
xchg(q->irq_ptr->dsci, 0);
+
/*
- * don't call inbound processing directly since
- * that could starve other thinint queues
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
*/
tasklet_schedule(&q->tasklet);
}
-
- rcu_read_unlock();
-
- /*
- * if we used the shared indicator clear it now after all queues
- * were processed
- */
- if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
- xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
-
- /* prevent racing */
- if (*tiqdio_alsi)
- xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
- }
-
- /* check for more work */
- if (*tiqdio_alsi) {
- xchg(tiqdio_alsi, 0);
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
- goto again;
}
}
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
+ * @airq: pointer to adapter interrupt descriptor
*/
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
+static void tiqdio_thinint_handler(struct airq_struct *airq)
{
- qdio_perf_stat_inc(&perf_stats.thin_int);
-
- /*
- * SVS only when needed: issue SVS to benefit from iqdio interrupt
- * avoidance (SVS clears adapter interrupt suppression overwrite)
- */
- if (!css_qdio_omit_svs)
- do_clear_global_summary();
-
- /*
- * reset local summary indicator (tiqdio_alsi) to stop adapter
- * interrupts for now, the tasklet will clean all dsci's
- */
- xchg((u8 *)ind, 0);
- tasklet_hi_schedule(&tiqdio_tasklet);
+ u32 si_used = clear_shared_ind();
+ struct qdio_q *q;
+
+ last_ai_time = S390_lowcore.int_clock;
+ inc_irq_stat(IRQIO_QAI);
+
+ /* protect tiq_list entries, only changed in activate or shutdown */
+ rcu_read_lock();
+
+ /* check for work on all inbound thinint queues */
+ list_for_each_entry_rcu(q, &tiq_list, entry) {
+ struct qdio_irq *irq;
+
+ /* only process queues from changed sets */
+ irq = q->irq_ptr;
+ if (unlikely(references_shared_dsci(irq))) {
+ if (!si_used)
+ continue;
+ } else if (!*irq->dsci)
+ continue;
+
+ tiqdio_call_inq_handlers(irq);
+
+ qperf_inc(q, adapter_int);
+ }
+ rcu_read_unlock();
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
- struct scssc_area *scssc_area;
+ struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
+ u64 summary_indicator_addr, subchannel_indicator_addr;
int rc;
- scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
- memset(scssc_area, 0, PAGE_SIZE);
-
if (reset) {
- scssc_area->summary_indicator_addr = 0;
- scssc_area->subchannel_indicator_addr = 0;
+ summary_indicator_addr = 0;
+ subchannel_indicator_addr = 0;
} else {
- scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
- scssc_area->subchannel_indicator_addr =
- virt_to_phys(irq_ptr->dsci);
+ summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
+ subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
}
- scssc_area->request = (struct chsc_header) {
- .length = 0x0fe0,
- .code = 0x0021,
- };
- scssc_area->operation_code = 0;
- scssc_area->ks = PAGE_DEFAULT_KEY;
- scssc_area->kc = PAGE_DEFAULT_KEY;
- scssc_area->isc = QDIO_AIRQ_ISC;
- scssc_area->schid = irq_ptr->schid;
-
- /* enable the time delay disablement facility */
- if (css_general_characteristics.aif_tdd)
- scssc_area->word_with_d_bit = 0x10000000;
-
- rc = chsc(scssc_area);
- if (rc)
- return -EIO;
-
- rc = chsc_error_from_response(scssc_area->response.code);
+ rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+ subchannel_indicator_addr);
if (rc) {
DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
- scssc_area->response.code);
- DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
- return rc;
+ scssc->response.code);
+ goto out;
}
DBF_EVENT("setscind");
- DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
- DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
- return 0;
+ DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
+ DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
+out:
+ return rc;
}
/* allocate non-shared indicators and shared indicator */
@@ -325,14 +257,12 @@ void tiqdio_free_memory(void)
int __init tiqdio_register_thinints(void)
{
- isc_register(QDIO_AIRQ_ISC);
- tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
- NULL, QDIO_AIRQ_ISC);
- if (IS_ERR(tiqdio_alsi)) {
- DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
- tiqdio_alsi = NULL;
- isc_unregister(QDIO_AIRQ_ISC);
- return -ENOMEM;
+ int rc;
+
+ rc = register_adapter_interrupt(&tiqdio_airq);
+ if (rc) {
+ DBF_EVENT("RTI:%x", rc);
+ return rc;
}
return 0;
}
@@ -341,12 +271,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
-
- /* Check for aif time delay disablement. If installed,
- * omit SVS even under LPAR
- */
- if (css_general_characteristics.aif_tdd)
- css_qdio_omit_svs = 1;
return set_subchannel_ind(irq_ptr, 0);
}
@@ -364,17 +288,12 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
return;
/* reset adapter interrupt indicators */
- put_indicator(irq_ptr->dsci);
set_subchannel_ind(irq_ptr, 1);
+ put_indicator(irq_ptr->dsci);
}
void __exit tiqdio_unregister_thinints(void)
{
WARN_ON(!list_empty(&tiq_list));
-
- if (tiqdio_alsi) {
- s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
- isc_unregister(QDIO_AIRQ_ISC);
- }
- tasklet_kill(&tiqdio_tasklet);
+ unregister_adapter_interrupt(&tiqdio_airq);
}
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
new file mode 100644
index 00000000000..15268edc54a
--- /dev/null
+++ b/drivers/s390/cio/scm.c
@@ -0,0 +1,288 @@
+/*
+ * Recognize and maintain s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/eadm.h>
+#include "chsc.h"
+
+static struct device *scm_root;
+
+#define to_scm_dev(n) container_of(n, struct scm_device, dev)
+#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
+
+static int scmdev_probe(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
+}
+
+static int scmdev_remove(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+}
+
+static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=scm:scmdev");
+}
+
+static struct bus_type scm_bus_type = {
+ .name = "scm",
+ .probe = scmdev_probe,
+ .remove = scmdev_remove,
+ .uevent = scmdev_uevent,
+};
+
+/**
+ * scm_driver_register() - register a scm driver
+ * @scmdrv: driver to be registered
+ */
+int scm_driver_register(struct scm_driver *scmdrv)
+{
+ struct device_driver *drv = &scmdrv->drv;
+
+ drv->bus = &scm_bus_type;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_register);
+
+/**
+ * scm_driver_unregister() - deregister a scm driver
+ * @scmdrv: driver to be deregistered
+ */
+void scm_driver_unregister(struct scm_driver *scmdrv)
+{
+ driver_unregister(&scmdrv->drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_unregister);
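A minimal driver skeleton against this interface; the foo_* names are hypothetical, and the scm_driver fields follow the callbacks referenced in this file (probe, remove, notify, handler):

	static int foo_probe(struct scm_device *scmdev)
	{
		/* claim the device, set up per-device state */
		return 0;
	}

	static int foo_remove(struct scm_device *scmdev)
	{
		return 0;
	}

	static struct scm_driver foo_driver = {
		.drv = {
			.name	= "foo",
			.owner	= THIS_MODULE,
		},
		.probe	= foo_probe,
		.remove	= foo_remove,
	};

The module init/exit functions would then call scm_driver_register(&foo_driver) and scm_driver_unregister(&foo_driver), and a MODULE_ALIAS("scm:scmdev") line matches the MODALIAS string emitted by scmdev_uevent() above.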
+
+void scm_irq_handler(struct aob *aob, int error)
+{
+ struct aob_rq_header *aobrq = (void *) aob->request.data;
+ struct scm_device *scmdev = aobrq->scmdev;
+ struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
+
+ scmdrv->handler(scmdev, aobrq->data, error);
+}
+EXPORT_SYMBOL_GPL(scm_irq_handler);
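scm_irq_handler() assumes the request issuer stashed an aob_rq_header, carrying the scm_device pointer plus any driver payload, behind aob->request.data. A hedged sketch of the issuing side (struct my_req is made up):

	struct aob_rq_header *aobrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(struct my_req), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;
	aobrq->scmdev = scmdev;		/* recovered by scm_irq_handler() */
	aob->request.data = (u64) aobrq;	/* driver payload follows at aobrq->data */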
+
+#define scm_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scm_device *scmdev = to_scm_dev(dev); \
+ int ret; \
+ \
+ device_lock(dev); \
+ ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
+ device_unlock(dev); \
+ \
+ return ret; \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+scm_attr(persistence);
+scm_attr(oper_state);
+scm_attr(data_state);
+scm_attr(rank);
+scm_attr(release);
+scm_attr(res_id);
+
+static struct attribute *scmdev_attrs[] = {
+ &dev_attr_persistence.attr,
+ &dev_attr_oper_state.attr,
+ &dev_attr_data_state.attr,
+ &dev_attr_rank.attr,
+ &dev_attr_release.attr,
+ &dev_attr_res_id.attr,
+ NULL,
+};
+
+static struct attribute_group scmdev_attr_group = {
+ .attrs = scmdev_attrs,
+};
+
+static const struct attribute_group *scmdev_attr_groups[] = {
+ &scmdev_attr_group,
+ NULL,
+};
+
+static void scmdev_release(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ kfree(scmdev);
+}
+
+static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
+ unsigned int size, unsigned int max_blk_count)
+{
+ dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
+ scmdev->nr_max_block = max_blk_count;
+ scmdev->address = sale->sa;
+ scmdev->size = 1UL << size;
+ scmdev->attrs.persistence = sale->p;
+ scmdev->attrs.oper_state = sale->op_state;
+ scmdev->attrs.data_state = sale->data_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.release = sale->r;
+ scmdev->attrs.res_id = sale->rid;
+ scmdev->dev.parent = scm_root;
+ scmdev->dev.bus = &scm_bus_type;
+ scmdev->dev.release = scmdev_release;
+ scmdev->dev.groups = scmdev_attr_groups;
+}
+
+/*
+ * Check for state changes and notify the driver and userspace.
+ */
+static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
+{
+ struct scm_driver *scmdrv;
+ bool changed;
+
+ device_lock(&scmdev->dev);
+ changed = scmdev->attrs.rank != sale->rank ||
+ scmdev->attrs.oper_state != sale->op_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.oper_state = sale->op_state;
+ if (!scmdev->dev.driver)
+ goto out;
+ scmdrv = to_scm_drv(scmdev->dev.driver);
+ if (changed && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_CHANGE);
+out:
+ device_unlock(&scmdev->dev);
+ if (changed)
+ kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int check_address(struct device *dev, void *data)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct sale *sale = data;
+
+ return scmdev->address == sale->sa;
+}
+
+static struct scm_device *scmdev_find(struct sale *sale)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
+
+ return dev ? to_scm_dev(dev) : NULL;
+}
+
+static int scm_add(struct chsc_scm_info *scm_info, size_t num)
+{
+ struct sale *sale, *scmal = scm_info->scmal;
+ struct scm_device *scmdev;
+ int ret;
+
+ for (sale = scmal; sale < scmal + num; sale++) {
+ scmdev = scmdev_find(sale);
+ if (scmdev) {
+ scmdev_update(scmdev, sale);
+ /* Release reference from scmdev_find(). */
+ put_device(&scmdev->dev);
+ continue;
+ }
+ scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
+ if (!scmdev)
+ return -ENOMEM;
+ scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
+ ret = device_register(&scmdev->dev);
+ if (ret) {
+ /* Release reference from device_initialize(). */
+ put_device(&scmdev->dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int scm_update_information(void)
+{
+ struct chsc_scm_info *scm_info;
+ u64 token = 0;
+ size_t num;
+ int ret;
+
+ scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!scm_info)
+ return -ENOMEM;
+
+ do {
+ ret = chsc_scm_info(scm_info, token);
+ if (ret)
+ break;
+
+ num = (scm_info->response.length -
+ (offsetof(struct chsc_scm_info, scmal) -
+ offsetof(struct chsc_scm_info, response))
+ ) / sizeof(struct sale);
+
+ ret = scm_add(scm_info, num);
+ if (ret)
+ break;
+
+ token = scm_info->restok;
+ } while (token);
+
+ free_page((unsigned long)scm_info);
+
+ return ret;
+}
+
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ if (dev->driver && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_AVAIL);
+
+ return 0;
+}
+
+int scm_process_availability_information(void)
+{
+ return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
+static int __init scm_init(void)
+{
+ int ret;
+
+ ret = bus_register(&scm_bus_type);
+ if (ret)
+ return ret;
+
+ scm_root = root_device_register("scm");
+ if (IS_ERR(scm_root)) {
+ bus_unregister(&scm_bus_type);
+ return PTR_ERR(scm_root);
+ }
+
+ scm_update_information();
+ return 0;
+}
+subsys_initcall_sync(scm_init);
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c
deleted file mode 100644
index f8da25ab576..00000000000
--- a/drivers/s390/cio/scsw.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Helper functions for scsw access.
- *
- * Copyright IBM Corp. 2008
- * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <asm/cio.h>
-#include "css.h"
-#include "chsc.h"
-
-/**
- * scsw_is_tm - check for transport mode scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the specified scsw is a transport mode scsw, zero
- * otherwise.
- */
-int scsw_is_tm(union scsw *scsw)
-{
- return css_general_characteristics.fcx && (scsw->tm.x == 1);
-}
-EXPORT_SYMBOL(scsw_is_tm);
-
-/**
- * scsw_key - return scsw key field
- * @scsw: pointer to scsw
- *
- * Return the value of the key field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_key(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.key;
- else
- return scsw->cmd.key;
-}
-EXPORT_SYMBOL(scsw_key);
-
-/**
- * scsw_eswf - return scsw eswf field
- * @scsw: pointer to scsw
- *
- * Return the value of the eswf field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_eswf(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.eswf;
- else
- return scsw->cmd.eswf;
-}
-EXPORT_SYMBOL(scsw_eswf);
-
-/**
- * scsw_cc - return scsw cc field
- * @scsw: pointer to scsw
- *
- * Return the value of the cc field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_cc(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.cc;
- else
- return scsw->cmd.cc;
-}
-EXPORT_SYMBOL(scsw_cc);
-
-/**
- * scsw_ectl - return scsw ectl field
- * @scsw: pointer to scsw
- *
- * Return the value of the ectl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_ectl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.ectl;
- else
- return scsw->cmd.ectl;
-}
-EXPORT_SYMBOL(scsw_ectl);
-
-/**
- * scsw_pno - return scsw pno field
- * @scsw: pointer to scsw
- *
- * Return the value of the pno field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_pno(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.pno;
- else
- return scsw->cmd.pno;
-}
-EXPORT_SYMBOL(scsw_pno);
-
-/**
- * scsw_fctl - return scsw fctl field
- * @scsw: pointer to scsw
- *
- * Return the value of the fctl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_fctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.fctl;
- else
- return scsw->cmd.fctl;
-}
-EXPORT_SYMBOL(scsw_fctl);
-
-/**
- * scsw_actl - return scsw actl field
- * @scsw: pointer to scsw
- *
- * Return the value of the actl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_actl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.actl;
- else
- return scsw->cmd.actl;
-}
-EXPORT_SYMBOL(scsw_actl);
-
-/**
- * scsw_stctl - return scsw stctl field
- * @scsw: pointer to scsw
- *
- * Return the value of the stctl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_stctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.stctl;
- else
- return scsw->cmd.stctl;
-}
-EXPORT_SYMBOL(scsw_stctl);
-
-/**
- * scsw_dstat - return scsw dstat field
- * @scsw: pointer to scsw
- *
- * Return the value of the dstat field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_dstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.dstat;
- else
- return scsw->cmd.dstat;
-}
-EXPORT_SYMBOL(scsw_dstat);
-
-/**
- * scsw_cstat - return scsw cstat field
- * @scsw: pointer to scsw
- *
- * Return the value of the cstat field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_cstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.cstat;
- else
- return scsw->cmd.cstat;
-}
-EXPORT_SYMBOL(scsw_cstat);
-
-/**
- * scsw_cmd_is_valid_key - check key field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the key field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_key(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_key);
-
-/**
- * scsw_cmd_is_valid_sctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_sctl(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
-
-/**
- * scsw_cmd_is_valid_eswf - check eswf field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the eswf field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_eswf(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
-
-/**
- * scsw_cmd_is_valid_cc - check cc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cc field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_cc(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
- (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
-
-/**
- * scsw_cmd_is_valid_fmt - check fmt field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fmt field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_fmt(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
-
-/**
- * scsw_cmd_is_valid_pfch - check pfch field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pfch field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_pfch(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
-
-/**
- * scsw_cmd_is_valid_isic - check isic field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the isic field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_isic(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
-
-/**
- * scsw_cmd_is_valid_alcc - check alcc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the alcc field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_alcc(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
-
-/**
- * scsw_cmd_is_valid_ssi - check ssi field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ssi field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_ssi(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
-
-/**
- * scsw_cmd_is_valid_zcc - check zcc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the zcc field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_zcc(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
- (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
-
-/**
- * scsw_cmd_is_valid_ectl - check ectl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ectl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_ectl(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
-
-/**
- * scsw_cmd_is_valid_pno - check pno field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pno field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_pno(union scsw *scsw)
-{
- return (scsw->cmd.fctl != 0) &&
- (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
- ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
-
-/**
- * scsw_cmd_is_valid_fctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_fctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
-
-/**
- * scsw_cmd_is_valid_actl - check actl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the actl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_actl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
-
-/**
- * scsw_cmd_is_valid_stctl - check stctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the stctl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_stctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
-
-/**
- * scsw_cmd_is_valid_dstat - check dstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the dstat field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_dstat(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->cmd.cc != 3);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
-
-/**
- * scsw_cmd_is_valid_cstat - check cstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cstat field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_cstat(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->cmd.cc != 3);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
-
-/**
- * scsw_tm_is_valid_key - check key field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the key field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_key(union scsw *scsw)
-{
- return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_key);
-
-/**
- * scsw_tm_is_valid_eswf - check eswf field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the eswf field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_eswf(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
-
-/**
- * scsw_tm_is_valid_cc - check cc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cc field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_cc(union scsw *scsw)
-{
- return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
- (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_cc);
-
-/**
- * scsw_tm_is_valid_fmt - check fmt field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fmt field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_fmt(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
-
-/**
- * scsw_tm_is_valid_x - check x field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the x field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_x(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_x);
-
-/**
- * scsw_tm_is_valid_q - check q field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the q field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_q(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_q);
-
-/**
- * scsw_tm_is_valid_ectl - check ectl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ectl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_ectl(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
-
-/**
- * scsw_tm_is_valid_pno - check pno field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pno field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_pno(union scsw *scsw)
-{
- return (scsw->tm.fctl != 0) &&
- (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
- ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_pno);
-
-/**
- * scsw_tm_is_valid_fctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_fctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
-
-/**
- * scsw_tm_is_valid_actl - check actl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the actl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_actl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_actl);
-
-/**
- * scsw_tm_is_valid_stctl - check stctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the stctl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_stctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
-
-/**
- * scsw_tm_is_valid_dstat - check dstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the dstat field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_dstat(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->tm.cc != 3);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
-
-/**
- * scsw_tm_is_valid_cstat - check cstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cstat field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_cstat(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->tm.cc != 3);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
-
-/**
- * scsw_tm_is_valid_fcxs - check fcxs field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fcxs field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_fcxs(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
-
-/**
- * scsw_tm_is_valid_schxs - check schxs field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the schxs field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_schxs(union scsw *scsw)
-{
- return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
- SCHN_STAT_INTF_CTRL_CHK |
- SCHN_STAT_PROT_CHECK |
- SCHN_STAT_CHN_DATA_CHK));
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
-
-/**
- * scsw_is_valid_actl - check actl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the actl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_actl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_actl(scsw);
- else
- return scsw_cmd_is_valid_actl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_actl);
-
-/**
- * scsw_is_valid_cc - check cc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cc field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_cc(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_cc(scsw);
- else
- return scsw_cmd_is_valid_cc(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_cc);
-
-/**
- * scsw_is_valid_cstat - check cstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cstat field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_cstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_cstat(scsw);
- else
- return scsw_cmd_is_valid_cstat(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_cstat);
-
-/**
- * scsw_is_valid_dstat - check dstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the dstat field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_dstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_dstat(scsw);
- else
- return scsw_cmd_is_valid_dstat(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_dstat);
-
-/**
- * scsw_is_valid_ectl - check ectl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ectl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_ectl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_ectl(scsw);
- else
- return scsw_cmd_is_valid_ectl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_ectl);
-
-/**
- * scsw_is_valid_eswf - check eswf field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the eswf field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_eswf(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_eswf(scsw);
- else
- return scsw_cmd_is_valid_eswf(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_eswf);
-
-/**
- * scsw_is_valid_fctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_fctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_fctl(scsw);
- else
- return scsw_cmd_is_valid_fctl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_fctl);
-
-/**
- * scsw_is_valid_key - check key field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the key field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_key(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_key(scsw);
- else
- return scsw_cmd_is_valid_key(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_key);
-
-/**
- * scsw_is_valid_pno - check pno field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pno field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_pno(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_pno(scsw);
- else
- return scsw_cmd_is_valid_pno(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_pno);
-
-/**
- * scsw_is_valid_stctl - check stctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the stctl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_stctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_stctl(scsw);
- else
- return scsw_cmd_is_valid_stctl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_stctl);
-
-/**
- * scsw_cmd_is_solicited - check for solicited scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the command mode scsw indicates that the associated
- * status condition is solicited, zero if it is unsolicited.
- */
-int scsw_cmd_is_solicited(union scsw *scsw)
-{
- return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
-}
-EXPORT_SYMBOL(scsw_cmd_is_solicited);
-
-/**
- * scsw_tm_is_solicited - check for solicited scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the transport mode scsw indicates that the associated
- * status condition is solicited, zero if it is unsolicited.
- */
-int scsw_tm_is_solicited(union scsw *scsw)
-{
- return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
-}
-EXPORT_SYMBOL(scsw_tm_is_solicited);
-
-/**
- * scsw_is_solicited - check for solicited scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the transport or command mode scsw indicates that the
- * associated status condition is solicited, zero if it is unsolicited.
- */
-int scsw_is_solicited(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_solicited(scsw);
- else
- return scsw_cmd_is_solicited(scsw);
-}
-EXPORT_SYMBOL(scsw_is_solicited);