Diffstat (limited to 'drivers/s390/cio')
45 files changed, 16433 insertions, 9461 deletions
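Orientation note (not part of the commit): among many other things, the hunks below replace the old single global adapter-interrupt callback (s390_register_adapter_interrupt()) with per-ISC handler lists keyed off struct airq_struct, and add the airq_iv_* helpers for managing adapter-interrupt bit vectors. The following is a minimal, hypothetical sketch of how a driver might use that API as it appears in the airq.c hunks; the my_* identifiers, MY_ISC and MY_NR_IRQS are invented for illustration and are assumptions, not commit content.

/* Illustrative sketch only -- my_* names, MY_ISC and MY_NR_IRQS are assumptions. */
#include <linux/errno.h>
#include <asm/airq.h>

#define MY_ISC		3	/* assumed I/O interruption subclass (must be <= MAX_ISC) */
#define MY_NR_IRQS	64	/* number of sub-interrupts tracked in the vector */

static struct airq_iv *my_iv;

/* Invoked from do_airq_interrupt() whenever (*my_airq.lsi_ptr & my_airq.lsi_mask) is set. */
static void my_airq_handler(struct airq_struct *airq)
{
	unsigned long bit = 0;

	/* airq_iv_scan() returns and clears the next pending bit, or -1UL when done. */
	while ((bit = airq_iv_scan(my_iv, bit, my_iv->bits)) != -1UL) {
		/* dispatch the work queued behind sub-interrupt 'bit' here */
		bit++;
	}
}

static struct airq_struct my_airq = {
	.handler	= my_airq_handler,
	.isc		= MY_ISC,	/* lsi_ptr/lsi_mask are filled in by register_adapter_interrupt() */
};

static int my_setup(void)
{
	int rc;

	my_iv = airq_iv_create(MY_NR_IRQS, AIRQ_IV_ALLOC);
	if (!my_iv)
		return -ENOMEM;
	rc = register_adapter_interrupt(&my_airq);
	if (rc) {
		airq_iv_release(my_iv);
		return rc;
	}
	/* Hand out one sub-interrupt bit; the adapter would be told to signal it. */
	return airq_iv_alloc(my_iv, 1) == -1UL ? -ENOSPC : 0;
}

static void my_teardown(void)
{
	unregister_adapter_interrupt(&my_airq);
	airq_iv_release(my_iv);
}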
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index c490c2a1c2f..8c4a386e97f 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -2,9 +2,15 @@ # Makefile for the S/390 common i/o drivers # -obj-y += airq.o blacklist.o chsc.o cio.o css.o +obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ + fcx.o itcw.o crw.o ccwreq.o ccw_device-objs += device.o device_fsm.o device_ops.o ccw_device-objs += device_id.o device_pgid.o device_status.o obj-y += ccw_device.o cmf.o +obj-$(CONFIG_CHSC_SCH) += chsc_sch.o +obj-$(CONFIG_EADM_SCH) += eadm_sch.o +obj-$(CONFIG_SCM_BUS) += scm.o obj-$(CONFIG_CCWGROUP) += ccwgroup.o + +qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index 83e6a060668..00bfbee0af9 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -1,87 +1,275 @@ /* - * drivers/s390/cio/airq.c - * S/390 common I/O routines -- support for adapter interruptions + * Support for adapter interruptions * - * $Revision: 1.12 $ - * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Ingo Adlung (adlung@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) - * Arnd Bergmann (arndb@de.ibm.com) + * Copyright IBM Corp. 1999, 2007 + * Author(s): Ingo Adlung <adlung@de.ibm.com> + * Cornelia Huck <cornelia.huck@de.ibm.com> + * Arnd Bergmann <arndb@de.ibm.com> + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #include <linux/init.h> +#include <linux/irq.h> +#include <linux/kernel_stat.h> #include <linux/module.h> +#include <linux/mutex.h> +#include <linux/rculist.h> #include <linux/slab.h> -#include <linux/rcupdate.h> +#include <asm/airq.h> +#include <asm/isc.h> + +#include "cio.h" #include "cio_debug.h" -#include "airq.h" +#include "ioasm.h" -static adapter_int_handler_t adapter_handler; +static DEFINE_SPINLOCK(airq_lists_lock); +static struct hlist_head airq_lists[MAX_ISC+1]; -/* - * register for adapter interrupts +/** + * register_adapter_interrupt() - register adapter interrupt handler + * @airq: pointer to adapter interrupt descriptor * - * With HiperSockets the zSeries architecture provides for - * means of adapter interrups, pseudo I/O interrupts that are - * not tied to an I/O subchannel, but to an adapter. However, - * it doesn't disclose the info how to enable/disable them, but - * to recognize them only. Perhaps we should consider them - * being shared interrupts, and thus build a linked list - * of adapter handlers ... to be evaluated ... + * Returns 0 on success, or -EINVAL. */ -int -s390_register_adapter_interrupt (adapter_int_handler_t handler) +int register_adapter_interrupt(struct airq_struct *airq) { - int ret; - char dbf_txt[15]; + char dbf_txt[32]; - CIO_TRACE_EVENT (4, "rgaint"); + if (!airq->handler || airq->isc > MAX_ISC) + return -EINVAL; + if (!airq->lsi_ptr) { + airq->lsi_ptr = kzalloc(1, GFP_KERNEL); + if (!airq->lsi_ptr) + return -ENOMEM; + airq->flags |= AIRQ_PTR_ALLOCATED; + } + if (!airq->lsi_mask) + airq->lsi_mask = 0xff; + snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq); + CIO_TRACE_EVENT(4, dbf_txt); + isc_register(airq->isc); + spin_lock(&airq_lists_lock); + hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]); + spin_unlock(&airq_lists_lock); + return 0; +} +EXPORT_SYMBOL(register_adapter_interrupt); - if (handler == NULL) - ret = -EINVAL; - else - ret = (cmpxchg(&adapter_handler, NULL, handler) ? 
-EBUSY : 0); - if (!ret) - synchronize_sched(); /* Allow interrupts to complete. */ +/** + * unregister_adapter_interrupt - unregister adapter interrupt handler + * @airq: pointer to adapter interrupt descriptor + */ +void unregister_adapter_interrupt(struct airq_struct *airq) +{ + char dbf_txt[32]; - sprintf (dbf_txt, "ret:%d", ret); - CIO_TRACE_EVENT (4, dbf_txt); + if (hlist_unhashed(&airq->list)) + return; + snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq); + CIO_TRACE_EVENT(4, dbf_txt); + spin_lock(&airq_lists_lock); + hlist_del_rcu(&airq->list); + spin_unlock(&airq_lists_lock); + synchronize_rcu(); + isc_unregister(airq->isc); + if (airq->flags & AIRQ_PTR_ALLOCATED) { + kfree(airq->lsi_ptr); + airq->lsi_ptr = NULL; + airq->flags &= ~AIRQ_PTR_ALLOCATED; + } +} +EXPORT_SYMBOL(unregister_adapter_interrupt); - return ret; +static irqreturn_t do_airq_interrupt(int irq, void *dummy) +{ + struct tpi_info *tpi_info; + struct airq_struct *airq; + struct hlist_head *head; + + __this_cpu_write(s390_idle.nohz_delay, 1); + tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + head = &airq_lists[tpi_info->isc]; + rcu_read_lock(); + hlist_for_each_entry_rcu(airq, head, list) + if ((*airq->lsi_ptr & airq->lsi_mask) != 0) + airq->handler(airq); + rcu_read_unlock(); + + return IRQ_HANDLED; } -int -s390_unregister_adapter_interrupt (adapter_int_handler_t handler) +static struct irqaction airq_interrupt = { + .name = "AIO", + .handler = do_airq_interrupt, +}; + +void __init init_airq_interrupts(void) { - int ret; - char dbf_txt[15]; + irq_set_chip_and_handler(THIN_INTERRUPT, + &dummy_irq_chip, handle_percpu_irq); + setup_irq(THIN_INTERRUPT, &airq_interrupt); +} - CIO_TRACE_EVENT (4, "urgaint"); +/** + * airq_iv_create - create an interrupt vector + * @bits: number of bits in the interrupt vector + * @flags: allocation flags + * + * Returns a pointer to an interrupt vector structure + */ +struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags) +{ + struct airq_iv *iv; + unsigned long size; - if (handler == NULL) - ret = -EINVAL; - else { - adapter_handler = NULL; - synchronize_sched(); /* Allow interrupts to complete. 
*/ - ret = 0; + iv = kzalloc(sizeof(*iv), GFP_KERNEL); + if (!iv) + goto out; + iv->bits = bits; + size = BITS_TO_LONGS(bits) * sizeof(unsigned long); + iv->vector = kzalloc(size, GFP_KERNEL); + if (!iv->vector) + goto out_free; + if (flags & AIRQ_IV_ALLOC) { + iv->avail = kmalloc(size, GFP_KERNEL); + if (!iv->avail) + goto out_free; + memset(iv->avail, 0xff, size); + iv->end = 0; + } else + iv->end = bits; + if (flags & AIRQ_IV_BITLOCK) { + iv->bitlock = kzalloc(size, GFP_KERNEL); + if (!iv->bitlock) + goto out_free; + } + if (flags & AIRQ_IV_PTR) { + size = bits * sizeof(unsigned long); + iv->ptr = kzalloc(size, GFP_KERNEL); + if (!iv->ptr) + goto out_free; + } + if (flags & AIRQ_IV_DATA) { + size = bits * sizeof(unsigned int); + iv->data = kzalloc(size, GFP_KERNEL); + if (!iv->data) + goto out_free; } - sprintf (dbf_txt, "ret:%d", ret); - CIO_TRACE_EVENT (4, dbf_txt); + spin_lock_init(&iv->lock); + return iv; - return ret; +out_free: + kfree(iv->ptr); + kfree(iv->bitlock); + kfree(iv->avail); + kfree(iv->vector); + kfree(iv); +out: + return NULL; } +EXPORT_SYMBOL(airq_iv_create); -void -do_adapter_IO (void) +/** + * airq_iv_release - release an interrupt vector + * @iv: pointer to interrupt vector structure + */ +void airq_iv_release(struct airq_iv *iv) +{ + kfree(iv->data); + kfree(iv->ptr); + kfree(iv->bitlock); + kfree(iv->vector); + kfree(iv->avail); + kfree(iv); +} +EXPORT_SYMBOL(airq_iv_release); + +/** + * airq_iv_alloc - allocate irq bits from an interrupt vector + * @iv: pointer to an interrupt vector structure + * @num: number of consecutive irq bits to allocate + * + * Returns the bit number of the first irq in the allocated block of irqs, + * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been + * specified + */ +unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num) { - CIO_TRACE_EVENT (6, "doaio"); + unsigned long bit, i, flags; - if (adapter_handler) - (*adapter_handler) (); + if (!iv->avail || num == 0) + return -1UL; + spin_lock_irqsave(&iv->lock, flags); + bit = find_first_bit_inv(iv->avail, iv->bits); + while (bit + num <= iv->bits) { + for (i = 1; i < num; i++) + if (!test_bit_inv(bit + i, iv->avail)) + break; + if (i >= num) { + /* Found a suitable block of irqs */ + for (i = 0; i < num; i++) + clear_bit_inv(bit + i, iv->avail); + if (bit + num >= iv->end) + iv->end = bit + num + 1; + break; + } + bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1); + } + if (bit + num > iv->bits) + bit = -1UL; + spin_unlock_irqrestore(&iv->lock, flags); + return bit; } +EXPORT_SYMBOL(airq_iv_alloc); + +/** + * airq_iv_free - free irq bits of an interrupt vector + * @iv: pointer to interrupt vector structure + * @bit: number of the first irq bit to free + * @num: number of consecutive irq bits to free + */ +void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num) +{ + unsigned long i, flags; -EXPORT_SYMBOL (s390_register_adapter_interrupt); -EXPORT_SYMBOL (s390_unregister_adapter_interrupt); + if (!iv->avail || num == 0) + return; + spin_lock_irqsave(&iv->lock, flags); + for (i = 0; i < num; i++) { + /* Clear (possibly left over) interrupt bit */ + clear_bit_inv(bit + i, iv->vector); + /* Make the bit positions available again */ + set_bit_inv(bit + i, iv->avail); + } + if (bit + num >= iv->end) { + /* Find new end of bit-field */ + while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail)) + iv->end--; + } + spin_unlock_irqrestore(&iv->lock, flags); +} +EXPORT_SYMBOL(airq_iv_free); + +/** + * airq_iv_scan - scan interrupt 
vector for non-zero bits + * @iv: pointer to interrupt vector structure + * @start: bit number to start the search + * @end: bit number to end the search + * + * Returns the bit number of the next non-zero interrupt bit, or + * -1UL if the scan completed without finding any more any non-zero bits. + */ +unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, + unsigned long end) +{ + unsigned long bit; + + /* Find non-zero bit starting from 'ivs->next'. */ + bit = find_next_bit_inv(iv->vector, end, start); + if (bit >= end) + return -1UL; + clear_bit_inv(bit, iv->vector); + return bit; +} +EXPORT_SYMBOL(airq_iv_scan); diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h deleted file mode 100644 index 7d6be3fdcd6..00000000000 --- a/drivers/s390/cio/airq.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef S390_AINTERRUPT_H -#define S390_AINTERRUPT_H - -typedef int (*adapter_int_handler_t)(void); - -extern int s390_register_adapter_interrupt(adapter_int_handler_t handler); -extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler); -extern void do_adapter_IO (void); - -#endif diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index aac83ce6469..b3f791b2c1f 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -1,30 +1,31 @@ /* - * drivers/s390/cio/blacklist.c * S/390 common I/O routines -- blacklisting of specific devices - * $Revision: 1.34 $ * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 1999, 2013 * Author(s): Ingo Adlung (adlung@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) */ -#include <linux/config.h> +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/init.h> #include <linux/vmalloc.h> -#include <linux/slab.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/device.h> -#include <asm/cio.h> #include <asm/uaccess.h> +#include <asm/cio.h> +#include <asm/ipl.h> #include "blacklist.h" #include "cio.h" #include "cio_debug.h" #include "css.h" +#include "device.h" /* * "Blacklisting" of certain devices: @@ -34,172 +35,201 @@ * These can be single devices or ranges of devices */ -/* 65536 bits to indicate if a devno is blacklisted or not */ -#define __BL_DEV_WORDS (__MAX_SUBCHANNELS + (8*sizeof(long) - 1) / \ +/* 65536 bits for each set to indicate if a devno is blacklisted or not */ +#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ (8*sizeof(long))) -static unsigned long bl_dev[__BL_DEV_WORDS]; +static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS]; typedef enum {add, free} range_action; /* * Function: blacklist_range * (Un-)blacklist the devices from-to */ -static inline void -blacklist_range (range_action action, unsigned int from, unsigned int to) +static int blacklist_range(range_action action, unsigned int from_ssid, + unsigned int to_ssid, unsigned int from, + unsigned int to, int msgtrigger) { - if (!to) - to = from; + if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { + if (msgtrigger) + pr_warning("0.%x.%04x to 0.%x.%04x is not a valid " + "range for cio_ignore\n", from_ssid, from, + to_ssid, to); - if (from > to || to > __MAX_SUBCHANNELS) { - printk (KERN_WARNING "Invalid blacklist range " - "0x%04x to 0x%04x, skipping\n", from, to); - return; + return 1; } - for (; from <= to; from++) { + + while ((from_ssid < to_ssid) || 
((from_ssid == to_ssid) && + (from <= to))) { if (action == add) - set_bit (from, bl_dev); + set_bit(from, bl_dev[from_ssid]); else - clear_bit (from, bl_dev); + clear_bit(from, bl_dev[from_ssid]); + from++; + if (from > __MAX_SUBCHANNEL) { + from_ssid++; + from = 0; + } } + + return 0; } -/* - * Function: blacklist_busid - * Get devno/busid from given string. - * Shamelessly grabbed from dasd_devmap.c. - */ -static inline int -blacklist_busid(char **str, int *id0, int *id1, int *devno) +static int pure_hex(char **cp, unsigned int *val, int min_digit, + int max_digit, int max_val) { - int val, old_style; - char *sav; + int diff; - sav = *str; + diff = 0; + *val = 0; - /* check for leading '0x' */ - old_style = 0; - if ((*str)[0] == '0' && (*str)[1] == 'x') { - *str += 2; - old_style = 1; - } - if (!isxdigit((*str)[0])) /* We require at least one hex digit */ - goto confused; - val = simple_strtoul(*str, str, 16); - if (old_style || (*str)[0] != '.') { - *id0 = *id1 = 0; - if (val < 0 || val > 0xffff) - goto confused; - *devno = val; - if ((*str)[0] != ',' && (*str)[0] != '-' && - (*str)[0] != '\n' && (*str)[0] != '\0') - goto confused; - return 0; + while (diff <= max_digit) { + int value = hex_to_bin(**cp); + + if (value < 0) + break; + *val = *val * 16 + value; + (*cp)++; + diff++; } - /* New style x.y.z busid */ - if (val < 0 || val > 0xff) - goto confused; - *id0 = val; - (*str)++; - if (!isxdigit((*str)[0])) /* We require at least one hex digit */ - goto confused; - val = simple_strtoul(*str, str, 16); - if (val < 0 || val > 0xff || (*str)++[0] != '.') - goto confused; - *id1 = val; - if (!isxdigit((*str)[0])) /* We require at least one hex digit */ - goto confused; - val = simple_strtoul(*str, str, 16); - if (val < 0 || val > 0xffff) - goto confused; - *devno = val; - if ((*str)[0] != ',' && (*str)[0] != '-' && - (*str)[0] != '\n' && (*str)[0] != '\0') - goto confused; + + if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) + return 1; + return 0; -confused: - strsep(str, ",\n"); - printk(KERN_WARNING "Invalid cio_ignore parameter '%s'\n", sav); - return 1; } -static inline int -blacklist_parse_parameters (char *str, range_action action) +static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, + unsigned int *devno, int msgtrigger) { - unsigned int from, to, from_id0, to_id0, from_id1, to_id1; - - while (*str != 0 && *str != '\n') { - range_action ra = action; - while(*str == ',') - str++; - if (*str == '!') { - ra = !action; - ++str; + char *str_work; + int val, rc, ret; + + rc = 1; + + if (*str == '\0') + goto out; + + /* old style */ + str_work = str; + val = simple_strtoul(str, &str_work, 16); + + if (*str_work == '\0') { + if (val <= __MAX_SUBCHANNEL) { + *devno = val; + *ssid = 0; + *cssid = 0; + rc = 0; } + goto out; + } - /* - * Since we have to parse the proc commands and the - * kernel arguments we have to check four cases - */ - if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || - strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { - from = 0; - to = __MAX_SUBCHANNELS; - str += 3; - } else { - int rc; + /* new style */ + str_work = str; + ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); + if (ret || (str_work[0] != '.')) + goto out; + str_work++; + ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID); + if (ret || (str_work[0] != '.')) + goto out; + str_work++; + ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); + if (ret || (str_work[0] != '\0')) + goto out; + + rc = 0; +out: + if (rc && msgtrigger) + 
pr_warning("%s is not a valid device for the cio_ignore " + "kernel parameter\n", str); + + return rc; +} - rc = blacklist_busid(&str, &from_id0, - &from_id1, &from); - if (rc) - continue; - to = from; - to_id0 = from_id0; - to_id1 = from_id1; - if (*str == '-') { - str++; - rc = blacklist_busid(&str, &to_id0, - &to_id1, &to); - if (rc) - continue; - } - if (*str == '-') { - printk(KERN_WARNING "invalid cio_ignore " - "parameter '%s'\n", - strsep(&str, ",\n")); +static int blacklist_parse_parameters(char *str, range_action action, + int msgtrigger) +{ + unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; + int rc, totalrc; + char *parm; + range_action ra; + + totalrc = 0; + + while ((parm = strsep(&str, ","))) { + rc = 0; + ra = action; + if (*parm == '!') { + if (ra == add) + ra = free; + else + ra = add; + parm++; + } + if (strcmp(parm, "all") == 0) { + from_cssid = 0; + from_ssid = 0; + from = 0; + to_cssid = __MAX_CSSID; + to_ssid = __MAX_SSID; + to = __MAX_SUBCHANNEL; + } else if (strcmp(parm, "ipldev") == 0) { + if (ipl_info.type == IPL_TYPE_CCW) { + from_cssid = 0; + from_ssid = ipl_info.data.ccw.dev_id.ssid; + from = ipl_info.data.ccw.dev_id.devno; + } else if (ipl_info.type == IPL_TYPE_FCP || + ipl_info.type == IPL_TYPE_FCP_DUMP) { + from_cssid = 0; + from_ssid = ipl_info.data.fcp.dev_id.ssid; + from = ipl_info.data.fcp.dev_id.devno; + } else { continue; } - if ((from_id0 != to_id0) || (from_id1 != to_id1)) { - printk(KERN_WARNING "invalid cio_ignore range " - "%x.%x.%04x-%x.%x.%04x\n", - from_id0, from_id1, from, - to_id0, to_id1, to); + to_cssid = from_cssid; + to_ssid = from_ssid; + to = from; + } else if (strcmp(parm, "condev") == 0) { + if (console_devno == -1) continue; + + from_cssid = to_cssid = 0; + from_ssid = to_ssid = 0; + from = to = console_devno; + } else { + rc = parse_busid(strsep(&parm, "-"), &from_cssid, + &from_ssid, &from, msgtrigger); + if (!rc) { + if (parm != NULL) + rc = parse_busid(parm, &to_cssid, + &to_ssid, &to, + msgtrigger); + else { + to_cssid = from_cssid; + to_ssid = from_ssid; + to = from; + } } } - /* FIXME: ignoring id0 and id1 here. */ - pr_debug("blacklist_setup: adding range " - "from 0.0.%04x to 0.0.%04x\n", from, to); - blacklist_range (ra, from, to); + if (!rc) { + rc = blacklist_range(ra, from_ssid, to_ssid, from, to, + msgtrigger); + if (rc) + totalrc = -EINVAL; + } else + totalrc = -EINVAL; } - return 1; + + return totalrc; } -/* Parsing the commandline for blacklist parameters, e.g. to blacklist - * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of: - * - cio_ignore=1234-1236 - * - cio_ignore=0x1234-0x1235,1236 - * - cio_ignore=0x1234,1235-1236 - * - cio_ignore=1236 cio_ignore=1234-0x1236 - * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235 - * - cio_ignore=0.0.1234-0.0.1236 - * - cio_ignore=0.0.1234,0x1235,1236 - * - ... 
- */ static int __init blacklist_setup (char *str) { CIO_MSG_EVENT(6, "Reading blacklist parameters\n"); - return blacklist_parse_parameters (str, add); + if (blacklist_parse_parameters(str, add, 1)) + return 0; + return 1; } __setup ("cio_ignore=", blacklist_setup); @@ -213,137 +243,177 @@ __setup ("cio_ignore=", blacklist_setup); * Used by validate_subchannel() */ int -is_blacklisted (int devno) +is_blacklisted (int ssid, int devno) { - return test_bit (devno, bl_dev); + return test_bit (devno, bl_dev[ssid]); } #ifdef CONFIG_PROC_FS /* - * Function: s390_redo_validation - * Look for no longer blacklisted devices - * FIXME: there must be a better way to do this */ -static inline void -s390_redo_validation (void) -{ - unsigned int irq; - - CIO_TRACE_EVENT (0, "redoval"); - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - int ret; - struct subchannel *sch; - - sch = get_subchannel_by_schid(irq); - if (sch) { - /* Already known. */ - put_device(&sch->dev); - continue; - } - ret = css_probe_device(irq); - if (ret == -ENXIO) - break; /* We're through. */ - if (ret == -ENOMEM) - /* - * Stop validation for now. Bad, but no need for a - * panic. - */ - break; - } -} - -/* * Function: blacklist_parse_proc_parameters * parse the stuff which is piped to /proc/cio_ignore */ -static inline void -blacklist_parse_proc_parameters (char *buf) +static int blacklist_parse_proc_parameters(char *buf) { - if (strncmp (buf, "free ", 5) == 0) { - blacklist_parse_parameters (buf + 5, free); - } else if (strncmp (buf, "add ", 4) == 0) { - /* - * We don't need to check for known devices since - * css_probe_device will handle this correctly. - */ - blacklist_parse_parameters (buf + 4, add); - } else { - printk (KERN_WARNING "cio_ignore: Parse error; \n" - KERN_WARNING "try using 'free all|<devno-range>," - "<devno-range>,...'\n" - KERN_WARNING "or 'add <devno-range>," - "<devno-range>,...'\n"); - return; - } + int rc; + char *parm; + + parm = strsep(&buf, " "); + + if (strcmp("free", parm) == 0) { + rc = blacklist_parse_parameters(buf, free, 0); + css_schedule_eval_all_unreg(0); + } else if (strcmp("add", parm) == 0) + rc = blacklist_parse_parameters(buf, add, 0); + else if (strcmp("purge", parm) == 0) + return ccw_purge_blacklisted(); + else + return -EINVAL; + - s390_redo_validation (); + return rc; } -/* FIXME: These should be real bus ids and not home-grown ones! */ -static int cio_ignore_read (char *page, char **start, off_t off, - int count, int *eof, void *data) +/* Iterator struct for all devices. 
*/ +struct ccwdev_iter { + int devno; + int ssid; + int in_range; +}; + +static void * +cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) { - const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */ - long devno; - int len; - - len = 0; - for (devno = off; /* abuse the page variable - * as counter, see fs/proc/generic.c */ - devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) { - if (!test_bit(devno, bl_dev)) - continue; - len += sprintf(page + len, "0.0.%04lx", devno); - if (test_bit(devno + 1, bl_dev)) { /* print range */ - while (++devno < __MAX_SUBCHANNELS) - if (!test_bit(devno, bl_dev)) - break; - len += sprintf(page + len, "-0.0.%04lx", --devno); - } - len += sprintf(page + len, "\n"); - } + struct ccwdev_iter *iter = s->private; + + if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + return NULL; + memset(iter, 0, sizeof(*iter)); + iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); + iter->devno = *offset % (__MAX_SUBCHANNEL + 1); + return iter; +} + +static void +cio_ignore_proc_seq_stop(struct seq_file *s, void *it) +{ +} - if (devno < __MAX_SUBCHANNELS) - *eof = 1; - *start = (char *) (devno - off); /* number of checked entries */ - return len; +static void * +cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) +{ + struct ccwdev_iter *iter; + + if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + return NULL; + iter = it; + if (iter->devno == __MAX_SUBCHANNEL) { + iter->devno = 0; + iter->ssid++; + if (iter->ssid > __MAX_SSID) + return NULL; + } else + iter->devno++; + (*offset)++; + return iter; } -static int cio_ignore_write(struct file *file, const char __user *user_buf, - unsigned long user_len, void *data) +static int +cio_ignore_proc_seq_show(struct seq_file *s, void *it) +{ + struct ccwdev_iter *iter; + + iter = it; + if (!is_blacklisted(iter->ssid, iter->devno)) + /* Not blacklisted, nothing to output. */ + return 0; + if (!iter->in_range) { + /* First device in range. */ + if ((iter->devno == __MAX_SUBCHANNEL) || + !is_blacklisted(iter->ssid, iter->devno + 1)) + /* Singular device. */ + return seq_printf(s, "0.%x.%04x\n", + iter->ssid, iter->devno); + iter->in_range = 1; + return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno); + } + if ((iter->devno == __MAX_SUBCHANNEL) || + !is_blacklisted(iter->ssid, iter->devno + 1)) { + /* Last device in range. */ + iter->in_range = 0; + return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno); + } + return 0; +} + +static ssize_t +cio_ignore_write(struct file *file, const char __user *user_buf, + size_t user_len, loff_t *offset) { char *buf; + ssize_t rc, ret, i; + if (*offset) + return -EINVAL; if (user_len > 65536) user_len = 65536; - buf = vmalloc (user_len + 1); /* maybe better use the stack? */ + buf = vzalloc(user_len + 1); /* maybe better use the stack? 
*/ if (buf == NULL) return -ENOMEM; + if (strncpy_from_user (buf, user_buf, user_len) < 0) { - vfree (buf); - return -EFAULT; + rc = -EFAULT; + goto out_free; } - buf[user_len] = '\0'; - blacklist_parse_proc_parameters (buf); + i = user_len - 1; + while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) { + buf[i] = '\0'; + i--; + } + ret = blacklist_parse_proc_parameters(buf); + if (ret) + rc = ret; + else + rc = user_len; +out_free: vfree (buf); - return user_len; + return rc; +} + +static const struct seq_operations cio_ignore_proc_seq_ops = { + .start = cio_ignore_proc_seq_start, + .stop = cio_ignore_proc_seq_stop, + .next = cio_ignore_proc_seq_next, + .show = cio_ignore_proc_seq_show, +}; + +static int +cio_ignore_proc_open(struct inode *inode, struct file *file) +{ + return seq_open_private(file, &cio_ignore_proc_seq_ops, + sizeof(struct ccwdev_iter)); } +static const struct file_operations cio_ignore_proc_fops = { + .open = cio_ignore_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, + .write = cio_ignore_write, +}; + static int cio_ignore_proc_init (void) { struct proc_dir_entry *entry; - entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, - &proc_root); + entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL, + &cio_ignore_proc_fops); if (!entry) - return 0; - - entry->read_proc = cio_ignore_read; - entry->write_proc = cio_ignore_write; - - return 1; + return -ENOENT; + return 0; } __initcall (cio_ignore_proc_init); diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h index fb42cafbe57..95e25c1df92 100644 --- a/drivers/s390/cio/blacklist.h +++ b/drivers/s390/cio/blacklist.h @@ -1,6 +1,6 @@ #ifndef S390_BLACKLIST_H #define S390_BLACKLIST_H -extern int is_blacklisted (int devno); +extern int is_blacklisted (int ssid, int devno); #endif diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 91ea8e4777f..e443b0d0b23 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -1,12 +1,10 @@ /* - * drivers/s390/cio/ccwgroup.c * bus driver for ccwgroup - * $Revision: 1.29 $ * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Arnd Bergmann (arndb@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Copyright IBM Corp. 2002, 2012 + * + * Author(s): Arnd Bergmann (arndb@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) */ #include <linux/module.h> #include <linux/errno.h> @@ -17,10 +15,14 @@ #include <linux/ctype.h> #include <linux/dcache.h> -#include <asm/semaphore.h> +#include <asm/cio.h> #include <asm/ccwdev.h> #include <asm/ccwgroup.h> +#include "device.h" + +#define CCW_BUS_ID_SIZE 10 + /* In Linux 2.4, we had a channel device layer called "chandev" * that did all sorts of obscure stuff for networking devices. * This is another driver that serves as a replacement for just @@ -28,96 +30,220 @@ * to devices that use multiple subchannels. 
*/ -/* a device matches a driver if all its slave devices match the same - * entry of the driver */ -static int -ccwgroup_bus_match (struct device * dev, struct device_driver * drv) +static struct bus_type ccwgroup_bus_type; + +static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) { - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; + int i; + char str[8]; - gdev = container_of(dev, struct ccwgroup_device, dev); - gdrv = container_of(drv, struct ccwgroup_driver, driver); + for (i = 0; i < gdev->count; i++) { + sprintf(str, "cdev%d", i); + sysfs_remove_link(&gdev->dev.kobj, str); + sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); + } +} - if (gdev->creator_id == gdrv->driver_id) - return 1; +/* + * Remove references from ccw devices to ccw group device and from + * ccw group device to ccw devices. + */ +static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev) +{ + struct ccw_device *cdev; + int i; - return 0; + for (i = 0; i < gdev->count; i++) { + cdev = gdev->cdev[i]; + if (!cdev) + continue; + spin_lock_irq(cdev->ccwlock); + dev_set_drvdata(&cdev->dev, NULL); + spin_unlock_irq(cdev->ccwlock); + gdev->cdev[i] = NULL; + put_device(&cdev->dev); + } } -static int -ccwgroup_hotplug (struct device *dev, char **envp, int num_envp, char *buffer, - int buffer_size) + +/** + * ccwgroup_set_online() - enable a ccwgroup device + * @gdev: target ccwgroup device + * + * This function attempts to put the ccwgroup device into the online state. + * Returns: + * %0 on success and a negative error value on failure. + */ +int ccwgroup_set_online(struct ccwgroup_device *gdev) { - /* TODO */ - return 0; + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + int ret = -EINVAL; + + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) + return -EAGAIN; + if (gdev->state == CCWGROUP_ONLINE) + goto out; + if (gdrv->set_online) + ret = gdrv->set_online(gdev); + if (ret) + goto out; + + gdev->state = CCWGROUP_ONLINE; +out: + atomic_set(&gdev->onoff, 0); + return ret; } +EXPORT_SYMBOL(ccwgroup_set_online); -static struct bus_type ccwgroup_bus_type = { - .name = "ccwgroup", - .match = ccwgroup_bus_match, - .hotplug = ccwgroup_hotplug, -}; +/** + * ccwgroup_set_offline() - disable a ccwgroup device + * @gdev: target ccwgroup device + * + * This function attempts to put the ccwgroup device into the offline state. + * Returns: + * %0 on success and a negative error value on failure. 
+ */ +int ccwgroup_set_offline(struct ccwgroup_device *gdev) +{ + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + int ret = -EINVAL; + + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) + return -EAGAIN; + if (gdev->state == CCWGROUP_OFFLINE) + goto out; + if (gdrv->set_offline) + ret = gdrv->set_offline(gdev); + if (ret) + goto out; + + gdev->state = CCWGROUP_OFFLINE; +out: + atomic_set(&gdev->onoff, 0); + return ret; +} +EXPORT_SYMBOL(ccwgroup_set_offline); -static inline void -__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) +static ssize_t ccwgroup_online_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - int i; - char str[8]; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + unsigned long value; + int ret; - for (i = 0; i < gdev->count; i++) { - sprintf(str, "cdev%d", i); - sysfs_remove_link(&gdev->dev.kobj, str); - sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); + device_lock(dev); + if (!dev->driver) { + ret = -EINVAL; + goto out; } - + + ret = kstrtoul(buf, 0, &value); + if (ret) + goto out; + + if (value == 1) + ret = ccwgroup_set_online(gdev); + else if (value == 0) + ret = ccwgroup_set_offline(gdev); + else + ret = -EINVAL; +out: + device_unlock(dev); + return (ret == 0) ? count : ret; +} + +static ssize_t ccwgroup_online_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + int online; + + online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0; + + return scnprintf(buf, PAGE_SIZE, "%d\n", online); } /* * Provide an 'ungroup' attribute so the user can remove group devices no * longer needed or accidentially created. Saves memory :) */ -static ssize_t -ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static void ccwgroup_ungroup(struct ccwgroup_device *gdev) { - struct ccwgroup_device *gdev; - - gdev = to_ccwgroupdev(dev); + mutex_lock(&gdev->reg_mutex); + if (device_is_registered(&gdev->dev)) { + __ccwgroup_remove_symlinks(gdev); + device_unregister(&gdev->dev); + __ccwgroup_remove_cdev_refs(gdev); + } + mutex_unlock(&gdev->reg_mutex); +} - if (gdev->state != CCWGROUP_OFFLINE) - return -EINVAL; +static ssize_t ccwgroup_ungroup_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + int rc = 0; - __ccwgroup_remove_symlinks(gdev); - device_unregister(dev); + /* Prevent concurrent online/offline processing and ungrouping. */ + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) + return -EAGAIN; + if (gdev->state != CCWGROUP_OFFLINE) { + rc = -EINVAL; + goto out; + } + if (device_remove_file_self(dev, attr)) + ccwgroup_ungroup(gdev); + else + rc = -ENODEV; +out: + if (rc) { + /* Release onoff "lock" when ungrouping failed. 
*/ + atomic_set(&gdev->onoff, 0); + return rc; + } return count; } - static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store); +static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); + +static struct attribute *ccwgroup_attrs[] = { + &dev_attr_online.attr, + &dev_attr_ungroup.attr, + NULL, +}; +static struct attribute_group ccwgroup_attr_group = { + .attrs = ccwgroup_attrs, +}; +static const struct attribute_group *ccwgroup_attr_groups[] = { + &ccwgroup_attr_group, + NULL, +}; -static void -ccwgroup_release (struct device *dev) +static void ccwgroup_ungroup_workfn(struct work_struct *work) { - struct ccwgroup_device *gdev; - int i; + struct ccwgroup_device *gdev = + container_of(work, struct ccwgroup_device, ungroup_work); - gdev = to_ccwgroupdev(dev); + ccwgroup_ungroup(gdev); + put_device(&gdev->dev); +} - for (i = 0; i < gdev->count; i++) { - gdev->cdev[i]->dev.driver_data = NULL; - put_device(&gdev->cdev[i]->dev); - } - kfree(gdev); +static void ccwgroup_release(struct device *dev) +{ + kfree(to_ccwgroupdev(dev)); } -static inline int -__ccwgroup_create_symlinks(struct ccwgroup_device *gdev) +static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) { char str[8]; int i, rc; for (i = 0; i < gdev->count; i++) { - rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj, - "group_device"); + rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, + &gdev->dev.kobj, "group_device"); if (rc) { for (--i; i >= 0; i--) sysfs_remove_link(&gdev->cdev[i]->dev.kobj, @@ -127,8 +253,8 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) } for (i = 0; i < gdev->count; i++) { sprintf(str, "cdev%d", i); - rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj, - str); + rc = sysfs_create_link(&gdev->dev.kobj, + &gdev->cdev[i]->dev.kobj, str); if (rc) { for (--i; i >= 0; i--) { sprintf(str, "cdev%d", i); @@ -143,113 +269,182 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) return 0; } -/* - * try to add a new ccwgroup device for one driver - * argc and argv[] are a list of bus_id's of devices - * belonging to the driver. +static int __get_next_id(const char **buf, struct ccw_dev_id *id) +{ + unsigned int cssid, ssid, devno; + int ret = 0, len; + char *start, *end; + + start = (char *)*buf; + end = strchr(start, ','); + if (!end) { + /* Last entry. Strip trailing newline, if applicable. */ + end = strchr(start, '\n'); + if (end) + *end = '\0'; + len = strlen(start) + 1; + } else { + len = end - start + 1; + end++; + } + if (len <= CCW_BUS_ID_SIZE) { + if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3) + ret = -EINVAL; + } else + ret = -EINVAL; + + if (!ret) { + id->ssid = ssid; + id->devno = devno; + } + *buf = end; + return ret; +} + +/** + * ccwgroup_create_dev() - create and register a ccw group device + * @parent: parent device for the new device + * @gdrv: driver for the new group device + * @num_devices: number of slave devices + * @buf: buffer containing comma separated bus ids of slave devices + * + * Create and register a new ccw group device as a child of @parent. Slave + * devices are obtained from the list of bus ids given in @buf. + * Returns: + * %0 on success and an error code on failure. 
+ * Context: + * non-atomic */ -int -ccwgroup_create(struct device *root, - unsigned int creator_id, - struct ccw_driver *cdrv, - int argc, char *argv[]) +int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, + int num_devices, const char *buf) { struct ccwgroup_device *gdev; - int i; - int rc; - int del_drvdata; - - if (argc > 256) /* disallow dumb users */ - return -EINVAL; + struct ccw_dev_id dev_id; + int rc, i; - gdev = kmalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL); + gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), + GFP_KERNEL); if (!gdev) return -ENOMEM; - memset(gdev, 0, sizeof(*gdev) + argc*sizeof(gdev->cdev[0])); atomic_set(&gdev->onoff, 0); - - del_drvdata = 0; - for (i = 0; i < argc; i++) { - gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); - - /* all devices have to be of the same type in - * order to be grouped */ - if (!gdev->cdev[i] - || gdev->cdev[i]->id.driver_info != + mutex_init(&gdev->reg_mutex); + mutex_lock(&gdev->reg_mutex); + INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn); + gdev->count = num_devices; + gdev->dev.bus = &ccwgroup_bus_type; + gdev->dev.parent = parent; + gdev->dev.release = ccwgroup_release; + device_initialize(&gdev->dev); + + for (i = 0; i < num_devices && buf; i++) { + rc = __get_next_id(&buf, &dev_id); + if (rc != 0) + goto error; + gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id); + /* + * All devices have to be of the same type in + * order to be grouped. + */ + if (!gdev->cdev[i] || !gdev->cdev[i]->drv || + gdev->cdev[i]->drv != gdev->cdev[0]->drv || + gdev->cdev[i]->id.driver_info != gdev->cdev[0]->id.driver_info) { rc = -EINVAL; - goto free_dev; + goto error; } /* Don't allow a device to belong to more than one group. */ - if (gdev->cdev[i]->dev.driver_data) { + spin_lock_irq(gdev->cdev[i]->ccwlock); + if (dev_get_drvdata(&gdev->cdev[i]->dev)) { + spin_unlock_irq(gdev->cdev[i]->ccwlock); rc = -EINVAL; - goto free_dev; + goto error; } + dev_set_drvdata(&gdev->cdev[i]->dev, gdev); + spin_unlock_irq(gdev->cdev[i]->ccwlock); } - for (i = 0; i < argc; i++) - gdev->cdev[i]->dev.driver_data = gdev; - del_drvdata = 1; - - gdev->creator_id = creator_id; - gdev->count = argc; - gdev->dev = (struct device ) { - .bus = &ccwgroup_bus_type, - .parent = root, - .release = ccwgroup_release, - }; - - snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", - gdev->cdev[0]->dev.bus_id); - - rc = device_register(&gdev->dev); - - if (rc) - goto free_dev; - get_device(&gdev->dev); - rc = device_create_file(&gdev->dev, &dev_attr_ungroup); - - if (rc) { - device_unregister(&gdev->dev); + /* Check for sufficient number of bus ids. */ + if (i < num_devices) { + rc = -EINVAL; goto error; } + /* Check for trailing stuff. */ + if (i == num_devices && strlen(buf) > 0) { + rc = -EINVAL; + goto error; + } + + dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); + gdev->dev.groups = ccwgroup_attr_groups; + if (gdrv) { + gdev->dev.driver = &gdrv->driver; + rc = gdrv->setup ? 
gdrv->setup(gdev) : 0; + if (rc) + goto error; + } + rc = device_add(&gdev->dev); + if (rc) + goto error; rc = __ccwgroup_create_symlinks(gdev); - if (!rc) { - put_device(&gdev->dev); - return 0; + if (rc) { + device_del(&gdev->dev); + goto error; } - device_remove_file(&gdev->dev, &dev_attr_ungroup); - device_unregister(&gdev->dev); + mutex_unlock(&gdev->reg_mutex); + return 0; error: - for (i = 0; i < argc; i++) + for (i = 0; i < num_devices; i++) if (gdev->cdev[i]) { + spin_lock_irq(gdev->cdev[i]->ccwlock); + if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) + dev_set_drvdata(&gdev->cdev[i]->dev, NULL); + spin_unlock_irq(gdev->cdev[i]->ccwlock); put_device(&gdev->cdev[i]->dev); - gdev->cdev[i]->dev.driver_data = NULL; + gdev->cdev[i] = NULL; } + mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); return rc; -free_dev: - for (i = 0; i < argc; i++) - if (gdev->cdev[i]) { - put_device(&gdev->cdev[i]->dev); - if (del_drvdata) - gdev->cdev[i]->dev.driver_data = NULL; - } - kfree(gdev); - return rc; } +EXPORT_SYMBOL(ccwgroup_create_dev); + +static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(data); + + if (action == BUS_NOTIFY_UNBIND_DRIVER) { + get_device(&gdev->dev); + schedule_work(&gdev->ungroup_work); + } + + return NOTIFY_OK; +} + +static struct notifier_block ccwgroup_nb = { + .notifier_call = ccwgroup_notifier +}; -static int __init -init_ccwgroup (void) +static int __init init_ccwgroup(void) { - return bus_register (&ccwgroup_bus_type); + int ret; + + ret = bus_register(&ccwgroup_bus_type); + if (ret) + return ret; + + ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb); + if (ret) + bus_unregister(&ccwgroup_bus_type); + + return ret; } -static void __exit -cleanup_ccwgroup (void) +static void __exit cleanup_ccwgroup(void) { - bus_unregister (&ccwgroup_bus_type); + bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb); + bus_unregister(&ccwgroup_bus_type); } module_init(init_ccwgroup); @@ -257,214 +452,190 @@ module_exit(cleanup_ccwgroup); /************************** driver stuff ******************************/ -static int -ccwgroup_set_online(struct ccwgroup_device *gdev) +static int ccwgroup_remove(struct device *dev) { - struct ccwgroup_driver *gdrv; - int ret; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); - if (atomic_compare_and_swap(0, 1, &gdev->onoff)) - return -EAGAIN; - if (gdev->state == CCWGROUP_ONLINE) { - ret = 0; - goto out; - } - if (!gdev->dev.driver) { - ret = -EINVAL; - goto out; - } - gdrv = to_ccwgroupdrv (gdev->dev.driver); - if ((ret = gdrv->set_online(gdev))) - goto out; + if (!dev->driver) + return 0; + if (gdrv->remove) + gdrv->remove(gdev); - gdev->state = CCWGROUP_ONLINE; - out: - atomic_set(&gdev->onoff, 0); - return ret; + return 0; } -static int -ccwgroup_set_offline(struct ccwgroup_device *gdev) +static void ccwgroup_shutdown(struct device *dev) { - struct ccwgroup_driver *gdrv; - int ret; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); - if (atomic_compare_and_swap(0, 1, &gdev->onoff)) - return -EAGAIN; - if (gdev->state == CCWGROUP_OFFLINE) { - ret = 0; - goto out; - } - if (!gdev->dev.driver) { - ret = -EINVAL; - goto out; - } - gdrv = to_ccwgroupdrv (gdev->dev.driver); - if ((ret = gdrv->set_offline(gdev))) - goto out; - - gdev->state = CCWGROUP_OFFLINE; - out: - atomic_set(&gdev->onoff, 0); - return ret; + 
if (!dev->driver) + return; + if (gdrv->shutdown) + gdrv->shutdown(gdev); } -static ssize_t -ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static int ccwgroup_pm_prepare(struct device *dev) { - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; - unsigned int value; - int ret; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); - gdev = to_ccwgroupdev(dev); - if (!dev->driver) - return count; + /* Fail while device is being set online/offline. */ + if (atomic_read(&gdev->onoff)) + return -EAGAIN; - gdrv = to_ccwgroupdrv (gdev->dev.driver); - if (!try_module_get(gdrv->owner)) - return -EINVAL; + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; - value = simple_strtoul(buf, 0, 0); - ret = count; - if (value == 1) - ccwgroup_set_online(gdev); - else if (value == 0) - ccwgroup_set_offline(gdev); - else - ret = -EINVAL; - module_put(gdrv->owner); - return ret; + return gdrv->prepare ? gdrv->prepare(gdev) : 0; } -static ssize_t -ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf) +static void ccwgroup_pm_complete(struct device *dev) { - int online; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); - online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE); + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return; - return sprintf(buf, online ? "1\n" : "0\n"); + if (gdrv->complete) + gdrv->complete(gdev); } -static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); - -static int -ccwgroup_probe (struct device *dev) +static int ccwgroup_pm_freeze(struct device *dev) { - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); - int ret; + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; - gdev = to_ccwgroupdev(dev); - gdrv = to_ccwgroupdrv(dev->driver); + return gdrv->freeze ? gdrv->freeze(gdev) : 0; +} - if ((ret = device_create_file(dev, &dev_attr_online))) - return ret; +static int ccwgroup_pm_thaw(struct device *dev) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); - pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id); - ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV; - if (ret) - device_remove_file(dev, &dev_attr_online); + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; - return ret; + return gdrv->thaw ? gdrv->thaw(gdev) : 0; } -static int -ccwgroup_remove (struct device *dev) +static int ccwgroup_pm_restore(struct device *dev) { - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); - gdev = to_ccwgroupdev(dev); - gdrv = to_ccwgroupdrv(dev->driver); + if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) + return 0; - pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id); + return gdrv->restore ? 
gdrv->restore(gdev) : 0; +} - device_remove_file(dev, &dev_attr_online); +static const struct dev_pm_ops ccwgroup_pm_ops = { + .prepare = ccwgroup_pm_prepare, + .complete = ccwgroup_pm_complete, + .freeze = ccwgroup_pm_freeze, + .thaw = ccwgroup_pm_thaw, + .restore = ccwgroup_pm_restore, +}; - if (gdrv && gdrv->remove) - gdrv->remove(gdev); - return 0; -} +static struct bus_type ccwgroup_bus_type = { + .name = "ccwgroup", + .remove = ccwgroup_remove, + .shutdown = ccwgroup_shutdown, + .pm = &ccwgroup_pm_ops, +}; -int -ccwgroup_driver_register (struct ccwgroup_driver *cdriver) +/** + * ccwgroup_driver_register() - register a ccw group driver + * @cdriver: driver to be registered + * + * This function is mainly a wrapper around driver_register(). + */ +int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) { /* register our new driver with the core */ - cdriver->driver = (struct device_driver) { - .bus = &ccwgroup_bus_type, - .name = cdriver->name, - .probe = ccwgroup_probe, - .remove = ccwgroup_remove, - }; + cdriver->driver.bus = &ccwgroup_bus_type; return driver_register(&cdriver->driver); } +EXPORT_SYMBOL(ccwgroup_driver_register); -static int -__ccwgroup_driver_unregister_device(struct device *dev, void *data) +static int __ccwgroup_match_all(struct device *dev, void *data) { - __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); - device_unregister(dev); - put_device(dev); - return 0; + return 1; } -void -ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) +/** + * ccwgroup_driver_unregister() - deregister a ccw group driver + * @cdriver: driver to be deregistered + * + * This function is mainly a wrapper around driver_unregister(). + */ +void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) { + struct device *dev; + /* We don't want ccwgroup devices to live longer than their driver. */ - get_driver(&cdriver->driver); - driver_for_each_device(&cdriver->driver, NULL, NULL, - __ccwgroup_driver_unregister_device); - put_driver(&cdriver->driver); + while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, + __ccwgroup_match_all))) { + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + + ccwgroup_ungroup(gdev); + put_device(dev); + } driver_unregister(&cdriver->driver); } +EXPORT_SYMBOL(ccwgroup_driver_unregister); -int -ccwgroup_probe_ccwdev(struct ccw_device *cdev) +/** + * ccwgroup_probe_ccwdev() - probe function for slave devices + * @cdev: ccw device to be probed + * + * This is a dummy probe function for ccw devices that are slave devices in + * a ccw group device. + * Returns: + * always %0 + */ +int ccwgroup_probe_ccwdev(struct ccw_device *cdev) { return 0; } +EXPORT_SYMBOL(ccwgroup_probe_ccwdev); -static inline struct ccwgroup_device * -__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) -{ - struct ccwgroup_device *gdev; - - if (cdev->dev.driver_data) { - gdev = (struct ccwgroup_device *)cdev->dev.driver_data; - if (get_device(&gdev->dev)) { - if (klist_node_attached(&gdev->dev.knode_bus)) - return gdev; - put_device(&gdev->dev); - } - return NULL; - } - return NULL; -} - -void -ccwgroup_remove_ccwdev(struct ccw_device *cdev) +/** + * ccwgroup_remove_ccwdev() - remove function for slave devices + * @cdev: ccw device to be removed + * + * This is a remove function for ccw devices that are slave devices in a ccw + * group device. It sets the ccw device offline and also deregisters the + * embedding ccw group device. 
+ */ +void ccwgroup_remove_ccwdev(struct ccw_device *cdev) { struct ccwgroup_device *gdev; /* Ignore offlining errors, device is gone anyway. */ ccw_device_set_offline(cdev); /* If one of its devices is gone, the whole group is done for. */ - gdev = __ccwgroup_get_gdev_by_cdev(cdev); - if (gdev) { - __ccwgroup_remove_symlinks(gdev); - device_unregister(&gdev->dev); - put_device(&gdev->dev); + spin_lock_irq(cdev->ccwlock); + gdev = dev_get_drvdata(&cdev->dev); + if (!gdev) { + spin_unlock_irq(cdev->ccwlock); + return; } + /* Get ccwgroup device reference for local processing. */ + get_device(&gdev->dev); + spin_unlock_irq(cdev->ccwlock); + /* Unregister group device. */ + ccwgroup_ungroup(gdev); + /* Release ccwgroup device reference for local processing. */ + put_device(&gdev->dev); } - -MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(ccwgroup_driver_register); -EXPORT_SYMBOL(ccwgroup_driver_unregister); -EXPORT_SYMBOL(ccwgroup_create); -EXPORT_SYMBOL(ccwgroup_probe_ccwdev); EXPORT_SYMBOL(ccwgroup_remove_ccwdev); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c new file mode 100644 index 00000000000..07676c22d51 --- /dev/null +++ b/drivers/s390/cio/ccwreq.c @@ -0,0 +1,367 @@ +/* + * Handling of internal CCW device requests. + * + * Copyright IBM Corp. 2009, 2011 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/types.h> +#include <linux/err.h> +#include <asm/ccwdev.h> +#include <asm/cio.h> + +#include "io_sch.h" +#include "cio.h" +#include "device.h" +#include "cio_debug.h" + +/** + * lpm_adjust - adjust path mask + * @lpm: path mask to adjust + * @mask: mask of available paths + * + * Shift @lpm right until @lpm and @mask have at least one bit in common or + * until @lpm is zero. Return the resulting lpm. + */ +int lpm_adjust(int lpm, int mask) +{ + while (lpm && ((lpm & mask) == 0)) + lpm >>= 1; + return lpm; +} + +/* + * Adjust path mask to use next path and reset retry count. Return resulting + * path mask. + */ +static u16 ccwreq_next_path(struct ccw_device *cdev) +{ + struct ccw_request *req = &cdev->private->req; + + if (!req->singlepath) { + req->mask = 0; + goto out; + } + req->retries = req->maxretries; + req->mask = lpm_adjust(req->mask >> 1, req->lpm); +out: + return req->mask; +} + +/* + * Clean up device state and report to callback. + */ +static void ccwreq_stop(struct ccw_device *cdev, int rc) +{ + struct ccw_request *req = &cdev->private->req; + + if (req->done) + return; + req->done = 1; + ccw_device_set_timeout(cdev, 0); + memset(&cdev->private->irb, 0, sizeof(struct irb)); + if (rc && rc != -ENODEV && req->drc) + rc = req->drc; + req->callback(cdev, req->data, rc); +} + +/* + * (Re-)Start the operation until retries and paths are exhausted. + */ +static void ccwreq_do(struct ccw_device *cdev) +{ + struct ccw_request *req = &cdev->private->req; + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw1 *cp = req->cp; + int rc = -EACCES; + + while (req->mask) { + if (req->retries-- == 0) { + /* Retries exhausted, try next path. */ + ccwreq_next_path(cdev); + continue; + } + /* Perform start function. */ + memset(&cdev->private->irb, 0, sizeof(struct irb)); + rc = cio_start(sch, cp, (u8) req->mask); + if (rc == 0) { + /* I/O started successfully. */ + ccw_device_set_timeout(cdev, req->timeout); + return; + } + if (rc == -ENODEV) { + /* Permanent device error. 
*/ + break; + } + if (rc == -EACCES) { + /* Permant path error. */ + ccwreq_next_path(cdev); + continue; + } + /* Temporary improper status. */ + rc = cio_clear(sch); + if (rc) + break; + return; + } + ccwreq_stop(cdev, rc); +} + +/** + * ccw_request_start - perform I/O request + * @cdev: ccw device + * + * Perform the I/O request specified by cdev->req. + */ +void ccw_request_start(struct ccw_device *cdev) +{ + struct ccw_request *req = &cdev->private->req; + + if (req->singlepath) { + /* Try all paths twice to counter link flapping. */ + req->mask = 0x8080; + } else + req->mask = req->lpm; + + req->retries = req->maxretries; + req->mask = lpm_adjust(req->mask, req->lpm); + req->drc = 0; + req->done = 0; + req->cancel = 0; + if (!req->mask) + goto out_nopath; + ccwreq_do(cdev); + return; + +out_nopath: + ccwreq_stop(cdev, -EACCES); +} + +/** + * ccw_request_cancel - cancel running I/O request + * @cdev: ccw device + * + * Cancel the I/O request specified by cdev->req. Return non-zero if request + * has already finished, zero otherwise. + */ +int ccw_request_cancel(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + int rc; + + if (req->done) + return 1; + req->cancel = 1; + rc = cio_clear(sch); + if (rc) + ccwreq_stop(cdev, rc); + return 0; +} + +/* + * Return the status of the internal I/O started on the specified ccw device. + * Perform BASIC SENSE if required. + */ +static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) +{ + struct irb *irb = &cdev->private->irb; + struct cmd_scsw *scsw = &irb->scsw.cmd; + enum uc_todo todo; + + /* Perform BASIC SENSE if needed. */ + if (ccw_device_accumulate_and_sense(cdev, lcirb)) + return IO_RUNNING; + /* Check for halt/clear interrupt. */ + if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) + return IO_KILLED; + /* Check for path error. */ + if (scsw->cc == 3 || scsw->pno) + return IO_PATH_ERROR; + /* Handle BASIC SENSE data. */ + if (irb->esw.esw0.erw.cons) { + CIO_TRACE_EVENT(2, "sensedata"); + CIO_HEX_EVENT(2, &cdev->private->dev_id, + sizeof(struct ccw_dev_id)); + CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT); + /* Check for command reject. */ + if (irb->ecw[0] & SNS0_CMD_REJECT) + return IO_REJECTED; + /* Ask the driver what to do */ + if (cdev->drv && cdev->drv->uc_handler) { + todo = cdev->drv->uc_handler(cdev, lcirb); + CIO_TRACE_EVENT(2, "uc_response"); + CIO_HEX_EVENT(2, &todo, sizeof(todo)); + switch (todo) { + case UC_TODO_RETRY: + return IO_STATUS_ERROR; + case UC_TODO_RETRY_ON_NEW_PATH: + return IO_PATH_ERROR; + case UC_TODO_STOP: + return IO_REJECTED; + default: + return IO_STATUS_ERROR; + } + } + /* Assume that unexpected SENSE data implies an error. */ + return IO_STATUS_ERROR; + } + /* Check for channel errors. */ + if (scsw->cstat != 0) + return IO_STATUS_ERROR; + /* Check for device errors. */ + if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) + return IO_STATUS_ERROR; + /* Check for final state. */ + if (!(scsw->dstat & DEV_STAT_DEV_END)) + return IO_RUNNING; + /* Check for other improper status. */ + if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS)) + return IO_STATUS_ERROR; + return IO_DONE; +} + +/* + * Log ccw request status. 
+ */ +static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) +{ + struct ccw_request *req = &cdev->private->req; + struct { + struct ccw_dev_id dev_id; + u16 retries; + u8 lpm; + u8 status; + } __attribute__ ((packed)) data; + data.dev_id = cdev->private->dev_id; + data.retries = req->retries; + data.lpm = (u8) req->mask; + data.status = (u8) status; + CIO_TRACE_EVENT(2, "reqstat"); + CIO_HEX_EVENT(2, &data, sizeof(data)); +} + +/** + * ccw_request_handler - interrupt handler for I/O request procedure. + * @cdev: ccw device + * + * Handle interrupt during I/O request procedure. + */ +void ccw_request_handler(struct ccw_device *cdev) +{ + struct irb *irb = &__get_cpu_var(cio_irb); + struct ccw_request *req = &cdev->private->req; + enum io_status status; + int rc = -EOPNOTSUPP; + + /* Check status of I/O request. */ + status = ccwreq_status(cdev, irb); + if (req->filter) + status = req->filter(cdev, req->data, irb, status); + if (status != IO_RUNNING) + ccw_device_set_timeout(cdev, 0); + if (status != IO_DONE && status != IO_RUNNING) + ccwreq_log_status(cdev, status); + switch (status) { + case IO_DONE: + break; + case IO_RUNNING: + return; + case IO_REJECTED: + goto err; + case IO_PATH_ERROR: + goto out_next_path; + case IO_STATUS_ERROR: + goto out_restart; + case IO_KILLED: + /* Check if request was cancelled on purpose. */ + if (req->cancel) { + rc = -EIO; + goto err; + } + goto out_restart; + } + /* Check back with request initiator. */ + if (!req->check) + goto out; + switch (req->check(cdev, req->data)) { + case 0: + break; + case -EAGAIN: + goto out_restart; + case -EACCES: + goto out_next_path; + default: + goto err; + } +out: + ccwreq_stop(cdev, 0); + return; + +out_next_path: + /* Try next path and restart I/O. */ + if (!ccwreq_next_path(cdev)) { + rc = -EACCES; + goto err; + } +out_restart: + /* Restart. */ + ccwreq_do(cdev); + return; +err: + ccwreq_stop(cdev, rc); +} + + +/** + * ccw_request_timeout - timeout handler for I/O request procedure + * @cdev: ccw device + * + * Handle timeout during I/O request procedure. + */ +void ccw_request_timeout(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + int rc = -ENODEV, chp; + + if (cio_update_schib(sch)) + goto err; + + for (chp = 0; chp < 8; chp++) { + if ((0x80 >> chp) & sch->schib.pmcw.lpum) + pr_warning("%s: No interrupt was received within %lus " + "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n", + dev_name(&cdev->dev), req->timeout / HZ, + scsw_cstat(&sch->schib.scsw), + scsw_dstat(&sch->schib.scsw), + sch->schid.cssid, + sch->schib.pmcw.chpid[chp]); + } + + if (!ccwreq_next_path(cdev)) { + /* set the final return code for this request */ + req->drc = -ETIME; + } + rc = cio_clear(sch); + if (rc) + goto err; + return; + +err: + ccwreq_stop(cdev, rc); +} + +/** + * ccw_request_notoper - notoper handler for I/O request procedure + * @cdev: ccw device + * + * Handle notoper during I/O request procedure. + */ +void ccw_request_notoper(struct ccw_device *cdev) +{ + ccwreq_stop(cdev, -ENODEV); +} diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c new file mode 100644 index 00000000000..d497aa05a72 --- /dev/null +++ b/drivers/s390/cio/chp.c @@ -0,0 +1,792 @@ +/* + * Copyright IBM Corp. 
1999, 2010 + * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) + * Arnd Bergmann (arndb@de.ibm.com) + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/bug.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/wait.h> +#include <linux/mutex.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <asm/chpid.h> +#include <asm/sclp.h> +#include <asm/crw.h> + +#include "cio.h" +#include "css.h" +#include "ioasm.h" +#include "cio_debug.h" +#include "chp.h" + +#define to_channelpath(device) container_of(device, struct channel_path, dev) +#define CHP_INFO_UPDATE_INTERVAL 1*HZ + +enum cfg_task_t { + cfg_none, + cfg_configure, + cfg_deconfigure +}; + +/* Map for pending configure tasks. */ +static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; +static DEFINE_MUTEX(cfg_lock); +static int cfg_busy; + +/* Map for channel-path status. */ +static struct sclp_chp_info chp_info; +static DEFINE_MUTEX(info_lock); + +/* Time after which channel-path status may be outdated. */ +static unsigned long chp_info_expires; + +/* Workqueue to perform pending configure tasks. */ +static struct workqueue_struct *chp_wq; +static struct work_struct cfg_work; + +/* Wait queue for configure completion events. */ +static wait_queue_head_t cfg_wait_queue; + +/* Set vary state for given chpid. */ +static void set_chp_logically_online(struct chp_id chpid, int onoff) +{ + chpid_to_chp(chpid)->state = onoff; +} + +/* On success return 0 if channel-path is varied offline, 1 if it is varied + * online. Return -ENODEV if channel-path is not registered. */ +int chp_get_status(struct chp_id chpid) +{ + return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV); +} + +/** + * chp_get_sch_opm - return opm for subchannel + * @sch: subchannel + * + * Calculate and return the operational path mask (opm) based on the chpids + * used by the subchannel and the status of the associated channel-paths. + */ +u8 chp_get_sch_opm(struct subchannel *sch) +{ + struct chp_id chpid; + int opm; + int i; + + opm = 0; + chp_id_init(&chpid); + for (i = 0; i < 8; i++) { + opm <<= 1; + chpid.id = sch->schib.pmcw.chpid[i]; + if (chp_get_status(chpid) != 0) + opm |= 1; + } + return opm; +} +EXPORT_SYMBOL_GPL(chp_get_sch_opm); + +/** + * chp_is_registered - check if a channel-path is registered + * @chpid: channel-path ID + * + * Return non-zero if a channel-path with the given chpid is registered, + * zero otherwise. 
+ */ +int chp_is_registered(struct chp_id chpid) +{ + return chpid_to_chp(chpid) != NULL; +} + +/* + * Function: s390_vary_chpid + * Varies the specified chpid online or offline + */ +static int s390_vary_chpid(struct chp_id chpid, int on) +{ + char dbf_text[15]; + int status; + + sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid, + chpid.id); + CIO_TRACE_EVENT(2, dbf_text); + + status = chp_get_status(chpid); + if (!on && !status) + return 0; + + set_chp_logically_online(chpid, on); + chsc_chp_vary(chpid, on); + return 0; +} + +/* + * Channel measurement related functions + */ +static ssize_t chp_measurement_chars_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct channel_path *chp; + struct device *device; + + device = container_of(kobj, struct device, kobj); + chp = to_channelpath(device); + if (!chp->cmg_chars) + return 0; + + return memory_read_from_buffer(buf, count, &off, + chp->cmg_chars, sizeof(struct cmg_chars)); +} + +static struct bin_attribute chp_measurement_chars_attr = { + .attr = { + .name = "measurement_chars", + .mode = S_IRUSR, + }, + .size = sizeof(struct cmg_chars), + .read = chp_measurement_chars_read, +}; + +static void chp_measurement_copy_block(struct cmg_entry *buf, + struct channel_subsystem *css, + struct chp_id chpid) +{ + void *area; + struct cmg_entry *entry, reference_buf; + int idx; + + if (chpid.id < 128) { + area = css->cub_addr1; + idx = chpid.id; + } else { + area = css->cub_addr2; + idx = chpid.id - 128; + } + entry = area + (idx * sizeof(struct cmg_entry)); + do { + memcpy(buf, entry, sizeof(*entry)); + memcpy(&reference_buf, entry, sizeof(*entry)); + } while (reference_buf.values[0] != buf->values[0]); +} + +static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct channel_path *chp; + struct channel_subsystem *css; + struct device *device; + unsigned int size; + + device = container_of(kobj, struct device, kobj); + chp = to_channelpath(device); + css = to_css(chp->dev.parent); + + size = sizeof(struct cmg_entry); + + /* Only allow single reads. */ + if (off || count < size) + return 0; + chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid); + count = size; + return count; +} + +static struct bin_attribute chp_measurement_attr = { + .attr = { + .name = "measurement", + .mode = S_IRUSR, + }, + .size = sizeof(struct cmg_entry), + .read = chp_measurement_read, +}; + +void chp_remove_cmg_attr(struct channel_path *chp) +{ + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_attr); +} + +int chp_add_cmg_attr(struct channel_path *chp) +{ + int ret; + + ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); + if (ret) + return ret; + ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); + if (ret) + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); + return ret; +} + +/* + * Files for the channel path entries. + */ +static ssize_t chp_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + int status; + + mutex_lock(&chp->lock); + status = chp->state; + mutex_unlock(&chp->lock); + + return status ? 
sprintf(buf, "online\n") : sprintf(buf, "offline\n"); +} + +static ssize_t chp_status_write(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct channel_path *cp = to_channelpath(dev); + char cmd[10]; + int num_args; + int error; + + num_args = sscanf(buf, "%5s", cmd); + if (!num_args) + return count; + + if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) { + mutex_lock(&cp->lock); + error = s390_vary_chpid(cp->chpid, 1); + mutex_unlock(&cp->lock); + } else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) { + mutex_lock(&cp->lock); + error = s390_vary_chpid(cp->chpid, 0); + mutex_unlock(&cp->lock); + } else + error = -EINVAL; + + return error < 0 ? error : count; +} + +static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); + +static ssize_t chp_configure_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *cp; + int status; + + cp = to_channelpath(dev); + status = chp_info_get_status(cp->chpid); + if (status < 0) + return status; + + return snprintf(buf, PAGE_SIZE, "%d\n", status); +} + +static int cfg_wait_idle(void); + +static ssize_t chp_configure_write(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct channel_path *cp; + int val; + char delim; + + if (sscanf(buf, "%d %c", &val, &delim) != 1) + return -EINVAL; + if (val != 0 && val != 1) + return -EINVAL; + cp = to_channelpath(dev); + chp_cfg_schedule(cp->chpid, val); + cfg_wait_idle(); + + return count; +} + +static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write); + +static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + u8 type; + + mutex_lock(&chp->lock); + type = chp->desc.desc; + mutex_unlock(&chp->lock); + return sprintf(buf, "%x\n", type); +} + +static DEVICE_ATTR(type, 0444, chp_type_show, NULL); + +static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + + if (!chp) + return 0; + if (chp->cmg == -1) /* channel measurements not available */ + return sprintf(buf, "unknown\n"); + return sprintf(buf, "%x\n", chp->cmg); +} + +static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); + +static ssize_t chp_shared_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + + if (!chp) + return 0; + if (chp->shared == -1) /* channel measurements not available */ + return sprintf(buf, "unknown\n"); + return sprintf(buf, "%x\n", chp->shared); +} + +static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); + +static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL); + +static ssize_t chp_chid_external_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 
1 : 0); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL); + +static struct attribute *chp_attrs[] = { + &dev_attr_status.attr, + &dev_attr_configure.attr, + &dev_attr_type.attr, + &dev_attr_cmg.attr, + &dev_attr_shared.attr, + &dev_attr_chid.attr, + &dev_attr_chid_external.attr, + NULL, +}; +static struct attribute_group chp_attr_group = { + .attrs = chp_attrs, +}; +static const struct attribute_group *chp_attr_groups[] = { + &chp_attr_group, + NULL, +}; + +static void chp_release(struct device *dev) +{ + struct channel_path *cp; + + cp = to_channelpath(dev); + kfree(cp); +} + +/** + * chp_update_desc - update channel-path description + * @chp - channel-path + * + * Update the channel-path description of the specified channel-path. + * Return zero on success, non-zero otherwise. + */ +int chp_update_desc(struct channel_path *chp) +{ + int rc; + + rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc); + if (rc) + return rc; + + rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); + + return rc; +} + +/** + * chp_new - register a new channel-path + * @chpid - channel-path ID + * + * Create and register data structure representing new channel-path. Return + * zero on success, non-zero otherwise. + */ +int chp_new(struct chp_id chpid) +{ + struct channel_path *chp; + int ret; + + if (chp_is_registered(chpid)) + return 0; + chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); + if (!chp) + return -ENOMEM; + + /* fill in status, etc. */ + chp->chpid = chpid; + chp->state = 1; + chp->dev.parent = &channel_subsystems[chpid.cssid]->device; + chp->dev.groups = chp_attr_groups; + chp->dev.release = chp_release; + mutex_init(&chp->lock); + + /* Obtain channel path description and fill it in. */ + ret = chp_update_desc(chp); + if (ret) + goto out_free; + if ((chp->desc.flags & 0x80) == 0) { + ret = -ENODEV; + goto out_free; + } + /* Get channel-measurement characteristics. */ + if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) { + ret = chsc_get_channel_measurement_chars(chp); + if (ret) + goto out_free; + } else { + chp->cmg = -1; + } + dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id); + + /* make it known to the system */ + ret = device_register(&chp->dev); + if (ret) { + CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n", + chpid.cssid, chpid.id, ret); + put_device(&chp->dev); + goto out; + } + mutex_lock(&channel_subsystems[chpid.cssid]->mutex); + if (channel_subsystems[chpid.cssid]->cm_enabled) { + ret = chp_add_cmg_attr(chp); + if (ret) { + device_unregister(&chp->dev); + mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); + goto out; + } + } + channel_subsystems[chpid.cssid]->chps[chpid.id] = chp; + mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); + goto out; +out_free: + kfree(chp); +out: + return ret; +} + +/** + * chp_get_chp_desc - return newly allocated channel-path description + * @chpid: channel-path ID + * + * On success return a newly allocated copy of the channel-path description + * data associated with the given channel-path ID. Return %NULL on error. 
+ */ +struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid) +{ + struct channel_path *chp; + struct channel_path_desc *desc; + + chp = chpid_to_chp(chpid); + if (!chp) + return NULL; + desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); + if (!desc) + return NULL; + + mutex_lock(&chp->lock); + memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); + mutex_unlock(&chp->lock); + return desc; +} + +/** + * chp_process_crw - process channel-path status change + * @crw0: channel report-word to handler + * @crw1: second channel-report word (always NULL) + * @overflow: crw overflow indication + * + * Handle channel-report-words indicating that the status of a channel-path + * has changed. + */ +static void chp_process_crw(struct crw *crw0, struct crw *crw1, + int overflow) +{ + struct chp_id chpid; + + if (overflow) { + css_schedule_eval_all(); + return; + } + CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, + crw0->erc, crw0->rsid); + /* + * Check for solicited machine checks. These are + * created by reset channel path and need not be + * handled here. + */ + if (crw0->slct) { + CIO_CRW_EVENT(2, "solicited machine check for " + "channel path %02X\n", crw0->rsid); + return; + } + chp_id_init(&chpid); + chpid.id = crw0->rsid; + switch (crw0->erc) { + case CRW_ERC_IPARM: /* Path has come. */ + if (!chp_is_registered(chpid)) + chp_new(chpid); + chsc_chp_online(chpid); + break; + case CRW_ERC_PERRI: /* Path has gone. */ + case CRW_ERC_PERRN: + chsc_chp_offline(chpid); + break; + default: + CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n", + crw0->erc); + } +} + +int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link) +{ + int i; + int mask; + + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (!(ssd->path_mask & mask)) + continue; + if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid)) + continue; + if ((ssd->fla_valid_mask & mask) && + ((ssd->fla[i] & link->fla_mask) != link->fla)) + continue; + return mask; + } + return 0; +} +EXPORT_SYMBOL_GPL(chp_ssd_get_mask); + +static inline int info_bit_num(struct chp_id id) +{ + return id.id + id.cssid * (__MAX_CHPID + 1); +} + +/* Force chp_info refresh on next call to info_validate(). */ +static void info_expire(void) +{ + mutex_lock(&info_lock); + chp_info_expires = jiffies - 1; + mutex_unlock(&info_lock); +} + +/* Ensure that chp_info is up-to-date. */ +static int info_update(void) +{ + int rc; + + mutex_lock(&info_lock); + rc = 0; + if (time_after(jiffies, chp_info_expires)) { + /* Data is too old, update. */ + rc = sclp_chp_read_info(&chp_info); + chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ; + } + mutex_unlock(&info_lock); + + return rc; +} + +/** + * chp_info_get_status - retrieve configure status of a channel-path + * @chpid: channel-path ID + * + * On success, return 0 for standby, 1 for configured, 2 for reserved, + * 3 for not recognized. Return negative error code on error. 
+ */ +int chp_info_get_status(struct chp_id chpid) +{ + int rc; + int bit; + + rc = info_update(); + if (rc) + return rc; + + bit = info_bit_num(chpid); + mutex_lock(&info_lock); + if (!chp_test_bit(chp_info.recognized, bit)) + rc = CHP_STATUS_NOT_RECOGNIZED; + else if (chp_test_bit(chp_info.configured, bit)) + rc = CHP_STATUS_CONFIGURED; + else if (chp_test_bit(chp_info.standby, bit)) + rc = CHP_STATUS_STANDBY; + else + rc = CHP_STATUS_RESERVED; + mutex_unlock(&info_lock); + + return rc; +} + +/* Return configure task for chpid. */ +static enum cfg_task_t cfg_get_task(struct chp_id chpid) +{ + return chp_cfg_task[chpid.cssid][chpid.id]; +} + +/* Set configure task for chpid. */ +static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) +{ + chp_cfg_task[chpid.cssid][chpid.id] = cfg; +} + +/* Perform one configure/deconfigure request. Reschedule work function until + * last request. */ +static void cfg_func(struct work_struct *work) +{ + struct chp_id chpid; + enum cfg_task_t t; + int rc; + + mutex_lock(&cfg_lock); + t = cfg_none; + chp_id_for_each(&chpid) { + t = cfg_get_task(chpid); + if (t != cfg_none) { + cfg_set_task(chpid, cfg_none); + break; + } + } + mutex_unlock(&cfg_lock); + + switch (t) { + case cfg_configure: + rc = sclp_chp_configure(chpid); + if (rc) + CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)=" + "%d\n", chpid.cssid, chpid.id, rc); + else { + info_expire(); + chsc_chp_online(chpid); + } + break; + case cfg_deconfigure: + rc = sclp_chp_deconfigure(chpid); + if (rc) + CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)=" + "%d\n", chpid.cssid, chpid.id, rc); + else { + info_expire(); + chsc_chp_offline(chpid); + } + break; + case cfg_none: + /* Get updated information after last change. */ + info_update(); + mutex_lock(&cfg_lock); + cfg_busy = 0; + mutex_unlock(&cfg_lock); + wake_up_interruptible(&cfg_wait_queue); + return; + } + queue_work(chp_wq, &cfg_work); +} + +/** + * chp_cfg_schedule - schedule chpid configuration request + * @chpid - channel-path ID + * @configure - Non-zero for configure, zero for deconfigure + * + * Schedule a channel-path configuration/deconfiguration request. + */ +void chp_cfg_schedule(struct chp_id chpid, int configure) +{ + CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, + configure); + mutex_lock(&cfg_lock); + cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); + cfg_busy = 1; + mutex_unlock(&cfg_lock); + queue_work(chp_wq, &cfg_work); +} + +/** + * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request + * @chpid - channel-path ID + * + * Cancel an active channel-path deconfiguration request if it has not yet + * been performed. + */ +void chp_cfg_cancel_deconfigure(struct chp_id chpid) +{ + CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); + mutex_lock(&cfg_lock); + if (cfg_get_task(chpid) == cfg_deconfigure) + cfg_set_task(chpid, cfg_none); + mutex_unlock(&cfg_lock); +} + +static int cfg_wait_idle(void) +{ + if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) + return -ERESTARTSYS; + return 0; +} + +static int __init chp_init(void) +{ + struct chp_id chpid; + int ret; + + ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw); + if (ret) + return ret; + chp_wq = create_singlethread_workqueue("cio_chp"); + if (!chp_wq) { + crw_unregister_handler(CRW_RSC_CPATH); + return -ENOMEM; + } + INIT_WORK(&cfg_work, cfg_func); + init_waitqueue_head(&cfg_wait_queue); + if (info_update()) + return 0; + /* Register available channel-paths. 
*/ + chp_id_for_each(&chpid) { + if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) + chp_new(chpid); + } + + return 0; +} + +subsys_initcall(chp_init); diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h new file mode 100644 index 00000000000..4efd5b867cc --- /dev/null +++ b/drivers/s390/cio/chp.h @@ -0,0 +1,72 @@ +/* + * Copyright IBM Corp. 2007, 2010 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#ifndef S390_CHP_H +#define S390_CHP_H S390_CHP_H + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <asm/chpid.h> +#include "chsc.h" +#include "css.h" + +#define CHP_STATUS_STANDBY 0 +#define CHP_STATUS_CONFIGURED 1 +#define CHP_STATUS_RESERVED 2 +#define CHP_STATUS_NOT_RECOGNIZED 3 + +#define CHP_ONLINE 0 +#define CHP_OFFLINE 1 +#define CHP_VARY_ON 2 +#define CHP_VARY_OFF 3 + +struct chp_link { + struct chp_id chpid; + u32 fla_mask; + u16 fla; +}; + +static inline int chp_test_bit(u8 *bitmap, int num) +{ + int byte = num >> 3; + int mask = 128 >> (num & 7); + + return (bitmap[byte] & mask) ? 1 : 0; +} + + +struct channel_path { + struct device dev; + struct chp_id chpid; + struct mutex lock; /* Serialize access to below members. */ + int state; + struct channel_path_desc desc; + struct channel_path_desc_fmt1 desc_fmt1; + /* Channel-measurement related stuff: */ + int cmg; + int shared; + void *cmg_chars; +}; + +/* Return channel_path struct for given chpid. */ +static inline struct channel_path *chpid_to_chp(struct chp_id chpid) +{ + return channel_subsystems[chpid.cssid]->chps[chpid.id]; +} + +int chp_get_status(struct chp_id chpid); +u8 chp_get_sch_opm(struct subchannel *sch); +int chp_is_registered(struct chp_id chpid); +struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid); +void chp_remove_cmg_attr(struct channel_path *chp); +int chp_add_cmg_attr(struct channel_path *chp); +int chp_update_desc(struct channel_path *chp); +int chp_new(struct chp_id chpid); +void chp_cfg_schedule(struct chp_id chpid, int configure); +void chp_cfg_cancel_deconfigure(struct chp_id chpid); +int chp_info_get_status(struct chp_id chpid); +int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *); +#endif /* S390_CHP_H */ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index fa3c23b80e3..e3bf885f4a6 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1,363 +1,266 @@ /* - * drivers/s390/cio/chsc.c * S/390 common I/O routines -- channel subsystem call - * $Revision: 1.120 $ * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 
1999,2012 * Author(s): Ingo Adlung (adlung@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) */ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> -#include <linux/config.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/device.h> +#include <linux/pci.h> #include <asm/cio.h> +#include <asm/chpid.h> +#include <asm/chsc.h> +#include <asm/crw.h> +#include <asm/isc.h> #include "css.h" #include "cio.h" #include "cio_debug.h" #include "ioasm.h" +#include "chp.h" #include "chsc.h" -static struct channel_path *chps[NR_CHPIDS]; - static void *sei_page; +static void *chsc_page; +static DEFINE_SPINLOCK(chsc_page_lock); -static int new_channel_path(int chpid); - -static inline void -set_chp_logically_online(int chp, int onoff) -{ - chps[chp]->state = onoff; -} - -static int -get_chp_status(int chp) -{ - return (chps[chp] ? chps[chp]->state : -ENODEV); -} - -void -chsc_validate_chpids(struct subchannel *sch) -{ - int mask, chp; - - for (chp = 0; chp <= 7; chp++) { - mask = 0x80 >> chp; - if (!get_chp_status(sch->schib.pmcw.chpid[chp])) - /* disable using this path */ - sch->opm &= ~mask; - } -} - -void -chpid_is_actually_online(int chp) -{ - int state; - - state = get_chp_status(chp); - if (state < 0) { - need_rescan = 1; - queue_work(slow_path_wq, &slow_path_work); - } else - WARN_ON(!state); -} - -/* FIXME: this is _always_ called for every subchannel. shouldn't we - * process more than one at a time? */ -static int -chsc_get_sch_desc_irq(struct subchannel *sch, void *page) +/** + * chsc_error_from_response() - convert a chsc response to an error + * @response: chsc response code + * + * Returns an appropriate Linux error code for @response. + */ +int chsc_error_from_response(int response) { - int ccode, j; - - struct { - struct chsc_header request; - u16 reserved1; - u16 f_sch; /* first subchannel */ - u16 reserved2; - u16 l_sch; /* last subchannel */ - u32 reserved3; - struct chsc_header response; - u32 reserved4; - u8 sch_valid : 1; - u8 dev_valid : 1; - u8 st : 3; /* subchannel type */ - u8 zeroes : 3; - u8 unit_addr; /* unit address */ - u16 devno; /* device number */ - u8 path_mask; - u8 fla_valid_mask; - u16 sch; /* subchannel */ - u8 chpid[8]; /* chpids 0-7 */ - u16 fla[8]; /* full link addresses 0-7 */ - } *ssd_area; - - ssd_area = page; - - ssd_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0004, - }; - - ssd_area->f_sch = sch->irq; - ssd_area->l_sch = sch->irq; - - ccode = chsc(ssd_area); - if (ccode > 0) { - pr_debug("chsc returned with ccode = %d\n", ccode); - return (ccode == 3) ? 
-ENODEV : -EBUSY; - } - - switch (ssd_area->response.code) { - case 0x0001: /* everything ok */ - break; + switch (response) { + case 0x0001: + return 0; case 0x0002: - CIO_CRW_EVENT(2, "Invalid command!\n"); - return -EINVAL; case 0x0003: - CIO_CRW_EVENT(2, "Error in chsc request block!\n"); + case 0x0006: + case 0x0007: + case 0x0008: + case 0x000a: + case 0x0104: return -EINVAL; case 0x0004: - CIO_CRW_EVENT(2, "Model does not provide ssd\n"); return -EOPNOTSUPP; + case 0x000b: + case 0x0107: /* "Channel busy" for the op 0x003d */ + return -EBUSY; + case 0x0100: + case 0x0102: + return -ENOMEM; default: - CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", - ssd_area->response.code); return -EIO; } - - /* - * ssd_area->st stores the type of the detected - * subchannel, with the following definitions: - * - * 0: I/O subchannel: All fields have meaning - * 1: CHSC subchannel: Only sch_val, st and sch - * have meaning - * 2: Message subchannel: All fields except unit_addr - * have meaning - * 3: ADM subchannel: Only sch_val, st and sch - * have meaning - * - * Other types are currently undefined. - */ - if (ssd_area->st > 3) { /* uhm, that looks strange... */ - CIO_CRW_EVENT(0, "Strange subchannel type %d" - " for sch %04x\n", ssd_area->st, sch->irq); - /* - * There may have been a new subchannel type defined in the - * time since this code was written; since we don't know which - * fields have meaning and what to do with it we just jump out - */ - return 0; - } else { - const char *type[4] = {"I/O", "chsc", "message", "ADM"}; - CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n", - sch->irq, type[ssd_area->st]); - - sch->ssd_info.valid = 1; - sch->ssd_info.type = ssd_area->st; - } - - if (ssd_area->st == 0 || ssd_area->st == 2) { - for (j = 0; j < 8; j++) { - if (!((0x80 >> j) & ssd_area->path_mask & - ssd_area->fla_valid_mask)) - continue; - sch->ssd_info.chpid[j] = ssd_area->chpid[j]; - sch->ssd_info.fla[j] = ssd_area->fla[j]; - } - } - return 0; } - -int -css_get_ssd_info(struct subchannel *sch) +EXPORT_SYMBOL_GPL(chsc_error_from_response); + +struct chsc_ssd_area { + struct chsc_header request; + u16 :10; + u16 ssid:2; + u16 :4; + u16 f_sch; /* first subchannel */ + u16 :16; + u16 l_sch; /* last subchannel */ + u32 :32; + struct chsc_header response; + u32 :32; + u8 sch_valid : 1; + u8 dev_valid : 1; + u8 st : 3; /* subchannel type */ + u8 zeroes : 3; + u8 unit_addr; /* unit address */ + u16 devno; /* device number */ + u8 path_mask; + u8 fla_valid_mask; + u16 sch; /* subchannel */ + u8 chpid[8]; /* chpids 0-7 */ + u16 fla[8]; /* full link addresses 0-7 */ +} __attribute__ ((packed)); + +int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) { + struct chsc_ssd_area *ssd_area; + int ccode; int ret; - void *page; + int i; + int mask; - page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!page) - return -ENOMEM; - spin_lock_irq(&sch->lock); - ret = chsc_get_sch_desc_irq(sch, page); - if (ret) { - static int cio_chsc_err_msg; - - if (!cio_chsc_err_msg) { - printk(KERN_ERR - "chsc_get_sch_descriptions:" - " Error %d while doing chsc; " - "processing some machine checks may " - "not work\n", ret); - cio_chsc_err_msg = 1; - } + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + ssd_area = chsc_page; + ssd_area->request.length = 0x0010; + ssd_area->request.code = 0x0004; + ssd_area->ssid = schid.ssid; + ssd_area->f_sch = schid.sch_no; + ssd_area->l_sch = schid.sch_no; + + ccode = chsc(ssd_area); + /* Check response. 
*/ + if (ccode > 0) { + ret = (ccode == 3) ? -ENODEV : -EBUSY; + goto out; } - spin_unlock_irq(&sch->lock); - free_page((unsigned long)page); - if (!ret) { - int j, chpid; - /* Allocate channel path structures, if needed. */ - for (j = 0; j < 8; j++) { - chpid = sch->ssd_info.chpid[j]; - if (chpid && (get_chp_status(chpid) < 0)) - new_channel_path(chpid); + ret = chsc_error_from_response(ssd_area->response.code); + if (ret != 0) { + CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", + schid.ssid, schid.sch_no, + ssd_area->response.code); + goto out; + } + if (!ssd_area->sch_valid) { + ret = -ENODEV; + goto out; + } + /* Copy data */ + ret = 0; + memset(ssd, 0, sizeof(struct chsc_ssd_info)); + if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && + (ssd_area->st != SUBCHANNEL_TYPE_MSG)) + goto out; + ssd->path_mask = ssd_area->path_mask; + ssd->fla_valid_mask = ssd_area->fla_valid_mask; + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (ssd_area->path_mask & mask) { + chp_id_init(&ssd->chpid[i]); + ssd->chpid[i].id = ssd_area->chpid[i]; } + if (ssd_area->fla_valid_mask & mask) + ssd->fla[i] = ssd_area->fla[i]; } +out: + spin_unlock_irq(&chsc_page_lock); return ret; } -static int -s390_subchannel_remove_chpid(struct device *dev, void *data) +/** + * chsc_ssqd() - store subchannel QDIO data (SSQD) + * @schid: id of the subchannel on which SSQD is performed + * @ssqd: request and response block for SSQD + * + * Returns 0 on success. + */ +int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd) { - int j; - int mask; - struct subchannel *sch; - __u8 *chpid; - struct schib schib; - - sch = to_subchannel(dev); - chpid = data; - for (j = 0; j < 8; j++) - if (sch->schib.pmcw.chpid[j] == *chpid) - break; - if (j >= 8) - return 0; + memset(ssqd, 0, sizeof(*ssqd)); + ssqd->request.length = 0x0010; + ssqd->request.code = 0x0024; + ssqd->first_sch = schid.sch_no; + ssqd->last_sch = schid.sch_no; + ssqd->ssid = schid.ssid; + + if (chsc(ssqd)) + return -EIO; - mask = 0x80 >> j; - spin_lock(&sch->lock); - - stsch(sch->irq, &schib); - if (!schib.pmcw.dnv) - goto out_unreg; - memcpy(&sch->schib, &schib, sizeof(struct schib)); - /* Check for single path devices. */ - if (sch->schib.pmcw.pim == 0x80) - goto out_unreg; - if (sch->vpm == mask) - goto out_unreg; - - if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND | - SCSW_ACTL_HALT_PEND | - SCSW_ACTL_START_PEND | - SCSW_ACTL_RESUME_PEND)) && - (sch->schib.pmcw.lpum == mask)) { - int cc = cio_cancel(sch); - - if (cc == -ENODEV) - goto out_unreg; + return chsc_error_from_response(ssqd->response.code); +} +EXPORT_SYMBOL_GPL(chsc_ssqd); + +/** + * chsc_sadc() - set adapter device controls (SADC) + * @schid: id of the subchannel on which SADC is performed + * @scssc: request and response block for SADC + * @summary_indicator_addr: summary indicator address + * @subchannel_indicator_addr: subchannel indicator address + * + * Returns 0 on success. + */ +int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, + u64 summary_indicator_addr, u64 subchannel_indicator_addr) +{ + memset(scssc, 0, sizeof(*scssc)); + scssc->request.length = 0x0fe0; + scssc->request.code = 0x0021; + scssc->operation_code = 0; - if (cc == -EINVAL) { - cc = cio_clear(sch); - if (cc == -ENODEV) - goto out_unreg; - /* Call handler. 
*/ - if (sch->driver && sch->driver->termination) - sch->driver->termination(&sch->dev); - goto out_unlock; - } - } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && - (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && - (sch->schib.pmcw.lpum == mask)) { - int cc; + scssc->summary_indicator_addr = summary_indicator_addr; + scssc->subchannel_indicator_addr = subchannel_indicator_addr; - cc = cio_clear(sch); - if (cc == -ENODEV) - goto out_unreg; - /* Call handler. */ - if (sch->driver && sch->driver->termination) - sch->driver->termination(&sch->dev); - goto out_unlock; - } + scssc->ks = PAGE_DEFAULT_KEY >> 4; + scssc->kc = PAGE_DEFAULT_KEY >> 4; + scssc->isc = QDIO_AIRQ_ISC; + scssc->schid = schid; + + /* enable the time delay disablement facility */ + if (css_general_characteristics.aif_tdd) + scssc->word_with_d_bit = 0x10000000; + + if (chsc(scssc)) + return -EIO; + + return chsc_error_from_response(scssc->response.code); +} +EXPORT_SYMBOL_GPL(chsc_sadc); - /* trigger path verification. */ - if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); -out_unlock: - spin_unlock(&sch->lock); +static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) +{ + spin_lock_irq(sch->lock); + if (sch->driver && sch->driver->chp_event) + if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) + goto out_unreg; + spin_unlock_irq(sch->lock); return 0; + out_unreg: - spin_unlock(&sch->lock); sch->lpm = 0; - if (css_enqueue_subchannel_slow(sch->irq)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } + spin_unlock_irq(sch->lock); + css_schedule_eval(sch->schid); return 0; } -static inline void -s390_set_chpid_offline( __u8 chpid) +void chsc_chp_offline(struct chp_id chpid) { char dbf_txt[15]; + struct chp_link link; - sprintf(dbf_txt, "chpr%x", chpid); + sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_txt); - if (get_chp_status(chpid) <= 0) + if (chp_get_status(chpid) <= 0) return; - - bus_for_each_dev(&css_bus_type, NULL, &chpid, - s390_subchannel_remove_chpid); - - if (need_rescan || css_slow_subchannels_exist()) - queue_work(slow_path_wq, &slow_path_work); + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; + /* Wait until previous actions have settled. */ + css_wait_for_slow_path(); + for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); } -static int -s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, - struct subchannel *sch) +static int __s390_process_res_acc(struct subchannel *sch, void *data) { - int found; - int chp; - int ccode; - - found = 0; - for (chp = 0; chp <= 7; chp++) - /* - * check if chpid is in information updated by ssd - */ - if (sch->ssd_info.valid && - sch->ssd_info.chpid[chp] == chpid && - (sch->ssd_info.fla[chp] & fla_mask) == fla) { - found = 1; - break; - } - - if (found == 0) - return 0; - - /* - * Do a stsch to update our subchannel structure with the - * new path information and eventually check for logically - * offline chpids. 
- */ - ccode = stsch(sch->irq, &sch->schib); - if (ccode > 0) - return 0; + spin_lock_irq(sch->lock); + if (sch->driver && sch->driver->chp_event) + sch->driver->chp_event(sch, data, CHP_ONLINE); + spin_unlock_irq(sch->lock); - return 0x80 >> chp; + return 0; } -static int -s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) +static void s390_process_res_acc(struct chp_link *link) { - struct subchannel *sch; - int irq, rc; char dbf_txt[15]; - sprintf(dbf_txt, "accpr%x", chpid); + sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, + link->chpid.id); CIO_TRACE_EVENT( 2, dbf_txt); - if (fla != 0) { - sprintf(dbf_txt, "fla%x", fla); + if (link->fla != 0) { + sprintf(dbf_txt, "fla%x", link->fla); CIO_TRACE_EVENT( 2, dbf_txt); } - + /* Wait until previous actions have settled. */ + css_wait_for_slow_path(); /* * I/O resources may have become accessible. * Scan through all subchannels that may be concerned and @@ -365,71 +268,8 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) * The more information we have (info), the less scanning * will we have to do. */ - - if (!get_chp_status(chpid)) - return 0; /* no need to do the rest */ - - rc = 0; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - int chp_mask, old_lpm; - - sch = get_subchannel_by_schid(irq); - if (!sch) { - struct schib schib; - int ret; - /* - * We don't know the device yet, but since a path - * may be available now to the device we'll have - * to do recognition again. - * Since we don't have any idea about which chpid - * that beast may be on we'll have to do a stsch - * on all devices, grr... - */ - if (stsch(irq, &schib)) { - /* We're through */ - if (need_rescan) - rc = -EAGAIN; - break; - } - if (need_rescan) { - rc = -EAGAIN; - continue; - } - /* Put it on the slow path. 
*/ - ret = css_enqueue_subchannel_slow(irq); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - rc = -EAGAIN; - continue; - } - - spin_lock_irq(&sch->lock); - - chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch); - - if (chp_mask == 0) { - - spin_unlock_irq(&sch->lock); - continue; - } - old_lpm = sch->lpm; - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | chp_mask) & sch->opm; - if (!old_lpm && sch->lpm) - device_trigger_reprobe(sch); - else if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - - spin_unlock_irq(&sch->lock); - put_device(&sch->dev); - if (fla_mask == 0xffff) - break; - } - return rc; + for_each_subchannel_staged(__s390_process_res_acc, NULL, link); + css_schedule_reprobe(); } static int @@ -445,9 +285,9 @@ __get_chpid_from_lir(void *data) u32 andesc[28]; /* incident-specific information */ u32 isinfo[28]; - } *lir; + } __attribute__ ((packed)) *lir; - lir = (struct lir*) data; + lir = data; if (!(lir->iq&0x80)) /* NULL link incident record */ return -EINVAL; @@ -462,462 +302,606 @@ __get_chpid_from_lir(void *data) return (u16) (lir->indesc[0]&0x000000ff); } -int -chsc_process_crw(void) +struct chsc_sei_nt0_area { + u8 flags; + u8 vf; /* validity flags */ + u8 rs; /* reporting source */ + u8 cc; /* content code */ + u16 fla; /* full link address */ + u16 rsid; /* reporting source id */ + u32 reserved1; + u32 reserved2; + /* ccdf has to be big enough for a link-incident record */ + u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */ +} __packed; + +struct chsc_sei_nt2_area { + u8 flags; /* p and v bit */ + u8 reserved1; + u8 reserved2; + u8 cc; /* content code */ + u32 reserved3[13]; + u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */ +} __packed; + +#define CHSC_SEI_NT0 (1ULL << 63) +#define CHSC_SEI_NT2 (1ULL << 61) + +struct chsc_sei { + struct chsc_header request; + u32 reserved1; + u64 ntsm; /* notification type mask */ + struct chsc_header response; + u32 :24; + u8 nt; + union { + struct chsc_sei_nt0_area nt0_area; + struct chsc_sei_nt2_area nt2_area; + u8 nt_area[PAGE_SIZE - 24]; + } u; +} __packed; + +static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) { - int chpid, ret; - struct { - struct chsc_header request; - u32 reserved1; - u32 reserved2; - u32 reserved3; - struct chsc_header response; - u32 reserved4; - u8 flags; - u8 vf; /* validity flags */ - u8 rs; /* reporting source */ - u8 cc; /* content code */ - u16 fla; /* full link address */ - u16 rsid; /* reporting source id */ - u32 reserved5; - u32 reserved6; - u32 ccdf[96]; /* content-code dependent field */ - /* ccdf has to be big enough for a link-incident record */ - } *sei_area; - - if (!sei_page) - return 0; - /* - * build the chsc request block for store event information - * and do the call - * This function is only called by the machine check handler thread, - * so we don't need locking for the sei_page. 
- */ - sei_area = sei_page; + struct chp_id chpid; + int id; - CIO_TRACE_EVENT( 2, "prcss"); - ret = 0; - do { - int ccode, status; - memset(sei_area, 0, sizeof(*sei_area)); - - sei_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x000e, - }; - - ccode = chsc(sei_area); - if (ccode > 0) - return 0; - - switch (sei_area->response.code) { - /* for debug purposes, check for problems */ - case 0x0001: - CIO_CRW_EVENT(4, "chsc_process_crw: event information " - "successfully stored\n"); - break; /* everything ok */ - case 0x0002: - CIO_CRW_EVENT(2, - "chsc_process_crw: invalid command!\n"); - return 0; - case 0x0003: - CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc " - "request block!\n"); - return 0; - case 0x0005: - CIO_CRW_EVENT(2, "chsc_process_crw: no event " - "information stored\n"); - return 0; - default: - CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n", - sei_area->response.code); - return 0; - } + CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", + sei_area->rs, sei_area->rsid); + if (sei_area->rs != 4) + return; + id = __get_chpid_from_lir(sei_area->ccdf); + if (id < 0) + CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); + else { + chp_id_init(&chpid); + chpid.id = id; + chsc_chp_offline(chpid); + } +} + +static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) +{ + struct chp_link link; + struct chp_id chpid; + int status; + + CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " + "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); + if (sei_area->rs != 4) + return; + chp_id_init(&chpid); + chpid.id = sei_area->rsid; + /* allocate a new channel path structure, if needed */ + status = chp_get_status(chpid); + if (status < 0) + chp_new(chpid); + else if (!status) + return; + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; + if ((sei_area->vf & 0xc0) != 0) { + link.fla = sei_area->fla; + if ((sei_area->vf & 0xc0) == 0xc0) + /* full link address */ + link.fla_mask = 0xffff; + else + /* link address */ + link.fla_mask = 0xff00; + } + s390_process_res_acc(&link); +} + +static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) +{ + struct channel_path *chp; + struct chp_id chpid; + u8 *data; + int num; - /* Check if we might have lost some information. */ - if (sei_area->flags & 0x40) - CIO_CRW_EVENT(2, "chsc_process_crw: Event information " - "has been lost due to overflow!\n"); + CIO_CRW_EVENT(4, "chsc: channel path availability information\n"); + if (sei_area->rs != 0) + return; + data = sei_area->ccdf; + chp_id_init(&chpid); + for (num = 0; num <= __MAX_CHPID; num++) { + if (!chp_test_bit(data, num)) + continue; + chpid.id = num; - if (sei_area->rs != 4) { - CIO_CRW_EVENT(2, "chsc_process_crw: reporting source " - "(%04X) isn't a chpid!\n", - sei_area->rsid); + CIO_CRW_EVENT(4, "Update information for channel path " + "%x.%02x\n", chpid.cssid, chpid.id); + chp = chpid_to_chp(chpid); + if (!chp) { + chp_new(chpid); continue; } + mutex_lock(&chp->lock); + chp_update_desc(chp); + mutex_unlock(&chp->lock); + } +} + +struct chp_config_data { + u8 map[32]; + u8 op; + u8 pc; +}; + +static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area) +{ + struct chp_config_data *data; + struct chp_id chpid; + int num; + char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; - /* which kind of information was stored? 
*/ - switch (sei_area->cc) { - case 1: /* link incident*/ - CIO_CRW_EVENT(4, "chsc_process_crw: " - "channel subsystem reports link incident," - " reporting source is chpid %x\n", - sei_area->rsid); - chpid = __get_chpid_from_lir(sei_area->ccdf); - if (chpid < 0) - CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n", - __FUNCTION__); - else - s390_set_chpid_offline(chpid); + CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); + if (sei_area->rs != 0) + return; + data = (struct chp_config_data *) &(sei_area->ccdf); + chp_id_init(&chpid); + for (num = 0; num <= __MAX_CHPID; num++) { + if (!chp_test_bit(data->map, num)) + continue; + chpid.id = num; + pr_notice("Processing %s for channel path %x.%02x\n", + events[data->op], chpid.cssid, chpid.id); + switch (data->op) { + case 0: + chp_cfg_schedule(chpid, 1); break; - - case 2: /* i/o resource accessibiliy */ - CIO_CRW_EVENT(4, "chsc_process_crw: " - "channel subsystem reports some I/O " - "devices may have become accessible\n"); - pr_debug("Data received after sei: \n"); - pr_debug("Validity flags: %x\n", sei_area->vf); - - /* allocate a new channel path structure, if needed */ - status = get_chp_status(sei_area->rsid); - if (status < 0) - new_channel_path(sei_area->rsid); - else if (!status) - return 0; - if ((sei_area->vf & 0x80) == 0) { - pr_debug("chpid: %x\n", sei_area->rsid); - ret = s390_process_res_acc(sei_area->rsid, - 0, 0); - } else if ((sei_area->vf & 0xc0) == 0x80) { - pr_debug("chpid: %x link addr: %x\n", - sei_area->rsid, sei_area->fla); - ret = s390_process_res_acc(sei_area->rsid, - sei_area->fla, - 0xff00); - } else if ((sei_area->vf & 0xc0) == 0xc0) { - pr_debug("chpid: %x full link addr: %x\n", - sei_area->rsid, sei_area->fla); - ret = s390_process_res_acc(sei_area->rsid, - sei_area->fla, - 0xffff); - } - pr_debug("\n"); - + case 1: + chp_cfg_schedule(chpid, 0); break; - - default: /* other stuff */ - CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n", - sei_area->cc); + case 2: + chp_cfg_cancel_deconfigure(chpid); break; } - } while (sei_area->flags & 0x80); - return ret; + } } -static int -chp_add(int chpid) +static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) { - struct subchannel *sch; - int irq, ret, rc; - char dbf_txt[15]; + int ret; - if (!get_chp_status(chpid)) - return 0; /* no need to do the rest */ - - sprintf(dbf_txt, "cadd%x", chpid); - CIO_TRACE_EVENT(2, dbf_txt); + CIO_CRW_EVENT(4, "chsc: scm change notification\n"); + if (sei_area->rs != 7) + return; - rc = 0; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - int i; + ret = scm_update_information(); + if (ret) + CIO_CRW_EVENT(0, "chsc: updating change notification" + " failed (rc=%d).\n", ret); +} - sch = get_subchannel_by_schid(irq); - if (!sch) { - struct schib schib; +static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) +{ + int ret; - if (stsch(irq, &schib)) { - /* We're through */ - if (need_rescan) - rc = -EAGAIN; - break; - } - if (need_rescan) { - rc = -EAGAIN; - continue; - } - /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(irq); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - rc = -EAGAIN; - continue; - } - - spin_lock(&sch->lock); - for (i=0; i<8; i++) - if (sch->schib.pmcw.chpid[i] == chpid) { - if (stsch(sch->irq, &sch->schib) != 0) { - /* Endgame. 
*/ - spin_unlock(&sch->lock); - return rc; - } - break; - } - if (i==8) { - spin_unlock(&sch->lock); - return rc; - } - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | 0x80 >> i) & sch->opm; + CIO_CRW_EVENT(4, "chsc: scm available information\n"); + if (sei_area->rs != 7) + return; - if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); + ret = scm_process_availability_information(); + if (ret) + CIO_CRW_EVENT(0, "chsc: process availability information" + " failed (rc=%d).\n", ret); +} - spin_unlock(&sch->lock); - put_device(&sch->dev); +static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) +{ + switch (sei_area->cc) { + case 1: + zpci_event_error(sei_area->ccdf); + break; + case 2: + zpci_event_availability(sei_area->ccdf); + break; + default: + CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", + sei_area->cc); + break; } - return rc; } -/* - * Handling of crw machine checks with channel path source. - */ -int -chp_process_crw(int chpid, int on) +static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) { - if (on == 0) { - /* Path has gone. We use the link incident routine.*/ - s390_set_chpid_offline(chpid); - return 0; /* De-register is async anyway. */ + /* which kind of information was stored? */ + switch (sei_area->cc) { + case 1: /* link incident*/ + chsc_process_sei_link_incident(sei_area); + break; + case 2: /* i/o resource accessibility */ + chsc_process_sei_res_acc(sei_area); + break; + case 7: /* channel-path-availability information */ + chsc_process_sei_chp_avail(sei_area); + break; + case 8: /* channel-path-configuration notification */ + chsc_process_sei_chp_config(sei_area); + break; + case 12: /* scm change notification */ + chsc_process_sei_scm_change(sei_area); + break; + case 14: /* scm available notification */ + chsc_process_sei_scm_avail(sei_area); + break; + default: /* other stuff */ + CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", + sei_area->cc); + break; + } + + /* Check if we might have lost some information. */ + if (sei_area->flags & 0x40) { + CIO_CRW_EVENT(2, "chsc: event overflow\n"); + css_schedule_eval_all(); } - /* - * Path has come. Allocate a new channel path structure, - * if needed. - */ - if (get_chp_status(chpid) < 0) - new_channel_path(chpid); - /* Avoid the extra overhead in process_rec_acc. */ - return chp_add(chpid); } -static inline int -__check_for_io_and_kill(struct subchannel *sch, int index) +static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) { - int cc; + static int ntsm_unsupported; - if (!device_is_online(sch)) - /* cio could be doing I/O. */ - return 0; - cc = stsch(sch->irq, &sch->schib); - if (cc) - return 0; - if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { - device_set_waiting(sch); - return 1; + while (true) { + memset(sei, 0, sizeof(*sei)); + sei->request.length = 0x0010; + sei->request.code = 0x000e; + if (!ntsm_unsupported) + sei->ntsm = ntsm; + + if (chsc(sei)) + break; + + if (sei->response.code != 0x0001) { + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", + sei->response.code, sei->ntsm); + + if (sei->response.code == 3 && sei->ntsm) { + /* Fallback for old firmware. 
*/ + ntsm_unsupported = 1; + continue; + } + break; + } + + CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); + switch (sei->nt) { + case 0: + chsc_process_sei_nt0(&sei->u.nt0_area); + break; + case 2: + chsc_process_sei_nt2(&sei->u.nt2_area); + break; + default: + CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); + break; + } + + if (!(sei->u.nt0_area.flags & 0x80)) + break; } - return 0; } -static inline void -__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) +/* + * Handle channel subsystem related CRWs. + * Use store event information to find out what's going on. + * + * Note: Access to sei_page is serialized through machine check handler + * thread, so no need for locking. + */ +static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { - int chp, old_lpm; - unsigned long flags; + struct chsc_sei *sei = sei_page; - if (!sch->ssd_info.valid) + if (overflow) { + css_schedule_eval_all(); return; - - spin_lock_irqsave(&sch->lock, flags); - old_lpm = sch->lpm; - for (chp = 0; chp < 8; chp++) { - if (sch->ssd_info.chpid[chp] != chpid) - continue; + } + CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, + crw0->erc, crw0->rsid); - if (on) { - sch->opm |= (0x80 >> chp); - sch->lpm |= (0x80 >> chp); - if (!old_lpm) - device_trigger_reprobe(sch); - else if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - } else { - sch->opm &= ~(0x80 >> chp); - sch->lpm &= ~(0x80 >> chp); - /* - * Give running I/O a grace period in which it - * can successfully terminate, even using the - * just varied off path. Then kill it. - */ - if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { - if (css_enqueue_subchannel_slow(sch->irq)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - } else if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - } - break; + CIO_TRACE_EVENT(2, "prcss"); + chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); +} + +void chsc_chp_online(struct chp_id chpid) +{ + char dbf_txt[15]; + struct chp_link link; + + sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); + CIO_TRACE_EVENT(2, dbf_txt); + + if (chp_get_status(chpid) != 0) { + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; + /* Wait until previous actions have settled. */ + css_wait_for_slow_path(); + for_each_subchannel_staged(__s390_process_res_acc, NULL, + &link); + css_schedule_reprobe(); } - spin_unlock_irqrestore(&sch->lock, flags); } -static int -s390_subchannel_vary_chpid_off(struct device *dev, void *data) +static void __s390_subchannel_vary_chpid(struct subchannel *sch, + struct chp_id chpid, int on) { - struct subchannel *sch; - __u8 *chpid; + unsigned long flags; + struct chp_link link; + + memset(&link, 0, sizeof(struct chp_link)); + link.chpid = chpid; + spin_lock_irqsave(sch->lock, flags); + if (sch->driver && sch->driver->chp_event) + sch->driver->chp_event(sch, &link, + on ? 
CHP_VARY_ON : CHP_VARY_OFF);
+ spin_unlock_irqrestore(sch->lock, flags);
+}
- sch = to_subchannel(dev);
- chpid = data;
+static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
+{
+ struct chp_id *chpid = data;
 __s390_subchannel_vary_chpid(sch, *chpid, 0);
 return 0;
 }
-static int
-s390_subchannel_vary_chpid_on(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
 {
- struct subchannel *sch;
- __u8 *chpid;
-
- sch = to_subchannel(dev);
- chpid = data;
+ struct chp_id *chpid = data;
 __s390_subchannel_vary_chpid(sch, *chpid, 1);
 return 0;
 }
-/*
- * Function: s390_vary_chpid
- * Varies the specified chpid online or offline
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
 */
-static int
-s390_vary_chpid( __u8 chpid, int on)
+int chsc_chp_vary(struct chp_id chpid, int on)
 {
- char dbf_text[15];
- int status, irq, ret;
- struct subchannel *sch;
+ struct channel_path *chp = chpid_to_chp(chpid);
+
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+ /*
+ * Redo PathVerification on the devices the chpid connects to
+ */
+ if (on) {
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
+ for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
+ NULL, &chpid);
+ css_schedule_reprobe();
+ } else
+ for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
+ NULL, &chpid);
- sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
- CIO_TRACE_EVENT( 2, dbf_text);
+ return 0;
+}
- status = get_chp_status(chpid);
- if (status < 0) {
- printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
- return -EINVAL;
- }
+static void
+chsc_remove_cmg_attr(struct channel_subsystem *css)
+{
+ int i;
- if (!on && !status) {
- printk(KERN_ERR "chpid %x is already offline\n", chpid);
- return -EINVAL;
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ if (!css->chps[i])
+ continue;
+ chp_remove_cmg_attr(css->chps[i]);
 }
+}
- set_chp_logically_online(chpid, on);
+static int
+chsc_add_cmg_attr(struct channel_subsystem *css)
+{
+ int i, ret;
- /*
- * Redo PathVerification on the devices the chpid connects to
- */
+ ret = 0;
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ if (!css->chps[i])
+ continue;
+ ret = chp_add_cmg_attr(css->chps[i]);
+ if (ret)
+ goto cleanup;
+ }
+ return ret;
+cleanup:
+ for (--i; i >= 0; i--) {
+ if (!css->chps[i])
+ continue;
+ chp_remove_cmg_attr(css->chps[i]);
+ }
+ return ret;
+}
- bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
- s390_subchannel_vary_chpid_on :
- s390_subchannel_vary_chpid_off);
- if (!on)
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
+{
+ struct {
+ struct chsc_header request;
+ u32 operation_code : 2;
+ u32 : 30;
+ u32 key : 4;
+ u32 : 28;
+ u32 zeroes1;
+ u32 cub_addr1;
+ u32 zeroes2;
+ u32 cub_addr2;
+ u32 reserved[13];
+ struct chsc_header response;
+ u32 status : 8;
+ u32 : 4;
+ u32 fmt : 4;
+ u32 : 16;
+ } __attribute__ ((packed)) *secm_area;
+ int ret, ccode;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ secm_area = chsc_page;
+ secm_area->request.length = 0x0050;
+ secm_area->request.code = 0x0016;
+
+ secm_area->key = PAGE_DEFAULT_KEY >> 4;
+ secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
+ secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
+
+ secm_area->operation_code = enable ? 0 : 1;
+
+ ccode = chsc(secm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ?
-ENODEV : -EBUSY; goto out; - /* Scan for new devices on varied on path. */ - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - struct schib schib; + } - if (need_rescan) - break; - sch = get_subchannel_by_schid(irq); - if (sch) { - put_device(&sch->dev); - continue; - } - if (stsch(irq, &schib)) - /* We're through */ - break; - /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(irq); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } + switch (secm_area->response.code) { + case 0x0102: + case 0x0103: + ret = -EINVAL; + break; + default: + ret = chsc_error_from_response(secm_area->response.code); } + if (ret != 0) + CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", + secm_area->response.code); out: - if (need_rescan || css_slow_subchannels_exist()) - queue_work(slow_path_wq, &slow_path_work); - return 0; + spin_unlock_irq(&chsc_page_lock); + return ret; } -/* - * Files for the channel path entries. - */ -static ssize_t -chp_status_show(struct device *dev, struct device_attribute *attr, char *buf) +int +chsc_secm(struct channel_subsystem *css, int enable) { - struct channel_path *chp = container_of(dev, struct channel_path, dev); + int ret; - if (!chp) - return 0; - return (get_chp_status(chp->id) ? sprintf(buf, "online\n") : - sprintf(buf, "offline\n")); + if (enable && !css->cm_enabled) { + css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!css->cub_addr1 || !css->cub_addr2) { + free_page((unsigned long)css->cub_addr1); + free_page((unsigned long)css->cub_addr2); + return -ENOMEM; + } + } + ret = __chsc_do_secm(css, enable); + if (!ret) { + css->cm_enabled = enable; + if (css->cm_enabled) { + ret = chsc_add_cmg_attr(css); + if (ret) { + __chsc_do_secm(css, 0); + css->cm_enabled = 0; + } + } else + chsc_remove_cmg_attr(css); + } + if (!css->cm_enabled) { + free_page((unsigned long)css->cub_addr1); + free_page((unsigned long)css->cub_addr2); + } + return ret; } -static ssize_t -chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, + int c, int m, void *page) { - struct channel_path *cp = container_of(dev, struct channel_path, dev); - char cmd[10]; - int num_args; - int error; - - num_args = sscanf(buf, "%5s", cmd); - if (!num_args) - return count; - - if (!strnicmp(cmd, "on", 2)) - error = s390_vary_chpid(cp->id, 1); - else if (!strnicmp(cmd, "off", 3)) - error = s390_vary_chpid(cp->id, 0); - else - error = -EINVAL; + struct chsc_scpd *scpd_area; + int ccode, ret; - return error < 0 ? error : count; + if ((rfmt == 1) && !css_general_characteristics.fcs) + return -EINVAL; + if ((rfmt == 2) && !css_general_characteristics.cib) + return -EINVAL; -} + memset(page, 0, PAGE_SIZE); + scpd_area = page; + scpd_area->request.length = 0x0010; + scpd_area->request.code = 0x0002; + scpd_area->cssid = chpid.cssid; + scpd_area->first_chpid = chpid.id; + scpd_area->last_chpid = chpid.id; + scpd_area->m = m; + scpd_area->c = c; + scpd_area->fmt = fmt; + scpd_area->rfmt = rfmt; + + ccode = chsc(scpd_area); + if (ccode > 0) + return (ccode == 3) ? 
-ENODEV : -EBUSY; -static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); + ret = chsc_error_from_response(scpd_area->response.code); + if (ret) + CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", + scpd_area->response.code); + return ret; +} +EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); -static ssize_t -chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) +int chsc_determine_base_channel_path_desc(struct chp_id chpid, + struct channel_path_desc *desc) { - struct channel_path *chp = container_of(dev, struct channel_path, dev); + struct chsc_response_struct *chsc_resp; + struct chsc_scpd *scpd_area; + unsigned long flags; + int ret; - if (!chp) - return 0; - return sprintf(buf, "%x\n", chp->desc.desc); + spin_lock_irqsave(&chsc_page_lock, flags); + scpd_area = chsc_page; + ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area); + if (ret) + goto out; + chsc_resp = (void *)&scpd_area->response; + memcpy(desc, &chsc_resp->data, sizeof(*desc)); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); + return ret; } -static DEVICE_ATTR(type, 0444, chp_type_show, NULL); - -static struct attribute * chp_attrs[] = { - &dev_attr_status.attr, - &dev_attr_type.attr, - NULL, -}; +int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, + struct channel_path_desc_fmt1 *desc) +{ + struct chsc_response_struct *chsc_resp; + struct chsc_scpd *scpd_area; + unsigned long flags; + int ret; -static struct attribute_group chp_attr_group = { - .attrs = chp_attrs, -}; + spin_lock_irqsave(&chsc_page_lock, flags); + scpd_area = chsc_page; + ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area); + if (ret) + goto out; + chsc_resp = (void *)&scpd_area->response; + memcpy(desc, &chsc_resp->data, sizeof(*desc)); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); + return ret; +} static void -chp_release(struct device *dev) +chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, + struct cmg_chars *chars) { - struct channel_path *cp; - - cp = container_of(dev, struct channel_path, dev); - kfree(cp); + struct cmg_chars *cmg_chars; + int i, mask; + + cmg_chars = chp->cmg_chars; + for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { + mask = 0x80 >> (i + 3); + if (cmcv & mask) + cmg_chars->values[i] = chars->values[i]; + else + cmg_chars->values[i] = 0; + } } -static int -chsc_determine_channel_path_description(int chpid, - struct channel_path_desc *desc) +int chsc_get_channel_measurement_chars(struct channel_path *chp) { + struct cmg_chars *cmg_chars; int ccode, ret; struct { @@ -929,129 +913,138 @@ chsc_determine_channel_path_description(int chpid, u32 zeroes1; struct chsc_header response; u32 zeroes2; - struct channel_path_desc desc; - } *scpd_area; - - scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!scpd_area) + u32 not_valid : 1; + u32 shared : 1; + u32 : 22; + u32 chpid : 8; + u32 cmcv : 5; + u32 : 11; + u32 cmgq : 8; + u32 cmg : 8; + u32 zeroes3; + u32 data[NR_MEASUREMENT_CHARS]; + } __attribute__ ((packed)) *scmc_area; + + chp->cmg_chars = NULL; + cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL); + if (!cmg_chars) return -ENOMEM; - scpd_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0002, - }; - - scpd_area->first_chpid = chpid; - scpd_area->last_chpid = chpid; + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + scmc_area = chsc_page; + scmc_area->request.length = 0x0010; + scmc_area->request.code = 0x0022; + scmc_area->first_chpid = chp->chpid.id; + 
scmc_area->last_chpid = chp->chpid.id; - ccode = chsc(scpd_area); + ccode = chsc(scmc_area); if (ccode > 0) { ret = (ccode == 3) ? -ENODEV : -EBUSY; goto out; } - switch (scpd_area->response.code) { - case 0x0001: /* Success. */ - memcpy(desc, &scpd_area->desc, - sizeof(struct channel_path_desc)); - ret = 0; - break; - case 0x0003: /* Invalid block. */ - case 0x0007: /* Invalid format. */ - case 0x0008: /* Other invalid block. */ - CIO_CRW_EVENT(2, "Error in chsc request block!\n"); - ret = -EINVAL; - break; - case 0x0004: /* Command not provided in model. */ - CIO_CRW_EVENT(2, "Model does not provide scpd\n"); - ret = -EOPNOTSUPP; - break; - default: - CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", - scpd_area->response.code); - ret = -EIO; + ret = chsc_error_from_response(scmc_area->response.code); + if (ret) { + CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", + scmc_area->response.code); + goto out; + } + if (scmc_area->not_valid) { + chp->cmg = -1; + chp->shared = -1; + goto out; + } + chp->cmg = scmc_area->cmg; + chp->shared = scmc_area->shared; + if (chp->cmg != 2 && chp->cmg != 3) { + /* No cmg-dependent data. */ + goto out; } + chp->cmg_chars = cmg_chars; + chsc_initialize_cmg_chars(chp, scmc_area->cmcv, + (struct cmg_chars *) &scmc_area->data); out: - free_page((unsigned long)scpd_area); + spin_unlock_irq(&chsc_page_lock); + if (!chp->cmg_chars) + kfree(cmg_chars); + return ret; } -/* - * Entries for chpids on the system bus. - * This replaces /proc/chpids. - */ -static int -new_channel_path(int chpid) +int __init chsc_init(void) { - struct channel_path *chp; int ret; - chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL); - if (!chp) - return -ENOMEM; - memset(chp, 0, sizeof(struct channel_path)); - - /* fill in status, etc. */ - chp->id = chpid; - chp->state = 1; - chp->dev = (struct device) { - .parent = &css_bus_device, - .release = chp_release, - }; - snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); - - /* Obtain channel path description and fill it in. 
*/ - ret = chsc_determine_channel_path_description(chpid, &chp->desc); - if (ret) - goto out_free; - - /* make it known to the system */ - ret = device_register(&chp->dev); - if (ret) { - printk(KERN_WARNING "%s: could not register %02x\n", - __func__, chpid); - goto out_free; + sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sei_page || !chsc_page) { + ret = -ENOMEM; + goto out_err; } - ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); - if (ret) { - device_unregister(&chp->dev); - goto out_free; - } else - chps[chpid] = chp; + ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); + if (ret) + goto out_err; return ret; -out_free: - kfree(chp); +out_err: + free_page((unsigned long)chsc_page); + free_page((unsigned long)sei_page); return ret; } -void * -chsc_get_chp_desc(struct subchannel *sch, int chp_no) +void __init chsc_init_cleanup(void) { - struct channel_path *chp; - struct channel_path_desc *desc; - - chp = chps[sch->schib.pmcw.chpid[chp_no]]; - if (!chp) - return NULL; - desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); - if (!desc) - return NULL; - memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); - return desc; + crw_unregister_handler(CRW_RSC_CSS); + free_page((unsigned long)chsc_page); + free_page((unsigned long)sei_page); } - -static int __init -chsc_alloc_sei_area(void) +int chsc_enable_facility(int operation_code) { - sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!sei_page) - printk(KERN_WARNING"Can't allocate page for processing of " \ - "chsc machine checks!\n"); - return (sei_page ? 0 : -ENOMEM); -} + unsigned long flags; + int ret; + struct { + struct chsc_header request; + u8 reserved1:4; + u8 format:4; + u8 reserved2; + u16 operation_code; + u32 reserved3; + u32 reserved4; + u32 operation_data_area[252]; + struct chsc_header response; + u32 reserved5:4; + u32 format2:4; + u32 reserved6:24; + } __attribute__ ((packed)) *sda_area; + + spin_lock_irqsave(&chsc_page_lock, flags); + memset(chsc_page, 0, PAGE_SIZE); + sda_area = chsc_page; + sda_area->request.length = 0x0400; + sda_area->request.code = 0x0031; + sda_area->operation_code = operation_code; + + ret = chsc(sda_area); + if (ret > 0) { + ret = (ret == 3) ? 
-ENODEV : -EBUSY; + goto out; + } -subsys_initcall(chsc_alloc_sei_area); + switch (sda_area->response.code) { + case 0x0101: + ret = -EOPNOTSUPP; + break; + default: + ret = chsc_error_from_response(sda_area->response.code); + } + if (ret != 0) + CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", + operation_code, sda_area->response.code); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); + return ret; +} struct css_general_char css_general_characteristics; struct css_chsc_char css_chsc_characteristics; @@ -1068,43 +1061,190 @@ chsc_determine_css_characteristics(void) struct chsc_header response; u32 reserved4; u32 general_char[510]; - u32 chsc_char[518]; - } *scsc_area; + u32 chsc_char[508]; + } __attribute__ ((packed)) *scsc_area; - scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!scsc_area) { - printk(KERN_WARNING"cio: Was not able to determine available" \ - "CHSCs due to no memory.\n"); - return -ENOMEM; - } - - scsc_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0010, - }; + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + scsc_area = chsc_page; + scsc_area->request.length = 0x0010; + scsc_area->request.code = 0x0010; result = chsc(scsc_area); if (result) { - printk(KERN_WARNING"cio: Was not able to determine " \ - "available CHSCs, cc=%i.\n", result); - result = -EIO; + result = (result == 3) ? -ENODEV : -EBUSY; goto exit; } - if (scsc_area->response.code != 1) { - printk(KERN_WARNING"cio: Was not able to determine " \ - "available CHSCs.\n"); - result = -EIO; - goto exit; - } - memcpy(&css_general_characteristics, scsc_area->general_char, - sizeof(css_general_characteristics)); - memcpy(&css_chsc_characteristics, scsc_area->chsc_char, - sizeof(css_chsc_characteristics)); + result = chsc_error_from_response(scsc_area->response.code); + if (result == 0) { + memcpy(&css_general_characteristics, scsc_area->general_char, + sizeof(css_general_characteristics)); + memcpy(&css_chsc_characteristics, scsc_area->chsc_char, + sizeof(css_chsc_characteristics)); + } else + CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", + scsc_area->response.code); exit: - free_page ((unsigned long) scsc_area); + spin_unlock_irq(&chsc_page_lock); return result; } EXPORT_SYMBOL_GPL(css_general_characteristics); EXPORT_SYMBOL_GPL(css_chsc_characteristics); + +int chsc_sstpc(void *page, unsigned int op, u16 ctrl) +{ + struct { + struct chsc_header request; + unsigned int rsvd0; + unsigned int op : 8; + unsigned int rsvd1 : 8; + unsigned int ctrl : 16; + unsigned int rsvd2[5]; + struct chsc_header response; + unsigned int rsvd3[7]; + } __attribute__ ((packed)) *rr; + int rc; + + memset(page, 0, PAGE_SIZE); + rr = page; + rr->request.length = 0x0020; + rr->request.code = 0x0033; + rr->op = op; + rr->ctrl = ctrl; + rc = chsc(rr); + if (rc) + return -EIO; + rc = (rr->response.code == 0x0001) ? 0 : -EIO; + return rc; +} + +int chsc_sstpi(void *page, void *result, size_t size) +{ + struct { + struct chsc_header request; + unsigned int rsvd0[3]; + struct chsc_header response; + char data[size]; + } __attribute__ ((packed)) *rr; + int rc; + + memset(page, 0, PAGE_SIZE); + rr = page; + rr->request.length = 0x0010; + rr->request.code = 0x0038; + rc = chsc(rr); + if (rc) + return -EIO; + memcpy(result, &rr->data, size); + return (rr->response.code == 0x0001) ? 
0 : -EIO; +} + +int chsc_siosl(struct subchannel_id schid) +{ + struct { + struct chsc_header request; + u32 word1; + struct subchannel_id sid; + u32 word3; + struct chsc_header response; + u32 word[11]; + } __attribute__ ((packed)) *siosl_area; + unsigned long flags; + int ccode; + int rc; + + spin_lock_irqsave(&chsc_page_lock, flags); + memset(chsc_page, 0, PAGE_SIZE); + siosl_area = chsc_page; + siosl_area->request.length = 0x0010; + siosl_area->request.code = 0x0046; + siosl_area->word1 = 0x80000000; + siosl_area->sid = schid; + + ccode = chsc(siosl_area); + if (ccode > 0) { + if (ccode == 3) + rc = -ENODEV; + else + rc = -EBUSY; + CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n", + schid.ssid, schid.sch_no, ccode); + goto out; + } + rc = chsc_error_from_response(siosl_area->response.code); + if (rc) + CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", + schid.ssid, schid.sch_no, + siosl_area->response.code); + else + CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", + schid.ssid, schid.sch_no); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(chsc_siosl); + +/** + * chsc_scm_info() - store SCM information (SSI) + * @scm_area: request and response block for SSI + * @token: continuation token + * + * Returns 0 on success. + */ +int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token) +{ + int ccode, ret; + + memset(scm_area, 0, sizeof(*scm_area)); + scm_area->request.length = 0x0020; + scm_area->request.code = 0x004C; + scm_area->reqtok = token; + + ccode = chsc(scm_area); + if (ccode > 0) { + ret = (ccode == 3) ? -ENODEV : -EBUSY; + goto out; + } + ret = chsc_error_from_response(scm_area->response.code); + if (ret != 0) + CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n", + scm_area->response.code); +out: + return ret; +} +EXPORT_SYMBOL_GPL(chsc_scm_info); + +/** + * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info. + * @schid: id of the subchannel on which PNSO is performed + * @brinfo_area: request and response block for the operation + * @resume_token: resume token for multiblock response + * @cnc: Boolean change-notification control + * + * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) + * + * Returns 0 on success. 
+ */ +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc) +{ + memset(brinfo_area, 0, sizeof(*brinfo_area)); + brinfo_area->request.length = 0x0030; + brinfo_area->request.code = 0x003d; /* network-subchannel operation */ + brinfo_area->m = schid.m; + brinfo_area->ssid = schid.ssid; + brinfo_area->sch = schid.sch_no; + brinfo_area->cssid = schid.cssid; + brinfo_area->oc = 0; /* Store-network-bridging-information list */ + brinfo_area->resume_token = resume_token; + brinfo_area->n = (cnc != 0); + if (chsc(brinfo_area)) + return -EIO; + return chsc_error_from_response(brinfo_area->response.code); +} +EXPORT_SYMBOL_GPL(chsc_pnso_brinfo); diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index be20da49d14..76c9b50700b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -1,66 +1,238 @@ #ifndef S390_CHSC_H #define S390_CHSC_H -#define NR_CHPIDS 256 +#include <linux/types.h> +#include <linux/device.h> +#include <asm/css_chars.h> +#include <asm/chpid.h> +#include <asm/chsc.h> +#include <asm/schid.h> +#include <asm/qdio.h> -#define CHSC_SEI_ACC_CHPID 1 -#define CHSC_SEI_ACC_LINKADDR 2 -#define CHSC_SEI_ACC_FULLLINKADDR 3 +#define CHSC_SDA_OC_MSS 0x2 -struct chsc_header { - u16 length; - u16 code; -}; +#define NR_MEASUREMENT_CHARS 5 +struct cmg_chars { + u32 values[NR_MEASUREMENT_CHARS]; +} __attribute__ ((packed)); + +#define NR_MEASUREMENT_ENTRIES 8 +struct cmg_entry { + u32 values[NR_MEASUREMENT_ENTRIES]; +} __attribute__ ((packed)); -struct channel_path_desc { +struct channel_path_desc_fmt1 { u8 flags; u8 lsn; u8 desc; u8 chpid; - u8 swla; - u8 zeroes; - u8 chla; + u32:24; u8 chpp; -}; + u32 unused[2]; + u16 chid; + u32:16; + u16 mdc; + u16:13; + u8 r:1; + u8 s:1; + u8 f:1; + u32 zeros[2]; +} __attribute__ ((packed)); -struct channel_path { - int id; - int state; - struct channel_path_desc desc; - struct device dev; -}; - -extern void s390_process_css( void ); -extern void chsc_validate_chpids(struct subchannel *); -extern void chpid_is_actually_online(int); - -struct css_general_char { - u64 : 41; - u32 aif : 1; /* bit 41 */ - u32 : 3; - u32 mcss : 1; /* bit 45 */ - u32 : 2; - u32 ext_mb : 1; /* bit 48 */ - u32 : 7; - u32 aif_tdd : 1; /* bit 56 */ - u32 : 10; - u32 aif_osa : 1; /* bit 67 */ - u32 : 28; -}__attribute__((packed)); +struct channel_path; struct css_chsc_char { u64 res; - u64 : 43; + u64 : 20; + u32 secm : 1; /* bit 84 */ + u32 : 1; + u32 scmc : 1; /* bit 86 */ + u32 : 20; u32 scssc : 1; /* bit 107 */ u32 scsscf : 1; /* bit 108 */ - u32 : 19; + u32:7; + u32 pnso:1; /* bit 116 */ + u32:11; }__attribute__((packed)); -extern struct css_general_char css_general_characteristics; extern struct css_chsc_char css_chsc_characteristics; +struct chsc_ssd_info { + u8 path_mask; + u8 fla_valid_mask; + struct chp_id chpid[8]; + u16 fla[8]; +}; + +struct chsc_ssqd_area { + struct chsc_header request; + u16:10; + u8 ssid:2; + u8 fmt:4; + u16 first_sch; + u16:16; + u16 last_sch; + u32:32; + struct chsc_header response; + u32:32; + struct qdio_ssqd_desc qdio_ssqd; +} __packed; + +struct chsc_scssc_area { + struct chsc_header request; + u16 operation_code; + u16:16; + u32:32; + u32:32; + u64 summary_indicator_addr; + u64 subchannel_indicator_addr; + u32 ks:4; + u32 kc:4; + u32:21; + u32 isc:3; + u32 word_with_d_bit; + u32:32; + struct subchannel_id schid; + u32 reserved[1004]; + struct chsc_header response; + u32:32; +} __packed; + +struct chsc_scpd { + struct chsc_header 
request; + u32:2; + u32 m:1; + u32 c:1; + u32 fmt:4; + u32 cssid:8; + u32:4; + u32 rfmt:4; + u32 first_chpid:8; + u32:24; + u32 last_chpid:8; + u32 zeroes1; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; +} __attribute__ ((packed)); + + +extern int chsc_get_ssd_info(struct subchannel_id schid, + struct chsc_ssd_info *ssd); extern int chsc_determine_css_characteristics(void); -extern int css_characteristics_avail; +extern int chsc_init(void); +extern void chsc_init_cleanup(void); + +extern int chsc_enable_facility(int); +struct channel_subsystem; +extern int chsc_secm(struct channel_subsystem *, int); +int __chsc_do_secm(struct channel_subsystem *css, int enable); + +int chsc_chp_vary(struct chp_id chpid, int on); +int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, + int c, int m, void *page); +int chsc_determine_base_channel_path_desc(struct chp_id chpid, + struct channel_path_desc *desc); +int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, + struct channel_path_desc_fmt1 *desc); +void chsc_chp_online(struct chp_id chpid); +void chsc_chp_offline(struct chp_id chpid); +int chsc_get_channel_measurement_chars(struct channel_path *chp); +int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); +int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, + u64 summary_indicator_addr, u64 subchannel_indicator_addr); +int chsc_error_from_response(int response); + +int chsc_siosl(struct subchannel_id schid); + +/* Functions and definitions to query storage-class memory. */ +struct sale { + u64 sa; + u32 p:4; + u32 op_state:4; + u32 data_state:4; + u32 rank:4; + u32 r:1; + u32:7; + u32 rid:8; + u32:32; +} __packed; + +struct chsc_scm_info { + struct chsc_header request; + u32:32; + u64 reqtok; + u32 reserved1[4]; + struct chsc_header response; + u64:56; + u8 rq; + u32 mbc; + u64 msa; + u16 is; + u16 mmc; + u32 mci; + u64 nr_scm_ini; + u64 nr_scm_unini; + u32 reserved2[10]; + u64 restok; + struct sale scmal[248]; +} __packed; + +int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); + +struct chsc_brinfo_resume_token { + u64 t1; + u64 t2; +} __packed; + +struct chsc_brinfo_naihdr { + struct chsc_brinfo_resume_token resume_token; + u32:32; + u32 instance; + u32:24; + u8 naids; + u32 reserved[3]; +} __packed; + +struct chsc_pnso_area { + struct chsc_header request; + u8:2; + u8 m:1; + u8:5; + u8:2; + u8 ssid:2; + u8 fmt:4; + u16 sch; + u8:8; + u8 cssid; + u16:16; + u8 oc; + u32:24; + struct chsc_brinfo_resume_token resume_token; + u32 n:1; + u32:31; + u32 reserved[3]; + struct chsc_header response; + u32:32; + struct chsc_brinfo_naihdr naihdr; + union { + struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0]; + struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0]; + struct qdio_brinfo_entry_l2 l2[0]; + } entries; +} __packed; + +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc); + +#ifdef CONFIG_SCM_BUS +int scm_update_information(void); +int scm_process_availability_information(void); +#else /* CONFIG_SCM_BUS */ +static inline int scm_update_information(void) { return 0; } +static inline int scm_process_availability_information(void) { return 0; } +#endif /* CONFIG_SCM_BUS */ + -extern void *chsc_get_chp_desc(struct subchannel*, int); #endif diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c new file mode 100644 index 00000000000..3d22d2a4ce1 --- /dev/null +++ b/drivers/s390/cio/chsc_sch.c @@ -0,0 +1,1017 @@ +/* + * Driver 
for s390 chsc subchannels + * + * Copyright IBM Corp. 2008, 2011 + * + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * + */ + +#include <linux/slab.h> +#include <linux/compat.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/kernel_stat.h> + +#include <asm/compat.h> +#include <asm/cio.h> +#include <asm/chsc.h> +#include <asm/isc.h> + +#include "cio.h" +#include "cio_debug.h" +#include "css.h" +#include "chsc_sch.h" +#include "ioasm.h" + +static debug_info_t *chsc_debug_msg_id; +static debug_info_t *chsc_debug_log_id; + +static struct chsc_request *on_close_request; +static struct chsc_async_area *on_close_chsc_area; +static DEFINE_MUTEX(on_close_mutex); + +#define CHSC_MSG(imp, args...) do { \ + debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \ + } while (0) + +#define CHSC_LOG(imp, txt) do { \ + debug_text_event(chsc_debug_log_id, imp , txt); \ + } while (0) + +static void CHSC_LOG_HEX(int level, void *data, int length) +{ + while (length > 0) { + debug_event(chsc_debug_log_id, level, data, length); + length -= chsc_debug_log_id->buf_size; + data += chsc_debug_log_id->buf_size; + } +} + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("driver for s390 chsc subchannels"); +MODULE_LICENSE("GPL"); + +static void chsc_subchannel_irq(struct subchannel *sch) +{ + struct chsc_private *private = dev_get_drvdata(&sch->dev); + struct chsc_request *request = private->request; + struct irb *irb = &__get_cpu_var(cio_irb); + + CHSC_LOG(4, "irb"); + CHSC_LOG_HEX(4, irb, sizeof(*irb)); + inc_irq_stat(IRQIO_CSC); + + /* Copy irb to provided request and set done. */ + if (!request) { + CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", + sch->schid.ssid, sch->schid.sch_no); + return; + } + private->request = NULL; + memcpy(&request->irb, irb, sizeof(*irb)); + cio_update_schib(sch); + complete(&request->completion); + put_device(&sch->dev); +} + +static int chsc_subchannel_probe(struct subchannel *sch) +{ + struct chsc_private *private; + int ret; + + CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n", + sch->schid.ssid, sch->schid.sch_no); + sch->isc = CHSC_SCH_ISC; + private = kzalloc(sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + dev_set_drvdata(&sch->dev, private); + ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); + if (ret) { + CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n", + sch->schid.ssid, sch->schid.sch_no, ret); + dev_set_drvdata(&sch->dev, NULL); + kfree(private); + } else { + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } + } + return ret; +} + +static int chsc_subchannel_remove(struct subchannel *sch) +{ + struct chsc_private *private; + + cio_disable_subchannel(sch); + private = dev_get_drvdata(&sch->dev); + dev_set_drvdata(&sch->dev, NULL); + if (private->request) { + complete(&private->request->completion); + put_device(&sch->dev); + } + kfree(private); + return 0; +} + +static void chsc_subchannel_shutdown(struct subchannel *sch) +{ + cio_disable_subchannel(sch); +} + +static int chsc_subchannel_prepare(struct subchannel *sch) +{ + int cc; + struct schib schib; + /* + * Don't allow suspend while the subchannel is not idle + * since we don't have a way to clear the subchannel and + * cannot disable it with a request running. 
+ */ + cc = stsch_err(sch->schid, &schib); + if (!cc && scsw_stctl(&schib.scsw)) + return -EAGAIN; + return 0; +} + +static int chsc_subchannel_freeze(struct subchannel *sch) +{ + return cio_disable_subchannel(sch); +} + +static int chsc_subchannel_restore(struct subchannel *sch) +{ + return cio_enable_subchannel(sch, (u32)(unsigned long)sch); +} + +static struct css_device_id chsc_subchannel_ids[] = { + { .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(css, chsc_subchannel_ids); + +static struct css_driver chsc_subchannel_driver = { + .drv = { + .owner = THIS_MODULE, + .name = "chsc_subchannel", + }, + .subchannel_type = chsc_subchannel_ids, + .irq = chsc_subchannel_irq, + .probe = chsc_subchannel_probe, + .remove = chsc_subchannel_remove, + .shutdown = chsc_subchannel_shutdown, + .prepare = chsc_subchannel_prepare, + .freeze = chsc_subchannel_freeze, + .thaw = chsc_subchannel_restore, + .restore = chsc_subchannel_restore, +}; + +static int __init chsc_init_dbfs(void) +{ + chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long)); + if (!chsc_debug_msg_id) + goto out; + debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); + debug_set_level(chsc_debug_msg_id, 2); + chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16); + if (!chsc_debug_log_id) + goto out; + debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view); + debug_set_level(chsc_debug_log_id, 2); + return 0; +out: + if (chsc_debug_msg_id) + debug_unregister(chsc_debug_msg_id); + return -ENOMEM; +} + +static void chsc_remove_dbfs(void) +{ + debug_unregister(chsc_debug_log_id); + debug_unregister(chsc_debug_msg_id); +} + +static int __init chsc_init_sch_driver(void) +{ + return css_driver_register(&chsc_subchannel_driver); +} + +static void chsc_cleanup_sch_driver(void) +{ + css_driver_unregister(&chsc_subchannel_driver); +} + +static DEFINE_SPINLOCK(chsc_lock); + +static int chsc_subchannel_match_next_free(struct device *dev, void *data) +{ + struct subchannel *sch = to_subchannel(dev); + + return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw); +} + +static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch) +{ + struct device *dev; + + dev = driver_find_device(&chsc_subchannel_driver.drv, + sch ? &sch->dev : NULL, NULL, + chsc_subchannel_match_next_free); + return dev ? to_subchannel(dev) : NULL; +} + +/** + * chsc_async() - try to start a chsc request asynchronously + * @chsc_area: request to be started + * @request: request structure to associate + * + * Tries to start a chsc request on one of the existing chsc subchannels. 
+ * Returns: + * %0 if the request was performed synchronously + * %-EINPROGRESS if the request was successfully started + * %-EBUSY if all chsc subchannels are busy + * %-ENODEV if no chsc subchannels are available + * Context: + * interrupts disabled, chsc_lock held + */ +static int chsc_async(struct chsc_async_area *chsc_area, + struct chsc_request *request) +{ + int cc; + struct chsc_private *private; + struct subchannel *sch = NULL; + int ret = -ENODEV; + char dbf[10]; + + chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; + while ((sch = chsc_get_next_subchannel(sch))) { + spin_lock(sch->lock); + private = dev_get_drvdata(&sch->dev); + if (private->request) { + spin_unlock(sch->lock); + ret = -EBUSY; + continue; + } + chsc_area->header.sid = sch->schid; + CHSC_LOG(2, "schid"); + CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); + cc = chsc(chsc_area); + snprintf(dbf, sizeof(dbf), "cc:%d", cc); + CHSC_LOG(2, dbf); + switch (cc) { + case 0: + ret = 0; + break; + case 1: + sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC; + ret = -EINPROGRESS; + private->request = request; + break; + case 2: + ret = -EBUSY; + break; + default: + ret = -ENODEV; + } + spin_unlock(sch->lock); + CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n", + sch->schid.ssid, sch->schid.sch_no, cc); + if (ret == -EINPROGRESS) + return -EINPROGRESS; + put_device(&sch->dev); + if (ret == 0) + return 0; + } + return ret; +} + +static void chsc_log_command(void *chsc_area) +{ + char dbf[10]; + + snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]); + CHSC_LOG(0, dbf); + CHSC_LOG_HEX(0, chsc_area, 32); +} + +static int chsc_examine_irb(struct chsc_request *request) +{ + int backed_up; + + if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND)) + return -EIO; + backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK; + request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK; + if (scsw_cstat(&request->irb.scsw) == 0) + return 0; + if (!backed_up) + return 0; + if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK) + return -EIO; + if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK) + return -EPERM; + if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK) + return -EAGAIN; + if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK) + return -EAGAIN; + return -EIO; +} + +static int chsc_ioctl_start(void __user *user_area) +{ + struct chsc_request *request; + struct chsc_async_area *chsc_area; + int ret; + char dbf[10]; + + if (!css_general_characteristics.dynio) + /* It makes no sense to try. 
*/ + return -EOPNOTSUPP; + chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + if (!chsc_area) + return -ENOMEM; + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (!request) { + ret = -ENOMEM; + goto out_free; + } + init_completion(&request->completion); + if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free; + } + chsc_log_command(chsc_area); + spin_lock_irq(&chsc_lock); + ret = chsc_async(chsc_area, request); + spin_unlock_irq(&chsc_lock); + if (ret == -EINPROGRESS) { + wait_for_completion(&request->completion); + ret = chsc_examine_irb(request); + } + /* copy area back to user */ + if (!ret) + if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) + ret = -EFAULT; +out_free: + snprintf(dbf, sizeof(dbf), "ret:%d", ret); + CHSC_LOG(0, dbf); + kfree(request); + free_page((unsigned long)chsc_area); + return ret; +} + +static int chsc_ioctl_on_close_set(void __user *user_area) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (on_close_chsc_area) { + ret = -EBUSY; + goto out_unlock; + } + on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL); + if (!on_close_request) { + ret = -ENOMEM; + goto out_unlock; + } + on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + if (!on_close_chsc_area) { + ret = -ENOMEM; + goto out_free_request; + } + if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free_chsc; + } + ret = 0; + goto out_unlock; + +out_free_chsc: + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; +out_free_request: + kfree(on_close_request); + on_close_request = NULL; +out_unlock: + mutex_unlock(&on_close_mutex); + snprintf(dbf, sizeof(dbf), "ocsret:%d", ret); + CHSC_LOG(0, dbf); + return ret; +} + +static int chsc_ioctl_on_close_remove(void) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (!on_close_chsc_area) { + ret = -ENOENT; + goto out_unlock; + } + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; + kfree(on_close_request); + on_close_request = NULL; + ret = 0; +out_unlock: + mutex_unlock(&on_close_mutex); + snprintf(dbf, sizeof(dbf), "ocrret:%d", ret); + CHSC_LOG(0, dbf); + return ret; +} + +static int chsc_ioctl_start_sync(void __user *user_area) +{ + struct chsc_sync_area *chsc_area; + int ret, ccode; + + chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!chsc_area) + return -ENOMEM; + if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free; + } + if (chsc_area->header.code & 0x4000) { + ret = -EINVAL; + goto out_free; + } + chsc_log_command(chsc_area); + ccode = chsc(chsc_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) + ret = -EFAULT; + else + ret = 0; +out_free: + free_page((unsigned long)chsc_area); + return ret; +} + +static int chsc_ioctl_info_channel_path(void __user *user_cd) +{ + struct chsc_chp_cd *cd; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 1; + u32 fmt1 : 4; + u32 cssid : 8; + u32 : 8; + u32 first_chpid : 8; + u32 : 24; + u32 last_chpid : 8; + u32 : 32; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *scpcd_area; + + scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!scpcd_area) + return -ENOMEM; + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(cd, user_cd, sizeof(*cd))) { + ret = -EFAULT; 
+ goto out_free; + } + scpcd_area->request.length = 0x0010; + scpcd_area->request.code = 0x0028; + scpcd_area->m = cd->m; + scpcd_area->fmt1 = cd->fmt; + scpcd_area->cssid = cd->chpid.cssid; + scpcd_area->first_chpid = cd->chpid.id; + scpcd_area->last_chpid = cd->chpid.id; + + ccode = chsc(scpcd_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (scpcd_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "scpcd: response code=%x\n", + scpcd_area->response.code); + goto out_free; + } + memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length); + if (copy_to_user(user_cd, cd, sizeof(*cd))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(cd); + free_page((unsigned long)scpcd_area); + return ret; +} + +static int chsc_ioctl_info_cu(void __user *user_cd) +{ + struct chsc_cu_cd *cd; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 1; + u32 fmt1 : 4; + u32 cssid : 8; + u32 : 8; + u32 first_cun : 8; + u32 : 24; + u32 last_cun : 8; + u32 : 32; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *scucd_area; + + scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!scucd_area) + return -ENOMEM; + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(cd, user_cd, sizeof(*cd))) { + ret = -EFAULT; + goto out_free; + } + scucd_area->request.length = 0x0010; + scucd_area->request.code = 0x0028; + scucd_area->m = cd->m; + scucd_area->fmt1 = cd->fmt; + scucd_area->cssid = cd->cssid; + scucd_area->first_cun = cd->cun; + scucd_area->last_cun = cd->cun; + + ccode = chsc(scucd_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (scucd_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "scucd: response code=%x\n", + scucd_area->response.code); + goto out_free; + } + memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length); + if (copy_to_user(user_cd, cd, sizeof(*cd))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(cd); + free_page((unsigned long)scucd_area); + return ret; +} + +static int chsc_ioctl_info_sch_cu(void __user *user_cud) +{ + struct chsc_sch_cud *cud; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 5; + u32 fmt1 : 4; + u32 : 2; + u32 ssid : 2; + u32 first_sch : 16; + u32 : 8; + u32 cssid : 8; + u32 last_sch : 16; + u32 : 32; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *sscud_area; + + sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sscud_area) + return -ENOMEM; + cud = kzalloc(sizeof(*cud), GFP_KERNEL); + if (!cud) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(cud, user_cud, sizeof(*cud))) { + ret = -EFAULT; + goto out_free; + } + sscud_area->request.length = 0x0010; + sscud_area->request.code = 0x0006; + sscud_area->m = cud->schid.m; + sscud_area->fmt1 = cud->fmt; + sscud_area->ssid = cud->schid.ssid; + sscud_area->first_sch = cud->schid.sch_no; + sscud_area->cssid = cud->schid.cssid; + sscud_area->last_sch = cud->schid.sch_no; + + ccode = chsc(sscud_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sscud_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sscud: response code=%x\n", + sscud_area->response.code); + goto out_free; + } + memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length); + if (copy_to_user(user_cud, cud, sizeof(*cud))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(cud); + 
free_page((unsigned long)sscud_area); + return ret; +} + +static int chsc_ioctl_conf_info(void __user *user_ci) +{ + struct chsc_conf_info *ci; + int ret, ccode; + struct { + struct chsc_header request; + u32 : 2; + u32 m : 1; + u32 : 1; + u32 fmt1 : 4; + u32 cssid : 8; + u32 : 6; + u32 ssid : 2; + u32 : 8; + u64 : 64; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; + } __attribute__ ((packed)) *sci_area; + + sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sci_area) + return -ENOMEM; + ci = kzalloc(sizeof(*ci), GFP_KERNEL); + if (!ci) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(ci, user_ci, sizeof(*ci))) { + ret = -EFAULT; + goto out_free; + } + sci_area->request.length = 0x0010; + sci_area->request.code = 0x0012; + sci_area->m = ci->id.m; + sci_area->fmt1 = ci->fmt; + sci_area->cssid = ci->id.cssid; + sci_area->ssid = ci->id.ssid; + + ccode = chsc(sci_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sci_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sci: response code=%x\n", + sci_area->response.code); + goto out_free; + } + memcpy(&ci->scid, &sci_area->response, sci_area->response.length); + if (copy_to_user(user_ci, ci, sizeof(*ci))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(ci); + free_page((unsigned long)sci_area); + return ret; +} + +static int chsc_ioctl_conf_comp_list(void __user *user_ccl) +{ + struct chsc_comp_list *ccl; + int ret, ccode; + struct { + struct chsc_header request; + u32 ctype : 8; + u32 : 4; + u32 fmt : 4; + u32 : 16; + u64 : 64; + u32 list_parm[2]; + u64 : 64; + struct chsc_header response; + u8 data[PAGE_SIZE - 36]; + } __attribute__ ((packed)) *sccl_area; + struct { + u32 m : 1; + u32 : 31; + u32 cssid : 8; + u32 : 16; + u32 chpid : 8; + } __attribute__ ((packed)) *chpid_parm; + struct { + u32 f_cssid : 8; + u32 l_cssid : 8; + u32 : 16; + u32 res; + } __attribute__ ((packed)) *cssids_parm; + + sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccl_area) + return -ENOMEM; + ccl = kzalloc(sizeof(*ccl), GFP_KERNEL); + if (!ccl) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) { + ret = -EFAULT; + goto out_free; + } + sccl_area->request.length = 0x0020; + sccl_area->request.code = 0x0030; + sccl_area->fmt = ccl->req.fmt; + sccl_area->ctype = ccl->req.ctype; + switch (sccl_area->ctype) { + case CCL_CU_ON_CHP: + case CCL_IOP_CHP: + chpid_parm = (void *)&sccl_area->list_parm; + chpid_parm->m = ccl->req.chpid.m; + chpid_parm->cssid = ccl->req.chpid.chp.cssid; + chpid_parm->chpid = ccl->req.chpid.chp.id; + break; + case CCL_CSS_IMG: + case CCL_CSS_IMG_CONF_CHAR: + cssids_parm = (void *)&sccl_area->list_parm; + cssids_parm->f_cssid = ccl->req.cssids.f_cssid; + cssids_parm->l_cssid = ccl->req.cssids.l_cssid; + break; + } + ccode = chsc(sccl_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sccl_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sccl: response code=%x\n", + sccl_area->response.code); + goto out_free; + } + memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length); + if (copy_to_user(user_ccl, ccl, sizeof(*ccl))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(ccl); + free_page((unsigned long)sccl_area); + return ret; +} + +static int chsc_ioctl_chpd(void __user *user_chpd) +{ + struct chsc_scpd *scpd_area; + struct chsc_cpd_info *chpd; + int ret; + + chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); + scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if 
(!scpd_area || !chpd) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { + ret = -EFAULT; + goto out_free; + } + ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, + chpd->rfmt, chpd->c, chpd->m, + scpd_area); + if (ret) + goto out_free; + memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length); + if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) + ret = -EFAULT; +out_free: + kfree(chpd); + free_page((unsigned long)scpd_area); + return ret; +} + +static int chsc_ioctl_dcal(void __user *user_dcal) +{ + struct chsc_dcal *dcal; + int ret, ccode; + struct { + struct chsc_header request; + u32 atype : 8; + u32 : 4; + u32 fmt : 4; + u32 : 16; + u32 res0[2]; + u32 list_parm[2]; + u32 res1[2]; + struct chsc_header response; + u8 data[PAGE_SIZE - 36]; + } __attribute__ ((packed)) *sdcal_area; + + sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sdcal_area) + return -ENOMEM; + dcal = kzalloc(sizeof(*dcal), GFP_KERNEL); + if (!dcal) { + ret = -ENOMEM; + goto out_free; + } + if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) { + ret = -EFAULT; + goto out_free; + } + sdcal_area->request.length = 0x0020; + sdcal_area->request.code = 0x0034; + sdcal_area->atype = dcal->req.atype; + sdcal_area->fmt = dcal->req.fmt; + memcpy(&sdcal_area->list_parm, &dcal->req.list_parm, + sizeof(sdcal_area->list_parm)); + + ccode = chsc(sdcal_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (sdcal_area->response.code != 0x0001) { + ret = -EIO; + CHSC_MSG(0, "sdcal: response code=%x\n", + sdcal_area->response.code); + goto out_free; + } + memcpy(&dcal->sdcal, &sdcal_area->response, + sdcal_area->response.length); + if (copy_to_user(user_dcal, dcal, sizeof(*dcal))) + ret = -EFAULT; + else + ret = 0; +out_free: + kfree(dcal); + free_page((unsigned long)sdcal_area); + return ret; +} + +static long chsc_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + void __user *argp; + + CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); + if (is_compat_task()) + argp = compat_ptr(arg); + else + argp = (void __user *)arg; + switch (cmd) { + case CHSC_START: + return chsc_ioctl_start(argp); + case CHSC_START_SYNC: + return chsc_ioctl_start_sync(argp); + case CHSC_INFO_CHANNEL_PATH: + return chsc_ioctl_info_channel_path(argp); + case CHSC_INFO_CU: + return chsc_ioctl_info_cu(argp); + case CHSC_INFO_SCH_CU: + return chsc_ioctl_info_sch_cu(argp); + case CHSC_INFO_CI: + return chsc_ioctl_conf_info(argp); + case CHSC_INFO_CCL: + return chsc_ioctl_conf_comp_list(argp); + case CHSC_INFO_CPD: + return chsc_ioctl_chpd(argp); + case CHSC_INFO_DCAL: + return chsc_ioctl_dcal(argp); + case CHSC_ON_CLOSE_SET: + return chsc_ioctl_on_close_set(argp); + case CHSC_ON_CLOSE_REMOVE: + return chsc_ioctl_on_close_remove(); + default: /* unknown ioctl number */ + return -ENOIOCTLCMD; + } +} + +static atomic_t chsc_ready_for_use = ATOMIC_INIT(1); + +static int chsc_open(struct inode *inode, struct file *file) +{ + if (!atomic_dec_and_test(&chsc_ready_for_use)) { + atomic_inc(&chsc_ready_for_use); + return -EBUSY; + } + return nonseekable_open(inode, file); +} + +static int chsc_release(struct inode *inode, struct file *filp) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (!on_close_chsc_area) + goto out_unlock; + init_completion(&on_close_request->completion); + CHSC_LOG(0, "on_close"); + chsc_log_command(on_close_chsc_area); + spin_lock_irq(&chsc_lock); + ret = chsc_async(on_close_chsc_area, on_close_request); + 
spin_unlock_irq(&chsc_lock); + if (ret == -EINPROGRESS) { + wait_for_completion(&on_close_request->completion); + ret = chsc_examine_irb(on_close_request); + } + snprintf(dbf, sizeof(dbf), "relret:%d", ret); + CHSC_LOG(0, dbf); + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; + kfree(on_close_request); + on_close_request = NULL; +out_unlock: + mutex_unlock(&on_close_mutex); + atomic_inc(&chsc_ready_for_use); + return 0; +} + +static const struct file_operations chsc_fops = { + .owner = THIS_MODULE, + .open = chsc_open, + .release = chsc_release, + .unlocked_ioctl = chsc_ioctl, + .compat_ioctl = chsc_ioctl, + .llseek = no_llseek, +}; + +static struct miscdevice chsc_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "chsc", + .fops = &chsc_fops, +}; + +static int __init chsc_misc_init(void) +{ + return misc_register(&chsc_misc_device); +} + +static void chsc_misc_cleanup(void) +{ + misc_deregister(&chsc_misc_device); +} + +static int __init chsc_sch_init(void) +{ + int ret; + + ret = chsc_init_dbfs(); + if (ret) + return ret; + isc_register(CHSC_SCH_ISC); + ret = chsc_init_sch_driver(); + if (ret) + goto out_dbf; + ret = chsc_misc_init(); + if (ret) + goto out_driver; + return ret; +out_driver: + chsc_cleanup_sch_driver(); +out_dbf: + isc_unregister(CHSC_SCH_ISC); + chsc_remove_dbfs(); + return ret; +} + +static void __exit chsc_sch_exit(void) +{ + chsc_misc_cleanup(); + chsc_cleanup_sch_driver(); + isc_unregister(CHSC_SCH_ISC); + chsc_remove_dbfs(); +} + +module_init(chsc_sch_init); +module_exit(chsc_sch_exit); diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h new file mode 100644 index 00000000000..589ebfad6aa --- /dev/null +++ b/drivers/s390/cio/chsc_sch.h @@ -0,0 +1,13 @@ +#ifndef _CHSC_SCH_H +#define _CHSC_SCH_H + +struct chsc_request { + struct completion completion; + struct irb irb; +}; + +struct chsc_private { + struct chsc_request *request; +}; + +#endif diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index ea813bdce1d..2905d8b0ec9 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1,159 +1,103 @@ /* - * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls - * $Revision: 1.134 $ * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 
1999, 2008 * Author(s): Ingo Adlung (adlung@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/ftrace.h> #include <linux/module.h> -#include <linux/config.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/kernel_stat.h> #include <linux/interrupt.h> - +#include <linux/irq.h> #include <asm/cio.h> #include <asm/delay.h> #include <asm/irq.h> - -#include "airq.h" +#include <asm/irq_regs.h> +#include <asm/setup.h> +#include <asm/reset.h> +#include <asm/ipl.h> +#include <asm/chpid.h> +#include <asm/airq.h> +#include <asm/isc.h> +#include <linux/cputime.h> +#include <asm/fcx.h> +#include <asm/nmi.h> +#include <asm/crw.h> #include "cio.h" #include "css.h" #include "chsc.h" #include "ioasm.h" +#include "io_sch.h" #include "blacklist.h" #include "cio_debug.h" +#include "chp.h" debug_info_t *cio_debug_msg_id; debug_info_t *cio_debug_trace_id; debug_info_t *cio_debug_crw_id; -int cio_show_msg; - -static int __init -cio_setup (char *parm) -{ - if (!strcmp (parm, "yes")) - cio_show_msg = 1; - else if (!strcmp (parm, "no")) - cio_show_msg = 0; - else - printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'", - parm); - return 1; -} - -__setup ("cio_msg=", cio_setup); +DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb); +EXPORT_PER_CPU_SYMBOL(cio_irb); /* * Function: cio_debug_init - * Initializes three debug logs (under /proc/s390dbf) for common I/O: - * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on + * Initializes three debug logs for common I/O: + * - cio_msg logs generic cio messages * - cio_trace logs the calling of different functions - * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on - * debug levels depend on CONFIG_DEBUG_IO resp. 
CONFIG_DEBUG_CRW + * - cio_crw logs machine check related cio messages */ -static int __init -cio_debug_init (void) +static int __init cio_debug_init(void) { - cio_debug_msg_id = debug_register ("cio_msg", 16, 4, 16*sizeof (long)); + cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long)); if (!cio_debug_msg_id) goto out_unregister; - debug_register_view (cio_debug_msg_id, &debug_sprintf_view); - debug_set_level (cio_debug_msg_id, 2); - cio_debug_trace_id = debug_register ("cio_trace", 16, 4, 8); + debug_register_view(cio_debug_msg_id, &debug_sprintf_view); + debug_set_level(cio_debug_msg_id, 2); + cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16); if (!cio_debug_trace_id) goto out_unregister; - debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view); - debug_set_level (cio_debug_trace_id, 2); - cio_debug_crw_id = debug_register ("cio_crw", 4, 4, 16*sizeof (long)); + debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); + debug_set_level(cio_debug_trace_id, 2); + cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long)); if (!cio_debug_crw_id) goto out_unregister; - debug_register_view (cio_debug_crw_id, &debug_sprintf_view); - debug_set_level (cio_debug_crw_id, 2); - pr_debug("debugging initialized\n"); + debug_register_view(cio_debug_crw_id, &debug_sprintf_view); + debug_set_level(cio_debug_crw_id, 4); return 0; out_unregister: if (cio_debug_msg_id) - debug_unregister (cio_debug_msg_id); + debug_unregister(cio_debug_msg_id); if (cio_debug_trace_id) - debug_unregister (cio_debug_trace_id); + debug_unregister(cio_debug_trace_id); if (cio_debug_crw_id) - debug_unregister (cio_debug_crw_id); - pr_debug("could not initialize debugging\n"); + debug_unregister(cio_debug_crw_id); return -1; } arch_initcall (cio_debug_init); -int -cio_set_options (struct subchannel *sch, int flags) -{ - sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; - sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; - sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; - return 0; -} - -/* FIXME: who wants to use this? */ -int -cio_get_options (struct subchannel *sch) -{ - int flags; - - flags = 0; - if (sch->options.suspend) - flags |= DOIO_ALLOW_SUSPEND; - if (sch->options.prefetch) - flags |= DOIO_DENY_PREFETCH; - if (sch->options.inter) - flags |= DOIO_SUPPRESS_INTER; - return flags; -} - -/* - * Use tpi to get a pending interrupt, call the interrupt handler and - * return a pointer to the subchannel structure. - */ -static inline int -cio_tpi(void) +int cio_set_options(struct subchannel *sch, int flags) { - struct tpi_info *tpi_info; - struct subchannel *sch; - struct irb *irb; + struct io_subchannel_private *priv = to_io_private(sch); - tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; - if (tpi (NULL) != 1) - return 0; - irb = (struct irb *) __LC_IRB; - /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) != 0) - /* Not status pending or not operational. 
*/ - return 1; - sch = (struct subchannel *)(unsigned long)tpi_info->intparm; - if (!sch) - return 1; - local_bh_disable(); - irq_enter (); - spin_lock(&sch->lock); - memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); - if (sch->driver && sch->driver->irq) - sch->driver->irq(&sch->dev); - spin_unlock(&sch->lock); - irq_exit (); - __local_bh_enable(); - return 1; + priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; + priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; + priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; + return 0; } -static inline int +static int cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) { char dbf_text[15]; @@ -163,11 +107,14 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) else sch->lpm = 0; - stsch (sch->irq, &sch->schib); + CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " + "subchannel 0.%x.%04x!\n", sch->schid.ssid, + sch->schid.sch_no); - CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " - "subchannel %04x!\n", sch->irq); - sprintf(dbf_text, "no%s", sch->dev.bus_id); + if (cio_update_schib(sch)) + return -ENODEV; + + sprintf(dbf_text, "no%s", dev_name(&sch->dev)); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); @@ -180,48 +127,51 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ __u8 lpm, /* logical path mask */ __u8 key) /* storage key */ { - char dbf_txt[15]; + struct io_subchannel_private *priv = to_io_private(sch); + union orb *orb = &priv->orb; int ccode; - CIO_TRACE_EVENT (4, "stIO"); - CIO_TRACE_EVENT (4, sch->dev.bus_id); + CIO_TRACE_EVENT(5, "stIO"); + CIO_TRACE_EVENT(5, dev_name(&sch->dev)); + memset(orb, 0, sizeof(union orb)); /* sch is always under 2G. */ - sch->orb.intparm = (__u32)(unsigned long)sch; - sch->orb.fmt = 1; - - sch->orb.pfch = sch->options.prefetch == 0; - sch->orb.spnd = sch->options.suspend; - sch->orb.ssic = sch->options.suspend && sch->options.inter; - sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; -#ifdef CONFIG_ARCH_S390X + orb->cmd.intparm = (u32)(addr_t)sch; + orb->cmd.fmt = 1; + + orb->cmd.pfch = priv->options.prefetch == 0; + orb->cmd.spnd = priv->options.suspend; + orb->cmd.ssic = priv->options.suspend && priv->options.inter; + orb->cmd.lpm = (lpm != 0) ? 
lpm : sch->lpm; +#ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only */ - sch->orb.c64 = 1; - sch->orb.i2k = 0; + orb->cmd.c64 = 1; + orb->cmd.i2k = 0; #endif - sch->orb.key = key >> 4; + orb->cmd.key = key >> 4; /* issue "Start Subchannel" */ - sch->orb.cpa = (__u32) __pa (cpa); - ccode = ssch (sch->irq, &sch->orb); + orb->cmd.cpa = (__u32) __pa(cpa); + ccode = ssch(sch->schid, orb); /* process condition code */ - sprintf (dbf_txt, "ccode:%d", ccode); - CIO_TRACE_EVENT (4, dbf_txt); + CIO_HEX_EVENT(5, &ccode, sizeof(ccode)); switch (ccode) { case 0: /* * initialize device status information */ - sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; return 0; case 1: /* status pending */ case 2: /* busy */ return -EBUSY; - default: /* device/path not operational */ + case 3: /* device/path not operational */ return cio_start_handle_notoper(sch, lpm); + default: + return ccode; } } @@ -237,20 +187,18 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) int cio_resume (struct subchannel *sch) { - char dbf_txt[15]; int ccode; - CIO_TRACE_EVENT (4, "resIO"); - CIO_TRACE_EVENT (4, sch->dev.bus_id); + CIO_TRACE_EVENT(4, "resIO"); + CIO_TRACE_EVENT(4, dev_name(&sch->dev)); - ccode = rsch (sch->irq); + ccode = rsch (sch->schid); - sprintf (dbf_txt, "ccode:%d", ccode); - CIO_TRACE_EVENT (4, dbf_txt); + CIO_HEX_EVENT(4, &ccode, sizeof(ccode)); switch (ccode) { case 0: - sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; return 0; case 1: return -EBUSY; @@ -271,26 +219,24 @@ cio_resume (struct subchannel *sch) int cio_halt(struct subchannel *sch) { - char dbf_txt[15]; int ccode; if (!sch) return -ENODEV; - CIO_TRACE_EVENT (2, "haltIO"); - CIO_TRACE_EVENT (2, sch->dev.bus_id); + CIO_TRACE_EVENT(2, "haltIO"); + CIO_TRACE_EVENT(2, dev_name(&sch->dev)); /* * Issue "Halt subchannel" and process condition code */ - ccode = hsch (sch->irq); + ccode = hsch (sch->schid); - sprintf (dbf_txt, "ccode:%d", ccode); - CIO_TRACE_EVENT (2, dbf_txt); + CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); switch (ccode) { case 0: - sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; return 0; case 1: /* status pending */ case 2: /* busy */ @@ -306,26 +252,24 @@ cio_halt(struct subchannel *sch) int cio_clear(struct subchannel *sch) { - char dbf_txt[15]; int ccode; if (!sch) return -ENODEV; - CIO_TRACE_EVENT (2, "clearIO"); - CIO_TRACE_EVENT (2, sch->dev.bus_id); + CIO_TRACE_EVENT(2, "clearIO"); + CIO_TRACE_EVENT(2, dev_name(&sch->dev)); /* * Issue "Clear subchannel" and process condition code */ - ccode = csch (sch->irq); + ccode = csch (sch->schid); - sprintf (dbf_txt, "ccode:%d", ccode); - CIO_TRACE_EVENT (2, dbf_txt); + CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); switch (ccode) { case 0: - sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; + sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; return 0; default: /* device not operational */ return -ENODEV; @@ -342,24 +286,23 @@ cio_clear(struct subchannel *sch) int cio_cancel (struct subchannel *sch) { - char dbf_txt[15]; int ccode; if (!sch) return -ENODEV; - CIO_TRACE_EVENT (2, "cancelIO"); - CIO_TRACE_EVENT (2, sch->dev.bus_id); + CIO_TRACE_EVENT(2, "cancelIO"); + CIO_TRACE_EVENT(2, dev_name(&sch->dev)); - ccode = xsch (sch->irq); + ccode = xsch (sch->schid); - sprintf (dbf_txt, "ccode:%d", ccode); - CIO_TRACE_EVENT (2, dbf_txt); + CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); switch (ccode) { case 0: /* 
success */ /* Update information in scsw. */ - stsch (sch->irq, &sch->schib); + if (cio_update_schib(sch)) + return -ENODEV; return 0; case 1: /* status pending */ return -EBUSY; @@ -370,442 +313,435 @@ cio_cancel (struct subchannel *sch) } } + +static void cio_apply_config(struct subchannel *sch, struct schib *schib) +{ + schib->pmcw.intparm = sch->config.intparm; + schib->pmcw.mbi = sch->config.mbi; + schib->pmcw.isc = sch->config.isc; + schib->pmcw.ena = sch->config.ena; + schib->pmcw.mme = sch->config.mme; + schib->pmcw.mp = sch->config.mp; + schib->pmcw.csense = sch->config.csense; + schib->pmcw.mbfc = sch->config.mbfc; + if (sch->config.mbfc) + schib->mba = sch->config.mba; +} + +static int cio_check_config(struct subchannel *sch, struct schib *schib) +{ + return (schib->pmcw.intparm == sch->config.intparm) && + (schib->pmcw.mbi == sch->config.mbi) && + (schib->pmcw.isc == sch->config.isc) && + (schib->pmcw.ena == sch->config.ena) && + (schib->pmcw.mme == sch->config.mme) && + (schib->pmcw.mp == sch->config.mp) && + (schib->pmcw.csense == sch->config.csense) && + (schib->pmcw.mbfc == sch->config.mbfc) && + (!sch->config.mbfc || (schib->mba == sch->config.mba)); +} + /* - * Function: cio_modify - * Issues a "Modify Subchannel" on the specified subchannel + * cio_commit_config - apply configuration to the subchannel */ -int -cio_modify (struct subchannel *sch) +int cio_commit_config(struct subchannel *sch) { - int ccode, retry, ret; + int ccode, retry, ret = 0; + struct schib schib; + struct irb irb; + + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) + return -ENODEV; - ret = 0; for (retry = 0; retry < 5; retry++) { - ccode = msch_err (sch->irq, &sch->schib); - if (ccode < 0) /* -EIO if msch gets a program check. */ + /* copy desired changes to local schib */ + cio_apply_config(sch, &schib); + ccode = msch_err(sch->schid, &schib); + if (ccode < 0) /* -EIO if msch gets a program check. */ return ccode; switch (ccode) { - case 0: /* successfull */ - return 0; - case 1: /* status pending */ - return -EBUSY; - case 2: /* busy */ - udelay (100); /* allow for recovery */ + case 0: /* successful */ + if (stsch_err(sch->schid, &schib) || + !css_sch_is_valid(&schib)) + return -ENODEV; + if (cio_check_config(sch, &schib)) { + /* commit changes from local schib */ + memcpy(&sch->schib, &schib, sizeof(schib)); + return 0; + } + ret = -EAGAIN; + break; + case 1: /* status pending */ + ret = -EBUSY; + if (tsch(sch->schid, &irb)) + return ret; + break; + case 2: /* busy */ + udelay(100); /* allow for recovery */ ret = -EBUSY; break; - case 3: /* not operational */ + case 3: /* not operational */ return -ENODEV; } } return ret; } -/* - * Enable subchannel. +/** + * cio_update_schib - Perform stsch and update schib if subchannel is valid. + * @sch: subchannel on which to perform stsch + * Return zero on success, -ENODEV otherwise. */ -int -cio_enable_subchannel (struct subchannel *sch, unsigned int isc) +int cio_update_schib(struct subchannel *sch) +{ + struct schib schib; + + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) + return -ENODEV; + + memcpy(&sch->schib, &schib, sizeof(schib)); + return 0; +} +EXPORT_SYMBOL_GPL(cio_update_schib); + +/** + * cio_enable_subchannel - enable a subchannel. 
+ * @sch: subchannel to be enabled + * @intparm: interruption parameter to set + */ +int cio_enable_subchannel(struct subchannel *sch, u32 intparm) { - char dbf_txt[15]; - int ccode; - int retry; int ret; - CIO_TRACE_EVENT (2, "ensch"); - CIO_TRACE_EVENT (2, sch->dev.bus_id); + CIO_TRACE_EVENT(2, "ensch"); + CIO_TRACE_EVENT(2, dev_name(&sch->dev)); - ccode = stsch (sch->irq, &sch->schib); - if (ccode) + if (sch_is_pseudo_sch(sch)) + return -EINVAL; + if (cio_update_schib(sch)) return -ENODEV; - for (retry = 5, ret = 0; retry > 0; retry--) { - sch->schib.pmcw.ena = 1; - sch->schib.pmcw.isc = isc; - sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; - ret = cio_modify(sch); - if (ret == -ENODEV) - break; - if (ret == -EIO) - /* - * Got a program check in cio_modify. Try without - * the concurrent sense bit the next time. - */ - sch->schib.pmcw.csense = 0; - if (ret == 0) { - stsch (sch->irq, &sch->schib); - if (sch->schib.pmcw.ena) - break; - } - if (ret == -EBUSY) { - struct irb irb; - if (tsch(sch->irq, &irb) != 0) - break; - } + sch->config.ena = 1; + sch->config.isc = sch->isc; + sch->config.intparm = intparm; + + ret = cio_commit_config(sch); + if (ret == -EIO) { + /* + * Got a program check in msch. Try without + * the concurrent sense bit the next time. + */ + sch->config.csense = 0; + ret = cio_commit_config(sch); } - sprintf (dbf_txt, "ret:%d", ret); - CIO_TRACE_EVENT (2, dbf_txt); + CIO_HEX_EVENT(2, &ret, sizeof(ret)); return ret; } +EXPORT_SYMBOL_GPL(cio_enable_subchannel); -/* - * Disable subchannel. +/** + * cio_disable_subchannel - disable a subchannel. + * @sch: subchannel to disable */ -int -cio_disable_subchannel (struct subchannel *sch) +int cio_disable_subchannel(struct subchannel *sch) { - char dbf_txt[15]; - int ccode; - int retry; int ret; - CIO_TRACE_EVENT (2, "dissch"); - CIO_TRACE_EVENT (2, sch->dev.bus_id); + CIO_TRACE_EVENT(2, "dissch"); + CIO_TRACE_EVENT(2, dev_name(&sch->dev)); - ccode = stsch (sch->irq, &sch->schib); - if (ccode == 3) /* Not operational. */ + if (sch_is_pseudo_sch(sch)) + return 0; + if (cio_update_schib(sch)) return -ENODEV; - if (sch->schib.scsw.actl != 0) + sch->config.ena = 0; + ret = cio_commit_config(sch); + + CIO_HEX_EVENT(2, &ret, sizeof(ret)); + return ret; +} +EXPORT_SYMBOL_GPL(cio_disable_subchannel); + +static int cio_check_devno_blacklisted(struct subchannel *sch) +{ + if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { /* - * the disable function must not be called while there are - * requests pending for completion ! + * This device must not be known to Linux. So we simply + * say that there is no device and return ENODEV. */ - return -EBUSY; - - for (retry = 5, ret = 0; retry > 0; retry--) { - sch->schib.pmcw.ena = 0; - ret = cio_modify(sch); - if (ret == -ENODEV) - break; - if (ret == -EBUSY) - /* - * The subchannel is busy or status pending. - * We'll disable when the next interrupt was delivered - * via the state machine. - */ - break; - if (ret == 0) { - stsch (sch->irq, &sch->schib); - if (!sch->schib.pmcw.ena) - break; - } + CIO_MSG_EVENT(6, "Blacklisted device detected " + "at devno %04X, subchannel set %x\n", + sch->schib.pmcw.dev, sch->schid.ssid); + return -ENODEV; } - sprintf (dbf_txt, "ret:%d", ret); - CIO_TRACE_EVENT (2, dbf_txt); - return ret; + return 0; } -/* - * cio_validate_subchannel() +static int cio_validate_io_subchannel(struct subchannel *sch) +{ + /* Initialization for io subchannels. */ + if (!css_sch_is_valid(&sch->schib)) + return -ENODEV; + + /* Devno is valid. 
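(Aside, not part of the patch: the interruption parameter ties the two halves of this file together. The value passed to cio_enable_subchannel() is what do_cio_interrupt() further below casts back into a struct subchannel pointer, so the usual pattern, sketched here, is to hand in the subchannel's own address.)

/* Hedged sketch of a typical enable call from a subchannel driver;
 * intparm is set to the subchannel address so the interrupt handler
 * can map tpi_info->intparm back to this subchannel. */
ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);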
*/ + return cio_check_devno_blacklisted(sch); +} + +static int cio_validate_msg_subchannel(struct subchannel *sch) +{ + /* Initialization for message subchannels. */ + if (!css_sch_is_valid(&sch->schib)) + return -ENODEV; + + /* Devno is valid. */ + return cio_check_devno_blacklisted(sch); +} + +/** + * cio_validate_subchannel - basic validation of subchannel + * @sch: subchannel structure to be filled out + * @schid: subchannel id * * Find out subchannel type and initialize struct subchannel. * Return codes: - * SUBCHANNEL_TYPE_IO for a normal io subchannel - * SUBCHANNEL_TYPE_CHSC for a chsc subchannel - * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel - * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel + * 0 on success * -ENXIO for non-defined subchannels - * -ENODEV for subchannels with invalid device number or blacklisted devices + * -ENODEV for invalid subchannels or blacklisted devices + * -EIO for subchannels in an invalid subchannel set */ -int -cio_validate_subchannel (struct subchannel *sch, unsigned int irq) +int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) { char dbf_txt[15]; int ccode; + int err; - sprintf (dbf_txt, "valsch%x", irq); - CIO_TRACE_EVENT (4, dbf_txt); - - /* Nuke all fields. */ - memset(sch, 0, sizeof(struct subchannel)); - - spin_lock_init(&sch->lock); - - /* Set a name for the subchannel */ - snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq); + sprintf(dbf_txt, "valsch%x", schid.sch_no); + CIO_TRACE_EVENT(4, dbf_txt); /* * The first subchannel that is not-operational (ccode==3) - * indicates that there aren't any more devices available. + * indicates that there aren't any more devices available. + * If stsch gets an exception, it means the current subchannel set + * is not valid. */ - sch->irq = irq; - ccode = stsch (irq, &sch->schib); - if (ccode) - return -ENXIO; - - /* Copy subchannel type from path management control word. */ - sch->st = sch->schib.pmcw.st; - - /* - * ... just being curious we check for non I/O subchannels - */ - if (sch->st != 0) { - CIO_DEBUG(KERN_INFO, 0, - "Subchannel %04X reports " - "non-I/O subchannel type %04X\n", - sch->irq, sch->st); - /* We stop here for non-io subchannels. */ - return sch->st; + ccode = stsch_err(schid, &sch->schib); + if (ccode) { + err = (ccode == 3) ? -ENXIO : ccode; + goto out; } - - /* Initialization for io subchannels. */ - if (!sch->schib.pmcw.dnv) - /* io subchannel but device number is invalid. */ - return -ENODEV; - - /* Devno is valid. */ - if (is_blacklisted (sch->schib.pmcw.dev)) { - /* - * This device must not be known to Linux. So we simply - * say that there is no device and return ENODEV. - */ - CIO_MSG_EVENT(0, "Blacklisted device detected " - "at devno %04X\n", sch->schib.pmcw.dev); - return -ENODEV; + sch->st = sch->schib.pmcw.st; + sch->schid = schid; + + switch (sch->st) { + case SUBCHANNEL_TYPE_IO: + err = cio_validate_io_subchannel(sch); + break; + case SUBCHANNEL_TYPE_MSG: + err = cio_validate_msg_subchannel(sch); + break; + default: + err = 0; } - sch->opm = 0xff; - chsc_validate_chpids(sch); - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; - - CIO_DEBUG(KERN_INFO, 0, - "Detected device %04X on subchannel %04X" - " - PIM = %02X, PAM = %02X, POM = %02X\n", - sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim, - sch->schib.pmcw.pam, sch->schib.pmcw.pom); + if (err) + goto out; - /* - * We now have to initially ... - * ... set "interruption subclass" - * ... enable "concurrent sense" - * ... 
enable "multipath mode" if more than one - * CHPID is available. This is done regardless - * whether multiple paths are available for us. - */ - sch->schib.pmcw.isc = 3; /* could be smth. else */ - sch->schib.pmcw.csense = 1; /* concurrent sense */ - sch->schib.pmcw.ena = 0; - if ((sch->lpm & (sch->lpm - 1)) != 0) - sch->schib.pmcw.mp = 1; /* multipath mode */ - return 0; + CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", + sch->schid.ssid, sch->schid.sch_no, sch->st); +out: + return err; } /* - * do_IRQ() handles all normal I/O device IRQ's (the special - * SMP cross-CPU interrupts have their own specific - * handlers). - * + * do_cio_interrupt() handles all normal I/O device IRQ's */ -void -do_IRQ (struct pt_regs *regs) +static irqreturn_t do_cio_interrupt(int irq, void *dummy) { struct tpi_info *tpi_info; struct subchannel *sch; struct irb *irb; - irq_enter (); - asm volatile ("mc 0,0"); - if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) - /** - * Make sure that the i/o interrupt did not "overtake" - * the last HZ timer interrupt. - */ - account_ticks(regs); - /* - * Get interrupt information from lowcore - */ - tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; - irb = (struct irb *) __LC_IRB; - do { - kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; - /* - * Non I/O-subchannel thin interrupts are processed differently - */ - if (tpi_info->adapter_IO == 1 && - tpi_info->int_type == IO_INTERRUPT_TYPE) { - do_adapter_IO(); - continue; - } - sch = (struct subchannel *)(unsigned long)tpi_info->intparm; - if (sch) - spin_lock(&sch->lock); - /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) == 0 && sch) { - /* Keep subchannel information word up to date. */ - memcpy (&sch->schib.scsw, &irb->scsw, - sizeof (irb->scsw)); - /* Call interrupt handler if there is one. */ - if (sch->driver && sch->driver->irq) - sch->driver->irq(&sch->dev); - } - if (sch) - spin_unlock(&sch->lock); - /* - * Are more interrupts pending? - * If so, the tpi instruction will update the lowcore - * to hold the info for the next interrupt. - * We don't do this for VM because a tpi drops the cpu - * out of the sie which costs more cycles than it saves. - */ - } while (!MACHINE_IS_VM && tpi (NULL) != 0); - irq_exit (); + __this_cpu_write(s390_idle.nohz_delay, 1); + tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + irb = &__get_cpu_var(cio_irb); + sch = (struct subchannel *)(unsigned long) tpi_info->intparm; + if (!sch) { + /* Clear pending interrupt condition. */ + inc_irq_stat(IRQIO_CIO); + tsch(tpi_info->schid, irb); + return IRQ_HANDLED; + } + spin_lock(sch->lock); + /* Store interrupt response block to lowcore. */ + if (tsch(tpi_info->schid, irb) == 0) { + /* Keep subchannel information word up to date. */ + memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw)); + /* Call interrupt handler if there is one. 
*/ + if (sch->driver && sch->driver->irq) + sch->driver->irq(sch); + else + inc_irq_stat(IRQIO_CIO); + } else + inc_irq_stat(IRQIO_CIO); + spin_unlock(sch->lock); + + return IRQ_HANDLED; +} + +static struct irqaction io_interrupt = { + .name = "IO", + .handler = do_cio_interrupt, +}; + +void __init init_cio_interrupts(void) +{ + irq_set_chip_and_handler(IO_INTERRUPT, + &dummy_irq_chip, handle_percpu_irq); + setup_irq(IO_INTERRUPT, &io_interrupt); } #ifdef CONFIG_CCW_CONSOLE -static struct subchannel console_subchannel; -static int console_subchannel_in_use; +static struct subchannel *console_sch; +static struct lock_class_key console_sch_key; /* - * busy wait for the next interrupt on the console + * Use cio_tsch to update the subchannel status and call the interrupt handler + * if status had been pending. Called with the subchannel's lock held. */ -void -wait_cons_dev (void) +void cio_tsch(struct subchannel *sch) { - unsigned long cr6 __attribute__ ((aligned (8))); - unsigned long save_cr6 __attribute__ ((aligned (8))); + struct irb *irb; + int irq_context; - /* - * before entering the spinlock we may already have - * processed the interrupt on a different CPU... - */ - if (!console_subchannel_in_use) + irb = &__get_cpu_var(cio_irb); + /* Store interrupt response block to lowcore. */ + if (tsch(sch->schid, irb) != 0) + /* Not status pending or not operational. */ return; + memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); + /* Call interrupt handler with updated status. */ + irq_context = in_interrupt(); + if (!irq_context) { + local_bh_disable(); + irq_enter(); + } + kstat_incr_irq_this_cpu(IO_INTERRUPT); + if (sch->driver && sch->driver->irq) + sch->driver->irq(sch); + else + inc_irq_stat(IRQIO_CIO); + if (!irq_context) { + irq_exit(); + _local_bh_enable(); + } +} - /* disable all but isc 7 (console device) */ - __ctl_store (save_cr6, 6, 6); - cr6 = 0x01000000; - __ctl_load (cr6, 6, 6); - - do { - spin_unlock(&console_subchannel.lock); - if (!cio_tpi()) - cpu_relax(); - spin_lock(&console_subchannel.lock); - } while (console_subchannel.schib.scsw.actl != 0); - /* - * restore previous isc value - */ - __ctl_load (save_cr6, 6, 6); +static int cio_test_for_console(struct subchannel_id schid, void *data) +{ + struct schib schib; + + if (stsch_err(schid, &schib) != 0) + return -ENXIO; + if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && + (schib.pmcw.dev == console_devno)) { + console_irq = schid.sch_no; + return 1; /* found */ + } + return 0; } -static int -cio_console_irq(void) +static int cio_get_console_sch_no(void) { - int irq; - + struct subchannel_id schid; + struct schib schib; + + init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */ - if (stsch(console_irq, &console_subchannel.schib) != 0 || - !console_subchannel.schib.pmcw.dnv) + schid.sch_no = console_irq; + if (stsch_err(schid, &schib) != 0 || + (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv) return -1; - console_devno = console_subchannel.schib.pmcw.dev; + console_devno = schib.pmcw.dev; } else if (console_devno != -1) { /* At least the console device number is known. 
*/ - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - if (stsch(irq, &console_subchannel.schib) != 0) - break; - if (console_subchannel.schib.pmcw.dnv && - console_subchannel.schib.pmcw.dev == - console_devno) { - console_irq = irq; - break; - } - } - if (console_irq == -1) - return -1; - } else { - /* unlike in 2.4, we cannot autoprobe here, since - * the channel subsystem is not fully initialized. - * With some luck, the HWC console can take over */ - printk(KERN_WARNING "No ccw console found!\n"); - return -1; + for_each_subchannel(cio_test_for_console, NULL); } return console_irq; } -struct subchannel * -cio_probe_console(void) +struct subchannel *cio_probe_console(void) { - int irq, ret; + struct subchannel_id schid; + struct subchannel *sch; + int sch_no, ret; - if (xchg(&console_subchannel_in_use, 1) != 0) - return ERR_PTR(-EBUSY); - irq = cio_console_irq(); - if (irq == -1) { - console_subchannel_in_use = 0; - return ERR_PTR(-ENODEV); - } - memset(&console_subchannel, 0, sizeof(struct subchannel)); - ret = cio_validate_subchannel(&console_subchannel, irq); - if (ret) { - console_subchannel_in_use = 0; + sch_no = cio_get_console_sch_no(); + if (sch_no == -1) { + pr_warning("No CCW console was found\n"); return ERR_PTR(-ENODEV); } - - /* - * enable console I/O-interrupt subclass 7 - */ - ctl_set_bit(6, 24); - console_subchannel.schib.pmcw.isc = 7; - console_subchannel.schib.pmcw.intparm = - (__u32)(unsigned long)&console_subchannel; - ret = cio_modify(&console_subchannel); + init_subchannel_id(&schid); + schid.sch_no = sch_no; + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + return sch; + + lockdep_set_class(sch->lock, &console_sch_key); + isc_register(CONSOLE_ISC); + sch->config.isc = CONSOLE_ISC; + sch->config.intparm = (u32)(addr_t)sch; + ret = cio_commit_config(sch); if (ret) { - console_subchannel_in_use = 0; + isc_unregister(CONSOLE_ISC); + put_device(&sch->dev); return ERR_PTR(ret); } - return &console_subchannel; + console_sch = sch; + return sch; } -void -cio_release_console(void) +int cio_is_console(struct subchannel_id schid) { - console_subchannel.schib.pmcw.intparm = 0; - cio_modify(&console_subchannel); - ctl_clear_bit(6, 24); - console_subchannel_in_use = 0; -} - -/* Bah... hack to catch console special sausages. */ -int -cio_is_console(int irq) -{ - if (!console_subchannel_in_use) + if (!console_sch) return 0; - return (irq == console_subchannel.irq); + return schid_equal(&schid, &console_sch->schid); } -struct subchannel * -cio_get_console_subchannel(void) +void cio_register_early_subchannels(void) { - if (!console_subchannel_in_use) - return 0; - return &console_subchannel; + int ret; + + if (!console_sch) + return; + + ret = css_register_subchannel(console_sch); + if (ret) + put_device(&console_sch->dev); } +#endif /* CONFIG_CCW_CONSOLE */ -#endif -static inline int -__disable_subchannel_easy(unsigned int schid, struct schib *schib) +static int +__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { int retry, cc; cc = 0; for (retry=0;retry<3;retry++) { schib->pmcw.ena = 0; - cc = msch(schid, schib); + cc = msch_err(schid, schib); if (cc) return (cc==3?-ENODEV:-EBUSY); - stsch(schid, schib); + if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) + return -ENODEV; if (!schib->pmcw.ena) return 0; } return -EBUSY; /* uhm... 
*/ } -static inline int -__clear_subchannel_easy(unsigned int schid) +static int +__clear_io_subchannel_easy(struct subchannel_id schid) { int retry; @@ -815,46 +751,272 @@ __clear_subchannel_easy(unsigned int schid) struct tpi_info ti; if (tpi(&ti)) { - tsch(schid, (struct irb *)__LC_IRB); - return 0; + tsch(ti.schid, &__get_cpu_var(cio_irb)); + if (schid_equal(&ti.schid, &schid)) + return 0; } - udelay(100); + udelay_simple(100); } return -EBUSY; } -extern void do_reipl(unsigned long devno); +static void __clear_chsc_subchannel_easy(void) +{ + /* It seems we can only wait for a bit here :/ */ + udelay_simple(100); +} + +static int pgm_check_occured; + +static void cio_reset_pgm_check_handler(void) +{ + pgm_check_occured = 1; +} + +static int stsch_reset(struct subchannel_id schid, struct schib *addr) +{ + int rc; + + pgm_check_occured = 0; + s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; + rc = stsch_err(schid, addr); + s390_base_pgm_handler_fn = NULL; + + /* The program check handler could have changed pgm_check_occured. */ + barrier(); -/* Clear all subchannels. */ -void -clear_all_subchannels(void) + if (pgm_check_occured) + return -EIO; + else + return rc; +} + +static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) { - unsigned int schid; + struct schib schib; - local_irq_disable(); - for (schid=0;schid<=highest_subchannel;schid++) { - struct schib schib; - if (stsch(schid, &schib)) - break; /* break out of the loop */ - if (!schib.pmcw.ena) - continue; - switch(__disable_subchannel_easy(schid, &schib)) { - case 0: - case -ENODEV: + if (stsch_reset(schid, &schib)) + return -ENXIO; + if (!schib.pmcw.ena) + return 0; + switch(__disable_subchannel_easy(schid, &schib)) { + case 0: + case -ENODEV: + break; + default: /* -EBUSY */ + switch (schib.pmcw.st) { + case SUBCHANNEL_TYPE_IO: + if (__clear_io_subchannel_easy(schid)) + goto out; /* give up... */ + break; + case SUBCHANNEL_TYPE_CHSC: + __clear_chsc_subchannel_easy(); + break; + default: + /* No default clear strategy */ break; - default: /* -EBUSY */ - if (__clear_subchannel_easy(schid)) - break; /* give up... jump out of switch */ - stsch(schid, &schib); - __disable_subchannel_easy(schid, &schib); } + stsch_err(schid, &schib); + __disable_subchannel_easy(schid, &schib); + } +out: + return 0; +} + +static atomic_t chpid_reset_count; + +static void s390_reset_chpids_mcck_handler(void) +{ + struct crw crw; + struct mci *mci; + + /* Check for pending channel report word. */ + mci = (struct mci *)&S390_lowcore.mcck_interruption_code; + if (!mci->cp) + return; + /* Process channel report words. */ + while (stcrw(&crw) == 0) { + /* Check for responses to RCHP. */ + if (crw.slct && crw.rsc == CRW_RSC_CPATH) + atomic_dec(&chpid_reset_count); + } +} + +#define RCHP_TIMEOUT (30 * USEC_PER_SEC) +static void css_reset(void) +{ + int i, ret; + unsigned long long timeout; + struct chp_id chpid; + + /* Reset subchannels. */ + for_each_subchannel(__shutdown_subchannel_easy, NULL); + /* Reset channel paths. */ + s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; + /* Enable channel report machine checks. */ + __ctl_set_bit(14, 28); + /* Temporarily reenable machine checks. */ + local_mcck_enable(); + chp_id_init(&chpid); + for (i = 0; i <= __MAX_CHPID; i++) { + chpid.id = i; + ret = rchp(chpid); + if ((ret == 0) || (ret == 2)) + /* + * rchp either succeeded, or another rchp is already + * in progress. In either case, we'll get a crw. 
+ */ + atomic_inc(&chpid_reset_count); + } + /* Wait for machine check for all channel paths. */ + timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12); + while (atomic_read(&chpid_reset_count) != 0) { + if (get_tod_clock_fast() > timeout) + break; + cpu_relax(); + } + /* Disable machine checks again. */ + local_mcck_disable(); + /* Disable channel report machine checks. */ + __ctl_clear_bit(14, 28); + s390_base_mcck_handler_fn = NULL; +} + +static struct reset_call css_reset_call = { + .fn = css_reset, +}; + +static int __init init_css_reset_call(void) +{ + atomic_set(&chpid_reset_count, 0); + register_reset_call(&css_reset_call); + return 0; +} + +arch_initcall(init_css_reset_call); + +struct sch_match_id { + struct subchannel_id schid; + struct ccw_dev_id devid; + int rc; +}; + +static int __reipl_subchannel_match(struct subchannel_id schid, void *data) +{ + struct schib schib; + struct sch_match_id *match_id = data; + + if (stsch_reset(schid, &schib)) + return -ENXIO; + if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && + (schib.pmcw.dev == match_id->devid.devno) && + (schid.ssid == match_id->devid.ssid)) { + match_id->schid = schid; + match_id->rc = 0; + return 1; } + return 0; } +static int reipl_find_schid(struct ccw_dev_id *devid, + struct subchannel_id *schid) +{ + struct sch_match_id match_id; + + match_id.devid = *devid; + match_id.rc = -ENODEV; + for_each_subchannel(__reipl_subchannel_match, &match_id); + if (match_id.rc == 0) + *schid = match_id.schid; + return match_id.rc; +} + +extern void do_reipl_asm(__u32 schid); + /* Make sure all subchannels are quiet before we re-ipl an lpar. */ -void -reipl(unsigned long devno) +void reipl_ccw_dev(struct ccw_dev_id *devid) +{ + struct subchannel_id uninitialized_var(schid); + + s390_reset_system(NULL, NULL); + if (reipl_find_schid(devid, &schid) != 0) + panic("IPL Device not found\n"); + do_reipl_asm(*((__u32*)&schid)); +} + +int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) +{ + struct subchannel_id schid; + struct schib schib; + + schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; + if (!schid.one) + return -ENODEV; + if (stsch_err(schid, &schib)) + return -ENODEV; + if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) + return -ENODEV; + if (!schib.pmcw.dnv) + return -ENODEV; + iplinfo->devno = schib.pmcw.dev; + iplinfo->is_qdio = schib.pmcw.qf; + return 0; +} + +/** + * cio_tm_start_key - perform start function + * @sch: subchannel on which to perform the start function + * @tcw: transport-command word to be started + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * + * Start the tcw on the given subchannel. Return zero on success, non-zero + * otherwise. + */ +int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) +{ + int cc; + union orb *orb = &to_io_private(sch)->orb; + + memset(orb, 0, sizeof(union orb)); + orb->tm.intparm = (u32) (addr_t) sch; + orb->tm.key = key >> 4; + orb->tm.b = 1; + orb->tm.lpm = lpm ? lpm : sch->lpm; + orb->tm.tcw = (u32) (addr_t) tcw; + cc = ssch(sch->schid, orb); + switch (cc) { + case 0: + return 0; + case 1: + case 2: + return -EBUSY; + default: + return cio_start_handle_notoper(sch, lpm); + } +} + +/** + * cio_tm_intrg - perform interrogate function + * @sch - subchannel on which to perform the interrogate function + * + * If the specified subchannel is running in transport-mode, perform the + * interrogate function. Return zero on success, non-zero otherwie. 
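(Aside, not part of the patch: a hedged sketch of how a transport-mode capable driver might use cio_tm_start_key(). The tcw is assumed to have been built elsewhere, and my_drv_start_tcw() as well as the use of PAGE_DEFAULT_KEY are illustrative assumptions, not code from this series.)

/* Hypothetical caller: lpm == 0 makes cio_tm_start_key() fall back to
 * the subchannel's logical path mask. */
static int my_drv_start_tcw(struct subchannel *sch, struct tcw *tcw)
{
	return cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
}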
+ */ +int cio_tm_intrg(struct subchannel *sch) { - clear_all_subchannels(); - do_reipl(devno); + int cc; + + if (!to_io_private(sch)->orb.tm.b) + return -EINVAL; + cc = xsch(sch->schid); + switch (cc) { + case 0: + case 2: + return 0; + case 1: + return -EBUSY; + default: + return -ENODEV; + } } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index c50a9da420a..a01376ae174 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -1,121 +1,113 @@ #ifndef S390_CIO_H #define S390_CIO_H -/* - * where we put the ssd info - */ -struct ssd_info { - __u8 valid:1; - __u8 type:7; /* subchannel type */ - __u8 chpid[8]; /* chpids */ - __u16 fla[8]; /* full link addresses */ -} __attribute__ ((packed)); +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <asm/chpid.h> +#include <asm/cio.h> +#include <asm/fcx.h> +#include <asm/schid.h> +#include "chsc.h" /* * path management control word */ struct pmcw { - __u32 intparm; /* interruption parameter */ - __u32 qf : 1; /* qdio facility */ - __u32 res0 : 1; /* reserved zeros */ - __u32 isc : 3; /* interruption sublass */ - __u32 res5 : 3; /* reserved zeros */ - __u32 ena : 1; /* enabled */ - __u32 lm : 2; /* limit mode */ - __u32 mme : 2; /* measurement-mode enable */ - __u32 mp : 1; /* multipath mode */ - __u32 tf : 1; /* timing facility */ - __u32 dnv : 1; /* device number valid */ - __u32 dev : 16; /* device number */ - __u8 lpm; /* logical path mask */ - __u8 pnom; /* path not operational mask */ - __u8 lpum; /* last path used mask */ - __u8 pim; /* path installed mask */ - __u16 mbi; /* measurement-block index */ - __u8 pom; /* path operational mask */ - __u8 pam; /* path available mask */ - __u8 chpid[8]; /* CHPID 0-7 (if available) */ - __u32 unused1 : 8; /* reserved zeros */ - __u32 st : 3; /* subchannel type */ - __u32 unused2 : 18; /* reserved zeros */ - __u32 mbfc : 1; /* measurement block format control */ - __u32 xmwme : 1; /* extended measurement word mode enable */ - __u32 csense : 1; /* concurrent sense; can be enabled ...*/ + u32 intparm; /* interruption parameter */ + u32 qf : 1; /* qdio facility */ + u32 w : 1; + u32 isc : 3; /* interruption sublass */ + u32 res5 : 3; /* reserved zeros */ + u32 ena : 1; /* enabled */ + u32 lm : 2; /* limit mode */ + u32 mme : 2; /* measurement-mode enable */ + u32 mp : 1; /* multipath mode */ + u32 tf : 1; /* timing facility */ + u32 dnv : 1; /* device number valid */ + u32 dev : 16; /* device number */ + u8 lpm; /* logical path mask */ + u8 pnom; /* path not operational mask */ + u8 lpum; /* last path used mask */ + u8 pim; /* path installed mask */ + u16 mbi; /* measurement-block index */ + u8 pom; /* path operational mask */ + u8 pam; /* path available mask */ + u8 chpid[8]; /* CHPID 0-7 (if available) */ + u32 unused1 : 8; /* reserved zeros */ + u32 st : 3; /* subchannel type */ + u32 unused2 : 18; /* reserved zeros */ + u32 mbfc : 1; /* measurement block format control */ + u32 xmwme : 1; /* extended measurement word mode enable */ + u32 csense : 1; /* concurrent sense; can be enabled ...*/ /* ... per MSCH, however, if facility */ /* ... is not installed, this results */ /* ... in an operand exception. */ } __attribute__ ((packed)); +/* Target SCHIB configuration. 
*/ +struct schib_config { + u64 mba; + u32 intparm; + u16 mbi; + u32 isc:3; + u32 ena:1; + u32 mme:2; + u32 mp:1; + u32 csense:1; + u32 mbfc:1; +} __attribute__ ((packed)); + /* * subchannel information block */ struct schib { struct pmcw pmcw; /* path management control word */ - struct scsw scsw; /* subchannel status word */ + union scsw scsw; /* subchannel status word */ __u64 mba; /* measurement block address */ __u8 mda[4]; /* model dependent area */ } __attribute__ ((packed,aligned(4))); /* - * operation request block + * When rescheduled, todo's with higher values will overwrite those + * with lower values. */ -struct orb { - __u32 intparm; /* interruption parameter */ - __u32 key : 4; /* flags, like key, suspend control, etc. */ - __u32 spnd : 1; /* suspend control */ - __u32 res1 : 1; /* reserved */ - __u32 mod : 1; /* modification control */ - __u32 sync : 1; /* synchronize control */ - __u32 fmt : 1; /* format control */ - __u32 pfch : 1; /* prefetch control */ - __u32 isic : 1; /* initial-status-interruption control */ - __u32 alcc : 1; /* address-limit-checking control */ - __u32 ssic : 1; /* suppress-suspended-interr. control */ - __u32 res2 : 1; /* reserved */ - __u32 c64 : 1; /* IDAW/QDIO 64 bit control */ - __u32 i2k : 1; /* IDAW 2/4kB block size control */ - __u32 lpm : 8; /* logical path mask */ - __u32 ils : 1; /* incorrect length */ - __u32 zero : 6; /* reserved zeros */ - __u32 orbx : 1; /* ORB extension control */ - __u32 cpa; /* channel program address */ -} __attribute__ ((packed,aligned(4))); +enum sch_todo { + SCH_TODO_NOTHING, + SCH_TODO_EVAL, + SCH_TODO_UNREG, +}; /* subchannel data structure used by I/O subroutines */ struct subchannel { - unsigned int irq; /* aka. subchannel number */ - spinlock_t lock; /* subchannel lock */ - + struct subchannel_id schid; + spinlock_t *lock; /* subchannel lock */ + struct mutex reg_mutex; enum { SUBCHANNEL_TYPE_IO = 0, SUBCHANNEL_TYPE_CHSC = 1, - SUBCHANNEL_TYPE_MESSAGE = 2, + SUBCHANNEL_TYPE_MSG = 2, SUBCHANNEL_TYPE_ADM = 3, } st; /* subchannel type */ - - struct { - unsigned int suspend:1; /* allow suspend */ - unsigned int prefetch:1;/* deny prefetch */ - unsigned int inter:1; /* suppress intermediate interrupts */ - } __attribute__ ((packed)) options; - __u8 vpm; /* verified path mask */ __u8 lpm; /* logical path mask */ __u8 opm; /* operational path mask */ struct schib schib; /* subchannel information block */ - struct orb orb; /* operation request block */ - struct ccw1 sense_ccw; /* static ccw for sense command */ - struct ssd_info ssd_info; /* subchannel description */ + int isc; /* desired interruption subclass */ + struct chsc_ssd_info ssd_info; /* subchannel description */ struct device dev; /* entry in device tree */ struct css_driver *driver; + enum sch_todo todo; + struct work_struct todo_work; + struct schib_config config; } __attribute__ ((aligned(8))); -#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ +DECLARE_PER_CPU(struct irb, cio_irb); #define to_subchannel(n) container_of(n, struct subchannel, dev) -extern int cio_validate_subchannel (struct subchannel *, unsigned int); -extern int cio_enable_subchannel (struct subchannel *, unsigned int); +extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); +extern int cio_enable_subchannel(struct subchannel *, u32); extern int cio_disable_subchannel (struct subchannel *); extern int cio_cancel (struct subchannel *); extern int cio_clear (struct subchannel *); @@ -125,19 +117,21 @@ extern int cio_start (struct subchannel *, struct ccw1 
*, __u8); extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); extern int cio_cancel (struct subchannel *); extern int cio_set_options (struct subchannel *, int); -extern int cio_get_options (struct subchannel *); -extern int cio_modify (struct subchannel *); +extern int cio_update_schib(struct subchannel *sch); +extern int cio_commit_config(struct subchannel *sch); + +int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); +int cio_tm_intrg(struct subchannel *sch); + /* Use with care. */ #ifdef CONFIG_CCW_CONSOLE extern struct subchannel *cio_probe_console(void); -extern void cio_release_console(void); -extern int cio_is_console(int irq); -extern struct subchannel *cio_get_console_subchannel(void); +extern int cio_is_console(struct subchannel_id); +extern void cio_register_early_subchannels(void); +extern void cio_tsch(struct subchannel *sch); #else -#define cio_is_console(irq) 0 -#define cio_get_console_subchannel() NULL +#define cio_is_console(schid) 0 +static inline void cio_register_early_subchannels(void) {} #endif -extern int cio_show_msg; - #endif diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h index 6af8b27d366..e64e8278c42 100644 --- a/drivers/s390/cio/cio_debug.h +++ b/drivers/s390/cio/cio_debug.h @@ -3,30 +3,32 @@ #include <asm/debug.h> -#define CIO_TRACE_EVENT(imp, txt) do { \ +/* for use of debug feature */ +extern debug_info_t *cio_debug_msg_id; +extern debug_info_t *cio_debug_trace_id; +extern debug_info_t *cio_debug_crw_id; + +#define CIO_TRACE_EVENT(imp, txt) do { \ debug_text_event(cio_debug_trace_id, imp, txt); \ } while (0) -#define CIO_MSG_EVENT(imp, args...) do { \ - debug_sprintf_event(cio_debug_msg_id, imp , ##args); \ +#define CIO_MSG_EVENT(imp, args...) do { \ + debug_sprintf_event(cio_debug_msg_id, imp , ##args); \ } while (0) -#define CIO_CRW_EVENT(imp, args...) do { \ - debug_sprintf_event(cio_debug_crw_id, imp , ##args); \ +#define CIO_CRW_EVENT(imp, args...) do { \ + debug_sprintf_event(cio_debug_crw_id, imp , ##args); \ } while (0) -#define CIO_HEX_EVENT(imp, args...) do { \ - debug_event(cio_debug_trace_id, imp, ##args); \ - } while (0) - -#define CIO_DEBUG(printk_level,event_level,msg...) ({ \ - if (cio_show_msg) printk(printk_level msg); \ - CIO_MSG_EVENT (event_level, msg); \ -}) - -/* for use of debug feature */ -extern debug_info_t *cio_debug_msg_id; -extern debug_info_t *cio_debug_trace_id; -extern debug_info_t *cio_debug_crw_id; +static inline void CIO_HEX_EVENT(int level, void *data, int length) +{ + if (unlikely(!cio_debug_trace_id)) + return; + while (length > 0) { + debug_event(cio_debug_trace_id, level, data, length); + length -= cio_debug_trace_id->buf_size; + data += cio_debug_trace_id->buf_size; + } +} #endif diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 8cc4f1a940d..23054f8fa9f 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -1,11 +1,10 @@ /* - * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $) - * * Linux on zSeries Channel Measurement Facility support * - * Copyright 2000,2003 IBM Corporation + * Copyright IBM Corp. 2000, 2006 * - * Author: Arnd Bergmann <arndb@de.ibm.com> + * Authors: Arnd Bergmann <arndb@de.ibm.com> + * Cornelia Huck <cornelia.huck@de.ibm.com> * * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com> * @@ -24,16 +23,22 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/bootmem.h> #include <linux/device.h> #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/timex.h> /* get_tod_clock() */ #include <asm/ccwdev.h> #include <asm/cio.h> #include <asm/cmb.h> +#include <asm/div64.h> #include "cio.h" #include "css.h" @@ -41,7 +46,8 @@ #include "ioasm.h" #include "chsc.h" -/* parameter to enable cmf during boot, possible uses are: +/* + * parameter to enable cmf during boot, possible uses are: * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be * used on any subchannel * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure @@ -69,18 +75,20 @@ enum cmb_index { * enum cmb_format - types of supported measurement block formats * * @CMF_BASIC: traditional channel measurement blocks supported - * by all machines that we run on + * by all machines that we run on * @CMF_EXTENDED: improved format that was introduced with the z990 - * machine - * @CMF_AUTODETECT: default: use extended format when running on a z990 - * or later machine, otherwise fall back to basic format - **/ + * machine + * @CMF_AUTODETECT: default: use extended format when running on a machine + * supporting extended format, otherwise fall back to + * basic format + */ enum cmb_format { CMF_BASIC, CMF_EXTENDED, CMF_AUTODETECT = -1, }; -/** + +/* * format - actual format for all measurement blocks * * The format module parameter can be set to a value of 0 (zero) @@ -88,38 +96,50 @@ enum cmb_format { * enum cmb_format. */ static int format = CMF_AUTODETECT; -module_param(format, bool, 0444); +module_param(format, bint, 0444); /** * struct cmb_operations - functions to use depending on cmb_format * - * all these functions operate on a struct cmf_device. There is only - * one instance of struct cmb_operations because all cmf_device - * objects are guaranteed to be of the same type. + * Most of these functions operate on a struct ccw_device. There is only + * one instance of struct cmb_operations because the format of the measurement + * data is guaranteed to be the same for every ccw_device. 
* * @alloc: allocate memory for a channel measurement block, * either with the help of a special pool or with kmalloc * @free: free memory allocated with @alloc * @set: enable or disable measurement + * @read: read a measurement entry at an index * @readall: read a measurement block in a common format * @reset: clear the data in the associated measurement block and * reset its time stamp + * @align: align an allocated block so that the hardware can use it */ struct cmb_operations { - int (*alloc) (struct ccw_device*); - void(*free) (struct ccw_device*); - int (*set) (struct ccw_device*, u32); - u64 (*read) (struct ccw_device*, int); - int (*readall)(struct ccw_device*, struct cmbdata *); - void (*reset) (struct ccw_device*); - + int (*alloc) (struct ccw_device *); + void (*free) (struct ccw_device *); + int (*set) (struct ccw_device *, u32); + u64 (*read) (struct ccw_device *, int); + int (*readall)(struct ccw_device *, struct cmbdata *); + void (*reset) (struct ccw_device *); + void *(*align) (void *); +/* private: */ struct attribute_group *attr_group; }; static struct cmb_operations *cmbops; -/* our user interface is designed in terms of nanoseconds, +struct cmb_data { + void *hw_block; /* Pointer to block updated by hardware */ + void *last_block; /* Last changed block copied from hardware block */ + int size; /* Size of hw_block and last_block */ + unsigned long long last_update; /* when last_block was updated */ +}; + +/* + * Our user interface is designed in terms of nanoseconds, * while the hardware measures total times in its own - * unit.*/ + * unit. + */ static inline u64 time_to_nsec(u32 value) { return ((u64)value) * 128000ull; @@ -139,19 +159,20 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count) if (count == 0) return 0; - /* value comes in units of 128 µsec */ + /* value comes in units of 128 µsec */ ret = time_to_nsec(value); do_div(ret, count); return ret; } -/* activate or deactivate the channel monitor. When area is NULL, +/* + * Activate or deactivate the channel monitor. When area is NULL, * the monitor is deactivated. The channel monitor needs to * be active in order to measure subchannels, which also need - * to be enabled. */ -static inline void -cmf_activate(void *area, unsigned int onoff) + * to be enabled. 
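(Aside, not part of the patch: a worked example for the conversion helpers above, with invented values. The hardware counts in 128 microsecond units, so time_to_nsec() multiplies by 128000.)

/* Illustration only: */
u64 total = time_to_nsec(1000);		/* 1000 * 128000 ns = 128 ms */
u64 avg = time_to_avg_nsec(1000, 500);	/* 128000000 ns / 500 = 256000 ns */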
+ */ +static inline void cmf_activate(void *area, unsigned int onoff) { register void * __gpr2 asm("2"); register long __gpr1 asm("1"); @@ -162,59 +183,22 @@ cmf_activate(void *area, unsigned int onoff) asm("schm" : : "d" (__gpr2), "d" (__gpr1) ); } -static int -set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) +static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, + unsigned long address) { - int ret; - int retry; struct subchannel *sch; - struct schib *schib; sch = to_subchannel(cdev->dev.parent); - schib = &sch->schib; - /* msch can silently fail, so do it again if necessary */ - for (retry = 0; retry < 3; retry++) { - /* prepare schib */ - stsch(sch->irq, schib); - schib->pmcw.mme = mme; - schib->pmcw.mbfc = mbfc; - /* address can be either a block address or a block index */ - if (mbfc) - schib->mba = address; - else - schib->pmcw.mbi = address; - - /* try to submit it */ - switch(ret = msch_err(sch->irq, schib)) { - case 0: - break; - case 1: - case 2: /* in I/O or status pending */ - ret = -EBUSY; - break; - case 3: /* subchannel is no longer valid */ - ret = -ENODEV; - break; - default: /* msch caught an exception */ - ret = -EINVAL; - break; - } - stsch(sch->irq, schib); /* restore the schib */ - if (ret) - break; - - /* check if it worked */ - if (schib->pmcw.mme == mme && - schib->pmcw.mbfc == mbfc && - (mbfc ? (schib->mba == address) - : (schib->pmcw.mbi == address))) - return 0; - - ret = -EINVAL; - } + sch->config.mme = mme; + sch->config.mbfc = mbfc; + /* address can be either a block address or a block index */ + if (mbfc) + sch->config.mba = address; + else + sch->config.mbi = address; - return ret; + return cio_commit_config(sch); } struct set_schib_struct { @@ -223,63 +207,229 @@ struct set_schib_struct { unsigned long address; wait_queue_head_t wait; int ret; + struct kref kref; }; +static void cmf_set_schib_release(struct kref *kref) +{ + struct set_schib_struct *set_data; + + set_data = container_of(kref, struct set_schib_struct, kref); + kfree(set_data); +} + +#define CMF_PENDING 1 + static int set_schib_wait(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) { - struct set_schib_struct s = { - .mme = mme, - .mbfc = mbfc, - .address = address, - .wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait), - }; + struct set_schib_struct *set_data; + int ret; spin_lock_irq(cdev->ccwlock); - s.ret = set_schib(cdev, mme, mbfc, address); - if (s.ret != -EBUSY) { - goto out_nowait; + if (!cdev->private->cmb) { + ret = -ENODEV; + goto out; + } + set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC); + if (!set_data) { + ret = -ENOMEM; + goto out; } + init_waitqueue_head(&set_data->wait); + kref_init(&set_data->kref); + set_data->mme = mme; + set_data->mbfc = mbfc; + set_data->address = address; + + ret = set_schib(cdev, mme, mbfc, address); + if (ret != -EBUSY) + goto out_put; if (cdev->private->state != DEV_STATE_ONLINE) { - s.ret = -EBUSY; /* if the device is not online, don't even try again */ - goto out_nowait; + ret = -EBUSY; + goto out_put; } + cdev->private->state = DEV_STATE_CMFCHANGE; - cdev->private->cmb_wait = &s; - s.ret = 1; + set_data->ret = CMF_PENDING; + cdev->private->cmb_wait = set_data; spin_unlock_irq(cdev->ccwlock); - if (wait_event_interruptible(s.wait, s.ret != 1)) { + if (wait_event_interruptible(set_data->wait, + set_data->ret != CMF_PENDING)) { spin_lock_irq(cdev->ccwlock); - if (s.ret == 1) { - s.ret = -ERESTARTSYS; - cdev->private->cmb_wait = 0; + if (set_data->ret == CMF_PENDING) { + 
set_data->ret = -ERESTARTSYS; if (cdev->private->state == DEV_STATE_CMFCHANGE) cdev->private->state = DEV_STATE_ONLINE; } spin_unlock_irq(cdev->ccwlock); } - return s.ret; - -out_nowait: + spin_lock_irq(cdev->ccwlock); + cdev->private->cmb_wait = NULL; + ret = set_data->ret; +out_put: + kref_put(&set_data->kref, cmf_set_schib_release); +out: spin_unlock_irq(cdev->ccwlock); - return s.ret; + return ret; } void retry_set_schib(struct ccw_device *cdev) { - struct set_schib_struct *s; + struct set_schib_struct *set_data; + + set_data = cdev->private->cmb_wait; + if (!set_data) { + WARN_ON(1); + return; + } + kref_get(&set_data->kref); + set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc, + set_data->address); + wake_up(&set_data->wait); + kref_put(&set_data->kref, cmf_set_schib_release); +} + +static int cmf_copy_block(struct ccw_device *cdev) +{ + struct subchannel *sch; + void *reference_buf; + void *hw_block; + struct cmb_data *cmb_data; + + sch = to_subchannel(cdev->dev.parent); + + if (cio_update_schib(sch)) + return -ENODEV; + + if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { + /* Don't copy if a start function is in progress. */ + if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) && + (scsw_actl(&sch->schib.scsw) & + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && + (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS))) + return -EBUSY; + } + cmb_data = cdev->private->cmb; + hw_block = cmbops->align(cmb_data->hw_block); + if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size)) + /* No need to copy. */ + return 0; + reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC); + if (!reference_buf) + return -ENOMEM; + /* Ensure consistency of block copied from hardware. */ + do { + memcpy(cmb_data->last_block, hw_block, cmb_data->size); + memcpy(reference_buf, hw_block, cmb_data->size); + } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); + cmb_data->last_update = get_tod_clock(); + kfree(reference_buf); + return 0; +} + +struct copy_block_struct { + wait_queue_head_t wait; + int ret; + struct kref kref; +}; + +static void cmf_copy_block_release(struct kref *kref) +{ + struct copy_block_struct *copy_block; + + copy_block = container_of(kref, struct copy_block_struct, kref); + kfree(copy_block); +} + +static int cmf_cmb_copy_wait(struct ccw_device *cdev) +{ + struct copy_block_struct *copy_block; + int ret; + unsigned long flags; + + spin_lock_irqsave(cdev->ccwlock, flags); + if (!cdev->private->cmb) { + ret = -ENODEV; + goto out; + } + copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC); + if (!copy_block) { + ret = -ENOMEM; + goto out; + } + init_waitqueue_head(©_block->wait); + kref_init(©_block->kref); + + ret = cmf_copy_block(cdev); + if (ret != -EBUSY) + goto out_put; + + if (cdev->private->state != DEV_STATE_ONLINE) { + ret = -EBUSY; + goto out_put; + } - s = cdev->private->cmb_wait; - cdev->private->cmb_wait = 0; - if (!s) { + cdev->private->state = DEV_STATE_CMFUPDATE; + copy_block->ret = CMF_PENDING; + cdev->private->cmb_wait = copy_block; + + spin_unlock_irqrestore(cdev->ccwlock, flags); + if (wait_event_interruptible(copy_block->wait, + copy_block->ret != CMF_PENDING)) { + spin_lock_irqsave(cdev->ccwlock, flags); + if (copy_block->ret == CMF_PENDING) { + copy_block->ret = -ERESTARTSYS; + if (cdev->private->state == DEV_STATE_CMFUPDATE) + cdev->private->state = DEV_STATE_ONLINE; + } + spin_unlock_irqrestore(cdev->ccwlock, flags); + } + spin_lock_irqsave(cdev->ccwlock, flags); + cdev->private->cmb_wait = NULL; + ret = 
copy_block->ret; +out_put: + kref_put(©_block->kref, cmf_copy_block_release); +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; +} + +void cmf_retry_copy_block(struct ccw_device *cdev) +{ + struct copy_block_struct *copy_block; + + copy_block = cdev->private->cmb_wait; + if (!copy_block) { WARN_ON(1); return; } - s->ret = set_schib(cdev, s->mme, s->mbfc, s->address); - wake_up(&s->wait); + kref_get(©_block->kref); + copy_block->ret = cmf_copy_block(cdev); + wake_up(©_block->wait); + kref_put(©_block->kref, cmf_copy_block_release); +} + +static void cmf_generic_reset(struct ccw_device *cdev) +{ + struct cmb_data *cmb_data; + + spin_lock_irq(cdev->ccwlock); + cmb_data = cdev->private->cmb; + if (cmb_data) { + memset(cmb_data->last_block, 0, cmb_data->size); + /* + * Need to reset hw block as well to make the hardware start + * from 0 again. + */ + memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); + cmb_data->last_update = 0; + } + cdev->private->cmb_start_time = get_tod_clock(); + spin_unlock_irq(cdev->ccwlock); } /** @@ -287,6 +437,7 @@ void retry_set_schib(struct ccw_device *cdev) * * @mem: pointer to CMBs (only in basic measurement mode) * @list: contains a linked list of all subchannels + * @num_channels: number of channels to be measured * @lock: protect concurrent access to @mem and @list */ struct cmb_area { @@ -297,33 +448,41 @@ struct cmb_area { }; static struct cmb_area cmb_area = { - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock), .list = LIST_HEAD_INIT(cmb_area.list), .num_channels = 1024, }; - /* ****** old style CMB handling ********/ -/** int maxchannels - * +/* * Basic channel measurement blocks are allocated in one contiguous * block of memory, which can not be moved as long as any channel * is active. Therefore, a maximum number of subchannels needs to * be defined somewhere. This is a module parameter, defaulting to - * a resonable value of 1024, or 32 kb of memory. + * a reasonable value of 1024, or 32 kb of memory. * Current kernels don't allow kmalloc with more than 128kb, so the - * maximum is 4096 + * maximum is 4096. */ module_param_named(maxchannels, cmb_area.num_channels, uint, 0444); /** * struct cmb - basic channel measurement block + * @ssch_rsch_count: number of ssch and rsch + * @sample_count: number of samples + * @device_connect_time: time of device connect + * @function_pending_time: time of function pending + * @device_disconnect_time: time of device disconnect + * @control_unit_queuing_time: time of control unit queuing + * @device_active_only_time: time of device active only + * @reserved: unused in basic measurement mode * - * cmb as used by the hardware the fields are described in z/Architecture - * Principles of Operation, chapter 17. - * The area to be a contiguous array and may not be reallocated or freed. + * The measurement block as used by the hardware. The fields are described + * further in z/Architecture Principles of Operation, chapter 17. + * + * The cmb area made up from these blocks must be a contiguous array and may + * not be reallocated or freed. * Only one cmb area can be present in the system. */ struct cmb { @@ -337,11 +496,12 @@ struct cmb { u32 reserved[2]; }; -/* insert a single device into the cmb_area list - * called with cmb_area.lock held from alloc_cmb +/* + * Insert a single device into the cmb_area list. + * Called with cmb_area.lock held from alloc_cmb. 
*/ -static inline int -alloc_cmb_single (struct ccw_device *cdev) +static int alloc_cmb_single(struct ccw_device *cdev, + struct cmb_data *cmb_data) { struct cmb *cmb; struct ccw_device_private *node; @@ -353,12 +513,16 @@ alloc_cmb_single (struct ccw_device *cdev) goto out; } - /* find first unused cmb in cmb_area.mem. - * this is a little tricky: cmb_area.list - * remains sorted by ->cmb pointers */ + /* + * Find first unused cmb in cmb_area.mem. + * This is a little tricky: cmb_area.list + * remains sorted by ->cmb->hw_data pointers. + */ cmb = cmb_area.mem; list_for_each_entry(node, &cmb_area.list, cmb_list) { - if ((struct cmb*)node->cmb > cmb) + struct cmb_data *data; + data = node->cmb; + if ((struct cmb*)data->hw_block > cmb) break; cmb++; } @@ -369,20 +533,32 @@ alloc_cmb_single (struct ccw_device *cdev) /* insert new cmb */ list_add_tail(&cdev->private->cmb_list, &node->cmb_list); - cdev->private->cmb = cmb; + cmb_data->hw_block = cmb; + cdev->private->cmb = cmb_data; ret = 0; out: spin_unlock_irq(cdev->ccwlock); return ret; } -static int -alloc_cmb (struct ccw_device *cdev) +static int alloc_cmb(struct ccw_device *cdev) { int ret; struct cmb *mem; ssize_t size; + struct cmb_data *cmb_data; + /* Allocate private cmb_data. */ + cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL); + if (!cmb_data) + return -ENOMEM; + + cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL); + if (!cmb_data->last_block) { + kfree(cmb_data); + return -ENOMEM; + } + cmb_data->size = sizeof(struct cmb); spin_lock(&cmb_area.lock); if (!cmb_area.mem) { @@ -411,29 +587,36 @@ alloc_cmb (struct ccw_device *cdev) } /* do the actual allocation */ - ret = alloc_cmb_single(cdev); + ret = alloc_cmb_single(cdev, cmb_data); out: spin_unlock(&cmb_area.lock); - + if (ret) { + kfree(cmb_data->last_block); + kfree(cmb_data); + } return ret; } -static void -free_cmb(struct ccw_device *cdev) +static void free_cmb(struct ccw_device *cdev) { struct ccw_device_private *priv; - - priv = cdev->private; + struct cmb_data *cmb_data; spin_lock(&cmb_area.lock); spin_lock_irq(cdev->ccwlock); + priv = cdev->private; + if (list_empty(&priv->cmb_list)) { /* already freed */ goto out; } + cmb_data = priv->cmb; priv->cmb = NULL; + if (cmb_data) + kfree(cmb_data->last_block); + kfree(cmb_data); list_del_init(&priv->cmb_list); if (list_empty(&cmb_area.list)) { @@ -448,83 +631,97 @@ out: spin_unlock(&cmb_area.lock); } -static int -set_cmb(struct ccw_device *cdev, u32 mme) +static int set_cmb(struct ccw_device *cdev, u32 mme) { u16 offset; + struct cmb_data *cmb_data; + unsigned long flags; - if (!cdev->private->cmb) + spin_lock_irqsave(cdev->ccwlock, flags); + if (!cdev->private->cmb) { + spin_unlock_irqrestore(cdev->ccwlock, flags); return -EINVAL; - - offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0; + } + cmb_data = cdev->private->cmb; + offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0; + spin_unlock_irqrestore(cdev->ccwlock, flags); return set_schib_wait(cdev, mme, 0, offset); } -static u64 -read_cmb (struct ccw_device *cdev, int index) +static u64 read_cmb(struct ccw_device *cdev, int index) { - /* yes, we have to put it on the stack - * because the cmb must only be accessed - * atomically, e.g. 
with mvc */ - struct cmb cmb; - unsigned long flags; + struct cmb *cmb; u32 val; + int ret; + unsigned long flags; + + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) + return 0; spin_lock_irqsave(cdev->ccwlock, flags); if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - return 0; + ret = 0; + goto out; } - - cmb = *(struct cmb*)cdev->private->cmb; - spin_unlock_irqrestore(cdev->ccwlock, flags); + cmb = ((struct cmb_data *)cdev->private->cmb)->last_block; switch (index) { case cmb_ssch_rsch_count: - return cmb.ssch_rsch_count; + ret = cmb->ssch_rsch_count; + goto out; case cmb_sample_count: - return cmb.sample_count; + ret = cmb->sample_count; + goto out; case cmb_device_connect_time: - val = cmb.device_connect_time; + val = cmb->device_connect_time; break; case cmb_function_pending_time: - val = cmb.function_pending_time; + val = cmb->function_pending_time; break; case cmb_device_disconnect_time: - val = cmb.device_disconnect_time; + val = cmb->device_disconnect_time; break; case cmb_control_unit_queuing_time: - val = cmb.control_unit_queuing_time; + val = cmb->control_unit_queuing_time; break; case cmb_device_active_only_time: - val = cmb.device_active_only_time; + val = cmb->device_active_only_time; break; default: - return 0; + ret = 0; + goto out; } - return time_to_avg_nsec(val, cmb.sample_count); + ret = time_to_avg_nsec(val, cmb->sample_count); +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; } -static int -readall_cmb (struct ccw_device *cdev, struct cmbdata *data) +static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data) { - /* yes, we have to put it on the stack - * because the cmb must only be accessed - * atomically, e.g. with mvc */ - struct cmb cmb; - unsigned long flags; + struct cmb *cmb; + struct cmb_data *cmb_data; u64 time; + unsigned long flags; + int ret; + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) + return ret; spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - return -ENODEV; + cmb_data = cdev->private->cmb; + if (!cmb_data) { + ret = -ENODEV; + goto out; } - - cmb = *(struct cmb*)cdev->private->cmb; - time = get_clock() - cdev->private->cmb_start_time; - spin_unlock_irqrestore(cdev->ccwlock, flags); + if (cmb_data->last_update == 0) { + ret = -EAGAIN; + goto out; + } + cmb = cmb_data->last_block; + time = cmb_data->last_update - cdev->private->cmb_start_time; memset(data, 0, sizeof(struct cmbdata)); @@ -535,31 +732,32 @@ readall_cmb (struct ccw_device *cdev, struct cmbdata *data) data->elapsed_time = (time * 1000) >> 12; /* copy data to new structure */ - data->ssch_rsch_count = cmb.ssch_rsch_count; - data->sample_count = cmb.sample_count; + data->ssch_rsch_count = cmb->ssch_rsch_count; + data->sample_count = cmb->sample_count; /* time fields are converted to nanoseconds while copying */ - data->device_connect_time = time_to_nsec(cmb.device_connect_time); - data->function_pending_time = time_to_nsec(cmb.function_pending_time); - data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time); + data->device_connect_time = time_to_nsec(cmb->device_connect_time); + data->function_pending_time = time_to_nsec(cmb->function_pending_time); + data->device_disconnect_time = + time_to_nsec(cmb->device_disconnect_time); data->control_unit_queuing_time - = time_to_nsec(cmb.control_unit_queuing_time); + = time_to_nsec(cmb->control_unit_queuing_time); data->device_active_only_time - = time_to_nsec(cmb.device_active_only_time); + = 
time_to_nsec(cmb->device_active_only_time); + ret = 0; +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; +} - return 0; +static void reset_cmb(struct ccw_device *cdev) +{ + cmf_generic_reset(cdev); } -static void -reset_cmb(struct ccw_device *cdev) +static void * align_cmb(void *area) { - struct cmb *cmb; - spin_lock_irq(cdev->ccwlock); - cmb = cdev->private->cmb; - if (cmb) - memset (cmb, 0, sizeof (*cmb)); - cdev->private->cmb_start_time = get_clock(); - spin_unlock_irq(cdev->ccwlock); + return area; } static struct attribute_group cmf_attr_group; @@ -571,16 +769,28 @@ static struct cmb_operations cmbops_basic = { .read = read_cmb, .readall = readall_cmb, .reset = reset_cmb, + .align = align_cmb, .attr_group = &cmf_attr_group, }; - + /* ******** extended cmb handling ********/ /** * struct cmbe - extended channel measurement block + * @ssch_rsch_count: number of ssch and rsch + * @sample_count: number of samples + * @device_connect_time: time of device connect + * @function_pending_time: time of function pending + * @device_disconnect_time: time of device disconnect + * @control_unit_queuing_time: time of control unit queuing + * @device_active_only_time: time of device active only + * @device_busy_time: time of device busy + * @initial_command_response_time: initial command response time + * @reserved: unused * - * cmb as used by the hardware, may be in any 64 bit physical location, - * the fields are described in z/Architecture Principles of Operation, + * The measurement block as used by the hardware. May be in any 64 bit physical + * location. + * The fields are described further in z/Architecture Principles of Operation, * third edition, chapter 17. */ struct cmbe { @@ -596,10 +806,12 @@ struct cmbe { u32 reserved[7]; }; -/* kmalloc only guarantees 8 byte alignment, but we need cmbe +/* + * kmalloc only guarantees 8 byte alignment, but we need cmbe * pointers to be naturally aligned. Make sure to allocate - * enough space for two cmbes */ -static inline struct cmbe* cmbe_align(struct cmbe *c) + * enough space for two cmbes. 
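(Aside, not part of the patch: a worked example of the alignment arithmetic used by cmbe_align() below, with an invented address and taking the 64-bit case where sizeof(long) == 8. struct cmbe is 64 bytes and kmalloc() guarantees only 8-byte alignment, which is why twice the size is allocated.)

/*
 * If kmalloc() returns c = 0x1008, cmbe_align() computes
 *   (0x1008 + sizeof(struct cmbe) - sizeof(long)) & ~(sizeof(struct cmbe) - 1)
 *   = (0x1008 + 64 - 8) & ~63 = 0x1040,
 * i.e. the first 64-byte boundary at or above c. Because c is at least
 * 8-byte aligned, adding 56 can never skip past a second boundary, and
 * the aligned block [0x1040, 0x1080) still lies inside the doubled
 * allocation [0x1008, 0x1088). If c is already 64-byte aligned, the
 * expression yields c itself.
 */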
+ */ +static inline struct cmbe *cmbe_align(struct cmbe *c) { unsigned long addr; addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) & @@ -607,22 +819,34 @@ static inline struct cmbe* cmbe_align(struct cmbe *c) return (struct cmbe*)addr; } -static int -alloc_cmbe (struct ccw_device *cdev) +static int alloc_cmbe(struct ccw_device *cdev) { struct cmbe *cmbe; - cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL); + struct cmb_data *cmb_data; + int ret; + + cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL); if (!cmbe) return -ENOMEM; - + cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL); + if (!cmb_data) { + ret = -ENOMEM; + goto out_free; + } + cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL); + if (!cmb_data->last_block) { + ret = -ENOMEM; + goto out_free; + } + cmb_data->size = sizeof(struct cmbe); spin_lock_irq(cdev->ccwlock); if (cdev->private->cmb) { - kfree(cmbe); spin_unlock_irq(cdev->ccwlock); - return -EBUSY; + ret = -EBUSY; + goto out_free; } - - cdev->private->cmb = cmbe; + cmb_data->hw_block = cmbe; + cdev->private->cmb = cmb_data; spin_unlock_irq(cdev->ccwlock); /* activate global measurement if this is the first channel */ @@ -633,15 +857,24 @@ alloc_cmbe (struct ccw_device *cdev) spin_unlock(&cmb_area.lock); return 0; +out_free: + if (cmb_data) + kfree(cmb_data->last_block); + kfree(cmb_data); + kfree(cmbe); + return ret; } -static void -free_cmbe (struct ccw_device *cdev) +static void free_cmbe(struct ccw_device *cdev) { + struct cmb_data *cmb_data; + spin_lock_irq(cdev->ccwlock); - if (cdev->private->cmb) - kfree(cdev->private->cmb); + cmb_data = cdev->private->cmb; cdev->private->cmb = NULL; + if (cmb_data) + kfree(cmb_data->last_block); + kfree(cmb_data); spin_unlock_irq(cdev->ccwlock); /* deactivate global measurement if this is the last channel */ @@ -652,89 +885,105 @@ free_cmbe (struct ccw_device *cdev) spin_unlock(&cmb_area.lock); } -static int -set_cmbe(struct ccw_device *cdev, u32 mme) +static int set_cmbe(struct ccw_device *cdev, u32 mme) { unsigned long mba; + struct cmb_data *cmb_data; + unsigned long flags; - if (!cdev->private->cmb) + spin_lock_irqsave(cdev->ccwlock, flags); + if (!cdev->private->cmb) { + spin_unlock_irqrestore(cdev->ccwlock, flags); return -EINVAL; - mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0; + } + cmb_data = cdev->private->cmb; + mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0; + spin_unlock_irqrestore(cdev->ccwlock, flags); return set_schib_wait(cdev, mme, 1, mba); } -u64 -read_cmbe (struct ccw_device *cdev, int index) +static u64 read_cmbe(struct ccw_device *cdev, int index) { - /* yes, we have to put it on the stack - * because the cmb must only be accessed - * atomically, e.g. 
with mvc */ - struct cmbe cmb; - unsigned long flags; + struct cmbe *cmb; + struct cmb_data *cmb_data; u32 val; + int ret; + unsigned long flags; - spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) return 0; - } - cmb = *cmbe_align(cdev->private->cmb); - spin_unlock_irqrestore(cdev->ccwlock, flags); + spin_lock_irqsave(cdev->ccwlock, flags); + cmb_data = cdev->private->cmb; + if (!cmb_data) { + ret = 0; + goto out; + } + cmb = cmb_data->last_block; switch (index) { case cmb_ssch_rsch_count: - return cmb.ssch_rsch_count; + ret = cmb->ssch_rsch_count; + goto out; case cmb_sample_count: - return cmb.sample_count; + ret = cmb->sample_count; + goto out; case cmb_device_connect_time: - val = cmb.device_connect_time; + val = cmb->device_connect_time; break; case cmb_function_pending_time: - val = cmb.function_pending_time; + val = cmb->function_pending_time; break; case cmb_device_disconnect_time: - val = cmb.device_disconnect_time; + val = cmb->device_disconnect_time; break; case cmb_control_unit_queuing_time: - val = cmb.control_unit_queuing_time; + val = cmb->control_unit_queuing_time; break; case cmb_device_active_only_time: - val = cmb.device_active_only_time; + val = cmb->device_active_only_time; break; case cmb_device_busy_time: - val = cmb.device_busy_time; + val = cmb->device_busy_time; break; case cmb_initial_command_response_time: - val = cmb.initial_command_response_time; + val = cmb->initial_command_response_time; break; default: - return 0; + ret = 0; + goto out; } - return time_to_avg_nsec(val, cmb.sample_count); + ret = time_to_avg_nsec(val, cmb->sample_count); +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; } -static int -readall_cmbe (struct ccw_device *cdev, struct cmbdata *data) +static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data) { - /* yes, we have to put it on the stack - * because the cmb must only be accessed - * atomically, e.g. 
with mvc */ - struct cmbe cmb; - unsigned long flags; + struct cmbe *cmb; + struct cmb_data *cmb_data; u64 time; + unsigned long flags; + int ret; + ret = cmf_cmb_copy_wait(cdev); + if (ret < 0) + return ret; spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - return -ENODEV; + cmb_data = cdev->private->cmb; + if (!cmb_data) { + ret = -ENODEV; + goto out; } - - cmb = *cmbe_align(cdev->private->cmb); - time = get_clock() - cdev->private->cmb_start_time; - spin_unlock_irqrestore(cdev->ccwlock, flags); + if (cmb_data->last_update == 0) { + ret = -EAGAIN; + goto out; + } + time = cmb_data->last_update - cdev->private->cmb_start_time; memset (data, 0, sizeof(struct cmbdata)); @@ -744,35 +993,38 @@ readall_cmbe (struct ccw_device *cdev, struct cmbdata *data) /* conver to nanoseconds */ data->elapsed_time = (time * 1000) >> 12; + cmb = cmb_data->last_block; /* copy data to new structure */ - data->ssch_rsch_count = cmb.ssch_rsch_count; - data->sample_count = cmb.sample_count; + data->ssch_rsch_count = cmb->ssch_rsch_count; + data->sample_count = cmb->sample_count; /* time fields are converted to nanoseconds while copying */ - data->device_connect_time = time_to_nsec(cmb.device_connect_time); - data->function_pending_time = time_to_nsec(cmb.function_pending_time); - data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time); + data->device_connect_time = time_to_nsec(cmb->device_connect_time); + data->function_pending_time = time_to_nsec(cmb->function_pending_time); + data->device_disconnect_time = + time_to_nsec(cmb->device_disconnect_time); data->control_unit_queuing_time - = time_to_nsec(cmb.control_unit_queuing_time); + = time_to_nsec(cmb->control_unit_queuing_time); data->device_active_only_time - = time_to_nsec(cmb.device_active_only_time); - data->device_busy_time = time_to_nsec(cmb.device_busy_time); + = time_to_nsec(cmb->device_active_only_time); + data->device_busy_time = time_to_nsec(cmb->device_busy_time); data->initial_command_response_time - = time_to_nsec(cmb.initial_command_response_time); + = time_to_nsec(cmb->initial_command_response_time); - return 0; + ret = 0; +out: + spin_unlock_irqrestore(cdev->ccwlock, flags); + return ret; } -static void -reset_cmbe(struct ccw_device *cdev) +static void reset_cmbe(struct ccw_device *cdev) { - struct cmbe *cmb; - spin_lock_irq(cdev->ccwlock); - cmb = cmbe_align(cdev->private->cmb); - if (cmb) - memset (cmb, 0, sizeof (*cmb)); - cdev->private->cmb_start_time = get_clock(); - spin_unlock_irq(cdev->ccwlock); + cmf_generic_reset(cdev); +} + +static void * align_cmbe(void *area) +{ + return cmbe_align(area); } static struct attribute_group cmf_attr_group_ext; @@ -784,36 +1036,43 @@ static struct cmb_operations cmbops_extended = { .read = read_cmbe, .readall = readall_cmbe, .reset = reset_cmbe, + .align = align_cmbe, .attr_group = &cmf_attr_group_ext, }; - -static ssize_t -cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx) +static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx) { return sprintf(buf, "%lld\n", (unsigned long long) cmf_read(to_ccwdev(dev), idx)); } -static ssize_t -cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t cmb_show_avg_sample_interval(struct device *dev, + struct device_attribute *attr, + char *buf) { struct ccw_device *cdev; long interval; unsigned long count; + struct cmb_data *cmb_data; cdev = to_ccwdev(dev); - interval = get_clock() - 
cdev->private->cmb_start_time; count = cmf_read(cdev, cmb_sample_count); - if (count) + spin_lock_irq(cdev->ccwlock); + cmb_data = cdev->private->cmb; + if (count) { + interval = cmb_data->last_update - + cdev->private->cmb_start_time; + interval = (interval * 1000) >> 12; interval /= count; - else + } else interval = -1; + spin_unlock_irq(cdev->ccwlock); return sprintf(buf, "%ld\n", interval); } -static ssize_t -cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t cmb_show_avg_utilization(struct device *dev, + struct device_attribute *attr, + char *buf) { struct cmbdata data; u64 utilization; @@ -821,7 +1080,10 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char int ret; ret = cmf_readall(to_ccwdev(dev), &data); - if (ret) + if (ret == -EAGAIN || ret == -ENODEV) + /* No data (yet/currently) available to use for calculation. */ + return sprintf(buf, "n/a\n"); + else if (ret) return ret; utilization = data.device_connect_time + @@ -842,14 +1104,16 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char } #define cmf_attr(name) \ -static ssize_t show_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \ -{ return cmb_show_attr((dev), buf, cmb_ ## name); } \ -static DEVICE_ATTR(name, 0444, show_ ## name, NULL); +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ return cmb_show_attr((dev), buf, cmb_##name); } \ +static DEVICE_ATTR(name, 0444, show_##name, NULL); #define cmf_attr_avg(name) \ -static ssize_t show_avg_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \ -{ return cmb_show_attr((dev), buf, cmb_ ## name); } \ -static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL); +static ssize_t show_avg_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ return cmb_show_attr((dev), buf, cmb_##name); } \ +static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL); cmf_attr(ssch_rsch_count); cmf_attr(sample_count); @@ -861,7 +1125,8 @@ cmf_attr_avg(device_active_only_time); cmf_attr_avg(device_busy_time); cmf_attr_avg(initial_command_response_time); -static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL); +static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, + NULL); static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL); static struct attribute *cmf_attributes[] = { @@ -874,7 +1139,7 @@ static struct attribute *cmf_attributes[] = { &dev_attr_avg_device_disconnect_time.attr, &dev_attr_avg_control_unit_queuing_time.attr, &dev_attr_avg_device_active_only_time.attr, - 0, + NULL, }; static struct attribute_group cmf_attr_group = { @@ -894,7 +1159,7 @@ static struct attribute *cmf_attributes_ext[] = { &dev_attr_avg_device_active_only_time.attr, &dev_attr_avg_device_busy_time.attr, &dev_attr_avg_initial_command_response_time.attr, - 0, + NULL, }; static struct attribute_group cmf_attr_group_ext = { @@ -902,28 +1167,33 @@ static struct attribute_group cmf_attr_group_ext = { .attrs = cmf_attributes_ext, }; -static ssize_t cmb_enable_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t cmb_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) { return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 
1 : 0); } -static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t c) +static ssize_t cmb_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t c) { struct ccw_device *cdev; int ret; + unsigned long val; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; cdev = to_ccwdev(dev); - switch (buf[0]) { - case '0': + switch (val) { + case 0: ret = disable_cmf(cdev); - if (ret) - printk(KERN_INFO "disable_cmf failed (%d)\n", ret); break; - case '1': + case 1: ret = enable_cmf(cdev); - if (ret && ret != -EBUSY) - printk(KERN_INFO "enable_cmf failed (%d)\n", ret); break; } @@ -932,9 +1202,21 @@ static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *att DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store); -/* enable_cmf/disable_cmf: module interface for cmf (de)activation */ -int -enable_cmf(struct ccw_device *cdev) +int ccw_set_cmf(struct ccw_device *cdev, int enable) +{ + return cmbops->set(cdev, enable ? 2 : 0); +} + +/** + * enable_cmf() - switch on the channel measurement for a specific device + * @cdev: The ccw device to be enabled + * + * Returns %0 for success or a negative error value. + * + * Context: + * non-atomic + */ +int enable_cmf(struct ccw_device *cdev) { int ret; @@ -955,8 +1237,16 @@ enable_cmf(struct ccw_device *cdev) return ret; } -int -disable_cmf(struct ccw_device *cdev) +/** + * disable_cmf() - switch off the channel measurement for a specific device + * @cdev: The ccw device to be disabled + * + * Returns %0 for success or a negative error value. + * + * Context: + * non-atomic + */ +int disable_cmf(struct ccw_device *cdev) { int ret; @@ -968,30 +1258,55 @@ disable_cmf(struct ccw_device *cdev) return ret; } -u64 -cmf_read(struct ccw_device *cdev, int index) +/** + * cmf_read() - read one value from the current channel measurement block + * @cdev: the channel to be read + * @index: the index of the value to be read + * + * Returns the value read or %0 if the value cannot be read. + * + * Context: + * any + */ +u64 cmf_read(struct ccw_device *cdev, int index) { return cmbops->read(cdev, index); } -int -cmf_readall(struct ccw_device *cdev, struct cmbdata *data) +/** + * cmf_readall() - read the current channel measurement block + * @cdev: the channel to be read + * @data: a pointer to a data block that will be filled + * + * Returns %0 on success, a negative error value otherwise. + * + * Context: + * any + */ +int cmf_readall(struct ccw_device *cdev, struct cmbdata *data) { return cmbops->readall(cdev, data); } -static int __init -init_cmf(void) +/* Reenable cmf when a disconnected device becomes available again. */ +int cmf_reenable(struct ccw_device *cdev) +{ + cmbops->reset(cdev); + return cmbops->set(cdev, 2); +} + +static int __init init_cmf(void) { char *format_string; char *detect_string = "parameter"; - /* We cannot really autoprobe this. If the user did not give a parameter, - see if we are running on z990 or up, otherwise fall back to basic mode. */ - + /* + * If the user did not give a parameter, see if we are running on a + * machine supporting extended measurement blocks, otherwise fall back + * to basic mode. 
+ */ if (format == CMF_AUTODETECT) { - if (!css_characteristics_avail || - !css_general_characteristics.ext_mb) { + if (!css_general_characteristics.ext_mb) { format = CMF_BASIC; } else { format = CMF_EXTENDED; @@ -1005,26 +1320,16 @@ init_cmf(void) case CMF_BASIC: format_string = "basic"; cmbops = &cmbops_basic; - if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) { - printk(KERN_ERR "Basic channel measurement facility" - " can only use 1 to 4096 devices\n" - KERN_ERR "when the cmf driver is built" - " as a loadable module\n"); - return 1; - } break; case CMF_EXTENDED: - format_string = "extended"; + format_string = "extended"; cmbops = &cmbops_extended; break; default: - printk(KERN_ERR "Invalid format %d for channel " - "measurement facility\n", format); return 1; } - - printk(KERN_INFO "Channel measurement facility using %s format (%s)\n", - format_string, detect_string); + pr_info("Channel measurement facility initialized using format " + "%s (mode %s)\n", format_string, detect_string); return 0; } @@ -1034,7 +1339,7 @@ module_init(init_cmf); MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("channel measurement facility base driver\n" - "Copyright 2003 IBM Corporation\n"); + "Copyright IBM Corp. 2003\n"); EXPORT_SYMBOL_GPL(enable_cmf); EXPORT_SYMBOL_GPL(disable_cmf); diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c new file mode 100644 index 00000000000..0f8a25f98b1 --- /dev/null +++ b/drivers/s390/cio/crw.c @@ -0,0 +1,161 @@ +/* + * Channel report handling code + * + * Copyright IBM Corp. 2000, 2009 + * Author(s): Ingo Adlung <adlung@de.ibm.com>, + * Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Cornelia Huck <cornelia.huck@de.ibm.com>, + * Heiko Carstens <heiko.carstens@de.ibm.com>, + */ + +#include <linux/mutex.h> +#include <linux/kthread.h> +#include <linux/init.h> +#include <linux/wait.h> +#include <asm/crw.h> +#include <asm/ctl_reg.h> + +static DEFINE_MUTEX(crw_handler_mutex); +static crw_handler_t crw_handlers[NR_RSCS]; +static atomic_t crw_nr_req = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q); + +/** + * crw_register_handler() - register a channel report word handler + * @rsc: reporting source code to handle + * @handler: handler to be registered + * + * Returns %0 on success and a negative error value otherwise. + */ +int crw_register_handler(int rsc, crw_handler_t handler) +{ + int rc = 0; + + if ((rsc < 0) || (rsc >= NR_RSCS)) + return -EINVAL; + mutex_lock(&crw_handler_mutex); + if (crw_handlers[rsc]) + rc = -EBUSY; + else + crw_handlers[rsc] = handler; + mutex_unlock(&crw_handler_mutex); + return rc; +} + +/** + * crw_unregister_handler() - unregister a channel report word handler + * @rsc: reporting source code to handle + */ +void crw_unregister_handler(int rsc) +{ + if ((rsc < 0) || (rsc >= NR_RSCS)) + return; + mutex_lock(&crw_handler_mutex); + crw_handlers[rsc] = NULL; + mutex_unlock(&crw_handler_mutex); +} + +/* + * Retrieve CRWs and call function to handle event. 
+ */ +static int crw_collect_info(void *unused) +{ + struct crw crw[2]; + int ccode, signal; + unsigned int chain; + +repeat: + signal = wait_event_interruptible(crw_handler_wait_q, + atomic_read(&crw_nr_req) > 0); + if (unlikely(signal)) + atomic_inc(&crw_nr_req); + chain = 0; + while (1) { + crw_handler_t handler; + + if (unlikely(chain > 1)) { + struct crw tmp_crw; + + printk(KERN_WARNING"%s: Code does not support more " + "than two chained crws; please report to " + "linux390@de.ibm.com!\n", __func__); + ccode = stcrw(&tmp_crw); + printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + __func__, tmp_crw.slct, tmp_crw.oflw, + tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc, + tmp_crw.erc, tmp_crw.rsid); + printk(KERN_WARNING"%s: This was crw number %x in the " + "chain\n", __func__, chain); + if (ccode != 0) + break; + chain = tmp_crw.chn ? chain + 1 : 0; + continue; + } + ccode = stcrw(&crw[chain]); + if (ccode != 0) + break; + printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw[chain].slct, crw[chain].oflw, crw[chain].chn, + crw[chain].rsc, crw[chain].anc, crw[chain].erc, + crw[chain].rsid); + /* Check for overflows. */ + if (crw[chain].oflw) { + int i; + + pr_debug("%s: crw overflow detected!\n", __func__); + mutex_lock(&crw_handler_mutex); + for (i = 0; i < NR_RSCS; i++) { + if (crw_handlers[i]) + crw_handlers[i](NULL, NULL, 1); + } + mutex_unlock(&crw_handler_mutex); + chain = 0; + continue; + } + if (crw[0].chn && !chain) { + chain++; + continue; + } + mutex_lock(&crw_handler_mutex); + handler = crw_handlers[crw[chain].rsc]; + if (handler) + handler(&crw[0], chain ? &crw[1] : NULL, 0); + mutex_unlock(&crw_handler_mutex); + /* chain is always 0 or 1 here. */ + chain = crw[chain].chn ? chain + 1 : 0; + } + if (atomic_dec_and_test(&crw_nr_req)) + wake_up(&crw_handler_wait_q); + goto repeat; + return 0; +} + +void crw_handle_channel_report(void) +{ + atomic_inc(&crw_nr_req); + wake_up(&crw_handler_wait_q); +} + +void crw_wait_for_channel_report(void) +{ + crw_handle_channel_report(); + wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0); +} + +/* + * Machine checks for the channel subsystem must be enabled + * after the channel subsystem is initialized + */ +static int __init crw_machine_check_init(void) +{ + struct task_struct *task; + + task = kthread_run(crw_collect_info, NULL, "kmcheck"); + if (IS_ERR(task)) + return PTR_ERR(task); + ctl_set_bit(14, 28); /* enable channel report MCH */ + return 0; +} +device_initcall(crw_machine_check_init); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 555119cacc2..0268e5fd59b 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1,130 +1,350 @@ /* - * drivers/s390/cio/css.c - * driver for channel subsystem - * $Revision: 1.85 $ + * driver for channel subsystem * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Arnd Bergmann (arndb@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Copyright IBM Corp. 
2002, 2010 + * + * Author(s): Arnd Bergmann (arndb@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) */ + +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/list.h> +#include <linux/reboot.h> +#include <linux/suspend.h> +#include <linux/proc_fs.h> +#include <asm/isc.h> +#include <asm/crw.h> #include "css.h" #include "cio.h" #include "cio_debug.h" #include "ioasm.h" #include "chsc.h" +#include "device.h" +#include "idset.h" +#include "chp.h" -unsigned int highest_subchannel; -int need_rescan = 0; int css_init_done = 0; +int max_ssid; + +struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; +static struct bus_type css_bus_type; + +int +for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) +{ + struct subchannel_id schid; + int ret; -struct pgid global_pgid; -int css_characteristics_avail = 0; + init_subchannel_id(&schid); + ret = -ENODEV; + do { + do { + ret = fn(schid, data); + if (ret) + break; + } while (schid.sch_no++ < __MAX_SUBCHANNEL); + schid.sch_no = 0; + } while (schid.ssid++ < max_ssid); + return ret; +} -struct device css_bus_device = { - .bus_id = "css0", +struct cb_data { + void *data; + struct idset *set; + int (*fn_known_sch)(struct subchannel *, void *); + int (*fn_unknown_sch)(struct subchannel_id, void *); }; -static struct subchannel * -css_alloc_subchannel(int irq) +static int call_fn_known_sch(struct device *dev, void *data) +{ + struct subchannel *sch = to_subchannel(dev); + struct cb_data *cb = data; + int rc = 0; + + if (cb->set) + idset_sch_del(cb->set, sch->schid); + if (cb->fn_known_sch) + rc = cb->fn_known_sch(sch, cb->data); + return rc; +} + +static int call_fn_unknown_sch(struct subchannel_id schid, void *data) { + struct cb_data *cb = data; + int rc = 0; + + if (idset_sch_contains(cb->set, schid)) + rc = cb->fn_unknown_sch(schid, cb->data); + return rc; +} + +static int call_fn_all_sch(struct subchannel_id schid, void *data) +{ + struct cb_data *cb = data; struct subchannel *sch; - int ret; + int rc = 0; - sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); - if (sch == NULL) - return ERR_PTR(-ENOMEM); - ret = cio_validate_subchannel (sch, irq); - if (ret < 0) { - kfree(sch); - return ERR_PTR(ret); + sch = get_subchannel_by_schid(schid); + if (sch) { + if (cb->fn_known_sch) + rc = cb->fn_known_sch(sch, cb->data); + put_device(&sch->dev); + } else { + if (cb->fn_unknown_sch) + rc = cb->fn_unknown_sch(schid, cb->data); } - if (irq > highest_subchannel) - highest_subchannel = irq; - if (sch->st != SUBCHANNEL_TYPE_IO) { - /* For now we ignore all non-io subchannels. */ - kfree(sch); - return ERR_PTR(-EINVAL); - } + return rc; +} - /* - * Set intparm to subchannel address. - * This is fine even on 64bit since the subchannel is always located - * under 2G. - */ - sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; - ret = cio_modify(sch); - if (ret) { - kfree(sch); - return ERR_PTR(ret); +int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), + int (*fn_unknown)(struct subchannel_id, + void *), void *data) +{ + struct cb_data cb; + int rc; + + cb.data = data; + cb.fn_known_sch = fn_known; + cb.fn_unknown_sch = fn_unknown; + + if (fn_known && !fn_unknown) { + /* Skip idset allocation in case of known-only loop. 
*/ + cb.set = NULL; + return bus_for_each_dev(&css_bus_type, NULL, &cb, + call_fn_known_sch); } + + cb.set = idset_sch_new(); + if (!cb.set) + /* fall back to brute force scanning in case of oom */ + return for_each_subchannel(call_fn_all_sch, &cb); + + idset_fill(cb.set); + + /* Process registered subchannels. */ + rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); + if (rc) + goto out; + /* Process unregistered subchannels. */ + if (fn_unknown) + rc = for_each_subchannel(call_fn_unknown_sch, &cb); +out: + idset_free(cb.set); + + return rc; +} + +static void css_sch_todo(struct work_struct *work); + +static int css_sch_create_locks(struct subchannel *sch) +{ + sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL); + if (!sch->lock) + return -ENOMEM; + + spin_lock_init(sch->lock); + mutex_init(&sch->reg_mutex); + + return 0; +} + +static void css_subchannel_release(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + + sch->config.intparm = 0; + cio_commit_config(sch); + kfree(sch->lock); + kfree(sch); +} + +struct subchannel *css_alloc_subchannel(struct subchannel_id schid) +{ + struct subchannel *sch; + int ret; + + sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA); + if (!sch) + return ERR_PTR(-ENOMEM); + + ret = cio_validate_subchannel(sch, schid); + if (ret < 0) + goto err; + + ret = css_sch_create_locks(sch); + if (ret) + goto err; + + INIT_WORK(&sch->todo_work, css_sch_todo); + sch->dev.release = &css_subchannel_release; + device_initialize(&sch->dev); return sch; + +err: + kfree(sch); + return ERR_PTR(ret); } -static void -css_free_subchannel(struct subchannel *sch) +static int css_sch_device_register(struct subchannel *sch) { - if (sch) { - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - kfree(sch); + int ret; + + mutex_lock(&sch->reg_mutex); + dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, + sch->schid.sch_no); + ret = device_add(&sch->dev); + mutex_unlock(&sch->reg_mutex); + return ret; +} + +/** + * css_sch_device_unregister - unregister a subchannel + * @sch: subchannel to be unregistered + */ +void css_sch_device_unregister(struct subchannel *sch) +{ + mutex_lock(&sch->reg_mutex); + if (device_is_registered(&sch->dev)) + device_unregister(&sch->dev); + mutex_unlock(&sch->reg_mutex); +} +EXPORT_SYMBOL_GPL(css_sch_device_unregister); + +static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) +{ + int i; + int mask; + + memset(ssd, 0, sizeof(struct chsc_ssd_info)); + ssd->path_mask = pmcw->pim; + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (pmcw->pim & mask) { + chp_id_init(&ssd->chpid[i]); + ssd->chpid[i].id = pmcw->chpid[i]; + } } - } -static void -css_subchannel_release(struct device *dev) +static void ssd_register_chpids(struct chsc_ssd_info *ssd) { - struct subchannel *sch; + int i; + int mask; + + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (ssd->path_mask & mask) + if (!chp_is_registered(ssd->chpid[i])) + chp_new(ssd->chpid[i]); + } +} - sch = to_subchannel(dev); - if (!cio_is_console(sch->irq)) - kfree(sch); +void css_update_ssd_info(struct subchannel *sch) +{ + int ret; + + ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); + if (ret) + ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); + + ssd_register_chpids(&sch->ssd_info); } -extern int css_get_ssd_info(struct subchannel *sch); +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); -static int -css_register_subchannel(struct 
subchannel *sch) + return sprintf(buf, "%01x\n", sch->st); +} + +static DEVICE_ATTR(type, 0444, type_show, NULL); + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "css:t%01X\n", sch->st); +} + +static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); + +static struct attribute *subch_attrs[] = { + &dev_attr_type.attr, + &dev_attr_modalias.attr, + NULL, +}; + +static struct attribute_group subch_attr_group = { + .attrs = subch_attrs, +}; + +static const struct attribute_group *default_subch_attr_groups[] = { + &subch_attr_group, + NULL, +}; + +int css_register_subchannel(struct subchannel *sch) { int ret; /* Initialize the subchannel structure */ - sch->dev.parent = &css_bus_device; + sch->dev.parent = &channel_subsystems[0]->device; sch->dev.bus = &css_bus_type; - sch->dev.release = &css_subchannel_release; - + sch->dev.groups = default_subch_attr_groups; + /* + * We don't want to generate uevents for I/O subchannels that don't + * have a working ccw device behind them since they will be + * unregistered before they can be used anyway, so we delay the add + * uevent until after device recognition was successful. + * Note that we suppress the uevent for all subchannel types; + * the subchannel driver can decide itself when it wants to inform + * userspace of its existence. + */ + dev_set_uevent_suppress(&sch->dev, 1); + css_update_ssd_info(sch); /* make it known to the system */ - ret = device_register(&sch->dev); - if (ret) - printk (KERN_WARNING "%s: could not register %s\n", - __func__, sch->dev.bus_id); - else - css_get_ssd_info(sch); + ret = css_sch_device_register(sch); + if (ret) { + CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n", + sch->schid.ssid, sch->schid.sch_no, ret); + return ret; + } + if (!sch->driver) { + /* + * No driver matched. Generate the uevent now so that + * a fitting driver module may be loaded based on the + * modalias. + */ + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } return ret; } -int -css_probe_device(int irq) +static int css_probe_device(struct subchannel_id schid) { - int ret; struct subchannel *sch; + int ret; - sch = css_alloc_subchannel(irq); + sch = css_alloc_subchannel(schid); if (IS_ERR(sch)) return PTR_ERR(sch); + ret = css_register_subchannel(sch); if (ret) - css_free_subchannel(sch); + put_device(&sch->dev); + return ret; } @@ -132,438 +352,943 @@ static int check_subchannel(struct device * dev, void * data) { struct subchannel *sch; - int irq = (unsigned long)data; + struct subchannel_id *schid = data; sch = to_subchannel(dev); - return (sch->irq == irq); + return schid_equal(&sch->schid, schid); } struct subchannel * -get_subchannel_by_schid(int irq) +get_subchannel_by_schid(struct subchannel_id schid) { struct device *dev; dev = bus_find_device(&css_bus_type, NULL, - (void *)(unsigned long)irq, check_subchannel); + &schid, check_subchannel); return dev ? 
to_subchannel(dev) : NULL; } +/** + * css_sch_is_valid() - check if a subchannel is valid + * @schib: subchannel information block for the subchannel + */ +int css_sch_is_valid(struct schib *schib) +{ + if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) + return 0; + if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) + return 0; + return 1; +} +EXPORT_SYMBOL_GPL(css_sch_is_valid); -static inline int -css_get_subchannel_status(struct subchannel *sch, int schid) +static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) { struct schib schib; - int cc; - - cc = stsch(schid, &schib); - if (cc) - return CIO_GONE; - if (!schib.pmcw.dnv) - return CIO_GONE; - if (sch && sch->schib.pmcw.dnv && - (schib.pmcw.dev != sch->schib.pmcw.dev)) - return CIO_REVALIDATE; - if (sch && !sch->lpm) - return CIO_NO_PATH; - return CIO_OPER; -} - -static int -css_evaluate_subchannel(int irq, int slow) -{ - int event, ret, disc; - struct subchannel *sch; - unsigned long flags; - sch = get_subchannel_by_schid(irq); - disc = sch ? device_is_disconnected(sch) : 0; - if (disc && slow) { - if (sch) - put_device(&sch->dev); - return 0; /* Already processed. */ + if (!slow) { + /* Will be done on the slow path. */ + return -EAGAIN; } - /* - * We've got a machine check, so running I/O won't get an interrupt. - * Kill any pending timers. - */ - if (sch) - device_kill_pending_timer(sch); - if (!disc && !slow) { - if (sch) - put_device(&sch->dev); - return -EAGAIN; /* Will be done on the slow path. */ + if (stsch_err(schid, &schib)) { + /* Subchannel is not provided. */ + return -ENXIO; } - event = css_get_subchannel_status(sch, irq); - CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n", - irq, event, sch?(disc?"disconnected":"normal"):"unknown", - slow?"slow":"fast"); - switch (event) { - case CIO_NO_PATH: - case CIO_GONE: - if (!sch) { - /* Never used this subchannel. Ignore. */ - ret = 0; - break; - } - if (disc && (event == CIO_NO_PATH)) { - /* - * Uargh, hack again. Because we don't get a machine - * check on configure on, our path bookkeeping can - * be out of date here (it's fine while we only do - * logical varying or get chsc machine checks). We - * need to force reprobing or we might miss devices - * coming operational again. It won't do harm in real - * no path situations. - */ - spin_lock_irqsave(&sch->lock, flags); - device_trigger_reprobe(sch); - spin_unlock_irqrestore(&sch->lock, flags); - ret = 0; - break; - } - if (sch->driver && sch->driver->notify && - sch->driver->notify(&sch->dev, event)) { - cio_disable_subchannel(sch); - device_set_disconnected(sch); - ret = 0; - break; - } - /* - * Unregister subchannel. - * The device will be killed automatically. - */ - cio_disable_subchannel(sch); - device_unregister(&sch->dev); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); + if (!css_sch_is_valid(&schib)) { + /* Unusable - ignore. 
*/ + return 0; + } + CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid, + schid.sch_no); + + return css_probe_device(schid); +} + +static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) +{ + int ret = 0; + + if (sch->driver) { + if (sch->driver->sch_event) + ret = sch->driver->sch_event(sch, slow); + else + dev_dbg(&sch->dev, + "Got subchannel machine check but " + "no sch_event handler provided.\n"); + } + if (ret != 0 && ret != -EAGAIN) { + CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n", + sch->schid.ssid, sch->schid.sch_no, ret); + } + return ret; +} + +static void css_evaluate_subchannel(struct subchannel_id schid, int slow) +{ + struct subchannel *sch; + int ret; + + sch = get_subchannel_by_schid(schid); + if (sch) { + ret = css_evaluate_known_subchannel(sch, slow); put_device(&sch->dev); - ret = 0; + } else + ret = css_evaluate_new_subchannel(schid, slow); + if (ret == -EAGAIN) + css_schedule_eval(schid); +} + +/** + * css_sched_sch_todo - schedule a subchannel operation + * @sch: subchannel + * @todo: todo + * + * Schedule the operation identified by @todo to be performed on the slow path + * workqueue. Do nothing if another operation with higher priority is already + * scheduled. Needs to be called with subchannel lock held. + */ +void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) +{ + CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", + sch->schid.ssid, sch->schid.sch_no, todo); + if (sch->todo >= todo) + return; + /* Get workqueue ref. */ + if (!get_device(&sch->dev)) + return; + sch->todo = todo; + if (!queue_work(cio_work_q, &sch->todo_work)) { + /* Already queued, release workqueue ref. */ + put_device(&sch->dev); + } +} +EXPORT_SYMBOL_GPL(css_sched_sch_todo); + +static void css_sch_todo(struct work_struct *work) +{ + struct subchannel *sch; + enum sch_todo todo; + int ret; + + sch = container_of(work, struct subchannel, todo_work); + /* Find out todo. */ + spin_lock_irq(sch->lock); + todo = sch->todo; + CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, + sch->schid.sch_no, todo); + sch->todo = SCH_TODO_NOTHING; + spin_unlock_irq(sch->lock); + /* Perform todo. */ + switch (todo) { + case SCH_TODO_NOTHING: break; - case CIO_REVALIDATE: - /* - * Revalidation machine check. Sick. - * We don't notify the driver since we have to throw the device - * away in any case. - */ - if (!disc) { - device_unregister(&sch->dev); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - put_device(&sch->dev); - ret = css_probe_device(irq); - } else { - /* - * We can't immediately deregister the disconnected - * device since it might block. - */ - spin_lock_irqsave(&sch->lock, flags); - device_trigger_reprobe(sch); - spin_unlock_irqrestore(&sch->lock, flags); - ret = 0; + case SCH_TODO_EVAL: + ret = css_evaluate_known_subchannel(sch, 1); + if (ret == -EAGAIN) { + spin_lock_irq(sch->lock); + css_sched_sch_todo(sch, todo); + spin_unlock_irq(sch->lock); } break; - case CIO_OPER: - if (disc) { - spin_lock_irqsave(&sch->lock, flags); - /* Get device operational again. */ - device_trigger_reprobe(sch); - spin_unlock_irqrestore(&sch->lock, flags); - } - ret = sch ? 0 : css_probe_device(irq); + case SCH_TODO_UNREG: + css_sch_device_unregister(sch); break; - default: - BUG(); - ret = 0; } - return ret; + /* Release workqueue ref. 
*/ + put_device(&sch->dev); } -static void -css_rescan_devices(void) +static struct idset *slow_subchannel_set; +static spinlock_t slow_subchannel_lock; +static wait_queue_head_t css_eval_wq; +static atomic_t css_eval_scheduled; + +static int __init slow_subchannel_init(void) +{ + spin_lock_init(&slow_subchannel_lock); + atomic_set(&css_eval_scheduled, 0); + init_waitqueue_head(&css_eval_wq); + slow_subchannel_set = idset_sch_new(); + if (!slow_subchannel_set) { + CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n"); + return -ENOMEM; + } + return 0; +} + +static int slow_eval_known_fn(struct subchannel *sch, void *data) { - int irq, ret; + int eval; + int rc; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - ret = css_evaluate_subchannel(irq, 1); - /* No more memory. It doesn't make sense to continue. No - * panic because this can happen in midflight and just - * because we can't use a new device is no reason to crash - * the system. */ - if (ret == -ENOMEM) + spin_lock_irq(&slow_subchannel_lock); + eval = idset_sch_contains(slow_subchannel_set, sch->schid); + idset_sch_del(slow_subchannel_set, sch->schid); + spin_unlock_irq(&slow_subchannel_lock); + if (eval) { + rc = css_evaluate_known_subchannel(sch, 1); + if (rc == -EAGAIN) + css_schedule_eval(sch->schid); + } + return 0; +} + +static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) +{ + int eval; + int rc = 0; + + spin_lock_irq(&slow_subchannel_lock); + eval = idset_sch_contains(slow_subchannel_set, schid); + idset_sch_del(slow_subchannel_set, schid); + spin_unlock_irq(&slow_subchannel_lock); + if (eval) { + rc = css_evaluate_new_subchannel(schid, 1); + switch (rc) { + case -EAGAIN: + css_schedule_eval(schid); + rc = 0; break; - /* -ENXIO indicates that there are no more subchannels. */ - if (ret == -ENXIO) + case -ENXIO: + case -ENOMEM: + case -EIO: + /* These should abort looping */ + spin_lock_irq(&slow_subchannel_lock); + idset_sch_del_subseq(slow_subchannel_set, schid); + spin_unlock_irq(&slow_subchannel_lock); break; + default: + rc = 0; + } + /* Allow scheduling here since the containing loop might + * take a while. 
*/ + cond_resched(); } + return rc; } -struct slow_subchannel { - struct list_head slow_list; - unsigned long schid; -}; +static void css_slow_path_func(struct work_struct *unused) +{ + unsigned long flags; -static LIST_HEAD(slow_subchannels_head); -static DEFINE_SPINLOCK(slow_subchannel_lock); + CIO_TRACE_EVENT(4, "slowpath"); + for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn, + NULL); + spin_lock_irqsave(&slow_subchannel_lock, flags); + if (idset_is_empty(slow_subchannel_set)) { + atomic_set(&css_eval_scheduled, 0); + wake_up(&css_eval_wq); + } + spin_unlock_irqrestore(&slow_subchannel_lock, flags); +} -static void -css_trigger_slow_path(void) +static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func); +struct workqueue_struct *cio_work_q; + +void css_schedule_eval(struct subchannel_id schid) { - CIO_TRACE_EVENT(4, "slowpath"); + unsigned long flags; - if (need_rescan) { - need_rescan = 0; - css_rescan_devices(); - return; - } + spin_lock_irqsave(&slow_subchannel_lock, flags); + idset_sch_add(slow_subchannel_set, schid); + atomic_set(&css_eval_scheduled, 1); + queue_delayed_work(cio_work_q, &slow_path_work, 0); + spin_unlock_irqrestore(&slow_subchannel_lock, flags); +} - spin_lock_irq(&slow_subchannel_lock); - while (!list_empty(&slow_subchannels_head)) { - struct slow_subchannel *slow_sch = - list_entry(slow_subchannels_head.next, - struct slow_subchannel, slow_list); +void css_schedule_eval_all(void) +{ + unsigned long flags; + + spin_lock_irqsave(&slow_subchannel_lock, flags); + idset_fill(slow_subchannel_set); + atomic_set(&css_eval_scheduled, 1); + queue_delayed_work(cio_work_q, &slow_path_work, 0); + spin_unlock_irqrestore(&slow_subchannel_lock, flags); +} + +static int __unset_registered(struct device *dev, void *data) +{ + struct idset *set = data; + struct subchannel *sch = to_subchannel(dev); + + idset_sch_del(set, sch->schid); + return 0; +} + +void css_schedule_eval_all_unreg(unsigned long delay) +{ + unsigned long flags; + struct idset *unreg_set; - list_del_init(slow_subchannels_head.next); - spin_unlock_irq(&slow_subchannel_lock); - css_evaluate_subchannel(slow_sch->schid, 1); - spin_lock_irq(&slow_subchannel_lock); - kfree(slow_sch); + /* Find unregistered subchannels. */ + unreg_set = idset_sch_new(); + if (!unreg_set) { + /* Fallback. */ + css_schedule_eval_all(); + return; } - spin_unlock_irq(&slow_subchannel_lock); + idset_fill(unreg_set); + bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered); + /* Apply to slow_subchannel_set. */ + spin_lock_irqsave(&slow_subchannel_lock, flags); + idset_add_set(slow_subchannel_set, unreg_set); + atomic_set(&css_eval_scheduled, 1); + queue_delayed_work(cio_work_q, &slow_path_work, delay); + spin_unlock_irqrestore(&slow_subchannel_lock, flags); + idset_free(unreg_set); } -typedef void (*workfunc)(void *); -DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL); -struct workqueue_struct *slow_path_wq; +void css_wait_for_slow_path(void) +{ + flush_workqueue(cio_work_q); +} -/* - * Rescan for new devices. FIXME: This is slow. - * This function is called when we have lost CRWs due to overflows and we have - * to do subchannel housekeeping. - */ -void -css_reiterate_subchannels(void) +/* Schedule reprobing of all unregistered subchannels. */ +void css_schedule_reprobe(void) { - css_clear_subchannel_slow_list(); - need_rescan = 1; + /* Schedule with a delay to allow merging of subsequent calls. 
*/ + css_schedule_eval_all_unreg(1 * HZ); } +EXPORT_SYMBOL_GPL(css_schedule_reprobe); /* * Called from the machine check handler for subchannel report words. */ -int -css_process_crw(int irq) +static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { - int ret; - - CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); + struct subchannel_id mchk_schid; + struct subchannel *sch; - if (need_rescan) - /* We need to iterate all subchannels anyway. */ - return -EAGAIN; - /* + if (overflow) { + css_schedule_eval_all(); + return; + } + CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, + crw0->erc, crw0->rsid); + if (crw1) + CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, + crw1->anc, crw1->erc, crw1->rsid); + init_subchannel_id(&mchk_schid); + mchk_schid.sch_no = crw0->rsid; + if (crw1) + mchk_schid.ssid = (crw1->rsid >> 4) & 3; + + if (crw0->erc == CRW_ERC_PMOD) { + sch = get_subchannel_by_schid(mchk_schid); + if (sch) { + css_update_ssd_info(sch); + put_device(&sch->dev); + } + } + /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come * or gone. */ - ret = css_evaluate_subchannel(irq, 0); - if (ret == -EAGAIN) { - if (css_enqueue_subchannel_slow(irq)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - } - return ret; + css_evaluate_subchannel(mchk_schid, 0); } static void __init -css_generate_pgid(void) +css_generate_pgid(struct channel_subsystem *css, u32 tod_high) { - /* Let's build our path group ID here. */ - if (css_characteristics_avail && css_general_characteristics.mcss) - global_pgid.cpu_addr = 0x8000; - else { + struct cpuid cpu_id; + + if (css_general_characteristics.mcss) { + css->global_pgid.pgid_high.ext_cssid.version = 0x80; + css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; + } else { #ifdef CONFIG_SMP - global_pgid.cpu_addr = hard_smp_processor_id(); + css->global_pgid.pgid_high.cpu_addr = stap(); #else - global_pgid.cpu_addr = 0; + css->global_pgid.pgid_high.cpu_addr = 0; #endif } - global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; - global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; - global_pgid.tod_high = (__u32) (get_clock() >> 32); + get_cpu_id(&cpu_id); + css->global_pgid.cpu_id = cpu_id.ident; + css->global_pgid.cpu_model = cpu_id.machine; + css->global_pgid.tod_high = tod_high; + +} + +static void +channel_subsystem_release(struct device *dev) +{ + struct channel_subsystem *css; + + css = to_css(dev); + mutex_destroy(&css->mutex); + if (css->pseudo_subchannel) { + /* Implies that it has been generated but never registered. 
*/ + css_subchannel_release(&css->pseudo_subchannel->dev); + css->pseudo_subchannel = NULL; + } + kfree(css); +} + +static ssize_t +css_cm_enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_subsystem *css = to_css(dev); + int ret; + + if (!css) + return 0; + mutex_lock(&css->mutex); + ret = sprintf(buf, "%x\n", css->cm_enabled); + mutex_unlock(&css->mutex); + return ret; +} + +static ssize_t +css_cm_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct channel_subsystem *css = to_css(dev); + int ret; + unsigned long val; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + mutex_lock(&css->mutex); + switch (val) { + case 0: + ret = css->cm_enabled ? chsc_secm(css, 0) : 0; + break; + case 1: + ret = css->cm_enabled ? 0 : chsc_secm(css, 1); + break; + default: + ret = -EINVAL; + } + mutex_unlock(&css->mutex); + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); + +static int __init setup_css(int nr) +{ + u32 tod_high; + int ret; + struct channel_subsystem *css; + + css = channel_subsystems[nr]; + memset(css, 0, sizeof(struct channel_subsystem)); + css->pseudo_subchannel = + kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL); + if (!css->pseudo_subchannel) + return -ENOMEM; + css->pseudo_subchannel->dev.parent = &css->device; + css->pseudo_subchannel->dev.release = css_subchannel_release; + dev_set_name(&css->pseudo_subchannel->dev, "defunct"); + mutex_init(&css->pseudo_subchannel->reg_mutex); + ret = css_sch_create_locks(css->pseudo_subchannel); + if (ret) { + kfree(css->pseudo_subchannel); + return ret; + } + mutex_init(&css->mutex); + css->valid = 1; + css->cssid = nr; + dev_set_name(&css->device, "css%x", nr); + css->device.release = channel_subsystem_release; + tod_high = (u32) (get_tod_clock() >> 32); + css_generate_pgid(css, tod_high); + return 0; +} + +static int css_reboot_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + int ret, i; + + ret = NOTIFY_DONE; + for (i = 0; i <= __MAX_CSSID; i++) { + struct channel_subsystem *css; + + css = channel_subsystems[i]; + mutex_lock(&css->mutex); + if (css->cm_enabled) + if (chsc_secm(css, 0)) + ret = NOTIFY_BAD; + mutex_unlock(&css->mutex); + } + + return ret; +} + +static struct notifier_block css_reboot_notifier = { + .notifier_call = css_reboot_event, +}; + +/* + * Since the css devices are neither on a bus nor have a class + * nor have a special device type, we cannot stop/restart channel + * path measurements via the normal suspend/resume callbacks, but have + * to use notifiers. 
+ */ +static int css_power_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + int ret, i; + + switch (event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + ret = NOTIFY_DONE; + for (i = 0; i <= __MAX_CSSID; i++) { + struct channel_subsystem *css; + + css = channel_subsystems[i]; + mutex_lock(&css->mutex); + if (!css->cm_enabled) { + mutex_unlock(&css->mutex); + continue; + } + ret = __chsc_do_secm(css, 0); + ret = notifier_from_errno(ret); + mutex_unlock(&css->mutex); + } + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + ret = NOTIFY_DONE; + for (i = 0; i <= __MAX_CSSID; i++) { + struct channel_subsystem *css; + + css = channel_subsystems[i]; + mutex_lock(&css->mutex); + if (!css->cm_enabled) { + mutex_unlock(&css->mutex); + continue; + } + ret = __chsc_do_secm(css, 1); + ret = notifier_from_errno(ret); + mutex_unlock(&css->mutex); + } + /* search for subchannels, which appeared during hibernation */ + css_schedule_reprobe(); + break; + default: + ret = NOTIFY_DONE; + } + return ret; + } +static struct notifier_block css_power_notifier = { + .notifier_call = css_power_event, +}; /* * Now that the driver core is running, we can setup our channel subsystem. - * The struct subchannel's are created during probing (except for the - * static console subchannel). + * The struct subchannel's are created during probing. */ -static int __init -init_channel_subsystem (void) +static int __init css_bus_init(void) { - int ret, irq; + int ret, i; - if (chsc_determine_css_characteristics() == 0) - css_characteristics_avail = 1; + ret = chsc_init(); + if (ret) + return ret; + + chsc_determine_css_characteristics(); + /* Try to enable MSS. */ + ret = chsc_enable_facility(CHSC_SDA_OC_MSS); + if (ret) + max_ssid = 0; + else /* Success. */ + max_ssid = __MAX_SSID; - css_generate_pgid(); + ret = slow_subchannel_init(); + if (ret) + goto out; + + ret = crw_register_handler(CRW_RSC_SCH, css_process_crw); + if (ret) + goto out; if ((ret = bus_register(&css_bus_type))) goto out; - if ((ret = device_register (&css_bus_device))) - goto out_bus; - css_init_done = 1; + /* Setup css structure. */ + for (i = 0; i <= __MAX_CSSID; i++) { + struct channel_subsystem *css; - ctl_set_bit(6, 28); - - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - struct subchannel *sch; - - if (cio_is_console(irq)) - sch = cio_get_console_subchannel(); - else { - sch = css_alloc_subchannel(irq); - if (IS_ERR(sch)) - ret = PTR_ERR(sch); - else - ret = 0; - if (ret == -ENOMEM) - panic("Out of memory in " - "init_channel_subsystem\n"); - /* -ENXIO: no more subchannels. */ - if (ret == -ENXIO) - break; + css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); + if (!css) { + ret = -ENOMEM; + goto out_unregister; + } + channel_subsystems[i] = css; + ret = setup_css(i); + if (ret) { + kfree(channel_subsystems[i]); + goto out_unregister; + } + ret = device_register(&css->device); + if (ret) { + put_device(&css->device); + goto out_unregister; + } + if (css_chsc_characteristics.secm) { + ret = device_create_file(&css->device, + &dev_attr_cm_enable); if (ret) - continue; + goto out_device; + } + ret = device_register(&css->pseudo_subchannel->dev); + if (ret) { + put_device(&css->pseudo_subchannel->dev); + goto out_file; } - /* - * We register ALL valid subchannels in ioinfo, even those - * that have been present before init_channel_subsystem. - * These subchannels can't have been registered yet (kmalloc - * not working) so we do it now. This is true e.g. for the - * console subchannel. 
- */ - css_register_subchannel(sch); } - return 0; + ret = register_reboot_notifier(&css_reboot_notifier); + if (ret) + goto out_unregister; + ret = register_pm_notifier(&css_power_notifier); + if (ret) { + unregister_reboot_notifier(&css_reboot_notifier); + goto out_unregister; + } + css_init_done = 1; -out_bus: + /* Enable default isc for I/O subchannels. */ + isc_register(IO_SCH_ISC); + + return 0; +out_file: + if (css_chsc_characteristics.secm) + device_remove_file(&channel_subsystems[i]->device, + &dev_attr_cm_enable); +out_device: + device_unregister(&channel_subsystems[i]->device); +out_unregister: + while (i > 0) { + struct channel_subsystem *css; + + i--; + css = channel_subsystems[i]; + device_unregister(&css->pseudo_subchannel->dev); + css->pseudo_subchannel = NULL; + if (css_chsc_characteristics.secm) + device_remove_file(&css->device, + &dev_attr_cm_enable); + device_unregister(&css->device); + } bus_unregister(&css_bus_type); out: + crw_unregister_handler(CRW_RSC_SCH); + idset_free(slow_subchannel_set); + chsc_init_cleanup(); + pr_alert("The CSS device driver initialization failed with " + "errno=%d\n", ret); return ret; } -/* - * find a driver for a subchannel. They identify by the subchannel - * type with the exception that the console subchannel driver has its own - * subchannel type although the device is an i/o subchannel - */ -static int -css_bus_match (struct device *dev, struct device_driver *drv) +static void __init css_bus_cleanup(void) +{ + struct channel_subsystem *css; + int i; + + for (i = 0; i <= __MAX_CSSID; i++) { + css = channel_subsystems[i]; + device_unregister(&css->pseudo_subchannel->dev); + css->pseudo_subchannel = NULL; + if (css_chsc_characteristics.secm) + device_remove_file(&css->device, &dev_attr_cm_enable); + device_unregister(&css->device); + } + bus_unregister(&css_bus_type); + crw_unregister_handler(CRW_RSC_SCH); + idset_free(slow_subchannel_set); + chsc_init_cleanup(); + isc_unregister(IO_SCH_ISC); +} + +static int __init channel_subsystem_init(void) { - struct subchannel *sch = container_of (dev, struct subchannel, dev); - struct css_driver *driver = container_of (drv, struct css_driver, drv); + int ret; + + ret = css_bus_init(); + if (ret) + return ret; + cio_work_q = create_singlethread_workqueue("cio"); + if (!cio_work_q) { + ret = -ENOMEM; + goto out_bus; + } + ret = io_subchannel_init(); + if (ret) + goto out_wq; - if (sch->st == driver->subchannel_type) - return 1; + return ret; +out_wq: + destroy_workqueue(cio_work_q); +out_bus: + css_bus_cleanup(); + return ret; +} +subsys_initcall(channel_subsystem_init); +static int css_settle(struct device_driver *drv, void *unused) +{ + struct css_driver *cssdrv = to_cssdriver(drv); + + if (cssdrv->settle) + return cssdrv->settle(); return 0; } -struct bus_type css_bus_type = { - .name = "css", - .match = &css_bus_match, -}; +int css_complete_work(void) +{ + int ret; + + /* Wait for the evaluation of subchannels to finish. */ + ret = wait_event_interruptible(css_eval_wq, + atomic_read(&css_eval_scheduled) == 0); + if (ret) + return -EINTR; + flush_workqueue(cio_work_q); + /* Wait for the subchannel type specific initialization to finish */ + return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); +} -subsys_initcall(init_channel_subsystem); /* - * Register root devices for some drivers. The release function must not be - * in the device drivers, so we do it here. 
+ * Wait for the initialization of devices to finish, to make sure we are + * done with our setup if the search for the root device starts. */ -static void -s390_root_dev_release(struct device *dev) +static int __init channel_subsystem_init_sync(void) { - kfree(dev); + /* Register subchannels which are already in use. */ + cio_register_early_subchannels(); + /* Start initial subchannel evaluation. */ + css_schedule_eval_all(); + css_complete_work(); + return 0; } +subsys_initcall_sync(channel_subsystem_init_sync); -struct device * -s390_root_dev_register(const char *name) +void channel_subsystem_reinit(void) { - struct device *dev; - int ret; - - if (!strlen(name)) - return ERR_PTR(-EINVAL); - dev = kmalloc(sizeof(struct device), GFP_KERNEL); - if (!dev) - return ERR_PTR(-ENOMEM); - memset(dev, 0, sizeof(struct device)); - strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); - dev->release = s390_root_dev_release; - ret = device_register(dev); - if (ret) { - kfree(dev); - return ERR_PTR(ret); + struct channel_path *chp; + struct chp_id chpid; + + chsc_enable_facility(CHSC_SDA_OC_MSS); + chp_id_for_each(&chpid) { + chp = chpid_to_chp(chpid); + if (chp) + chp_update_desc(chp); } - return dev; } -void -s390_root_dev_unregister(struct device *dev) +#ifdef CONFIG_PROC_FS +static ssize_t cio_settle_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) { - if (dev) - device_unregister(dev); + int ret; + + /* Handle pending CRW's. */ + crw_wait_for_channel_report(); + ret = css_complete_work(); + + return ret ? ret : count; } -int -css_enqueue_subchannel_slow(unsigned long schid) +static const struct file_operations cio_settle_proc_fops = { + .open = nonseekable_open, + .write = cio_settle_write, + .llseek = no_llseek, +}; + +static int __init cio_settle_init(void) { - struct slow_subchannel *new_slow_sch; - unsigned long flags; + struct proc_dir_entry *entry; - new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); - if (!new_slow_sch) + entry = proc_create("cio_settle", S_IWUSR, NULL, + &cio_settle_proc_fops); + if (!entry) return -ENOMEM; - memset(new_slow_sch, 0, sizeof(struct slow_subchannel)); - new_slow_sch->schid = schid; - spin_lock_irqsave(&slow_subchannel_lock, flags); - list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); - spin_unlock_irqrestore(&slow_subchannel_lock, flags); return 0; } +device_initcall(cio_settle_init); +#endif /*CONFIG_PROC_FS*/ -void -css_clear_subchannel_slow_list(void) +int sch_is_pseudo_sch(struct subchannel *sch) { - unsigned long flags; + return sch == to_css(sch->dev.parent)->pseudo_subchannel; +} - spin_lock_irqsave(&slow_subchannel_lock, flags); - while (!list_empty(&slow_subchannels_head)) { - struct slow_subchannel *slow_sch = - list_entry(slow_subchannels_head.next, - struct slow_subchannel, slow_list); +static int css_bus_match(struct device *dev, struct device_driver *drv) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *driver = to_cssdriver(drv); + struct css_device_id *id; - list_del_init(slow_subchannels_head.next); - kfree(slow_sch); + for (id = driver->subchannel_type; id->match_flags; id++) { + if (sch->st == id->type) + return 1; } - spin_unlock_irqrestore(&slow_subchannel_lock, flags); + + return 0; +} + +static int css_probe(struct device *dev) +{ + struct subchannel *sch; + int ret; + + sch = to_subchannel(dev); + sch->driver = to_cssdriver(dev->driver); + ret = sch->driver->probe ? 
sch->driver->probe(sch) : 0; + if (ret) + sch->driver = NULL; + return ret; } +static int css_remove(struct device *dev) +{ + struct subchannel *sch; + int ret; + + sch = to_subchannel(dev); + ret = sch->driver->remove ? sch->driver->remove(sch) : 0; + sch->driver = NULL; + return ret; +} +static void css_shutdown(struct device *dev) +{ + struct subchannel *sch; -int -css_slow_subchannels_exist(void) + sch = to_subchannel(dev); + if (sch->driver && sch->driver->shutdown) + sch->driver->shutdown(sch); +} + +static int css_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct subchannel *sch = to_subchannel(dev); + int ret; + + ret = add_uevent_var(env, "ST=%01X", sch->st); + if (ret) + return ret; + ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); + return ret; +} + +static int css_pm_prepare(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (mutex_is_locked(&sch->reg_mutex)) + return -EAGAIN; + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + /* Notify drivers that they may not register children. */ + return drv->prepare ? drv->prepare(sch) : 0; +} + +static void css_pm_complete(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (!sch->dev.driver) + return; + drv = to_cssdriver(sch->dev.driver); + if (drv->complete) + drv->complete(sch); +} + +static int css_pm_freeze(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + return drv->freeze ? drv->freeze(sch) : 0; +} + +static int css_pm_thaw(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + return drv->thaw ? drv->thaw(sch) : 0; +} + +static int css_pm_restore(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + struct css_driver *drv; + + css_update_ssd_info(sch); + if (!sch->dev.driver) + return 0; + drv = to_cssdriver(sch->dev.driver); + return drv->restore ? drv->restore(sch) : 0; +} + +static const struct dev_pm_ops css_pm_ops = { + .prepare = css_pm_prepare, + .complete = css_pm_complete, + .freeze = css_pm_freeze, + .thaw = css_pm_thaw, + .restore = css_pm_restore, +}; + +static struct bus_type css_bus_type = { + .name = "css", + .match = css_bus_match, + .probe = css_probe, + .remove = css_remove, + .shutdown = css_shutdown, + .uevent = css_uevent, + .pm = &css_pm_ops, +}; + +/** + * css_driver_register - register a css driver + * @cdrv: css driver to register + * + * This is mainly a wrapper around driver_register that sets name + * and bus_type in the embedded struct device_driver correctly. + */ +int css_driver_register(struct css_driver *cdrv) +{ + cdrv->drv.bus = &css_bus_type; + return driver_register(&cdrv->drv); +} +EXPORT_SYMBOL_GPL(css_driver_register); + +/** + * css_driver_unregister - unregister a css driver + * @cdrv: css driver to unregister + * + * This is a wrapper around driver_unregister. 
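+ *
+ * Editorial sketch (not part of the original patch): a subchannel driver is
+ * expected to pair the two calls roughly as shown below; "example_drv",
+ * "example_ids" and the init/exit functions are hypothetical names, modelled
+ * on io_subchannel_driver further down in this diff.
+ *
+ *	static struct css_device_id example_ids[] = {
+ *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+ *		{ },
+ *	};
+ *
+ *	static struct css_driver example_drv = {
+ *		.drv = { .owner = THIS_MODULE, .name = "example" },
+ *		.subchannel_type = example_ids,
+ *	};
+ *
+ *	static int __init example_init(void)
+ *	{
+ *		return css_driver_register(&example_drv);
+ *	}
+ *
+ *	static void __exit example_exit(void)
+ *	{
+ *		css_driver_unregister(&example_drv);
+ *	}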
+ */ +void css_driver_unregister(struct css_driver *cdrv) { - return (!list_empty(&slow_subchannels_head)); + driver_unregister(&cdrv->drv); } +EXPORT_SYMBOL_GPL(css_driver_unregister); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(css_bus_type); -EXPORT_SYMBOL(s390_root_dev_register); -EXPORT_SYMBOL(s390_root_dev_unregister); -EXPORT_SYMBOL_GPL(css_characteristics_avail); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 2004a6c4938..2c9107e2025 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -1,10 +1,17 @@ #ifndef _CSS_H #define _CSS_H +#include <linux/mutex.h> #include <linux/wait.h> #include <linux/workqueue.h> +#include <linux/device.h> +#include <linux/types.h> #include <asm/cio.h> +#include <asm/chpid.h> +#include <asm/schid.h> + +#include "cio.h" /* * path grouping stuff @@ -33,123 +40,107 @@ struct path_state { __u8 resvd : 3; /* reserved */ } __attribute__ ((packed)); +struct extended_cssid { + u8 version; + u8 cssid; +} __attribute__ ((packed)); + struct pgid { union { __u8 fc; /* SPID function code */ struct path_state ps; /* SNID path state */ - } inf; - __u32 cpu_addr : 16; /* CPU address */ + } __attribute__ ((packed)) inf; + union { + __u32 cpu_addr : 16; /* CPU address */ + struct extended_cssid ext_cssid; + } __attribute__ ((packed)) pgid_high; __u32 cpu_id : 24; /* CPU identification */ __u32 cpu_model : 16; /* CPU model */ __u32 tod_high; /* high word TOD clock */ } __attribute__ ((packed)); -extern struct pgid global_pgid; - -#define MAX_CIWS 8 - -/* - * sense-id response buffer layout - */ -struct senseid { - /* common part */ - __u8 reserved; /* always 0x'FF' */ - __u16 cu_type; /* control unit type */ - __u8 cu_model; /* control unit model */ - __u16 dev_type; /* device type */ - __u8 dev_model; /* device model */ - __u8 unused; /* padding byte */ - /* extended part */ - struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */ -} __attribute__ ((packed,aligned(4))); - -struct ccw_device_private { - int state; /* device state */ - atomic_t onoff; - unsigned long registered; - __u16 devno; /* device number */ - __u16 irq; /* subchannel number */ - __u8 imask; /* lpm mask for SNID/SID/SPGID */ - int iretry; /* retry counter SNID/SID/SPGID */ - struct { - unsigned int fast:1; /* post with "channel end" */ - unsigned int repall:1; /* report every interrupt status */ - unsigned int pgroup:1; /* do path grouping */ - unsigned int force:1; /* allow forced online */ - } __attribute__ ((packed)) options; - struct { - unsigned int pgid_single:1; /* use single path for Set PGID */ - unsigned int esid:1; /* Ext. SenseID supported by HW */ - unsigned int dosense:1; /* delayed SENSE required */ - unsigned int doverify:1; /* delayed path verification */ - unsigned int donotify:1; /* call notify function */ - unsigned int recog_done:1; /* dev. recog. 
complete */ - unsigned int fake_irb:1; /* deliver faked irb */ - } __attribute__((packed)) flags; - unsigned long intparm; /* user interruption parameter */ - struct qdio_irq *qdio_data; - struct irb irb; /* device status */ - struct senseid senseid; /* SenseID info */ - struct pgid pgid; /* path group ID */ - struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ - struct work_struct kick_work; - wait_queue_head_t wait_q; - struct timer_list timer; - void *cmb; /* measurement information */ - struct list_head cmb_list; /* list of measured devices */ - u64 cmb_start_time; /* clock value of cmb reset */ - void *cmb_wait; /* deferred cmb enable/disable */ -}; - -/* - * A css driver handles all subchannels of one type. - * Currently, we only care about I/O subchannels (type 0), these - * have a ccw_device connected to them. +struct subchannel; +struct chp_link; +/** + * struct css_driver - device driver for subchannels + * @subchannel_type: subchannel type supported by this driver + * @drv: embedded device driver structure + * @irq: called on interrupts + * @chp_event: called for events affecting a channel path + * @sch_event: called for events affecting the subchannel + * @probe: function called on probe + * @remove: function called on remove + * @shutdown: called at device shutdown + * @prepare: prepare for pm state transition + * @complete: undo work done in @prepare + * @freeze: callback for freezing during hibernation snapshotting + * @thaw: undo work done in @freeze + * @restore: callback for restoring after hibernation + * @settle: wait for asynchronous work to finish */ struct css_driver { - unsigned int subchannel_type; + struct css_device_id *subchannel_type; struct device_driver drv; - void (*irq)(struct device *); - int (*notify)(struct device *, int); - void (*verify)(struct device *); - void (*termination)(struct device *); + void (*irq)(struct subchannel *); + int (*chp_event)(struct subchannel *, struct chp_link *, int); + int (*sch_event)(struct subchannel *, int); + int (*probe)(struct subchannel *); + int (*remove)(struct subchannel *); + void (*shutdown)(struct subchannel *); + int (*prepare) (struct subchannel *); + void (*complete) (struct subchannel *); + int (*freeze)(struct subchannel *); + int (*thaw) (struct subchannel *); + int (*restore)(struct subchannel *); + int (*settle)(void); }; -/* - * all css_drivers have the css_bus_type - */ -extern struct bus_type css_bus_type; -extern struct css_driver io_subchannel_driver; - -int css_probe_device(int irq); -extern struct subchannel * get_subchannel_by_schid(int irq); -extern unsigned int highest_subchannel; -extern int css_init_done; +#define to_cssdriver(n) container_of(n, struct css_driver, drv) -#define __MAX_SUBCHANNELS 65536 +extern int css_driver_register(struct css_driver *); +extern void css_driver_unregister(struct css_driver *); -extern struct bus_type css_bus_type; -extern struct device css_bus_device; - -/* Some helper functions for disconnected state. */ -int device_is_disconnected(struct subchannel *); -void device_set_disconnected(struct subchannel *); -void device_trigger_reprobe(struct subchannel *); - -/* Helper functions for vary on/off. 
*/ -int device_is_online(struct subchannel *); -void device_set_waiting(struct subchannel *); +extern void css_sch_device_unregister(struct subchannel *); +extern int css_register_subchannel(struct subchannel *); +extern struct subchannel *css_alloc_subchannel(struct subchannel_id); +extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); +extern int css_init_done; +extern int max_ssid; +int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), + int (*fn_unknown)(struct subchannel_id, + void *), void *data); +extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); +void css_update_ssd_info(struct subchannel *sch); + +struct channel_subsystem { + u8 cssid; + int valid; + struct channel_path *chps[__MAX_CHPID + 1]; + struct device device; + struct pgid global_pgid; + struct mutex mutex; + /* channel measurement related */ + int cm_enabled; + void *cub_addr1; + void *cub_addr2; + /* for orphaned ccw devices */ + struct subchannel *pseudo_subchannel; +}; +#define to_css(dev) container_of(dev, struct channel_subsystem, device) -/* Machine check helper function. */ -void device_kill_pending_timer(struct subchannel *); +extern struct channel_subsystem *channel_subsystems[]; /* Helper functions to build lists for the slow path. */ -int css_enqueue_subchannel_slow(unsigned long schid); -void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); -void css_clear_subchannel_slow_list(void); -int css_slow_subchannels_exist(void); -extern int need_rescan; - -extern struct workqueue_struct *slow_path_wq; -extern struct work_struct slow_path_work; +void css_schedule_eval(struct subchannel_id schid); +void css_schedule_eval_all(void); +void css_schedule_eval_all_unreg(unsigned long delay); +int css_complete_work(void); + +int sch_is_pseudo_sch(struct subchannel *); +struct schib; +int css_sch_is_valid(struct schib *); + +extern struct workqueue_struct *cio_work_q; +void css_wait_for_slow_path(void); +void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); #endif diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 14c76f5e417..dfef5e63cb7 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1,15 +1,15 @@ /* - * drivers/s390/cio/device.c * bus driver for ccw devices - * $Revision: 1.131 $ * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 
2002, 2008 * Author(s): Arnd Bergmann (arndb@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ -#include <linux/config.h> + +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> @@ -19,14 +19,34 @@ #include <linux/list.h> #include <linux/device.h> #include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/kernel_stat.h> #include <asm/ccwdev.h> #include <asm/cio.h> +#include <asm/param.h> /* HZ */ +#include <asm/cmb.h> +#include <asm/isc.h> +#include "chp.h" #include "cio.h" +#include "cio_debug.h" #include "css.h" #include "device.h" #include "ioasm.h" +#include "io_sch.h" +#include "blacklist.h" +#include "chsc.h" + +static struct timer_list recovery_timer; +static DEFINE_SPINLOCK(recovery_lock); +static int recovery_phase; +static const unsigned long recovery_delay[] = { 3, 30, 300 }; + +static atomic_t ccw_device_init_count = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); +static struct bus_type ccw_bus_type; /******************* bus type handling ***********************/ @@ -53,145 +73,136 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) return 1; } -/* - * Hotplugging interface for ccw devices. - * Heavily modeled on pci and usb hotplug. - */ -static int -ccw_hotplug (struct device *dev, char **envp, int num_envp, - char *buffer, int buffer_size) +/* Store modalias string delimited by prefix/suffix string into buffer with + * specified size. Return length of resulting string (excluding trailing '\0') + * even if string doesn't fit buffer (snprintf semantics). */ +static int snprint_alias(char *buf, size_t size, + struct ccw_device_id *id, const char *suffix) { - struct ccw_device *cdev = to_ccwdev(dev); - int i = 0; - int length = 0; + int len; - if (!cdev) - return -ENODEV; + len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model); + if (len > size) + return len; + buf += len; + size -= len; - /* what we want to pass to /sbin/hotplug */ + if (id->dev_type != 0) + len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type, + id->dev_model, suffix); + else + len += snprintf(buf, size, "dtdm%s", suffix); - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X", - cdev->id.cu_type); - if ((buffer_size - length <= 0) || (i >= num_envp)) - return -ENOMEM; - ++length; - buffer += length; + return len; +} - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X", - cdev->id.cu_model); - if ((buffer_size - length <= 0) || (i >= num_envp)) - return -ENOMEM; - ++length; - buffer += length; +/* Set up environment variables for ccw device uevent. Return 0 on success, + * non-zero otherwise. 
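+ * For illustration (editorial note, values invented): a device reporting
+ * cu_type 0x3990, cu_model 0xe9, dev_type 0x3390 and dev_model 0x0a yields
+ * MODALIAS=ccw:t3990mE9dt3390dm0A, while a device whose dev_type is 0 gets
+ * the short form ccw:t3990mE9dtdm.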
*/ +static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct ccw_device_id *id = &(cdev->id); + int ret; + char modalias_buf[30]; + + /* CU_TYPE= */ + ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type); + if (ret) + return ret; + + /* CU_MODEL= */ + ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model); + if (ret) + return ret; /* The next two can be zero, that's ok for us */ - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X", - cdev->id.dev_type); - if ((buffer_size - length <= 0) || (i >= num_envp)) - return -ENOMEM; - ++length; - buffer += length; - - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X", - cdev->id.dev_model); - if ((buffer_size - length <= 0) || (i >= num_envp)) - return -ENOMEM; - - envp[i] = 0; + /* DEV_TYPE= */ + ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type); + if (ret) + return ret; - return 0; + /* DEV_MODEL= */ + ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model); + if (ret) + return ret; + + /* MODALIAS= */ + snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); + ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf); + return ret; } -struct bus_type ccw_bus_type = { - .name = "ccw", - .match = &ccw_bus_match, - .hotplug = &ccw_hotplug, +static void io_subchannel_irq(struct subchannel *); +static int io_subchannel_probe(struct subchannel *); +static int io_subchannel_remove(struct subchannel *); +static void io_subchannel_shutdown(struct subchannel *); +static int io_subchannel_sch_event(struct subchannel *, int); +static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, + int); +static void recovery_func(unsigned long data); + +static struct css_device_id io_subchannel_ids[] = { + { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, + { /* end of list */ }, }; +MODULE_DEVICE_TABLE(css, io_subchannel_ids); + +static int io_subchannel_prepare(struct subchannel *sch) +{ + struct ccw_device *cdev; + /* + * Don't allow suspend while a ccw device registration + * is still outstanding. 
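+	 * (Editorial note: "outstanding" means the ccw_device has been
+	 * allocated but its delayed device_add() has not completed yet, so
+	 * device_is_registered() is still false and the suspend attempt is
+	 * rejected with -EAGAIN.)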
+ */ + cdev = sch_get_cdev(sch); + if (cdev && !device_is_registered(&cdev->dev)) + return -EAGAIN; + return 0; +} + +static int io_subchannel_settle(void) +{ + int ret; -static int io_subchannel_probe (struct device *); -static int io_subchannel_remove (struct device *); -void io_subchannel_irq (struct device *); -static int io_subchannel_notify(struct device *, int); -static void io_subchannel_verify(struct device *); -static void io_subchannel_ioterm(struct device *); -static void io_subchannel_shutdown(struct device *); + ret = wait_event_interruptible(ccw_device_init_wq, + atomic_read(&ccw_device_init_count) == 0); + if (ret) + return -EINTR; + flush_workqueue(cio_work_q); + return 0; +} -struct css_driver io_subchannel_driver = { - .subchannel_type = SUBCHANNEL_TYPE_IO, +static struct css_driver io_subchannel_driver = { .drv = { + .owner = THIS_MODULE, .name = "io_subchannel", - .bus = &css_bus_type, - .probe = &io_subchannel_probe, - .remove = &io_subchannel_remove, - .shutdown = &io_subchannel_shutdown, }, + .subchannel_type = io_subchannel_ids, .irq = io_subchannel_irq, - .notify = io_subchannel_notify, - .verify = io_subchannel_verify, - .termination = io_subchannel_ioterm, + .sch_event = io_subchannel_sch_event, + .chp_event = io_subchannel_chp_event, + .probe = io_subchannel_probe, + .remove = io_subchannel_remove, + .shutdown = io_subchannel_shutdown, + .prepare = io_subchannel_prepare, + .settle = io_subchannel_settle, }; -struct workqueue_struct *ccw_device_work; -struct workqueue_struct *ccw_device_notify_work; -static wait_queue_head_t ccw_device_init_wq; -static atomic_t ccw_device_init_count; - -static int __init -init_ccw_bus_type (void) +int __init io_subchannel_init(void) { int ret; - init_waitqueue_head(&ccw_device_init_wq); - atomic_set(&ccw_device_init_count, 0); - - ccw_device_work = create_singlethread_workqueue("cio"); - if (!ccw_device_work) - return -ENOMEM; /* FIXME: better errno ? */ - ccw_device_notify_work = create_singlethread_workqueue("cio_notify"); - if (!ccw_device_notify_work) { - ret = -ENOMEM; /* FIXME: better errno ? */ - goto out_err; - } - slow_path_wq = create_singlethread_workqueue("kslowcrw"); - if (!slow_path_wq) { - ret = -ENOMEM; /* FIXME: better errno ? 
*/ - goto out_err; - } - if ((ret = bus_register (&ccw_bus_type))) - goto out_err; - - if ((ret = driver_register(&io_subchannel_driver.drv))) - goto out_err; + setup_timer(&recovery_timer, recovery_func, 0); + ret = bus_register(&ccw_bus_type); + if (ret) + return ret; + ret = css_driver_register(&io_subchannel_driver); + if (ret) + bus_unregister(&ccw_bus_type); - wait_event(ccw_device_init_wq, - atomic_read(&ccw_device_init_count) == 0); - flush_workqueue(ccw_device_work); - return 0; -out_err: - if (ccw_device_work) - destroy_workqueue(ccw_device_work); - if (ccw_device_notify_work) - destroy_workqueue(ccw_device_notify_work); - if (slow_path_wq) - destroy_workqueue(slow_path_wq); return ret; } -static void __exit -cleanup_ccw_bus_type (void) -{ - driver_unregister(&io_subchannel_driver.drv); - bus_unregister(&ccw_bus_type); - destroy_workqueue(ccw_device_notify_work); - destroy_workqueue(ccw_device_work); -} - -subsys_initcall(init_ccw_bus_type); -module_exit(cleanup_ccw_bus_type); /************************ device handling **************************/ @@ -207,13 +218,18 @@ static ssize_t chpids_show (struct device * dev, struct device_attribute *attr, char * buf) { struct subchannel *sch = to_subchannel(dev); - struct ssd_info *ssd = &sch->ssd_info; + struct chsc_ssd_info *ssd = &sch->ssd_info; ssize_t ret = 0; int chp; + int mask; - for (chp = 0; chp < 8; chp++) - ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); - + for (chp = 0; chp < 8; chp++) { + mask = 0x80 >> chp; + if (ssd->path_mask & mask) + ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); + else + ret += sprintf(buf + ret, "00 "); + } ret += sprintf (buf+ret, "\n"); return min((ssize_t)PAGE_SIZE, ret); } @@ -252,6 +268,18 @@ cutype_show (struct device *dev, struct device_attribute *attr, char *buf) } static ssize_t +modalias_show (struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct ccw_device_id *id = &(cdev->id); + int len; + + len = snprint_alias(buf, PAGE_SIZE, id, "\n"); + + return len > PAGE_SIZE ? PAGE_SIZE : len; +} + +static ssize_t online_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); @@ -259,26 +287,41 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, cdev->online ? "1\n" : "0\n"); } -static void -ccw_device_remove_disconnected(struct ccw_device *cdev) +int ccw_device_is_orphan(struct ccw_device *cdev) { - struct subchannel *sch; - /* - * Forced offline in disconnected state means - * 'throw away device'. - */ - sch = to_subchannel(cdev->dev.parent); - device_unregister(&sch->dev); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - put_device(&sch->dev); + return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); } -int -ccw_device_set_offline(struct ccw_device *cdev) +static void ccw_device_unregister(struct ccw_device *cdev) { - int ret; + if (device_is_registered(&cdev->dev)) { + /* Undo device_add(). */ + device_del(&cdev->dev); + } + if (cdev->private->flags.initialized) { + cdev->private->flags.initialized = 0; + /* Release reference from device_initialize(). */ + put_device(&cdev->dev); + } +} + +static void io_subchannel_quiesce(struct subchannel *); + +/** + * ccw_device_set_offline() - disable a ccw device for I/O + * @cdev: target ccw device + * + * This function calls the driver's set_offline() function for @cdev, if + * given, and then disables @cdev. 
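+ *
+ * Editorial sketch (hypothetical caller, not from the patch):
+ *
+ *	if (ccw_device_set_offline(my_cdev))
+ *		dev_warn(&my_cdev->dev, "setting the device offline failed\n");
+ *
+ * The extra device reference taken by ccw_device_set_online() is dropped
+ * again on both the success and the error path of this function.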
+ * Returns: + * %0 on success and a negative error value on failure. + * Context: + * enabled, ccw device lock not held + */ +int ccw_device_set_offline(struct ccw_device *cdev) +{ + struct subchannel *sch; + int ret, state; if (!cdev) return -ENODEV; @@ -290,37 +333,80 @@ ccw_device_set_offline(struct ccw_device *cdev) if (ret != 0) return ret; } - cdev->online = 0; spin_lock_irq(cdev->ccwlock); - ret = ccw_device_offline(cdev); - if (ret == -ENODEV) { - if (cdev->private->state != DEV_STATE_NOT_OPER) { - cdev->private->state = DEV_STATE_OFFLINE; - dev_fsm_event(cdev, DEV_EVENT_NOTOPER); - } + sch = to_subchannel(cdev->dev.parent); + cdev->online = 0; + /* Wait until a final state or DISCONNECTED is reached */ + while (!dev_fsm_final_state(cdev) && + cdev->private->state != DEV_STATE_DISCONNECTED) { spin_unlock_irq(cdev->ccwlock); - return ret; + wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || + cdev->private->state == DEV_STATE_DISCONNECTED)); + spin_lock_irq(cdev->ccwlock); } + do { + ret = ccw_device_offline(cdev); + if (!ret) + break; + CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device " + "0.%x.%04x\n", ret, cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + if (ret != -EBUSY) + goto error; + state = cdev->private->state; + spin_unlock_irq(cdev->ccwlock); + io_subchannel_quiesce(sch); + spin_lock_irq(cdev->ccwlock); + cdev->private->state = state; + } while (ret == -EBUSY); spin_unlock_irq(cdev->ccwlock); - if (ret == 0) - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - else { - pr_debug("ccw_device_offline returned %d, device %s\n", - ret, cdev->dev.bus_id); - cdev->online = 1; + wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || + cdev->private->state == DEV_STATE_DISCONNECTED)); + /* Inform the user if set offline failed. */ + if (cdev->private->state == DEV_STATE_BOXED) { + pr_warning("%s: The device entered boxed state while " + "being set offline\n", dev_name(&cdev->dev)); + } else if (cdev->private->state == DEV_STATE_NOT_OPER) { + pr_warning("%s: The device stopped operating while " + "being set offline\n", dev_name(&cdev->dev)); } - return ret; + /* Give up reference from ccw_device_set_online(). */ + put_device(&cdev->dev); + return 0; + +error: + cdev->private->state = DEV_STATE_OFFLINE; + dev_fsm_event(cdev, DEV_EVENT_NOTOPER); + spin_unlock_irq(cdev->ccwlock); + /* Give up reference from ccw_device_set_online(). */ + put_device(&cdev->dev); + return -ENODEV; } -int -ccw_device_set_online(struct ccw_device *cdev) +/** + * ccw_device_set_online() - enable a ccw device for I/O + * @cdev: target ccw device + * + * This function first enables @cdev and then calls the driver's set_online() + * function for @cdev, if given. If set_online() returns an error, @cdev is + * disabled again. + * Returns: + * %0 on success and a negative error value on failure. + * Context: + * enabled, ccw device lock not held + */ +int ccw_device_set_online(struct ccw_device *cdev) { int ret; + int ret2; if (!cdev) return -ENODEV; if (cdev->online || !cdev->drv) return -EINVAL; + /* Hold on to an extra reference while device is online. 
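+	 * (Editorial note: it is dropped again by ccw_device_set_offline()
+	 * or, if onlining fails, by the error paths further down.)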
*/ + if (!get_device(&cdev->dev)) + return -ENODEV; spin_lock_irq(cdev->ccwlock); ret = ccw_device_online(cdev); @@ -328,97 +414,175 @@ ccw_device_set_online(struct ccw_device *cdev) if (ret == 0) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else { - pr_debug("ccw_device_online returned %d, device %s\n", - ret, cdev->dev.bus_id); + CIO_MSG_EVENT(0, "ccw_device_online returned %d, " + "device 0.%x.%04x\n", + ret, cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + /* Give up online reference since onlining failed. */ + put_device(&cdev->dev); return ret; } - if (cdev->private->state != DEV_STATE_ONLINE) + spin_lock_irq(cdev->ccwlock); + /* Check if online processing was successful */ + if ((cdev->private->state != DEV_STATE_ONLINE) && + (cdev->private->state != DEV_STATE_W4SENSE)) { + spin_unlock_irq(cdev->ccwlock); + /* Inform the user that set online failed. */ + if (cdev->private->state == DEV_STATE_BOXED) { + pr_warning("%s: Setting the device online failed " + "because it is boxed\n", + dev_name(&cdev->dev)); + } else if (cdev->private->state == DEV_STATE_NOT_OPER) { + pr_warning("%s: Setting the device online failed " + "because it is not operational\n", + dev_name(&cdev->dev)); + } + /* Give up online reference since onlining failed. */ + put_device(&cdev->dev); return -ENODEV; - if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { - cdev->online = 1; - return 0; } + spin_unlock_irq(cdev->ccwlock); + if (cdev->drv->set_online) + ret = cdev->drv->set_online(cdev); + if (ret) + goto rollback; + spin_lock_irq(cdev->ccwlock); - ret = ccw_device_offline(cdev); + cdev->online = 1; spin_unlock_irq(cdev->ccwlock); - if (ret == 0) - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - else - pr_debug("ccw_device_offline returned %d, device %s\n", - ret, cdev->dev.bus_id); - return (ret = 0) ? -ENODEV : ret; + return 0; + +rollback: + spin_lock_irq(cdev->ccwlock); + /* Wait until a final state or DISCONNECTED is reached */ + while (!dev_fsm_final_state(cdev) && + cdev->private->state != DEV_STATE_DISCONNECTED) { + spin_unlock_irq(cdev->ccwlock); + wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || + cdev->private->state == DEV_STATE_DISCONNECTED)); + spin_lock_irq(cdev->ccwlock); + } + ret2 = ccw_device_offline(cdev); + if (ret2) + goto error; + spin_unlock_irq(cdev->ccwlock); + wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || + cdev->private->state == DEV_STATE_DISCONNECTED)); + /* Give up online reference since onlining failed. */ + put_device(&cdev->dev); + return ret; + +error: + CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, " + "device 0.%x.%04x\n", + ret2, cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + cdev->private->state = DEV_STATE_OFFLINE; + spin_unlock_irq(cdev->ccwlock); + /* Give up online reference since onlining failed. */ + put_device(&cdev->dev); + return ret; } -static ssize_t -online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static int online_store_handle_offline(struct ccw_device *cdev) +{ + if (cdev->private->state == DEV_STATE_DISCONNECTED) { + spin_lock_irq(cdev->ccwlock); + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); + spin_unlock_irq(cdev->ccwlock); + return 0; + } + if (cdev->drv && cdev->drv->set_offline) + return ccw_device_set_offline(cdev); + return -EINVAL; +} + +static int online_store_recog_and_online(struct ccw_device *cdev) +{ + /* Do device recognition, if needed. 
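+	 * (Editorial note: a boxed device gets another chance here: run
+	 * recognition again, wait for it to complete, and report -EAGAIN
+	 * only if the device still has not reached the offline state.)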
*/ + if (cdev->private->state == DEV_STATE_BOXED) { + spin_lock_irq(cdev->ccwlock); + ccw_device_recognition(cdev); + spin_unlock_irq(cdev->ccwlock); + wait_event(cdev->private->wait_q, + cdev->private->flags.recog_done); + if (cdev->private->state != DEV_STATE_OFFLINE) + /* recognition failed */ + return -EAGAIN; + } + if (cdev->drv && cdev->drv->set_online) + return ccw_device_set_online(cdev); + return -EINVAL; +} + +static int online_store_handle_online(struct ccw_device *cdev, int force) +{ + int ret; + + ret = online_store_recog_and_online(cdev); + if (ret && !force) + return ret; + if (force && cdev->private->state == DEV_STATE_BOXED) { + ret = ccw_device_stlck(cdev); + if (ret) + return ret; + if (cdev->id.cu_type == 0) + cdev->private->state = DEV_STATE_NOT_OPER; + ret = online_store_recog_and_online(cdev); + if (ret) + return ret; + } + return 0; +} + +static ssize_t online_store (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct ccw_device *cdev = to_ccwdev(dev); - int i, force, ret; - char *tmp; + int force, ret; + unsigned long i; - if (atomic_compare_and_swap(0, 1, &cdev->private->onoff)) + /* Prevent conflict between multiple on-/offline processing requests. */ + if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) return -EAGAIN; - - if (cdev->drv && !try_module_get(cdev->drv->owner)) { - atomic_set(&cdev->private->onoff, 0); - return -EINVAL; + /* Prevent conflict between internal I/Os and on-/offline processing. */ + if (!dev_fsm_final_state(cdev) && + cdev->private->state != DEV_STATE_DISCONNECTED) { + ret = -EAGAIN; + goto out; + } + /* Prevent conflict between pending work and on-/offline processing.*/ + if (work_pending(&cdev->private->todo_work)) { + ret = -EAGAIN; + goto out; } if (!strncmp(buf, "force\n", count)) { force = 1; i = 1; + ret = 0; } else { force = 0; - i = simple_strtoul(buf, &tmp, 16); - } - if (i == 1) { - /* Do device recognition, if needed. */ - if (cdev->id.cu_type == 0) { - ret = ccw_device_recognition(cdev); - if (ret) { - printk(KERN_WARNING"Couldn't start recognition " - "for device %s (ret=%d)\n", - cdev->dev.bus_id, ret); - goto out; - } - wait_event(cdev->private->wait_q, - cdev->private->flags.recog_done); - } - if (cdev->drv && cdev->drv->set_online) - ccw_device_set_online(cdev); - } else if (i == 0) { - if (cdev->private->state == DEV_STATE_DISCONNECTED) - ccw_device_remove_disconnected(cdev); - else if (cdev->drv && cdev->drv->set_offline) - ccw_device_set_offline(cdev); + ret = kstrtoul(buf, 16, &i); } - if (force && cdev->private->state == DEV_STATE_BOXED) { - ret = ccw_device_stlck(cdev); - if (ret) { - printk(KERN_WARNING"ccw_device_stlck for device %s " - "returned %d!\n", cdev->dev.bus_id, ret); - goto out; - } - /* Do device recognition, if needed. 
*/ - if (cdev->id.cu_type == 0) { - cdev->private->state = DEV_STATE_NOT_OPER; - ret = ccw_device_recognition(cdev); - if (ret) { - printk(KERN_WARNING"Couldn't start recognition " - "for device %s (ret=%d)\n", - cdev->dev.bus_id, ret); - goto out; - } - wait_event(cdev->private->wait_q, - cdev->private->flags.recog_done); - } - if (cdev->drv && cdev->drv->set_online) - ccw_device_set_online(cdev); + if (ret) + goto out; + + device_lock(dev); + switch (i) { + case 0: + ret = online_store_handle_offline(cdev); + break; + case 1: + ret = online_store_handle_online(cdev, force); + break; + default: + ret = -EINVAL; } - out: - if (cdev->drv) - module_put(cdev->drv->owner); + device_unlock(dev); + +out: atomic_set(&cdev->private->onoff, 0); - return count; + return (ret < 0) ? ret : count; } static ssize_t @@ -427,6 +591,8 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device *cdev = to_ccwdev(dev); struct subchannel *sch; + if (ccw_device_is_orphan(cdev)) + return sprintf(buf, "no device\n"); switch (cdev->private->state) { case DEV_STATE_BOXED: return sprintf(buf, "boxed\n"); @@ -444,33 +610,59 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf) } } +static ssize_t +initiate_logging(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct subchannel *sch = to_subchannel(dev); + int rc; + + rc = chsc_siosl(sch->schid); + if (rc < 0) { + pr_warning("Logging for subchannel 0.%x.%04x failed with " + "errno=%d\n", + sch->schid.ssid, sch->schid.sch_no, rc); + return rc; + } + pr_notice("Logging for subchannel 0.%x.%04x was triggered\n", + sch->schid.ssid, sch->schid.sch_no); + return count; +} + +static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "%02x\n", sch->vpm); +} + static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); +static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static DEVICE_ATTR(online, 0644, online_show, online_store); -extern struct device_attribute dev_attr_cmb_enable; static DEVICE_ATTR(availability, 0444, available_show, NULL); +static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); +static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); -static struct attribute * subch_attrs[] = { +static struct attribute *io_subchannel_attrs[] = { &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, + &dev_attr_logging.attr, + &dev_attr_vpm.attr, NULL, }; -static struct attribute_group subch_attr_group = { - .attrs = subch_attrs, +static struct attribute_group io_subchannel_attr_group = { + .attrs = io_subchannel_attrs, }; -static inline int -subchannel_add_files (struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &subch_attr_group); -} - static struct attribute * ccwdev_attrs[] = { &dev_attr_devtype.attr, &dev_attr_cutype.attr, + &dev_attr_modalias.attr, &dev_attr_online.attr, &dev_attr_cmb_enable.attr, &dev_attr_availability.attr, @@ -481,221 +673,238 @@ static struct attribute_group ccwdev_attr_group = { .attrs = ccwdev_attrs, }; -static inline int -device_add_files (struct device *dev) +static const struct attribute_group *ccwdev_attr_groups[] = { + &ccwdev_attr_group, + NULL, +}; + +static int ccw_device_add(struct ccw_device *cdev) { - return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 
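+	/* Editorial note: dev->bus has to point at ccw_bus_type before
+	 * device_add() runs, so that ccw_bus_match() and ccw_uevent() above
+	 * are used for this device. */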
+ struct device *dev = &cdev->dev; + + dev->bus = &ccw_bus_type; + return device_add(dev); } -static inline void -device_remove_files(struct device *dev) +static int match_dev_id(struct device *dev, void *data) { - sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); + struct ccw_device *cdev = to_ccwdev(dev); + struct ccw_dev_id *dev_id = data; + + return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); } -/* this is a simple abstraction for device_register that sets the - * correct bus type and adds the bus specific files */ -int -ccw_device_register(struct ccw_device *cdev) +/** + * get_ccwdev_by_dev_id() - obtain device from a ccw device id + * @dev_id: id of the device to be searched + * + * This function searches all devices attached to the ccw bus for a device + * matching @dev_id. + * Returns: + * If a device is found its reference count is increased and returned; + * else %NULL is returned. + */ +struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id) { - struct device *dev = &cdev->dev; - int ret; + struct device *dev; - dev->bus = &ccw_bus_type; + dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id); - if ((ret = device_add(dev))) - return ret; + return dev ? to_ccwdev(dev) : NULL; +} +EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id); + +static void ccw_device_do_unbind_bind(struct ccw_device *cdev) +{ + int ret; - set_bit(1, &cdev->private->registered); - if ((ret = device_add_files(dev))) { - if (test_and_clear_bit(1, &cdev->private->registered)) - device_del(dev); + if (device_is_registered(&cdev->dev)) { + device_release_driver(&cdev->dev); + ret = device_attach(&cdev->dev); + WARN_ON(ret == -ENODEV); } - return ret; } -struct match_data { - unsigned int devno; - struct ccw_device * sibling; -}; - -static int -match_devno(struct device * dev, void * data) +static void +ccw_device_release(struct device *dev) { - struct match_data * d = (struct match_data *)data; - struct ccw_device * cdev; + struct ccw_device *cdev; cdev = to_ccwdev(dev); - if ((cdev->private->state == DEV_STATE_DISCONNECTED) && - (cdev->private->devno == d->devno) && - (cdev != d->sibling)) { - cdev->private->state = DEV_STATE_NOT_OPER; - return 1; - } - return 0; + /* Release reference of parent subchannel. */ + put_device(cdev->dev.parent); + kfree(cdev->private); + kfree(cdev); } -static struct ccw_device * -get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling) +static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) { - struct device *dev; - struct match_data data = { - .devno = devno, - .sibling = sibling, - }; - - dev = bus_find_device(&css_bus_type, NULL, &data, match_devno); + struct ccw_device *cdev; - return dev ? 
to_ccwdev(dev) : NULL; + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (cdev) { + cdev->private = kzalloc(sizeof(struct ccw_device_private), + GFP_KERNEL | GFP_DMA); + if (cdev->private) + return cdev; + } + kfree(cdev); + return ERR_PTR(-ENOMEM); } -static void -ccw_device_add_changed(void *data) +static void ccw_device_todo(struct work_struct *work); + +static int io_subchannel_initialize_dev(struct subchannel *sch, + struct ccw_device *cdev) { + struct ccw_device_private *priv = cdev->private; + int ret; - struct ccw_device *cdev; + priv->cdev = cdev; + priv->int_class = IRQIO_CIO; + priv->state = DEV_STATE_NOT_OPER; + priv->dev_id.devno = sch->schib.pmcw.dev; + priv->dev_id.ssid = sch->schid.ssid; + priv->schid = sch->schid; - cdev = (struct ccw_device *)data; - if (device_add(&cdev->dev)) { - put_device(&cdev->dev); - return; - } - set_bit(1, &cdev->private->registered); - if (device_add_files(&cdev->dev)) { - if (test_and_clear_bit(1, &cdev->private->registered)) - device_unregister(&cdev->dev); + INIT_WORK(&priv->todo_work, ccw_device_todo); + INIT_LIST_HEAD(&priv->cmb_list); + init_waitqueue_head(&priv->wait_q); + init_timer(&priv->timer); + + atomic_set(&priv->onoff, 0); + cdev->ccwlock = sch->lock; + cdev->dev.parent = &sch->dev; + cdev->dev.release = ccw_device_release; + cdev->dev.groups = ccwdev_attr_groups; + /* Do first half of device_register. */ + device_initialize(&cdev->dev); + ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + if (ret) + goto out_put; + if (!get_device(&sch->dev)) { + ret = -ENODEV; + goto out_put; } -} + priv->flags.initialized = 1; + spin_lock_irq(sch->lock); + sch_set_cdev(sch, cdev); + spin_unlock_irq(sch->lock); + return 0; -extern int css_get_ssd_info(struct subchannel *sch); +out_put: + /* Release reference from device_initialize(). */ + put_device(&cdev->dev); + return ret; +} -void -ccw_device_do_unreg_rereg(void *data) +static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) { struct ccw_device *cdev; - struct subchannel *sch; - int need_rename; + int ret; - cdev = (struct ccw_device *)data; - sch = to_subchannel(cdev->dev.parent); - if (cdev->private->devno != sch->schib.pmcw.dev) { - /* - * The device number has changed. This is usually only when - * a device has been detached under VM and then re-appeared - * on another subchannel because of a different attachment - * order than before. Ideally, we should should just switch - * subchannels, but unfortunately, this is not possible with - * the current implementation. - * Instead, we search for the old subchannel for this device - * number and deregister so there are no collisions with the - * newly registered ccw_device. - * FIXME: Find another solution so the block layer doesn't - * get possibly sick... - */ - struct ccw_device *other_cdev; - - need_rename = 1; - other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev, - cdev); - if (other_cdev) { - struct subchannel *other_sch; - - other_sch = to_subchannel(other_cdev->dev.parent); - if (get_device(&other_sch->dev)) { - stsch(other_sch->irq, &other_sch->schib); - if (other_sch->schib.pmcw.dnv) { - other_sch->schib.pmcw.intparm = 0; - cio_modify(other_sch); - } - device_unregister(&other_sch->dev); - } - } - /* Update ssd info here. 
*/ - css_get_ssd_info(sch); - cdev->private->devno = sch->schib.pmcw.dev; - } else - need_rename = 0; - device_remove_files(&cdev->dev); - if (test_and_clear_bit(1, &cdev->private->registered)) - device_del(&cdev->dev); - if (need_rename) - snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", - sch->schib.pmcw.dev); - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_add_changed, (void *)cdev); - queue_work(ccw_device_work, &cdev->private->kick_work); + cdev = io_subchannel_allocate_dev(sch); + if (!IS_ERR(cdev)) { + ret = io_subchannel_initialize_dev(sch, cdev); + if (ret) + cdev = ERR_PTR(ret); + } + return cdev; } -static void -ccw_device_release(struct device *dev) +static void io_subchannel_recog(struct ccw_device *, struct subchannel *); + +static void sch_create_and_recog_new_device(struct subchannel *sch) { struct ccw_device *cdev; - cdev = to_ccwdev(dev); - kfree(cdev->private); - kfree(cdev); + /* Need to allocate a new ccw device. */ + cdev = io_subchannel_create_ccwdev(sch); + if (IS_ERR(cdev)) { + /* OK, we did everything we could... */ + css_sch_device_unregister(sch); + return; + } + /* Start recognition for the new ccw device. */ + io_subchannel_recog(cdev, sch); } /* * Register recognized device. */ -static void -io_subchannel_register(void *data) +static void io_subchannel_register(struct ccw_device *cdev) { - struct ccw_device *cdev; struct subchannel *sch; - int ret; + int ret, adjust_init_count = 1; unsigned long flags; - cdev = (struct ccw_device *) data; sch = to_subchannel(cdev->dev.parent); - - if (klist_node_attached(&cdev->dev.knode_parent)) { - bus_rescan_devices(&ccw_bus_type); + /* + * Check if subchannel is still registered. It may have become + * unregistered if a machine check hit us after finishing + * device recognition but before the register work could be + * queued. + */ + if (!device_is_registered(&sch->dev)) + goto out_err; + css_update_ssd_info(sch); + /* + * io_subchannel_register() will also be called after device + * recognition has been done for a boxed device (which will already + * be registered). We need to reprobe since we may now have sense id + * information. + */ + if (device_is_registered(&cdev->dev)) { + if (!cdev->drv) { + ret = device_reprobe(&cdev->dev); + if (ret) + /* We can't do much here. */ + CIO_MSG_EVENT(0, "device_reprobe() returned" + " %d for 0.%x.%04x\n", ret, + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + } + adjust_init_count = 0; goto out; } + /* + * Now we know this subchannel will stay, we can throw + * our delayed uevent. + */ + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); /* make it known to the system */ - ret = ccw_device_register(cdev); + ret = ccw_device_add(cdev); if (ret) { - printk (KERN_WARNING "%s: could not register %s\n", - __func__, cdev->dev.bus_id); + CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, ret); + spin_lock_irqsave(sch->lock, flags); + sch_set_cdev(sch, NULL); + spin_unlock_irqrestore(sch->lock, flags); + /* Release initial device reference. 
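+	 * (Editorial note: this is the reference obtained through
+	 * device_initialize() in io_subchannel_initialize_dev().)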
*/ put_device(&cdev->dev); - spin_lock_irqsave(&sch->lock, flags); - sch->dev.driver_data = NULL; - spin_unlock_irqrestore(&sch->lock, flags); - kfree (cdev->private); - kfree (cdev); - put_device(&sch->dev); - if (atomic_dec_and_test(&ccw_device_init_count)) - wake_up(&ccw_device_init_wq); - return; + goto out_err; } - - ret = subchannel_add_files(cdev->dev.parent); - if (ret) - printk(KERN_WARNING "%s: could not add attributes to %s\n", - __func__, sch->dev.bus_id); - put_device(&cdev->dev); out: cdev->private->flags.recog_done = 1; - put_device(&sch->dev); wake_up(&cdev->private->wait_q); - if (atomic_dec_and_test(&ccw_device_init_count)) +out_err: + if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); } -void -ccw_device_call_sch_unregister(void *data) +static void ccw_device_call_sch_unregister(struct ccw_device *cdev) { - struct ccw_device *cdev = data; struct subchannel *sch; + /* Get subchannel reference for local processing. */ + if (!get_device(cdev->dev.parent)) + return; sch = to_subchannel(cdev->dev.parent); - device_unregister(&sch->dev); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - put_device(&cdev->dev); + css_sch_device_unregister(sch); + /* Release subchannel reference for local processing. */ put_device(&sch->dev); } @@ -705,318 +914,782 @@ ccw_device_call_sch_unregister(void *data) void io_subchannel_recog_done(struct ccw_device *cdev) { - struct subchannel *sch; - if (css_init_done == 0) { cdev->private->flags.recog_done = 1; return; } switch (cdev->private->state) { + case DEV_STATE_BOXED: + /* Device did not respond in time. */ case DEV_STATE_NOT_OPER: cdev->private->flags.recog_done = 1; /* Remove device found not operational. */ - if (!get_device(&cdev->dev)) - break; - sch = to_subchannel(cdev->dev.parent); - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, (void *) cdev); - queue_work(slow_path_wq, &cdev->private->kick_work); + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); if (atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); break; - case DEV_STATE_BOXED: - /* Device did not respond in time. */ case DEV_STATE_OFFLINE: /* * We can't register the device in interrupt context so * we schedule a work item. */ - if (!get_device(&cdev->dev)) - break; - PREPARE_WORK(&cdev->private->kick_work, - io_subchannel_register, (void *) cdev); - queue_work(slow_path_wq, &cdev->private->kick_work); + ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER); break; } } -static int -io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) +static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) { - int rc; - struct ccw_device_private *priv; - - sch->dev.driver_data = cdev; - sch->driver = &io_subchannel_driver; - cdev->ccwlock = &sch->lock; - /* Init private data. */ - priv = cdev->private; - priv->devno = sch->schib.pmcw.dev; - priv->irq = sch->irq; - priv->state = DEV_STATE_NOT_OPER; - INIT_LIST_HEAD(&priv->cmb_list); - init_waitqueue_head(&priv->wait_q); - init_timer(&priv->timer); - - /* Set an initial name for the device. */ - snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", - sch->schib.pmcw.dev); - /* Increase counter of devices currently in recognition. */ atomic_inc(&ccw_device_init_count); /* Start async. device sensing. 
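	 * (Editorial note: the result is delivered asynchronously; the device
	 * FSM calls io_subchannel_recog_done() above once recognition ends.)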
*/ - spin_lock_irq(&sch->lock); - rc = ccw_device_recognition(cdev); - spin_unlock_irq(&sch->lock); - if (rc) { - if (atomic_dec_and_test(&ccw_device_init_count)) - wake_up(&ccw_device_init_wq); - } - return rc; + spin_lock_irq(sch->lock); + ccw_device_recognition(cdev); + spin_unlock_irq(sch->lock); } -static int -io_subchannel_probe (struct device *pdev) +static int ccw_device_move_to_sch(struct ccw_device *cdev, + struct subchannel *sch) { - struct subchannel *sch; - struct ccw_device *cdev; - int rc; - unsigned long flags; - - sch = to_subchannel(pdev); - if (sch->dev.driver_data) { - /* - * This subchannel already has an associated ccw_device. - * Register it and exit. This happens for all early - * device, e.g. the console. - */ - cdev = sch->dev.driver_data; - device_initialize(&cdev->dev); - ccw_device_register(cdev); - subchannel_add_files(&sch->dev); - /* - * Check if the device is already online. If it is - * the reference count needs to be corrected - * (see ccw_device_online and css_init_done for the - * ugly details). - */ - if (cdev->private->state != DEV_STATE_NOT_OPER && - cdev->private->state != DEV_STATE_OFFLINE && - cdev->private->state != DEV_STATE_BOXED) - get_device(&cdev->dev); - return 0; - } - cdev = kmalloc (sizeof(*cdev), GFP_KERNEL); - if (!cdev) - return -ENOMEM; - memset(cdev, 0, sizeof(struct ccw_device)); - cdev->private = kmalloc(sizeof(struct ccw_device_private), - GFP_KERNEL | GFP_DMA); - if (!cdev->private) { - kfree(cdev); - return -ENOMEM; - } - memset(cdev->private, 0, sizeof(struct ccw_device_private)); - atomic_set(&cdev->private->onoff, 0); - cdev->dev = (struct device) { - .parent = pdev, - .release = ccw_device_release, - }; - INIT_LIST_HEAD(&cdev->private->kick_work.entry); - /* Do first half of device_register. */ - device_initialize(&cdev->dev); + struct subchannel *old_sch; + int rc, old_enabled = 0; - if (!get_device(&sch->dev)) { - if (cdev->dev.release) - cdev->dev.release(&cdev->dev); + old_sch = to_subchannel(cdev->dev.parent); + /* Obtain child reference for new parent. */ + if (!get_device(&sch->dev)) return -ENODEV; + + if (!sch_is_pseudo_sch(old_sch)) { + spin_lock_irq(old_sch->lock); + old_enabled = old_sch->schib.pmcw.ena; + rc = 0; + if (old_enabled) + rc = cio_disable_subchannel(old_sch); + spin_unlock_irq(old_sch->lock); + if (rc == -EBUSY) { + /* Release child reference for new parent. */ + put_device(&sch->dev); + return rc; + } } - rc = io_subchannel_recog(cdev, to_subchannel(pdev)); + mutex_lock(&sch->reg_mutex); + rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); + mutex_unlock(&sch->reg_mutex); if (rc) { - spin_lock_irqsave(&sch->lock, flags); - sch->dev.driver_data = NULL; - spin_unlock_irqrestore(&sch->lock, flags); - if (cdev->dev.release) - cdev->dev.release(&cdev->dev); + CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, sch->schid.ssid, + sch->schib.pmcw.dev, rc); + if (old_enabled) { + /* Try to reenable the old subchannel. */ + spin_lock_irq(old_sch->lock); + cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch); + spin_unlock_irq(old_sch->lock); + } + /* Release child reference for new parent. */ + put_device(&sch->dev); + return rc; + } + /* Clean up old subchannel. */ + if (!sch_is_pseudo_sch(old_sch)) { + spin_lock_irq(old_sch->lock); + sch_set_cdev(old_sch, NULL); + spin_unlock_irq(old_sch->lock); + css_schedule_eval(old_sch->schid); } + /* Release child reference for old parent. 
*/ + put_device(&old_sch->dev); + /* Initialize new subchannel. */ + spin_lock_irq(sch->lock); + cdev->private->schid = sch->schid; + cdev->ccwlock = sch->lock; + if (!sch_is_pseudo_sch(sch)) + sch_set_cdev(sch, cdev); + spin_unlock_irq(sch->lock); + if (!sch_is_pseudo_sch(sch)) + css_update_ssd_info(sch); + return 0; +} - return rc; +static int ccw_device_move_to_orph(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct channel_subsystem *css = to_css(sch->dev.parent); + + return ccw_device_move_to_sch(cdev, css->pseudo_subchannel); } -static void -ccw_device_unregister(void *data) +static void io_subchannel_irq(struct subchannel *sch) { struct ccw_device *cdev; - cdev = (struct ccw_device *)data; - if (test_and_clear_bit(1, &cdev->private->registered)) - device_unregister(&cdev->dev); - put_device(&cdev->dev); + cdev = sch_get_cdev(sch); + + CIO_TRACE_EVENT(6, "IRQ"); + CIO_TRACE_EVENT(6, dev_name(&sch->dev)); + if (cdev) + dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); + else + inc_irq_stat(IRQIO_CIO); } -static int -io_subchannel_remove (struct device *dev) +void io_subchannel_init_config(struct subchannel *sch) +{ + memset(&sch->config, 0, sizeof(sch->config)); + sch->config.csense = 1; +} + +static void io_subchannel_init_fields(struct subchannel *sch) +{ + if (cio_is_console(sch->schid)) + sch->opm = 0xff; + else + sch->opm = chp_get_sch_opm(sch); + sch->lpm = sch->schib.pmcw.pam & sch->opm; + sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; + + CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X" + " - PIM = %02X, PAM = %02X, POM = %02X\n", + sch->schib.pmcw.dev, sch->schid.ssid, + sch->schid.sch_no, sch->schib.pmcw.pim, + sch->schib.pmcw.pam, sch->schib.pmcw.pom); + + io_subchannel_init_config(sch); +} + +/* + * Note: We always return 0 so that we bind to the device even on error. + * This is needed so that our remove function is called on unregister. + */ +static int io_subchannel_probe(struct subchannel *sch) { + struct io_subchannel_private *io_priv; struct ccw_device *cdev; - unsigned long flags; + int rc; - if (!dev->driver_data) + if (cio_is_console(sch->schid)) { + rc = sysfs_create_group(&sch->dev.kobj, + &io_subchannel_attr_group); + if (rc) + CIO_MSG_EVENT(0, "Failed to create io subchannel " + "attributes for subchannel " + "0.%x.%04x (rc=%d)\n", + sch->schid.ssid, sch->schid.sch_no, rc); + /* + * The console subchannel already has an associated ccw_device. + * Throw the delayed uevent for the subchannel, register + * the ccw_device and exit. + */ + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + cdev = sch_get_cdev(sch); + rc = ccw_device_add(cdev); + if (rc) { + /* Release online reference. */ + put_device(&cdev->dev); + goto out_schedule; + } + if (atomic_dec_and_test(&ccw_device_init_count)) + wake_up(&ccw_device_init_wq); return 0; - cdev = dev->driver_data; - /* Set ccw device to not operational and drop reference. */ - spin_lock_irqsave(cdev->ccwlock, flags); - dev->driver_data = NULL; - cdev->private->state = DEV_STATE_NOT_OPER; - spin_unlock_irqrestore(cdev->ccwlock, flags); - /* - * Put unregistration on workqueue to avoid livelocks on the css bus - * semaphore. 
- */ - if (get_device(&cdev->dev)) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_unregister, (void *) cdev); - queue_work(ccw_device_work, &cdev->private->kick_work); } + io_subchannel_init_fields(sch); + rc = cio_commit_config(sch); + if (rc) + goto out_schedule; + rc = sysfs_create_group(&sch->dev.kobj, + &io_subchannel_attr_group); + if (rc) + goto out_schedule; + /* Allocate I/O subchannel private data. */ + io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); + if (!io_priv) + goto out_schedule; + + set_io_private(sch, io_priv); + css_schedule_eval(sch->schid); + return 0; + +out_schedule: + spin_lock_irq(sch->lock); + css_sched_sch_todo(sch, SCH_TODO_UNREG); + spin_unlock_irq(sch->lock); return 0; } static int -io_subchannel_notify(struct device *dev, int event) +io_subchannel_remove (struct subchannel *sch) { + struct io_subchannel_private *io_priv = to_io_private(sch); struct ccw_device *cdev; - cdev = dev->driver_data; + cdev = sch_get_cdev(sch); if (!cdev) - return 0; - if (!cdev->drv) - return 0; - if (!cdev->online) - return 0; - return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; + goto out_free; + io_subchannel_quiesce(sch); + /* Set ccw device to not operational and drop reference. */ + spin_lock_irq(cdev->ccwlock); + sch_set_cdev(sch, NULL); + set_io_private(sch, NULL); + cdev->private->state = DEV_STATE_NOT_OPER; + spin_unlock_irq(cdev->ccwlock); + ccw_device_unregister(cdev); +out_free: + kfree(io_priv); + sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); + return 0; } -static void -io_subchannel_verify(struct device *dev) +static void io_subchannel_verify(struct subchannel *sch) { struct ccw_device *cdev; - cdev = dev->driver_data; + cdev = sch_get_cdev(sch); if (cdev) dev_fsm_event(cdev, DEV_EVENT_VERIFY); } -static void -io_subchannel_ioterm(struct device *dev) +static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) { struct ccw_device *cdev; - cdev = dev->driver_data; + cdev = sch_get_cdev(sch); if (!cdev) return; - cdev->private->state = DEV_STATE_CLEAR_VERIFY; - if (cdev->handler) - cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); + if (cio_update_schib(sch)) + goto err; + /* Check for I/O on path. */ + if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask) + goto out; + if (cdev->private->state == DEV_STATE_ONLINE) { + ccw_device_kill_io(cdev); + goto out; + } + if (cio_clear(sch)) + goto err; +out: + /* Trigger path verification. 
*/ + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + return; + +err: + dev_fsm_event(cdev, DEV_EVENT_NOTOPER); } -static void -io_subchannel_shutdown(struct device *dev) +static int io_subchannel_chp_event(struct subchannel *sch, + struct chp_link *link, int event) +{ + struct ccw_device *cdev = sch_get_cdev(sch); + int mask; + + mask = chp_ssd_get_mask(&sch->ssd_info, link); + if (!mask) + return 0; + switch (event) { + case CHP_VARY_OFF: + sch->opm &= ~mask; + sch->lpm &= ~mask; + if (cdev) + cdev->private->path_gone_mask |= mask; + io_subchannel_terminate_path(sch, mask); + break; + case CHP_VARY_ON: + sch->opm |= mask; + sch->lpm |= mask; + if (cdev) + cdev->private->path_new_mask |= mask; + io_subchannel_verify(sch); + break; + case CHP_OFFLINE: + if (cio_update_schib(sch)) + return -ENODEV; + if (cdev) + cdev->private->path_gone_mask |= mask; + io_subchannel_terminate_path(sch, mask); + break; + case CHP_ONLINE: + if (cio_update_schib(sch)) + return -ENODEV; + sch->lpm |= mask & sch->opm; + if (cdev) + cdev->private->path_new_mask |= mask; + io_subchannel_verify(sch); + break; + } + return 0; +} + +static void io_subchannel_quiesce(struct subchannel *sch) { - struct subchannel *sch; struct ccw_device *cdev; int ret; - sch = to_subchannel(dev); - cdev = dev->driver_data; - - if (cio_is_console(sch->irq)) - return; + spin_lock_irq(sch->lock); + cdev = sch_get_cdev(sch); + if (cio_is_console(sch->schid)) + goto out_unlock; if (!sch->schib.pmcw.ena) - /* Nothing to do. */ - return; + goto out_unlock; ret = cio_disable_subchannel(sch); if (ret != -EBUSY) - /* Subchannel is disabled, we're done. */ - return; - cdev->private->state = DEV_STATE_QUIESCE; + goto out_unlock; if (cdev->handler) - cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); - ret = ccw_device_cancel_halt_clear(cdev); - if (ret == -EBUSY) { - ccw_device_set_timeout(cdev, HZ/10); - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); + cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); + while (ret == -EBUSY) { + cdev->private->state = DEV_STATE_QUIESCE; + cdev->private->iretry = 255; + ret = ccw_device_cancel_halt_clear(cdev); + if (ret == -EBUSY) { + ccw_device_set_timeout(cdev, HZ/10); + spin_unlock_irq(sch->lock); + wait_event(cdev->private->wait_q, + cdev->private->state != DEV_STATE_QUIESCE); + spin_lock_irq(sch->lock); + } + ret = cio_disable_subchannel(sch); + } +out_unlock: + spin_unlock_irq(sch->lock); +} + +static void io_subchannel_shutdown(struct subchannel *sch) +{ + io_subchannel_quiesce(sch); +} + +static int device_is_disconnected(struct ccw_device *cdev) +{ + if (!cdev) + return 0; + return (cdev->private->state == DEV_STATE_DISCONNECTED || + cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); +} + +static int recovery_check(struct device *dev, void *data) +{ + struct ccw_device *cdev = to_ccwdev(dev); + int *redo = data; + + spin_lock_irq(cdev->ccwlock); + switch (cdev->private->state) { + case DEV_STATE_DISCONNECTED: + CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + *redo = 1; + break; + case DEV_STATE_DISCONNECTED_SENSE_ID: + *redo = 1; + break; + } + spin_unlock_irq(cdev->ccwlock); + + return 0; +} + +static void recovery_work_func(struct work_struct *unused) +{ + int redo = 0; + + bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); + if (redo) { + spin_lock_irq(&recovery_lock); + if (!timer_pending(&recovery_timer)) { + if (recovery_phase < 
ARRAY_SIZE(recovery_delay) - 1) + recovery_phase++; + mod_timer(&recovery_timer, jiffies + + recovery_delay[recovery_phase] * HZ); + } + spin_unlock_irq(&recovery_lock); + } else + CIO_MSG_EVENT(4, "recovery: end\n"); +} + +static DECLARE_WORK(recovery_work, recovery_work_func); + +static void recovery_func(unsigned long data) +{ + /* + * We can't do our recovery in softirq context and it's not + * performance critical, so we schedule it. + */ + schedule_work(&recovery_work); +} + +static void ccw_device_schedule_recovery(void) +{ + unsigned long flags; + + CIO_MSG_EVENT(4, "recovery: schedule\n"); + spin_lock_irqsave(&recovery_lock, flags); + if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { + recovery_phase = 0; + mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); } + spin_unlock_irqrestore(&recovery_lock, flags); +} + +static int purge_fn(struct device *dev, void *data) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct ccw_dev_id *id = &cdev->private->dev_id; + + spin_lock_irq(cdev->ccwlock); + if (is_blacklisted(id->ssid, id->devno) && + (cdev->private->state == DEV_STATE_OFFLINE) && + (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { + CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, + id->devno); + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + atomic_set(&cdev->private->onoff, 0); + } + spin_unlock_irq(cdev->ccwlock); + /* Abort loop in case of pending signal. */ + if (signal_pending(current)) + return -EINTR; + + return 0; +} + +/** + * ccw_purge_blacklisted - purge unused, blacklisted devices + * + * Unregister all ccw devices that are offline and on the blacklist. + */ +int ccw_purge_blacklisted(void) +{ + CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); + bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); + return 0; +} + +void ccw_device_set_disconnected(struct ccw_device *cdev) +{ + if (!cdev) + return; + ccw_device_set_timeout(cdev, 0); + cdev->private->flags.fake_irb = 0; + cdev->private->state = DEV_STATE_DISCONNECTED; + if (cdev->online) + ccw_device_schedule_recovery(); +} + +void ccw_device_set_notoper(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + CIO_TRACE_EVENT(2, "notoper"); + CIO_TRACE_EVENT(2, dev_name(&sch->dev)); + ccw_device_set_timeout(cdev, 0); cio_disable_subchannel(sch); + cdev->private->state = DEV_STATE_NOT_OPER; } -#ifdef CONFIG_CCW_CONSOLE -static struct ccw_device console_cdev; -static struct ccw_device_private console_private; -static int console_cdev_in_use; +enum io_sch_action { + IO_SCH_UNREG, + IO_SCH_ORPH_UNREG, + IO_SCH_ATTACH, + IO_SCH_UNREG_ATTACH, + IO_SCH_ORPH_ATTACH, + IO_SCH_REPROBE, + IO_SCH_VERIFY, + IO_SCH_DISC, + IO_SCH_NOP, +}; -static int -ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) +static enum io_sch_action sch_get_action(struct subchannel *sch) +{ + struct ccw_device *cdev; + + cdev = sch_get_cdev(sch); + if (cio_update_schib(sch)) { + /* Not operational. */ + if (!cdev) + return IO_SCH_UNREG; + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) + return IO_SCH_UNREG; + return IO_SCH_ORPH_UNREG; + } + /* Operational. 
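The recovery worker above re-arms its timer with a longer delay on every pass that still finds disconnected devices, saturating at the last entry of recovery_delay[]. The standalone C illustration below mirrors only that escalation step; it is not kernel code, and the table values of 3, 30 and 300 seconds are an assumption for the example, since the real recovery_delay[] definition lies outside this hunk.

#include <stdio.h>

/* Assumed values for illustration; the real table is not part of this hunk. */
static const int recovery_delay[] = { 3, 30, 300 };
#define RECOVERY_PHASES (sizeof(recovery_delay) / sizeof(recovery_delay[0]))

static unsigned int recovery_phase;

/*
 * Mirror of the re-arm step in recovery_work_func(): escalate the phase,
 * then return the delay (in seconds) for the next retry, saturating at
 * the last table entry.
 */
static int next_recovery_delay(void)
{
	if (recovery_phase < RECOVERY_PHASES - 1)
		recovery_phase++;
	return recovery_delay[recovery_phase];
}

int main(void)
{
	for (int retry = 1; retry <= 5; retry++)
		printf("retry %d re-armed after %d seconds\n",
		       retry, next_recovery_delay());
	return 0;
}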
*/ + if (!cdev) + return IO_SCH_ATTACH; + if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) + return IO_SCH_UNREG_ATTACH; + return IO_SCH_ORPH_ATTACH; + } + if ((sch->schib.pmcw.pam & sch->opm) == 0) { + if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) + return IO_SCH_UNREG; + return IO_SCH_DISC; + } + if (device_is_disconnected(cdev)) + return IO_SCH_REPROBE; + if (cdev->online && !cdev->private->flags.resuming) + return IO_SCH_VERIFY; + if (cdev->private->state == DEV_STATE_NOT_OPER) + return IO_SCH_UNREG_ATTACH; + return IO_SCH_NOP; +} + +/** + * io_subchannel_sch_event - process subchannel event + * @sch: subchannel + * @process: non-zero if function is called in process context + * + * An unspecified event occurred for this subchannel. Adjust data according + * to the current operational state of the subchannel and device. Return + * zero when the event has been handled sufficiently or -EAGAIN when this + * function should be called again in process context. + */ +static int io_subchannel_sch_event(struct subchannel *sch, int process) +{ + unsigned long flags; + struct ccw_device *cdev; + struct ccw_dev_id dev_id; + enum io_sch_action action; + int rc = -EAGAIN; + + spin_lock_irqsave(sch->lock, flags); + if (!device_is_registered(&sch->dev)) + goto out_unlock; + if (work_pending(&sch->todo_work)) + goto out_unlock; + cdev = sch_get_cdev(sch); + if (cdev && work_pending(&cdev->private->todo_work)) + goto out_unlock; + action = sch_get_action(sch); + CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n", + sch->schid.ssid, sch->schid.sch_no, process, + action); + /* Perform immediate actions while holding the lock. */ + switch (action) { + case IO_SCH_REPROBE: + /* Trigger device recognition. */ + ccw_device_trigger_reprobe(cdev); + rc = 0; + goto out_unlock; + case IO_SCH_VERIFY: + /* Trigger path verification. */ + io_subchannel_verify(sch); + rc = 0; + goto out_unlock; + case IO_SCH_DISC: + ccw_device_set_disconnected(cdev); + rc = 0; + goto out_unlock; + case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_ATTACH: + ccw_device_set_disconnected(cdev); + break; + case IO_SCH_UNREG_ATTACH: + case IO_SCH_UNREG: + if (!cdev) + break; + if (cdev->private->state == DEV_STATE_SENSE_ID) { + /* + * Note: delayed work triggered by this event + * and repeated calls to sch_event are synchronized + * by the above check for work_pending(cdev). + */ + dev_fsm_event(cdev, DEV_EVENT_NOTOPER); + } else + ccw_device_set_notoper(cdev); + break; + case IO_SCH_NOP: + rc = 0; + goto out_unlock; + default: + break; + } + spin_unlock_irqrestore(sch->lock, flags); + /* All other actions require process context. */ + if (!process) + goto out; + /* Handle attached ccw device. */ + switch (action) { + case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_ATTACH: + /* Move ccw device to orphanage. */ + rc = ccw_device_move_to_orph(cdev); + if (rc) + goto out; + break; + case IO_SCH_UNREG_ATTACH: + spin_lock_irqsave(sch->lock, flags); + if (cdev->private->flags.resuming) { + /* Device will be handled later. */ + rc = 0; + goto out_unlock; + } + sch_set_cdev(sch, NULL); + spin_unlock_irqrestore(sch->lock, flags); + /* Unregister ccw device. */ + ccw_device_unregister(cdev); + break; + default: + break; + } + /* Handle subchannel. 
*/ + switch (action) { + case IO_SCH_ORPH_UNREG: + case IO_SCH_UNREG: + if (!cdev || !cdev->private->flags.resuming) + css_sch_device_unregister(sch); + break; + case IO_SCH_ORPH_ATTACH: + case IO_SCH_UNREG_ATTACH: + case IO_SCH_ATTACH: + dev_id.ssid = sch->schid.ssid; + dev_id.devno = sch->schib.pmcw.dev; + cdev = get_ccwdev_by_dev_id(&dev_id); + if (!cdev) { + sch_create_and_recog_new_device(sch); + break; + } + rc = ccw_device_move_to_sch(cdev, sch); + if (rc) { + /* Release reference from get_ccwdev_by_dev_id() */ + put_device(&cdev->dev); + goto out; + } + spin_lock_irqsave(sch->lock, flags); + ccw_device_trigger_reprobe(cdev); + spin_unlock_irqrestore(sch->lock, flags); + /* Release reference from get_ccwdev_by_dev_id() */ + put_device(&cdev->dev); + break; + default: + break; + } + return 0; + +out_unlock: + spin_unlock_irqrestore(sch->lock, flags); +out: + return rc; +} + +static void ccw_device_set_int_class(struct ccw_device *cdev) +{ + struct ccw_driver *cdrv = cdev->drv; + + /* Note: we interpret class 0 in this context as an uninitialized + * field since it translates to a non-I/O interrupt class. */ + if (cdrv->int_class != 0) + cdev->private->int_class = cdrv->int_class; + else + cdev->private->int_class = IRQIO_CIO; +} + +#ifdef CONFIG_CCW_CONSOLE +int __init ccw_device_enable_console(struct ccw_device *cdev) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); int rc; - /* Initialize the ccw_device structure. */ - cdev->dev = (struct device) { - .parent = &sch->dev, - }; - /* Initialize the subchannel structure */ - sch->dev.parent = &css_bus_device; - sch->dev.bus = &css_bus_type; + if (!cdev->drv || !cdev->handler) + return -EINVAL; - rc = io_subchannel_recog(cdev, sch); + io_subchannel_init_fields(sch); + rc = cio_commit_config(sch); if (rc) return rc; - + sch->driver = &io_subchannel_driver; + io_subchannel_recog(cdev, sch); /* Now wait for the async. recognition to come to an end. */ spin_lock_irq(cdev->ccwlock); while (!dev_fsm_final_state(cdev)) - wait_cons_dev(); - rc = -EIO; - if (cdev->private->state != DEV_STATE_OFFLINE) + ccw_device_wait_idle(cdev); + + /* Hold on to an extra reference while device is online. */ + get_device(&cdev->dev); + rc = ccw_device_online(cdev); + if (rc) goto out_unlock; - ccw_device_online(cdev); + while (!dev_fsm_final_state(cdev)) - wait_cons_dev(); - if (cdev->private->state != DEV_STATE_ONLINE) - goto out_unlock; - rc = 0; + ccw_device_wait_idle(cdev); + + if (cdev->private->state == DEV_STATE_ONLINE) + cdev->online = 1; + else + rc = -EIO; out_unlock: spin_unlock_irq(cdev->ccwlock); - return 0; + if (rc) /* Give up online reference since onlining failed. 
*/ + put_device(&cdev->dev); + return rc; } -struct ccw_device * -ccw_device_probe_console(void) +struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv) { + struct io_subchannel_private *io_priv; + struct ccw_device *cdev; struct subchannel *sch; - int ret; - if (xchg(&console_cdev_in_use, 1) != 0) - return NULL; sch = cio_probe_console(); - if (IS_ERR(sch)) { - console_cdev_in_use = 0; - return (void *) sch; + if (IS_ERR(sch)) + return ERR_CAST(sch); + + io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); + if (!io_priv) { + put_device(&sch->dev); + return ERR_PTR(-ENOMEM); } - memset(&console_cdev, 0, sizeof(struct ccw_device)); - memset(&console_private, 0, sizeof(struct ccw_device_private)); - console_cdev.private = &console_private; - ret = ccw_device_console_enable(&console_cdev, sch); - if (ret) { - cio_release_console(); - console_cdev_in_use = 0; - return ERR_PTR(ret); + set_io_private(sch, io_priv); + cdev = io_subchannel_create_ccwdev(sch); + if (IS_ERR(cdev)) { + put_device(&sch->dev); + kfree(io_priv); + return cdev; + } + cdev->drv = drv; + ccw_device_set_int_class(cdev); + return cdev; +} + +void __init ccw_device_destroy_console(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct io_subchannel_private *io_priv = to_io_private(sch); + + set_io_private(sch, NULL); + put_device(&sch->dev); + put_device(&cdev->dev); + kfree(io_priv); +} + +/** + * ccw_device_wait_idle() - busy wait for device to become idle + * @cdev: ccw device + * + * Poll until activity control is zero, that is, no function or data + * transfer is pending/active. + * Called with device lock being held. + */ +void ccw_device_wait_idle(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + while (1) { + cio_tsch(sch); + if (sch->schib.scsw.cmd.actl == 0) + break; + udelay_simple(100); } - console_cdev.online = 1; - return &console_cdev; } + +static int ccw_device_pm_restore(struct device *dev); + +int ccw_device_force_console(struct ccw_device *cdev) +{ + return ccw_device_pm_restore(&cdev->dev); +} +EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif /* @@ -1027,27 +1700,32 @@ __ccwdev_check_busid(struct device *dev, void *id) { char *bus_id; - bus_id = (char *)id; + bus_id = id; - return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0); + return (strcmp(bus_id, dev_name(dev)) == 0); } -struct ccw_device * -get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id) +/** + * get_ccwdev_by_busid() - obtain device from a bus id + * @cdrv: driver the device is owned by + * @bus_id: bus id of the device to be searched + * + * This function searches all devices owned by @cdrv for a device with a bus + * id matching @bus_id. + * Returns: + * If a match is found, its reference count of the found device is increased + * and it is returned; else %NULL is returned. + */ +struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, + const char *bus_id) { struct device *dev; - struct device_driver *drv; - - drv = get_driver(&cdrv->driver); - if (!drv) - return NULL; - dev = driver_find_device(drv, NULL, (void *)bus_id, + dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, __ccwdev_check_busid); - put_driver(drv); - return dev ? to_ccwdev(dev) : 0; + return dev ? 
to_ccwdev(dev) : NULL; } /************************** device driver handling ************************/ @@ -1068,70 +1746,416 @@ ccw_device_probe (struct device *dev) int ret; cdev->drv = cdrv; /* to let the driver call _set_online */ - + ccw_device_set_int_class(cdev); ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; - if (ret) { - cdev->drv = 0; + cdev->drv = NULL; + cdev->private->int_class = IRQIO_CIO; return ret; } return 0; } -static int -ccw_device_remove (struct device *dev) +static int ccw_device_remove(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = cdev->drv; int ret; - pr_debug("removing device %s\n", cdev->dev.bus_id); if (cdrv->remove) cdrv->remove(cdev); + + spin_lock_irq(cdev->ccwlock); if (cdev->online) { cdev->online = 0; - spin_lock_irq(cdev->ccwlock); ret = ccw_device_offline(cdev); spin_unlock_irq(cdev->ccwlock); if (ret == 0) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else - //FIXME: we can't fail! - pr_debug("ccw_device_offline returned %d, device %s\n", - ret, cdev->dev.bus_id); + CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " + "device 0.%x.%04x\n", + ret, cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + /* Give up reference obtained in ccw_device_set_online(). */ + put_device(&cdev->dev); + spin_lock_irq(cdev->ccwlock); } ccw_device_set_timeout(cdev, 0); - cdev->drv = 0; + cdev->drv = NULL; + cdev->private->int_class = IRQIO_CIO; + spin_unlock_irq(cdev->ccwlock); + return 0; +} + +static void ccw_device_shutdown(struct device *dev) +{ + struct ccw_device *cdev; + + cdev = to_ccwdev(dev); + if (cdev->drv && cdev->drv->shutdown) + cdev->drv->shutdown(cdev); + disable_cmf(cdev); +} + +static int ccw_device_pm_prepare(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + + if (work_pending(&cdev->private->todo_work)) + return -EAGAIN; + /* Fail while device is being set online/offline. */ + if (atomic_read(&cdev->private->onoff)) + return -EAGAIN; + + if (cdev->online && cdev->drv && cdev->drv->prepare) + return cdev->drv->prepare(cdev); + return 0; } -int -ccw_driver_register (struct ccw_driver *cdriver) +static void ccw_device_pm_complete(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + + if (cdev->online && cdev->drv && cdev->drv->complete) + cdev->drv->complete(cdev); +} + +static int ccw_device_pm_freeze(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int ret, cm_enabled; + + /* Fail suspend while device is in transistional state. */ + if (!dev_fsm_final_state(cdev)) + return -EAGAIN; + if (!cdev->online) + return 0; + if (cdev->drv && cdev->drv->freeze) { + ret = cdev->drv->freeze(cdev); + if (ret) + return ret; + } + + spin_lock_irq(sch->lock); + cm_enabled = cdev->private->cmb != NULL; + spin_unlock_irq(sch->lock); + if (cm_enabled) { + /* Don't have the css write on memory. */ + ret = ccw_set_cmf(cdev, 0); + if (ret) + return ret; + } + /* From here on, disallow device driver I/O. */ + spin_lock_irq(sch->lock); + ret = cio_disable_subchannel(sch); + spin_unlock_irq(sch->lock); + + return ret; +} + +static int ccw_device_pm_thaw(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int ret, cm_enabled; + + if (!cdev->online) + return 0; + + spin_lock_irq(sch->lock); + /* Allow device driver I/O again. 
*/ + ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); + cm_enabled = cdev->private->cmb != NULL; + spin_unlock_irq(sch->lock); + if (ret) + return ret; + + if (cm_enabled) { + ret = ccw_set_cmf(cdev, 1); + if (ret) + return ret; + } + + if (cdev->drv && cdev->drv->thaw) + ret = cdev->drv->thaw(cdev); + + return ret; +} + +static void __ccw_device_pm_restore(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + spin_lock_irq(sch->lock); + if (cio_is_console(sch->schid)) { + cio_enable_subchannel(sch, (u32)(addr_t)sch); + goto out_unlock; + } + /* + * While we were sleeping, devices may have gone or become + * available again. Kick re-detection. + */ + cdev->private->flags.resuming = 1; + cdev->private->path_new_mask = LPM_ANYPATH; + css_sched_sch_todo(sch, SCH_TODO_EVAL); + spin_unlock_irq(sch->lock); + css_wait_for_slow_path(); + + /* cdev may have been moved to a different subchannel. */ + sch = to_subchannel(cdev->dev.parent); + spin_lock_irq(sch->lock); + if (cdev->private->state != DEV_STATE_ONLINE && + cdev->private->state != DEV_STATE_OFFLINE) + goto out_unlock; + + ccw_device_recognition(cdev); + spin_unlock_irq(sch->lock); + wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || + cdev->private->state == DEV_STATE_DISCONNECTED); + spin_lock_irq(sch->lock); + +out_unlock: + cdev->private->flags.resuming = 0; + spin_unlock_irq(sch->lock); +} + +static int resume_handle_boxed(struct ccw_device *cdev) +{ + cdev->private->state = DEV_STATE_BOXED; + if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK) + return 0; + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + return -ENODEV; +} + +static int resume_handle_disc(struct ccw_device *cdev) +{ + cdev->private->state = DEV_STATE_DISCONNECTED; + if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK) + return 0; + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + return -ENODEV; +} + +static int ccw_device_pm_restore(struct device *dev) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch; + int ret = 0; + + __ccw_device_pm_restore(cdev); + sch = to_subchannel(cdev->dev.parent); + spin_lock_irq(sch->lock); + if (cio_is_console(sch->schid)) + goto out_restore; + + /* check recognition results */ + switch (cdev->private->state) { + case DEV_STATE_OFFLINE: + case DEV_STATE_ONLINE: + cdev->private->flags.donotify = 0; + break; + case DEV_STATE_BOXED: + ret = resume_handle_boxed(cdev); + if (ret) + goto out_unlock; + goto out_restore; + default: + ret = resume_handle_disc(cdev); + if (ret) + goto out_unlock; + goto out_restore; + } + /* check if the device type has changed */ + if (!ccw_device_test_sense_data(cdev)) { + ccw_device_update_sense_data(cdev); + ccw_device_sched_todo(cdev, CDEV_TODO_REBIND); + ret = -ENODEV; + goto out_unlock; + } + if (!cdev->online) + goto out_unlock; + + if (ccw_device_online(cdev)) { + ret = resume_handle_disc(cdev); + if (ret) + goto out_unlock; + goto out_restore; + } + spin_unlock_irq(sch->lock); + wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); + spin_lock_irq(sch->lock); + + if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) { + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + ret = -ENODEV; + goto out_unlock; + } + + /* reenable cmf, if needed */ + if (cdev->private->cmb) { + spin_unlock_irq(sch->lock); + ret = ccw_set_cmf(cdev, 1); + spin_lock_irq(sch->lock); + if (ret) { + CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " + "(rc=%d)\n", cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, ret); + ret = 0; + } + } + 
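The freeze/thaw/restore handlers above bracket their subchannel disable/enable work with calls into the owning driver (cdev->drv->freeze, ->thaw and ->restore). The fragment below is a hedged sketch of the driver side of that contract; the sample_* names are invented and the callback bodies are placeholders, not code from this patch.

#include <asm/ccwdev.h>

/* Driver-side hibernation callbacks, wired into struct ccw_driver. */
static int sample_freeze(struct ccw_device *cdev)
{
	/* Stop issuing new I/O; terminate or wait for outstanding requests. */
	return 0;
}

static int sample_thaw(struct ccw_device *cdev)
{
	/* Resume normal operation after the image was restored in place. */
	return 0;
}

static int sample_restore(struct ccw_device *cdev)
{
	/* Re-validate device state after resume from disk. */
	return 0;
}

/*
 * In the driver definition:
 *	.freeze  = sample_freeze,
 *	.thaw    = sample_thaw,
 *	.restore = sample_restore,
 */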
+out_restore: + spin_unlock_irq(sch->lock); + if (cdev->online && cdev->drv && cdev->drv->restore) + ret = cdev->drv->restore(cdev); + return ret; + +out_unlock: + spin_unlock_irq(sch->lock); + return ret; +} + +static const struct dev_pm_ops ccw_pm_ops = { + .prepare = ccw_device_pm_prepare, + .complete = ccw_device_pm_complete, + .freeze = ccw_device_pm_freeze, + .thaw = ccw_device_pm_thaw, + .restore = ccw_device_pm_restore, +}; + +static struct bus_type ccw_bus_type = { + .name = "ccw", + .match = ccw_bus_match, + .uevent = ccw_uevent, + .probe = ccw_device_probe, + .remove = ccw_device_remove, + .shutdown = ccw_device_shutdown, + .pm = &ccw_pm_ops, +}; + +/** + * ccw_driver_register() - register a ccw driver + * @cdriver: driver to be registered + * + * This function is mainly a wrapper around driver_register(). + * Returns: + * %0 on success and a negative error value on failure. + */ +int ccw_driver_register(struct ccw_driver *cdriver) { struct device_driver *drv = &cdriver->driver; drv->bus = &ccw_bus_type; - drv->name = cdriver->name; - drv->probe = ccw_device_probe; - drv->remove = ccw_device_remove; return driver_register(drv); } -void -ccw_driver_unregister (struct ccw_driver *cdriver) +/** + * ccw_driver_unregister() - deregister a ccw driver + * @cdriver: driver to be deregistered + * + * This function is mainly a wrapper around driver_unregister(). + */ +void ccw_driver_unregister(struct ccw_driver *cdriver) { driver_unregister(&cdriver->driver); } +static void ccw_device_todo(struct work_struct *work) +{ + struct ccw_device_private *priv; + struct ccw_device *cdev; + struct subchannel *sch; + enum cdev_todo todo; + + priv = container_of(work, struct ccw_device_private, todo_work); + cdev = priv->cdev; + sch = to_subchannel(cdev->dev.parent); + /* Find out todo. */ + spin_lock_irq(cdev->ccwlock); + todo = priv->todo; + priv->todo = CDEV_TODO_NOTHING; + CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n", + priv->dev_id.ssid, priv->dev_id.devno, todo); + spin_unlock_irq(cdev->ccwlock); + /* Perform todo. */ + switch (todo) { + case CDEV_TODO_ENABLE_CMF: + cmf_reenable(cdev); + break; + case CDEV_TODO_REBIND: + ccw_device_do_unbind_bind(cdev); + break; + case CDEV_TODO_REGISTER: + io_subchannel_register(cdev); + break; + case CDEV_TODO_UNREG_EVAL: + if (!sch_is_pseudo_sch(sch)) + css_schedule_eval(sch->schid); + /* fall-through */ + case CDEV_TODO_UNREG: + if (sch_is_pseudo_sch(sch)) + ccw_device_unregister(cdev); + else + ccw_device_call_sch_unregister(cdev); + break; + default: + break; + } + /* Release workqueue ref. */ + put_device(&cdev->dev); +} + +/** + * ccw_device_sched_todo - schedule ccw device operation + * @cdev: ccw device + * @todo: todo + * + * Schedule the operation identified by @todo to be performed on the slow path + * workqueue. Do nothing if another operation with higher priority is already + * scheduled. Needs to be called with ccwdev lock held. + */ +void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) +{ + CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n", + cdev->private->dev_id.ssid, cdev->private->dev_id.devno, + todo); + if (cdev->private->todo >= todo) + return; + cdev->private->todo = todo; + /* Get workqueue ref. */ + if (!get_device(&cdev->dev)) + return; + if (!queue_work(cio_work_q, &cdev->private->todo_work)) { + /* Already queued, release workqueue ref. 
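ccw_driver_register() and ccw_driver_unregister(), documented just above, are thin wrappers around driver_register()/driver_unregister() on the ccw bus, so a device driver only needs to fill in a struct ccw_driver and register it from its module init. The skeleton below sketches that usage under stated assumptions: the sample_* identifiers, the device-type entry and the callback bodies are illustrative, not part of this patch.

#include <linux/module.h>
#include <asm/ccwdev.h>

/* Hypothetical device-type match; real drivers list their own CU/device types. */
static struct ccw_device_id sample_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, sample_ids);

static int sample_probe(struct ccw_device *cdev)
{
	/* Per-device setup; cdev->handler would typically be assigned here. */
	return 0;
}

static void sample_remove(struct ccw_device *cdev)
{
	/* Undo probe-time setup. */
}

static struct ccw_driver sample_driver = {
	.driver = {
		.name	= "sample_ccw",
		.owner	= THIS_MODULE,
	},
	.ids	= sample_ids,
	.probe	= sample_probe,
	.remove	= sample_remove,
	/* set_online, set_offline, notify, path_event etc. go here as needed. */
};

static int __init sample_init(void)
{
	return ccw_driver_register(&sample_driver);
}

static void __exit sample_exit(void)
{
	ccw_driver_unregister(&sample_driver);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");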
*/ + put_device(&cdev->dev); + } +} + +/** + * ccw_device_siosl() - initiate logging + * @cdev: ccw device + * + * This function is used to invoke model-dependent logging within the channel + * subsystem. + */ +int ccw_device_siosl(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + return chsc_siosl(sch->schid); +} +EXPORT_SYMBOL_GPL(ccw_device_siosl); + MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); EXPORT_SYMBOL(ccw_driver_register); EXPORT_SYMBOL(ccw_driver_unregister); EXPORT_SYMBOL(get_ccwdev_by_busid); -EXPORT_SYMBOL(ccw_bus_type); -EXPORT_SYMBOL(ccw_device_work); -EXPORT_SYMBOL(ccw_device_notify_work); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index a3aa056d724..8d1d2987317 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -1,6 +1,13 @@ #ifndef S390_DEVICE_H #define S390_DEVICE_H +#include <asm/ccwdev.h> +#include <linux/atomic.h> +#include <linux/wait.h> +#include <linux/notifier.h> +#include <linux/kernel_stat.h> +#include "io_sch.h" + /* * states of the device statemachine */ @@ -15,14 +22,14 @@ enum dev_state { DEV_STATE_DISBAND_PGID, DEV_STATE_BOXED, /* states to wait for i/o completion before doing something */ - DEV_STATE_CLEAR_VERIFY, DEV_STATE_TIMEOUT_KILL, - DEV_STATE_WAIT4IO, DEV_STATE_QUIESCE, /* special states for devices gone not operational */ DEV_STATE_DISCONNECTED, DEV_STATE_DISCONNECTED_SENSE_ID, DEV_STATE_CMFCHANGE, + DEV_STATE_CMFUPDATE, + DEV_STATE_STEAL_LOCK, /* last element! */ NR_DEV_STATES }; @@ -50,7 +57,16 @@ extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS]; static inline void dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event) { - dev_jumptable[cdev->private->state][dev_event](cdev, dev_event); + int state = cdev->private->state; + + if (dev_event == DEV_EVENT_INTERRUPT) { + if (state == DEV_STATE_ONLINE) + inc_irq_stat(cdev->private->int_class); + else if (state != DEV_STATE_CMFCHANGE && + state != DEV_STATE_CMFUPDATE) + inc_irq_stat(IRQIO_CIO); + } + dev_jumptable[state][dev_event](cdev, dev_event); } /* @@ -65,20 +81,24 @@ dev_fsm_final_state(struct ccw_device *cdev) cdev->private->state == DEV_STATE_BOXED); } -extern struct workqueue_struct *ccw_device_work; -extern struct workqueue_struct *ccw_device_notify_work; +int __init io_subchannel_init(void); void io_subchannel_recog_done(struct ccw_device *cdev); +void io_subchannel_init_config(struct subchannel *sch); int ccw_device_cancel_halt_clear(struct ccw_device *); -int ccw_device_register(struct ccw_device *); -void ccw_device_do_unreg_rereg(void *); -void ccw_device_call_sch_unregister(void *); +int ccw_device_is_orphan(struct ccw_device *); -int ccw_device_recognition(struct ccw_device *); +void ccw_device_recognition(struct ccw_device *); int ccw_device_online(struct ccw_device *); int ccw_device_offline(struct ccw_device *); +void ccw_device_update_sense_data(struct ccw_device *); +int ccw_device_test_sense_data(struct ccw_device *); +void ccw_device_schedule_sch_unregister(struct ccw_device *); +int ccw_purge_blacklisted(void); +void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo); +struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id); /* Function prototypes for device status and basic sense stuff. 
*/ void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); @@ -86,30 +106,45 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *); int ccw_device_do_sense(struct ccw_device *, struct irb *); +/* Function prototype for internal request handling. */ +int lpm_adjust(int lpm, int mask); +void ccw_request_start(struct ccw_device *); +int ccw_request_cancel(struct ccw_device *cdev); +void ccw_request_handler(struct ccw_device *cdev); +void ccw_request_timeout(struct ccw_device *cdev); +void ccw_request_notoper(struct ccw_device *cdev); + /* Function prototypes for sense id stuff. */ void ccw_device_sense_id_start(struct ccw_device *); -void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event); void ccw_device_sense_id_done(struct ccw_device *, int); /* Function prototypes for path grouping stuff. */ -void ccw_device_sense_pgid_start(struct ccw_device *); -void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event); -void ccw_device_sense_pgid_done(struct ccw_device *, int); - void ccw_device_verify_start(struct ccw_device *); -void ccw_device_verify_irq(struct ccw_device *, enum dev_event); void ccw_device_verify_done(struct ccw_device *, int); void ccw_device_disband_start(struct ccw_device *); -void ccw_device_disband_irq(struct ccw_device *, enum dev_event); void ccw_device_disband_done(struct ccw_device *, int); +void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *); +void ccw_device_stlck_done(struct ccw_device *, void *, int); + int ccw_device_call_handler(struct ccw_device *); int ccw_device_stlck(struct ccw_device *); -/* qdio needs this. */ +/* Helper function for machine check handling. */ +void ccw_device_trigger_reprobe(struct ccw_device *); +void ccw_device_kill_io(struct ccw_device *); +int ccw_device_notify(struct ccw_device *, int); +void ccw_device_set_disconnected(struct ccw_device *cdev); +void ccw_device_set_notoper(struct ccw_device *cdev); + void ccw_device_set_timeout(struct ccw_device *, int); +/* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); +void cmf_retry_copy_block(struct ccw_device *); +int cmf_reenable(struct ccw_device *); +int ccw_set_cmf(struct ccw_device *cdev, int enable); +extern struct device_attribute dev_attr_cmb_enable; #endif diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 9b7f6f548b1..0bc902b3cd8 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -1,19 +1,19 @@ /* - * drivers/s390/cio/device_fsm.c * finite state machine for device handling * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Cornelia Huck(cohuck@de.ibm.com) + * Copyright IBM Corp. 
2002, 2008 + * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <linux/module.h> -#include <linux/config.h> #include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/string.h> #include <asm/ccwdev.h> -#include <asm/qdio.h> +#include <asm/cio.h> +#include <asm/chpid.h> #include "cio.h" #include "cio_debug.h" @@ -21,54 +21,71 @@ #include "device.h" #include "chsc.h" #include "ioasm.h" -#include "qdio.h" +#include "chp.h" -int -device_is_online(struct subchannel *sch) -{ - struct ccw_device *cdev; - - if (!sch->dev.driver_data) - return 0; - cdev = sch->dev.driver_data; - return (cdev->private->state == DEV_STATE_ONLINE); -} +static int timeout_log_enabled; -int -device_is_disconnected(struct subchannel *sch) +static int __init ccw_timeout_log_setup(char *unused) { - struct ccw_device *cdev; - - if (!sch->dev.driver_data) - return 0; - cdev = sch->dev.driver_data; - return (cdev->private->state == DEV_STATE_DISCONNECTED || - cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); + timeout_log_enabled = 1; + return 1; } -void -device_set_disconnected(struct subchannel *sch) -{ - struct ccw_device *cdev; - - if (!sch->dev.driver_data) - return; - cdev = sch->dev.driver_data; - ccw_device_set_timeout(cdev, 0); - cdev->private->flags.fake_irb = 0; - cdev->private->state = DEV_STATE_DISCONNECTED; -} +__setup("ccw_timeout_log", ccw_timeout_log_setup); -void -device_set_waiting(struct subchannel *sch) +static void ccw_timeout_log(struct ccw_device *cdev) { - struct ccw_device *cdev; + struct schib schib; + struct subchannel *sch; + struct io_subchannel_private *private; + union orb *orb; + int cc; - if (!sch->dev.driver_data) - return; - cdev = sch->dev.driver_data; - ccw_device_set_timeout(cdev, 10*HZ); - cdev->private->state = DEV_STATE_WAIT4IO; + sch = to_subchannel(cdev->dev.parent); + private = to_io_private(sch); + orb = &private->orb; + cc = stsch_err(sch->schid, &schib); + + printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " + "device information:\n", get_tod_clock()); + printk(KERN_WARNING "cio: orb:\n"); + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + orb, sizeof(*orb), 0); + printk(KERN_WARNING "cio: ccw device bus id: %s\n", + dev_name(&cdev->dev)); + printk(KERN_WARNING "cio: subchannel bus id: %s\n", + dev_name(&sch->dev)); + printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " + "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); + + if (orb->tm.b) { + printk(KERN_WARNING "cio: orb indicates transport mode\n"); + printk(KERN_WARNING "cio: last tcw:\n"); + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + (void *)(addr_t)orb->tm.tcw, + sizeof(struct tcw), 0); + } else { + printk(KERN_WARNING "cio: orb indicates command mode\n"); + if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw || + (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws) + printk(KERN_WARNING "cio: last channel program " + "(intern):\n"); + else + printk(KERN_WARNING "cio: last channel program:\n"); + + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + (void *)(addr_t)orb->cmd.cpa, + sizeof(struct ccw1), 0); + } + printk(KERN_WARNING "cio: ccw device state: %d\n", + cdev->private->state); + printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); + printk(KERN_WARNING "cio: schib:\n"); + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + &schib, sizeof(schib), 0); + printk(KERN_WARNING "cio: ccw device flags:\n"); + print_hex_dump(KERN_WARNING, "cio: 
", DUMP_PREFIX_NONE, 16, 1, + &cdev->private->flags, sizeof(cdev->private->flags), 0); } /* @@ -81,6 +98,8 @@ ccw_device_timeout(unsigned long data) cdev = (struct ccw_device *) data; spin_lock_irq(cdev->ccwlock); + if (timeout_log_enabled) + ccw_timeout_log(cdev); dev_fsm_event(cdev, DEV_EVENT_TIMEOUT); spin_unlock_irq(cdev->ccwlock); } @@ -105,18 +124,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires) add_timer(&cdev->private->timer); } -/* Kill any pending timers after machine check. */ -void -device_kill_pending_timer(struct subchannel *sch) -{ - struct ccw_device *cdev; - - if (!sch->dev.driver_data) - return; - cdev = sch->dev.driver_data; - ccw_device_set_timeout(cdev, 0); -} - /* * Cancel running i/o. This is called repeatedly since halt/clear are * asynchronous operations. We do one try with cio_cancel, two tries @@ -132,27 +139,30 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) int ret; sch = to_subchannel(cdev->dev.parent); - ret = stsch(sch->irq, &sch->schib); - if (ret || !sch->schib.pmcw.dnv) + if (cio_update_schib(sch)) return -ENODEV; - if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) - /* Not operational or no activity -> done. */ + if (!sch->schib.pmcw.ena) + /* Not operational -> done. */ return 0; /* Stage 1: cancel io. */ - if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && - !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { - ret = cio_cancel(sch); - if (ret != -EINVAL) - return ret; - /* cancel io unsuccessful. From now on it is asynchronous. */ + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && + !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { + if (!scsw_is_tm(&sch->schib.scsw)) { + ret = cio_cancel(sch); + if (ret != -EINVAL) + return ret; + } + /* cancel io unsuccessful or not applicable (transport mode). + * Continue with asynchronous instructions. */ cdev->private->iretry = 3; /* 3 halt retries. */ } - if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { /* Stage 2: halt io. */ if (cdev->private->iretry) { cdev->private->iretry--; ret = cio_halt(sch); - return (ret == 0) ? -EBUSY : ret; + if (ret != -EBUSY) + return (ret == 0) ? -EBUSY : ret; } /* halt io unsuccessful. */ cdev->private->iretry = 255; /* 255 clear retries. */ @@ -163,33 +173,27 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) ret = cio_clear (sch); return (ret == 0) ? -EBUSY : ret; } - panic("Can't stop i/o on subchannel.\n"); + /* Function was unsuccessful */ + CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n", + cdev->private->dev_id.ssid, cdev->private->dev_id.devno); + return -EIO; } -static int -ccw_device_handle_oper(struct ccw_device *cdev) +void ccw_device_update_sense_data(struct ccw_device *cdev) { - struct subchannel *sch; + memset(&cdev->id, 0, sizeof(cdev->id)); + cdev->id.cu_type = cdev->private->senseid.cu_type; + cdev->id.cu_model = cdev->private->senseid.cu_model; + cdev->id.dev_type = cdev->private->senseid.dev_type; + cdev->id.dev_model = cdev->private->senseid.dev_model; +} - sch = to_subchannel(cdev->dev.parent); - cdev->private->flags.recog_done = 1; - /* - * Check if cu type and device type still match. If - * not, it is certainly another device and we have to - * de- and re-register. Also check here for non-matching devno. 
- */ - if (cdev->id.cu_type != cdev->private->senseid.cu_type || - cdev->id.cu_model != cdev->private->senseid.cu_model || - cdev->id.dev_type != cdev->private->senseid.dev_type || - cdev->id.dev_model != cdev->private->senseid.dev_model || - cdev->private->devno != sch->schib.pmcw.dev) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_do_unreg_rereg, (void *)cdev); - queue_work(ccw_device_work, &cdev->private->kick_work); - return 0; - } - cdev->private->flags.donotify = 1; - return 1; +int ccw_device_test_sense_data(struct ccw_device *cdev) +{ + return cdev->id.cu_type == cdev->private->senseid.cu_type && + cdev->id.cu_model == cdev->private->senseid.cu_model && + cdev->id.dev_type == cdev->private->senseid.dev_type && + cdev->id.dev_model == cdev->private->senseid.dev_model; } /* @@ -197,18 +201,22 @@ ccw_device_handle_oper(struct ccw_device *cdev) * been varied online on the SE so we have to find out by magic (i. e. driving * the channel subsystem to device selection and updating our path masks). */ -static inline void +static void __recover_lost_chpids(struct subchannel *sch, int old_lpm) { int mask, i; + struct chp_id chpid; + chp_id_init(&chpid); for (i = 0; i<8; i++) { mask = 0x80 >> i; if (!(sch->lpm & mask)) continue; if (old_lpm & mask) continue; - chpid_is_actually_online(sch->schib.pmcw.chpid[i]); + chpid.id = sch->schib.pmcw.chpid[i]; + if (!chp_is_registered(chpid)) + css_schedule_eval_all(); } } @@ -219,81 +227,73 @@ static void ccw_device_recog_done(struct ccw_device *cdev, int state) { struct subchannel *sch; - int notify, old_lpm, same_dev; + int old_lpm; sch = to_subchannel(cdev->dev.parent); - ccw_device_set_timeout(cdev, 0); - cio_disable_subchannel(sch); + if (cio_disable_subchannel(sch)) + state = DEV_STATE_NOT_OPER; /* * Now that we tried recognition, we have performed device selection * through ssch() and the path information is up to date. */ old_lpm = sch->lpm; - stsch(sch->irq, &sch->schib); - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + + /* Check since device may again have become not operational. */ + if (cio_update_schib(sch)) + state = DEV_STATE_NOT_OPER; + else + sch->lpm = sch->schib.pmcw.pam & sch->opm; + if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) /* Force reprobe on all chpids. */ old_lpm = 0; if (sch->lpm != old_lpm) __recover_lost_chpids(sch, old_lpm); - if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { - if (state == DEV_STATE_NOT_OPER) { - cdev->private->flags.recog_done = 1; - cdev->private->state = DEV_STATE_DISCONNECTED; - return; - } - /* Boxed devices don't need extra treatment. */ + if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID && + (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) { + cdev->private->flags.recog_done = 1; + cdev->private->state = DEV_STATE_DISCONNECTED; + wake_up(&cdev->private->wait_q); + return; + } + if (cdev->private->flags.resuming) { + cdev->private->state = state; + cdev->private->flags.recog_done = 1; + wake_up(&cdev->private->wait_q); + return; } - notify = 0; - same_dev = 0; /* Keep the compiler quiet... 
*/ switch (state) { case DEV_STATE_NOT_OPER: - CIO_DEBUG(KERN_WARNING, 2, - "SenseID : unknown device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); break; case DEV_STATE_OFFLINE: - if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { - same_dev = ccw_device_handle_oper(cdev); - notify = 1; + if (!cdev->online) { + ccw_device_update_sense_data(cdev); + break; } - /* fill out sense information */ - cdev->id = (struct ccw_device_id) { - .cu_type = cdev->private->senseid.cu_type, - .cu_model = cdev->private->senseid.cu_model, - .dev_type = cdev->private->senseid.dev_type, - .dev_model = cdev->private->senseid.dev_model, - }; - if (notify) { - cdev->private->state = DEV_STATE_OFFLINE; - if (same_dev) { - /* Get device online again. */ - ccw_device_online(cdev); - wake_up(&cdev->private->wait_q); - } - return; + cdev->private->state = DEV_STATE_OFFLINE; + cdev->private->flags.recog_done = 1; + if (ccw_device_test_sense_data(cdev)) { + cdev->private->flags.donotify = 1; + ccw_device_online(cdev); + wake_up(&cdev->private->wait_q); + } else { + ccw_device_update_sense_data(cdev); + ccw_device_sched_todo(cdev, CDEV_TODO_REBIND); } - /* Issue device info message. */ - CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: " - "CU Type/Mod = %04X/%02X, Dev Type/Mod = " - "%04X/%02X\n", cdev->private->devno, - cdev->id.cu_type, cdev->id.cu_model, - cdev->id.dev_type, cdev->id.dev_model); - break; + return; case DEV_STATE_BOXED: - CIO_DEBUG(KERN_WARNING, 2, - "SenseID : boxed device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); + if (cdev->id.cu_type != 0) { /* device was recognized before */ + cdev->private->flags.recog_done = 1; + cdev->private->state = DEV_STATE_BOXED; + wake_up(&cdev->private->wait_q); + return; + } break; } cdev->private->state = state; io_subchannel_recog_done(cdev); - if (state != DEV_STATE_NOT_OPER) - wake_up(&cdev->private->wait_q); + wake_up(&cdev->private->wait_q); } /* @@ -315,22 +315,54 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) } } -static void -ccw_device_oper_notify(void *data) +/** + * ccw_device_notify() - inform the device's driver about an event + * @cdev: device for which an event occurred + * @event: event that occurred + * + * Returns: + * -%EINVAL if the device is offline or has no driver. + * -%EOPNOTSUPP if the device's driver has no notifier registered. + * %NOTIFY_OK if the driver wants to keep the device. + * %NOTIFY_BAD if the driver doesn't want to keep the device. + */ +int ccw_device_notify(struct ccw_device *cdev, int event) { - struct ccw_device *cdev; - struct subchannel *sch; - int ret; - - cdev = (struct ccw_device *)data; - sch = to_subchannel(cdev->dev.parent); - ret = (sch->driver && sch->driver->notify) ? - sch->driver->notify(&sch->dev, CIO_OPER) : 0; - if (!ret) - /* Driver doesn't want device back. 
*/ - ccw_device_do_unreg_rereg((void *)cdev); + int ret = -EINVAL; + + if (!cdev->drv) + goto out; + if (!cdev->online) + goto out; + CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", + cdev->private->dev_id.ssid, cdev->private->dev_id.devno, + event); + if (!cdev->drv->notify) { + ret = -EOPNOTSUPP; + goto out; + } + if (cdev->drv->notify(cdev, event)) + ret = NOTIFY_OK; else - wake_up(&cdev->private->wait_q); + ret = NOTIFY_BAD; +out: + return ret; +} + +static void ccw_device_oper_notify(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { + /* Reenable channel measurements, if needed. */ + ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); + /* Save indication for new paths. */ + cdev->private->path_new_mask = sch->vpm; + return; + } + /* Driver doesn't want device back. */ + ccw_device_set_notoper(cdev); + ccw_device_sched_todo(cdev, CDEV_TODO_REBIND); } /* @@ -343,6 +375,8 @@ ccw_device_done(struct ccw_device *cdev, int state) sch = to_subchannel(cdev->dev.parent); + ccw_device_set_timeout(cdev, 0); + if (state != DEV_STATE_ONLINE) cio_disable_subchannel(sch); @@ -351,74 +385,52 @@ ccw_device_done(struct ccw_device *cdev, int state) cdev->private->state = state; - - if (state == DEV_STATE_BOXED) - CIO_DEBUG(KERN_WARNING, 2, - "Boxed device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); - - if (cdev->private->flags.donotify) { + switch (state) { + case DEV_STATE_BOXED: + CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", + cdev->private->dev_id.devno, sch->schid.sch_no); + if (cdev->online && + ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK) + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); cdev->private->flags.donotify = 0; - PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, - (void *)cdev); - queue_work(ccw_device_notify_work, &cdev->private->kick_work); - } - wake_up(&cdev->private->wait_q); - - if (css_init_done && state != DEV_STATE_ONLINE) - put_device (&cdev->dev); -} - -/* - * Function called from device_pgid.c after sense path ground has completed. - */ -void -ccw_device_sense_pgid_done(struct ccw_device *cdev, int err) -{ - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); - switch (err) { - case 0: - /* Start Path Group verification. */ - sch->vpm = 0; /* Start with no path groups set. */ - cdev->private->state = DEV_STATE_VERIFY; - ccw_device_verify_start(cdev); break; - case -ETIME: /* Sense path group id stopped by timeout. */ - case -EUSERS: /* device is reserved for someone else. */ - ccw_device_done(cdev, DEV_STATE_BOXED); + case DEV_STATE_NOT_OPER: + CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", + cdev->private->dev_id.devno, sch->schid.sch_no); + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + else + ccw_device_set_disconnected(cdev); + cdev->private->flags.donotify = 0; break; - case -EOPNOTSUPP: /* path grouping not supported, just set online. 
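As the kernel-doc for ccw_device_notify() above states, the common I/O layer forwards availability and path events to the driver's notify() callback and maps a nonzero return to NOTIFY_OK (keep the device) and zero to NOTIFY_BAD (give it up). A minimal sketch of such a callback follows; the keep/drop policy shown is purely illustrative and sample_notify is an invented name.

#include <asm/ccwdev.h>
#include <asm/cio.h>

/*
 * Illustrative notify() callback: return nonzero to keep the device,
 * 0 to let the common I/O layer unregister it.
 */
static int sample_notify(struct ccw_device *cdev, int event)
{
	switch (event) {
	case CIO_GONE:		/* device vanished; wait for it to come back */
	case CIO_NO_PATH:	/* all paths lost; stay disconnected */
	case CIO_OPER:		/* device became operational again */
		return 1;
	case CIO_BOXED:		/* reserved by another system */
	default:
		return 0;
	}
}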
*/ - cdev->private->options.pgroup = 0; - ccw_device_done(cdev, DEV_STATE_ONLINE); + case DEV_STATE_DISCONNECTED: + CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " + "%04x\n", cdev->private->dev_id.devno, + sch->schid.sch_no); + if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) { + cdev->private->state = DEV_STATE_NOT_OPER; + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + } else + ccw_device_set_disconnected(cdev); + cdev->private->flags.donotify = 0; break; default: - ccw_device_done(cdev, DEV_STATE_NOT_OPER); break; } + + if (cdev->private->flags.donotify) { + cdev->private->flags.donotify = 0; + ccw_device_oper_notify(cdev); + } + wake_up(&cdev->private->wait_q); } /* * Start device recognition. */ -int -ccw_device_recognition(struct ccw_device *cdev) +void ccw_device_recognition(struct ccw_device *cdev) { - struct subchannel *sch; - int ret; - - if ((cdev->private->state != DEV_STATE_NOT_OPER) && - (cdev->private->state != DEV_STATE_BOXED)) - return -EINVAL; - sch = to_subchannel(cdev->dev.parent); - ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); - if (ret != 0) - /* Couldn't enable the subchannel for i/o. Sick device. */ - return ret; - - /* After 60s the device recognition is considered to have failed. */ - ccw_device_set_timeout(cdev, 60*HZ); + struct subchannel *sch = to_subchannel(cdev->dev.parent); /* * We used to start here with a sense pgid to find out whether a device @@ -430,103 +442,129 @@ ccw_device_recognition(struct ccw_device *cdev) */ cdev->private->flags.recog_done = 0; cdev->private->state = DEV_STATE_SENSE_ID; + if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) { + ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); + return; + } ccw_device_sense_id_start(cdev); - return 0; } /* - * Handle timeout in device recognition. + * Handle events for states that use the ccw request infrastructure. */ -static void -ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event) +static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e) { - int ret; - - ret = ccw_device_cancel_halt_clear(cdev); - switch (ret) { - case 0: - ccw_device_recog_done(cdev, DEV_STATE_BOXED); + switch (e) { + case DEV_EVENT_NOTOPER: + ccw_request_notoper(cdev); break; - case -ENODEV: - ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); + case DEV_EVENT_INTERRUPT: + ccw_request_handler(cdev); + break; + case DEV_EVENT_TIMEOUT: + ccw_request_timeout(cdev); break; default: - ccw_device_set_timeout(cdev, 3*HZ); + break; } } +static void ccw_device_report_path_events(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int path_event[8]; + int chp, mask; + + for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) { + path_event[chp] = PE_NONE; + if (mask & cdev->private->path_gone_mask & ~(sch->vpm)) + path_event[chp] |= PE_PATH_GONE; + if (mask & cdev->private->path_new_mask & sch->vpm) + path_event[chp] |= PE_PATH_AVAILABLE; + if (mask & cdev->private->pgid_reset_mask & sch->vpm) + path_event[chp] |= PE_PATHGROUP_ESTABLISHED; + } + if (cdev->online && cdev->drv->path_event) + cdev->drv->path_event(cdev, path_event); +} -static void -ccw_device_nopath_notify(void *data) +static void ccw_device_reset_path_events(struct ccw_device *cdev) { - struct ccw_device *cdev; - struct subchannel *sch; - int ret; + cdev->private->path_gone_mask = 0; + cdev->private->path_new_mask = 0; + cdev->private->pgid_reset_mask = 0; +} - cdev = (struct ccw_device *)data; - sch = to_subchannel(cdev->dev.parent); - /* Extra sanity. 
*/ - if (sch->lpm) - return; - ret = (sch->driver && sch->driver->notify) ? - sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0; - if (!ret) { - if (get_device(&sch->dev)) { - /* Driver doesn't want to keep device. */ - cio_disable_subchannel(sch); - if (get_device(&cdev->dev)) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, - (void *)cdev); - queue_work(ccw_device_work, - &cdev->private->kick_work); - } else - put_device(&sch->dev); - } - } else { - cio_disable_subchannel(sch); - ccw_device_set_timeout(cdev, 0); - cdev->private->flags.fake_irb = 0; - cdev->private->state = DEV_STATE_DISCONNECTED; - wake_up(&cdev->private->wait_q); +static void create_fake_irb(struct irb *irb, int type) +{ + memset(irb, 0, sizeof(*irb)); + if (type == FAKE_CMD_IRB) { + struct cmd_scsw *scsw = &irb->scsw.cmd; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; + } else if (type == FAKE_TM_IRB) { + struct tm_scsw *scsw = &irb->scsw.tm; + scsw->x = 1; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; } } -void -ccw_device_verify_done(struct ccw_device *cdev, int err) +void ccw_device_verify_done(struct ccw_device *cdev, int err) { - cdev->private->flags.doverify = 0; + struct subchannel *sch; + + sch = to_subchannel(cdev->dev.parent); + /* Update schib - pom may have changed. */ + if (cio_update_schib(sch)) { + err = -ENODEV; + goto callback; + } + /* Update lpm with verified path mask. */ + sch->lpm = sch->vpm; + /* Repeat path verification? */ + if (cdev->private->flags.doverify) { + ccw_device_verify_start(cdev); + return; + } +callback: switch (err) { - case -EOPNOTSUPP: /* path grouping not supported, just set online. */ - cdev->private->options.pgroup = 0; case 0: ccw_device_done(cdev, DEV_STATE_ONLINE); /* Deliver fake irb to device driver, if needed. */ if (cdev->private->flags.fake_irb) { - memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->irb.scsw = (struct scsw) { - .cc = 1, - .fctl = SCSW_FCTL_START_FUNC, - .actl = SCSW_ACTL_START_PEND, - .stctl = SCSW_STCTL_STATUS_PEND, - }; + create_fake_irb(&cdev->private->irb, + cdev->private->flags.fake_irb); cdev->private->flags.fake_irb = 0; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, &cdev->private->irb); memset(&cdev->private->irb, 0, sizeof(struct irb)); } + ccw_device_report_path_events(cdev); break; case -ETIME: + case -EUSERS: + /* Reset oper notify indication after verify error. */ + cdev->private->flags.donotify = 0; ccw_device_done(cdev, DEV_STATE_BOXED); break; + case -EACCES: + /* Reset oper notify indication after verify error. */ + cdev->private->flags.donotify = 0; + ccw_device_done(cdev, DEV_STATE_DISCONNECTED); + break; default: - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, (void *)cdev); - queue_work(ccw_device_notify_work, &cdev->private->kick_work); + /* Reset oper notify indication after verify error. 
*/ + cdev->private->flags.donotify = 0; ccw_device_done(cdev, DEV_STATE_NOT_OPER); break; } + ccw_device_reset_path_events(cdev); } /* @@ -542,24 +580,16 @@ ccw_device_online(struct ccw_device *cdev) (cdev->private->state != DEV_STATE_BOXED)) return -EINVAL; sch = to_subchannel(cdev->dev.parent); - if (css_init_done && !get_device(&cdev->dev)) - return -ENODEV; - ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); + ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); if (ret != 0) { /* Couldn't enable the subchannel for i/o. Sick device. */ if (ret == -ENODEV) dev_fsm_event(cdev, DEV_EVENT_NOTOPER); return ret; } - /* Do we want to do path grouping? */ - if (!cdev->private->options.pgroup) { - /* No, set state online immediately. */ - ccw_device_done(cdev, DEV_STATE_ONLINE); - return 0; - } - /* Do a SensePGID first. */ - cdev->private->state = DEV_STATE_SENSE_PGID; - ccw_device_sense_pgid_start(cdev); + /* Start initial path verification. */ + cdev->private->state = DEV_STATE_VERIFY; + ccw_device_verify_start(cdev); return 0; } @@ -574,6 +604,7 @@ ccw_device_disband_done(struct ccw_device *cdev, int err) ccw_device_done(cdev, DEV_STATE_BOXED); break; default: + cdev->private->flags.donotify = 0; ccw_device_done(cdev, DEV_STATE_NOT_OPER); break; } @@ -587,18 +618,30 @@ ccw_device_offline(struct ccw_device *cdev) { struct subchannel *sch; + /* Allow ccw_device_offline while disconnected. */ + if (cdev->private->state == DEV_STATE_DISCONNECTED || + cdev->private->state == DEV_STATE_NOT_OPER) { + cdev->private->flags.donotify = 0; + ccw_device_done(cdev, DEV_STATE_NOT_OPER); + return 0; + } + if (cdev->private->state == DEV_STATE_BOXED) { + ccw_device_done(cdev, DEV_STATE_BOXED); + return 0; + } + if (ccw_device_is_orphan(cdev)) { + ccw_device_done(cdev, DEV_STATE_OFFLINE); + return 0; + } sch = to_subchannel(cdev->dev.parent); - if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv) + if (cio_update_schib(sch)) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) { - if (sch->schib.scsw.actl != 0) - return -EBUSY; - return -EINVAL; - } - if (sch->schib.scsw.actl != 0) + if (scsw_actl(&sch->schib.scsw) != 0) return -EBUSY; + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; /* Are we doing path grouping? */ - if (!cdev->private->options.pgroup) { + if (!cdev->private->flags.pgroup) { /* No, set state offline immediately. */ ccw_device_done(cdev, DEV_STATE_OFFLINE); return 0; @@ -610,82 +653,26 @@ ccw_device_offline(struct ccw_device *cdev) } /* - * Handle timeout in device online/offline process. + * Handle not operational event in non-special state. */ -static void -ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event) -{ - int ret; - - ret = ccw_device_cancel_halt_clear(cdev); - switch (ret) { - case 0: - ccw_device_done(cdev, DEV_STATE_BOXED); - break; - case -ENODEV: - ccw_device_done(cdev, DEV_STATE_NOT_OPER); - break; - default: - ccw_device_set_timeout(cdev, 3*HZ); - } -} - -/* - * Handle not oper event in device recognition. - */ -static void -ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event) -{ - ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); -} - -/* - * Handle not operational event while offline. 
- */ -static void -ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event) +static void ccw_device_generic_notoper(struct ccw_device *cdev, + enum dev_event dev_event) { - struct subchannel *sch; - - cdev->private->state = DEV_STATE_NOT_OPER; - sch = to_subchannel(cdev->dev.parent); - if (get_device(&cdev->dev)) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, (void *)cdev); - queue_work(ccw_device_work, &cdev->private->kick_work); - } - wake_up(&cdev->private->wait_q); + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + else + ccw_device_set_disconnected(cdev); } /* - * Handle not operational event while online. + * Handle path verification event in offline state. */ -static void -ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) +static void ccw_device_offline_verify(struct ccw_device *cdev, + enum dev_event dev_event) { - struct subchannel *sch; + struct subchannel *sch = to_subchannel(cdev->dev.parent); - sch = to_subchannel(cdev->dev.parent); - if (sch->driver->notify && - sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) { - ccw_device_set_timeout(cdev, 0); - cdev->private->flags.fake_irb = 0; - cdev->private->state = DEV_STATE_DISCONNECTED; - wake_up(&cdev->private->wait_q); - return; - } - cdev->private->state = DEV_STATE_NOT_OPER; - cio_disable_subchannel(sch); - if (sch->schib.scsw.actl != 0) { - // FIXME: not-oper indication to device driver ? - ccw_device_call_handler(cdev); - } - if (get_device(&cdev->dev)) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, (void *)cdev); - queue_work(ccw_device_work, &cdev->private->kick_work); - } - wake_up(&cdev->private->wait_q); + css_schedule_eval(sch->schid); } /* @@ -696,8 +683,6 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) { struct subchannel *sch; - if (!cdev->private->options.pgroup) - return; if (cdev->private->state == DEV_STATE_W4SENSE) { cdev->private->flags.doverify = 1; return; @@ -707,13 +692,17 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) * Since we might not just be coming from an interrupt from the * subchannel we have to update the schib. */ - stsch(sch->irq, &sch->schib); + if (cio_update_schib(sch)) { + ccw_device_verify_done(cdev, -ENODEV); + return; + } - if (sch->schib.scsw.actl != 0 || - (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { + if (scsw_actl(&sch->schib.scsw) != 0 || + (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || + (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { /* * No final status yet or final status not yet delivered - * to the device driver. Can't do path verfication now, + * to the device driver. Can't do path verification now, * delay until final status was delivered. */ cdev->private->flags.doverify = 1; @@ -725,24 +714,41 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) } /* + * Handle path verification event in boxed state. + */ +static void ccw_device_boxed_verify(struct ccw_device *cdev, + enum dev_event dev_event) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + if (cdev->online) { + if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) + ccw_device_done(cdev, DEV_STATE_NOT_OPER); + else + ccw_device_online_verify(cdev, dev_event); + } else + css_schedule_eval(sch->schid); +} + +/* * Got an interrupt for a normal io (state online). 
*/ static void ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; + int is_cmd; - irb = (struct irb *) __LC_IRB; + irb = &__get_cpu_var(cio_irb); + is_cmd = !scsw_is_tm(&irb->scsw); /* Check for unsolicited interrupt. */ - if ((irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) - && (!irb->scsw.cc)) { - if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && + if (!scsw_is_solicited(&irb->scsw)) { + if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && !irb->esw.esw0.erw.cons) { /* Unit check but no sense data. Need basic sense. */ if (ccw_device_do_sense(cdev, irb) != 0) goto call_handler_unsol; - memcpy(irb, &cdev->private->irb, sizeof(struct irb)); + memcpy(&cdev->private->irb, irb, sizeof(struct irb)); cdev->private->state = DEV_STATE_W4SENSE; cdev->private->intparm = 0; return; @@ -750,11 +756,13 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) call_handler_unsol: if (cdev->handler) cdev->handler (cdev, 0, irb); + if (cdev->private->flags.doverify) + ccw_device_online_verify(cdev, 0); return; } /* Accumulate status and find out if a basic sense is needed. */ ccw_device_accumulate_irb(cdev, irb); - if (cdev->private->flags.dosense) { + if (is_cmd && cdev->private->flags.dosense) { if (ccw_device_do_sense(cdev, irb) == 0) { cdev->private->state = DEV_STATE_W4SENSE; } @@ -775,24 +783,16 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) int ret; ccw_device_set_timeout(cdev, 0); + cdev->private->iretry = 255; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); cdev->private->state = DEV_STATE_TIMEOUT_KILL; return; } - if (ret == -ENODEV) { - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); - if (!sch->lpm) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, (void *)cdev); - queue_work(ccw_device_notify_work, - &cdev->private->kick_work); - } else - dev_fsm_event(cdev, DEV_EVENT_NOTOPER); - } else if (cdev->handler) + if (ret) + dev_fsm_event(cdev, DEV_EVENT_NOTOPER); + else if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-ETIMEDOUT)); } @@ -800,26 +800,40 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) /* * Got an interrupt for a basic sense. */ -void +static void ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; - irb = (struct irb *) __LC_IRB; + irb = &__get_cpu_var(cio_irb); /* Check for unsolicited interrupt. */ - if (irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) + if (scsw_stctl(&irb->scsw) == + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { + if (scsw_cc(&irb->scsw) == 1) /* Basic sense hasn't started. Try again. */ ccw_device_do_sense(cdev, irb); else { - printk("Huh? %s(%s): unsolicited interrupt...\n", - __FUNCTION__, cdev->dev.bus_id); + CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited " + "interrupt during w4sense...\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); if (cdev->handler) cdev->handler (cdev, 0, irb); } return; } + /* + * Check if a halt or clear has been issued in the meanwhile. If yes, + * only deliver the halt/clear interrupt to the device driver as if it + * had killed the original request. 
+ */ + if (scsw_fctl(&irb->scsw) & + (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { + cdev->private->flags.dosense = 0; + memset(&cdev->private->irb, 0, sizeof(struct irb)); + ccw_device_accumulate_irb(cdev, irb); + goto call_handler; + } /* Add basic sense info to irb. */ ccw_device_accumulate_basic_sense(cdev, irb); if (cdev->private->flags.dosense) { @@ -827,7 +841,10 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_do_sense(cdev, irb); return; } +call_handler: cdev->private->state = DEV_STATE_ONLINE; + /* In case sensing interfered with setting the device online */ + wake_up(&cdev->private->wait_q); /* Call the handler. */ if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) /* Start delayed path verification. */ @@ -835,37 +852,15 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) } static void -ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event) -{ - struct irb *irb; - - irb = (struct irb *) __LC_IRB; - /* Accumulate status. We don't do basic sense. */ - ccw_device_accumulate_irb(cdev, irb); - /* Try to start delayed device verification. */ - ccw_device_online_verify(cdev, 0); - /* Note: Don't call handler for cio initiated clear! */ -} - -static void ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) { - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); ccw_device_set_timeout(cdev, 0); + /* Start delayed path verification. */ + ccw_device_online_verify(cdev, 0); /* OK, i/o is dead now. Call interrupt handler. */ - cdev->private->state = DEV_STATE_ONLINE; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-ETIMEDOUT)); - if (!sch->lpm) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, (void *)cdev); - queue_work(ccw_device_notify_work, &cdev->private->kick_work); - } else if (cdev->private->flags.doverify) - /* Start delayed path verification. */ - ccw_device_online_verify(cdev, 0); + ERR_PTR(-EIO)); } static void @@ -878,128 +873,36 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_set_timeout(cdev, 3*HZ); return; } - if (ret == -ENODEV) { - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); - if (!sch->lpm) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, (void *)cdev); - queue_work(ccw_device_notify_work, - &cdev->private->kick_work); - } else - dev_fsm_event(cdev, DEV_EVENT_NOTOPER); - return; - } - //FIXME: Can we get here? - cdev->private->state = DEV_STATE_ONLINE; + /* Start delayed path verification. */ + ccw_device_online_verify(cdev, 0); if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-ETIMEDOUT)); -} - -static void -ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event) -{ - struct irb *irb; - struct subchannel *sch; - - irb = (struct irb *) __LC_IRB; - /* - * Accumulate status and find out if a basic sense is needed. - * This is fine since we have already adapted the lpm. - */ - ccw_device_accumulate_irb(cdev, irb); - if (cdev->private->flags.dosense) { - if (ccw_device_do_sense(cdev, irb) == 0) { - cdev->private->state = DEV_STATE_W4SENSE; - } - return; - } - - /* Iff device is idle, reset timeout. */ - sch = to_subchannel(cdev->dev.parent); - if (!stsch(sch->irq, &sch->schib)) - if (sch->schib.scsw.actl == 0) - ccw_device_set_timeout(cdev, 0); - /* Call the handler. 
*/ - ccw_device_call_handler(cdev); - if (!sch->lpm) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, (void *)cdev); - queue_work(ccw_device_notify_work, &cdev->private->kick_work); - } else if (cdev->private->flags.doverify) - ccw_device_online_verify(cdev, 0); + ERR_PTR(-EIO)); } -static void -ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event) +void ccw_device_kill_io(struct ccw_device *cdev) { int ret; - struct subchannel *sch; - sch = to_subchannel(cdev->dev.parent); - ccw_device_set_timeout(cdev, 0); + cdev->private->iretry = 255; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); cdev->private->state = DEV_STATE_TIMEOUT_KILL; return; } - if (ret == -ENODEV) { - if (!sch->lpm) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, (void *)cdev); - queue_work(ccw_device_notify_work, - &cdev->private->kick_work); - } else - dev_fsm_event(cdev, DEV_EVENT_NOTOPER); - return; - } + /* Start delayed path verification. */ + ccw_device_online_verify(cdev, 0); if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-ETIMEDOUT)); - if (!sch->lpm) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, (void *)cdev); - queue_work(ccw_device_notify_work, &cdev->private->kick_work); - } else if (cdev->private->flags.doverify) - /* Start delayed path verification. */ - ccw_device_online_verify(cdev, 0); -} - -static void -ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) -{ - /* When the I/O has terminated, we have to start verification. */ - if (cdev->private->options.pgroup) - cdev->private->flags.doverify = 1; + ERR_PTR(-EIO)); } static void -ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event) +ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event) { - struct irb *irb; - - switch (dev_event) { - case DEV_EVENT_INTERRUPT: - irb = (struct irb *) __LC_IRB; - /* Check for unsolicited interrupt. */ - if ((irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && - (!irb->scsw.cc)) - /* FIXME: we should restart stlck here, but this - * is extremely unlikely ... */ - goto out_wakeup; - - ccw_device_accumulate_irb(cdev, irb); - /* We don't care about basic sense etc. */ - break; - default: /* timeout */ - break; - } -out_wakeup: - wake_up(&cdev->private->wait_q); + /* Start verification after current task finished. */ + cdev->private->flags.doverify = 1; } static void @@ -1008,60 +911,54 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0) + if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0) /* Couldn't enable the subchannel for i/o. Sick device. */ return; - - /* After 60s the device recognition is considered to have failed. */ - ccw_device_set_timeout(cdev, 60*HZ); - cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID; ccw_device_sense_id_start(cdev); } -void -device_trigger_reprobe(struct subchannel *sch) +void ccw_device_trigger_reprobe(struct ccw_device *cdev) { - struct ccw_device *cdev; + struct subchannel *sch; - if (!sch->dev.driver_data) - return; - cdev = sch->dev.driver_data; if (cdev->private->state != DEV_STATE_DISCONNECTED) return; + sch = to_subchannel(cdev->dev.parent); /* Update some values. 
*/ - if (stsch(sch->irq, &sch->schib)) + if (cio_update_schib(sch)) return; - /* * The pim, pam, pom values may not be accurate, but they are the best * we have before performing device selection :/ */ - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; - /* Re-set some bits in the pmcw that were lost. */ - sch->schib.pmcw.isc = 3; - sch->schib.pmcw.csense = 1; - sch->schib.pmcw.ena = 0; - if ((sch->lpm & (sch->lpm - 1)) != 0) - sch->schib.pmcw.mp = 1; - sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; + sch->lpm = sch->schib.pmcw.pam & sch->opm; + /* + * Use the initial configuration since we can't be shure that the old + * paths are valid. + */ + io_subchannel_init_config(sch); + if (cio_commit_config(sch)) + return; + /* We should also udate ssd info, but this has to wait. */ - ccw_device_start_id(cdev, 0); + /* Check if this is another device which appeared on the same sch. */ + if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) + css_schedule_eval(sch->schid); + else + ccw_device_start_id(cdev, 0); } -static void -ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event) +static void ccw_device_disabled_irq(struct ccw_device *cdev, + enum dev_event dev_event) { struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); /* - * An interrupt in state offline means a previous disable was not - * successful. Try again. + * An interrupt in a disabled state means a previous disable was not + * successful - should not happen, but we try to disable again. */ cio_disable_subchannel(sch); } @@ -1074,15 +971,19 @@ ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event) dev_fsm_event(cdev, dev_event); } +static void ccw_device_update_cmfblock(struct ccw_device *cdev, + enum dev_event dev_event) +{ + cmf_retry_copy_block(cdev); + cdev->private->state = DEV_STATE_ONLINE; + dev_fsm_event(cdev, dev_event); +} static void ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) { ccw_device_set_timeout(cdev, 0); - if (dev_event == DEV_EVENT_NOTOPER) - cdev->private->state = DEV_STATE_NOT_OPER; - else - cdev->private->state = DEV_STATE_OFFLINE; + cdev->private->state = DEV_STATE_NOT_OPER; wake_up(&cdev->private->wait_q); } @@ -1092,17 +993,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event) int ret; ret = ccw_device_cancel_halt_clear(cdev); - switch (ret) { - case 0: - cdev->private->state = DEV_STATE_OFFLINE; - wake_up(&cdev->private->wait_q); - break; - case -ENODEV: + if (ret == -EBUSY) { + ccw_device_set_timeout(cdev, HZ/10); + } else { cdev->private->state = DEV_STATE_NOT_OPER; wake_up(&cdev->private->wait_q); - break; - default: - ccw_device_set_timeout(cdev, HZ/10); } } @@ -1116,93 +1011,70 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event) } /* - * Bug operation action. 
- */ -static void -ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event) -{ - printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n", - cdev->private->state, dev_event); - BUG(); -} - -/* * device statemachine */ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_STATE_NOT_OPER] = { [DEV_EVENT_NOTOPER] = ccw_device_nop, - [DEV_EVENT_INTERRUPT] = ccw_device_bug, + [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, [DEV_EVENT_TIMEOUT] = ccw_device_nop, [DEV_EVENT_VERIFY] = ccw_device_nop, }, [DEV_STATE_SENSE_PGID] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq, - [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, + [DEV_EVENT_NOTOPER] = ccw_device_request_event, + [DEV_EVENT_INTERRUPT] = ccw_device_request_event, + [DEV_EVENT_TIMEOUT] = ccw_device_request_event, [DEV_EVENT_VERIFY] = ccw_device_nop, }, [DEV_STATE_SENSE_ID] = { - [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, - [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, + [DEV_EVENT_NOTOPER] = ccw_device_request_event, + [DEV_EVENT_INTERRUPT] = ccw_device_request_event, + [DEV_EVENT_TIMEOUT] = ccw_device_request_event, [DEV_EVENT_VERIFY] = ccw_device_nop, }, [DEV_STATE_OFFLINE] = { - [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, + [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, + [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, [DEV_EVENT_TIMEOUT] = ccw_device_nop, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_VERIFY] = ccw_device_offline_verify, }, [DEV_STATE_VERIFY] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, - [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_NOTOPER] = ccw_device_request_event, + [DEV_EVENT_INTERRUPT] = ccw_device_request_event, + [DEV_EVENT_TIMEOUT] = ccw_device_request_event, + [DEV_EVENT_VERIFY] = ccw_device_delay_verify, }, [DEV_STATE_ONLINE] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, + [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, [DEV_EVENT_INTERRUPT] = ccw_device_irq, [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout, [DEV_EVENT_VERIFY] = ccw_device_online_verify, }, [DEV_STATE_W4SENSE] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, + [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, [DEV_EVENT_INTERRUPT] = ccw_device_w4sense, [DEV_EVENT_TIMEOUT] = ccw_device_nop, [DEV_EVENT_VERIFY] = ccw_device_online_verify, }, [DEV_STATE_DISBAND_PGID] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq, - [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, + [DEV_EVENT_NOTOPER] = ccw_device_request_event, + [DEV_EVENT_INTERRUPT] = ccw_device_request_event, + [DEV_EVENT_TIMEOUT] = ccw_device_request_event, [DEV_EVENT_VERIFY] = ccw_device_nop, }, [DEV_STATE_BOXED] = { - [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done, - [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done, - [DEV_EVENT_VERIFY] = ccw_device_nop, - }, - /* states to wait for i/o completion before doing something */ - [DEV_STATE_CLEAR_VERIFY] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify, + [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, + [DEV_EVENT_INTERRUPT] = ccw_device_nop, [DEV_EVENT_TIMEOUT] = ccw_device_nop, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_VERIFY] = 
ccw_device_boxed_verify, }, + /* states to wait for i/o completion before doing something */ [DEV_STATE_TIMEOUT_KILL] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, + [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout, [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME }, - [DEV_STATE_WAIT4IO] = { - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq, - [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout, - [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify, - }, [DEV_STATE_QUIESCE] = { [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done, [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done, @@ -1213,13 +1085,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_STATE_DISCONNECTED] = { [DEV_EVENT_NOTOPER] = ccw_device_nop, [DEV_EVENT_INTERRUPT] = ccw_device_start_id, - [DEV_EVENT_TIMEOUT] = ccw_device_bug, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_TIMEOUT] = ccw_device_nop, + [DEV_EVENT_VERIFY] = ccw_device_start_id, }, [DEV_STATE_DISCONNECTED_SENSE_ID] = { - [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, - [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, - [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, + [DEV_EVENT_NOTOPER] = ccw_device_request_event, + [DEV_EVENT_INTERRUPT] = ccw_device_request_event, + [DEV_EVENT_TIMEOUT] = ccw_device_request_event, [DEV_EVENT_VERIFY] = ccw_device_nop, }, [DEV_STATE_CMFCHANGE] = { @@ -1228,23 +1100,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate, [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate, }, + [DEV_STATE_CMFUPDATE] = { + [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock, + [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock, + [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock, + [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock, + }, + [DEV_STATE_STEAL_LOCK] = { + [DEV_EVENT_NOTOPER] = ccw_device_request_event, + [DEV_EVENT_INTERRUPT] = ccw_device_request_event, + [DEV_EVENT_TIMEOUT] = ccw_device_request_event, + [DEV_EVENT_VERIFY] = ccw_device_nop, + }, }; -/* - * io_subchannel_irq is called for "real" interrupts or for status - * pending conditions on msch. - */ -void -io_subchannel_irq (struct device *pdev) -{ - struct ccw_device *cdev; - - cdev = to_subchannel(pdev)->dev.driver_data; - - CIO_TRACE_EVENT (3, "IRQ"); - CIO_TRACE_EVENT (3, pdev->bus_id); - if (cdev) - dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); -} - EXPORT_SYMBOL_GPL(ccw_device_set_timeout); diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 0e68fb511dc..d4fa30541a3 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -1,99 +1,42 @@ /* - * drivers/s390/cio/device_id.c + * CCW device SENSE ID I/O handling. * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Cornelia Huck(cohuck@de.ibm.com) - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * - * Sense ID functions. + * Copyright IBM Corp. 
2002, 2009 + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ -#include <linux/module.h> -#include <linux/config.h> -#include <linux/init.h> - +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/errno.h> #include <asm/ccwdev.h> -#include <asm/delay.h> +#include <asm/setup.h> #include <asm/cio.h> -#include <asm/lowcore.h> +#include <asm/diag.h> #include "cio.h" #include "cio_debug.h" -#include "css.h" #include "device.h" -#include "ioasm.h" - -/* - * diag210 is used under VM to get information about a virtual device - */ -#ifdef CONFIG_ARCH_S390X -int -diag210(struct diag210 * addr) -{ - /* - * diag 210 needs its data below the 2GB border, so we - * use a static data area to be sure - */ - static struct diag210 diag210_tmp; - static DEFINE_SPINLOCK(diag210_lock); - unsigned long flags; - int ccode; - - spin_lock_irqsave(&diag210_lock, flags); - diag210_tmp = *addr; +#include "io_sch.h" - asm volatile ( - " lhi %0,-1\n" - " sam31\n" - " diag %1,0,0x210\n" - "0: ipm %0\n" - " srl %0,28\n" - "1: sam64\n" - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 0b,1b\n" - ".previous" - : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" ); +#define SENSE_ID_RETRIES 256 +#define SENSE_ID_TIMEOUT (10 * HZ) +#define SENSE_ID_MIN_LEN 4 +#define SENSE_ID_BASIC_LEN 7 - *addr = diag210_tmp; - spin_unlock_irqrestore(&diag210_lock, flags); - - return ccode; -} -#else -int -diag210(struct diag210 * addr) -{ - int ccode; - - asm volatile ( - " lhi %0,-1\n" - " diag %1,0,0x210\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,1b\n" - ".previous" - : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" ); - - return ccode; -} -#endif - -/* - * Input : - * devno - device number - * ps - pointer to sense ID data area - * Output : none +/** + * diag210_to_senseid - convert diag 0x210 data to sense id information + * @senseid: sense id + * @diag: diag 0x210 data + * + * Return 0 on success, non-zero otherwise. */ -static void -VM_virtual_device_info (__u16 devno, struct senseid *ps) +static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag) { static struct { - int vrdcvcla, vrdcvtyp, cu_type; + int class, type, cu_type; } vm_devices[] = { { 0x08, 0x01, 0x3480 }, { 0x08, 0x02, 0x3430 }, @@ -125,231 +68,155 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps) { 0x40, 0xc0, 0x5080 }, { 0x80, 0x00, 0x3215 }, }; - struct diag210 diag_data; - int ccode, i; - - CIO_TRACE_EVENT (4, "VMvdinf"); - - diag_data = (struct diag210) { - .vrdcdvno = devno, - .vrdclen = sizeof (diag_data), - }; - - ccode = diag210 (&diag_data); - ps->reserved = 0xff; - - /* Special case for bloody osa devices. */ - if (diag_data.vrdcvcla == 0x02 && - diag_data.vrdcvtyp == 0x20) { - ps->cu_type = 0x3088; - ps->cu_model = 0x60; - return; + int i; + + /* Special case for osa devices. 
*/ + if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) { + senseid->cu_type = 0x3088; + senseid->cu_model = 0x60; + senseid->reserved = 0xff; + return 0; } - for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++) - if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla && - diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) { - ps->cu_type = vm_devices[i].cu_type; - return; + for (i = 0; i < ARRAY_SIZE(vm_devices); i++) { + if (diag->vrdcvcla == vm_devices[i].class && + diag->vrdcvtyp == vm_devices[i].type) { + senseid->cu_type = vm_devices[i].cu_type; + senseid->reserved = 0xff; + return 0; } - CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):" - "vdev class : %02X, vdev type : %04X \n ... " - "rdev class : %02X, rdev type : %04X, " - "rdev model: %02X\n", - devno, ccode, - diag_data.vrdcvcla, diag_data.vrdcvtyp, - diag_data.vrdcrccl, diag_data.vrdccrty, - diag_data.vrdccrmd); + } + + return -ENODEV; } -/* - * Start Sense ID helper function. - * Try to obtain the 'control unit'/'device type' information - * associated with the subchannel. +/** + * diag_get_dev_info - retrieve device information via diag 0x210 + * @cdev: ccw device + * + * Returns zero on success, non-zero otherwise. */ -static int -__ccw_device_sense_id_start(struct ccw_device *cdev) +static int diag210_get_dev_info(struct ccw_device *cdev) { - struct subchannel *sch; - struct ccw1 *ccw; - int ret; - - sch = to_subchannel(cdev->dev.parent); - /* Setup sense channel program. */ - ccw = cdev->private->iccws; - if (sch->schib.pmcw.pim != 0x80) { - /* more than one path installed. */ - ccw->cmd_code = CCW_CMD_SUSPEND_RECONN; - ccw->cda = 0; - ccw->count = 0; - ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC; - ccw++; - } - ccw->cmd_code = CCW_CMD_SENSE_ID; - ccw->cda = (__u32) __pa (&cdev->private->senseid); - ccw->count = sizeof (struct senseid); - ccw->flags = CCW_FLAG_SLI; - - /* Reset device status. */ - memset(&cdev->private->irb, 0, sizeof(struct irb)); - - /* Try on every path. */ - ret = -ENODEV; - while (cdev->private->imask != 0) { - if ((sch->opm & cdev->private->imask) != 0 && - cdev->private->iretry > 0) { - cdev->private->iretry--; - ret = cio_start (sch, cdev->private->iccws, - cdev->private->imask); - /* ret is 0, -EBUSY, -EACCES or -ENODEV */ - if (ret != -EACCES) - return ret; - } - cdev->private->imask >>= 1; - cdev->private->iretry = 5; - } - return ret; + struct ccw_dev_id *dev_id = &cdev->private->dev_id; + struct senseid *senseid = &cdev->private->senseid; + struct diag210 diag_data; + int rc; + + if (dev_id->ssid != 0) + return -ENODEV; + memset(&diag_data, 0, sizeof(diag_data)); + diag_data.vrdcdvno = dev_id->devno; + diag_data.vrdclen = sizeof(diag_data); + rc = diag210(&diag_data); + CIO_TRACE_EVENT(4, "diag210"); + CIO_HEX_EVENT(4, &rc, sizeof(rc)); + CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data)); + if (rc != 0 && rc != 2) + goto err_failed; + if (diag210_to_senseid(senseid, &diag_data)) + goto err_unknown; + return 0; + +err_unknown: + CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n", + dev_id->ssid, dev_id->devno); + return -ENODEV; +err_failed: + CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n", + dev_id->ssid, dev_id->devno, rc); + return -ENODEV; } -void -ccw_device_sense_id_start(struct ccw_device *cdev) +/* + * Initialize SENSE ID data. 
+ */ +static void snsid_init(struct ccw_device *cdev) { - int ret; - - memset (&cdev->private->senseid, 0, sizeof (struct senseid)); - cdev->private->senseid.cu_type = 0xFFFF; - cdev->private->imask = 0x80; - cdev->private->iretry = 5; - ret = __ccw_device_sense_id_start(cdev); - if (ret && ret != -EBUSY) - ccw_device_sense_id_done(cdev, ret); + cdev->private->flags.esid = 0; + memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid)); + cdev->private->senseid.cu_type = 0xffff; } /* - * Called from interrupt context to check if a valid answer - * to Sense ID was received. + * Check for complete SENSE ID data. */ -static int -ccw_device_check_sense_id(struct ccw_device *cdev) +static int snsid_check(struct ccw_device *cdev, void *data) { - struct subchannel *sch; - struct irb *irb; - - sch = to_subchannel(cdev->dev.parent); - irb = &cdev->private->irb; - /* Did we get a proper answer ? */ - if (cdev->private->senseid.cu_type != 0xFFFF && - cdev->private->senseid.reserved == 0xFF) { - if (irb->scsw.count < sizeof (struct senseid) - 8) - cdev->private->flags.esid = 1; - return 0; /* Success */ - } - /* Check the error cases. */ - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) - return -ETIME; - if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) { - /* - * if the device doesn't support the SenseID - * command further retries wouldn't help ... - * NB: We don't check here for intervention required like we - * did before, because tape devices with no tape inserted - * may present this status *in conjunction with* the - * sense id information. So, for intervention required, - * we use the "whack it until it talks" strategy... - */ - CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x " - "reports cmd reject\n", - cdev->private->devno, sch->irq); + struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd; + int len = sizeof(struct senseid) - scsw->count; + + /* Check for incomplete SENSE ID data. */ + if (len < SENSE_ID_MIN_LEN) + goto out_restart; + if (cdev->private->senseid.cu_type == 0xffff) + goto out_restart; + /* Check for incompatible SENSE ID data. */ + if (cdev->private->senseid.reserved != 0xff) return -EOPNOTSUPP; - } - if (irb->esw.esw0.erw.cons) { - CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, " - "lpum %02X, cnt %02d, sns :" - " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", - cdev->private->devno, - irb->esw.esw0.sublog.lpum, - irb->esw.esw0.erw.scnt, - irb->ecw[0], irb->ecw[1], - irb->ecw[2], irb->ecw[3], - irb->ecw[4], irb->ecw[5], - irb->ecw[6], irb->ecw[7]); - return -EAGAIN; - } - if (irb->scsw.cc == 3) { - if ((sch->orb.lpm & - sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) - CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on" - " subchannel %04x is 'not operational'\n", - sch->orb.lpm, cdev->private->devno, - sch->irq); - return -EACCES; - } - /* Hmm, whatever happened, try again. */ - CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " - "subchannel %04x returns status %02X%02X\n", - cdev->private->devno, sch->irq, - irb->scsw.dstat, irb->scsw.cstat); + /* Check for extended-identification information. */ + if (len > SENSE_ID_BASIC_LEN) + cdev->private->flags.esid = 1; + return 0; + +out_restart: + snsid_init(cdev); return -EAGAIN; } /* - * Got interrupt for Sense ID. + * Process SENSE ID request result. 
*/ -void -ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) +static void snsid_callback(struct ccw_device *cdev, void *data, int rc) { - struct subchannel *sch; - struct irb *irb; - int ret; - - sch = to_subchannel(cdev->dev.parent); - irb = (struct irb *) __LC_IRB; - /* Retry sense id, if needed. */ - if (irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if ((irb->scsw.cc == 1) || !irb->scsw.actl) { - ret = __ccw_device_sense_id_start(cdev); - if (ret && ret != -EBUSY) - ccw_device_sense_id_done(cdev, ret); + struct ccw_dev_id *id = &cdev->private->dev_id; + struct senseid *senseid = &cdev->private->senseid; + int vm = 0; + + if (rc && MACHINE_IS_VM) { + /* Try diag 0x210 fallback on z/VM. */ + snsid_init(cdev); + if (diag210_get_dev_info(cdev) == 0) { + rc = 0; + vm = 1; } - return; - } - if (ccw_device_accumulate_and_sense(cdev, irb) != 0) - return; - ret = ccw_device_check_sense_id(cdev); - memset(&cdev->private->irb, 0, sizeof(struct irb)); - switch (ret) { - /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */ - case 0: /* Sense id succeeded. */ - case -ETIME: /* Sense id stopped by timeout. */ - ccw_device_sense_id_done(cdev, ret); - break; - case -EACCES: /* channel is not operational. */ - sch->lpm &= ~cdev->private->imask; - cdev->private->imask >>= 1; - cdev->private->iretry = 5; - /* fall through. */ - case -EAGAIN: /* try again. */ - ret = __ccw_device_sense_id_start(cdev); - if (ret == 0 || ret == -EBUSY) - break; - /* fall through. */ - default: /* Sense ID failed. Try asking VM. */ - if (MACHINE_IS_VM) { - VM_virtual_device_info (cdev->private->devno, - &cdev->private->senseid); - if (cdev->private->senseid.cu_type != 0xFFFF) { - /* Got the device information from VM. */ - ccw_device_sense_id_done(cdev, 0); - return; - } - } - /* - * If we can't couldn't identify the device type we - * consider the device "not operational". - */ - ccw_device_sense_id_done(cdev, -ENODEV); - break; } + CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x " + "%04x/%02x%s\n", id->ssid, id->devno, rc, + senseid->cu_type, senseid->cu_model, senseid->dev_type, + senseid->dev_model, vm ? " (diag210)" : ""); + ccw_device_sense_id_done(cdev, rc); } -EXPORT_SYMBOL(diag210); +/** + * ccw_device_sense_id_start - perform SENSE ID + * @cdev: ccw device + * + * Execute a SENSE ID channel program on @cdev to update its sense id + * information. When finished, call ccw_device_sense_id_done with a + * return code specifying the result. + */ +void ccw_device_sense_id_start(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + struct ccw1 *cp = cdev->private->iccws; + + CIO_TRACE_EVENT(4, "snsid"); + CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); + /* Data setup. */ + snsid_init(cdev); + /* Channel program setup. */ + cp->cmd_code = CCW_CMD_SENSE_ID; + cp->cda = (u32) (addr_t) &cdev->private->senseid; + cp->count = sizeof(struct senseid); + cp->flags = CCW_FLAG_SLI; + /* Request setup. 
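The struct ccw_request filled in here feeds the common I/O request machinery (ccwreq.c): cp points to the channel program, timeout and maxretries bound the retry loop, lpm restricts start attempts to paths that are both installed and operational, check examines the result of each attempt (snsid_check reinitializes the data and returns -EAGAIN to retry), and callback receives the final return code.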
*/ + memset(req, 0, sizeof(*req)); + req->cp = cp; + req->timeout = SENSE_ID_TIMEOUT; + req->maxretries = SENSE_ID_RETRIES; + req->lpm = sch->schib.pmcw.pam & sch->opm; + req->check = snsid_check; + req->callback = snsid_callback; + ccw_request_start(cdev); +} diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 02d01a0de16..f3c417943da 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -1,14 +1,9 @@ /* - * drivers/s390/cio/device_ops.c + * Copyright IBM Corp. 2002, 2009 * - * $Revision: 1.56 $ - * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> @@ -16,20 +11,31 @@ #include <linux/list.h> #include <linux/device.h> #include <linux/delay.h> +#include <linux/completion.h> #include <asm/ccwdev.h> #include <asm/idals.h> -#include <asm/qdio.h> +#include <asm/chpid.h> +#include <asm/fcx.h> #include "cio.h" #include "cio_debug.h" #include "css.h" #include "chsc.h" #include "device.h" -#include "qdio.h" +#include "chp.h" -int -ccw_device_set_options(struct ccw_device *cdev, unsigned long flags) +/** + * ccw_device_set_options_mask() - set some options and unset the rest + * @cdev: device for which the options are to be set + * @flags: options to be set + * + * All flags specified in @flags are set, all flags not specified in @flags + * are cleared. + * Returns: + * %0 on success, -%EINVAL on an invalid flag combination. + */ +int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) { /* * The flag usage is mutal exclusive ... @@ -41,51 +47,158 @@ ccw_device_set_options(struct ccw_device *cdev, unsigned long flags) cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0; cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0; cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0; + cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0; return 0; } -int -ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) +/** + * ccw_device_set_options() - set some options + * @cdev: device for which the options are to be set + * @flags: options to be set + * + * All flags specified in @flags are set, the remainder is left untouched. + * Returns: + * %0 on success, -%EINVAL if an invalid flag combination would ensue. + */ +int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags) +{ + /* + * The flag usage is mutal exclusive ... 
+ */ + if (((flags & CCWDEV_EARLY_NOTIFICATION) && + (flags & CCWDEV_REPORT_ALL)) || + ((flags & CCWDEV_EARLY_NOTIFICATION) && + cdev->private->options.repall) || + ((flags & CCWDEV_REPORT_ALL) && + cdev->private->options.fast)) + return -EINVAL; + cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0; + cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0; + cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0; + cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0; + cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0; + return 0; +} + +/** + * ccw_device_clear_options() - clear some options + * @cdev: device for which the options are to be cleared + * @flags: options to be cleared + * + * All flags specified in @flags are cleared, the remainder is left untouched. + */ +void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags) +{ + cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0; + cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0; + cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0; + cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0; + cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0; +} + +/** + * ccw_device_is_pathgroup - determine if paths to this device are grouped + * @cdev: ccw device + * + * Return non-zero if there is a path group, zero otherwise. + */ +int ccw_device_is_pathgroup(struct ccw_device *cdev) +{ + return cdev->private->flags.pgroup; +} +EXPORT_SYMBOL(ccw_device_is_pathgroup); + +/** + * ccw_device_is_multipath - determine if device is operating in multipath mode + * @cdev: ccw device + * + * Return non-zero if device is operating in multipath mode, zero otherwise. + */ +int ccw_device_is_multipath(struct ccw_device *cdev) +{ + return cdev->private->flags.mpath; +} +EXPORT_SYMBOL(ccw_device_is_multipath); + +/** + * ccw_device_clear() - terminate I/O request processing + * @cdev: target ccw device + * @intparm: interruption parameter; value is only used if no I/O is + * outstanding, otherwise the intparm associated with the I/O request + * is returned + * + * ccw_device_clear() calls csch on @cdev's subchannel. + * Returns: + * %0 on success, + * -%ENODEV on device not operational, + * -%EINVAL on invalid device state. + * Context: + * Interrupts disabled, ccw device lock held + */ +int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) { struct subchannel *sch; int ret; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; + sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE && - cdev->private->state != DEV_STATE_WAIT4IO && cdev->private->state != DEV_STATE_W4SENSE) return -EINVAL; - sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; + ret = cio_clear(sch); if (ret == 0) cdev->private->intparm = intparm; return ret; } -int -ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags) +/** + * ccw_device_start_key() - start a s390 channel program with key + * @cdev: target ccw device + * @cpa: logical start address of channel program + * @intparm: user specific interruption parameter; will be presented back to + * @cdev's interrupt handler. Allows a device driver to associate + * the interrupt with a particular I/O request. 
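Taken together, ccw_device_set_options_mask(), ccw_device_set_options() and ccw_device_clear_options() let a driver adjust path handling before the device is set online. Below is a hedged sketch, not part of the patch, of how a hypothetical driver probe callback might request path grouping and multipath mode; everything except the option flags and ccw_device_set_options() is illustrative:

/*
 * Illustrative sketch only: enable path grouping and multipath I/O
 * from a hypothetical driver probe callback.  Assumes <asm/ccwdev.h>.
 */
static int sample_ccw_probe(struct ccw_device *cdev)
{
	int rc;

	rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
					  CCWDEV_DO_MULTIPATH);
	if (rc)
		return rc;
	/* ... set cdev->handler and allocate per-device data here ... */
	return 0;
}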
+ * @lpm: defines the channel path to be used for a specific I/O request. A + * value of 0 will make cio use the opm. + * @key: storage key to be used for the I/O + * @flags: additional flags; defines the action to be performed for I/O + * processing. + * + * Start a S/390 channel program. When the interrupt arrives, the + * IRQ handler is called, either immediately, delayed (dev-end missing, + * or sense required) or never (no IRQ handler registered). + * Returns: + * %0, if the operation was successful; + * -%EBUSY, if the device is busy, or status pending; + * -%EACCES, if no path specified in @lpm is operational; + * -%ENODEV, if the device is not operational. + * Context: + * Interrupts disabled, ccw device lock held + */ +int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags) { struct subchannel *sch; int ret; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state == DEV_STATE_VERIFY) { /* Remember to fake irb when finished. */ if (!cdev->private->flags.fake_irb) { - cdev->private->flags.fake_irb = 1; + cdev->private->flags.fake_irb = FAKE_CMD_IRB; cdev->private->intparm = intparm; return 0; } else @@ -93,24 +206,64 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, return -EBUSY; } if (cdev->private->state != DEV_STATE_ONLINE || - ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || + ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && + !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || cdev->private->flags.doverify) return -EBUSY; ret = cio_set_options (sch, flags); if (ret) return ret; + /* Adjust requested path mask to exclude unusable paths. */ + if (lpm) { + lpm &= sch->lpm; + if (lpm == 0) + return -EACCES; + } ret = cio_start_key (sch, cpa, lpm, key); - if (ret == 0) + switch (ret) { + case 0: cdev->private->intparm = intparm; + break; + case -EACCES: + case -ENODEV: + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + break; + } return ret; } - -int -ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags, int expires) +/** + * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key + * @cdev: target ccw device + * @cpa: logical start address of channel program + * @intparm: user specific interruption parameter; will be presented back to + * @cdev's interrupt handler. Allows a device driver to associate + * the interrupt with a particular I/O request. + * @lpm: defines the channel path to be used for a specific I/O request. A + * value of 0 will make cio use the opm. + * @key: storage key to be used for the I/O + * @flags: additional flags; defines the action to be performed for I/O + * processing. + * @expires: timeout value in jiffies + * + * Start a S/390 channel program. When the interrupt arrives, the + * IRQ handler is called, either immediately, delayed (dev-end missing, + * or sense required) or never (no IRQ handler registered). + * This function notifies the device driver if the channel program has not + * completed during the time specified by @expires. 
If a timeout occurs, the + * channel program is terminated via xsch, hsch or csch, and the device's + * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). + * Returns: + * %0, if the operation was successful; + * -%EBUSY, if the device is busy, or status pending; + * -%EACCES, if no path specified in @lpm is operational; + * -%ENODEV, if the device is not operational. + * Context: + * Interrupts disabled, ccw device lock held + */ +int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags, int expires) { int ret; @@ -123,18 +276,67 @@ ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, return ret; } -int -ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, unsigned long flags) +/** + * ccw_device_start() - start a s390 channel program + * @cdev: target ccw device + * @cpa: logical start address of channel program + * @intparm: user specific interruption parameter; will be presented back to + * @cdev's interrupt handler. Allows a device driver to associate + * the interrupt with a particular I/O request. + * @lpm: defines the channel path to be used for a specific I/O request. A + * value of 0 will make cio use the opm. + * @flags: additional flags; defines the action to be performed for I/O + * processing. + * + * Start a S/390 channel program. When the interrupt arrives, the + * IRQ handler is called, either immediately, delayed (dev-end missing, + * or sense required) or never (no IRQ handler registered). + * Returns: + * %0, if the operation was successful; + * -%EBUSY, if the device is busy, or status pending; + * -%EACCES, if no path specified in @lpm is operational; + * -%ENODEV, if the device is not operational. + * Context: + * Interrupts disabled, ccw device lock held + */ +int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, unsigned long flags) { return ccw_device_start_key(cdev, cpa, intparm, lpm, PAGE_DEFAULT_KEY, flags); } -int -ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, unsigned long flags, - int expires) +/** + * ccw_device_start_timeout() - start a s390 channel program with timeout + * @cdev: target ccw device + * @cpa: logical start address of channel program + * @intparm: user specific interruption parameter; will be presented back to + * @cdev's interrupt handler. Allows a device driver to associate + * the interrupt with a particular I/O request. + * @lpm: defines the channel path to be used for a specific I/O request. A + * value of 0 will make cio use the opm. + * @flags: additional flags; defines the action to be performed for I/O + * processing. + * @expires: timeout value in jiffies + * + * Start a S/390 channel program. When the interrupt arrives, the + * IRQ handler is called, either immediately, delayed (dev-end missing, + * or sense required) or never (no IRQ handler registered). + * This function notifies the device driver if the channel program has not + * completed during the time specified by @expires. If a timeout occurs, the + * channel program is terminated via xsch, hsch or csch, and the device's + * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). 
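Drivers usually wrap the start and terminate interfaces documented here in small helpers that take the ccw device lock. A hedged sketch follows, assuming the usual get_ccwdev_lock() accessor from <asm/ccwdev.h>; the channel program cp and the request token are hypothetical driver-side objects:

/* Illustrative sketch: submit a prepared channel program.  lpm == 0
 * lets cio choose among the operational paths, flags == 0 requests
 * default behaviour. */
static int sample_submit_io(struct ccw_device *cdev, struct ccw1 *cp,
			    unsigned long token)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	rc = ccw_device_start(cdev, cp, token, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}

/* Illustrative sketch: abort the outstanding request, e.g. from a
 * driver timeout routine; completion of the terminated request is
 * still signalled through cdev->handler. */
static int sample_abort_io(struct ccw_device *cdev, unsigned long token)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	rc = ccw_device_clear(cdev, token);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}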
+ * Returns: + * %0, if the operation was successful; + * -%EBUSY, if the device is busy, or status pending; + * -%EACCES, if no path specified in @lpm is operational; + * -%ENODEV, if the device is not operational. + * Context: + * Interrupts disabled, ccw device lock held + */ +int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, + unsigned long flags, int expires) { return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, PAGE_DEFAULT_KEY, flags, @@ -142,43 +344,70 @@ ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa, } -int -ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) +/** + * ccw_device_halt() - halt I/O request processing + * @cdev: target ccw device + * @intparm: interruption parameter; value is only used if no I/O is + * outstanding, otherwise the intparm associated with the I/O request + * is returned + * + * ccw_device_halt() calls hsch on @cdev's subchannel. + * Returns: + * %0 on success, + * -%ENODEV on device not operational, + * -%EINVAL on invalid device state, + * -%EBUSY on device busy or interrupt pending. + * Context: + * Interrupts disabled, ccw device lock held + */ +int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) { struct subchannel *sch; int ret; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; + sch = to_subchannel(cdev->dev.parent); + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE && - cdev->private->state != DEV_STATE_WAIT4IO && cdev->private->state != DEV_STATE_W4SENSE) return -EINVAL; - sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; + ret = cio_halt(sch); if (ret == 0) cdev->private->intparm = intparm; return ret; } -int -ccw_device_resume(struct ccw_device *cdev) +/** + * ccw_device_resume() - resume channel program execution + * @cdev: target ccw device + * + * ccw_device_resume() calls rsch on @cdev's subchannel. + * Returns: + * %0 on success, + * -%ENODEV on device not operational, + * -%EINVAL on invalid device state, + * -%EBUSY on device busy or interrupt pending. + * Context: + * Interrupts disabled, ccw device lock held + */ +int ccw_device_resume(struct ccw_device *cdev) { struct subchannel *sch; - if (!cdev) + if (!cdev || !cdev->dev.parent) return -ENODEV; sch = to_subchannel(cdev->dev.parent); - if (!sch) - return -ENODEV; + if (!sch->schib.pmcw.ena) + return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE || - !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) + !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) return -EINVAL; return cio_resume(sch); } @@ -189,12 +418,9 @@ ccw_device_resume(struct ccw_device *cdev) int ccw_device_call_handler(struct ccw_device *cdev) { - struct subchannel *sch; unsigned int stctl; int ending_status; - sch = to_subchannel(cdev->dev.parent); - /* * we allow for the device action handler if . 
* - we received ending status @@ -203,7 +429,7 @@ ccw_device_call_handler(struct ccw_device *cdev) * - fast notification was requested (primary status) * - unsolicited interrupts */ - stctl = cdev->private->irb.scsw.stctl; + stctl = scsw_stctl(&cdev->private->irb.scsw); ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || (stctl == SCSW_STCTL_STATUS_PEND); @@ -214,6 +440,9 @@ ccw_device_call_handler(struct ccw_device *cdev) (stctl & SCSW_STCTL_PRIM_STATUS))) return 0; + /* Clear pending timers for device driver initiated I/O. */ + if (ending_status) + ccw_device_set_timeout(cdev, 0); /* * Now we are ready to call the device driver interrupt handler. */ @@ -229,11 +458,21 @@ ccw_device_call_handler(struct ccw_device *cdev) return 1; } -/* - * Search for CIW command in extended sense data. +/** + * ccw_device_get_ciw() - Search for CIW command in extended sense data. + * @cdev: ccw device to inspect + * @ct: command type to look for + * + * During SenseID, command information words (CIWs) describing special + * commands available to the device may have been stored in the extended + * sense data. This function searches for CIWs of a specified command + * type in the extended sense data. + * Returns: + * %NULL if no extended sense data has been stored or if no CIW of the + * specified command type could be found, + * else a pointer to the CIW of the specified command type. */ -struct ciw * -ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct) +struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct) { int ciw_cnt; @@ -245,347 +484,309 @@ ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct) return NULL; } -__u8 -ccw_device_get_path_mask(struct ccw_device *cdev) +/** + * ccw_device_get_path_mask() - get currently available paths + * @cdev: ccw device to be queried + * Returns: + * %0 if no subchannel for the device is available, + * else the mask of currently available paths for the ccw device's subchannel. + */ +__u8 ccw_device_get_path_mask(struct ccw_device *cdev) { struct subchannel *sch; - sch = to_subchannel(cdev->dev.parent); - if (!sch) + if (!cdev->dev.parent) return 0; - else - return sch->vpm; + + sch = to_subchannel(cdev->dev.parent); + return sch->lpm; } -static void -ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) +struct stlck_data { + struct completion done; + int rc; +}; + +void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc) { - if (!ip) - /* unsolicited interrupt */ - return; - - /* Abuse intparm for error reporting. */ - if (IS_ERR(irb)) - cdev->private->intparm = -EIO; - else if ((irb->scsw.dstat != - (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || - (irb->scsw.cstat != 0)) { - /* - * We didn't get channel end / device end. Check if path - * verification has been started; we can retry after it has - * finished. We also retry unit checks except for command reject - * or intervention required. 
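ccw_device_get_ciw() is typically used to locate model-dependent commands, such as read configuration data (RCD), that were announced in the extended SENSE ID data. A hedged sketch that mirrors the RCD setup in the read_conf_data_lpm() code removed further below; the helper name and the caller-provided rcd_buf (which must be addressable by the channel subsystem) are illustrative:

/* Illustrative sketch: build a read-configuration-data CCW from the
 * CIW reported by SENSE ID.  Returns 0 and fills *cp on success. */
static int sample_build_rcd_ccw(struct ccw_device *cdev, struct ccw1 *cp,
				void *rcd_buf)
{
	struct ciw *ciw;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	cp->cmd_code = ciw->cmd;
	cp->cda = (__u32) __pa(rcd_buf);
	cp->count = ciw->count;
	cp->flags = CCW_FLAG_SLI;
	return 0;
}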
- */ - if (cdev->private->flags.doverify || - cdev->private->state == DEV_STATE_VERIFY) - cdev->private->intparm = -EAGAIN; - if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && - !(irb->ecw[0] & - (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ))) - cdev->private->intparm = -EAGAIN; - else - cdev->private->intparm = -EIO; - - } else - cdev->private->intparm = 0; - wake_up(&cdev->private->wait_q); + struct stlck_data *sdata = data; + + sdata->rc = rc; + complete(&sdata->done); } -static inline int -__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) +/* + * Perform unconditional reserve + release. + */ +int ccw_device_stlck(struct ccw_device *cdev) { - int ret; - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); - do { - ret = cio_start (sch, ccw, lpm); - if ((ret == -EBUSY) || (ret == -EACCES)) { - /* Try again later. */ - spin_unlock_irq(&sch->lock); - msleep(10); - spin_lock_irq(&sch->lock); - continue; - } - if (ret != 0) - /* Non-retryable error. */ - break; - /* Wait for end of request. */ - cdev->private->intparm = magic; - spin_unlock_irq(&sch->lock); - wait_event(cdev->private->wait_q, - (cdev->private->intparm == -EIO) || - (cdev->private->intparm == -EAGAIN) || - (cdev->private->intparm == 0)); - spin_lock_irq(&sch->lock); - /* Check at least for channel end / device end */ - if (cdev->private->intparm == -EIO) { - /* Non-retryable error. */ - ret = -EIO; - break; - } - if (cdev->private->intparm == 0) - /* Success. */ - break; - /* Try again later. */ - spin_unlock_irq(&sch->lock); - msleep(10); - spin_lock_irq(&sch->lock); - } while (1); + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct stlck_data data; + u8 *buffer; + int rc; + + /* Check if steal lock operation is valid for this device. */ + if (cdev->drv) { + if (!cdev->private->options.force) + return -EINVAL; + } + buffer = kzalloc(64, GFP_DMA | GFP_KERNEL); + if (!buffer) + return -ENOMEM; + init_completion(&data.done); + data.rc = -EIO; + spin_lock_irq(sch->lock); + rc = cio_enable_subchannel(sch, (u32) (addr_t) sch); + if (rc) + goto out_unlock; + /* Perform operation. */ + cdev->private->state = DEV_STATE_STEAL_LOCK, + ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]); + spin_unlock_irq(sch->lock); + /* Wait for operation to finish. */ + if (wait_for_completion_interruptible(&data.done)) { + /* Got a signal. */ + spin_lock_irq(sch->lock); + ccw_request_cancel(cdev); + spin_unlock_irq(sch->lock); + wait_for_completion(&data.done); + } + rc = data.rc; + /* Check results. */ + spin_lock_irq(sch->lock); + cio_disable_subchannel(sch); + cdev->private->state = DEV_STATE_BOXED; +out_unlock: + spin_unlock_irq(sch->lock); + kfree(buffer); - return ret; + return rc; } /** - * read_dev_chars() - read device characteristics - * @param cdev target ccw device - * @param buffer pointer to buffer for rdc data - * @param length size of rdc data - * @returns 0 for success, negative error value on failure + * chp_get_chp_desc - return newly allocated channel-path descriptor + * @cdev: device to obtain the descriptor for + * @chp_idx: index of the channel path * - * Context: - * called for online device, lock not held - **/ -int -read_dev_chars (struct ccw_device *cdev, void **buffer, int length) + * On success return a newly allocated copy of the channel-path description + * data associated with the given channel path. Return %NULL on error. 
+ */ +struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev, + int chp_idx) { - void (*handler)(struct ccw_device *, unsigned long, struct irb *); struct subchannel *sch; - int ret; - struct ccw1 *rdc_ccw; + struct chp_id chpid; - if (!cdev) - return -ENODEV; - if (!buffer || !length) - return -EINVAL; sch = to_subchannel(cdev->dev.parent); + chp_id_init(&chpid); + chpid.id = sch->schib.pmcw.chpid[chp_idx]; + return chp_get_chp_desc(chpid); +} - CIO_TRACE_EVENT (4, "rddevch"); - CIO_TRACE_EVENT (4, sch->dev.bus_id); - - rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); - if (!rdc_ccw) - return -ENOMEM; - memset(rdc_ccw, 0, sizeof(struct ccw1)); - rdc_ccw->cmd_code = CCW_CMD_RDC; - rdc_ccw->count = length; - rdc_ccw->flags = CCW_FLAG_SLI; - ret = set_normalized_cda (rdc_ccw, (*buffer)); - if (ret != 0) { - kfree(rdc_ccw); - return ret; - } - - spin_lock_irq(&sch->lock); - /* Save interrupt handler. */ - handler = cdev->handler; - /* Temporarily install own handler. */ - cdev->handler = ccw_device_wake_up; - if (cdev->private->state != DEV_STATE_ONLINE) - ret = -ENODEV; - else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || - cdev->private->flags.doverify) - ret = -EBUSY; - else - /* 0x00D9C4C3 == ebcdic "RDC" */ - ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0); - - /* Restore interrupt handler. */ - cdev->handler = handler; - spin_unlock_irq(&sch->lock); - - clear_normalized_cda (rdc_ccw); - kfree(rdc_ccw); - - return ret; +/** + * ccw_device_get_id - obtain a ccw device id + * @cdev: device to obtain the id for + * @dev_id: where to fill in the values + */ +void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) +{ + *dev_id = cdev->private->dev_id; } +EXPORT_SYMBOL(ccw_device_get_id); -/* - * Read Configuration data using path mask +/** + * ccw_device_tm_start_key - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. */ -int -read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm) +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key) { - void (*handler)(struct ccw_device *, unsigned long, struct irb *); struct subchannel *sch; - struct ciw *ciw; - char *rcd_buf; - int ret; - struct ccw1 *rcd_ccw; + int rc; - if (!cdev) - return -ENODEV; - if (!buffer || !length) - return -EINVAL; sch = to_subchannel(cdev->dev.parent); - - CIO_TRACE_EVENT (4, "rdconf"); - CIO_TRACE_EVENT (4, sch->dev.bus_id); - - /* - * scan for RCD command in extended SenseID data - */ - ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD); - if (!ciw || ciw->cmd == 0) - return -EOPNOTSUPP; - - rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); - if (!rcd_ccw) - return -ENOMEM; - memset(rcd_ccw, 0, sizeof(struct ccw1)); - rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA); - if (!rcd_buf) { - kfree(rcd_ccw); - return -ENOMEM; + if (!sch->schib.pmcw.ena) + return -EINVAL; + if (cdev->private->state == DEV_STATE_VERIFY) { + /* Remember to fake irb when finished. 
*/ + if (!cdev->private->flags.fake_irb) { + cdev->private->flags.fake_irb = FAKE_TM_IRB; + cdev->private->intparm = intparm; + return 0; + } else + /* There's already a fake I/O around. */ + return -EBUSY; } - memset (rcd_buf, 0, ciw->count); - rcd_ccw->cmd_code = ciw->cmd; - rcd_ccw->cda = (__u32) __pa (rcd_buf); - rcd_ccw->count = ciw->count; - rcd_ccw->flags = CCW_FLAG_SLI; - - spin_lock_irq(&sch->lock); - /* Save interrupt handler. */ - handler = cdev->handler; - /* Temporarily install own handler. */ - cdev->handler = ccw_device_wake_up; if (cdev->private->state != DEV_STATE_ONLINE) - ret = -ENODEV; - else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || - cdev->private->flags.doverify) - ret = -EBUSY; - else - /* 0x00D9C3C4 == ebcdic "RCD" */ - ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm); - - /* Restore interrupt handler. */ - cdev->handler = handler; - spin_unlock_irq(&sch->lock); - - /* - * on success we update the user input parms - */ - if (ret) { - kfree (rcd_buf); - *buffer = NULL; - *length = 0; - } else { - *length = ciw->count; - *buffer = rcd_buf; + return -EIO; + /* Adjust requested path mask to exclude unusable paths. */ + if (lpm) { + lpm &= sch->lpm; + if (lpm == 0) + return -EACCES; } - kfree(rcd_ccw); + rc = cio_tm_start_key(sch, tcw, lpm, key); + if (rc == 0) + cdev->private->intparm = intparm; + return rc; +} +EXPORT_SYMBOL(ccw_device_tm_start_key); +/** + * ccw_device_tm_start_timeout_key - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key, + int expires) +{ + int ret; + + ccw_device_set_timeout(cdev, expires); + ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); + if (ret != 0) + ccw_device_set_timeout(cdev, 0); return ret; } +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); -/* - * Read Configuration data +/** + * ccw_device_tm_start - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. */ -int -read_conf_data (struct ccw_device *cdev, void **buffer, int *length) +int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm) { - return read_conf_data_lpm (cdev, buffer, length, 0); + return ccw_device_tm_start_key(cdev, tcw, intparm, lpm, + PAGE_DEFAULT_KEY); } +EXPORT_SYMBOL(ccw_device_tm_start); -/* - * Try to break the lock on a boxed device. +/** + * ccw_device_tm_start_timeout - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. 
Return zero on success, non-zero + * otherwise. */ -int -ccw_device_stlck(struct ccw_device *cdev) +int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, int expires) { - void *buf, *buf2; - unsigned long flags; - struct subchannel *sch; - int ret; - - if (!cdev) - return -ENODEV; + return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, + PAGE_DEFAULT_KEY, expires); +} +EXPORT_SYMBOL(ccw_device_tm_start_timeout); - if (cdev->drv && !cdev->private->options.force) - return -EINVAL; +/** + * ccw_device_get_mdc - accumulate max data count + * @cdev: ccw device for which the max data count is accumulated + * @mask: mask of paths to use + * + * Return the number of 64K-bytes blocks all paths at least support + * for a transport command. Return values <= 0 indicate failures. + */ +int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct channel_path *chp; + struct chp_id chpid; + int mdc = 0, i; + + /* Adjust requested path mask to excluded varied off paths. */ + if (mask) + mask &= sch->lpm; + else + mask = sch->lpm; - sch = to_subchannel(cdev->dev.parent); - - CIO_TRACE_EVENT(2, "stl lock"); - CIO_TRACE_EVENT(2, cdev->dev.bus_id); + chp_id_init(&chpid); + for (i = 0; i < 8; i++) { + if (!(mask & (0x80 >> i))) + continue; + chpid.id = sch->schib.pmcw.chpid[i]; + chp = chpid_to_chp(chpid); + if (!chp) + continue; - buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); - if (!buf2) { - kfree(buf); - return -ENOMEM; - } - spin_lock_irqsave(&sch->lock, flags); - ret = cio_enable_subchannel(sch, 3); - if (ret) - goto out_unlock; - /* - * Setup ccw. We chain an unconditional reserve and a release so we - * only break the lock. - */ - cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK; - cdev->private->iccws[0].cda = (__u32) __pa(buf); - cdev->private->iccws[0].count = 32; - cdev->private->iccws[0].flags = CCW_FLAG_CC; - cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE; - cdev->private->iccws[1].cda = (__u32) __pa(buf2); - cdev->private->iccws[1].count = 32; - cdev->private->iccws[1].flags = 0; - ret = cio_start(sch, cdev->private->iccws, 0); - if (ret) { - cio_disable_subchannel(sch); //FIXME: return code? - goto out_unlock; + mutex_lock(&chp->lock); + if (!chp->desc_fmt1.f) { + mutex_unlock(&chp->lock); + return 0; + } + if (!chp->desc_fmt1.r) + mdc = 1; + mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) : + chp->desc_fmt1.mdc; + mutex_unlock(&chp->lock); } - cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; - spin_unlock_irqrestore(&sch->lock, flags); - wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); - spin_lock_irqsave(&sch->lock, flags); - cio_disable_subchannel(sch); //FIXME: return code? - if ((cdev->private->irb.scsw.dstat != - (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || - (cdev->private->irb.scsw.cstat != 0)) - ret = -EIO; - /* Clear irb. */ - memset(&cdev->private->irb, 0, sizeof(struct irb)); -out_unlock: - if (buf) - kfree(buf); - if (buf2) - kfree(buf2); - spin_unlock_irqrestore(&sch->lock, flags); - return ret; + + return mdc; } +EXPORT_SYMBOL(ccw_device_get_mdc); -void * -ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) +/** + * ccw_device_tm_intrg - perform interrogate function + * @cdev: ccw device on which to perform the interrogate function + * + * Perform an interrogate function on the given ccw device. 
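/*
 * Illustrative sketch, not part of the original patch: a driver-side use
 * of the transport-mode interfaces documented above. The tcw is assumed
 * to have been built elsewhere (e.g. with the itcw helpers), locking
 * follows the usual convention of calling the start function with the
 * ccw device lock held, and the 10 * HZ timeout is an arbitrary value
 * chosen for the example.
 */
#include <linux/spinlock.h>
#include <asm/ccwdev.h>
#include <asm/fcx.h>

static int example_tm_io(struct ccw_device *cdev, struct tcw *tcw,
			 unsigned long intparm)
{
	unsigned long flags;
	int mdc, rc;

	/* Largest transfer all paths support, in 64K blocks (<= 0: error). */
	mdc = ccw_device_get_mdc(cdev, 0);
	if (mdc <= 0)
		return -EOPNOTSUPP;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	/* Start the tcw on any available path, abort after 10 seconds. */
	rc = ccw_device_tm_start_timeout(cdev, tcw, intparm, 0, 10 * HZ);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	return rc;
}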
Return zero on + * success, non-zero otherwise. + */ +int ccw_device_tm_intrg(struct ccw_device *cdev) { - struct subchannel *sch; + struct subchannel *sch = to_subchannel(cdev->dev.parent); - sch = to_subchannel(cdev->dev.parent); - return chsc_get_chp_desc(sch, chp_no); + if (!sch->schib.pmcw.ena) + return -EINVAL; + if (cdev->private->state != DEV_STATE_ONLINE) + return -EIO; + if (!scsw_is_tm(&sch->schib.scsw) || + !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND)) + return -EINVAL; + return cio_tm_intrg(sch); } +EXPORT_SYMBOL(ccw_device_tm_intrg); -// FIXME: these have to go: - -int -_ccw_device_get_subchannel_number(struct ccw_device *cdev) +/** + * ccw_device_get_schid - obtain a subchannel id + * @cdev: device to obtain the id for + * @schid: where to fill in the values + */ +void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid) { - return cdev->private->irq; -} + struct subchannel *sch = to_subchannel(cdev->dev.parent); -int -_ccw_device_get_device_number(struct ccw_device *cdev) -{ - return cdev->private->devno; + *schid = sch->schid; } - +EXPORT_SYMBOL_GPL(ccw_device_get_schid); MODULE_LICENSE("GPL"); +EXPORT_SYMBOL(ccw_device_set_options_mask); EXPORT_SYMBOL(ccw_device_set_options); +EXPORT_SYMBOL(ccw_device_clear_options); EXPORT_SYMBOL(ccw_device_clear); EXPORT_SYMBOL(ccw_device_halt); EXPORT_SYMBOL(ccw_device_resume); @@ -595,9 +796,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key); EXPORT_SYMBOL(ccw_device_start_key); EXPORT_SYMBOL(ccw_device_get_ciw); EXPORT_SYMBOL(ccw_device_get_path_mask); -EXPORT_SYMBOL(read_conf_data); -EXPORT_SYMBOL(read_dev_chars); -EXPORT_SYMBOL(_ccw_device_get_subchannel_number); -EXPORT_SYMBOL(_ccw_device_get_device_number); EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc); -EXPORT_SYMBOL_GPL(read_conf_data_lpm); diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 0adac8a6733..37ada05e82a 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -1,448 +1,669 @@ /* - * drivers/s390/cio/device_pgid.c + * CCW device PGID and path verification I/O handling. * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Cornelia Huck(cohuck@de.ibm.com) - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * - * Path Group ID functions. + * Copyright IBM Corp. 2002, 2009 + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ -#include <linux/config.h> -#include <linux/module.h> -#include <linux/init.h> - +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/bitops.h> #include <asm/ccwdev.h> #include <asm/cio.h> -#include <asm/delay.h> -#include <asm/lowcore.h> #include "cio.h" #include "cio_debug.h" -#include "css.h" #include "device.h" +#include "io_sch.h" + +#define PGID_RETRIES 256 +#define PGID_TIMEOUT (10 * HZ) + +static void verify_start(struct ccw_device *cdev); /* - * Start Sense Path Group ID helper function. Used in ccw_device_recog - * and ccw_device_sense_pgid. + * Process path verification data and report result. 
*/ -static int -__ccw_device_sense_pgid_start(struct ccw_device *cdev) +static void verify_done(struct ccw_device *cdev, int rc) { - struct subchannel *sch; - struct ccw1 *ccw; - int ret; + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_dev_id *id = &cdev->private->dev_id; + int mpath = cdev->private->flags.mpath; + int pgroup = cdev->private->flags.pgroup; - sch = to_subchannel(cdev->dev.parent); - /* Setup sense path group id channel program. */ - ccw = cdev->private->iccws; - ccw->cmd_code = CCW_CMD_SENSE_PGID; - ccw->cda = (__u32) __pa (&cdev->private->pgid); - ccw->count = sizeof (struct pgid); - ccw->flags = CCW_FLAG_SLI; - - /* Reset device status. */ - memset(&cdev->private->irb, 0, sizeof(struct irb)); - /* Try on every path. */ - ret = -ENODEV; - while (cdev->private->imask != 0) { - /* Try every path multiple times. */ - if (cdev->private->iretry > 0) { - cdev->private->iretry--; - ret = cio_start (sch, cdev->private->iccws, - cdev->private->imask); - /* ret is 0, -EBUSY, -EACCES or -ENODEV */ - if (ret != -EACCES) - return ret; - CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not " - "operational'\n", - cdev->private->devno, sch->irq, - cdev->private->imask); - - } - cdev->private->imask >>= 1; - cdev->private->iretry = 5; + if (rc) + goto out; + /* Ensure consistent multipathing state at device and channel. */ + if (sch->config.mp != mpath) { + sch->config.mp = mpath; + rc = cio_commit_config(sch); } - return ret; +out: + CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d " + "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath, + sch->vpm); + ccw_device_verify_done(cdev, rc); } -void -ccw_device_sense_pgid_start(struct ccw_device *cdev) +/* + * Create channel program to perform a NOOP. + */ +static void nop_build_cp(struct ccw_device *cdev) { - int ret; + struct ccw_request *req = &cdev->private->req; + struct ccw1 *cp = cdev->private->iccws; - cdev->private->state = DEV_STATE_SENSE_PGID; - cdev->private->imask = 0x80; - cdev->private->iretry = 5; - memset (&cdev->private->pgid, 0, sizeof (struct pgid)); - ret = __ccw_device_sense_pgid_start(cdev); - if (ret && ret != -EBUSY) - ccw_device_sense_pgid_done(cdev, ret); + cp->cmd_code = CCW_CMD_NOOP; + cp->cda = 0; + cp->count = 0; + cp->flags = CCW_FLAG_SLI; + req->cp = cp; } /* - * Called from interrupt context to check if a valid answer - * to Sense Path Group ID was received. + * Perform NOOP on a single path. */ -static int -__ccw_device_check_sense_pgid(struct ccw_device *cdev) +static void nop_do(struct ccw_device *cdev) { - struct subchannel *sch; - struct irb *irb; - - sch = to_subchannel(cdev->dev.parent); - irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) - return -ETIME; - if (irb->esw.esw0.erw.cons && - (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) { - /* - * If the device doesn't support the Sense Path Group ID - * command further retries wouldn't help ... 
- */ - return -EOPNOTSUPP; - } - if (irb->esw.esw0.erw.cons) { - CIO_MSG_EVENT(2, "SNID - device %04x, unit check, " - "lpum %02X, cnt %02d, sns : " - "%02X%02X%02X%02X %02X%02X%02X%02X ...\n", - cdev->private->devno, - irb->esw.esw0.sublog.lpum, - irb->esw.esw0.erw.scnt, - irb->ecw[0], irb->ecw[1], - irb->ecw[2], irb->ecw[3], - irb->ecw[4], irb->ecw[5], - irb->ecw[6], irb->ecw[7]); - return -EAGAIN; - } - if (irb->scsw.cc == 3) { - CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->irq, sch->orb.lpm); - return -EACCES; - } - if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { - CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x " - "is reserved by someone else\n", - cdev->private->devno, sch->irq); - return -EUSERS; - } - return 0; + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm & + ~cdev->private->path_noirq_mask); + if (!req->lpm) + goto out_nopath; + nop_build_cp(cdev); + ccw_request_start(cdev); + return; + +out_nopath: + verify_done(cdev, sch->vpm ? 0 : -EACCES); } /* - * Got interrupt for Sense Path Group ID. + * Adjust NOOP I/O status. */ -void -ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) +static enum io_status nop_filter(struct ccw_device *cdev, void *data, + struct irb *irb, enum io_status status) { - struct subchannel *sch; - struct irb *irb; - int ret; + /* Only subchannel status might indicate a path error. */ + if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0) + return IO_DONE; + return status; +} - irb = (struct irb *) __LC_IRB; - /* Retry sense pgid for cc=1. */ - if (irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) { - ret = __ccw_device_sense_pgid_start(cdev); - if (ret && ret != -EBUSY) - ccw_device_sense_pgid_done(cdev, ret); - } - return; - } - if (ccw_device_accumulate_and_sense(cdev, irb) != 0) - return; - sch = to_subchannel(cdev->dev.parent); - ret = __ccw_device_check_sense_pgid(cdev); - memset(&cdev->private->irb, 0, sizeof(struct irb)); - switch (ret) { - /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ - case 0: /* Sense Path Group ID successful. */ - if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET) - memcpy(&cdev->private->pgid, &global_pgid, - sizeof(struct pgid)); - ccw_device_sense_pgid_done(cdev, 0); - break; - case -EOPNOTSUPP: /* Sense Path Group ID not supported */ - ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP); - break; - case -ETIME: /* Sense path group id stopped by timeout. */ - ccw_device_sense_pgid_done(cdev, -ETIME); +/* + * Process NOOP request result for a single path. + */ +static void nop_callback(struct ccw_device *cdev, void *data, int rc) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + + switch (rc) { + case 0: + sch->vpm |= req->lpm; break; - case -EACCES: /* channel is not operational. */ - sch->lpm &= ~cdev->private->imask; - cdev->private->imask >>= 1; - cdev->private->iretry = 5; - /* Fall through. */ - case -EAGAIN: /* Try again. */ - ret = __ccw_device_sense_pgid_start(cdev); - if (ret != 0 && ret != -EBUSY) - ccw_device_sense_pgid_done(cdev, -ENODEV); + case -ETIME: + cdev->private->path_noirq_mask |= req->lpm; break; - case -EUSERS: /* device is reserved for someone else. 
*/ - ccw_device_sense_pgid_done(cdev, -EUSERS); + case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; break; + default: + goto err; } + /* Continue on the next path. */ + req->lpm >>= 1; + nop_do(cdev); + return; + +err: + verify_done(cdev, rc); } /* - * Path Group ID helper function. + * Create channel program to perform SET PGID on a single path. */ -static int -__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) +static void spid_build_cp(struct ccw_device *cdev, u8 fn) { - struct subchannel *sch; - struct ccw1 *ccw; - int ret; + struct ccw_request *req = &cdev->private->req; + struct ccw1 *cp = cdev->private->iccws; + int i = 8 - ffs(req->lpm); + struct pgid *pgid = &cdev->private->pgid[i]; - sch = to_subchannel(cdev->dev.parent); - - /* Setup sense path group id channel program. */ - cdev->private->pgid.inf.fc = func; - ccw = cdev->private->iccws; - if (!cdev->private->flags.pgid_single) { - cdev->private->pgid.inf.fc |= SPID_FUNC_MULTI_PATH; - ccw->cmd_code = CCW_CMD_SUSPEND_RECONN; - ccw->cda = 0; - ccw->count = 0; - ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC; - ccw++; - } else - cdev->private->pgid.inf.fc |= SPID_FUNC_SINGLE_PATH; - - ccw->cmd_code = CCW_CMD_SET_PGID; - ccw->cda = (__u32) __pa (&cdev->private->pgid); - ccw->count = sizeof (struct pgid); - ccw->flags = CCW_FLAG_SLI; - - /* Reset device status. */ - memset(&cdev->private->irb, 0, sizeof(struct irb)); - - /* Try multiple times. */ - ret = -ENODEV; - if (cdev->private->iretry > 0) { - cdev->private->iretry--; - ret = cio_start (sch, cdev->private->iccws, - cdev->private->imask); - /* ret is 0, -EBUSY, -EACCES or -ENODEV */ - if ((ret != -EACCES) && (ret != -ENODEV)) - return ret; + pgid->inf.fc = fn; + cp->cmd_code = CCW_CMD_SET_PGID; + cp->cda = (u32) (addr_t) pgid; + cp->count = sizeof(*pgid); + cp->flags = CCW_FLAG_SLI; + req->cp = cp; +} + +static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc) +{ + if (rc) { + /* We don't know the path groups' state. Abort. */ + verify_done(cdev, rc); + return; } - /* PGID command failed on this path. Switch it off. */ - sch->lpm &= ~cdev->private->imask; - sch->vpm &= ~cdev->private->imask; - CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->irq, cdev->private->imask); - return ret; + /* + * Path groups have been reset. Restart path verification but + * leave paths in path_noirq_mask out. + */ + cdev->private->flags.pgid_unknown = 0; + verify_start(cdev); } /* - * Called from interrupt context to check if a valid answer - * to Set Path Group ID was received. + * Reset pathgroups and restart path verification, leave unusable paths out. */ -static int -__ccw_device_check_pgid(struct ccw_device *cdev) +static void pgid_wipeout_start(struct ccw_device *cdev) { - struct subchannel *sch; - struct irb *irb; - - sch = to_subchannel(cdev->dev.parent); - irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) - return -ETIME; - if (irb->esw.esw0.erw.cons) { - if (irb->ecw[0] & SNS0_CMD_REJECT) - return -EOPNOTSUPP; - /* Hmm, whatever happened, try again. 
*/ - CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, " - "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", - cdev->private->devno, irb->esw.esw0.erw.scnt, - irb->ecw[0], irb->ecw[1], - irb->ecw[2], irb->ecw[3], - irb->ecw[4], irb->ecw[5], - irb->ecw[6], irb->ecw[7]); - return -EAGAIN; - } - if (irb->scsw.cc == 3) { - CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->irq, - cdev->private->imask); - return -EACCES; - } - return 0; + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_dev_id *id = &cdev->private->dev_id; + struct ccw_request *req = &cdev->private->req; + u8 fn; + + CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n", + id->ssid, id->devno, cdev->private->pgid_valid_mask, + cdev->private->path_noirq_mask); + + /* Initialize request data. */ + memset(req, 0, sizeof(*req)); + req->timeout = PGID_TIMEOUT; + req->maxretries = PGID_RETRIES; + req->lpm = sch->schib.pmcw.pam; + req->callback = pgid_wipeout_callback; + fn = SPID_FUNC_DISBAND; + if (cdev->private->flags.mpath) + fn |= SPID_FUNC_MULTI_PATH; + spid_build_cp(cdev, fn); + ccw_request_start(cdev); } -static void -__ccw_device_verify_start(struct ccw_device *cdev) +/* + * Perform establish/resign SET PGID on a single path. + */ +static void spid_do(struct ccw_device *cdev) { - struct subchannel *sch; - __u8 imask, func; - int ret; + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + u8 fn; - sch = to_subchannel(cdev->dev.parent); - while (sch->vpm != sch->lpm) { - /* Find first unequal bit in vpm vs. lpm */ - for (imask = 0x80; imask != 0; imask >>= 1) - if ((sch->vpm & imask) != (sch->lpm & imask)) - break; - cdev->private->imask = imask; - func = (sch->vpm & imask) ? - SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; - ret = __ccw_device_do_pgid(cdev, func); - if (ret == 0 || ret == -EBUSY) - return; - cdev->private->iretry = 5; + /* Use next available path that is not already in correct state. */ + req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask); + if (!req->lpm) + goto out_nopath; + /* Channel program setup. */ + if (req->lpm & sch->opm) + fn = SPID_FUNC_ESTABLISH; + else + fn = SPID_FUNC_RESIGN; + if (cdev->private->flags.mpath) + fn |= SPID_FUNC_MULTI_PATH; + spid_build_cp(cdev, fn); + ccw_request_start(cdev); + return; + +out_nopath: + if (cdev->private->flags.pgid_unknown) { + /* At least one SPID could be partially done. */ + pgid_wipeout_start(cdev); + return; } - ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); + verify_done(cdev, sch->vpm ? 0 : -EACCES); } - + /* - * Got interrupt for Set Path Group ID. + * Process SET PGID request result for a single path. */ -void -ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) +static void spid_callback(struct ccw_device *cdev, void *data, int rc) { - struct subchannel *sch; - struct irb *irb; - int ret; + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; - irb = (struct irb *) __LC_IRB; - /* Retry set pgid for cc=1. 
*/ - if (irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) - __ccw_device_verify_start(cdev); - return; - } - if (ccw_device_accumulate_and_sense(cdev, irb) != 0) - return; - sch = to_subchannel(cdev->dev.parent); - ret = __ccw_device_check_pgid(cdev); - memset(&cdev->private->irb, 0, sizeof(struct irb)); - switch (ret) { - /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ + switch (rc) { case 0: - /* Establish or Resign Path Group done. Update vpm. */ - if ((sch->lpm & cdev->private->imask) != 0) - sch->vpm |= cdev->private->imask; - else - sch->vpm &= ~cdev->private->imask; - cdev->private->iretry = 5; - __ccw_device_verify_start(cdev); - break; - case -EOPNOTSUPP: - /* - * One of those strange devices which claim to be able - * to do multipathing but not for Set Path Group ID. - */ - if (cdev->private->flags.pgid_single) { - ccw_device_verify_done(cdev, -EOPNOTSUPP); - break; - } - cdev->private->flags.pgid_single = 1; - /* fall through. */ - case -EAGAIN: /* Try again. */ - __ccw_device_verify_start(cdev); + sch->vpm |= req->lpm & sch->opm; break; - case -ETIME: /* Set path group id stopped by timeout. */ - ccw_device_verify_done(cdev, -ETIME); + case -ETIME: + cdev->private->flags.pgid_unknown = 1; + cdev->private->path_noirq_mask |= req->lpm; break; - case -EACCES: /* channel is not operational. */ - sch->lpm &= ~cdev->private->imask; - sch->vpm &= ~cdev->private->imask; - cdev->private->iretry = 5; - __ccw_device_verify_start(cdev); + case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; break; + case -EOPNOTSUPP: + if (cdev->private->flags.mpath) { + /* Try without multipathing. */ + cdev->private->flags.mpath = 0; + goto out_restart; + } + /* Try without pathgrouping. */ + cdev->private->flags.pgroup = 0; + goto out_restart; + default: + goto err; + } + req->lpm >>= 1; + spid_do(cdev); + return; + +out_restart: + verify_start(cdev); + return; +err: + verify_done(cdev, rc); +} + +static void spid_start(struct ccw_device *cdev) +{ + struct ccw_request *req = &cdev->private->req; + + /* Initialize request data. */ + memset(req, 0, sizeof(*req)); + req->timeout = PGID_TIMEOUT; + req->maxretries = PGID_RETRIES; + req->lpm = 0x80; + req->singlepath = 1; + req->callback = spid_callback; + spid_do(cdev); +} + +static int pgid_is_reset(struct pgid *p) +{ + char *c; + + for (c = (char *)p + 1; c < (char *)(p + 1); c++) { + if (*c != 0) + return 0; } + return 1; } -void -ccw_device_verify_start(struct ccw_device *cdev) +static int pgid_cmp(struct pgid *p1, struct pgid *p2) { - cdev->private->flags.pgid_single = 0; - cdev->private->iretry = 5; - __ccw_device_verify_start(cdev); + return memcmp((char *) p1 + 1, (char *) p2 + 1, + sizeof(struct pgid) - 1); } -static void -__ccw_device_disband_start(struct ccw_device *cdev) +/* + * Determine pathgroup state from PGID data. 
+ */ +static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, + int *mismatch, u8 *reserved, u8 *reset) { - struct subchannel *sch; - int ret; + struct pgid *pgid = &cdev->private->pgid[0]; + struct pgid *first = NULL; + int lpm; + int i; - sch = to_subchannel(cdev->dev.parent); - while (cdev->private->imask != 0) { - if (sch->lpm & cdev->private->imask) { - ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND); - if (ret == 0) - return; + *mismatch = 0; + *reserved = 0; + *reset = 0; + for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) { + if ((cdev->private->pgid_valid_mask & lpm) == 0) + continue; + if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) + *reserved |= lpm; + if (pgid_is_reset(pgid)) { + *reset |= lpm; + continue; + } + if (!first) { + first = pgid; + continue; } - cdev->private->iretry = 5; - cdev->private->imask >>= 1; + if (pgid_cmp(pgid, first) != 0) + *mismatch = 1; } - ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); + if (!first) + first = &channel_subsystems[0]->global_pgid; + *p = first; +} + +static u8 pgid_to_donepm(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct pgid *pgid; + int i; + int lpm; + u8 donepm = 0; + + /* Set bits for paths which are already in the target state. */ + for (i = 0; i < 8; i++) { + lpm = 0x80 >> i; + if ((cdev->private->pgid_valid_mask & lpm) == 0) + continue; + pgid = &cdev->private->pgid[i]; + if (sch->opm & lpm) { + if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED) + continue; + } else { + if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED) + continue; + } + if (cdev->private->flags.mpath) { + if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH) + continue; + } else { + if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH) + continue; + } + donepm |= lpm; + } + + return donepm; +} + +static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid) +{ + int i; + + for (i = 0; i < 8; i++) + memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid)); } /* - * Got interrupt for Unset Path Group ID. + * Process SENSE PGID data and report result. */ -void -ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) +static void snid_done(struct ccw_device *cdev, int rc) { - struct subchannel *sch; - struct irb *irb; - int ret; + struct ccw_dev_id *id = &cdev->private->dev_id; + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct pgid *pgid; + int mismatch = 0; + u8 reserved = 0; + u8 reset = 0; + u8 donepm; - irb = (struct irb *) __LC_IRB; - /* Retry set pgid for cc=1. */ - if (irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) - __ccw_device_disband_start(cdev); - return; + if (rc) + goto out; + pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset); + if (reserved == cdev->private->pgid_valid_mask) + rc = -EUSERS; + else if (mismatch) + rc = -EOPNOTSUPP; + else { + donepm = pgid_to_donepm(cdev); + sch->vpm = donepm & sch->opm; + cdev->private->pgid_reset_mask |= reset; + cdev->private->pgid_todo_mask &= + ~(donepm | cdev->private->path_noirq_mask); + pgid_fill(cdev, pgid); } - if (ccw_device_accumulate_and_sense(cdev, irb) != 0) - return; - sch = to_subchannel(cdev->dev.parent); - ret = __ccw_device_check_pgid(cdev); - memset(&cdev->private->irb, 0, sizeof(struct irb)); - switch (ret) { - /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ - case 0: /* disband successful. 
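/*
 * Illustrative sketch, not part of the original patch: the relation
 * between a path-mask bit and the per-path array index used by
 * spid_build_cp() and pgid_to_donepm() above. Path i (0..7, in
 * installed-path order) corresponds to mask bit 0x80 >> i, and a
 * single-bit lpm maps back to the index via 8 - ffs(lpm).
 */
#include <linux/bitops.h>
#include <linux/types.h>

static inline u8 example_path_to_mask(int i)
{
	return 0x80 >> i;	/* i = 0 -> 0x80, i = 7 -> 0x01 */
}

static inline int example_mask_to_path(u8 lpm)
{
	return 8 - ffs(lpm);	/* 0x80 -> 0, 0x01 -> 7 */
}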
*/ - sch->vpm = 0; - ccw_device_disband_done(cdev, ret); +out: + CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " + "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid, + id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, + cdev->private->pgid_todo_mask, mismatch, reserved, reset); + switch (rc) { + case 0: + if (cdev->private->flags.pgid_unknown) { + pgid_wipeout_start(cdev); + return; + } + /* Anything left to do? */ + if (cdev->private->pgid_todo_mask == 0) { + verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); + return; + } + /* Perform path-grouping. */ + spid_start(cdev); break; case -EOPNOTSUPP: - /* - * One of those strange devices which claim to be able - * to do multipathing but not for Unset Path Group ID. - */ - cdev->private->flags.pgid_single = 1; - /* fall through. */ - case -EAGAIN: /* Try again. */ - __ccw_device_disband_start(cdev); + /* Path-grouping not supported. */ + cdev->private->flags.pgroup = 0; + cdev->private->flags.mpath = 0; + verify_start(cdev); + break; + default: + verify_done(cdev, rc); + } +} + +/* + * Create channel program to perform a SENSE PGID on a single path. + */ +static void snid_build_cp(struct ccw_device *cdev) +{ + struct ccw_request *req = &cdev->private->req; + struct ccw1 *cp = cdev->private->iccws; + int i = 8 - ffs(req->lpm); + + /* Channel program setup. */ + cp->cmd_code = CCW_CMD_SENSE_PGID; + cp->cda = (u32) (addr_t) &cdev->private->pgid[i]; + cp->count = sizeof(struct pgid); + cp->flags = CCW_FLAG_SLI; + req->cp = cp; +} + +/* + * Perform SENSE PGID on a single path. + */ +static void snid_do(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + int ret; + + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & + ~cdev->private->path_noirq_mask); + if (!req->lpm) + goto out_nopath; + snid_build_cp(cdev); + ccw_request_start(cdev); + return; + +out_nopath: + if (cdev->private->pgid_valid_mask) + ret = 0; + else if (cdev->private->path_noirq_mask) + ret = -ETIME; + else + ret = -EACCES; + snid_done(cdev, ret); +} + +/* + * Process SENSE PGID request result for single path. + */ +static void snid_callback(struct ccw_device *cdev, void *data, int rc) +{ + struct ccw_request *req = &cdev->private->req; + + switch (rc) { + case 0: + cdev->private->pgid_valid_mask |= req->lpm; break; - case -ETIME: /* Set path group id stopped by timeout. */ - ccw_device_disband_done(cdev, -ETIME); + case -ETIME: + cdev->private->flags.pgid_unknown = 1; + cdev->private->path_noirq_mask |= req->lpm; break; - case -EACCES: /* channel is not operational. */ - cdev->private->imask >>= 1; - cdev->private->iretry = 5; - __ccw_device_disband_start(cdev); + case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; break; + default: + goto err; } + /* Continue on the next path. */ + req->lpm >>= 1; + snid_do(cdev); + return; + +err: + snid_done(cdev, rc); } -void -ccw_device_disband_start(struct ccw_device *cdev) +/* + * Perform path verification. + */ +static void verify_start(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + struct ccw_dev_id *devid = &cdev->private->dev_id; + + sch->vpm = 0; + sch->lpm = sch->schib.pmcw.pam; + + /* Initialize PGID data. 
*/ + memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); + cdev->private->pgid_valid_mask = 0; + cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; + cdev->private->path_notoper_mask = 0; + + /* Initialize request data. */ + memset(req, 0, sizeof(*req)); + req->timeout = PGID_TIMEOUT; + req->maxretries = PGID_RETRIES; + req->lpm = 0x80; + req->singlepath = 1; + if (cdev->private->flags.pgroup) { + CIO_TRACE_EVENT(4, "snid"); + CIO_HEX_EVENT(4, devid, sizeof(*devid)); + req->callback = snid_callback; + snid_do(cdev); + } else { + CIO_TRACE_EVENT(4, "nop"); + CIO_HEX_EVENT(4, devid, sizeof(*devid)); + req->filter = nop_filter; + req->callback = nop_callback; + nop_do(cdev); + } +} + +/** + * ccw_device_verify_start - perform path verification + * @cdev: ccw device + * + * Perform an I/O on each available channel path to @cdev to determine which + * paths are operational. The resulting path mask is stored in sch->vpm. + * If device options specify pathgrouping, establish a pathgroup for the + * operational paths. When finished, call ccw_device_verify_done with a + * return code specifying the result. + */ +void ccw_device_verify_start(struct ccw_device *cdev) { - cdev->private->flags.pgid_single = 0; - cdev->private->iretry = 5; - cdev->private->imask = 0x80; - __ccw_device_disband_start(cdev); + CIO_TRACE_EVENT(4, "vrfy"); + CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); + /* + * Initialize pathgroup and multipath state with target values. + * They may change in the course of path verification. + */ + cdev->private->flags.pgroup = cdev->private->options.pgroup; + cdev->private->flags.mpath = cdev->private->options.mpath; + cdev->private->flags.doverify = 0; + cdev->private->path_noirq_mask = 0; + verify_start(cdev); } + +/* + * Process disband SET PGID request result. + */ +static void disband_callback(struct ccw_device *cdev, void *data, int rc) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_dev_id *id = &cdev->private->dev_id; + + if (rc) + goto out; + /* Ensure consistent multipathing state at device and channel. */ + cdev->private->flags.mpath = 0; + if (sch->config.mp) { + sch->config.mp = 0; + rc = cio_commit_config(sch); + } +out: + CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno, + rc); + ccw_device_disband_done(cdev, rc); +} + +/** + * ccw_device_disband_start - disband pathgroup + * @cdev: ccw device + * + * Execute a SET PGID channel program on @cdev to disband a previously + * established pathgroup. When finished, call ccw_device_disband_done with + * a return code specifying the result. + */ +void ccw_device_disband_start(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + u8 fn; + + CIO_TRACE_EVENT(4, "disb"); + CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); + /* Request setup. 
*/ + memset(req, 0, sizeof(*req)); + req->timeout = PGID_TIMEOUT; + req->maxretries = PGID_RETRIES; + req->lpm = sch->schib.pmcw.pam & sch->opm; + req->singlepath = 1; + req->callback = disband_callback; + fn = SPID_FUNC_DISBAND; + if (cdev->private->flags.mpath) + fn |= SPID_FUNC_MULTI_PATH; + spid_build_cp(cdev, fn); + ccw_request_start(cdev); +} + +static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2) +{ + struct ccw_request *req = &cdev->private->req; + struct ccw1 *cp = cdev->private->iccws; + + cp[0].cmd_code = CCW_CMD_STLCK; + cp[0].cda = (u32) (addr_t) buf1; + cp[0].count = 32; + cp[0].flags = CCW_FLAG_CC; + cp[1].cmd_code = CCW_CMD_RELEASE; + cp[1].cda = (u32) (addr_t) buf2; + cp[1].count = 32; + cp[1].flags = 0; + req->cp = cp; +} + +static void stlck_callback(struct ccw_device *cdev, void *data, int rc) +{ + ccw_device_stlck_done(cdev, data, rc); +} + +/** + * ccw_device_stlck_start - perform unconditional release + * @cdev: ccw device + * @data: data pointer to be passed to ccw_device_stlck_done + * @buf1: data pointer used in channel program + * @buf2: data pointer used in channel program + * + * Execute a channel program on @cdev to release an existing PGID reservation. + * When finished, call ccw_device_stlck_done with a return code specifying the + * result. + */ +void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1, + void *buf2) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_request *req = &cdev->private->req; + + CIO_TRACE_EVENT(4, "stlck"); + CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); + /* Request setup. */ + memset(req, 0, sizeof(*req)); + req->timeout = PGID_TIMEOUT; + req->maxretries = PGID_RETRIES; + req->lpm = sch->schib.pmcw.pam & sch->opm; + req->data = data; + req->callback = stlck_callback; + stlck_build_cp(cdev, buf1, buf2); + ccw_request_start(cdev); +} + diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 12a24d4331a..15b56a15db1 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -1,15 +1,11 @@ /* - * drivers/s390/cio/device_status.c - * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation - * Author(s): Cornelia Huck(cohuck@de.ibm.com) + * Copyright IBM Corp. 2002 + * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) * * Status accumulation and basic sense functions. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> @@ -21,33 +17,31 @@ #include "css.h" #include "device.h" #include "ioasm.h" +#include "io_sch.h" /* * Check for any kind of channel or interface control check but don't * issue the message for the console device */ -static inline void +static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { - if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | - SCHN_STAT_CHN_CTRL_CHK | - SCHN_STAT_INTF_CTRL_CHK))) + char dbf_text[15]; + + if (!scsw_is_valid_cstat(&irb->scsw) || + !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK | + SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK))) return; - CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " "received" - " ... device %04X on subchannel %04X, dev_stat " + " ... 
device %04x on subchannel 0.%x.%04x, dev_stat " ": %02X sch_stat : %02X\n", - cdev->private->devno, cdev->private->irq, - irb->scsw.dstat, irb->scsw.cstat); - - if (irb->scsw.cc != 3) { - char dbf_text[15]; - - sprintf(dbf_text, "chk%x", cdev->private->irq); - CIO_TRACE_EVENT(0, dbf_text); - CIO_HEX_EVENT(0, irb, sizeof (struct irb)); - } + cdev->private->dev_id.devno, cdev->private->schid.ssid, + cdev->private->schid.sch_no, + scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); + sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); + CIO_TRACE_EVENT(0, dbf_text); + CIO_HEX_EVENT(0, irb, sizeof(struct irb)); } /* @@ -59,21 +53,23 @@ ccw_device_path_notoper(struct ccw_device *cdev) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - stsch (sch->irq, &sch->schib); + if (cio_update_schib(sch)) + goto doverify; - CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are " - "not operational \n", __FUNCTION__, sch->irq, + CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " + "not operational \n", __func__, + sch->schid.ssid, sch->schid.sch_no, sch->schib.pmcw.pnom); sch->lpm &= ~sch->schib.pmcw.pnom; - if (cdev->private->options.pgroup) - cdev->private->flags.doverify = 1; +doverify: + cdev->private->flags.doverify = 1; } /* * Copy valid bits from the extended control word to device irb. */ -static inline void +static void ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) { /* @@ -81,12 +77,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) * are condition that have to be met for the extended control * bit to have meaning. Sick. */ - cdev->private->irb.scsw.ectl = 0; - if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && - !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) - cdev->private->irb.scsw.ectl = irb->scsw.ectl; + cdev->private->irb.scsw.cmd.ectl = 0; + if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) && + !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS)) + cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl; /* Check if extended control word is valid. */ - if (!cdev->private->irb.scsw.ectl) + if (!cdev->private->irb.scsw.cmd.ectl) return; /* Copy concurrent sense / model dependent information. */ memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); @@ -95,14 +91,15 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) /* * Check if extended status word is valid. */ -static inline int +static int ccw_device_accumulate_esw_valid(struct irb *irb) { - if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) + if (!irb->scsw.cmd.eswf && + (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND)) return 0; - if (irb->scsw.stctl == - (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && - !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) + if (irb->scsw.cmd.stctl == + (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && + !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) return 0; return 1; } @@ -110,7 +107,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb) /* * Copy valid bits from the extended status word to device irb. */ -static inline void +static void ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) { struct irb *cdev_irb; @@ -125,7 +122,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; /* Copy subchannel logout information if esw is of format 0. */ - if (irb->scsw.eswf) { + if (irb->scsw.cmd.eswf) { cdev_sublog = &cdev_irb->esw.esw0.sublog; sublog = &irb->esw.esw0.sublog; /* Copy extended status flags. 
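/*
 * Illustrative sketch, not part of the original patch: a device driver's
 * interrupt handler can inspect the accumulated irb with the same scsw
 * accessors the status-accumulation code above uses. The handler name
 * and the chosen reactions are assumptions for the example.
 */
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

static void example_int_handler(struct ccw_device *cdev,
				unsigned long intparm, struct irb *irb)
{
	/* The common layer may pass an error pointer instead of an irb. */
	if (IS_ERR(irb))
		return;
	/* Any channel or interface check in the subchannel status is fatal. */
	if (scsw_cstat(&irb->scsw) != 0)
		return;
	/* Treat channel end + device end as successful completion. */
	if ((scsw_dstat(&irb->scsw) & (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) ==
	    (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return;	/* driver-specific completion handling would go here */
}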
*/ @@ -134,7 +131,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) * Copy fields that have a meaning for channel data check * channel control check and interface control check. */ - if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | + if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)) { /* Copy ancillary report bit. */ @@ -155,7 +152,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) /* Copy i/o-error alert. */ cdev_sublog->ioerr = sublog->ioerr; /* Copy channel path timeout bit. */ - if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) + if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK) cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; /* Copy failing storage address validity flag. */ cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; @@ -179,7 +176,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth; /* Copy path verification required flag. */ cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf; - if (irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) + if (irb->esw.esw0.erw.pvrf) cdev->private->flags.doverify = 1; /* Copy concurrent sense bit. */ cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons; @@ -200,77 +197,90 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * If not, the remaining bit have no meaning and we must ignore them. * The esw is not meaningful as well... */ - if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) + if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) return; /* Check for channel checks and interface control checks. */ ccw_device_msg_control_check(cdev, irb); /* Check for path not operational. */ - if (irb->scsw.pno && irb->scsw.fctl != 0 && - (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) + if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) ccw_device_path_notoper(cdev); - + /* No irb accumulation for transport mode irbs. */ + if (scsw_is_tm(&irb->scsw)) { + memcpy(&cdev->private->irb, irb, sizeof(struct irb)); + return; + } /* * Don't accumulate unsolicited interrupts. */ - if ((irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && - (!irb->scsw.cc)) + if (!scsw_is_solicited(&irb->scsw)) return; cdev_irb = &cdev->private->irb; + /* + * If the clear function had been performed, all formerly pending + * status at the subchannel has been cleared and we must not pass + * intermediate accumulated status to the device driver. + */ + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) + memset(&cdev->private->irb, 0, sizeof(struct irb)); + /* Copy bits which are valid only for the start function. */ - if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { + if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) { /* Copy key. */ - cdev_irb->scsw.key = irb->scsw.key; + cdev_irb->scsw.cmd.key = irb->scsw.cmd.key; /* Copy suspend control bit. */ - cdev_irb->scsw.sctl = irb->scsw.sctl; + cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl; /* Accumulate deferred condition code. */ - cdev_irb->scsw.cc |= irb->scsw.cc; + cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc; /* Copy ccw format bit. */ - cdev_irb->scsw.fmt = irb->scsw.fmt; + cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt; /* Copy prefetch bit. */ - cdev_irb->scsw.pfch = irb->scsw.pfch; + cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch; /* Copy initial-status-interruption-control. 
*/ - cdev_irb->scsw.isic = irb->scsw.isic; + cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic; /* Copy address limit checking control. */ - cdev_irb->scsw.alcc = irb->scsw.alcc; + cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc; /* Copy suppress suspend bit. */ - cdev_irb->scsw.ssi = irb->scsw.ssi; + cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi; } /* Take care of the extended control bit and extended control word. */ ccw_device_accumulate_ecw(cdev, irb); /* Accumulate function control. */ - cdev_irb->scsw.fctl |= irb->scsw.fctl; + cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl; /* Copy activity control. */ - cdev_irb->scsw.actl= irb->scsw.actl; + cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl; /* Accumulate status control. */ - cdev_irb->scsw.stctl |= irb->scsw.stctl; + cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl; /* * Copy ccw address if it is valid. This is a bit simplified * but should be close enough for all practical purposes. */ - if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || - ((irb->scsw.stctl == + if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) || + ((irb->scsw.cmd.stctl == (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && - (irb->scsw.actl & SCSW_ACTL_DEVACT) && - (irb->scsw.actl & SCSW_ACTL_SCHACT)) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) - cdev_irb->scsw.cpa = irb->scsw.cpa; + (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) && + (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) || + (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) + cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa; /* Accumulate device status, but not the device busy flag. */ - cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; - cdev_irb->scsw.dstat |= irb->scsw.dstat; + cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY; + /* dstat is not always valid. */ + if (irb->scsw.cmd.stctl & + (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS + | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) + cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat; /* Accumulate subchannel status. */ - cdev_irb->scsw.cstat |= irb->scsw.cstat; + cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat; /* Copy residual count if it is valid. */ - if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) - cdev_irb->scsw.count = irb->scsw.count; + if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && + (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) + == 0) + cdev_irb->scsw.cmd.count = irb->scsw.cmd.count; /* Take care of bits in the extended status word. */ ccw_device_accumulate_esw(cdev, irb); @@ -287,7 +297,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * sense facility available/supported when enabling the * concurrent sense facility. */ - if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && + if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && !(cdev_irb->esw.esw0.erw.cons)) cdev->private->flags.dosense = 1; } @@ -299,11 +309,13 @@ int ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) { struct subchannel *sch; + struct ccw1 *sense_ccw; + int rc; sch = to_subchannel(cdev->dev.parent); /* A sense is required, can we do it now ? */ - if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) + if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) /* * we received an Unit Check but we have no final * status yet, therefore we must delay the SENSE @@ -315,13 +327,16 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) /* * We have ending status but no sense information. Do a basic sense. 
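/*
 * Illustrative sketch, not part of the original patch: once the common
 * I/O layer has performed the basic sense described above, the driver
 * sees the sense bytes in irb->ecw with erw.cons set. A handler fragment
 * might evaluate them as below; the mapping of sense bits to return
 * codes is an assumption for the example.
 */
#include <asm/cio.h>

static int example_check_sense(struct irb *irb)
{
	if (!(scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK))
		return 0;
	if (!irb->esw.esw0.erw.cons)
		return -EIO;		/* unit check without sense data */
	if (irb->ecw[0] & SNS0_CMD_REJECT)
		return -EOPNOTSUPP;	/* command not supported by device */
	if (irb->ecw[0] & SNS0_INTERVENTION_REQ)
		return -EAGAIN;		/* e.g. device not ready, retry later */
	return -EIO;
}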
*/ - sch = to_subchannel(cdev->dev.parent); - sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; - sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); - sch->sense_ccw.count = SENSE_MAX_COUNT; - sch->sense_ccw.flags = CCW_FLAG_SLI; - - return cio_start (sch, &sch->sense_ccw, 0xff); + sense_ccw = &to_io_private(sch)->sense_ccw; + sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE; + sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw); + sense_ccw->count = SENSE_MAX_COUNT; + sense_ccw->flags = CCW_FLAG_SLI; + + rc = cio_start(sch, sense_ccw, 0xff); + if (rc == -ENODEV || rc == -EACCES) + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + return rc; } /* @@ -335,26 +350,24 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) * If not, the remaining bit have no meaning and we must ignore them. * The esw is not meaningful as well... */ - if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) + if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) return; /* Check for channel checks and interface control checks. */ ccw_device_msg_control_check(cdev, irb); /* Check for path not operational. */ - if (irb->scsw.pno && irb->scsw.fctl != 0 && - (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) + if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) ccw_device_path_notoper(cdev); - if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && - (irb->scsw.dstat & DEV_STAT_CHN_END)) { + if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && + (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) { cdev->private->irb.esw.esw0.erw.cons = 1; cdev->private->flags.dosense = 0; } /* Check if path verification is required. */ if (ccw_device_accumulate_esw_valid(irb) && - irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) + irb->esw.esw0.erw.pvrf) cdev->private->flags.doverify = 1; } @@ -366,11 +379,11 @@ int ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) { ccw_device_accumulate_irb(cdev, irb); - if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) + if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) return -EBUSY; /* Check for basic sense. */ if (cdev->private->flags.dosense && - !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { + !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) { cdev->private->irb.esw.esw0.erw.cons = 1; cdev->private->flags.dosense = 0; return 0; diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c new file mode 100644 index 00000000000..c4f7bf3e24c --- /dev/null +++ b/drivers/s390/cio/eadm_sch.c @@ -0,0 +1,418 @@ +/* + * Driver for s390 eadm subchannels + * + * Copyright IBM Corp. 
2012 + * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/kernel_stat.h> +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/list.h> + +#include <asm/css_chars.h> +#include <asm/debug.h> +#include <asm/isc.h> +#include <asm/cio.h> +#include <asm/scsw.h> +#include <asm/eadm.h> + +#include "eadm_sch.h" +#include "ioasm.h" +#include "cio.h" +#include "css.h" +#include "orb.h" + +MODULE_DESCRIPTION("driver for s390 eadm subchannels"); +MODULE_LICENSE("GPL"); + +#define EADM_TIMEOUT (5 * HZ) +static DEFINE_SPINLOCK(list_lock); +static LIST_HEAD(eadm_list); + +static debug_info_t *eadm_debug; + +#define EADM_LOG(imp, txt) do { \ + debug_text_event(eadm_debug, imp, txt); \ + } while (0) + +static void EADM_LOG_HEX(int level, void *data, int length) +{ + if (!debug_level_enabled(eadm_debug, level)) + return; + while (length > 0) { + debug_event(eadm_debug, level, data, length); + length -= eadm_debug->buf_size; + data += eadm_debug->buf_size; + } +} + +static void orb_init(union orb *orb) +{ + memset(orb, 0, sizeof(union orb)); + orb->eadm.compat1 = 1; + orb->eadm.compat2 = 1; + orb->eadm.fmt = 1; + orb->eadm.x = 1; +} + +static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob) +{ + union orb *orb = &get_eadm_private(sch)->orb; + int cc; + + orb_init(orb); + orb->eadm.aob = (u32)__pa(aob); + orb->eadm.intparm = (u32)(addr_t)sch; + orb->eadm.key = PAGE_DEFAULT_KEY >> 4; + + EADM_LOG(6, "start"); + EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid)); + + cc = ssch(sch->schid, orb); + switch (cc) { + case 0: + sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND; + break; + case 1: /* status pending */ + case 2: /* busy */ + return -EBUSY; + case 3: /* not operational */ + return -ENODEV; + } + return 0; +} + +static int eadm_subchannel_clear(struct subchannel *sch) +{ + int cc; + + cc = csch(sch->schid); + if (cc) + return -ENODEV; + + sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND; + return 0; +} + +static void eadm_subchannel_timeout(unsigned long data) +{ + struct subchannel *sch = (struct subchannel *) data; + + spin_lock_irq(sch->lock); + EADM_LOG(1, "timeout"); + EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid)); + if (eadm_subchannel_clear(sch)) + EADM_LOG(0, "clear failed"); + spin_unlock_irq(sch->lock); +} + +static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires) +{ + struct eadm_private *private = get_eadm_private(sch); + + if (expires == 0) { + del_timer(&private->timer); + return; + } + if (timer_pending(&private->timer)) { + if (mod_timer(&private->timer, jiffies + expires)) + return; + } + private->timer.function = eadm_subchannel_timeout; + private->timer.data = (unsigned long) sch; + private->timer.expires = jiffies + expires; + add_timer(&private->timer); +} + +static void eadm_subchannel_irq(struct subchannel *sch) +{ + struct eadm_private *private = get_eadm_private(sch); + struct eadm_scsw *scsw = &sch->schib.scsw.eadm; + struct irb *irb = &__get_cpu_var(cio_irb); + int error = 0; + + EADM_LOG(6, "irq"); + EADM_LOG_HEX(6, irb, sizeof(*irb)); + + inc_irq_stat(IRQIO_ADM); + + if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) + && scsw->eswf == 1 && irb->esw.eadm.erw.r) + error = -EIO; + + if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC) + error = -ETIMEDOUT; + + eadm_subchannel_set_timeout(sch, 0); + + if (private->state != EADM_BUSY) { + EADM_LOG(1, 
"irq unsol"); + EADM_LOG_HEX(1, irb, sizeof(*irb)); + private->state = EADM_NOT_OPER; + css_sched_sch_todo(sch, SCH_TODO_EVAL); + return; + } + scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error); + private->state = EADM_IDLE; + + if (private->completion) + complete(private->completion); +} + +static struct subchannel *eadm_get_idle_sch(void) +{ + struct eadm_private *private; + struct subchannel *sch; + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + list_for_each_entry(private, &eadm_list, head) { + sch = private->sch; + spin_lock(sch->lock); + if (private->state == EADM_IDLE) { + private->state = EADM_BUSY; + list_move_tail(&private->head, &eadm_list); + spin_unlock(sch->lock); + spin_unlock_irqrestore(&list_lock, flags); + + return sch; + } + spin_unlock(sch->lock); + } + spin_unlock_irqrestore(&list_lock, flags); + + return NULL; +} + +int eadm_start_aob(struct aob *aob) +{ + struct eadm_private *private; + struct subchannel *sch; + unsigned long flags; + int ret; + + sch = eadm_get_idle_sch(); + if (!sch) + return -EBUSY; + + spin_lock_irqsave(sch->lock, flags); + eadm_subchannel_set_timeout(sch, EADM_TIMEOUT); + ret = eadm_subchannel_start(sch, aob); + if (!ret) + goto out_unlock; + + /* Handle start subchannel failure. */ + eadm_subchannel_set_timeout(sch, 0); + private = get_eadm_private(sch); + private->state = EADM_NOT_OPER; + css_sched_sch_todo(sch, SCH_TODO_EVAL); + +out_unlock: + spin_unlock_irqrestore(sch->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(eadm_start_aob); + +static int eadm_subchannel_probe(struct subchannel *sch) +{ + struct eadm_private *private; + int ret; + + private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); + if (!private) + return -ENOMEM; + + INIT_LIST_HEAD(&private->head); + init_timer(&private->timer); + + spin_lock_irq(sch->lock); + set_eadm_private(sch, private); + private->state = EADM_IDLE; + private->sch = sch; + sch->isc = EADM_SCH_ISC; + ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); + if (ret) { + set_eadm_private(sch, NULL); + spin_unlock_irq(sch->lock); + kfree(private); + goto out; + } + spin_unlock_irq(sch->lock); + + spin_lock_irq(&list_lock); + list_add(&private->head, &eadm_list); + spin_unlock_irq(&list_lock); + + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } +out: + return ret; +} + +static void eadm_quiesce(struct subchannel *sch) +{ + struct eadm_private *private = get_eadm_private(sch); + DECLARE_COMPLETION_ONSTACK(completion); + int ret; + + spin_lock_irq(sch->lock); + if (private->state != EADM_BUSY) + goto disable; + + if (eadm_subchannel_clear(sch)) + goto disable; + + private->completion = &completion; + spin_unlock_irq(sch->lock); + + wait_for_completion_io(&completion); + + spin_lock_irq(sch->lock); + private->completion = NULL; + +disable: + eadm_subchannel_set_timeout(sch, 0); + do { + ret = cio_disable_subchannel(sch); + } while (ret == -EBUSY); + + spin_unlock_irq(sch->lock); +} + +static int eadm_subchannel_remove(struct subchannel *sch) +{ + struct eadm_private *private = get_eadm_private(sch); + + spin_lock_irq(&list_lock); + list_del(&private->head); + spin_unlock_irq(&list_lock); + + eadm_quiesce(sch); + + spin_lock_irq(sch->lock); + set_eadm_private(sch, NULL); + spin_unlock_irq(sch->lock); + + kfree(private); + + return 0; +} + +static void eadm_subchannel_shutdown(struct subchannel *sch) +{ + eadm_quiesce(sch); +} + +static int eadm_subchannel_freeze(struct subchannel *sch) +{ 
+ return cio_disable_subchannel(sch); +} + +static int eadm_subchannel_restore(struct subchannel *sch) +{ + return cio_enable_subchannel(sch, (u32)(unsigned long)sch); +} + +/** + * eadm_subchannel_sch_event - process subchannel event + * @sch: subchannel + * @process: non-zero if function is called in process context + * + * An unspecified event occurred for this subchannel. Adjust data according + * to the current operational state of the subchannel. Return zero when the + * event has been handled sufficiently or -EAGAIN when this function should + * be called again in process context. + */ +static int eadm_subchannel_sch_event(struct subchannel *sch, int process) +{ + struct eadm_private *private; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(sch->lock, flags); + if (!device_is_registered(&sch->dev)) + goto out_unlock; + + if (work_pending(&sch->todo_work)) + goto out_unlock; + + if (cio_update_schib(sch)) { + css_sched_sch_todo(sch, SCH_TODO_UNREG); + goto out_unlock; + } + private = get_eadm_private(sch); + if (private->state == EADM_NOT_OPER) + private->state = EADM_IDLE; + +out_unlock: + spin_unlock_irqrestore(sch->lock, flags); + + return ret; +} + +static struct css_device_id eadm_subchannel_ids[] = { + { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(css, eadm_subchannel_ids); + +static struct css_driver eadm_subchannel_driver = { + .drv = { + .name = "eadm_subchannel", + .owner = THIS_MODULE, + }, + .subchannel_type = eadm_subchannel_ids, + .irq = eadm_subchannel_irq, + .probe = eadm_subchannel_probe, + .remove = eadm_subchannel_remove, + .shutdown = eadm_subchannel_shutdown, + .sch_event = eadm_subchannel_sch_event, + .freeze = eadm_subchannel_freeze, + .thaw = eadm_subchannel_restore, + .restore = eadm_subchannel_restore, +}; + +static int __init eadm_sch_init(void) +{ + int ret; + + if (!css_general_characteristics.eadm) + return -ENXIO; + + eadm_debug = debug_register("eadm_log", 16, 1, 16); + if (!eadm_debug) + return -ENOMEM; + + debug_register_view(eadm_debug, &debug_hex_ascii_view); + debug_set_level(eadm_debug, 2); + + isc_register(EADM_SCH_ISC); + ret = css_driver_register(&eadm_subchannel_driver); + if (ret) + goto cleanup; + + return ret; + +cleanup: + isc_unregister(EADM_SCH_ISC); + debug_unregister(eadm_debug); + return ret; +} + +static void __exit eadm_sch_exit(void) +{ + css_driver_unregister(&eadm_subchannel_driver); + isc_unregister(EADM_SCH_ISC); + debug_unregister(eadm_debug); +} +module_init(eadm_sch_init); +module_exit(eadm_sch_exit); diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h new file mode 100644 index 00000000000..9664e4653f9 --- /dev/null +++ b/drivers/s390/cio/eadm_sch.h @@ -0,0 +1,22 @@ +#ifndef EADM_SCH_H +#define EADM_SCH_H + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/timer.h> +#include <linux/list.h> +#include "orb.h" + +struct eadm_private { + union orb orb; + enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state; + struct completion *completion; + struct subchannel *sch; + struct timer_list timer; + struct list_head head; +} __aligned(8); + +#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev)) +#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p)) + +#endif diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c new file mode 100644 index 00000000000..ca5e9bb9d45 --- /dev/null +++ b/drivers/s390/cio/fcx.c @@ -0,0 +1,350 @@ +/* + * Functions for assembling fcx enabled I/O control blocks. 
+ * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <asm/fcx.h> +#include "cio.h" + +/** + * tcw_get_intrg - return pointer to associated interrogate tcw + * @tcw: pointer to the original tcw + * + * Return a pointer to the interrogate tcw associated with the specified tcw + * or %NULL if there is no associated interrogate tcw. + */ +struct tcw *tcw_get_intrg(struct tcw *tcw) +{ + return (struct tcw *) ((addr_t) tcw->intrg); +} +EXPORT_SYMBOL(tcw_get_intrg); + +/** + * tcw_get_data - return pointer to input/output data associated with tcw + * @tcw: pointer to the tcw + * + * Return the input or output data address specified in the tcw depending + * on whether the r-bit or the w-bit is set. If neither bit is set, return + * %NULL. + */ +void *tcw_get_data(struct tcw *tcw) +{ + if (tcw->r) + return (void *) ((addr_t) tcw->input); + if (tcw->w) + return (void *) ((addr_t) tcw->output); + return NULL; +} +EXPORT_SYMBOL(tcw_get_data); + +/** + * tcw_get_tccb - return pointer to tccb associated with tcw + * @tcw: pointer to the tcw + * + * Return pointer to the tccb associated with this tcw. + */ +struct tccb *tcw_get_tccb(struct tcw *tcw) +{ + return (struct tccb *) ((addr_t) tcw->tccb); +} +EXPORT_SYMBOL(tcw_get_tccb); + +/** + * tcw_get_tsb - return pointer to tsb associated with tcw + * @tcw: pointer to the tcw + * + * Return pointer to the tsb associated with this tcw. + */ +struct tsb *tcw_get_tsb(struct tcw *tcw) +{ + return (struct tsb *) ((addr_t) tcw->tsb); +} +EXPORT_SYMBOL(tcw_get_tsb); + +/** + * tcw_init - initialize tcw data structure + * @tcw: pointer to the tcw to be initialized + * @r: initial value of the r-bit + * @w: initial value of the w-bit + * + * Initialize all fields of the specified tcw data structure with zero and + * fill in the format, flags, r and w fields. + */ +void tcw_init(struct tcw *tcw, int r, int w) +{ + memset(tcw, 0, sizeof(struct tcw)); + tcw->format = TCW_FORMAT_DEFAULT; + tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT); + if (r) + tcw->r = 1; + if (w) + tcw->w = 1; +} +EXPORT_SYMBOL(tcw_init); + +static inline size_t tca_size(struct tccb *tccb) +{ + return tccb->tcah.tcal - 12; +} + +static u32 calc_dcw_count(struct tccb *tccb) +{ + int offset; + struct dcw *dcw; + u32 count = 0; + size_t size; + + size = tca_size(tccb); + for (offset = 0; offset < size;) { + dcw = (struct dcw *) &tccb->tca[offset]; + count += dcw->count; + if (!(dcw->flags & DCW_FLAGS_CC)) + break; + offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4); + } + return count; +} + +static u32 calc_cbc_size(struct tidaw *tidaw, int num) +{ + int i; + u32 cbc_data; + u32 cbc_count = 0; + u64 data_count = 0; + + for (i = 0; i < num; i++) { + if (tidaw[i].flags & TIDAW_FLAGS_LAST) + break; + /* TODO: find out if padding applies to total of data + * transferred or data transferred by this tidaw. Assumption: + * applies to total. 
*/
+		data_count += tidaw[i].count;
+		if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
+			cbc_data = 4 + ALIGN(data_count, 4) - data_count;
+			cbc_count += cbc_data;
+			data_count += cbc_data;
+		}
+	}
+	return cbc_count;
+}
+
+/**
+ * tcw_finalize - finalize tcw length fields and tidaw list
+ * @tcw: pointer to the tcw
+ * @num_tidaws: the number of tidaws used to address input/output data or zero
+ * if no tida is used
+ *
+ * Calculate the input-/output-count and tccbl field in the tcw, add a
+ * tcat to the tccb and terminate the data tidaw list if used.
+ *
+ * Note: in case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void tcw_finalize(struct tcw *tcw, int num_tidaws)
+{
+	struct tidaw *tidaw;
+	struct tccb *tccb;
+	struct tccb_tcat *tcat;
+	u32 count;
+
+	/* Terminate tidaw list. */
+	tidaw = tcw_get_data(tcw);
+	if (num_tidaws > 0)
+		tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
+	/* Add tcat to tccb. */
+	tccb = tcw_get_tccb(tcw);
+	tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
+	memset(tcat, 0, sizeof(*tcat));
+	/* Calculate tcw input/output count and tcat transport count. */
+	count = calc_dcw_count(tccb);
+	if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
+		count += calc_cbc_size(tidaw, num_tidaws);
+	if (tcw->r)
+		tcw->input_count = count;
+	else if (tcw->w)
+		tcw->output_count = count;
+	tcat->count = ALIGN(count, 4) + 4;
+	/* Calculate tccbl. */
+	tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
+		      sizeof(struct tccb_tcat) - 20) >> 2;
+}
+EXPORT_SYMBOL(tcw_finalize);
+
+/**
+ * tcw_set_intrg - set the interrogate tcw address of a tcw
+ * @tcw: the tcw address
+ * @intrg_tcw: the address of the interrogate tcw
+ *
+ * Set the address of the interrogate tcw in the specified tcw.
+ */
+void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
+{
+	tcw->intrg = (u32) ((addr_t) intrg_tcw);
+}
+EXPORT_SYMBOL(tcw_set_intrg);
+
+/**
+ * tcw_set_data - set data address and tida flag of a tcw
+ * @tcw: the tcw address
+ * @data: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of a tcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
+{
+	if (tcw->r) {
+		tcw->input = (u64) ((addr_t) data);
+		if (use_tidal)
+			tcw->flags |= TCW_FLAGS_INPUT_TIDA;
+	} else if (tcw->w) {
+		tcw->output = (u64) ((addr_t) data);
+		if (use_tidal)
+			tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
+	}
+}
+EXPORT_SYMBOL(tcw_set_data);
+
+/**
+ * tcw_set_tccb - set tccb address of a tcw
+ * @tcw: the tcw address
+ * @tccb: the tccb address
+ *
+ * Set the address of the tccb in the specified tcw.
+ */
+void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
+{
+	tcw->tccb = (u64) ((addr_t) tccb);
+}
+EXPORT_SYMBOL(tcw_set_tccb);
+
+/**
+ * tcw_set_tsb - set tsb address of a tcw
+ * @tcw: the tcw address
+ * @tsb: the tsb address
+ *
+ * Set the address of the tsb in the specified tcw.
+ */
+void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
+{
+	tcw->tsb = (u64) ((addr_t) tsb);
+}
+EXPORT_SYMBOL(tcw_set_tsb);
+
+/**
+ * tccb_init - initialize tccb
+ * @tccb: the tccb address
+ * @size: the maximum size of the tccb
+ * @sac: the service-action-code to be used
+ *
+ * Initialize the header of the specified tccb by resetting all values to zero
+ * and filling in defaults for format, sac and initial tcal fields.
+ */
+void tccb_init(struct tccb *tccb, size_t size, u32 sac)
+{
+	memset(tccb, 0, size);
+	tccb->tcah.format = TCCB_FORMAT_DEFAULT;
+	tccb->tcah.sac = sac;
+	tccb->tcah.tcal = 12;
+}
+EXPORT_SYMBOL(tccb_init);
+
+/**
+ * tsb_init - initialize tsb
+ * @tsb: the tsb address
+ *
+ * Initialize the specified tsb by resetting all values to zero.
+ */
+void tsb_init(struct tsb *tsb)
+{
+	memset(tsb, 0, sizeof(*tsb));
+}
+EXPORT_SYMBOL(tsb_init);
+
+/**
+ * tccb_add_dcw - add a dcw to the tccb
+ * @tccb: the tccb address
+ * @tccb_size: the maximum tccb size
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: pointer to control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified tccb by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space as defined by @tccb_size.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect added
+ * content.
+ */
+struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
+			 void *cd, u8 cd_count, u32 count)
+{
+	struct dcw *dcw;
+	int size;
+	int tca_offset;
+
+	/* Check for space. */
+	tca_offset = tca_size(tccb);
+	size = ALIGN(sizeof(struct dcw) + cd_count, 4);
+	if (sizeof(struct tccb_tcah) + tca_offset + size +
+	    sizeof(struct tccb_tcat) > tccb_size)
+		return ERR_PTR(-ENOSPC);
+	/* Add dcw to tca. */
+	dcw = (struct dcw *) &tccb->tca[tca_offset];
+	memset(dcw, 0, size);
+	dcw->cmd = cmd;
+	dcw->flags = flags;
+	dcw->count = count;
+	dcw->cd_count = cd_count;
+	if (cd)
+		memcpy(&dcw->cd[0], cd, cd_count);
+	tccb->tcah.tcal += size;
+	return dcw;
+}
+EXPORT_SYMBOL(tccb_add_dcw);
+
+/**
+ * tcw_add_tidaw - add a tidaw to a tcw
+ * @tcw: the tcw address
+ * @num_tidaws: the current number of tidaws
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified tcw
+ * (depending on the value of the r-flag and w-flag) and return a pointer to
+ * the new tidaw.
+ *
+ * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
+ * must ensure that there is enough space for the new tidaw. The last-tidaw
+ * flag for the last tidaw in the list will be set by tcw_finalize.
+ */
+struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
+			    void *addr, u32 count)
+{
+	struct tidaw *tidaw;
+
+	/* Add tidaw to tidaw-list.
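	   (the new entry is written at index num_tidaws of the list
	   returned by tcw_get_data())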
*/ + tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws; + memset(tidaw, 0, sizeof(struct tidaw)); + tidaw->flags = flags; + tidaw->count = count; + tidaw->addr = (u64) ((addr_t) addr); + return tidaw; +} +EXPORT_SYMBOL(tcw_add_tidaw); diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c new file mode 100644 index 00000000000..5a999084a22 --- /dev/null +++ b/drivers/s390/cio/idset.c @@ -0,0 +1,131 @@ +/* + * Copyright IBM Corp. 2007, 2012 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/vmalloc.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include "idset.h" +#include "css.h" + +struct idset { + int num_ssid; + int num_id; + unsigned long bitmap[0]; +}; + +static inline unsigned long bitmap_size(int num_ssid, int num_id) +{ + return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); +} + +static struct idset *idset_new(int num_ssid, int num_id) +{ + struct idset *set; + + set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id)); + if (set) { + set->num_ssid = num_ssid; + set->num_id = num_id; + memset(set->bitmap, 0, bitmap_size(num_ssid, num_id)); + } + return set; +} + +void idset_free(struct idset *set) +{ + vfree(set); +} + +void idset_clear(struct idset *set) +{ + memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id)); +} + +void idset_fill(struct idset *set) +{ + memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); +} + +static inline void idset_add(struct idset *set, int ssid, int id) +{ + set_bit(ssid * set->num_id + id, set->bitmap); +} + +static inline void idset_del(struct idset *set, int ssid, int id) +{ + clear_bit(ssid * set->num_id + id, set->bitmap); +} + +static inline int idset_contains(struct idset *set, int ssid, int id) +{ + return test_bit(ssid * set->num_id + id, set->bitmap); +} + +static inline int idset_get_first(struct idset *set, int *ssid, int *id) +{ + int bitnum; + + bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); + if (bitnum >= set->num_ssid * set->num_id) + return 0; + *ssid = bitnum / set->num_id; + *id = bitnum % set->num_id; + return 1; +} + +struct idset *idset_sch_new(void) +{ + return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1); +} + +void idset_sch_add(struct idset *set, struct subchannel_id schid) +{ + idset_add(set, schid.ssid, schid.sch_no); +} + +void idset_sch_del(struct idset *set, struct subchannel_id schid) +{ + idset_del(set, schid.ssid, schid.sch_no); +} + +/* Clear ids starting from @schid up to end of subchannel set. 
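   (for example, ssid 0 / sch_no 0x0010 clears the bits for subchannels
    0x0010 through 0xffff of subchannel set 0)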
*/ +void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid) +{ + int pos = schid.ssid * set->num_id + schid.sch_no; + + bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no); +} + +int idset_sch_contains(struct idset *set, struct subchannel_id schid) +{ + return idset_contains(set, schid.ssid, schid.sch_no); +} + +int idset_sch_get_first(struct idset *set, struct subchannel_id *schid) +{ + int ssid = 0; + int id = 0; + int rc; + + rc = idset_get_first(set, &ssid, &id); + if (rc) { + init_subchannel_id(schid); + schid->ssid = ssid; + schid->sch_no = id; + } + return rc; +} + +int idset_is_empty(struct idset *set) +{ + return bitmap_empty(set->bitmap, set->num_ssid * set->num_id); +} + +void idset_add_set(struct idset *to, struct idset *from) +{ + int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id); + + bitmap_or(to->bitmap, to->bitmap, from->bitmap, len); +} diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h new file mode 100644 index 00000000000..06d3bc01bb0 --- /dev/null +++ b/drivers/s390/cio/idset.h @@ -0,0 +1,26 @@ +/* + * Copyright IBM Corp. 2007, 2012 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#ifndef S390_IDSET_H +#define S390_IDSET_H S390_IDSET_H + +#include <asm/schid.h> + +struct idset; + +void idset_free(struct idset *set); +void idset_clear(struct idset *set); +void idset_fill(struct idset *set); + +struct idset *idset_sch_new(void); +void idset_sch_add(struct idset *set, struct subchannel_id id); +void idset_sch_del(struct idset *set, struct subchannel_id id); +void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid); +int idset_sch_contains(struct idset *set, struct subchannel_id id); +int idset_sch_get_first(struct idset *set, struct subchannel_id *id); +int idset_is_empty(struct idset *set); +void idset_add_set(struct idset *to, struct idset *from); + +#endif /* S390_IDSET_H */ diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h new file mode 100644 index 00000000000..b108f4a5c7d --- /dev/null +++ b/drivers/s390/cio/io_sch.h @@ -0,0 +1,217 @@ +#ifndef S390_IO_SCH_H +#define S390_IO_SCH_H + +#include <linux/types.h> +#include <asm/schid.h> +#include <asm/ccwdev.h> +#include <asm/irq.h> +#include "css.h" +#include "orb.h" + +struct io_subchannel_private { + union orb orb; /* operation request block */ + struct ccw1 sense_ccw; /* static ccw for sense command */ + struct ccw_device *cdev;/* pointer to the child ccw device */ + struct { + unsigned int suspend:1; /* allow suspend */ + unsigned int prefetch:1;/* deny prefetch */ + unsigned int inter:1; /* suppress intermediate interrupts */ + } __packed options; +} __aligned(8); + +#define to_io_private(n) ((struct io_subchannel_private *) \ + dev_get_drvdata(&(n)->dev)) +#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p)) + +static inline struct ccw_device *sch_get_cdev(struct subchannel *sch) +{ + struct io_subchannel_private *priv = to_io_private(sch); + return priv ? priv->cdev : NULL; +} + +static inline void sch_set_cdev(struct subchannel *sch, + struct ccw_device *cdev) +{ + struct io_subchannel_private *priv = to_io_private(sch); + if (priv) + priv->cdev = cdev; +} + +#define MAX_CIWS 8 + +/* + * Possible status values for a CCW request's I/O. + */ +enum io_status { + IO_DONE, + IO_RUNNING, + IO_STATUS_ERROR, + IO_PATH_ERROR, + IO_REJECTED, + IO_KILLED +}; + +/** + * ccw_request - Internal CCW request. 
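 * (used by the common I/O layer itself, e.g. for the SenseID and
 *  path-grouping channel programs issued during device recognition and
 *  path verification)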
+ * @cp: channel program to start + * @timeout: maximum allowable time in jiffies between start I/O and interrupt + * @maxretries: number of retries per I/O operation and path + * @lpm: mask of paths to use + * @check: optional callback that determines if results are final + * @filter: optional callback to adjust request status based on IRB data + * @callback: final callback + * @data: user-defined pointer passed to all callbacks + * @singlepath: if set, use only one path from @lpm per start I/O + * @cancel: non-zero if request was cancelled + * @done: non-zero if request was finished + * @mask: current path mask + * @retries: current number of retries + * @drc: delayed return code + */ +struct ccw_request { + struct ccw1 *cp; + unsigned long timeout; + u16 maxretries; + u8 lpm; + int (*check)(struct ccw_device *, void *); + enum io_status (*filter)(struct ccw_device *, void *, struct irb *, + enum io_status); + void (*callback)(struct ccw_device *, void *, int); + void *data; + unsigned int singlepath:1; + /* These fields are used internally. */ + unsigned int cancel:1; + unsigned int done:1; + u16 mask; + u16 retries; + int drc; +} __attribute__((packed)); + +/* + * sense-id response buffer layout + */ +struct senseid { + /* common part */ + u8 reserved; /* always 0x'FF' */ + u16 cu_type; /* control unit type */ + u8 cu_model; /* control unit model */ + u16 dev_type; /* device type */ + u8 dev_model; /* device model */ + u8 unused; /* padding byte */ + /* extended part */ + struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */ +} __attribute__ ((packed, aligned(4))); + +enum cdev_todo { + CDEV_TODO_NOTHING, + CDEV_TODO_ENABLE_CMF, + CDEV_TODO_REBIND, + CDEV_TODO_REGISTER, + CDEV_TODO_UNREG, + CDEV_TODO_UNREG_EVAL, +}; + +#define FAKE_CMD_IRB 1 +#define FAKE_TM_IRB 2 + +struct ccw_device_private { + struct ccw_device *cdev; + struct subchannel *sch; + int state; /* device state */ + atomic_t onoff; + struct ccw_dev_id dev_id; /* device id */ + struct subchannel_id schid; /* subchannel number */ + struct ccw_request req; /* internal I/O request */ + int iretry; + u8 pgid_valid_mask; /* mask of valid PGIDs */ + u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ + u8 pgid_reset_mask; /* mask of PGIDs which were reset */ + u8 path_noirq_mask; /* mask of paths for which no irq was + received */ + u8 path_notoper_mask; /* mask of paths which were found + not operable */ + u8 path_gone_mask; /* mask of paths, that became unavailable */ + u8 path_new_mask; /* mask of paths, that became available */ + struct { + unsigned int fast:1; /* post with "channel end" */ + unsigned int repall:1; /* report every interrupt status */ + unsigned int pgroup:1; /* do path grouping */ + unsigned int force:1; /* allow forced online */ + unsigned int mpath:1; /* do multipathing */ + } __attribute__ ((packed)) options; + struct { + unsigned int esid:1; /* Ext. SenseID supported by HW */ + unsigned int dosense:1; /* delayed SENSE required */ + unsigned int doverify:1; /* delayed path verification */ + unsigned int donotify:1; /* call notify function */ + unsigned int recog_done:1; /* dev. recog. 
complete */ + unsigned int fake_irb:2; /* deliver faked irb */ + unsigned int resuming:1; /* recognition while resume */ + unsigned int pgroup:1; /* pathgroup is set up */ + unsigned int mpath:1; /* multipathing is set up */ + unsigned int pgid_unknown:1;/* unknown pgid state */ + unsigned int initialized:1; /* set if initial reference held */ + } __attribute__((packed)) flags; + unsigned long intparm; /* user interruption parameter */ + struct qdio_irq *qdio_data; + struct irb irb; /* device status */ + struct senseid senseid; /* SenseID info */ + struct pgid pgid[8]; /* path group IDs per chpid*/ + struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ + struct work_struct todo_work; + enum cdev_todo todo; + wait_queue_head_t wait_q; + struct timer_list timer; + void *cmb; /* measurement information */ + struct list_head cmb_list; /* list of measured devices */ + u64 cmb_start_time; /* clock value of cmb reset */ + void *cmb_wait; /* deferred cmb enable/disable */ + enum interruption_class int_class; +}; + +static inline int rsch(struct subchannel_id schid) +{ + register struct subchannel_id reg1 asm("1") = schid; + int ccode; + + asm volatile( + " rsch\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) + : "d" (reg1) + : "cc", "memory"); + return ccode; +} + +static inline int hsch(struct subchannel_id schid) +{ + register struct subchannel_id reg1 asm("1") = schid; + int ccode; + + asm volatile( + " hsch\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) + : "d" (reg1) + : "cc"); + return ccode; +} + +static inline int xsch(struct subchannel_id schid) +{ + register struct subchannel_id reg1 asm("1") = schid; + int ccode; + + asm volatile( + " .insn rre,0xb2760000,%1,0\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) + : "d" (reg1) + : "cc"); + return ccode; +} + +#endif diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index c874607d9a8..4d80fc67a06 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -1,12 +1,16 @@ #ifndef S390_CIO_IOASM_H #define S390_CIO_IOASM_H +#include <asm/chpid.h> +#include <asm/schid.h> +#include "orb.h" +#include "cio.h" + /* * TPI info structure */ struct tpi_info { - __u32 reserved1 : 16; /* reserved 0x00000001 */ - __u32 irq : 16; /* aka. 
subchannel number */ + struct subchannel_id schid; __u32 intparm; /* interruption parameter */ __u32 adapter_IO : 1; __u32 reserved2 : 1; @@ -21,207 +25,142 @@ struct tpi_info { * Some S390 specific IO instructions as inline */ -extern __inline__ int stsch(int irq, volatile struct schib *addr) +static inline int stsch_err(struct subchannel_id schid, struct schib *addr) { - int ccode; - - __asm__ __volatile__( - " lr 1,%1\n" - " stsch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (irq | 0x10000), "a" (addr) - : "cc", "1" ); - return ccode; -} + register struct subchannel_id reg1 asm ("1") = schid; + int ccode = -EIO; -extern __inline__ int msch(int irq, volatile struct schib *addr) -{ - int ccode; - - __asm__ __volatile__( - " lr 1,%1\n" - " msch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) - : "cc", "1" ); - return ccode; -} - -extern __inline__ int msch_err(int irq, volatile struct schib *addr) -{ - int ccode; - - __asm__ __volatile__( - " lhi %0,%3\n" - " lr 1,%1\n" - " msch 0(%2)\n" - "0: ipm %0\n" - " srl %0,28\n" + asm volatile( + " stsch 0(%3)\n" + "0: ipm %0\n" + " srl %0,28\n" "1:\n" -#ifdef CONFIG_ARCH_S390X - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 0b,1b\n" - ".previous" -#else - ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,1b\n" - ".previous" -#endif - : "=&d" (ccode) - : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO) - : "cc", "1" ); - return ccode; -} - -extern __inline__ int tsch(int irq, volatile struct irb *addr) -{ - int ccode; - - __asm__ __volatile__( - " lr 1,%1\n" - " tsch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) - : "cc", "1" ); + EX_TABLE(0b,1b) + : "+d" (ccode), "=m" (*addr) + : "d" (reg1), "a" (addr) + : "cc"); return ccode; } -extern __inline__ int tpi( volatile struct tpi_info *addr) +static inline int msch(struct subchannel_id schid, struct schib *addr) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " tpi 0(%1)\n" - " ipm %0\n" - " srl %0,28" + asm volatile( + " msch 0(%2)\n" + " ipm %0\n" + " srl %0,28" : "=d" (ccode) - : "a" (addr) - : "cc", "1" ); + : "d" (reg1), "a" (addr), "m" (*addr) + : "cc"); return ccode; } -extern __inline__ int ssch(int irq, volatile struct orb *addr) +static inline int msch_err(struct subchannel_id schid, struct schib *addr) { - int ccode; + register struct subchannel_id reg1 asm ("1") = schid; + int ccode = -EIO; - __asm__ __volatile__( - " lr 1,%1\n" - " ssch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) - : "cc", "1" ); + asm volatile( + " msch 0(%2)\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b,1b) + : "+d" (ccode) + : "d" (reg1), "a" (addr), "m" (*addr) + : "cc"); return ccode; } -extern __inline__ int rsch(int irq) +static inline int tsch(struct subchannel_id schid, struct irb *addr) { + register struct subchannel_id reg1 asm ("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " rsch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (irq | 0x10000L) - : "cc", "1" ); + asm volatile( + " tsch 0(%3)\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode), "=m" (*addr) + : "d" (reg1), "a" (addr) + : "cc"); return ccode; } -extern __inline__ int csch(int irq) +static inline int ssch(struct subchannel_id schid, union orb *addr) { - int ccode; + register struct subchannel_id reg1 asm("1") = schid; + int ccode = -EIO; - __asm__ __volatile__( - " lr 1,%1\n" - " csch\n" - " ipm 
%0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (irq | 0x10000L) - : "cc", "1" ); + asm volatile( + " ssch 0(%2)\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (ccode) + : "d" (reg1), "a" (addr), "m" (*addr) + : "cc", "memory"); return ccode; } -extern __inline__ int hsch(int irq) +static inline int csch(struct subchannel_id schid) { + register struct subchannel_id reg1 asm("1") = schid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " hsch\n" - " ipm %0\n" - " srl %0,28" + asm volatile( + " csch\n" + " ipm %0\n" + " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) - : "cc", "1" ); + : "d" (reg1) + : "cc"); return ccode; } -extern __inline__ int xsch(int irq) +static inline int tpi(struct tpi_info *addr) { int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " .insn rre,0xb2760000,%1,0\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (irq | 0x10000L) - : "cc", "1" ); + asm volatile( + " tpi 0(%2)\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode), "=m" (*addr) + : "a" (addr) + : "cc"); return ccode; } -extern __inline__ int chsc(void *chsc_area) +static inline int chsc(void *chsc_area) { + typedef struct { char _[4096]; } addr_type; int cc; - __asm__ __volatile__ ( - ".insn rre,0xb25f0000,%1,0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) - : "d" (chsc_area) - : "cc" ); - + asm volatile( + " .insn rre,0xb25f0000,%2,0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc), "=m" (*(addr_type *) chsc_area) + : "d" (chsc_area), "m" (*(addr_type *) chsc_area) + : "cc"); return cc; } -extern __inline__ int iac( void) -{ - int ccode; - - __asm__ __volatile__( - " iac 1\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) : : "cc", "1" ); - return ccode; -} - -extern __inline__ int rchp(int chpid) +static inline int rchp(struct chp_id chpid) { + register struct chp_id reg1 asm ("1") = chpid; int ccode; - __asm__ __volatile__( - " lr 1,%1\n" - " rchp\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (chpid) - : "cc", "1" ); + asm volatile( + " lr 1,%1\n" + " rchp\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) : "d" (reg1) : "cc"); return ccode; } diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c new file mode 100644 index 00000000000..c592087be0f --- /dev/null +++ b/drivers/s390/cio/isc.c @@ -0,0 +1,68 @@ +/* + * Functions for registration of I/O interruption subclasses on s390. + * + * Copyright IBM Corp. 2008 + * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/spinlock.h> +#include <linux/module.h> +#include <asm/isc.h> + +static unsigned int isc_refs[MAX_ISC + 1]; +static DEFINE_SPINLOCK(isc_ref_lock); + + +/** + * isc_register - register an I/O interruption subclass. + * @isc: I/O interruption subclass to register + * + * The number of users for @isc is increased. If this is the first user to + * register @isc, the corresponding I/O interruption subclass mask is enabled. + * + * Context: + * This function must not be called in interrupt context. + */ +void isc_register(unsigned int isc) +{ + if (isc > MAX_ISC) { + WARN_ON(1); + return; + } + + spin_lock(&isc_ref_lock); + if (isc_refs[isc] == 0) + ctl_set_bit(6, 31 - isc); + isc_refs[isc]++; + spin_unlock(&isc_ref_lock); +} +EXPORT_SYMBOL_GPL(isc_register); + +/** + * isc_unregister - unregister an I/O interruption subclass. + * @isc: I/O interruption subclass to unregister + * + * The number of users for @isc is decreased. If this is the last user to + * unregister @isc, the corresponding I/O interruption subclass mask is + * disabled. 
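 * (every isc_register() call for a given ISC must eventually be balanced
 *  by exactly one isc_unregister() call)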
+ * Note: This function must not be called if isc_register() hasn't been called
+ * before by the driver for @isc.
+ *
+ * Context:
+ * This function must not be called in interrupt context.
+ */
+void isc_unregister(unsigned int isc)
+{
+	spin_lock(&isc_ref_lock);
+	/* check for misuse */
+	if (isc > MAX_ISC || isc_refs[isc] == 0) {
+		WARN_ON(1);
+		goto out_unlock;
+	}
+	if (isc_refs[isc] == 1)
+		ctl_clear_bit(6, 31 - isc);
+	isc_refs[isc]--;
+out_unlock:
+	spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_unregister);
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
new file mode 100644
index 00000000000..358ee16d10a
--- /dev/null
+++ b/drivers/s390/cio/itcw.c
@@ -0,0 +1,369 @@
+/*
+ * Functions for incremental construction of fcx enabled I/O control blocks.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include <asm/itcw.h>
+
+/**
+ * struct itcw - incremental tcw helper data type
+ *
+ * This structure serves as a handle for the incremental construction of a
+ * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
+ * tcw and associated data. The data structures are contained inside a single
+ * contiguous buffer provided by the user.
+ *
+ * The itcw construction functions take care of overall data integrity:
+ * - reset unused fields to zero
+ * - fill in required pointers
+ * - ensure required alignment for data structures
+ * - prevent data structures from crossing a 4k-byte boundary where required
+ * - calculate tccb-related length fields
+ * - optionally provide ready-made interrogate tcw and associated structures
+ *
+ * Restrictions apply to the itcws created with these construction functions:
+ * - tida only supported for data address, not for tccb
+ * - only contiguous tidaw-lists (no ttic)
+ * - total number of bytes required per itcw may not exceed 4k bytes
+ * - either read or write operation (may not work with r=0 and w=0)
+ *
+ * Example:
+ *	struct itcw *itcw;
+ *	void *buffer;
+ *	size_t size;
+ *
+ *	size = itcw_calc_size(1, 2, 0);
+ *	buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ *	if (!buffer)
+ *		return -ENOMEM;
+ *	itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
+ *	if (IS_ERR(itcw))
+ *		return PTR_ERR(itcw);
+ *	itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
+ *	itcw_add_tidaw(itcw, 0, 0x30000, 20);
+ *	itcw_add_tidaw(itcw, 0, 0x40000, 52);
+ *	itcw_finalize(itcw);
+ *
+ */
+struct itcw {
+	struct tcw *tcw;
+	struct tcw *intrg_tcw;
+	int num_tidaws;
+	int max_tidaws;
+	int intrg_num_tidaws;
+	int intrg_max_tidaws;
+};
+
+/**
+ * itcw_get_tcw - return pointer to tcw associated with the itcw
+ * @itcw: address of the itcw
+ *
+ * Return pointer to the tcw associated with the itcw.
+ */
+struct tcw *itcw_get_tcw(struct itcw *itcw)
+{
+	return itcw->tcw;
+}
+EXPORT_SYMBOL(itcw_get_tcw);
+
+/**
+ * itcw_calc_size - return the size of an itcw with the given parameters
+ * @intrg: if non-zero, add an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Calculate and return the number of bytes required to hold an itcw with the
+ * given parameters and assuming tccbs with maximum size.
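 * For example, the buffer used in the struct itcw example above (an
 * interrogate tcw plus room for two data tidaws) is sized with
 * itcw_calc_size(1, 2, 0).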
+ * + * Note that the resulting size also contains bytes needed for alignment + * padding as well as padding to ensure that data structures don't cross a + * 4k-boundary where required. + */ +size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) +{ + size_t len; + int cross_count; + + /* Main data. */ + len = sizeof(struct itcw); + len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + + /* TSB */ sizeof(struct tsb) + + /* TIDAL */ max_tidaws * sizeof(struct tidaw); + /* Interrogate data. */ + if (intrg) { + len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + + /* TSB */ sizeof(struct tsb) + + /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); + } + + /* Maximum required alignment padding. */ + len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; + + /* TIDAW lists may not cross a 4k boundary. To cross a + * boundary we need to add a TTIC TIDAW. We need to reserve + * one additional TIDAW for a TTIC that we may need to add due + * to the placement of the data chunk in memory, and a further + * TIDAW for each page boundary that the TIDAW list may cross + * due to it's own size. + */ + if (max_tidaws) { + cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + len += cross_count * sizeof(struct tidaw); + } + if (intrg_max_tidaws) { + cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + len += cross_count * sizeof(struct tidaw); + } + return len; +} +EXPORT_SYMBOL(itcw_calc_size); + +#define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095)) + +static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, + int align, int check_4k) +{ + addr_t addr; + + addr = ALIGN(*start, align); + if (check_4k && CROSS4K(addr, len)) { + addr = ALIGN(addr, 4096); + addr = ALIGN(addr, align); + } + if (addr + len > end) + return ERR_PTR(-ENOSPC); + *start = addr + len; + return (void *) addr; +} + +/** + * itcw_init - initialize incremental tcw data structure + * @buffer: address of buffer to use for data structures + * @size: number of bytes in buffer + * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write + * operation tcw + * @intrg: if non-zero, add and initialize an interrogate tcw + * @max_tidaws: maximum number of tidaws to be used for data addressing or zero + * if no tida is to be used. + * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing + * by the interrogate tcw, if specified + * + * Prepare the specified buffer to be used as an incremental tcw, i.e. a + * helper data structure that can be used to construct a valid tcw by + * successive calls to other helper functions. Note: the buffer needs to be + * located below the 2G address limit. The resulting tcw has the following + * restrictions: + * - no tccb tidal + * - input/output tidal is contiguous (no ttic) + * - total data should not exceed 4k + * - tcw specifies either read or write operation + * + * On success, return pointer to the resulting incremental tcw data structure, + * ERR_PTR otherwise. + */ +struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, + int max_tidaws, int intrg_max_tidaws) +{ + struct itcw *itcw; + void *chunk; + addr_t start; + addr_t end; + int cross_count; + + /* Check for 2G limit. */ + start = (addr_t) buffer; + end = start + size; + if (end > (1 << 31)) + return ERR_PTR(-EINVAL); + memset(buffer, 0, size); + /* ITCW. 
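	   (the struct itcw bookkeeping data itself is carved out of the
	    start of the user-supplied buffer)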
*/ + chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); + if (IS_ERR(chunk)) + return chunk; + itcw = chunk; + /* allow for TTIC tidaws that may be needed to cross a page boundary */ + cross_count = 0; + if (max_tidaws) + cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + itcw->max_tidaws = max_tidaws + cross_count; + cross_count = 0; + if (intrg_max_tidaws) + cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count; + /* Main TCW. */ + chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); + if (IS_ERR(chunk)) + return chunk; + itcw->tcw = chunk; + tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0, + (op == ITCW_OP_WRITE) ? 1 : 0); + /* Interrogate TCW. */ + if (intrg) { + chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); + if (IS_ERR(chunk)) + return chunk; + itcw->intrg_tcw = chunk; + tcw_init(itcw->intrg_tcw, 1, 0); + tcw_set_intrg(itcw->tcw, itcw->intrg_tcw); + } + /* Data TIDAL. */ + if (max_tidaws > 0) { + chunk = fit_chunk(&start, end, sizeof(struct tidaw) * + itcw->max_tidaws, 16, 0); + if (IS_ERR(chunk)) + return chunk; + tcw_set_data(itcw->tcw, chunk, 1); + } + /* Interrogate data TIDAL. */ + if (intrg && (intrg_max_tidaws > 0)) { + chunk = fit_chunk(&start, end, sizeof(struct tidaw) * + itcw->intrg_max_tidaws, 16, 0); + if (IS_ERR(chunk)) + return chunk; + tcw_set_data(itcw->intrg_tcw, chunk, 1); + } + /* TSB. */ + chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); + if (IS_ERR(chunk)) + return chunk; + tsb_init(chunk); + tcw_set_tsb(itcw->tcw, chunk); + /* Interrogate TSB. */ + if (intrg) { + chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); + if (IS_ERR(chunk)) + return chunk; + tsb_init(chunk); + tcw_set_tsb(itcw->intrg_tcw, chunk); + } + /* TCCB. */ + chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); + if (IS_ERR(chunk)) + return chunk; + tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); + tcw_set_tccb(itcw->tcw, chunk); + /* Interrogate TCCB. */ + if (intrg) { + chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); + if (IS_ERR(chunk)) + return chunk; + tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); + tcw_set_tccb(itcw->intrg_tcw, chunk); + tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, + sizeof(struct dcw_intrg_data), 0); + tcw_finalize(itcw->intrg_tcw, 0); + } + return itcw; +} +EXPORT_SYMBOL(itcw_init); + +/** + * itcw_add_dcw - add a dcw to the itcw + * @itcw: address of the itcw + * @cmd: the dcw command + * @flags: flags for the dcw + * @cd: address of control data for this dcw or NULL if none is required + * @cd_count: number of control data bytes for this dcw + * @count: number of data bytes for this dcw + * + * Add a new dcw to the specified itcw by writing the dcw information specified + * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return + * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw + * would exceed the available space. + * + * Note: the tcal field of the tccb header will be updated to reflect added + * content. 
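 *
 * Example (illustrative command code and count):
 *	dcw = itcw_add_dcw(itcw, 0x02, 0, NULL, 0, 4096);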
+ */
+struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
+			 u8 cd_count, u32 count)
+{
+	return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
+			    flags, cd, cd_count, count);
+}
+EXPORT_SYMBOL(itcw_add_dcw);
+
+/**
+ * itcw_add_tidaw - add a tidaw to the itcw
+ * @itcw: address of the itcw
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified itcw
+ * (depending on the value of the r-flag and w-flag). Return a pointer to
+ * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
+ * available space.
+ *
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
+ */
+struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
+{
+	struct tidaw *following;
+
+	if (itcw->num_tidaws >= itcw->max_tidaws)
+		return ERR_PTR(-ENOSPC);
+	/*
+	 * Is the tidaw, which follows the one we are about to fill, on the next
+	 * page? Then we have to insert a TTIC tidaw first, that points to the
+	 * tidaw on the new page.
+	 */
+	following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+		    + itcw->num_tidaws + 1;
+	if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+		tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+			      TIDAW_FLAGS_TTIC, following, 0);
+		if (itcw->num_tidaws >= itcw->max_tidaws)
+			return ERR_PTR(-ENOSPC);
+	}
+	return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
+}
+EXPORT_SYMBOL(itcw_add_tidaw);
+
+/**
+ * itcw_set_data - set data address and tida flag of the itcw
+ * @itcw: address of the itcw
+ * @addr: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of the itcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
+{
+	tcw_set_data(itcw->tcw, addr, use_tidal);
+}
+EXPORT_SYMBOL(itcw_set_data);
+
+/**
+ * itcw_finalize - calculate length and count fields of the itcw
+ * @itcw: address of the itcw
+ *
+ * Calculate tcw input-/output-count and tccbl fields and add a tcat to the
+ * tccb. In case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void itcw_finalize(struct itcw *itcw)
+{
+	tcw_finalize(itcw->tcw, itcw->num_tidaws);
+}
+EXPORT_SYMBOL(itcw_finalize);
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 00000000000..7a640530e7f
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,91 @@
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ *	      Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+	u32 intparm;	/* interruption parameter */
+	u32 key:4;	/* flags, like key, suspend control, etc.
*/ + u32 spnd:1; /* suspend control */ + u32 res1:1; /* reserved */ + u32 mod:1; /* modification control */ + u32 sync:1; /* synchronize control */ + u32 fmt:1; /* format control */ + u32 pfch:1; /* prefetch control */ + u32 isic:1; /* initial-status-interruption control */ + u32 alcc:1; /* address-limit-checking control */ + u32 ssic:1; /* suppress-suspended-interr. control */ + u32 res2:1; /* reserved */ + u32 c64:1; /* IDAW/QDIO 64 bit control */ + u32 i2k:1; /* IDAW 2/4kB block size control */ + u32 lpm:8; /* logical path mask */ + u32 ils:1; /* incorrect length */ + u32 zero:6; /* reserved zeros */ + u32 orbx:1; /* ORB extension control */ + u32 cpa; /* channel program address */ +} __packed __aligned(4); + +/* + * Transport-mode operation request block + */ +struct tm_orb { + u32 intparm; + u32 key:4; + u32:9; + u32 b:1; + u32:2; + u32 lpm:8; + u32:7; + u32 x:1; + u32 tcw; + u32 prio:8; + u32:8; + u32 rsvpgm:8; + u32:8; + u32:32; + u32:32; + u32:32; + u32:32; +} __packed __aligned(4); + +/* + * eadm operation request block + */ +struct eadm_orb { + u32 intparm; + u32 key:4; + u32:4; + u32 compat1:1; + u32 compat2:1; + u32:21; + u32 x:1; + u32 aob; + u32 css_prio:8; + u32:8; + u32 scm_prio:8; + u32:8; + u32:29; + u32 fmt:3; + u32:32; + u32:32; + u32:32; +} __packed __aligned(4); + +union orb { + struct cmd_orb cmd; + struct tm_orb tm; + struct eadm_orb eadm; +} __packed __aligned(4); + +#endif /* S390_ORB_H */ diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c deleted file mode 100644 index d36258d6665..00000000000 --- a/drivers/s390/cio/qdio.c +++ /dev/null @@ -1,3468 +0,0 @@ -/* - * - * linux/drivers/s390/cio/qdio.c - * - * Linux for S/390 QDIO base support, Hipersocket base support - * version 2 - * - * Copyright 2000,2002 IBM Corporation - * Author(s): Utz Bacher <utz.bacher@de.ibm.com> - * 2.6 cio integration by Cornelia Huck <cohuck@de.ibm.com> - * - * Restriction: only 63 iqdio subchannels would have its own indicator, - * after that, subsequent subchannels share one indicator - * - * - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include <linux/config.h> -#include <linux/module.h> -#include <linux/init.h> - -#include <linux/slab.h> -#include <linux/kernel.h> -#include <linux/proc_fs.h> -#include <linux/timer.h> - -#include <asm/ccwdev.h> -#include <asm/io.h> -#include <asm/atomic.h> -#include <asm/semaphore.h> -#include <asm/timex.h> - -#include <asm/debug.h> -#include <asm/qdio.h> - -#include "cio.h" -#include "css.h" -#include "device.h" -#include "airq.h" -#include "qdio.h" -#include "ioasm.h" -#include "chsc.h" - -#define VERSION_QDIO_C "$Revision: 1.101 $" - -/****************** MODULE PARAMETER VARIABLES ********************/ -MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); -MODULE_DESCRIPTION("QDIO base support version 2, " \ - "Copyright 2000 IBM Corporation"); -MODULE_LICENSE("GPL"); - -/******************** HERE WE GO ***********************************/ - -static const char version[] = "QDIO base support version 2 (" - VERSION_QDIO_C "/" VERSION_QDIO_H "/" VERSION_CIO_QDIO_H ")"; - -#ifdef QDIO_PERFORMANCE_STATS -static int proc_perf_file_registration; -static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc; -static struct qdio_perf_stats perf_stats; -#endif /* QDIO_PERFORMANCE_STATS */ - -static int hydra_thinints; -static int omit_svs; - -static int indicator_used[INDICATORS_PER_CACHELINE]; -static __u32 * volatile indicators; -static __u32 volatile spare_indicator; -static atomic_t spare_indicator_usecount; - -static debug_info_t *qdio_dbf_setup; -static debug_info_t *qdio_dbf_sbal; -static debug_info_t *qdio_dbf_trace; -static debug_info_t *qdio_dbf_sense; -#ifdef CONFIG_QDIO_DEBUG -static debug_info_t *qdio_dbf_slsb_out; -static debug_info_t *qdio_dbf_slsb_in; -#endif /* CONFIG_QDIO_DEBUG */ - -/* iQDIO stuff: */ -static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change - during a while loop */ -static DEFINE_SPINLOCK(ttiq_list_lock); -static int register_thinint_result; -static void tiqdio_tl(unsigned long); -static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); - -/* not a macro, as one of the arguments is atomic_read */ -static inline int -qdio_min(int a,int b) -{ - if (a<b) - return a; - else - return b; -} - -/***************** SCRUBBER HELPER ROUTINES **********************/ - -static inline volatile __u64 -qdio_get_micros(void) -{ - return (get_clock() >> 10); /* time>>12 is microseconds */ -} - -/* - * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve - * the q in any case, so that we'll not be interrupted when we are in - * qdio_mark_tiq... 
shouldn't have a really bad impact, as reserving almost - * ever works (last famous words) - */ -static inline int -qdio_reserve_q(struct qdio_q *q) -{ - return atomic_add_return(1,&q->use_count) - 1; -} - -static inline void -qdio_release_q(struct qdio_q *q) -{ - atomic_dec(&q->use_count); -} - -static volatile inline void -qdio_set_slsb(volatile char *slsb, unsigned char value) -{ - xchg((char*)slsb,value); -} - -static inline int -qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, - unsigned int gpr3) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigasync"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.siga_syncs++; -#endif /* QDIO_PERFORMANCE_STATS */ - - cc = do_siga_sync(q->irq, gpr2, gpr3); - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static inline int -qdio_siga_sync_q(struct qdio_q *q) -{ - if (q->is_input_q) - return qdio_siga_sync(q, 0, q->mask); - return qdio_siga_sync(q, q->mask, 0); -} - -/* - * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns - * an access exception - */ -static inline int -qdio_siga_output(struct qdio_q *q) -{ - int cc; - __u32 busy_bit; - __u64 start_time=0; - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.siga_outs++; -#endif /* QDIO_PERFORMANCE_STATS */ - - QDIO_DBF_TEXT4(0,trace,"sigaout"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - for (;;) { - cc = do_siga_output(q->irq, q->mask, &busy_bit); -//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); - if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { - if (!start_time) - start_time=NOW; - if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE) - break; - } else - break; - } - - if ((cc==2) && (busy_bit)) - cc |= QDIO_SIGA_ERROR_B_BIT_SET; - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static inline int -qdio_siga_input(struct qdio_q *q) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigain"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.siga_ins++; -#endif /* QDIO_PERFORMANCE_STATS */ - - cc = do_siga_input(q->irq, q->mask); - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static __u32 * volatile -qdio_get_indicator(void) -{ - int i; - - for (i=1;i<INDICATORS_PER_CACHELINE;i++) - if (!indicator_used[i]) { - indicator_used[i]=1; - return indicators+i; - } - atomic_inc(&spare_indicator_usecount); - return (__u32 * volatile) &spare_indicator; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static void -qdio_put_indicator(__u32 *addr) -{ - int i; - - if ( (addr) && (addr!=&spare_indicator) ) { - i=addr-indicators; - indicator_used[i]=0; - } - if (addr == &spare_indicator) - atomic_dec(&spare_indicator_usecount); -} - -static inline volatile void -tiqdio_clear_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"clrsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,0); -} - -static inline volatile void -tiqdio_set_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"setsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,-1); -} - -static inline void -tiqdio_sched_tl(void) -{ - tasklet_hi_schedule(&tiqdio_tasklet); -} - -static inline void -qdio_mark_tiq(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"mark iq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - spin_lock_irqsave(&ttiq_list_lock,flags); - if (unlikely(atomic_read(&q->is_in_shutdown))) - goto out_unlock; - - if 
(!q->is_input_q) - goto out_unlock; - - if ((q->list_prev) || (q->list_next)) - goto out_unlock; - - if (!tiq_list) { - tiq_list=q; - q->list_prev=q; - q->list_next=q; - } else { - q->list_next=tiq_list; - q->list_prev=tiq_list->list_prev; - tiq_list->list_prev->list_next=q; - tiq_list->list_prev=q; - } - spin_unlock_irqrestore(&ttiq_list_lock,flags); - - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return; -out_unlock: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - return; -} - -static inline void -qdio_mark_q(struct qdio_q *q) -{ - QDIO_DBF_TEXT4(0,trace,"mark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(atomic_read(&q->is_in_shutdown))) - return; - - tasklet_schedule(&q->tasklet); -} - -static inline int -qdio_stop_polling(struct qdio_q *q) -{ -#ifdef QDIO_USE_PROCESSING_STATE - int gsf; - - if (!atomic_swap(&q->polling,0)) - return 1; - - QDIO_DBF_TEXT4(0,trace,"stoppoll"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* show the card that we are not polling anymore */ - if (!q->is_input_q) - return 1; - - gsf=GET_SAVED_FRONTIER(q); - set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)& - (QDIO_MAX_BUFFERS_PER_Q-1)], - SLSB_P_INPUT_NOT_INIT); - /* - * we don't issue this SYNC_MEMORY, as we trust Rick T and - * moreover will not use the PROCESSING state under VM, so - * q->polling was 0 anyway - */ - /*SYNC_MEMORY;*/ - if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED) - return 1; - /* - * set our summary bit again, as otherwise there is a - * small window we can miss between resetting it and - * checking for PRIMED state - */ - if (q->is_thinint_q) - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - return 0; - -#else /* QDIO_USE_PROCESSING_STATE */ - return 1; -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -/* - * see the comment in do_QDIO and before qdio_reserve_q about the - * sophisticated locking outside of unmark_q, so that we don't need to - * disable the interrupts :-) -*/ -static inline void -qdio_unmark_q(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"unmark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if ((!q->list_prev)||(!q->list_next)) - return; - - if ((q->is_thinint_q)&&(q->is_input_q)) { - /* iQDIO */ - spin_lock_irqsave(&ttiq_list_lock,flags); - /* in case cleanup has done this already and simultanously - * qdio_unmark_q is called from the interrupt handler, we've - * got to check this in this specific case again */ - if ((!q->list_prev)||(!q->list_next)) - goto out; - if (q->list_next==q) { - /* q was the only interesting q */ - tiq_list=NULL; - q->list_next=NULL; - q->list_prev=NULL; - } else { - q->list_next->list_prev=q->list_prev; - q->list_prev->list_next=q->list_next; - tiq_list=q->list_next; - q->list_next=NULL; - q->list_prev=NULL; - } -out: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - } -} - -static inline unsigned long -tiqdio_clear_global_summary(void) -{ - unsigned long time; - - QDIO_DBF_TEXT5(0,trace,"clrglobl"); - - time = do_clear_global_summary(); - - QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long)); - - return time; -} - - -/************************* OUTBOUND ROUTINES *******************************/ - -static inline int -qdio_get_outbound_buffer_frontier(struct qdio_q *q) -{ - int f,f_mod_no; - volatile char *slsb; - int first_not_to_check; - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"getobfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * f points to already processed elements, 
so f+no_used is correct... - * ... but: we don't check 128 buffers, as otherwise - * qdio_has_outbound_q_moved would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis)) - SYNC_MEMORY; - -check_next: - if (f==first_not_to_check) - goto out; - - switch(slsb[f_mod_no]) { - - /* the adapter has not fetched the output yet */ - case SLSB_CU_OUTPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"outpprim"); - break; - - /* the adapter got it */ - case SLSB_P_OUTPUT_EMPTY: - atomic_dec(&q->number_of_buffers_used); - f++; - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - QDIO_DBF_TEXT5(0,trace,"outpempt"); - goto check_next; - - case SLSB_P_OUTPUT_ERROR: - QDIO_DBF_TEXT3(0,trace,"outperr"); - sprintf(dbf_text,"%x-%x-%x",f_mod_no, - q->sbal[f_mod_no]->element[14].sbalf.value, - q->sbal[f_mod_no]->element[15].sbalf.value); - QDIO_DBF_TEXT3(1,trace,dbf_text); - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT); - - /* - * we increment the frontier, as this buffer - * was processed obviously - */ - atomic_dec(&q->number_of_buffers_used); - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_OUTPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - break; - - /* no new buffers */ - default: - QDIO_DBF_TEXT5(0,trace,"outpni"); - } -out: - return (q->first_to_check=f_mod_no); -} - -/* all buffers are processed */ -static inline int -qdio_is_outbound_q_done(struct qdio_q *q) -{ - int no_used; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"oqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"oqisdone"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - return (no_used==0); -} - -static inline int -qdio_has_outbound_q_moved(struct qdio_q *q) -{ - int i; - - i=qdio_get_outbound_buffer_frontier(q); - - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - QDIO_DBF_TEXT4(0,trace,"oqhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"oqhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -static inline void -qdio_kick_outbound_q(struct qdio_q *q) -{ - int result; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"kickoutq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!q->siga_out) - return; - - /* here's the story with cc=2 and busy bit set (thanks, Rick): - * VM's CP could present us cc=2 and busy bit set on SIGA-write - * during reconfiguration of their Guest LAN (only in HIPERS mode, - * QDIO mode is asynchronous -- cc=2 and busy bit there will take - * the queues down immediately; and not being under VM we have a - * problem on cc=2 and busy bit set right away). - * - * Therefore qdio_siga_output will try for a short time constantly, - * if such a condition occurs. If it doesn't change, it will - * increase the busy_siga_counter and save the timestamp, and - * schedule the queue for later processing (via mark_q, using the - * queue tasklet). __qdio_outbound_processing will check out the - * counter. 
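
The frontier bookkeeping above treats the 128 SLSB entries as a ring and relies on QDIO_MAX_BUFFERS_PER_Q being a power of two, so indices wrap with a mask instead of a modulo. A minimal user-space sketch of that wrap-around arithmetic follows; the constant and both helpers are stand-ins for illustration, not the driver's own expressions.

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128	/* must be a power of two for the mask trick */

/* Advance a ring index by one, wrapping at QDIO_MAX_BUFFERS_PER_Q. */
static int ring_next(int i)
{
	return (i + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}

/* Entries from 'start' up to (but not including) 'end', with wrap-around. */
static int ring_count(int start, int end)
{
	return (end + QDIO_MAX_BUFFERS_PER_Q - start) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}

int main(void)
{
	printf("%d\n", ring_count(120, 5));	/* 13: the span crosses the wrap point */
	printf("%d\n", ring_next(127));		/* 0: wraps back to the start */
	return 0;
}
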
If non-zero, it will call qdio_kick_outbound_q as often - * as the value of the counter. This will attempt further SIGA - * instructions. For each successful SIGA, the counter is - * decreased, for failing SIGAs the counter remains the same, after - * all. - * After some time of no movement, qdio_kick_outbound_q will - * finally fail and reflect corresponding error codes to call - * the upper layer module and have it take the queues down. - * - * Note that this is a change from the original HiperSockets design - * (saying cc=2 and busy bit means take the queues down), but in - * these days Guest LAN didn't exist... excessive cc=2 with busy bit - * conditions will still take the queues down, but the threshold is - * higher due to the Guest LAN environment. - */ - - - result=qdio_siga_output(q); - - switch (result) { - case 0: - /* went smooth this time, reset timestamp */ -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(0,trace,"cc2reslv"); - sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - q->timing.busy_start=0; - break; - case (2|QDIO_SIGA_ERROR_B_BIT_SET): - /* cc=2 and busy bit: */ - atomic_inc(&q->busy_siga_counter); - - /* if the last siga was successful, save - * timestamp here */ - if (!q->timing.busy_start) - q->timing.busy_start=NOW; - - /* if we're in time, don't touch error_status_flags - * and siga_error */ - if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) { - qdio_mark_q(q); - break; - } - QDIO_DBF_TEXT2(0,trace,"cc2REPRT"); -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - /* else fallthrough and report error */ - default: - /* for plain cc=1, 2 or 3: */ - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|= - QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } -} - -static inline void -qdio_kick_outbound_handler(struct qdio_q *q) -{ - int start, end, real_end, count; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - start = q->first_element_to_kick; - /* last_move_ftc was just updated */ - real_end = GET_SAVED_FRONTIER(q); - end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)& - (QDIO_MAX_BUFFERS_PER_Q-1); - count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)& - (QDIO_MAX_BUFFERS_PER_Q-1); - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"kickouth"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (q->state==QDIO_IRQ_STATE_ACTIVE) - q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT| - q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; -} - -static inline void -__qdio_outbound_processing(struct qdio_q *q) -{ - int siga_attempts; - - QDIO_DBF_TEXT4(0,trace,"qoutproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - o_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } -#ifdef QDIO_PERFORMANCE_STATS - o_p_nc++; - perf_stats.tl_runs++; -#endif /* 
QDIO_PERFORMANCE_STATS */ - - /* see comment in qdio_kick_outbound_q */ - siga_attempts=atomic_read(&q->busy_siga_counter); - while (siga_attempts) { - atomic_dec(&q->busy_siga_counter); - qdio_kick_outbound_q(q); - siga_attempts--; - } - - if (qdio_has_outbound_q_moved(q)) - qdio_kick_outbound_handler(q); - - if (q->is_iqdio_q) { - /* - * for asynchronous queues, we better check, if the fill - * level is too high. for synchronous queues, the fill - * level will never be that high. - */ - if (atomic_read(&q->number_of_buffers_used)> - IQDIO_FILL_LEVEL_TO_POLL) - qdio_mark_q(q); - - } else if (!q->hydra_gives_outbound_pcis) - if (!qdio_is_outbound_q_done(q)) - qdio_mark_q(q); - - qdio_release_q(q); -} - -static void -qdio_outbound_processing(struct qdio_q *q) -{ - __qdio_outbound_processing(q); -} - -/************************* INBOUND ROUTINES *******************************/ - - -static inline int -qdio_get_inbound_buffer_frontier(struct qdio_q *q) -{ - int f,f_mod_no; - volatile char *slsb; - int first_not_to_check; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif /* CONFIG_QDIO_DEBUG */ -#ifdef QDIO_USE_PROCESSING_STATE - int last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"getibfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved - * would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - /* - * we don't use this one, as a PCI or we after a thin interrupt - * will sync the queues - */ - /* SYNC_MEMORY;*/ - -check_next: - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - if (f==first_not_to_check) - goto out; - switch (slsb[f_mod_no]) { - - /* CU_EMPTY means frontier is reached */ - case SLSB_CU_INPUT_EMPTY: - QDIO_DBF_TEXT5(0,trace,"inptempt"); - break; - - /* P_PRIMED means set slsb to P_PROCESSING and move on */ - case SLSB_P_INPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"inptprim"); - -#ifdef QDIO_USE_PROCESSING_STATE - /* - * as soon as running under VM, polling the input queues will - * kill VM in terms of CP overhead - */ - if (q->siga_sync) { - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); - } else { - /* set the previous buffer to NOT_INIT. The current - * buffer will be set to PROCESSING at the end of - * this function to avoid further interrupts. */ - if (last_position>=0) - set_slsb(&slsb[last_position], - SLSB_P_INPUT_NOT_INIT); - atomic_set(&q->polling,1); - last_position=f_mod_no; - } -#else /* QDIO_USE_PROCESSING_STATE */ - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); -#endif /* QDIO_USE_PROCESSING_STATE */ - /* - * not needed, as the inbound queue will be synced on the next - * siga-r, resp. 
tiqdio_is_inbound_q_done will do the siga-s - */ - /*SYNC_MEMORY;*/ - f++; - atomic_dec(&q->number_of_buffers_used); - goto check_next; - - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_PROCESSING: - QDIO_DBF_TEXT5(0,trace,"inpnipro"); - break; - - /* P_ERROR means frontier is reached, break and report error */ - case SLSB_P_INPUT_ERROR: -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"inperr%2x",f_mod_no); - QDIO_DBF_TEXT3(1,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_INPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - /* we increment the frontier, as this buffer - * was processed obviously */ - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - atomic_dec(&q->number_of_buffers_used); - -#ifdef QDIO_USE_PROCESSING_STATE - last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - break; - - /* everything else means frontier not changed (HALTED or so) */ - default: - break; - } -out: - q->first_to_check=f_mod_no; - -#ifdef QDIO_USE_PROCESSING_STATE - if (last_position>=0) - set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING); -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - - return q->first_to_check; -} - -static inline int -qdio_has_inbound_q_moved(struct qdio_q *q) -{ - int i; - -#ifdef QDIO_PERFORMANCE_STATS - static int old_pcis=0; - static int old_thinints=0; - - if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints)) - perf_stats.start_time_inbound=NOW; - else - old_pcis=perf_stats.pcis; -#endif /* QDIO_PERFORMANCE_STATS */ - - i=qdio_get_inbound_buffer_frontier(q); - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis)) - SAVE_TIMESTAMP(q); - - QDIO_DBF_TEXT4(0,trace,"inhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"inhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -/* means, no more buffers to be filled */ -static inline int -tiqdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* propagate the change from 82 to 80 through VM */ - SYNC_MEMORY; - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"iqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"iniqisdo"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!no_used) - return 1; - - if (!q->siga_sync) - /* we'll check for more primed buffers in qeth_stop_polling */ - return 0; - - if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED) - /* - * nothing more to do, if next buffer is not PRIMED. - * note that we did a SYNC_MEMORY before, that there - * has been a sychnronization. - * we will return 0 below, as there is nothing to do - * (stop_polling not necessary, as we have not been - * using the PROCESSING state - */ - return 0; - - /* - * ok, the next input buffer is primed. that means, that device state - * change indicator and adapter local summary are set, so we will find - * it next time. 
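
The PROCESSING-state handling in this area is careful about a lost-wakeup window: after deciding to stop polling, the code re-checks the ring and, if a buffer became PRIMED in the meantime, raises the device state change indicator again so the next tasklet run cannot miss it. A rough user-space model of that pattern, with a simplified SLSB and plain C11 atomics standing in for the driver's types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 128
#define PRIMED    1	/* simplified stand-in for SLSB_P_INPUT_PRIMED */

struct mini_q {
	atomic_int polling;		/* 1 while we own the PROCESSING state */
	atomic_int summary_bit;		/* stand-in for dev_st_chg_ind */
	unsigned char slsb[RING_SIZE];
	int first_to_check;
};

/* Returns true if polling really stopped; false if new work slipped into the
 * window, in which case the indicator was raised again so a later pass
 * re-checks the queue. */
static bool stop_polling(struct mini_q *q)
{
	if (!atomic_exchange(&q->polling, 0))
		return true;			/* we were not polling at all */
	if (q->slsb[q->first_to_check] != PRIMED)
		return true;			/* nothing arrived meanwhile */
	atomic_store(&q->summary_bit, 1);	/* close the race window */
	return false;
}

int main(void)
{
	static struct mini_q q;
	bool stopped;

	atomic_store(&q.polling, 1);
	q.slsb[0] = PRIMED;	/* a buffer became PRIMED while we were stopping */
	stopped = stop_polling(&q);
	printf("stopped=%d summary=%d\n", stopped, atomic_load(&q.summary_bit));
	return 0;
}
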
- * we will return 0 below, as there is nothing to do, except scheduling - * ourselves for the next time. - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return 0; -} - -static inline int -qdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* - * we need that one for synchronization with the adapter, as it - * does a kind of PCI avoidance - */ - SYNC_MEMORY; - - if (!no_used) { - QDIO_DBF_TEXT4(0,trace,"inqisdnA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - QDIO_DBF_TEXT4(0,trace,dbf_text); - return 1; - } - - if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) { - /* we got something to do */ - QDIO_DBF_TEXT4(0,trace,"inqisntA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } - - /* on VM, we don't poll, so the q is always done here */ - if (q->siga_sync) - return 1; - if (q->hydra_gives_outbound_pcis) - return 1; - - /* - * at this point we know, that inbound first_to_check - * has (probably) not moved (see qdio_inbound_processing) - */ - if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisdon"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 1; - } else { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisntd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; - } -} - -static inline void -qdio_kick_inbound_handler(struct qdio_q *q) -{ - int count, start, end, real_end, i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - QDIO_DBF_TEXT4(0,trace,"kickinh"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - start=q->first_element_to_kick; - real_end=q->first_to_check; - end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - i=start; - count=0; - while (1) { - count++; - if (i==end) - break; - i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - } - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (likely(q->state==QDIO_IRQ_STATE_ACTIVE)) - q->handler(q->cdev, - QDIO_STATUS_INBOUND_INT|q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound; - perf_stats.inbound_cnt++; -#endif /* QDIO_PERFORMANCE_STATS */ -} - -static inline void -__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *oq; - int i; - - QDIO_DBF_TEXT4(0,trace,"iqinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* - * we first want to reserve the q, so that we know, that we don't - * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might - * be set - */ - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - ii_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - tiqdio_sched_tl(); - return; - } -#ifdef QDIO_PERFORMANCE_STATS - ii_p_nc++; -#endif 
/* QDIO_PERFORMANCE_STATS */ - if (unlikely(atomic_read(&q->is_in_shutdown))) { - qdio_unmark_q(q); - goto out; - } - - /* - * we reset spare_ind_was_set, when the queue does not use the - * spare indicator - */ - if (spare_ind_was_set) - spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator); - - if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set) - goto out; - /* - * q->dev_st_chg_ind is the indicator, be it shared or not. - * only clear it, if indicator is non-shared - */ - if (!spare_ind_was_set) - tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind); - - if (q->hydra_gives_outbound_pcis) { - if (!q->siga_sync_done_on_thinints) { - SYNC_MEMORY_ALL; - } else if ((!q->siga_sync_done_on_outb_tis)&& - (q->hydra_gives_outbound_pcis)) { - SYNC_MEMORY_ALL_OUTB; - } - } else { - SYNC_MEMORY; - } - /* - * maybe we have to do work on our outbound queues... at least - * we have to check the outbound-int-capable thinint-capable - * queues - */ - if (q->hydra_gives_outbound_pcis) { - irq_ptr = (struct qdio_irq*)q->irq_ptr; - for (i=0;i<irq_ptr->no_output_qs;i++) { - oq = irq_ptr->output_qs[i]; -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs--; -#endif /* QDIO_PERFORMANCE_STATS */ - if (!qdio_is_outbound_q_done(oq)) - __qdio_outbound_processing(oq); - } - } - - if (!qdio_has_inbound_q_moved(q)) - goto out; - - qdio_kick_inbound_handler(q); - if (tiqdio_is_inbound_q_done(q)) - if (!qdio_stop_polling(q)) { - /* - * we set the flags to get into the stuff next time, - * see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - } -out: - qdio_release_q(q); -} - -static void -tiqdio_inbound_processing(struct qdio_q *q) -{ - __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); -} - -static inline void -__qdio_inbound_processing(struct qdio_q *q) -{ - int q_laps=0; - - QDIO_DBF_TEXT4(0,trace,"qinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - i_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } -#ifdef QDIO_PERFORMANCE_STATS - i_p_nc++; - perf_stats.tl_runs++; -#endif /* QDIO_PERFORMANCE_STATS */ - -again: - if (qdio_has_inbound_q_moved(q)) { - qdio_kick_inbound_handler(q); - if (!qdio_stop_polling(q)) { - q_laps++; - if (q_laps<QDIO_Q_LAPS) - goto again; - } - qdio_mark_q(q); - } else { - if (!qdio_is_inbound_q_done(q)) - /* means poll time is not yet over */ - qdio_mark_q(q); - } - - qdio_release_q(q); -} - -static void -qdio_inbound_processing(struct qdio_q *q) -{ - __qdio_inbound_processing(q); -} - -/************************* MAIN ROUTINES *******************************/ - -#ifdef QDIO_USE_PROCESSING_STATE -static inline int -tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) -{ - if (!q) { - tiqdio_sched_tl(); - return 0; - } - - /* - * under VM, we have not used the PROCESSING state, so no - * need to stop polling - */ - if (q->siga_sync) - return 2; - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - ii_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - - /* - * sanity -- we'd get here without setting the - * dev st chg ind - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); 
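
Both inbound and outbound processing enter through the same non-blocking reservation: qdio_reserve_q() bumps an atomic use count and treats a non-zero old value as "someone else already holds the queue", in which case the caller backs off and re-marks the queue instead of spinning. A small stand-alone sketch of that scheme using C11 atomics; the names and the back-off action are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int use_count;	/* stand-in for q->use_count */

/* Mirrors the shape of qdio_reserve_q(): the old counter value is returned,
 * so 0 means "we got the queue first" and anything else means "busy". */
static int reserve_q(void)
{
	return atomic_fetch_add(&use_count, 1);
}

static void release_q(void)
{
	atomic_fetch_sub(&use_count, 1);
}

static void process_queue(void)
{
	if (reserve_q()) {
		release_q();
		puts("busy - re-mark the queue and try again later");
		return;
	}
	puts("processing the queue");
	release_q();
}

int main(void)
{
	process_queue();
	return 0;
}
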
- return 0; - } - if (qdio_stop_polling(q)) { - qdio_release_q(q); - return 2; - } - if (q_laps<QDIO_Q_LAPS-1) { - qdio_release_q(q); - return 3; - } - /* - * we set the flags to get into the stuff - * next time, see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - qdio_release_q(q); - return 1; - -} -#endif /* QDIO_USE_PROCESSING_STATE */ - -static inline void -tiqdio_inbound_checks(void) -{ - struct qdio_q *q; - int spare_ind_was_set=0; -#ifdef QDIO_USE_PROCESSING_STATE - int q_laps=0; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"iqdinbck"); - QDIO_DBF_TEXT5(0,trace,"iqlocsum"); - -#ifdef QDIO_USE_PROCESSING_STATE -again: -#endif /* QDIO_USE_PROCESSING_STATE */ - - /* when the spare indicator is used and set, save that and clear it */ - if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) { - spare_ind_was_set = 1; - tiqdio_clear_summary_bit((__u32*)&spare_indicator); - } - - q=(struct qdio_q*)tiq_list; - do { - if (!q) - break; - __tiqdio_inbound_processing(q, spare_ind_was_set); - q=(struct qdio_q*)q->list_next; - } while (q!=(struct qdio_q*)tiq_list); - -#ifdef QDIO_USE_PROCESSING_STATE - q=(struct qdio_q*)tiq_list; - do { - int ret; - - ret = tiqdio_reset_processing_state(q, q_laps); - switch (ret) { - case 0: - return; - case 1: - q_laps++; - case 2: - q = (struct qdio_q*)q->list_next; - break; - default: - q_laps++; - goto again; - } - } while (q!=(struct qdio_q*)tiq_list); -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -static void -tiqdio_tl(unsigned long data) -{ - QDIO_DBF_TEXT4(0,trace,"iqdio_tl"); - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs++; -#endif /* QDIO_PERFORMANCE_STATS */ - - tiqdio_inbound_checks(); -} - -/********************* GENERAL HELPER_ROUTINES ***********************/ - -static void -qdio_release_irq_memory(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) { - if (!irq_ptr->input_qs[i]) - goto next; - - if (irq_ptr->input_qs[i]->slib) - kfree(irq_ptr->input_qs[i]->slib); - kfree(irq_ptr->input_qs[i]); - -next: - if (!irq_ptr->output_qs[i]) - continue; - - if (irq_ptr->output_qs[i]->slib) - kfree(irq_ptr->output_qs[i]->slib); - kfree(irq_ptr->output_qs[i]); - - } - kfree(irq_ptr->qdr); - kfree(irq_ptr); -} - -static void -qdio_set_impl_params(struct qdio_irq *irq_ptr, - unsigned int qib_param_field_format, - /* pointer to 128 bytes or NULL, if no param field */ - unsigned char *qib_param_field, - /* pointer to no_queues*128 words of data or NULL */ - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned long *input_slib_elements, - unsigned long *output_slib_elements) -{ - int i,j; - - if (!irq_ptr) - return; - - irq_ptr->qib.pfmt=qib_param_field_format; - if (qib_param_field) - memcpy(irq_ptr->qib.parm,qib_param_field, - QDIO_MAX_BUFFERS_PER_Q); - - if (input_slib_elements) - for (i=0;i<no_input_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->input_qs[i]->slib->slibe[j].parms= - input_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } - if (output_slib_elements) - for (i=0;i<no_output_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->output_qs[i]->slib->slibe[j].parms= - output_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } -} - -static int -qdio_alloc_qs(struct qdio_irq *irq_ptr, - int no_input_qs, int no_output_qs) -{ - int i; - struct qdio_q *q; - int result=-ENOMEM; - - for (i=0;i<no_input_qs;i++) { - q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); - - if (!q) { - 
QDIO_PRINT_ERR("kmalloc of q failed!\n"); - goto out; - } - - memset(q,0,sizeof(struct qdio_q)); - - q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); - if (!q->slib) { - QDIO_PRINT_ERR("kmalloc of slib failed!\n"); - goto out; - } - - irq_ptr->input_qs[i]=q; - } - - for (i=0;i<no_output_qs;i++) { - q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); - - if (!q) { - goto out; - } - - memset(q,0,sizeof(struct qdio_q)); - - q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); - if (!q->slib) { - QDIO_PRINT_ERR("kmalloc of slib failed!\n"); - goto out; - } - - irq_ptr->output_qs[i]=q; - } - - result=0; -out: - return result; -} - -static void -qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, - int no_input_qs, int no_output_qs, - qdio_handler_t *input_handler, - qdio_handler_t *output_handler, - unsigned long int_parm,int q_format, - unsigned long flags, - void **inbound_sbals_array, - void **outbound_sbals_array) -{ - struct qdio_q *q; - int i,j; - char dbf_text[20]; /* see qdio_initialize */ - void *ptr; - int available; - - sprintf(dbf_text,"qfqs%4x",cdev->private->irq); - QDIO_DBF_TEXT0(0,setup,dbf_text); - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - - memset(q,0,((char*)&q->slib)-((char*)q)); - sprintf(dbf_text,"in-q%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(inbound_sbals_array++); - - q->queue_type=q_format; - q->int_parm=int_parm; - q->irq=irq_ptr->irq; - q->irq_ptr = irq_ptr; - q->cdev = cdev; - q->mask=1<<(31-i); - q->q_no=i; - q->is_input_q=1; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=input_handler; - q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind; - - q->tasklet.data=(unsigned long)q; - /* q->is_thinint_q isn't valid at this time, but - * irq_ptr->is_thinint_irq is */ - q->tasklet.func=(void(*)(unsigned long)) - ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing: - &qdio_inbound_processing); - - /* actually this is not used for inbound queues. yet. 
*/ - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - -/* for (j=0;j<QDIO_STATS_NUMBER;j++) - q->timing.last_transfer_times[j]=(qdio_get_micros()/ - QDIO_STATS_NUMBER)*j; - q->timing.last_transfer_index=QDIO_STATS_NUMBER-1; -*/ - - /* fill in slib */ - if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { - set_slsb(&q->slsb.acc.val[j], - SLSB_P_INPUT_NOT_INIT); -/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ - } - } - - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - memset(q,0,((char*)&q->slib)-((char*)q)); - - sprintf(dbf_text,"outq%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(outbound_sbals_array++); - - q->queue_type=q_format; - q->int_parm=int_parm; - q->is_input_q=0; - q->irq=irq_ptr->irq; - q->cdev = cdev; - q->irq_ptr = irq_ptr; - q->mask=1<<(31-i); - q->q_no=i; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=output_handler; - - q->tasklet.data=(unsigned long)q; - q->tasklet.func=(void(*)(unsigned long)) - &qdio_outbound_processing; - - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - - /* fill in slib */ - if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { - set_slsb(&q->slsb.acc.val[j], - SLSB_P_OUTPUT_NOT_INIT); -/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ - } - } -} - -static void -qdio_fill_thresholds(struct qdio_irq *irq_ptr, - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned int min_input_threshold, - unsigned int max_input_threshold, - unsigned int min_output_threshold, - unsigned int max_output_threshold) -{ - int i; - struct qdio_q *q; - - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - q->timing.threshold=max_input_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - q->threshold_classes[j].threshold= - min_input_threshold+ - (max_input_threshold-min_input_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - q->timing.threshold=max_output_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - q->threshold_classes[j].threshold= - min_output_threshold+ - (max_output_threshold-min_output_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } -} - -static int 
-tiqdio_thinint_handler(void) -{ - QDIO_DBF_TEXT4(0,trace,"thin_int"); - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.thinints++; - perf_stats.start_time_inbound=NOW; -#endif /* QDIO_PERFORMANCE_STATS */ - - /* SVS only when needed: - * issue SVS to benefit from iqdio interrupt avoidance - * (SVS clears AISOI)*/ - if (!omit_svs) - tiqdio_clear_global_summary(); - - tiqdio_inbound_checks(); - return 0; -} - -static void -qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) -{ - int i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT5(0,trace,"newstate"); - sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state); - QDIO_DBF_TEXT5(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - irq_ptr->state=state; - for (i=0;i<irq_ptr->no_input_qs;i++) - irq_ptr->input_qs[i]->state=state; - for (i=0;i<irq_ptr->no_output_qs;i++) - irq_ptr->output_qs[i]->state=state; - mb(); -} - -static inline void -qdio_irq_check_sense(int irq, struct irb *irb) -{ - char dbf_text[15]; - - if (irb->esw.esw0.erw.cons) { - sprintf(dbf_text,"sens%4x",irq); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); - - QDIO_PRINT_WARN("sense data available on qdio channel.\n"); - HEXDUMP16(WARN,"irb: ",irb); - HEXDUMP16(WARN,"sense data: ",irb->ecw); - } - -} - -static inline void -qdio_handle_pci(struct qdio_irq *irq_ptr) -{ - int i; - struct qdio_q *q; - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.pcis++; - perf_stats.start_time_inbound=NOW; -#endif /* QDIO_PERFORMANCE_STATS */ - for (i=0;i<irq_ptr->no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) - qdio_mark_q(q); - else { -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs--; -#endif /* QDIO_PERFORMANCE_STATS */ - __qdio_inbound_processing(q); - } - } - if (!irq_ptr->hydra_gives_outbound_pcis) - return; - for (i=0;i<irq_ptr->no_output_qs;i++) { - q=irq_ptr->output_qs[i]; -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs--; -#endif /* QDIO_PERFORMANCE_STATS */ - if (qdio_is_outbound_q_done(q)) - continue; - if (!irq_ptr->sync_done_on_outb_pcis) - SYNC_MEMORY; - __qdio_outbound_processing(q); - } -} - -static void qdio_establish_handle_irq(struct ccw_device*, int, int); - -static inline void -qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, - int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *q; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - QDIO_DBF_TEXT2(1, trace, "ick2"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on activate " \ - "queues on device %s (cs=x%x, ds=x%x).\n", - cdev->dev.bus_id, cstat, dstat); - if (irq_ptr->no_input_qs) { - q=irq_ptr->input_qs[0]; - } else if (irq_ptr->no_output_qs) { - q=irq_ptr->output_qs[0]; - } else { - QDIO_PRINT_ERR("oops... 
no queue registered for device %s!?\n", - cdev->dev.bus_id); - goto omit_handler_call; - } - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0,0,0,-1,-1,q->int_parm); -omit_handler_call: - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED); - -} - -static void -qdio_call_shutdown(void *data) -{ - struct ccw_device *cdev; - - cdev = (struct ccw_device *)data; - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - put_device(&cdev->dev); -} - -static void -qdio_timeout_handler(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - QDIO_DBF_TEXT2(0, trace, "qtoh"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - irq_ptr = cdev->private->qdio_data; - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n", - irq_ptr->irq); - QDIO_DBF_TEXT2(1,setup,"eq:timeo"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_CLEANUP: - QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n", - irq_ptr->irq); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - /* I/O has been terminated by common I/O layer. */ - QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n", - irq_ptr->irq); - QDIO_DBF_TEXT2(1, trace, "cio:term"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - if (get_device(&cdev->dev)) { - /* Can't call shutdown from interrupt context. */ - PREPARE_WORK(&cdev->private->kick_work, - qdio_call_shutdown, (void *)cdev); - queue_work(ccw_device_work, &cdev->private->kick_work); - } - break; - default: - BUG(); - } - ccw_device_set_timeout(cdev, 0); - wake_up(&cdev->private->wait_q); -} - -static void -qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) -{ - struct qdio_irq *irq_ptr; - int cstat,dstat; - char dbf_text[15]; - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0, trace, "qint"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!intparm) { - QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \ - "handler, device %s\n", cdev->dev.bus_id); - return; - } - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) { - QDIO_DBF_TEXT2(1, trace, "uint"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_PRINT_ERR("received interrupt on unused device %s!\n", - cdev->dev.bus_id); - return; - } - - if (IS_ERR(irb)) { - /* Currently running i/o is in error. 
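
qdio_timeout_handler() cannot call qdio_shutdown() directly because it runs in interrupt context and shutdown may sleep, so it hands the call to the ccw_device workqueue. The following is a loose user-space analogy of that deferral, with a pthread standing in for the workqueue worker; it is not the kernel API, just the shape of the idea.

#include <pthread.h>
#include <stdio.h>

/* One deferred-work slot: the "interrupt handler" only records what has to be
 * done; a worker thread (standing in for the workqueue) runs it later, in a
 * context that is allowed to sleep. */
static void (*deferred_fn)(void *);
static void *deferred_arg;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_shutdown(void *dev)
{
	printf("shutting down %s (may sleep here)\n", (const char *)dev);
}

static void fake_irq_handler(void)
{
	pthread_mutex_lock(&lock);
	deferred_fn = slow_shutdown;	/* never call it here: no sleeping in IRQ context */
	deferred_arg = "qdio-device";
	pthread_mutex_unlock(&lock);
}

static void *worker(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	if (deferred_fn)
		deferred_fn(deferred_arg);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	fake_irq_handler();
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}
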
*/ - switch (PTR_ERR(irb)) { - case -EIO: - QDIO_PRINT_ERR("i/o error on device %s\n", - cdev->dev.bus_id); - return; - case -ETIMEDOUT: - qdio_timeout_handler(cdev); - return; - default: - QDIO_PRINT_ERR("unknown error state %ld on device %s\n", - PTR_ERR(irb), cdev->dev.bus_id); - return; - } - } - - qdio_irq_check_sense(irq_ptr->irq, irb); - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - qdio_establish_handle_irq(cdev, cstat, dstat); - break; - - case QDIO_IRQ_STATE_CLEANUP: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - break; - - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - if (cstat & SCHN_STAT_PCI) { - qdio_handle_pci(irq_ptr); - break; - } - - if ((cstat&~SCHN_STAT_PCI)||dstat) { - qdio_handle_activate_check(cdev, intparm, cstat, dstat); - break; - } - default: - QDIO_PRINT_ERR("got interrupt for queues in state %d on " \ - "device %s?!\n", - irq_ptr->state, cdev->dev.bus_id); - } - wake_up(&cdev->private->wait_q); - -} - -int -qdio_synchronize(struct ccw_device *cdev, unsigned int flags, - unsigned int queue_number) -{ - int cc; - struct qdio_q *q; - struct qdio_irq *irq_ptr; - void *ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]="SyncXXXX"; -#endif - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - *((int*)(&dbf_text[4])) = irq_ptr->irq; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); - *((int*)(&dbf_text[0]))=flags; - *((int*)(&dbf_text[4]))=queue_number; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); -#endif /* CONFIG_QDIO_DEBUG */ - - if (flags&QDIO_FLAG_SYNC_INPUT) { - q=irq_ptr->input_qs[queue_number]; - if (!q) - return -EINVAL; - cc = do_siga_sync(q->irq, 0, q->mask); - } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { - q=irq_ptr->output_qs[queue_number]; - if (!q) - return -EINVAL; - cc = do_siga_sync(q->irq, q->mask, 0); - } else - return -EINVAL; - - ptr=&cc; - if (cc) - QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int)); - - return cc; -} - -static unsigned char -qdio_check_siga_needs(int sch) -{ - int result; - unsigned char qdioac; - - struct { - struct chsc_header request; - u16 reserved1; - u16 first_sch; - u16 reserved2; - u16 last_sch; - u32 reserved3; - struct chsc_header response; - u32 reserved4; - u8 flags; - u8 reserved5; - u16 sch; - u8 qfmt; - u8 reserved6; - u8 qdioac; - u8 sch_class; - u8 reserved7; - u8 icnt; - u8 reserved8; - u8 ocnt; - } *ssqd_area; - - ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!ssqd_area) { - QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ - "SIGAs for sch x%x.\n", sch); - return CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ - } - ssqd_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0024, - }; - - ssqd_area->first_sch = sch; - ssqd_area->last_sch = sch; - - result=chsc(ssqd_area); - - if (result) { - QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ - "SIGAs for sch x%x.\n", - result,sch); - qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ - goto out; - } - - if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon checking SIGA needs " \ - "is 0x%x. 
Using all SIGAs for sch x%x.\n", - ssqd_area->response.code, sch); - qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ - goto out; - } - if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || - !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || - (ssqd_area->sch != sch)) { - QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ - "using all SIGAs.\n",sch); - qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | - CHSC_FLAG_SIGA_OUTPUT_NECESSARY | - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ - goto out; - } - - qdioac = ssqd_area->qdioac; -out: - free_page ((unsigned long) ssqd_area); - return qdioac; -} - -static unsigned int -tiqdio_check_chsc_availability(void) -{ - char dbf_text[15]; - - if (!css_characteristics_avail) - return -EIO; - - /* Check for bit 41. */ - if (!css_general_characteristics.aif) { - QDIO_PRINT_WARN("Adapter interruption facility not " \ - "installed.\n"); - return -ENOENT; - } - - /* Check for bits 107 and 108. */ - if (!css_chsc_characteristics.scssc || - !css_chsc_characteristics.scsscf) { - QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \ - "not available.\n"); - return -ENOENT; - } - - /* Check for OSA/FCP thin interrupts (bit 67). */ - hydra_thinints = css_general_characteristics.aif_osa; - sprintf(dbf_text,"hydrati%1x", hydra_thinints); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* Check for aif time delay disablement fac (bit 56). If installed, - * omit svs even under lpar (good point by rick again) */ - omit_svs = css_general_characteristics.aif_tdd; - sprintf(dbf_text,"omitsvs%1x", omit_svs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - return 0; -} - - -static unsigned int -tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) -{ - unsigned long real_addr_local_summary_bit; - unsigned long real_addr_dev_st_chg_ind; - void *ptr; - char dbf_text[15]; - - unsigned int resp_code; - int result; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u64 summary_indicator_addr; - u64 subchannel_indicator_addr; - u32 ks:4; - u32 kc:4; - u32 reserved4:21; - u32 isc:3; - u32 word_with_d_bit; - /* set to 0x10000000 to enable - * time delay disablement facility */ - u32 reserved5; - u32 subsystem_id; - u32 reserved6[1004]; - struct chsc_header response; - u32 reserved7; - } *scssc_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - if (reset_to_zero) { - real_addr_local_summary_bit=0; - real_addr_dev_st_chg_ind=0; - } else { - real_addr_local_summary_bit= - virt_to_phys((volatile void *)indicators); - real_addr_dev_st_chg_ind= - virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); - } - - scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!scssc_area) { - QDIO_PRINT_WARN("No memory for setting indicators on " \ - "subchannel x%x.\n", irq_ptr->irq); - return -ENOMEM; - } - scssc_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x0021, - }; - scssc_area->operation_code = 0; - - scssc_area->summary_indicator_addr = real_addr_local_summary_bit; - scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind; - scssc_area->ks = QDIO_STORAGE_KEY; - scssc_area->kc = QDIO_STORAGE_KEY; - scssc_area->isc = TIQDIO_THININT_ISC; - scssc_area->subsystem_id = (1<<16) + irq_ptr->irq; - /* enables the time delay disablement facility. Don't care - * whether it is really there (i.e. 
we haven't checked for - * it) */ - if (css_general_characteristics.aif_tdd) - scssc_area->word_with_d_bit = 0x10000000; - else - QDIO_PRINT_WARN("Time delay disablement facility " \ - "not available\n"); - - - - result = chsc(scssc_area); - if (result) { - QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ - "cc=%i.\n",irq_ptr->irq,result); - result = -EIO; - goto out; - } - - resp_code = scssc_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting indicators " \ - "is 0x%x.\n",resp_code); - sprintf(dbf_text,"sidR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scssc_area->response; - QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN); - result = -EIO; - goto out; - } - - QDIO_DBF_TEXT2(0,setup,"setscind"); - QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit, - sizeof(unsigned long)); - QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long)); - result = 0; -out: - free_page ((unsigned long) scssc_area); - return result; - -} - -static unsigned int -tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) -{ - unsigned int resp_code; - int result; - void *ptr; - char dbf_text[15]; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u32 reserved4[2]; - u32 delay_target; - u32 reserved5[1009]; - struct chsc_header response; - u32 reserved6; - } *scsscf_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!scsscf_area) { - QDIO_PRINT_WARN("No memory for setting delay target on " \ - "subchannel x%x.\n", irq_ptr->irq); - return -ENOMEM; - } - scsscf_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x1027, - }; - - scsscf_area->delay_target = delay_target<<16; - - result=chsc(scsscf_area); - if (result) { - QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ - "cc=%i. Continuing.\n",irq_ptr->irq,result); - result = -EIO; - goto out; - } - - resp_code = scsscf_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting delay target " \ - "is 0x%x. 
Continuing.\n",resp_code); - sprintf(dbf_text,"sdtR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scsscf_area->response; - QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN); - } - QDIO_DBF_TEXT2(0,trace,"delytrgt"); - QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long)); - result = 0; /* not critical */ -out: - free_page ((unsigned long) scsscf_area); - return result; -} - -int -qdio_cleanup(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - int rc; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - sprintf(dbf_text,"qcln%4x",irq_ptr->irq); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - rc = qdio_shutdown(cdev, how); - if ((rc == 0) || (rc == -EINPROGRESS)) - rc = qdio_free(cdev); - return rc; -} - -int -qdio_shutdown(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - int i; - int result = 0; - int rc; - unsigned long flags; - int timeout; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qsqs%4x",irq_ptr->irq); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* mark all qs as uninteresting */ - for (i=0;i<irq_ptr->no_input_qs;i++) - atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1); - - for (i=0;i<irq_ptr->no_output_qs;i++) - atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1); - - tasklet_kill(&tiqdio_tasklet); - - for (i=0;i<irq_ptr->no_input_qs;i++) { - qdio_unmark_q(irq_ptr->input_qs[i]); - tasklet_kill(&irq_ptr->input_qs[i]->tasklet); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - input_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->input_qs[i]->use_count)) - result=-EINPROGRESS; - } - - for (i=0;i<irq_ptr->no_output_qs;i++) { - tasklet_kill(&irq_ptr->output_qs[i]->tasklet); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - output_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->output_qs[i]->use_count)) - result=-EINPROGRESS; - } - - /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev),flags); - if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) { - rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_CLEAR_TIMEOUT; - } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) { - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } else { /* default behaviour */ - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } - if (rc == -ENODEV) { - /* No need to wait for device no longer present. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) { - /* - * Whoever put another handler there, has to cope with the - * interrupt theirself. Might happen if qdio_shutdown was - * called on already shutdown queues, but this shouldn't have - * bad side effects. 
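
The shutdown path first marks every queue as in-shutdown, kills the tasklets, and then waits with a timeout for each queue's use count to drain before touching the subchannel, reporting -EINPROGRESS if a queue never quiesces. A compact user-space model of that quiesce step, polling where the driver uses wait_event_interruptible_timeout():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static atomic_int use_count;		/* stand-in for q->use_count */
static atomic_bool in_shutdown;		/* stand-in for q->is_in_shutdown */

/* Flag the queue, then wait (bounded) for in-flight processing to release it;
 * a non-zero return stands in for the -EINPROGRESS of the original code. */
static int quiesce_queue(int timeout_s)
{
	time_t deadline = time(NULL) + timeout_s;

	atomic_store(&in_shutdown, true);
	while (atomic_load(&use_count) != 0) {
		if (time(NULL) >= deadline)
			return -1;
		usleep(1000);
	}
	return 0;
}

int main(void)
{
	printf("quiesce: %d\n", quiesce_queue(1));
	return 0;
}
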
- */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (rc == 0) { - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - ccw_device_set_timeout(cdev, timeout); - spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); - - wait_event(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || - irq_ptr->state == QDIO_IRQ_STATE_ERR); - } else { - QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " - "device %s\n", result, cdev->dev.bus_id); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - result = rc; - goto out; - } - if (irq_ptr->is_thinint_irq) { - qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind); - tiqdio_set_subchannel_ind(irq_ptr,1); - /* reset adapter interrupt indicators */ - } - - /* exchange int handlers, if necessary */ - if ((void*)cdev->handler == (void*)qdio_handler) - cdev->handler=irq_ptr->original_int_handler; - - /* Ignore errors. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - ccw_device_set_timeout(cdev, 0); -out: - up(&irq_ptr->setting_up_sema); - return result; -} - -int -qdio_free(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qfqs%4x",irq_ptr->irq); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - cdev->private->qdio_data = 0; - - up(&irq_ptr->setting_up_sema); - - qdio_release_irq_memory(irq_ptr); - module_put(THIS_MODULE); - return 0; -} - -static inline void -qdio_allocate_do_dbf(struct qdio_initialize *init_data) -{ - char dbf_text[20]; /* if a printf printed out more than 8 chars */ - - sprintf(dbf_text,"qfmt:%x",init_data->q_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8); - sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*)); - QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*)); - sprintf(dbf_text,"miit%4x",init_data->min_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"mait%4x",init_data->max_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"miot%4x",init_data->min_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"maot%4x",init_data->max_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"niq:%4x",init_data->no_input_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"noq:%4x",init_data->no_output_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); -} - -static inline void -qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) -{ - irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib); - - irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl); - - irq_ptr->qdr->qdf0[i].slsba= - 
(unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; -} - -static inline void -qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, - int j, int iqfmt) -{ - irq_ptr->output_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib); - - irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl); - - irq_ptr->qdr->qdf0[i+j].slsba= - (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY; -} - - -static inline void -qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->input_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->input_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static inline void -qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_output_qs;i++) { - irq_ptr->output_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->output_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->output_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static inline int -qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, - int dstat) -{ - char dbf_text[15]; - struct qdio_irq *irq_ptr; - - irq_ptr = cdev->private->qdio_data; - - if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { - sprintf(dbf_text,"ick1%4x",irq_ptr->irq); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on establish " \ - "queues on irq 0x%x (cs=x%x, ds=x%x).\n", - irq_ptr->irq,cstat,dstat); - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); - } - - if (!(dstat & DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:no de"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " - "device end: dstat=%02x, cstat=%02x\n", - irq_ptr->irq, dstat, cstat); - 
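
qdio_initialize_set_siga_flags_input()/_output() simply fan the adapter's qdioac characteristics word out into per-queue booleans so the fast path only tests a flag. A self-contained sketch of the same idea; the flag values and the struct are invented for illustration and do not match the real CHSC_FLAG_* definitions.

#include <stdbool.h>
#include <stdio.h>

/* Invented flag values for illustration only. */
#define FLAG_SIGA_SYNC		0x01
#define FLAG_SIGA_INPUT		0x02
#define FLAG_SIGA_OUTPUT	0x04

struct siga_caps {
	bool need_sync;
	bool need_input;
	bool need_output;
};

/* Translate the adapter's characteristics word into per-queue booleans once,
 * so later code only tests a flag instead of re-deriving it. */
static struct siga_caps derive_caps(unsigned int qdioac)
{
	return (struct siga_caps) {
		.need_sync   = qdioac & FLAG_SIGA_SYNC,
		.need_input  = qdioac & FLAG_SIGA_INPUT,
		.need_output = qdioac & FLAG_SIGA_OUTPUT,
	};
}

int main(void)
{
	struct siga_caps caps = derive_caps(FLAG_SIGA_SYNC | FLAG_SIGA_OUTPUT);

	printf("sync=%d in=%d out=%d\n", caps.need_sync, caps.need_input, caps.need_output);
	return 0;
}
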
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - - if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:badio"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq %04x: got " - "the following devstat: dstat=%02x, " - "cstat=%02x\n", - irq_ptr->irq, dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - return 0; -} - -static void -qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - sprintf(dbf_text,"qehi%4x",cdev->private->irq); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { - ccw_device_set_timeout(cdev, 0); - return; - } - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); - ccw_device_set_timeout(cdev, 0); -} - -int -qdio_initialize(struct qdio_initialize *init_data) -{ - int rc; - char dbf_text[15]; - - sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - rc = qdio_allocate(init_data); - if (rc == 0) { - rc = qdio_establish(init_data); - if (rc != 0) - qdio_free(init_data->cdev); - } - - return rc; -} - - -int -qdio_allocate(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || - (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) || - ((init_data->no_input_qs) && (!init_data->input_handler)) || - ((init_data->no_output_qs) && (!init_data->output_handler)) ) - return -EINVAL; - - if (!init_data->input_sbal_addr_array) - return -EINVAL; - - if (!init_data->output_sbal_addr_array) - return -EINVAL; - - qdio_allocate_do_dbf(init_data); - - /* create irq */ - irq_ptr=kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA); - - QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); - - if (!irq_ptr) { - QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n"); - return -ENOMEM; - } - - memset(irq_ptr,0,sizeof(struct qdio_irq)); - - init_MUTEX(&irq_ptr->setting_up_sema); - - /* QDR must be in DMA area since CCW data address is only 32 bit */ - irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); - if (!(irq_ptr->qdr)) { - kfree(irq_ptr); - QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n"); - return -ENOMEM; - } - QDIO_DBF_TEXT0(0,setup,"qdr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*)); - - if (qdio_alloc_qs(irq_ptr, - init_data->no_input_qs, - init_data->no_output_qs)) { - qdio_release_irq_memory(irq_ptr); - return -ENOMEM; - } - - init_data->cdev->private->qdio_data = irq_ptr; - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE); - - return 0; -} - -int qdio_fill_irq(struct qdio_initialize *init_data) -{ - int i; - char dbf_text[15]; - struct ciw *ciw; - int is_iqdio; - struct qdio_irq *irq_ptr; - - irq_ptr = init_data->cdev->private->qdio_data; - - memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr)); - - /* wipes qib.ac, required by ar7063 */ - memset(irq_ptr->qdr,0,sizeof(struct qdr)); - - irq_ptr->int_parm=init_data->int_parm; - - irq_ptr->irq = init_data->cdev->private->irq; - irq_ptr->no_input_qs=init_data->no_input_qs; - irq_ptr->no_output_qs=init_data->no_output_qs; - - if 
(init_data->q_format==QDIO_IQDIO_QFMT) { - irq_ptr->is_iqdio_irq=1; - irq_ptr->is_thinint_irq=1; - } else { - irq_ptr->is_iqdio_irq=0; - irq_ptr->is_thinint_irq=hydra_thinints; - } - sprintf(dbf_text,"is_i_t%1x%1x", - irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - if (irq_ptr->is_thinint_irq) { - irq_ptr->dev_st_chg_ind=qdio_get_indicator(); - QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); - if (!irq_ptr->dev_st_chg_ind) { - QDIO_PRINT_WARN("no indicator location available " \ - "for irq 0x%x\n",irq_ptr->irq); - qdio_release_irq_memory(irq_ptr); - return -ENOBUFS; - } - } - - /* defaults */ - irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD; - irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT; - irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD; - irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT; - - qdio_fill_qs(irq_ptr, init_data->cdev, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_handler, - init_data->output_handler,init_data->int_parm, - init_data->q_format,init_data->flags, - init_data->input_sbal_addr_array, - init_data->output_sbal_addr_array); - - if (!try_module_get(THIS_MODULE)) { - QDIO_PRINT_CRIT("try_module_get() failed!\n"); - qdio_release_irq_memory(irq_ptr); - return -EINVAL; - } - - qdio_fill_thresholds(irq_ptr,init_data->no_input_qs, - init_data->no_output_qs, - init_data->min_input_threshold, - init_data->max_input_threshold, - init_data->min_output_threshold, - init_data->max_output_threshold); - - /* fill in qdr */ - irq_ptr->qdr->qfmt=init_data->q_format; - irq_ptr->qdr->iqdcnt=init_data->no_input_qs; - irq_ptr->qdr->oqdcnt=init_data->no_output_qs; - irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */ - irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4; - - irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib; - irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; - - /* fill in qib */ - irq_ptr->qib.qfmt=init_data->q_format; - if (init_data->no_input_qs) - irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); - if (init_data->no_output_qs) - irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib); - memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8); - - qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format, - init_data->qib_param_field, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_slib_elements, - init_data->output_slib_elements); - - /* first input descriptors, then output descriptors */ - is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0; - for (i=0;i<init_data->no_input_qs;i++) - qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio); - - for (i=0;i<init_data->no_output_qs;i++) - qdio_allocate_fill_output_desc(irq_ptr, i, - init_data->no_input_qs, - is_iqdio); - - /* qdr, qib, sls, slsbs, slibs, sbales filled. */ - - /* get qdio commands */ - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no eq"); - QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. " - "Trying to use default.\n"); - } else - irq_ptr->equeue = *ciw; - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no aq"); - QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. " - "Trying to use default.\n"); - } else - irq_ptr->aqueue = *ciw; - - /* Set new interrupt handler. 
*/ - irq_ptr->original_int_handler = init_data->cdev->handler; - init_data->cdev->handler = qdio_handler; - - return 0; -} - -int -qdio_establish(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - unsigned long saveflags; - int result, result2; - struct ccw_device *cdev; - char dbf_text[20]; - - cdev=init_data->cdev; - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -EINVAL; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - - qdio_fill_irq(init_data); - - /* the thinint CHSC stuff */ - if (irq_ptr->is_thinint_irq) { - - result = tiqdio_set_subchannel_ind(irq_ptr,0); - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); - } - - sprintf(dbf_text,"qest%4x",cdev->private->irq); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - /* establish q */ - irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->equeue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_options(cdev, 0); - result=ccw_device_start_timeout(cdev,&irq_ptr->ccw, - QDIO_DOING_ESTABLISH,0, 0, - QDIO_ESTABLISH_TIMEOUT); - if (result) { - result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw, - QDIO_DOING_ESTABLISH,0,0, - QDIO_ESTABLISH_TIMEOUT); - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->irq,result,result2); - result=result2; - if (result) - ccw_device_set_timeout(cdev, 0); - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || - irq_ptr->state == QDIO_IRQ_STATE_ERR, - QDIO_ESTABLISH_TIMEOUT); - - if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) - result = 0; - else { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return -EIO; - } - - irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); - /* if this gets set once, we're running under VM and can omit SVSes */ - if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) - omit_svs=1; - - sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - irq_ptr->hydra_gives_outbound_pcis= - irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED; - irq_ptr->sync_done_on_outb_pcis= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS; - - qdio_initialize_set_siga_flags_input(irq_ptr); - qdio_initialize_set_siga_flags_output(irq_ptr); - - up(&irq_ptr->setting_up_sema); - - return result; - -} - -int -qdio_activate(struct ccw_device *cdev, int flags) -{ - struct qdio_irq *irq_ptr; - int i,result=0,result2; - unsigned long saveflags; - char dbf_text[20]; /* see qdio_initialize */ - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) { - 
result=-EBUSY; - goto out; - } - - sprintf(dbf_text,"qact%4x", irq_ptr->irq); - QDIO_DBF_TEXT2(0,setup,dbf_text); - QDIO_DBF_TEXT2(0,trace,dbf_text); - - /* activate q */ - irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->aqueue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(0); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_timeout(cdev, 0); - ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); - result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, - 0, DOIO_DENY_PREFETCH); - if (result) { - result2=ccw_device_start(cdev,&irq_ptr->ccw, - QDIO_DOING_ACTIVATE,0,0); - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->irq,result,result2); - result=result2; - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - if (result) - goto out; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - if (irq_ptr->is_thinint_irq) { - /* - * that way we know, that, if we will get interrupted - * by tiqdio_inbound_processing, qdio_unmark_q will - * not be called - */ - qdio_reserve_q(irq_ptr->input_qs[i]); - qdio_mark_tiq(irq_ptr->input_qs[i]); - qdio_release_q(irq_ptr->input_qs[i]); - } - } - - if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) { - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->is_input_q|= - QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT; - } - } - - wait_event_interruptible_timeout(cdev->private->wait_q, - ((irq_ptr->state == - QDIO_IRQ_STATE_STOPPED) || - (irq_ptr->state == - QDIO_IRQ_STATE_ERR)), - QDIO_ACTIVATE_TIMEOUT); - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_STOPPED: - case QDIO_IRQ_STATE_ERR: - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - down(&irq_ptr->setting_up_sema); - result = -EIO; - break; - default: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); - result = 0; - } - out: - up(&irq_ptr->setting_up_sema); - - return result; -} - -/* buffers filled forwards again to make Rick happy */ -static inline void -qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - for (;;) { - set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY); - count--; - if (!count) break; - qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - } - - /* not necessary, as the queues are synced during the SIGA read */ - /*SYNC_MEMORY;*/ -} - -static inline void -qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - for (;;) { - set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED); - count--; - if (!count) break; - qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - } - - /* SIGA write will sync the queues */ - /*SYNC_MEMORY;*/ -} - -static inline void -do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - - /* This is the inbound handling of queues */ - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - qdio_do_qdio_fill_input(q,qidx,count,buffers); - - if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&& - (callflags&QDIO_FLAG_UNDER_INTERRUPT)) - atomic_swap(&q->polling,0); - - if (used_elements) - return; - if (callflags&QDIO_FLAG_DONT_SIGA) - return; - if (q->siga_in) { - int result; - - 
result=qdio_siga_input(q); - if (result) { - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } - } - - qdio_mark_q(q); -} - -static inline void -do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - - /* This is the outbound handling of queues */ -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.start_time_outbound=NOW; -#endif /* QDIO_PERFORMANCE_STATS */ - - qdio_do_qdio_fill_output(q,qidx,count,buffers); - - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - if (callflags&QDIO_FLAG_DONT_SIGA) { -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; - perf_stats.outbound_cnt++; -#endif /* QDIO_PERFORMANCE_STATS */ - return; - } - if (q->is_iqdio_q) { - /* one siga for every sbal */ - while (count--) - qdio_kick_outbound_q(q); - - __qdio_outbound_processing(q); - } else { - /* under VM, we do a SIGA sync unconditionally */ - SYNC_MEMORY; - else { - /* - * w/o shadow queues (else branch of - * SYNC_MEMORY :-/ ), we try to - * fast-requeue buffers - */ - if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) - &(QDIO_MAX_BUFFERS_PER_Q-1)]!= - SLSB_CU_OUTPUT_PRIMED) { - qdio_kick_outbound_q(q); - } else { - QDIO_DBF_TEXT3(0,trace, "fast-req"); -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.fast_reqs++; -#endif /* QDIO_PERFORMANCE_STATS */ - } - } - /* - * only marking the q could take too long, - * the upper layer module could do a lot of - * traffic in that time - */ - __qdio_outbound_processing(q); - } - -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; - perf_stats.outbound_cnt++; -#endif /* QDIO_PERFORMANCE_STATS */ -} - -/* count must be 1 in iqdio */ -int -do_QDIO(struct ccw_device *cdev,unsigned int callflags, - unsigned int queue_number, unsigned int qidx, - unsigned int count,struct qdio_buffer *buffers) -{ - struct qdio_irq *irq_ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[20]; - - sprintf(dbf_text,"doQD%04x",cdev->private->irq); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) || - (count>QDIO_MAX_BUFFERS_PER_Q) || - (queue_number>QDIO_MAX_QUEUES_PER_IRQ) ) - return -EINVAL; - - if (count==0) - return 0; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - if (callflags&QDIO_FLAG_SYNC_INPUT) - QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number], - sizeof(void*)); - else - QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number], - sizeof(void*)); - sprintf(dbf_text,"flag%04x",callflags); - QDIO_DBF_TEXT3(0,trace,dbf_text); - sprintf(dbf_text,"qi%02xct%02x",qidx,count); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE) - return -EBUSY; - - if (callflags&QDIO_FLAG_SYNC_INPUT) - do_qdio_handle_inbound(irq_ptr->input_qs[queue_number], - callflags, qidx, count, buffers); - else if (callflags&QDIO_FLAG_SYNC_OUTPUT) - do_qdio_handle_outbound(irq_ptr->output_qs[queue_number], - callflags, qidx, count, buffers); - else { - QDIO_DBF_TEXT3(1,trace,"doQD:inv"); - return -EINVAL; - } - return 0; -} - -#ifdef QDIO_PERFORMANCE_STATS -static int -qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, - int buffer_length, int *eof, void *data) -{ - int c=0; - - /* we are always 
called with buffer_length=4k, so we all - deliver on the first read */ - if (offset>0) - return 0; - -#define _OUTP_IT(x...) c+=sprintf(buffer+c,x) - _OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c); - _OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c); - _OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c); - _OUTP_IT("Number of tasklet runs (total) : %u\n", - perf_stats.tl_runs); - _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %u\n", - perf_stats.siga_syncs); - _OUTP_IT("Number of SIGA in's issued : %u\n", - perf_stats.siga_ins); - _OUTP_IT("Number of SIGA out's issued : %u\n", - perf_stats.siga_outs); - _OUTP_IT("Number of PCIs caught : %u\n", - perf_stats.pcis); - _OUTP_IT("Number of adapter interrupts caught : %u\n", - perf_stats.thinints); - _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %u\n", - perf_stats.fast_reqs); - _OUTP_IT("\n"); - _OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n", - perf_stats.inbound_time); - _OUTP_IT("Number of inbound transfers : %u\n", - perf_stats.inbound_cnt); - _OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n", - perf_stats.outbound_time); - _OUTP_IT("Number of do_QDIOs outbound : %u\n", - perf_stats.outbound_cnt); - _OUTP_IT("\n"); - - return c; -} - -static struct proc_dir_entry *qdio_perf_proc_file; -#endif /* QDIO_PERFORMANCE_STATS */ - -static void -qdio_add_procfs_entry(void) -{ -#ifdef QDIO_PERFORMANCE_STATS - proc_perf_file_registration=0; - qdio_perf_proc_file=create_proc_entry(QDIO_PERF, - S_IFREG|0444,&proc_root); - if (qdio_perf_proc_file) { - qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; - } else proc_perf_file_registration=-1; - - if (proc_perf_file_registration) - QDIO_PRINT_WARN("was not able to register perf. " \ - "proc-file (%i).\n", - proc_perf_file_registration); -#endif /* QDIO_PERFORMANCE_STATS */ -} - -static void -qdio_remove_procfs_entry(void) -{ -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs=0; - - if (!proc_perf_file_registration) /* means if it went ok earlier */ - remove_proc_entry(QDIO_PERF,&proc_root); -#endif /* QDIO_PERFORMANCE_STATS */ -} - -static void -tiqdio_register_thinints(void) -{ - char dbf_text[20]; - register_thinint_result= - s390_register_adapter_interrupt(&tiqdio_thinint_handler); - if (register_thinint_result) { - sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff)); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_PRINT_ERR("failed to register adapter handler " \ - "(rc=%i).\nAdapter interrupts might " \ - "not work. 
Continuing.\n", - register_thinint_result); - } -} - -static void -tiqdio_unregister_thinints(void) -{ - if (!register_thinint_result) - s390_unregister_adapter_interrupt(&tiqdio_thinint_handler); -} - -static int -qdio_get_qdio_memory(void) -{ - int i; - indicator_used[0]=1; - - for (i=1;i<INDICATORS_PER_CACHELINE;i++) - indicator_used[i]=0; - indicators=(__u32*)kmalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), - GFP_KERNEL); - if (!indicators) return -ENOMEM; - memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE)); - return 0; -} - -static void -qdio_release_qdio_memory(void) -{ - if (indicators) - kfree(indicators); -} - -static void -qdio_unregister_dbf_views(void) -{ - if (qdio_dbf_setup) - debug_unregister(qdio_dbf_setup); - if (qdio_dbf_sbal) - debug_unregister(qdio_dbf_sbal); - if (qdio_dbf_sense) - debug_unregister(qdio_dbf_sense); - if (qdio_dbf_trace) - debug_unregister(qdio_dbf_trace); -#ifdef CONFIG_QDIO_DEBUG - if (qdio_dbf_slsb_out) - debug_unregister(qdio_dbf_slsb_out); - if (qdio_dbf_slsb_in) - debug_unregister(qdio_dbf_slsb_in); -#endif /* CONFIG_QDIO_DEBUG */ -} - -static int -qdio_register_dbf_views(void) -{ - qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME, - QDIO_DBF_SETUP_PAGES, - QDIO_DBF_SETUP_NR_AREAS, - QDIO_DBF_SETUP_LEN); - if (!qdio_dbf_setup) - goto oom; - debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL); - - qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME, - QDIO_DBF_SBAL_PAGES, - QDIO_DBF_SBAL_NR_AREAS, - QDIO_DBF_SBAL_LEN); - if (!qdio_dbf_sbal) - goto oom; - - debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL); - - qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME, - QDIO_DBF_SENSE_PAGES, - QDIO_DBF_SENSE_NR_AREAS, - QDIO_DBF_SENSE_LEN); - if (!qdio_dbf_sense) - goto oom; - - debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL); - - qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME, - QDIO_DBF_TRACE_PAGES, - QDIO_DBF_TRACE_NR_AREAS, - QDIO_DBF_TRACE_LEN); - if (!qdio_dbf_trace) - goto oom; - - debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL); - -#ifdef CONFIG_QDIO_DEBUG - qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME, - QDIO_DBF_SLSB_OUT_PAGES, - QDIO_DBF_SLSB_OUT_NR_AREAS, - QDIO_DBF_SLSB_OUT_LEN); - if (!qdio_dbf_slsb_out) - goto oom; - debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL); - - qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME, - QDIO_DBF_SLSB_IN_PAGES, - QDIO_DBF_SLSB_IN_NR_AREAS, - QDIO_DBF_SLSB_IN_LEN); - if (!qdio_dbf_slsb_in) - goto oom; - debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; -oom: - QDIO_PRINT_ERR("not enough memory for dbf.\n"); - qdio_unregister_dbf_views(); - return -ENOMEM; -} - -static int __init -init_QDIO(void) -{ - int res; -#ifdef QDIO_PERFORMANCE_STATS - void *ptr; -#endif /* QDIO_PERFORMANCE_STATS */ - - printk("qdio: loading %s\n",version); - - res=qdio_get_qdio_memory(); - if (res) - return res; - - res = qdio_register_dbf_views(); - if (res) - return res; - - QDIO_DBF_TEXT0(0,setup,"initQDIO"); - -#ifdef QDIO_PERFORMANCE_STATS - memset((void*)&perf_stats,0,sizeof(perf_stats)); - QDIO_DBF_TEXT0(0,setup,"perfstat"); - ptr=&perf_stats; - 
QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*)); -#endif /* QDIO_PERFORMANCE_STATS */ - - qdio_add_procfs_entry(); - - if (tiqdio_check_chsc_availability()) - QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); - - tiqdio_register_thinints(); - - return 0; - } - -static void __exit -cleanup_QDIO(void) -{ - tiqdio_unregister_thinints(); - qdio_remove_procfs_entry(); - qdio_release_qdio_memory(); - qdio_unregister_dbf_views(); - - printk("qdio: %s: module removed\n",version); -} - -module_init(init_QDIO); -module_exit(cleanup_QDIO); - -EXPORT_SYMBOL(qdio_allocate); -EXPORT_SYMBOL(qdio_establish); -EXPORT_SYMBOL(qdio_initialize); -EXPORT_SYMBOL(qdio_activate); -EXPORT_SYMBOL(do_QDIO); -EXPORT_SYMBOL(qdio_shutdown); -EXPORT_SYMBOL(qdio_free); -EXPORT_SYMBOL(qdio_cleanup); -EXPORT_SYMBOL(qdio_synchronize); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 6b8aa6a852b..a563e4c0059 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -1,70 +1,20 @@ +/* + * Copyright IBM Corp. 2000, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + */ #ifndef _CIO_QDIO_H #define _CIO_QDIO_H #include <asm/page.h> +#include <asm/schid.h> +#include <asm/debug.h> +#include "chsc.h" -#define VERSION_CIO_QDIO_H "$Revision: 1.33 $" - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_VERBOSE_LEVEL 9 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_VERBOSE_LEVEL 5 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_USE_PROCESSING_STATE - -#ifdef CONFIG_QDIO_PERF_STATS -#define QDIO_PERFORMANCE_STATS -#endif /* CONFIG_QDIO_PERF_STATS */ - -#define QDIO_MINIMAL_BH_RELIEF_TIME 16 -#define QDIO_TIMER_POLL_VALUE 1 -#define IQDIO_TIMER_POLL_VALUE 1 - -/* - * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as - * we never know, whether we'll get initiative again, e.g. to give the - * transmit skb's back to the stack, however the stack may be waiting for - * them... 
therefore we define 4 as threshold to start polling (which - * will stop as soon as the asynchronous queue catches up) - * btw, this only applies to the asynchronous HiperSockets queue - */ -#define IQDIO_FILL_LEVEL_TO_POLL 4 - -#define TIQDIO_THININT_ISC 3 -#define TIQDIO_DELAY_TARGET 0 -#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ -#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ -#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */ -#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */ -#define IQDIO_LOCAL_LAPS 4 -#define IQDIO_LOCAL_LAPS_INT 1 -#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2 -/*#define IQDIO_IQDC_INT_PARM 0x1234*/ - -#define QDIO_Q_LAPS 5 - -#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY - -#define L2_CACHELINE_SIZE 256 -#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32)) - -#define QDIO_PERF "qdio_perf" - -/* must be a power of 2 */ -/*#define QDIO_STATS_NUMBER 4 - -#define QDIO_STATS_CLASSES 2 -#define QDIO_STATS_COUNT_NEEDED 2*/ - -#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before - exiting without having use_count - of the queue to 0 */ - -#define QDIO_ESTABLISH_TIMEOUT (1*HZ) -#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10) -#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) -#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) +#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */ +#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */ +#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ +#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ enum qdio_irq_states { QDIO_IRQ_STATE_INACTIVE, @@ -76,575 +26,400 @@ enum qdio_irq_states { NR_QDIO_IRQ_STATES, }; -/* used as intparm in do_IO: */ -#define QDIO_DOING_SENSEID 0 -#define QDIO_DOING_ESTABLISH 1 -#define QDIO_DOING_ACTIVATE 2 -#define QDIO_DOING_CLEANUP 3 - -/************************* DEBUG FACILITY STUFF *********************/ - -#define QDIO_DBF_HEX(ex,name,level,addr,len) \ - do { \ - if (ex) \ - debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \ - else \ - debug_event(qdio_dbf_##name,level,(void*)(addr),len); \ - } while (0) -#define QDIO_DBF_TEXT(ex,name,level,text) \ - do { \ - if (ex) \ - debug_text_exception(qdio_dbf_##name,level,text); \ - else \ - debug_text_event(qdio_dbf_##name,level,text); \ - } while (0) - - -#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) -#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) -#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) -#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) -#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) -#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text) -#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) -#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text) -#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text) -#define 
QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text) -#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SETUP_NAME "qdio_setup" -#define QDIO_DBF_SETUP_LEN 8 -#define QDIO_DBF_SETUP_PAGES 4 -#define QDIO_DBF_SETUP_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SETUP_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SETUP_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */ -#define QDIO_DBF_SBAL_LEN 256 -#define QDIO_DBF_SBAL_PAGES 4 -#define QDIO_DBF_SBAL_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SBAL_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SBAL_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TRACE_NAME "qdio_trace" -#define QDIO_DBF_TRACE_LEN 8 -#define QDIO_DBF_TRACE_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TRACE_PAGES 16 -#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */ -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TRACE_PAGES 4 -#define QDIO_DBF_TRACE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SENSE_NAME "qdio_sense" -#define QDIO_DBF_SENSE_LEN 64 -#define QDIO_DBF_SENSE_PAGES 2 -#define QDIO_DBF_SENSE_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SENSE_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SENSE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT - -#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out" -#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_OUT_PAGES 256 -#define QDIO_DBF_SLSB_OUT_NR_AREAS 1 -#define QDIO_DBF_SLSB_OUT_LEVEL 6 - -#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in" -#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_IN_PAGES 256 -#define QDIO_DBF_SLSB_IN_NR_AREAS 1 -#define QDIO_DBF_SLSB_IN_LEVEL 6 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_PRINTK_HEADER QDIO_NAME ": " - -#if QDIO_VERBOSE_LEVEL>8 -#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_STUPID(x...) -#endif - -#if QDIO_VERBOSE_LEVEL>7 -#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALL(x...) -#endif - -#if QDIO_VERBOSE_LEVEL>6 -#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_INFO(x...) -#endif - -#if QDIO_VERBOSE_LEVEL>5 -#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_WARN(x...) -#endif - -#if QDIO_VERBOSE_LEVEL>4 -#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ERR(x...) -#endif - -#if QDIO_VERBOSE_LEVEL>3 -#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_CRIT(x...) -#endif - -#if QDIO_VERBOSE_LEVEL>2 -#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALERT(x...) -#endif - -#if QDIO_VERBOSE_LEVEL>1 -#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_EMERG(x...) 
-#endif - -#define HEXDUMP16(importance,header,ptr) \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x\n",*(((char*)ptr)), \ - *(((char*)ptr)+1),*(((char*)ptr)+2), \ - *(((char*)ptr)+3),*(((char*)ptr)+4), \ - *(((char*)ptr)+5),*(((char*)ptr)+6), \ - *(((char*)ptr)+7),*(((char*)ptr)+8), \ - *(((char*)ptr)+9),*(((char*)ptr)+10), \ - *(((char*)ptr)+11),*(((char*)ptr)+12), \ - *(((char*)ptr)+13),*(((char*)ptr)+14), \ - *(((char*)ptr)+15)); \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)+16),*(((char*)ptr)+17), \ - *(((char*)ptr)+18),*(((char*)ptr)+19), \ - *(((char*)ptr)+20),*(((char*)ptr)+21), \ - *(((char*)ptr)+22),*(((char*)ptr)+23), \ - *(((char*)ptr)+24),*(((char*)ptr)+25), \ - *(((char*)ptr)+26),*(((char*)ptr)+27), \ - *(((char*)ptr)+28),*(((char*)ptr)+29), \ - *(((char*)ptr)+30),*(((char*)ptr)+31)); - -/****************** END OF DEBUG FACILITY STUFF *********************/ +/* used as intparm in do_IO */ +#define QDIO_DOING_ESTABLISH 1 +#define QDIO_DOING_ACTIVATE 2 +#define QDIO_DOING_CLEANUP 3 + +#define SLSB_STATE_NOT_INIT 0x0 +#define SLSB_STATE_EMPTY 0x1 +#define SLSB_STATE_PRIMED 0x2 +#define SLSB_STATE_PENDING 0x3 +#define SLSB_STATE_HALTED 0xe +#define SLSB_STATE_ERROR 0xf +#define SLSB_TYPE_INPUT 0x0 +#define SLSB_TYPE_OUTPUT 0x20 +#define SLSB_OWNER_PROG 0x80 +#define SLSB_OWNER_CU 0x40 + +#define SLSB_P_INPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */ +#define SLSB_P_INPUT_ACK \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */ +#define SLSB_CU_INPUT_EMPTY \ + (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */ +#define SLSB_P_INPUT_PRIMED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */ +#define SLSB_P_INPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */ +#define SLSB_P_INPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */ +#define SLSB_P_OUTPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ +#define SLSB_P_OUTPUT_EMPTY \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ +#define SLSB_P_OUTPUT_PENDING \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */ +#define SLSB_CU_OUTPUT_PRIMED \ + (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ +#define SLSB_P_OUTPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */ +#define SLSB_P_OUTPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */ + +#define SLSB_ERROR_DURING_LOOKUP 0xff + +/* additional CIWs returned by extended Sense-ID */ +#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ +#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ -/* - * Some instructions as assembly - */ -extern __inline__ int -do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) -{ - int cc; - -#ifndef CONFIG_ARCH_S390X - asm volatile ( - "lhi 0,2 \n\t" - "lr 1,%1 \n\t" - "lr 2,%2 \n\t" - "lr 3,%3 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) - : "d" (0x10000|irq), "d" (mask1), "d" (mask2) - : "cc", "0", "1", "2", "3" - ); -#else /* CONFIG_ARCH_S390X */ - asm volatile ( - "lghi 0,2 \n\t" - "llgfr 1,%1 \n\t" - "llgfr 2,%2 \n\t" - "llgfr 3,%3 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) - : "d" (0x10000|irq), "d" (mask1), 
"d" (mask2) - : "cc", "0", "1", "2", "3" - ); -#endif /* CONFIG_ARCH_S390X */ - return cc; -} - -extern __inline__ int -do_siga_input(unsigned int irq, unsigned int mask) +/* flags for st qdio sch data */ +#define CHSC_FLAG_QDIO_CAPABILITY 0x80 +#define CHSC_FLAG_VALIDITY 0x40 + +/* SIGA flags */ +#define QDIO_SIGA_WRITE 0x00 +#define QDIO_SIGA_READ 0x01 +#define QDIO_SIGA_SYNC 0x02 +#define QDIO_SIGA_WRITEQ 0x04 +#define QDIO_SIGA_QEBSM_FLAG 0x80 + +#ifdef CONFIG_64BIT +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) { - int cc; - -#ifndef CONFIG_ARCH_S390X - asm volatile ( - "lhi 0,1 \n\t" - "lr 1,%1 \n\t" - "lr 2,%2 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) - : "d" (0x10000|irq), "d" (mask) - : "cc", "0", "1", "2", "memory" - ); -#else /* CONFIG_ARCH_S390X */ - asm volatile ( - "lghi 0,1 \n\t" - "llgfr 1,%1 \n\t" - "llgfr 2,%2 \n\t" - "siga 0 \n\t" - "ipm %0 \n\t" - "srl %0,28 \n\t" - : "=d" (cc) - : "d" (0x10000|irq), "d" (mask) - : "cc", "0", "1", "2", "memory" - ); -#endif /* CONFIG_ARCH_S390X */ - - return cc; + register unsigned long _ccq asm ("0") = *count; + register unsigned long _token asm ("1") = token; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; + + asm volatile( + " .insn rsy,0xeb000000008A,%1,0,0(%2)" + : "+d" (_ccq), "+d" (_queuestart) + : "d" ((unsigned long)state), "d" (_token) + : "memory", "cc"); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; + + return (_ccq >> 32) & 0xff; } -extern __inline__ int -do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) +static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count, int ack) { - int cc; - __u32 busy_bit; - -#ifndef CONFIG_ARCH_S390X - asm volatile ( - "lhi 0,0 \n\t" - "lr 1,%2 \n\t" - "lr 2,%3 \n\t" - "siga 0 \n\t" - "0:" - "ipm %0 \n\t" - "srl %0,28 \n\t" - "srl 0,31 \n\t" - "lr %1,0 \n\t" - "1: \n\t" - ".section .fixup,\"ax\"\n\t" - "2: \n\t" - "lhi %0,%4 \n\t" - "bras 1,3f \n\t" - ".long 1b \n\t" - "3: \n\t" - "l 1,0(1) \n\t" - "br 1 \n\t" - ".previous \n\t" - ".section __ex_table,\"a\"\n\t" - ".align 4 \n\t" - ".long 0b,2b \n\t" - ".previous \n\t" - : "=d" (cc), "=d" (busy_bit) - : "d" (0x10000|irq), "d" (mask), - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) - : "cc", "0", "1", "2", "memory" - ); -#else /* CONFIG_ARCH_S390X */ - asm volatile ( - "lghi 0,0 \n\t" - "llgfr 1,%2 \n\t" - "llgfr 2,%3 \n\t" - "siga 0 \n\t" - "0:" - "ipm %0 \n\t" - "srl %0,28 \n\t" - "srl 0,31 \n\t" - "llgfr %1,0 \n\t" - "1: \n\t" - ".section .fixup,\"ax\"\n\t" - "lghi %0,%4 \n\t" - "jg 1b \n\t" - ".previous\n\t" - ".section __ex_table,\"a\"\n\t" - ".align 8 \n\t" - ".quad 0b,1b \n\t" - ".previous \n\t" - : "=d" (cc), "=d" (busy_bit) - : "d" (0x10000|irq), "d" (mask), - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) - : "cc", "0", "1", "2", "memory" - ); -#endif /* CONFIG_ARCH_S390X */ - - (*bb) = busy_bit; - return cc; + register unsigned long _ccq asm ("0") = *count; + register unsigned long _token asm ("1") = token; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; + unsigned long _state = (unsigned long)ack << 63; + + asm volatile( + " .insn rrf,0xB99c0000,%1,%2,0,0" + : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) + : "d" (_token) + : "memory", "cc"); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; + *state = _state & 0xff; + + return (_ccq >> 32) & 0xff; } +#else +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) { return 0; } 
+static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count, int ack) { return 0; } +#endif /* CONFIG_64BIT */ + +struct qdio_irq; + +struct siga_flag { + u8 input:1; + u8 output:1; + u8 sync:1; + u8 sync_after_ai:1; + u8 sync_out_after_pci:1; + u8:3; +} __attribute__ ((packed)); + +struct qdio_dev_perf_stat { + unsigned int adapter_int; + unsigned int qdio_int; + unsigned int pci_request_int; + + unsigned int tasklet_inbound; + unsigned int tasklet_inbound_resched; + unsigned int tasklet_inbound_resched2; + unsigned int tasklet_outbound; + + unsigned int siga_read; + unsigned int siga_write; + unsigned int siga_sync; -extern __inline__ unsigned long -do_clear_global_summary(void) -{ + unsigned int inbound_call; + unsigned int inbound_handler; + unsigned int stop_polling; + unsigned int inbound_queue_full; + unsigned int outbound_call; + unsigned int outbound_handler; + unsigned int outbound_queue_full; + unsigned int fast_requeue; + unsigned int target_full; + unsigned int eqbs; + unsigned int eqbs_partial; + unsigned int sqbs; + unsigned int sqbs_partial; + unsigned int int_discarded; +} ____cacheline_aligned; + +struct qdio_queue_perf_stat { + /* + * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. + * Since max. 127 SBALs are scanned reuse entry for 128 as queue full + * aka 127 SBALs found. + */ + unsigned int nr_sbals[8]; + unsigned int nr_sbal_error; + unsigned int nr_sbal_nop; + unsigned int nr_sbal_total; +}; - unsigned long time; - -#ifndef CONFIG_ARCH_S390X - asm volatile ( - "lhi 1,3 \n\t" - ".insn rre,0xb2650000,2,0 \n\t" - "lr %0,3 \n\t" - : "=d" (time) : : "cc", "1", "2", "3" - ); -#else /* CONFIG_ARCH_S390X */ - asm volatile ( - "lghi 1,3 \n\t" - ".insn rre,0xb2650000,2,0 \n\t" - "lgr %0,3 \n\t" - : "=d" (time) : : "cc", "1", "2", "3" - ); -#endif /* CONFIG_ARCH_S390X */ - - return time; -} - -/* - * QDIO device commands returned by extended Sense-ID - */ -#define DEFAULT_ESTABLISH_QS_CMD 0x1b -#define DEFAULT_ESTABLISH_QS_COUNT 0x1000 -#define DEFAULT_ACTIVATE_QS_CMD 0x1f -#define DEFAULT_ACTIVATE_QS_COUNT 0 +enum qdio_queue_irq_states { + QDIO_QUEUE_IRQS_DISABLED, +}; -/* - * additional CIWs returned by extended Sense-ID - */ -#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ -#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ +struct qdio_input_q { + /* input buffer acknowledgement flag */ + int polling; + /* first ACK'ed buffer */ + int ack_start; + /* how much sbals are acknowledged with qebsm */ + int ack_count; + /* last time of noticing incoming data */ + u64 timestamp; + /* upper-layer polling flag */ + unsigned long queue_irq_state; + /* callback to start upper-layer polling */ + void (*queue_start_poll) (struct ccw_device *, int, unsigned long); +}; -#define QDIO_CHSC_RESPONSE_CODE_OK 1 -/* flags for st qdio sch data */ -#define CHSC_FLAG_QDIO_CAPABILITY 0x80 -#define CHSC_FLAG_VALIDITY 0x40 - -#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 -#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 -#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 - -#ifdef QDIO_PERFORMANCE_STATS -struct qdio_perf_stats { - unsigned int tl_runs; - - unsigned int siga_outs; - unsigned int siga_ins; - unsigned int siga_syncs; - unsigned int pcis; - unsigned int thinints; - unsigned int fast_reqs; - - __u64 start_time_outbound; - unsigned int outbound_cnt; - unsigned int outbound_time; - __u64 start_time_inbound; - unsigned int inbound_cnt; - 
unsigned int inbound_time; +struct qdio_output_q { + /* PCIs are enabled for the queue */ + int pci_out_enabled; + /* cq: use asynchronous output buffers */ + int use_cq; + /* cq: aobs used for particual SBAL */ + struct qaob **aobs; + /* cq: sbal state related to asynchronous operation */ + struct qdio_outbuf_state *sbal_state; + /* timer to check for more outbound work */ + struct timer_list timer; + /* used SBALs before tasklet schedule */ + int scan_threshold; }; -#endif /* QDIO_PERFORMANCE_STATS */ - -#define atomic_swap(a,b) xchg((int*)a.counter,b) - -/* unlikely as the later the better */ -#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) -#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,~0U) -#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,0) - -#define NOW qdio_get_micros() -#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW -#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time) -#define SAVE_FRONTIER(q,val) q->last_move_ftc=val -#define GET_SAVED_FRONTIER(q) (q->last_move_ftc) - -#define MY_MODULE_STRING(x) #x - -#ifdef CONFIG_ARCH_S390X -#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) -#else /* CONFIG_ARCH_S390X */ -#define QDIO_GET_ADDR(x) ((__u32)(long)x) -#endif /* CONFIG_ARCH_S390X */ - -#ifdef CONFIG_QDIO_DEBUG -#define set_slsb(x,y) \ - if(q->queue_type==QDIO_TRACE_QTYPE) { \ - if(q->is_input_q) { \ - QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } else { \ - QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } \ - } \ - qdio_set_slsb(x,y); \ - if(q->queue_type==QDIO_TRACE_QTYPE) { \ - if(q->is_input_q) { \ - QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } else { \ - QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } \ - } -#else /* CONFIG_QDIO_DEBUG */ -#define set_slsb(x,y) qdio_set_slsb(x,y) -#endif /* CONFIG_QDIO_DEBUG */ +/* + * Note on cache alignment: grouped slsb and write mostly data at the beginning + * sbal[] is read-only and starts on a new cacheline followed by read mostly. + */ struct qdio_q { - volatile struct slsb slsb; - - char unused[QDIO_MAX_BUFFERS_PER_Q]; + struct slsb slsb; - __u32 * volatile dev_st_chg_ind; + union { + struct qdio_input_q in; + struct qdio_output_q out; + } u; - int is_input_q; - int irq; - struct ccw_device *cdev; + /* + * inbound: next buffer the program should check for + * outbound: next buffer to check if adapter processed it + */ + int first_to_check; - unsigned int is_iqdio_q; - unsigned int is_thinint_q; + /* first_to_check of the last time */ + int last_move; - /* bit 0 means queue 0, bit 1 means queue 1, ... 
*/ - unsigned int mask; - unsigned int q_no; + /* beginning position for calling the program */ + int first_to_kick; - qdio_handler_t (*handler); + /* number of buffers in use by the adapter */ + atomic_t nr_buf_used; - /* points to the next buffer to be checked for having - * been processed by the card (outbound) - * or to the next buffer the program should check for (inbound) */ - volatile int first_to_check; - /* and the last time it was: */ - volatile int last_move_ftc; - - atomic_t number_of_buffers_used; - atomic_t polling; - - unsigned int siga_in; - unsigned int siga_out; - unsigned int siga_sync; - unsigned int siga_sync_done_on_thinints; - unsigned int siga_sync_done_on_outb_tis; - unsigned int hydra_gives_outbound_pcis; - - /* used to save beginning position when calling dd_handlers */ - int first_element_to_kick; - - atomic_t use_count; - atomic_t is_in_shutdown; + /* error condition during a data transfer */ + unsigned int qdio_error; - void *irq_ptr; + /* last scan of the queue */ + u64 timestamp; -#ifdef QDIO_USE_TIMERS_FOR_POLLING - struct timer_list timer; - atomic_t timer_already_set; - spinlock_t timer_lock; -#else /* QDIO_USE_TIMERS_FOR_POLLING */ struct tasklet_struct tasklet; -#endif /* QDIO_USE_TIMERS_FOR_POLLING */ + struct qdio_queue_perf_stat q_stats; - enum qdio_irq_states state; + struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned; - /* used to store the error condition during a data transfer */ - unsigned int qdio_error; - unsigned int siga_error; - unsigned int error_status_flags; + /* queue number */ + int nr; - /* list of interesting queues */ - volatile struct qdio_q *list_next; - volatile struct qdio_q *list_prev; + /* bitmask of queue number */ + int mask; - struct sl *sl; - volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; + /* input or output queue */ + int is_input_q; - struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; + /* list of thinint input queues */ + struct list_head entry; - unsigned long int_parm; + /* upper-layer program handler */ + qdio_handler_t (*handler); - /*struct { - int in_bh_check_limit; - int threshold; - } threshold_classes[QDIO_STATS_CLASSES];*/ - - struct { - /* inbound: the time to stop polling - outbound: the time to kick peer */ - int threshold; /* the real value */ - - /* outbound: last time of do_QDIO - inbound: last time of noticing incoming data */ - /*__u64 last_transfer_times[QDIO_STATS_NUMBER]; - int last_transfer_index; */ - - __u64 last_transfer_time; - __u64 busy_start; - } timing; - atomic_t busy_siga_counter; - unsigned int queue_type; - - /* leave this member at the end. won't be cleared in qdio_fill_qs */ - struct slib *slib; /* a page is allocated under this pointer, - sl points into this page, offset PAGE_SIZE/2 - (after slib) */ + struct dentry *debugfs_q; + struct qdio_irq *irq_ptr; + struct sl *sl; + /* + * A page is allocated under this pointer and used for slib and sl. + * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2. 
+ */ + struct slib *slib; } __attribute__ ((aligned(256))); struct qdio_irq { - __u32 * volatile dev_st_chg_ind; + struct qib qib; + u32 *dsci; /* address of device state change indicator */ + struct ccw_device *cdev; + struct dentry *debugfs_dev; + struct dentry *debugfs_perf; unsigned long int_parm; - int irq; - - unsigned int is_iqdio_irq; - unsigned int is_thinint_irq; - unsigned int hydra_gives_outbound_pcis; - unsigned int sync_done_on_outb_pcis; + struct subchannel_id schid; + unsigned long sch_token; /* QEBSM facility */ enum qdio_irq_states state; - unsigned int no_input_qs; - unsigned int no_output_qs; + struct siga_flag siga_flag; /* siga sync information from qdioac */ - unsigned char qdioac; + int nr_input_qs; + int nr_output_qs; struct ccw1 ccw; - struct ciw equeue; struct ciw aqueue; - struct qib qib; - - void (*original_int_handler) (struct ccw_device *, - unsigned long, struct irb *); + struct qdio_ssqd_desc ssqd_desc; + void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); + + int perf_stat_enabled; - /* leave these four members together at the end. won't be cleared in qdio_fill_irq */ struct qdr *qdr; + unsigned long chsc_page; + struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; - struct semaphore setting_up_sema; + + debug_info_t *debug_area; + struct mutex setup_mutex; + struct qdio_dev_perf_stat perf_stat; }; -#endif + +/* helper functions */ +#define queue_type(q) q->irq_ptr->qib.qfmt +#define SCH_NO(q) (q->irq_ptr->schid.sch_no) + +#define is_thinint_irq(irq) \ + (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ + css_general_characteristics.aif_osa) + +#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) + +#define qperf_inc(__q, __attr) \ +({ \ + struct qdio_irq *qdev = (__q)->irq_ptr; \ + if (qdev->perf_stat_enabled) \ + (qdev->perf_stat.__attr)++; \ +}) + +static inline void account_sbals_error(struct qdio_q *q, int count) +{ + q->q_stats.nr_sbal_error += count; + q->q_stats.nr_sbal_total += count; +} + +/* the highest iqdio queue is used for multicast */ +static inline int multicast_outbound(struct qdio_q *q) +{ + return (q->irq_ptr->nr_output_qs > 1) && + (q->nr == q->irq_ptr->nr_output_qs - 1); +} + +#define pci_out_supported(q) \ + (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) +#define is_qebsm(q) (q->irq_ptr->sch_token != 0) + +#define need_siga_in(q) (q->irq_ptr->siga_flag.input) +#define need_siga_out(q) (q->irq_ptr->siga_flag.output) +#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync)) +#define need_siga_sync_after_ai(q) \ + (unlikely(q->irq_ptr->siga_flag.sync_after_ai)) +#define need_siga_sync_out_after_pci(q) \ + (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci)) + +#define for_each_input_queue(irq_ptr, q, i) \ + for (i = 0; i < irq_ptr->nr_input_qs && \ + ({ q = irq_ptr->input_qs[i]; 1; }); i++) +#define for_each_output_queue(irq_ptr, q, i) \ + for (i = 0; i < irq_ptr->nr_output_qs && \ + ({ q = irq_ptr->output_qs[i]; 1; }); i++) + +#define prev_buf(bufnr) \ + ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) +#define next_buf(bufnr) \ + ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) +#define add_buf(bufnr, inc) \ + ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) +#define sub_buf(bufnr, dec) \ + ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) + +#define queue_irqs_enabled(q) \ + (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0) +#define queue_irqs_disabled(q) \ + (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0) + +extern u64 last_ai_time; + 
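(Editorial illustration, not part of the patch: the ring-index helpers and the qperf_inc() accounting macro introduced above are typically used together when walking a queue's SBAL ring. The sketch below is hypothetical; example_scan() and its parameters are invented names for this note, and it assumes QDIO_MAX_BUFFERS_MASK from asm/qdio.h, i.e. the usual 128-entry SBAL ring.)

	/* minimal usage sketch -- illustrative only, not added by this patch */
	static void example_scan(struct qdio_q *q, int start, int count)
	{
		int bufnr = start;

		qperf_inc(q, inbound_call);	/* counted only if perf_stat_enabled */
		while (count-- > 0) {
			/* inspect q->sbal[bufnr] here */
			bufnr = next_buf(bufnr);	/* wraps at QDIO_MAX_BUFFERS_MASK */
		}
	}
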
+/* prototypes for thin interrupt */ +void qdio_setup_thinint(struct qdio_irq *irq_ptr); +int qdio_establish_thinint(struct qdio_irq *irq_ptr); +void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); +void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_inbound_processing(unsigned long q); +int tiqdio_allocate_memory(void); +void tiqdio_free_memory(void); +int tiqdio_register_thinints(void); +void tiqdio_unregister_thinints(void); +void clear_nonshared_ind(struct qdio_irq *); +int test_nonshared_ind(struct qdio_irq *); + +/* prototypes for setup */ +void qdio_inbound_processing(unsigned long data); +void qdio_outbound_processing(unsigned long data); +void qdio_outbound_timer(unsigned long data); +void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb); +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, + int nr_output_qs); +void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); +int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, + struct subchannel_id *schid, + struct qdio_ssqd_desc *data); +int qdio_setup_irq(struct qdio_initialize *init_data); +void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +void qdio_release_memory(struct qdio_irq *irq_ptr); +int qdio_setup_create_sysfs(struct ccw_device *cdev); +void qdio_setup_destroy_sysfs(struct ccw_device *cdev); +int qdio_setup_init(void); +void qdio_setup_exit(void); +int qdio_enable_async_operation(struct qdio_output_q *q); +void qdio_disable_async_operation(struct qdio_output_q *q); +struct qaob *qdio_allocate_aob(void); + +int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state); +#endif /* _CIO_QDIO_H */ diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c new file mode 100644 index 00000000000..f1f3baa8e6e --- /dev/null +++ b/drivers/s390/cio/qdio_debug.c @@ -0,0 +1,373 @@ +/* + * Copyright IBM Corp. 
2008, 2009 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <asm/debug.h> +#include "qdio_debug.h" +#include "qdio.h" + +debug_info_t *qdio_dbf_setup; +debug_info_t *qdio_dbf_error; + +static struct dentry *debugfs_root; +#define QDIO_DEBUGFS_NAME_LEN 10 +#define QDIO_DBF_NAME_LEN 20 + +struct qdio_dbf_entry { + char dbf_name[QDIO_DBF_NAME_LEN]; + debug_info_t *dbf_info; + struct list_head dbf_list; +}; + +static LIST_HEAD(qdio_dbf_list); +static DEFINE_MUTEX(qdio_dbf_list_mutex); + +static debug_info_t *qdio_get_dbf_entry(char *name) +{ + struct qdio_dbf_entry *entry; + debug_info_t *rc = NULL; + + mutex_lock(&qdio_dbf_list_mutex); + list_for_each_entry(entry, &qdio_dbf_list, dbf_list) { + if (strcmp(entry->dbf_name, name) == 0) { + rc = entry->dbf_info; + break; + } + } + mutex_unlock(&qdio_dbf_list_mutex); + return rc; +} + +static void qdio_clear_dbf_list(void) +{ + struct qdio_dbf_entry *entry, *tmp; + + mutex_lock(&qdio_dbf_list_mutex); + list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) { + list_del(&entry->dbf_list); + debug_unregister(entry->dbf_info); + kfree(entry); + } + mutex_unlock(&qdio_dbf_list_mutex); +} + +int qdio_allocate_dbf(struct qdio_initialize *init_data, + struct qdio_irq *irq_ptr) +{ + char text[QDIO_DBF_NAME_LEN]; + struct qdio_dbf_entry *new_entry; + + DBF_EVENT("qfmt:%1d", init_data->q_format); + DBF_HEX(init_data->adapter_name, 8); + DBF_EVENT("qpff%4x", init_data->qib_param_field_format); + DBF_HEX(&init_data->qib_param_field, sizeof(void *)); + DBF_HEX(&init_data->input_slib_elements, sizeof(void *)); + DBF_HEX(&init_data->output_slib_elements, sizeof(void *)); + DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs, + init_data->no_output_qs); + DBF_HEX(&init_data->input_handler, sizeof(void *)); + DBF_HEX(&init_data->output_handler, sizeof(void *)); + DBF_HEX(&init_data->int_parm, sizeof(long)); + DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *)); + DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *)); + DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); + + /* allocate trace view for the interface */ + snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s", + dev_name(&init_data->cdev->dev)); + irq_ptr->debug_area = qdio_get_dbf_entry(text); + if (irq_ptr->debug_area) + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused"); + else { + irq_ptr->debug_area = debug_register(text, 2, 1, 16); + if (!irq_ptr->debug_area) + return -ENOMEM; + if (debug_register_view(irq_ptr->debug_area, + &debug_hex_ascii_view)) { + debug_unregister(irq_ptr->debug_area); + return -ENOMEM; + } + debug_set_level(irq_ptr->debug_area, DBF_WARN); + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); + new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL); + if (!new_entry) { + debug_unregister(irq_ptr->debug_area); + return -ENOMEM; + } + strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN); + new_entry->dbf_info = irq_ptr->debug_area; + mutex_lock(&qdio_dbf_list_mutex); + list_add(&new_entry->dbf_list, &qdio_dbf_list); + mutex_unlock(&qdio_dbf_list_mutex); + } + return 0; +} + +static int qstat_show(struct seq_file *m, void *v) +{ + unsigned char state; + struct qdio_q *q = m->private; + int i; + + if (!q) + return 0; + + seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n", + q->timestamp, last_ai_time); + seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n", + atomic_read(&q->nr_buf_used), + q->first_to_check, q->last_move); 
+ if (q->is_input_q) { + seq_printf(m, "polling: %d ack start: %d ack count: %d\n", + q->u.in.polling, q->u.in.ack_start, + q->u.in.ack_count); + seq_printf(m, "DSCI: %d IRQs disabled: %u\n", + *(u32 *)q->irq_ptr->dsci, + test_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)); + } + seq_printf(m, "SBAL states:\n"); + seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { + debug_get_buf_state(q, i, &state); + switch (state) { + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_OUTPUT_NOT_INIT: + seq_printf(m, "N"); + break; + case SLSB_P_OUTPUT_PENDING: + seq_printf(m, "P"); + break; + case SLSB_P_INPUT_PRIMED: + case SLSB_CU_OUTPUT_PRIMED: + seq_printf(m, "+"); + break; + case SLSB_P_INPUT_ACK: + seq_printf(m, "A"); + break; + case SLSB_P_INPUT_ERROR: + case SLSB_P_OUTPUT_ERROR: + seq_printf(m, "x"); + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_OUTPUT_EMPTY: + seq_printf(m, "-"); + break; + case SLSB_P_INPUT_HALTED: + case SLSB_P_OUTPUT_HALTED: + seq_printf(m, "."); + break; + default: + seq_printf(m, "?"); + } + if (i == 63) + seq_printf(m, "\n"); + } + seq_printf(m, "\n"); + seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); + + seq_printf(m, "\nSBAL statistics:"); + if (!q->irq_ptr->perf_stat_enabled) { + seq_printf(m, " disabled\n"); + return 0; + } + + seq_printf(m, "\n1 2.. 4.. 8.. " + "16.. 32.. 64.. 127\n"); + for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++) + seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]); + seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n", + q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop, + q->q_stats.nr_sbal_total); + return 0; +} + +static int qstat_seq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, qstat_show, + file_inode(filp)->i_private); +} + +static const struct file_operations debugfs_fops = { + .owner = THIS_MODULE, + .open = qstat_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static char *qperf_names[] = { + "Assumed adapter interrupts", + "QDIO interrupts", + "Requested PCIs", + "Inbound tasklet runs", + "Inbound tasklet resched", + "Inbound tasklet resched2", + "Outbound tasklet runs", + "SIGA read", + "SIGA write", + "SIGA sync", + "Inbound calls", + "Inbound handler", + "Inbound stop_polling", + "Inbound queue full", + "Outbound calls", + "Outbound handler", + "Outbound queue full", + "Outbound fast_requeue", + "Outbound target_full", + "QEBSM eqbs", + "QEBSM eqbs partial", + "QEBSM sqbs", + "QEBSM sqbs partial", + "Discarded interrupts" +}; + +static int qperf_show(struct seq_file *m, void *v) +{ + struct qdio_irq *irq_ptr = m->private; + unsigned int *stat; + int i; + + if (!irq_ptr) + return 0; + if (!irq_ptr->perf_stat_enabled) { + seq_printf(m, "disabled\n"); + return 0; + } + stat = (unsigned int *)&irq_ptr->perf_stat; + + for (i = 0; i < ARRAY_SIZE(qperf_names); i++) + seq_printf(m, "%26s:\t%u\n", + qperf_names[i], *(stat + i)); + return 0; +} + +static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *off) +{ + struct seq_file *seq = file->private_data; + struct qdio_irq *irq_ptr = seq->private; + struct qdio_q *q; + unsigned long val; + int ret, i; + + if (!irq_ptr) + return 0; + + ret = kstrtoul_from_user(ubuf, count, 10, &val); + if (ret) + return ret; + + switch (val) { + case 0: + irq_ptr->perf_stat_enabled = 0; + memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); + for_each_input_queue(irq_ptr, q, i) + memset(&q->q_stats, 0, 
sizeof(q->q_stats)); + for_each_output_queue(irq_ptr, q, i) + memset(&q->q_stats, 0, sizeof(q->q_stats)); + break; + case 1: + irq_ptr->perf_stat_enabled = 1; + break; + } + return count; +} + +static int qperf_seq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, qperf_show, + file_inode(filp)->i_private); +} + +static const struct file_operations debugfs_perf_fops = { + .owner = THIS_MODULE, + .open = qperf_seq_open, + .read = seq_read, + .write = qperf_seq_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void setup_debugfs_entry(struct qdio_q *q) +{ + char name[QDIO_DEBUGFS_NAME_LEN]; + + snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d", + q->is_input_q ? "input" : "output", + q->nr); + q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, + q->irq_ptr->debugfs_dev, q, &debugfs_fops); + if (IS_ERR(q->debugfs_q)) + q->debugfs_q = NULL; +} + +void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) +{ + struct qdio_q *q; + int i; + + irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev), + debugfs_root); + if (IS_ERR(irq_ptr->debugfs_dev)) + irq_ptr->debugfs_dev = NULL; + + irq_ptr->debugfs_perf = debugfs_create_file("statistics", + S_IFREG | S_IRUGO | S_IWUSR, + irq_ptr->debugfs_dev, irq_ptr, + &debugfs_perf_fops); + if (IS_ERR(irq_ptr->debugfs_perf)) + irq_ptr->debugfs_perf = NULL; + + for_each_input_queue(irq_ptr, q, i) + setup_debugfs_entry(q); + for_each_output_queue(irq_ptr, q, i) + setup_debugfs_entry(q); +} + +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + debugfs_remove(q->debugfs_q); + for_each_output_queue(irq_ptr, q, i) + debugfs_remove(q->debugfs_q); + debugfs_remove(irq_ptr->debugfs_perf); + debugfs_remove(irq_ptr->debugfs_dev); +} + +int __init qdio_debug_init(void) +{ + debugfs_root = debugfs_create_dir("qdio", NULL); + + qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); + debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_setup, DBF_INFO); + DBF_EVENT("dbf created\n"); + + qdio_dbf_error = debug_register("qdio_error", 4, 1, 16); + debug_register_view(qdio_dbf_error, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_error, DBF_INFO); + DBF_ERROR("dbf created\n"); + return 0; +} + +void qdio_debug_exit(void) +{ + qdio_clear_dbf_list(); + debugfs_remove(debugfs_root); + if (qdio_dbf_setup) + debug_unregister(qdio_dbf_setup); + if (qdio_dbf_error) + debug_unregister(qdio_dbf_error); +} diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h new file mode 100644 index 00000000000..f33ce857761 --- /dev/null +++ b/drivers/s390/cio/qdio_debug.h @@ -0,0 +1,86 @@ +/* + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#ifndef QDIO_DEBUG_H +#define QDIO_DEBUG_H + +#include <asm/debug.h> +#include <asm/qdio.h> +#include "qdio.h" + +/* that gives us 15 characters in the text event views */ +#define QDIO_DBF_LEN 16 + +extern debug_info_t *qdio_dbf_setup; +extern debug_info_t *qdio_dbf_error; + +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARN 4 /* warning conditions */ +#define DBF_INFO 6 /* informational */ + +#undef DBF_EVENT +#undef DBF_ERROR +#undef DBF_DEV_EVENT + +#define DBF_EVENT(text...) 
\ + do { \ + char debug_buffer[QDIO_DBF_LEN]; \ + snprintf(debug_buffer, QDIO_DBF_LEN, text); \ + debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ + } while (0) + +static inline void DBF_HEX(void *addr, int len) +{ + while (len > 0) { + debug_event(qdio_dbf_setup, DBF_ERR, addr, len); + len -= qdio_dbf_setup->buf_size; + addr += qdio_dbf_setup->buf_size; + } +} + +#define DBF_ERROR(text...) \ + do { \ + char debug_buffer[QDIO_DBF_LEN]; \ + snprintf(debug_buffer, QDIO_DBF_LEN, text); \ + debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ + } while (0) + +static inline void DBF_ERROR_HEX(void *addr, int len) +{ + while (len > 0) { + debug_event(qdio_dbf_error, DBF_ERR, addr, len); + len -= qdio_dbf_error->buf_size; + addr += qdio_dbf_error->buf_size; + } +} + +#define DBF_DEV_EVENT(level, device, text...) \ + do { \ + char debug_buffer[QDIO_DBF_LEN]; \ + if (debug_level_enabled(device->debug_area, level)) { \ + snprintf(debug_buffer, QDIO_DBF_LEN, text); \ + debug_text_event(device->debug_area, level, debug_buffer); \ + } \ + } while (0) + +static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr, + int len, int level) +{ + while (len > 0) { + debug_event(dev->debug_area, level, addr, len); + len -= dev->debug_area->buf_size; + addr += dev->debug_area->buf_size; + } +} + +int qdio_allocate_dbf(struct qdio_initialize *init_data, + struct qdio_irq *irq_ptr); +void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr); +int qdio_debug_init(void); +void qdio_debug_exit(void); + +#endif diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c new file mode 100644 index 00000000000..848e3b64ea6 --- /dev/null +++ b/drivers/s390/cio/qdio_main.c @@ -0,0 +1,1880 @@ +/* + * Linux for s390 qdio support, buffer handling, qdio API and module support. + * + * Copyright IBM Corp. 
2000, 2008 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/gfp.h> +#include <linux/io.h> +#include <linux/atomic.h> +#include <asm/debug.h> +#include <asm/qdio.h> +#include <asm/ipl.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "qdio.h" +#include "qdio_debug.h" + +MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ + "Jan Glauber <jang@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("QDIO base support"); +MODULE_LICENSE("GPL"); + +static inline int do_siga_sync(unsigned long schid, + unsigned int out_mask, unsigned int in_mask, + unsigned int fc) +{ + register unsigned long __fc asm ("0") = fc; + register unsigned long __schid asm ("1") = schid; + register unsigned long out asm ("2") = out_mask; + register unsigned long in asm ("3") = in_mask; + int cc; + + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); + return cc; +} + +static inline int do_siga_input(unsigned long schid, unsigned int mask, + unsigned int fc) +{ + register unsigned long __fc asm ("0") = fc; + register unsigned long __schid asm ("1") = schid; + register unsigned long __mask asm ("2") = mask; + int cc; + + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "d" (__fc), "d" (__schid), "d" (__mask) : "cc"); + return cc; +} + +/** + * do_siga_output - perform SIGA-w/wt function + * @schid: subchannel id or in case of QEBSM the subchannel token + * @mask: which output queues to process + * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer + * @fc: function code to perform + * + * Returns condition code. + * Note: For IQDC unicast queues only the highest priority queue is processed. + */ +static inline int do_siga_output(unsigned long schid, unsigned long mask, + unsigned int *bb, unsigned int fc, + unsigned long aob) +{ + register unsigned long __fc asm("0") = fc; + register unsigned long __schid asm("1") = schid; + register unsigned long __mask asm("2") = mask; + register unsigned long __aob asm("3") = aob; + int cc; + + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc), "+d" (__fc), "+d" (__aob) + : "d" (__schid), "d" (__mask) + : "cc"); + *bb = __fc >> 31; + return cc; +} + +static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) +{ + /* all done or next buffer state different */ + if (ccq == 0 || ccq == 32) + return 0; + /* no buffer processed */ + if (ccq == 97) + return 1; + /* not all buffers processed */ + if (ccq == 96) + return 2; + /* notify devices immediately */ + DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); + return -EIO; +} + +/** + * qdio_do_eqbs - extract buffer states for QEBSM + * @q: queue to manipulate + * @state: state of the extracted buffers + * @start: buffer number to start at + * @count: count of buffers to examine + * @auto_ack: automatically acknowledge buffers + * + * Returns the number of successfully extracted equal buffer states. + * Stops processing if a state is different from the last buffers state. 
+ */ +static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, + int start, int count, int auto_ack) +{ + int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; + unsigned int ccq = 0; + + qperf_inc(q, eqbs); + + if (!q->is_input_q) + nr += q->irq_ptr->nr_input_qs; +again: + ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, + auto_ack); + rc = qdio_check_ccq(q, ccq); + if (!rc) + return count - tmp_count; + + if (rc == 1) { + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); + goto again; + } + + if (rc == 2) { + qperf_inc(q, eqbs_partial); + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", + tmp_count); + /* + * Retry once, if that fails bail out and process the + * extracted buffers before trying again. + */ + if (!retried++) + goto again; + else + return count - tmp_count; + } + + DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, + q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); + return 0; +} + +/** + * qdio_do_sqbs - set buffer states for QEBSM + * @q: queue to manipulate + * @state: new state of the buffers + * @start: first buffer number to change + * @count: how many buffers to change + * + * Returns the number of successfully changed buffers. + * Does retrying until the specified count of buffer states is set or an + * error occurs. + */ +static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, + int count) +{ + unsigned int ccq = 0; + int tmp_count = count, tmp_start = start; + int nr = q->nr; + int rc; + + if (!count) + return 0; + qperf_inc(q, sqbs); + + if (!q->is_input_q) + nr += q->irq_ptr->nr_input_qs; +again: + ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); + rc = qdio_check_ccq(q, ccq); + if (!rc) { + WARN_ON_ONCE(tmp_count); + return count - tmp_count; + } + + if (rc == 1 || rc == 2) { + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); + qperf_inc(q, sqbs_partial); + goto again; + } + + DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, + q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); + return 0; +} + +/* returns number of examined buffers and their common state in *state */ +static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, + unsigned char *state, unsigned int count, + int auto_ack, int merge_pending) +{ + unsigned char __state = 0; + int i; + + if (is_qebsm(q)) + return qdio_do_eqbs(q, state, bufnr, count, auto_ack); + + for (i = 0; i < count; i++) { + if (!__state) { + __state = q->slsb.val[bufnr]; + if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) + __state = SLSB_P_OUTPUT_EMPTY; + } else if (merge_pending) { + if ((q->slsb.val[bufnr] & __state) != __state) + break; + } else if (q->slsb.val[bufnr] != __state) + break; + bufnr = next_buf(bufnr); + } + *state = __state; + return i; +} + +static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state, int auto_ack) +{ + return get_buf_states(q, bufnr, state, 1, auto_ack, 0); +} + +/* wrap-around safe setting of slsb states, returns number of changed buffers */ +static inline int set_buf_states(struct qdio_q *q, int bufnr, + unsigned char state, int count) +{ + int i; + + if (is_qebsm(q)) + return qdio_do_sqbs(q, state, bufnr, count); + + for (i = 0; i < count; i++) { + xchg(&q->slsb.val[bufnr], state); + bufnr = next_buf(bufnr); + } + return count; +} 
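/*
 * The wrap-around handling in get_buf_states()/set_buf_states() leans on
 * the ring-index helpers from qdio.h. A minimal sketch of that arithmetic,
 * assuming the queue always holds QDIO_MAX_BUFFERS_PER_Q (128) SBALs; the
 * sketch_* names are illustrative stand-ins, not the real helpers:
 */
#define SKETCH_BUFFERS_PER_Q	128
#define SKETCH_BUFFERS_MASK	(SKETCH_BUFFERS_PER_Q - 1)
#define sketch_add_buf(bufnr, inc)	(((bufnr) + (inc)) & SKETCH_BUFFERS_MASK)
#define sketch_next_buf(bufnr)		sketch_add_buf(bufnr, 1)
#define sketch_sub_buf(bufnr, dec)	(((bufnr) - (dec)) & SKETCH_BUFFERS_MASK)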
+ +static inline int set_buf_state(struct qdio_q *q, int bufnr, + unsigned char state) +{ + return set_buf_states(q, bufnr, state, 1); +} + +/* set slsb states to initial state */ +static void qdio_init_buf_states(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); + for_each_output_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); +} + +static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, + unsigned int input) +{ + unsigned long schid = *((u32 *) &q->irq_ptr->schid); + unsigned int fc = QDIO_SIGA_SYNC; + int cc; + + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); + qperf_inc(q, siga_sync); + + if (is_qebsm(q)) { + schid = q->irq_ptr->sch_token; + fc |= QDIO_SIGA_QEBSM_FLAG; + } + + cc = do_siga_sync(schid, output, input, fc); + if (unlikely(cc)) + DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); + return (cc) ? -EIO : 0; +} + +static inline int qdio_siga_sync_q(struct qdio_q *q) +{ + if (q->is_input_q) + return qdio_siga_sync(q, 0, q->mask); + else + return qdio_siga_sync(q, q->mask, 0); +} + +static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, + unsigned long aob) +{ + unsigned long schid = *((u32 *) &q->irq_ptr->schid); + unsigned int fc = QDIO_SIGA_WRITE; + u64 start_time = 0; + int retries = 0, cc; + unsigned long laob = 0; + + if (q->u.out.use_cq && aob != 0) { + fc = QDIO_SIGA_WRITEQ; + laob = aob; + } + + if (is_qebsm(q)) { + schid = q->irq_ptr->sch_token; + fc |= QDIO_SIGA_QEBSM_FLAG; + } +again: + WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) || + (aob && fc != QDIO_SIGA_WRITEQ)); + cc = do_siga_output(schid, q->mask, busy_bit, fc, laob); + + /* hipersocket busy condition */ + if (unlikely(*busy_bit)) { + retries++; + + if (!start_time) { + start_time = get_tod_clock_fast(); + goto again; + } + if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE) + goto again; + } + if (retries) { + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, + "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries); + } + return cc; +} + +static inline int qdio_siga_input(struct qdio_q *q) +{ + unsigned long schid = *((u32 *) &q->irq_ptr->schid); + unsigned int fc = QDIO_SIGA_READ; + int cc; + + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); + qperf_inc(q, siga_read); + + if (is_qebsm(q)) { + schid = q->irq_ptr->sch_token; + fc |= QDIO_SIGA_QEBSM_FLAG; + } + + cc = do_siga_input(schid, q->mask, fc); + if (unlikely(cc)) + DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); + return (cc) ? 
-EIO : 0; +} + +#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0) +#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U) + +static inline void qdio_sync_queues(struct qdio_q *q) +{ + /* PCI capable outbound queues will also be scanned so sync them too */ + if (pci_out_supported(q)) + qdio_siga_sync_all(q); + else + qdio_siga_sync_q(q); +} + +int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state) +{ + if (need_siga_sync(q)) + qdio_siga_sync_q(q); + return get_buf_states(q, bufnr, state, 1, 0, 0); +} + +static inline void qdio_stop_polling(struct qdio_q *q) +{ + if (!q->u.in.polling) + return; + + q->u.in.polling = 0; + qperf_inc(q, stop_polling); + + /* show the card that we are not polling anymore */ + if (is_qebsm(q)) { + set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = 0; + } else + set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); +} + +static inline void account_sbals(struct qdio_q *q, unsigned int count) +{ + int pos; + + q->q_stats.nr_sbal_total += count; + if (count == QDIO_MAX_BUFFERS_MASK) { + q->q_stats.nr_sbals[7]++; + return; + } + pos = ilog2(count); + q->q_stats.nr_sbals[pos]++; +} + +static void process_buffer_error(struct qdio_q *q, int count) +{ + unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT : + SLSB_P_OUTPUT_NOT_INIT; + + q->qdio_error = QDIO_ERROR_SLSB_STATE; + + /* special handling for no target buffer empty */ + if ((!q->is_input_q && + (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) { + qperf_inc(q, target_full); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", + q->first_to_check); + goto set; + } + + DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); + DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); + DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); + DBF_ERROR("F14:%2x F15:%2x", + q->sbal[q->first_to_check]->element[14].sflags, + q->sbal[q->first_to_check]->element[15].sflags); + +set: + /* + * Interrupts may be avoided as long as the error is present + * so change the buffer state immediately to avoid starvation. + */ + set_buf_states(q, q->first_to_check, state, count); +} + +static inline void inbound_primed(struct qdio_q *q, int count) +{ + int new; + + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count); + + /* for QEBSM the ACK was already set by EQBS */ + if (is_qebsm(q)) { + if (!q->u.in.polling) { + q->u.in.polling = 1; + q->u.in.ack_count = count; + q->u.in.ack_start = q->first_to_check; + return; + } + + /* delete the previous ACK's */ + set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = count; + q->u.in.ack_start = q->first_to_check; + return; + } + + /* + * ACK the newest buffer. The ACK will be removed in qdio_stop_polling + * or by the next inbound run. 
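 * For example, with first_to_check = 125 and count = 5, the newest buffer
 * is add_buf(125, 4) = (125 + 4) & 127 = 1 (assuming the usual mask-based
 * ring arithmetic), so the ACK wraps around to the start of the 128-entry
 * ring.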
+ */ + new = add_buf(q->first_to_check, count - 1); + if (q->u.in.polling) { + /* reset the previous ACK but first set the new one */ + set_buf_state(q, new, SLSB_P_INPUT_ACK); + set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); + } else { + q->u.in.polling = 1; + set_buf_state(q, new, SLSB_P_INPUT_ACK); + } + + q->u.in.ack_start = new; + count--; + if (!count) + return; + /* need to change ALL buffers to get more interrupts */ + set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count); +} + +static int get_inbound_buffer_frontier(struct qdio_q *q) +{ + int count, stop; + unsigned char state = 0; + + q->timestamp = get_tod_clock_fast(); + + /* + * Don't check 128 buffers, as otherwise qdio_inbound_q_moved + * would return 0. + */ + count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); + stop = add_buf(q->first_to_check, count); + + if (q->first_to_check == stop) + goto out; + + /* + * No siga sync here, as a PCI or we after a thin interrupt + * already sync'ed the queues. + */ + count = get_buf_states(q, q->first_to_check, &state, count, 1, 0); + if (!count) + goto out; + + switch (state) { + case SLSB_P_INPUT_PRIMED: + inbound_primed(q, count); + q->first_to_check = add_buf(q->first_to_check, count); + if (atomic_sub_return(count, &q->nr_buf_used) == 0) + qperf_inc(q, inbound_queue_full); + if (q->irq_ptr->perf_stat_enabled) + account_sbals(q, count); + break; + case SLSB_P_INPUT_ERROR: + process_buffer_error(q, count); + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + if (q->irq_ptr->perf_stat_enabled) + account_sbals_error(q, count); + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_INPUT_ACK: + if (q->irq_ptr->perf_stat_enabled) + q->q_stats.nr_sbal_nop++; + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); + break; + default: + WARN_ON_ONCE(1); + } +out: + return q->first_to_check; +} + +static int qdio_inbound_q_moved(struct qdio_q *q) +{ + int bufnr; + + bufnr = get_inbound_buffer_frontier(q); + + if (bufnr != q->last_move) { + q->last_move = bufnr; + if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) + q->u.in.timestamp = get_tod_clock(); + return 1; + } else + return 0; +} + +static inline int qdio_inbound_q_done(struct qdio_q *q) +{ + unsigned char state = 0; + + if (!atomic_read(&q->nr_buf_used)) + return 1; + + if (need_siga_sync(q)) + qdio_siga_sync_q(q); + get_buf_state(q, q->first_to_check, &state, 0); + + if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) + /* more work coming */ + return 0; + + if (is_thinint_irq(q->irq_ptr)) + return 1; + + /* don't poll under z/VM */ + if (MACHINE_IS_VM) + return 1; + + /* + * At this point we know, that inbound first_to_check + * has (probably) not moved (see qdio_inbound_processing). 
+ */ + if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", + q->first_to_check); + return 1; + } else + return 0; +} + +static inline int contains_aobs(struct qdio_q *q) +{ + return !q->is_input_q && q->u.out.use_cq; +} + +static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) +{ + unsigned char state = 0; + int j, b = start; + + if (!contains_aobs(q)) + return; + + for (j = 0; j < count; ++j) { + get_buf_state(q, b, &state, 0); + if (state == SLSB_P_OUTPUT_PENDING) { + struct qaob *aob = q->u.out.aobs[b]; + if (aob == NULL) + continue; + + q->u.out.sbal_state[b].flags |= + QDIO_OUTBUF_STATE_FLAG_PENDING; + q->u.out.aobs[b] = NULL; + } else if (state == SLSB_P_OUTPUT_EMPTY) { + q->u.out.sbal_state[b].aob = NULL; + } + b = next_buf(b); + } +} + +static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, + int bufnr) +{ + unsigned long phys_aob = 0; + + if (!q->use_cq) + goto out; + + if (!q->aobs[bufnr]) { + struct qaob *aob = qdio_allocate_aob(); + q->aobs[bufnr] = aob; + } + if (q->aobs[bufnr]) { + q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; + q->sbal_state[bufnr].aob = q->aobs[bufnr]; + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; + phys_aob = virt_to_phys(q->aobs[bufnr]); + WARN_ON_ONCE(phys_aob & 0xFF); + } + +out: + return phys_aob; +} + +static void qdio_kick_handler(struct qdio_q *q) +{ + int start = q->first_to_kick; + int end = q->first_to_check; + int count; + + if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) + return; + + count = sub_buf(end, start); + + if (q->is_input_q) { + qperf_inc(q, inbound_handler); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); + } else { + qperf_inc(q, outbound_handler); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", + start, count); + } + + qdio_handle_aobs(q, start, count); + + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, + q->irq_ptr->int_parm); + + /* for the next time */ + q->first_to_kick = end; + q->qdio_error = 0; +} + +static void __qdio_inbound_processing(struct qdio_q *q) +{ + qperf_inc(q, tasklet_inbound); + + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_handler(q); + + if (!qdio_inbound_q_done(q)) { + /* means poll time is not yet over */ + qperf_inc(q, tasklet_inbound_resched); + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { + tasklet_schedule(&q->tasklet); + return; + } + } + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (!qdio_inbound_q_done(q)) { + qperf_inc(q, tasklet_inbound_resched2); + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) + tasklet_schedule(&q->tasklet); + } +} + +void qdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __qdio_inbound_processing(q); +} + +static int get_outbound_buffer_frontier(struct qdio_q *q) +{ + int count, stop; + unsigned char state = 0; + + q->timestamp = get_tod_clock_fast(); + + if (need_siga_sync(q)) + if (((queue_type(q) != QDIO_IQDIO_QFMT) && + !pci_out_supported(q)) || + (queue_type(q) == QDIO_IQDIO_QFMT && + multicast_outbound(q))) + qdio_siga_sync_q(q); + + /* + * Don't check 128 buffers, as otherwise qdio_inbound_q_moved + * would return 0. 
+ */ + count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); + stop = add_buf(q->first_to_check, count); + if (q->first_to_check == stop) + goto out; + + count = get_buf_states(q, q->first_to_check, &state, count, 0, 1); + if (!count) + goto out; + + switch (state) { + case SLSB_P_OUTPUT_EMPTY: + /* the adapter got it */ + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, + "out empty:%1d %02x", q->nr, count); + + atomic_sub(count, &q->nr_buf_used); + q->first_to_check = add_buf(q->first_to_check, count); + if (q->irq_ptr->perf_stat_enabled) + account_sbals(q, count); + + break; + case SLSB_P_OUTPUT_ERROR: + process_buffer_error(q, count); + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + if (q->irq_ptr->perf_stat_enabled) + account_sbals_error(q, count); + break; + case SLSB_CU_OUTPUT_PRIMED: + /* the adapter has not fetched the output yet */ + if (q->irq_ptr->perf_stat_enabled) + q->q_stats.nr_sbal_nop++; + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", + q->nr); + break; + case SLSB_P_OUTPUT_NOT_INIT: + case SLSB_P_OUTPUT_HALTED: + break; + default: + WARN_ON_ONCE(1); + } + +out: + return q->first_to_check; +} + +/* all buffers processed? */ +static inline int qdio_outbound_q_done(struct qdio_q *q) +{ + return atomic_read(&q->nr_buf_used) == 0; +} + +static inline int qdio_outbound_q_moved(struct qdio_q *q) +{ + int bufnr; + + bufnr = get_outbound_buffer_frontier(q); + + if (bufnr != q->last_move) { + q->last_move = bufnr; + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); + return 1; + } else + return 0; +} + +static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) +{ + int retries = 0, cc; + unsigned int busy_bit; + + if (!need_siga_out(q)) + return 0; + + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); +retry: + qperf_inc(q, siga_write); + + cc = qdio_siga_output(q, &busy_bit, aob); + switch (cc) { + case 0: + break; + case 2: + if (busy_bit) { + while (++retries < QDIO_BUSY_BIT_RETRIES) { + mdelay(QDIO_BUSY_BIT_RETRY_DELAY); + goto retry; + } + DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); + cc = -EBUSY; + } else { + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); + cc = -ENOBUFS; + } + break; + case 1: + case 3: + DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); + cc = -EIO; + break; + } + if (retries) { + DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); + DBF_ERROR("count:%u", retries); + } + return cc; +} + +static void __qdio_outbound_processing(struct qdio_q *q) +{ + qperf_inc(q, tasklet_outbound); + WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0); + + if (qdio_outbound_q_moved(q)) + qdio_kick_handler(q); + + if (queue_type(q) == QDIO_ZFCP_QFMT) + if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) + goto sched; + + if (q->u.out.pci_out_enabled) + return; + + /* + * Now we know that queue type is either qeth without pci enabled + * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY + * is noticed and outbound_handler is called after some time. 
+ */ + if (qdio_outbound_q_done(q)) + del_timer(&q->u.out.timer); + else + if (!timer_pending(&q->u.out.timer)) + mod_timer(&q->u.out.timer, jiffies + 10 * HZ); + return; + +sched: + if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + return; + tasklet_schedule(&q->tasklet); +} + +/* outbound tasklet */ +void qdio_outbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __qdio_outbound_processing(q); +} + +void qdio_outbound_timer(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + + if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + return; + tasklet_schedule(&q->tasklet); +} + +static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) +{ + struct qdio_q *out; + int i; + + if (!pci_out_supported(q)) + return; + + for_each_output_queue(q->irq_ptr, out, i) + if (!qdio_outbound_q_done(out)) + tasklet_schedule(&out->tasklet); +} + +static void __tiqdio_inbound_processing(struct qdio_q *q) +{ + qperf_inc(q, tasklet_inbound); + if (need_siga_sync(q) && need_siga_sync_after_ai(q)) + qdio_sync_queues(q); + + /* + * The interrupt could be caused by a PCI request. Check the + * PCI capable outbound queues. + */ + qdio_check_outbound_after_thinint(q); + + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_handler(q); + + if (!qdio_inbound_q_done(q)) { + qperf_inc(q, tasklet_inbound_resched); + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { + tasklet_schedule(&q->tasklet); + return; + } + } + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (!qdio_inbound_q_done(q)) { + qperf_inc(q, tasklet_inbound_resched2); + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) + tasklet_schedule(&q->tasklet); + } +} + +void tiqdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __tiqdio_inbound_processing(q); +} + +static inline void qdio_set_state(struct qdio_irq *irq_ptr, + enum qdio_irq_states state) +{ + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); + + irq_ptr->state = state; + mb(); +} + +static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) +{ + if (irb->esw.esw0.erw.cons) { + DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); + DBF_ERROR_HEX(irb, 64); + DBF_ERROR_HEX(irb->ecw, 64); + } +} + +/* PCI interrupt handler */ +static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) +{ + int i; + struct qdio_q *q; + + if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + return; + + for_each_input_queue(irq_ptr, q, i) { + if (q->u.in.queue_start_poll) { + /* skip if polling is enabled or already in work */ + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) { + qperf_inc(q, int_discarded); + continue; + } + q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, + q->irq_ptr->int_parm); + } else { + tasklet_schedule(&q->tasklet); + } + } + + if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) + return; + + for_each_output_queue(irq_ptr, q, i) { + if (qdio_outbound_q_done(q)) + continue; + if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) + qdio_siga_sync_q(q); + tasklet_schedule(&q->tasklet); + } +} + +static void qdio_handle_activate_check(struct ccw_device *cdev, + unsigned long intparm, int cstat, int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + int count; + + DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); + DBF_ERROR("intp :%lx", intparm); + DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); + 
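	/*
	 * Pick any configured queue just to reach the driver's handler and
	 * report QDIO_ERROR_ACTIVATE together with the range of buffers that
	 * were never kicked, then stop the IRQ below.
	 */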
+ if (irq_ptr->nr_input_qs) { + q = irq_ptr->input_qs[0]; + } else if (irq_ptr->nr_output_qs) { + q = irq_ptr->output_qs[0]; + } else { + dump_stack(); + goto no_handler; + } + + count = sub_buf(q->first_to_check, q->first_to_kick); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, + q->nr, q->first_to_kick, count, irq_ptr->int_parm); +no_handler: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); + /* + * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. + * Therefore we call the LGR detection function here. + */ + lgr_info_log(); +} + +static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, + int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); + + if (cstat) + goto error; + if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) + goto error; + if (!(dstat & DEV_STAT_DEV_END)) + goto error; + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); + return; + +error: + DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); + DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); +} + +/* qdio interrupt handler */ +void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + int cstat, dstat; + + if (!intparm || !irq_ptr) { + DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); + return; + } + + if (irq_ptr->perf_stat_enabled) + irq_ptr->perf_stat.qdio_int++; + + if (IS_ERR(irb)) { + DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + wake_up(&cdev->private->wait_q); + return; + } + qdio_irq_check_sense(irq_ptr, irb); + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_INACTIVE: + qdio_establish_handle_irq(cdev, cstat, dstat); + break; + case QDIO_IRQ_STATE_CLEANUP: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + break; + case QDIO_IRQ_STATE_ESTABLISHED: + case QDIO_IRQ_STATE_ACTIVE: + if (cstat & SCHN_STAT_PCI) { + qdio_int_handler_pci(irq_ptr); + return; + } + if (cstat || dstat) + qdio_handle_activate_check(cdev, intparm, cstat, + dstat); + break; + case QDIO_IRQ_STATE_STOPPED: + break; + default: + WARN_ON_ONCE(1); + } + wake_up(&cdev->private->wait_q); +} + +/** + * qdio_get_ssqd_desc - get qdio subchannel description + * @cdev: ccw device to get description for + * @data: where to store the ssqd + * + * Returns 0 or an error code. The results of the chsc are stored in the + * specified structure. 
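 *
 * A minimal usage sketch (illustrative only; the wrapper name is a
 * placeholder, not part of this API):
 *
 *	static int needs_siga_input(struct ccw_device *cdev)
 *	{
 *		struct qdio_ssqd_desc ssqd;
 *		int rc = qdio_get_ssqd_desc(cdev, &ssqd);
 *
 *		if (rc)
 *			return rc;
 *		return !!(ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED);
 *	}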
+ */ +int qdio_get_ssqd_desc(struct ccw_device *cdev, + struct qdio_ssqd_desc *data) +{ + + if (!cdev || !cdev->private) + return -EINVAL; + + DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); + return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); +} +EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); + +static void qdio_shutdown_queues(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + tasklet_kill(&q->tasklet); + + for_each_output_queue(irq_ptr, q, i) { + del_timer(&q->u.out.timer); + tasklet_kill(&q->tasklet); + } +} + +/** + * qdio_shutdown - shut down a qdio subchannel + * @cdev: associated ccw device + * @how: use halt or clear to shutdown + */ +int qdio_shutdown(struct ccw_device *cdev, int how) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + int rc; + unsigned long flags; + + if (!irq_ptr) + return -ENODEV; + + WARN_ON_ONCE(irqs_disabled()); + DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); + + mutex_lock(&irq_ptr->setup_mutex); + /* + * Subchannel was already shot down. We cannot prevent being called + * twice since cio may trigger a shutdown asynchronously. + */ + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + mutex_unlock(&irq_ptr->setup_mutex); + return 0; + } + + /* + * Indicate that the device is going down. Scheduling the queue + * tasklets is forbidden from here on. + */ + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); + + tiqdio_remove_input_queues(irq_ptr); + qdio_shutdown_queues(cdev); + qdio_shutdown_debug_entries(irq_ptr); + + /* cleanup subchannel */ + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + + if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) + rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); + else + /* default behaviour is halt */ + rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); + if (rc) { + DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4d", rc); + goto no_cleanup; + } + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || + irq_ptr->state == QDIO_IRQ_STATE_ERR, + 10 * HZ); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + +no_cleanup: + qdio_shutdown_thinint(irq_ptr); + + /* restore interrupt handler */ + if ((void *)cdev->handler == (void *)qdio_int_handler) + cdev->handler = irq_ptr->orig_handler; + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + mutex_unlock(&irq_ptr->setup_mutex); + if (rc) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(qdio_shutdown); + +/** + * qdio_free - free data structures for a qdio subchannel + * @cdev: associated ccw device + */ +int qdio_free(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (!irq_ptr) + return -ENODEV; + + DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); + mutex_lock(&irq_ptr->setup_mutex); + + irq_ptr->debug_area = NULL; + cdev->private->qdio_data = NULL; + mutex_unlock(&irq_ptr->setup_mutex); + + qdio_release_memory(irq_ptr); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_free); + +/** + * qdio_allocate - allocate qdio queues and associated data + * @init_data: initialization data + */ +int qdio_allocate(struct qdio_initialize *init_data) +{ + struct qdio_irq *irq_ptr; + + DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); + + if 
((init_data->no_input_qs && !init_data->input_handler) || + (init_data->no_output_qs && !init_data->output_handler)) + return -EINVAL; + + if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || + (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) + return -EINVAL; + + if ((!init_data->input_sbal_addr_array) || + (!init_data->output_sbal_addr_array)) + return -EINVAL; + + /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ + irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!irq_ptr) + goto out_err; + + mutex_init(&irq_ptr->setup_mutex); + if (qdio_allocate_dbf(init_data, irq_ptr)) + goto out_rel; + + /* + * Allocate a page for the chsc calls in qdio_establish. + * Must be pre-allocated since a zfcp recovery will call + * qdio_establish. In case of low memory and swap on a zfcp disk + * we may not be able to allocate memory otherwise. + */ + irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); + if (!irq_ptr->chsc_page) + goto out_rel; + + /* qdr is used in ccw1.cda which is u32 */ + irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!irq_ptr->qdr) + goto out_rel; + + if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, + init_data->no_output_qs)) + goto out_rel; + + init_data->cdev->private->qdio_data = irq_ptr; + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + return 0; +out_rel: + qdio_release_memory(irq_ptr); +out_err: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(qdio_allocate); + +static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q = irq_ptr->input_qs[0]; + int i, use_cq = 0; + + if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT) + use_cq = 1; + + for_each_output_queue(irq_ptr, q, i) { + if (use_cq) { + if (qdio_enable_async_operation(&q->u.out) < 0) { + use_cq = 0; + continue; + } + } else + qdio_disable_async_operation(&q->u.out); + } + DBF_EVENT("use_cq:%d", use_cq); +} + +/** + * qdio_establish - establish queues on a qdio subchannel + * @init_data: initialization data + */ +int qdio_establish(struct qdio_initialize *init_data) +{ + struct qdio_irq *irq_ptr; + struct ccw_device *cdev = init_data->cdev; + unsigned long saveflags; + int rc; + + DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + mutex_lock(&irq_ptr->setup_mutex); + qdio_setup_irq(init_data); + + rc = qdio_establish_thinint(irq_ptr); + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + /* establish q */ + irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->equeue.count; + irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); + + spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options_mask(cdev, 0); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + if (rc) { + DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4x", rc); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || + irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); + + if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, 
QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; + } + + qdio_setup_ssqd_info(irq_ptr); + + qdio_detect_hsicq(irq_ptr); + + /* qebsm is now setup if available, initialize buffer states */ + qdio_init_buf_states(irq_ptr); + + mutex_unlock(&irq_ptr->setup_mutex); + qdio_print_subchannel_info(irq_ptr, cdev); + qdio_setup_debug_entries(irq_ptr, cdev); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_establish); + +/** + * qdio_activate - activate queues on a qdio subchannel + * @cdev: associated cdev + */ +int qdio_activate(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + int rc; + unsigned long saveflags; + + DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + mutex_lock(&irq_ptr->setup_mutex); + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + rc = -EBUSY; + goto out; + } + + irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->aqueue.count; + irq_ptr->ccw.cda = 0; + + spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, + 0, DOIO_DENY_PREFETCH); + if (rc) { + DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%4x", rc); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) + goto out; + + if (is_thinint_irq(irq_ptr)) + tiqdio_add_input_queues(irq_ptr); + + /* wait for subchannel to become active */ + msleep(5); + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_STOPPED: + case QDIO_IRQ_STATE_ERR: + rc = -EIO; + break; + default: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); + rc = 0; + } +out: + mutex_unlock(&irq_ptr->setup_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_activate); + +static inline int buf_in_between(int bufnr, int start, int count) +{ + int end = add_buf(start, count); + + if (end > start) { + if (bufnr >= start && bufnr < end) + return 1; + else + return 0; + } + + /* wrap-around case */ + if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || + (bufnr < end)) + return 1; + else + return 0; +} + +/** + * handle_inbound - reset processed input buffers + * @q: queue containing the buffers + * @callflags: flags + * @bufnr: first buffer to process + * @count: how many buffers are emptied + */ +static int handle_inbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) +{ + int diff; + + qperf_inc(q, inbound_call); + + if (!q->u.in.polling) + goto set; + + /* protect against stop polling setting an ACK for an emptied slsb */ + if (count == QDIO_MAX_BUFFERS_PER_Q) { + /* overwriting everything, just delete polling status */ + q->u.in.polling = 0; + q->u.in.ack_count = 0; + goto set; + } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { + if (is_qebsm(q)) { + /* partial overwrite, just update ack_start */ + diff = add_buf(bufnr, count); + diff = sub_buf(diff, q->u.in.ack_start); + q->u.in.ack_count -= diff; + if (q->u.in.ack_count <= 0) { + q->u.in.polling = 0; + q->u.in.ack_count = 0; + goto set; + } + q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); + } + else + /* the only ACK will be deleted, so stop polling */ + q->u.in.polling = 0; + } + +set: + count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); + atomic_add(count, &q->nr_buf_used); + + if (need_siga_in(q)) + return qdio_siga_input(q); + + return 0; +} + +/** + * handle_outbound - process filled outbound 
buffers + * @q: queue containing the buffers + * @callflags: flags + * @bufnr: first buffer to process + * @count: how many buffers are filled + */ +static int handle_outbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) +{ + unsigned char state = 0; + int used, rc = 0; + + qperf_inc(q, outbound_call); + + count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); + used = atomic_add_return(count, &q->nr_buf_used); + + if (used == QDIO_MAX_BUFFERS_PER_Q) + qperf_inc(q, outbound_queue_full); + + if (callflags & QDIO_FLAG_PCI_OUT) { + q->u.out.pci_out_enabled = 1; + qperf_inc(q, pci_request_int); + } else + q->u.out.pci_out_enabled = 0; + + if (queue_type(q) == QDIO_IQDIO_QFMT) { + unsigned long phys_aob = 0; + + /* One SIGA-W per buffer required for unicast HSI */ + WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); + + phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); + + rc = qdio_kick_outbound_q(q, phys_aob); + } else if (need_siga_sync(q)) { + rc = qdio_siga_sync_q(q); + } else { + /* try to fast requeue buffers */ + get_buf_state(q, prev_buf(bufnr), &state, 0); + if (state != SLSB_CU_OUTPUT_PRIMED) + rc = qdio_kick_outbound_q(q, 0); + else + qperf_inc(q, fast_requeue); + } + + /* in case of SIGA errors we must process the error immediately */ + if (used >= q->u.out.scan_threshold || rc) + tasklet_schedule(&q->tasklet); + else + /* free the SBALs in case of no further traffic */ + if (!timer_pending(&q->u.out.timer)) + mod_timer(&q->u.out.timer, jiffies + HZ); + return rc; +} + +/** + * do_QDIO - process input or output buffers + * @cdev: associated ccw_device for the qdio subchannel + * @callflags: input or output and special flags from the program + * @q_nr: queue number + * @bufnr: buffer number + * @count: how many buffers to process + */ +int do_QDIO(struct ccw_device *cdev, unsigned int callflags, + int q_nr, unsigned int bufnr, unsigned int count) +{ + struct qdio_irq *irq_ptr; + + if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) + return -EINVAL; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + DBF_DEV_EVENT(DBF_INFO, irq_ptr, + "do%02x b:%02x c:%02x", callflags, bufnr, count); + + if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) + return -EIO; + if (!count) + return 0; + if (callflags & QDIO_FLAG_SYNC_INPUT) + return handle_inbound(irq_ptr->input_qs[q_nr], + callflags, bufnr, count); + else if (callflags & QDIO_FLAG_SYNC_OUTPUT) + return handle_outbound(irq_ptr->output_qs[q_nr], + callflags, bufnr, count); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(do_QDIO); + +/** + * qdio_start_irq - process input buffers + * @cdev: associated ccw_device for the qdio subchannel + * @nr: input queue number + * + * Return codes + * 0 - success + * 1 - irqs not started since new data is available + */ +int qdio_start_irq(struct ccw_device *cdev, int nr) +{ + struct qdio_q *q; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (!irq_ptr) + return -ENODEV; + q = irq_ptr->input_qs[nr]; + + clear_nonshared_ind(irq_ptr); + qdio_stop_polling(q); + clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); + + /* + * We need to check again to not lose initiative after + * resetting the ACK state. 
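 *
 * Together with qdio_get_next_buffers() and qdio_stop_irq() this supports
 * an interrupt-avoidance poll loop; a hedged sketch of that pattern
 * (the driver-side helpers are placeholders):
 *
 *	int bufnr, error, n;
 *
 *	do {
 *		n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *		if (n > 0)
 *			my_consume(cdev, bufnr, n, error);
 *	} while (n > 0);
 *	if (qdio_start_irq(cdev, 0) == 1)
 *		my_schedule_poll(cdev);		// new data arrived, keep polling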
+ */ + if (test_nonshared_ind(irq_ptr)) + goto rescan; + if (!qdio_inbound_q_done(q)) + goto rescan; + return 0; + +rescan: + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) + return 0; + else + return 1; + +} +EXPORT_SYMBOL(qdio_start_irq); + +/** + * qdio_get_next_buffers - process input buffers + * @cdev: associated ccw_device for the qdio subchannel + * @nr: input queue number + * @bufnr: first filled buffer number + * @error: buffers are in error state + * + * Return codes + * < 0 - error + * = 0 - no new buffers found + * > 0 - number of processed buffers + */ +int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, + int *error) +{ + struct qdio_q *q; + int start, end; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (!irq_ptr) + return -ENODEV; + q = irq_ptr->input_qs[nr]; + + /* + * Cannot rely on automatic sync after interrupt since queues may + * also be examined without interrupt. + */ + if (need_siga_sync(q)) + qdio_sync_queues(q); + + /* check the PCI capable outbound queues. */ + qdio_check_outbound_after_thinint(q); + + if (!qdio_inbound_q_moved(q)) + return 0; + + /* Note: upper-layer MUST stop processing immediately here ... */ + if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) + return -EIO; + + start = q->first_to_kick; + end = q->first_to_check; + *bufnr = start; + *error = q->qdio_error; + + /* for the next time */ + q->first_to_kick = end; + q->qdio_error = 0; + return sub_buf(end, start); +} +EXPORT_SYMBOL(qdio_get_next_buffers); + +/** + * qdio_stop_irq - disable interrupt processing for the device + * @cdev: associated ccw_device for the qdio subchannel + * @nr: input queue number + * + * Return codes + * 0 - interrupts were already disabled + * 1 - interrupts successfully disabled + */ +int qdio_stop_irq(struct ccw_device *cdev, int nr) +{ + struct qdio_q *q; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (!irq_ptr) + return -ENODEV; + q = irq_ptr->input_qs[nr]; + + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) + return 0; + else + return 1; +} +EXPORT_SYMBOL(qdio_stop_irq); + +/** + * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info. + * @schid: Subchannel ID. + * @cnc: Boolean Change-Notification Control + * @response: Response code will be stored at this address + * @cb: Callback function will be executed for each element + * of the address list + * @priv: Pointer passed from the caller to qdio_pnso_brinfo() + * @type: Type of the address entry passed to the callback + * @entry: Entry containg the address of the specified type + * @priv: Pointer to pass to the callback function. + * + * Performs "Store-network-bridging-information list" operation and calls + * the callback function for every entry in the list. If "change- + * notification-control" is set, further changes in the address list + * will be reported via the IPA command. 
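 *
 * A minimal callback sketch (illustrative only; the callback name and the
 * pr_info() use are placeholders):
 *
 *	static void brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
 *			      void *entry)
 *	{
 *		if (type == l2_addr_lnid)
 *			pr_info("bridge port L2 entry %p\n", entry);
 *	}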
+ */ +int qdio_pnso_brinfo(struct subchannel_id schid, + int cnc, u16 *response, + void (*cb)(void *priv, enum qdio_brinfo_entry_type type, + void *entry), + void *priv) +{ + struct chsc_pnso_area *rr; + int rc; + u32 prev_instance = 0; + int isfirstblock = 1; + int i, size, elems; + + rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL); + if (rr == NULL) + return -ENOMEM; + do { + /* on the first iteration, naihdr.resume_token will be zero */ + rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc); + if (rc != 0 && rc != -EBUSY) + goto out; + if (rr->response.code != 1) { + rc = -EIO; + continue; + } else + rc = 0; + + if (cb == NULL) + continue; + + size = rr->naihdr.naids; + elems = (rr->response.length - + sizeof(struct chsc_header) - + sizeof(struct chsc_brinfo_naihdr)) / + size; + + if (!isfirstblock && (rr->naihdr.instance != prev_instance)) { + /* Inform the caller that they need to scrap */ + /* the data that was already reported via cb */ + rc = -EAGAIN; + break; + } + isfirstblock = 0; + prev_instance = rr->naihdr.instance; + for (i = 0; i < elems; i++) + switch (size) { + case sizeof(struct qdio_brinfo_entry_l3_ipv6): + (*cb)(priv, l3_ipv6_addr, + &rr->entries.l3_ipv6[i]); + break; + case sizeof(struct qdio_brinfo_entry_l3_ipv4): + (*cb)(priv, l3_ipv4_addr, + &rr->entries.l3_ipv4[i]); + break; + case sizeof(struct qdio_brinfo_entry_l2): + (*cb)(priv, l2_addr_lnid, + &rr->entries.l2[i]); + break; + default: + WARN_ON_ONCE(1); + rc = -EIO; + goto out; + } + } while (rr->response.code == 0x0107 || /* channel busy */ + (rr->response.code == 1 && /* list stored */ + /* resume token is non-zero => list incomplete */ + (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2))); + (*response) = rr->response.code; + +out: + free_page((unsigned long)rr); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_pnso_brinfo); + +static int __init init_QDIO(void) +{ + int rc; + + rc = qdio_debug_init(); + if (rc) + return rc; + rc = qdio_setup_init(); + if (rc) + goto out_debug; + rc = tiqdio_allocate_memory(); + if (rc) + goto out_cache; + rc = tiqdio_register_thinints(); + if (rc) + goto out_ti; + return 0; + +out_ti: + tiqdio_free_memory(); +out_cache: + qdio_setup_exit(); +out_debug: + qdio_debug_exit(); + return rc; +} + +static void __exit exit_QDIO(void) +{ + tiqdio_unregister_thinints(); + tiqdio_free_memory(); + qdio_setup_exit(); + qdio_debug_exit(); +} + +module_init(init_QDIO); +module_exit(exit_QDIO); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c new file mode 100644 index 00000000000..f5f4a91fab4 --- /dev/null +++ b/drivers/s390/cio/qdio_setup.c @@ -0,0 +1,547 @@ +/* + * qdio queue initialization + * + * Copyright IBM Corp. 2008 + * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <asm/qdio.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "ioasm.h" +#include "chsc.h" +#include "qdio.h" +#include "qdio_debug.h" + +static struct kmem_cache *qdio_q_cache; +static struct kmem_cache *qdio_aob_cache; + +struct qaob *qdio_allocate_aob(void) +{ + return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(qdio_allocate_aob); + +void qdio_release_aob(struct qaob *aob) +{ + kmem_cache_free(qdio_aob_cache, aob); +} +EXPORT_SYMBOL_GPL(qdio_release_aob); + +/* + * qebsm is only available under 64bit but the adapter sets the feature + * flag anyway, so we manually override it. 
+ */ +static inline int qebsm_possible(void) +{ +#ifdef CONFIG_64BIT + return css_general_characteristics.qebsm; +#endif + return 0; +} + +/* + * qib_param_field: pointer to 128 bytes or NULL, if no param field + * nr_input_qs: pointer to nr_queues*128 words of data or NULL + */ +static void set_impl_params(struct qdio_irq *irq_ptr, + unsigned int qib_param_field_format, + unsigned char *qib_param_field, + unsigned long *input_slib_elements, + unsigned long *output_slib_elements) +{ + struct qdio_q *q; + int i, j; + + if (!irq_ptr) + return; + + irq_ptr->qib.pfmt = qib_param_field_format; + if (qib_param_field) + memcpy(irq_ptr->qib.parm, qib_param_field, + QDIO_MAX_BUFFERS_PER_Q); + + if (!input_slib_elements) + goto output; + + for_each_input_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +output: + if (!output_slib_elements) + return; + + for_each_output_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +} + +static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) +{ + struct qdio_q *q; + int i; + + for (i = 0; i < nr_queues; i++) { + q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); + if (!q) + return -ENOMEM; + + q->slib = (struct slib *) __get_free_page(GFP_KERNEL); + if (!q->slib) { + kmem_cache_free(qdio_q_cache, q); + return -ENOMEM; + } + irq_ptr_qs[i] = q; + } + return 0; +} + +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs) +{ + int rc; + + rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs); + if (rc) + return rc; + rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs); + return rc; +} + +static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, + qdio_handler_t *handler, int i) +{ + struct slib *slib = q->slib; + + /* queue must be cleared for qdio_establish */ + memset(q, 0, sizeof(*q)); + memset(slib, 0, PAGE_SIZE); + q->slib = slib; + q->irq_ptr = irq_ptr; + q->mask = 1 << (31 - i); + q->nr = i; + q->handler = handler; +} + +static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, + void **sbals_array, int i) +{ + struct qdio_q *prev; + int j; + + DBF_HEX(&q, sizeof(void *)); + q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); + + /* fill in sbal */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->sbal[j] = *sbals_array++; + + /* fill in slib */ + if (i > 0) { + prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1] + : irq_ptr->output_qs[i - 1]; + prev->slib->nsliba = (unsigned long)q->slib; + } + + q->slib->sla = (unsigned long)q->sl; + q->slib->slsba = (unsigned long)&q->slsb.val[0]; + + /* fill in sl */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->sl->element[j].sbal = (unsigned long)q->sbal[j]; +} + +static void setup_queues(struct qdio_irq *irq_ptr, + struct qdio_initialize *qdio_init) +{ + struct qdio_q *q; + void **input_sbal_array = qdio_init->input_sbal_addr_array; + void **output_sbal_array = qdio_init->output_sbal_addr_array; + struct qdio_outbuf_state *output_sbal_state_array = + qdio_init->output_sbal_state_array; + int i; + + for_each_input_queue(irq_ptr, q, i) { + DBF_EVENT("inq:%1d", i); + setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); + + q->is_input_q = 1; + q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ? 
+ qdio_init->queue_start_poll_array[i] : NULL; + + setup_storage_lists(q, irq_ptr, input_sbal_array, i); + input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + if (is_thinint_irq(irq_ptr)) { + tasklet_init(&q->tasklet, tiqdio_inbound_processing, + (unsigned long) q); + } else { + tasklet_init(&q->tasklet, qdio_inbound_processing, + (unsigned long) q); + } + } + + for_each_output_queue(irq_ptr, q, i) { + DBF_EVENT("outq:%1d", i); + setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); + + q->u.out.sbal_state = output_sbal_state_array; + output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q; + + q->is_input_q = 0; + q->u.out.scan_threshold = qdio_init->scan_threshold; + setup_storage_lists(q, irq_ptr, output_sbal_array, i); + output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + tasklet_init(&q->tasklet, qdio_outbound_processing, + (unsigned long) q); + setup_timer(&q->u.out.timer, (void(*)(unsigned long)) + &qdio_outbound_timer, (unsigned long)q); + } +} + +static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) +{ + if (qdioac & AC1_SIGA_INPUT_NEEDED) + irq_ptr->siga_flag.input = 1; + if (qdioac & AC1_SIGA_OUTPUT_NEEDED) + irq_ptr->siga_flag.output = 1; + if (qdioac & AC1_SIGA_SYNC_NEEDED) + irq_ptr->siga_flag.sync = 1; + if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)) + irq_ptr->siga_flag.sync_after_ai = 1; + if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)) + irq_ptr->siga_flag.sync_out_after_pci = 1; +} + +static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, + unsigned char qdioac, unsigned long token) +{ + if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) + goto no_qebsm; + if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || + (!(qdioac & AC1_SC_QEBSM_ENABLED))) + goto no_qebsm; + + irq_ptr->sch_token = token; + + DBF_EVENT("V=V:1"); + DBF_EVENT("%8lx", irq_ptr->sch_token); + return; + +no_qebsm: + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + DBF_EVENT("noV=V"); +} + +/* + * If there is a qdio_irq we use the chsc_page and store the information + * in the qdio_irq, otherwise we copy it to the specified structure. 
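+ * Callers that have no qdio_irq yet (irq_ptr == NULL) get a temporary
+ * page allocated for the CHSC request; it is freed again once the
+ * result has been copied to *data.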
+ */ +int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, + struct subchannel_id *schid, + struct qdio_ssqd_desc *data) +{ + struct chsc_ssqd_area *ssqd; + int rc; + + DBF_EVENT("getssqd:%4x", schid->sch_no); + if (!irq_ptr) { + ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL); + if (!ssqd) + return -ENOMEM; + } else { + ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + } + + rc = chsc_ssqd(*schid, ssqd); + if (rc) + goto out; + + if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || + !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || + (ssqd->qdio_ssqd.sch != schid->sch_no)) + rc = -EINVAL; + + if (!rc) + memcpy(data, &ssqd->qdio_ssqd, sizeof(*data)); + +out: + if (!irq_ptr) + free_page((unsigned long)ssqd); + + return rc; +} + +void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) +{ + unsigned char qdioac; + int rc; + + rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc); + if (rc) { + DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no); + DBF_ERROR("rc:%x", rc); + /* all flags set, worst case */ + qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | + AC1_SIGA_SYNC_NEEDED; + } else + qdioac = irq_ptr->ssqd_desc.qdioac1; + + check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); + process_ac_flags(irq_ptr, qdioac); + DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2); + DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac); +} + +void qdio_release_memory(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + /* + * Must check queue array manually since irq_ptr->nr_input_queues / + * irq_ptr->nr_input_queues may not yet be set. + */ + for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { + q = irq_ptr->input_qs[i]; + if (q) { + free_page((unsigned long) q->slib); + kmem_cache_free(qdio_q_cache, q); + } + } + for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { + q = irq_ptr->output_qs[i]; + if (q) { + if (q->u.out.use_cq) { + int n; + + for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) { + struct qaob *aob = q->u.out.aobs[n]; + if (aob) { + qdio_release_aob(aob); + q->u.out.aobs[n] = NULL; + } + } + + qdio_disable_async_operation(&q->u.out); + } + free_page((unsigned long) q->slib); + kmem_cache_free(qdio_q_cache, q); + } + } + free_page((unsigned long) irq_ptr->qdr); + free_page(irq_ptr->chsc_page); + free_page((unsigned long) irq_ptr); +} + +static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, + struct qdio_q **irq_ptr_qs, + int i, int nr) +{ + irq_ptr->qdr->qdf0[i + nr].sliba = + (unsigned long)irq_ptr_qs[i]->slib; + + irq_ptr->qdr->qdf0[i + nr].sla = + (unsigned long)irq_ptr_qs[i]->sl; + + irq_ptr->qdr->qdf0[i + nr].slsba = + (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; + + irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4; +} + +static void setup_qdr(struct qdio_irq *irq_ptr, + struct qdio_initialize *qdio_init) +{ + int i; + + irq_ptr->qdr->qfmt = qdio_init->q_format; + irq_ptr->qdr->ac = qdio_init->qdr_ac; + irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs; + irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; + irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ + irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; + irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; + irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4; + + for (i = 0; i < qdio_init->no_input_qs; i++) + __qdio_allocate_fill_qdr(irq_ptr, 
irq_ptr->input_qs, i, 0); + + for (i = 0; i < qdio_init->no_output_qs; i++) + __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i, + qdio_init->no_input_qs); +} + +static void setup_qib(struct qdio_irq *irq_ptr, + struct qdio_initialize *init_data) +{ + if (qebsm_possible()) + irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; + + irq_ptr->qib.rflags |= init_data->qib_rflags; + + irq_ptr->qib.qfmt = init_data->q_format; + if (init_data->no_input_qs) + irq_ptr->qib.isliba = + (unsigned long)(irq_ptr->input_qs[0]->slib); + if (init_data->no_output_qs) + irq_ptr->qib.osliba = + (unsigned long)(irq_ptr->output_qs[0]->slib); + memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8); +} + +int qdio_setup_irq(struct qdio_initialize *init_data) +{ + struct ciw *ciw; + struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; + int rc; + + memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); + memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); + memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw)); + memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc)); + memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); + + irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL; + irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0; + + /* wipes qib.ac, required by ar7063 */ + memset(irq_ptr->qdr, 0, sizeof(struct qdr)); + + irq_ptr->int_parm = init_data->int_parm; + irq_ptr->nr_input_qs = init_data->no_input_qs; + irq_ptr->nr_output_qs = init_data->no_output_qs; + irq_ptr->cdev = init_data->cdev; + ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid); + setup_queues(irq_ptr, init_data); + + setup_qib(irq_ptr, init_data); + qdio_setup_thinint(irq_ptr); + set_impl_params(irq_ptr, init_data->qib_param_field_format, + init_data->qib_param_field, + init_data->input_slib_elements, + init_data->output_slib_elements); + + /* fill input and output descriptors */ + setup_qdr(irq_ptr, init_data); + + /* qdr, qib, sls, slsbs, slibs, sbales are filled now */ + + /* get qdio commands */ + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); + if (!ciw) { + DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); + rc = -EINVAL; + goto out_err; + } + irq_ptr->equeue = *ciw; + + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); + if (!ciw) { + DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); + rc = -EINVAL; + goto out_err; + } + irq_ptr->aqueue = *ciw; + + /* set new interrupt handler */ + irq_ptr->orig_handler = init_data->cdev->handler; + init_data->cdev->handler = qdio_int_handler; + return 0; +out_err: + qdio_release_memory(irq_ptr); + return rc; +} + +void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, + struct ccw_device *cdev) +{ + char s[80]; + + snprintf(s, 80, "qdio: %s %s on SC %x using " + "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n", + dev_name(&cdev->dev), + (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : + ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), + irq_ptr->schid.sch_no, + is_thinint_irq(irq_ptr), + (irq_ptr->sch_token) ? 1 : 0, + (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0, + css_general_characteristics.aif_tdd, + (irq_ptr->siga_flag.input) ? "R" : " ", + (irq_ptr->siga_flag.output) ? "W" : " ", + (irq_ptr->siga_flag.sync) ? "S" : " ", + (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ", + (irq_ptr->siga_flag.sync_out_after_pci) ? 
"P" : " "); + printk(KERN_INFO "%s", s); +} + +int qdio_enable_async_operation(struct qdio_output_q *outq) +{ + outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q, + GFP_ATOMIC); + if (!outq->aobs) { + outq->use_cq = 0; + return -ENOMEM; + } + outq->use_cq = 1; + return 0; +} + +void qdio_disable_async_operation(struct qdio_output_q *q) +{ + kfree(q->aobs); + q->aobs = NULL; + q->use_cq = 0; +} + +int __init qdio_setup_init(void) +{ + int rc; + + qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), + 256, 0, NULL); + if (!qdio_q_cache) + return -ENOMEM; + + qdio_aob_cache = kmem_cache_create("qdio_aob", + sizeof(struct qaob), + sizeof(struct qaob), + 0, + NULL); + if (!qdio_aob_cache) { + rc = -ENOMEM; + goto free_qdio_q_cache; + } + + /* Check for OSA/FCP thin interrupts (bit 67). */ + DBF_EVENT("thinint:%1d", + (css_general_characteristics.aif_osa) ? 1 : 0); + + /* Check for QEBSM support in general (bit 58). */ + DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0); + rc = 0; +out: + return rc; +free_qdio_q_cache: + kmem_cache_destroy(qdio_q_cache); + goto out; +} + +void qdio_setup_exit(void) +{ + kmem_cache_destroy(qdio_aob_cache); + kmem_cache_destroy(qdio_q_cache); +} diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c new file mode 100644 index 00000000000..5d06253c2a7 --- /dev/null +++ b/drivers/s390/cio/qdio_thinint.c @@ -0,0 +1,299 @@ +/* + * Copyright IBM Corp. 2000, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Cornelia Huck <cornelia.huck@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + */ +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/kernel_stat.h> +#include <linux/atomic.h> +#include <asm/debug.h> +#include <asm/qdio.h> +#include <asm/airq.h> +#include <asm/isc.h> + +#include "cio.h" +#include "ioasm.h" +#include "qdio.h" +#include "qdio_debug.h" + +/* + * Restriction: only 63 iqdio subchannels would have its own indicator, + * after that, subsequent subchannels share one indicator + */ +#define TIQDIO_NR_NONSHARED_IND 63 +#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) +#define TIQDIO_SHARED_IND 63 + +/* device state change indicators */ +struct indicator_t { + u32 ind; /* u32 because of compare-and-swap performance */ + atomic_t count; /* use count, 0 or 1 for non-shared indicators */ +}; + +/* list of thin interrupt input queues */ +static LIST_HEAD(tiq_list); +static DEFINE_MUTEX(tiq_list_lock); + +/* Adapter interrupt definitions */ +static void tiqdio_thinint_handler(struct airq_struct *airq); + +static struct airq_struct tiqdio_airq = { + .handler = tiqdio_thinint_handler, + .isc = QDIO_AIRQ_ISC, +}; + +static struct indicator_t *q_indicators; + +u64 last_ai_time; + +/* returns addr for the device state change indicator */ +static u32 *get_indicator(void) +{ + int i; + + for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++) + if (!atomic_read(&q_indicators[i].count)) { + atomic_set(&q_indicators[i].count, 1); + return &q_indicators[i].ind; + } + + /* use the shared indicator */ + atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count); + return &q_indicators[TIQDIO_SHARED_IND].ind; +} + +static void put_indicator(u32 *addr) +{ + int i; + + if (!addr) + return; + i = ((unsigned long)addr - (unsigned long)q_indicators) / + sizeof(struct indicator_t); + atomic_dec(&q_indicators[i].count); +} + +void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) +{ + mutex_lock(&tiq_list_lock); + list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); + mutex_unlock(&tiq_list_lock); + 
xchg(irq_ptr->dsci, 1 << 7); +} + +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + + q = irq_ptr->input_qs[0]; + /* if establish triggered an error */ + if (!q || !q->entry.prev || !q->entry.next) + return; + + mutex_lock(&tiq_list_lock); + list_del_rcu(&q->entry); + mutex_unlock(&tiq_list_lock); + synchronize_rcu(); +} + +static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr) +{ + return irq_ptr->nr_input_qs > 1; +} + +static inline int references_shared_dsci(struct qdio_irq *irq_ptr) +{ + return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; +} + +static inline int shared_ind(struct qdio_irq *irq_ptr) +{ + return references_shared_dsci(irq_ptr) || + has_multiple_inq_on_dsci(irq_ptr); +} + +void clear_nonshared_ind(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + if (shared_ind(irq_ptr)) + return; + xchg(irq_ptr->dsci, 0); +} + +int test_nonshared_ind(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return 0; + if (shared_ind(irq_ptr)) + return 0; + if (*irq_ptr->dsci) + return 1; + else + return 0; +} + +static inline u32 clear_shared_ind(void) +{ + if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) + return 0; + return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); +} + +static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq, q, i) { + if (!references_shared_dsci(irq) && + has_multiple_inq_on_dsci(irq)) + xchg(q->irq_ptr->dsci, 0); + + if (q->u.in.queue_start_poll) { + /* skip if polling is enabled or already in work */ + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) { + qperf_inc(q, int_discarded); + continue; + } + + /* avoid dsci clear here, done after processing */ + q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, + q->irq_ptr->int_parm); + } else { + if (!shared_ind(q->irq_ptr)) + xchg(q->irq_ptr->dsci, 0); + + /* + * Call inbound processing but not directly + * since that could starve other thinint queues. 
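+ * Scheduling the tasklet defers the inbound processing so the
+ * adapter interrupt handler can continue scanning the other
+ * thin-interrupt queues.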
+ */ + tasklet_schedule(&q->tasklet); + } + } +} + +/** + * tiqdio_thinint_handler - thin interrupt handler for qdio + * @alsi: pointer to adapter local summary indicator + * @data: NULL + */ +static void tiqdio_thinint_handler(struct airq_struct *airq) +{ + u32 si_used = clear_shared_ind(); + struct qdio_q *q; + + last_ai_time = S390_lowcore.int_clock; + inc_irq_stat(IRQIO_QAI); + + /* protect tiq_list entries, only changed in activate or shutdown */ + rcu_read_lock(); + + /* check for work on all inbound thinint queues */ + list_for_each_entry_rcu(q, &tiq_list, entry) { + struct qdio_irq *irq; + + /* only process queues from changed sets */ + irq = q->irq_ptr; + if (unlikely(references_shared_dsci(irq))) { + if (!si_used) + continue; + } else if (!*irq->dsci) + continue; + + tiqdio_call_inq_handlers(irq); + + qperf_inc(q, adapter_int); + } + rcu_read_unlock(); +} + +static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) +{ + struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; + u64 summary_indicator_addr, subchannel_indicator_addr; + int rc; + + if (reset) { + summary_indicator_addr = 0; + subchannel_indicator_addr = 0; + } else { + summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr); + subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci); + } + + rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr, + subchannel_indicator_addr); + if (rc) { + DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, + scssc->response.code); + goto out; + } + + DBF_EVENT("setscind"); + DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr)); + DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr)); +out: + return rc; +} + +/* allocate non-shared indicators and shared indicator */ +int __init tiqdio_allocate_memory(void) +{ + q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS, + GFP_KERNEL); + if (!q_indicators) + return -ENOMEM; + return 0; +} + +void tiqdio_free_memory(void) +{ + kfree(q_indicators); +} + +int __init tiqdio_register_thinints(void) +{ + int rc; + + rc = register_adapter_interrupt(&tiqdio_airq); + if (rc) { + DBF_EVENT("RTI:%x", rc); + return rc; + } + return 0; +} + +int qdio_establish_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return 0; + return set_subchannel_ind(irq_ptr, 0); +} + +void qdio_setup_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + irq_ptr->dsci = get_indicator(); + DBF_HEX(&irq_ptr->dsci, sizeof(void *)); +} + +void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + + /* reset adapter interrupt indicators */ + set_subchannel_ind(irq_ptr, 1); + put_indicator(irq_ptr->dsci); +} + +void __exit tiqdio_unregister_thinints(void) +{ + WARN_ON(!list_empty(&tiq_list)); + unregister_adapter_interrupt(&tiqdio_airq); +} diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c new file mode 100644 index 00000000000..15268edc54a --- /dev/null +++ b/drivers/s390/cio/scm.c @@ -0,0 +1,288 @@ +/* + * Recognize and maintain s390 storage class memory. + * + * Copyright IBM Corp. 
2012 + * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/err.h> +#include <asm/eadm.h> +#include "chsc.h" + +static struct device *scm_root; + +#define to_scm_dev(n) container_of(n, struct scm_device, dev) +#define to_scm_drv(d) container_of(d, struct scm_driver, drv) + +static int scmdev_probe(struct device *dev) +{ + struct scm_device *scmdev = to_scm_dev(dev); + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + + return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV; +} + +static int scmdev_remove(struct device *dev) +{ + struct scm_device *scmdev = to_scm_dev(dev); + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + + return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV; +} + +static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + return add_uevent_var(env, "MODALIAS=scm:scmdev"); +} + +static struct bus_type scm_bus_type = { + .name = "scm", + .probe = scmdev_probe, + .remove = scmdev_remove, + .uevent = scmdev_uevent, +}; + +/** + * scm_driver_register() - register a scm driver + * @scmdrv: driver to be registered + */ +int scm_driver_register(struct scm_driver *scmdrv) +{ + struct device_driver *drv = &scmdrv->drv; + + drv->bus = &scm_bus_type; + + return driver_register(drv); +} +EXPORT_SYMBOL_GPL(scm_driver_register); + +/** + * scm_driver_unregister() - deregister a scm driver + * @scmdrv: driver to be deregistered + */ +void scm_driver_unregister(struct scm_driver *scmdrv) +{ + driver_unregister(&scmdrv->drv); +} +EXPORT_SYMBOL_GPL(scm_driver_unregister); + +void scm_irq_handler(struct aob *aob, int error) +{ + struct aob_rq_header *aobrq = (void *) aob->request.data; + struct scm_device *scmdev = aobrq->scmdev; + struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver); + + scmdrv->handler(scmdev, aobrq->data, error); +} +EXPORT_SYMBOL_GPL(scm_irq_handler); + +#define scm_attr(name) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scm_device *scmdev = to_scm_dev(dev); \ + int ret; \ + \ + device_lock(dev); \ + ret = sprintf(buf, "%u\n", scmdev->attrs.name); \ + device_unlock(dev); \ + \ + return ret; \ +} \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); + +scm_attr(persistence); +scm_attr(oper_state); +scm_attr(data_state); +scm_attr(rank); +scm_attr(release); +scm_attr(res_id); + +static struct attribute *scmdev_attrs[] = { + &dev_attr_persistence.attr, + &dev_attr_oper_state.attr, + &dev_attr_data_state.attr, + &dev_attr_rank.attr, + &dev_attr_release.attr, + &dev_attr_res_id.attr, + NULL, +}; + +static struct attribute_group scmdev_attr_group = { + .attrs = scmdev_attrs, +}; + +static const struct attribute_group *scmdev_attr_groups[] = { + &scmdev_attr_group, + NULL, +}; + +static void scmdev_release(struct device *dev) +{ + struct scm_device *scmdev = to_scm_dev(dev); + + kfree(scmdev); +} + +static void scmdev_setup(struct scm_device *scmdev, struct sale *sale, + unsigned int size, unsigned int max_blk_count) +{ + dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa); + scmdev->nr_max_block = max_blk_count; + scmdev->address = sale->sa; + scmdev->size = 1UL << size; + scmdev->attrs.rank = sale->rank; + scmdev->attrs.persistence = sale->p; + scmdev->attrs.oper_state = sale->op_state; + scmdev->attrs.data_state = sale->data_state; + scmdev->attrs.rank = sale->rank; + scmdev->attrs.release = 
sale->r; + scmdev->attrs.res_id = sale->rid; + scmdev->dev.parent = scm_root; + scmdev->dev.bus = &scm_bus_type; + scmdev->dev.release = scmdev_release; + scmdev->dev.groups = scmdev_attr_groups; +} + +/* + * Check for state-changes, notify the driver and userspace. + */ +static void scmdev_update(struct scm_device *scmdev, struct sale *sale) +{ + struct scm_driver *scmdrv; + bool changed; + + device_lock(&scmdev->dev); + changed = scmdev->attrs.rank != sale->rank || + scmdev->attrs.oper_state != sale->op_state; + scmdev->attrs.rank = sale->rank; + scmdev->attrs.oper_state = sale->op_state; + if (!scmdev->dev.driver) + goto out; + scmdrv = to_scm_drv(scmdev->dev.driver); + if (changed && scmdrv->notify) + scmdrv->notify(scmdev, SCM_CHANGE); +out: + device_unlock(&scmdev->dev); + if (changed) + kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE); +} + +static int check_address(struct device *dev, void *data) +{ + struct scm_device *scmdev = to_scm_dev(dev); + struct sale *sale = data; + + return scmdev->address == sale->sa; +} + +static struct scm_device *scmdev_find(struct sale *sale) +{ + struct device *dev; + + dev = bus_find_device(&scm_bus_type, NULL, sale, check_address); + + return dev ? to_scm_dev(dev) : NULL; +} + +static int scm_add(struct chsc_scm_info *scm_info, size_t num) +{ + struct sale *sale, *scmal = scm_info->scmal; + struct scm_device *scmdev; + int ret; + + for (sale = scmal; sale < scmal + num; sale++) { + scmdev = scmdev_find(sale); + if (scmdev) { + scmdev_update(scmdev, sale); + /* Release reference from scm_find(). */ + put_device(&scmdev->dev); + continue; + } + scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL); + if (!scmdev) + return -ENODEV; + scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc); + ret = device_register(&scmdev->dev); + if (ret) { + /* Release reference from device_initialize(). */ + put_device(&scmdev->dev); + return ret; + } + } + + return 0; +} + +int scm_update_information(void) +{ + struct chsc_scm_info *scm_info; + u64 token = 0; + size_t num; + int ret; + + scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); + if (!scm_info) + return -ENOMEM; + + do { + ret = chsc_scm_info(scm_info, token); + if (ret) + break; + + num = (scm_info->response.length - + (offsetof(struct chsc_scm_info, scmal) - + offsetof(struct chsc_scm_info, response)) + ) / sizeof(struct sale); + + ret = scm_add(scm_info, num); + if (ret) + break; + + token = scm_info->restok; + } while (token); + + free_page((unsigned long)scm_info); + + return ret; +} + +static int scm_dev_avail(struct device *dev, void *unused) +{ + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + struct scm_device *scmdev = to_scm_dev(dev); + + if (dev->driver && scmdrv->notify) + scmdrv->notify(scmdev, SCM_AVAIL); + + return 0; +} + +int scm_process_availability_information(void) +{ + return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); +} + +static int __init scm_init(void) +{ + int ret; + + ret = bus_register(&scm_bus_type); + if (ret) + return ret; + + scm_root = root_device_register("scm"); + if (IS_ERR(scm_root)) { + bus_unregister(&scm_bus_type); + return PTR_ERR(scm_root); + } + + scm_update_information(); + return 0; +} +subsys_initcall_sync(scm_init); |
