Diffstat (limited to 'drivers/s390/cio')
30 files changed, 1559 insertions, 962 deletions
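The first hunk below rewrites drivers/s390/cio/airq.c: the old s390_register_adapter_interrupt()/indicator-byte interface gives way to registering a struct airq_struct on a per-ISC handler list, plus a set of airq_iv bit-vector helpers. As a reading aid, here is a minimal usage sketch of the new registration interface; the sample_* names, the handler body, and the choice of QDIO_AIRQ_ISC are illustrative assumptions, while the airq_struct fields and the register/unregister calls are taken from the hunk itself.

#include <linux/errno.h>
#include <asm/airq.h>
#include <asm/isc.h>

/* Hypothetical per-driver handler; per the hunk below it is called
 * whenever the byte behind lsi_ptr, ANDed with lsi_mask, is non-zero
 * for an adapter interrupt on this ISC. */
static void sample_airq_handler(struct airq_struct *airq)
{
        /* scan device indicators, schedule a tasklet, ... */
}

static struct airq_struct sample_airq = {
        .handler = sample_airq_handler,
        .isc     = QDIO_AIRQ_ISC,       /* must be <= MAX_ISC */
};

static int sample_airq_setup(void)
{
        /* With .lsi_ptr left NULL the core allocates the local-summary-
         * indicator byte itself (flagged AIRQ_PTR_ALLOCATED) and
         * .lsi_mask defaults to 0xff; returns 0, -EINVAL or -ENOMEM. */
        return register_adapter_interrupt(&sample_airq);
}

static void sample_airq_teardown(void)
{
        unregister_adapter_interrupt(&sample_airq);
}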
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index bc10220f684..00bfbee0af9 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -9,142 +9,267 @@ */ #include <linux/init.h> +#include <linux/irq.h> +#include <linux/kernel_stat.h> #include <linux/module.h> +#include <linux/mutex.h> +#include <linux/rculist.h> #include <linux/slab.h> -#include <linux/rcupdate.h> #include <asm/airq.h> #include <asm/isc.h> #include "cio.h" #include "cio_debug.h" +#include "ioasm.h" -#define NR_AIRQS 32 -#define NR_AIRQS_PER_WORD sizeof(unsigned long) -#define NR_AIRQ_WORDS (NR_AIRQS / NR_AIRQS_PER_WORD) +static DEFINE_SPINLOCK(airq_lists_lock); +static struct hlist_head airq_lists[MAX_ISC+1]; -union indicator_t { - unsigned long word[NR_AIRQ_WORDS]; - unsigned char byte[NR_AIRQS]; -} __attribute__((packed)); +/** + * register_adapter_interrupt() - register adapter interrupt handler + * @airq: pointer to adapter interrupt descriptor + * + * Returns 0 on success, or -EINVAL. + */ +int register_adapter_interrupt(struct airq_struct *airq) +{ + char dbf_txt[32]; -struct airq_t { - adapter_int_handler_t handler; - void *drv_data; -}; + if (!airq->handler || airq->isc > MAX_ISC) + return -EINVAL; + if (!airq->lsi_ptr) { + airq->lsi_ptr = kzalloc(1, GFP_KERNEL); + if (!airq->lsi_ptr) + return -ENOMEM; + airq->flags |= AIRQ_PTR_ALLOCATED; + } + if (!airq->lsi_mask) + airq->lsi_mask = 0xff; + snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq); + CIO_TRACE_EVENT(4, dbf_txt); + isc_register(airq->isc); + spin_lock(&airq_lists_lock); + hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]); + spin_unlock(&airq_lists_lock); + return 0; +} +EXPORT_SYMBOL(register_adapter_interrupt); -static union indicator_t indicators[MAX_ISC+1]; -static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS]; +/** + * unregister_adapter_interrupt - unregister adapter interrupt handler + * @airq: pointer to adapter interrupt descriptor + */ +void unregister_adapter_interrupt(struct airq_struct *airq) +{ + char dbf_txt[32]; -static int register_airq(struct airq_t *airq, u8 isc) + if (hlist_unhashed(&airq->list)) + return; + snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq); + CIO_TRACE_EVENT(4, dbf_txt); + spin_lock(&airq_lists_lock); + hlist_del_rcu(&airq->list); + spin_unlock(&airq_lists_lock); + synchronize_rcu(); + isc_unregister(airq->isc); + if (airq->flags & AIRQ_PTR_ALLOCATED) { + kfree(airq->lsi_ptr); + airq->lsi_ptr = NULL; + airq->flags &= ~AIRQ_PTR_ALLOCATED; + } +} +EXPORT_SYMBOL(unregister_adapter_interrupt); + +static irqreturn_t do_airq_interrupt(int irq, void *dummy) { - int i; + struct tpi_info *tpi_info; + struct airq_struct *airq; + struct hlist_head *head; + + __this_cpu_write(s390_idle.nohz_delay, 1); + tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + head = &airq_lists[tpi_info->isc]; + rcu_read_lock(); + hlist_for_each_entry_rcu(airq, head, list) + if ((*airq->lsi_ptr & airq->lsi_mask) != 0) + airq->handler(airq); + rcu_read_unlock(); + + return IRQ_HANDLED; +} + +static struct irqaction airq_interrupt = { + .name = "AIO", + .handler = do_airq_interrupt, +}; - for (i = 0; i < NR_AIRQS; i++) - if (!cmpxchg(&airqs[isc][i], NULL, airq)) - return i; - return -ENOMEM; +void __init init_airq_interrupts(void) +{ + irq_set_chip_and_handler(THIN_INTERRUPT, + &dummy_irq_chip, handle_percpu_irq); + setup_irq(THIN_INTERRUPT, &airq_interrupt); } /** - * s390_register_adapter_interrupt() - register adapter interrupt handler - * @handler: adapter handler to be registered - * @drv_data: driver 
data passed with each call to the handler - * @isc: isc for which the handler should be called + * airq_iv_create - create an interrupt vector + * @bits: number of bits in the interrupt vector + * @flags: allocation flags * - * Returns: - * Pointer to the indicator to be used on success - * ERR_PTR() if registration failed + * Returns a pointer to an interrupt vector structure */ -void *s390_register_adapter_interrupt(adapter_int_handler_t handler, - void *drv_data, u8 isc) +struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags) { - struct airq_t *airq; - char dbf_txt[16]; - int ret; - - if (isc > MAX_ISC) - return ERR_PTR(-EINVAL); - airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); - if (!airq) { - ret = -ENOMEM; + struct airq_iv *iv; + unsigned long size; + + iv = kzalloc(sizeof(*iv), GFP_KERNEL); + if (!iv) goto out; + iv->bits = bits; + size = BITS_TO_LONGS(bits) * sizeof(unsigned long); + iv->vector = kzalloc(size, GFP_KERNEL); + if (!iv->vector) + goto out_free; + if (flags & AIRQ_IV_ALLOC) { + iv->avail = kmalloc(size, GFP_KERNEL); + if (!iv->avail) + goto out_free; + memset(iv->avail, 0xff, size); + iv->end = 0; + } else + iv->end = bits; + if (flags & AIRQ_IV_BITLOCK) { + iv->bitlock = kzalloc(size, GFP_KERNEL); + if (!iv->bitlock) + goto out_free; + } + if (flags & AIRQ_IV_PTR) { + size = bits * sizeof(unsigned long); + iv->ptr = kzalloc(size, GFP_KERNEL); + if (!iv->ptr) + goto out_free; + } + if (flags & AIRQ_IV_DATA) { + size = bits * sizeof(unsigned int); + iv->data = kzalloc(size, GFP_KERNEL); + if (!iv->data) + goto out_free; } - airq->handler = handler; - airq->drv_data = drv_data; + spin_lock_init(&iv->lock); + return iv; - ret = register_airq(airq, isc); +out_free: + kfree(iv->ptr); + kfree(iv->bitlock); + kfree(iv->avail); + kfree(iv->vector); + kfree(iv); out: - snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); - CIO_TRACE_EVENT(4, dbf_txt); - if (ret < 0) { - kfree(airq); - return ERR_PTR(ret); - } else - return &indicators[isc].byte[ret]; + return NULL; } -EXPORT_SYMBOL(s390_register_adapter_interrupt); +EXPORT_SYMBOL(airq_iv_create); /** - * s390_unregister_adapter_interrupt - unregister adapter interrupt handler - * @ind: indicator for which the handler is to be unregistered - * @isc: interruption subclass + * airq_iv_release - release an interrupt vector + * @iv: pointer to interrupt vector structure */ -void s390_unregister_adapter_interrupt(void *ind, u8 isc) +void airq_iv_release(struct airq_iv *iv) { - struct airq_t *airq; - char dbf_txt[16]; - int i; - - i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]); - snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); - CIO_TRACE_EVENT(4, dbf_txt); - indicators[isc].byte[i] = 0; - airq = xchg(&airqs[isc][i], NULL); - /* - * Allow interrupts to complete. This will ensure that the airq handle - * is no longer referenced by any interrupt handler. 
- */ - synchronize_sched(); - kfree(airq); + kfree(iv->data); + kfree(iv->ptr); + kfree(iv->bitlock); + kfree(iv->vector); + kfree(iv->avail); + kfree(iv); } -EXPORT_SYMBOL(s390_unregister_adapter_interrupt); - -#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) +EXPORT_SYMBOL(airq_iv_release); -void do_adapter_IO(u8 isc) +/** + * airq_iv_alloc - allocate irq bits from an interrupt vector + * @iv: pointer to an interrupt vector structure + * @num: number of consecutive irq bits to allocate + * + * Returns the bit number of the first irq in the allocated block of irqs, + * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been + * specified. + */ +unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num) { - int w; - int i; - unsigned long word; - struct airq_t *airq; - - /* - * Access indicator array in word-sized chunks to minimize storage - * fetch operations. - */ - for (w = 0; w < NR_AIRQ_WORDS; w++) { - word = indicators[isc].word[w]; - i = w * NR_AIRQS_PER_WORD; - /* - * Check bytes within word for active indicators. - */ - while (word) { - if (word & INDICATOR_MASK) { - airq = airqs[isc][i]; - /* Make sure gcc reads from airqs only once. */ - barrier(); - if (likely(airq)) - airq->handler(&indicators[isc].byte[i], - airq->drv_data); - else - /* - * Reset ill-behaved indicator. - */ - indicators[isc].byte[i] = 0; - } - word <<= 8; - i++; + unsigned long bit, i, flags; + + if (!iv->avail || num == 0) + return -1UL; + spin_lock_irqsave(&iv->lock, flags); + bit = find_first_bit_inv(iv->avail, iv->bits); + while (bit + num <= iv->bits) { + for (i = 1; i < num; i++) + if (!test_bit_inv(bit + i, iv->avail)) + break; + if (i >= num) { + /* Found a suitable block of irqs */ + for (i = 0; i < num; i++) + clear_bit_inv(bit + i, iv->avail); + if (bit + num >= iv->end) + iv->end = bit + num + 1; + break; } + bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1); + } + if (bit + num > iv->bits) + bit = -1UL; + spin_unlock_irqrestore(&iv->lock, flags); + return bit; } +EXPORT_SYMBOL(airq_iv_alloc); + +/** + * airq_iv_free - free irq bits of an interrupt vector + * @iv: pointer to interrupt vector structure + * @bit: number of the first irq bit to free + * @num: number of consecutive irq bits to free + */ +void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num) +{ + unsigned long i, flags; + + if (!iv->avail || num == 0) + return; + spin_lock_irqsave(&iv->lock, flags); + for (i = 0; i < num; i++) { + /* Clear (possibly left over) interrupt bit */ + clear_bit_inv(bit + i, iv->vector); + /* Make the bit positions available again */ + set_bit_inv(bit + i, iv->avail); + } + if (bit + num >= iv->end) { + /* Find new end of bit-field */ + while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail)) + iv->end--; } + spin_unlock_irqrestore(&iv->lock, flags); +} +EXPORT_SYMBOL(airq_iv_free); + +/** + * airq_iv_scan - scan interrupt vector for non-zero bits + * @iv: pointer to interrupt vector structure + * @start: bit number to start the search + * @end: bit number to end the search + * + * Returns the bit number of the next non-zero interrupt bit, or + * -1UL if the scan completed without finding any more non-zero bits. + */ +unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, + unsigned long end) +{ + unsigned long bit; + + /* Find non-zero bit starting from 'start'.
*/ + bit = find_next_bit_inv(iv->vector, end, start); + if (bit >= end) + return -1UL; + clear_bit_inv(bit, iv->vector); + return bit; } +EXPORT_SYMBOL(airq_iv_scan); diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2d2a966a3b3..b3f791b2c1f 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -1,7 +1,7 @@ /* * S/390 common I/O routines -- blacklisting of specific devices * - * Copyright IBM Corp. 1999, 2002 + * Copyright IBM Corp. 1999, 2013 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) @@ -17,8 +17,9 @@ #include <linux/ctype.h> #include <linux/device.h> -#include <asm/cio.h> #include <asm/uaccess.h> +#include <asm/cio.h> +#include <asm/ipl.h> #include "blacklist.h" #include "cio.h" @@ -172,6 +173,29 @@ static int blacklist_parse_parameters(char *str, range_action action, to_cssid = __MAX_CSSID; to_ssid = __MAX_SSID; to = __MAX_SUBCHANNEL; + } else if (strcmp(parm, "ipldev") == 0) { + if (ipl_info.type == IPL_TYPE_CCW) { + from_cssid = 0; + from_ssid = ipl_info.data.ccw.dev_id.ssid; + from = ipl_info.data.ccw.dev_id.devno; + } else if (ipl_info.type == IPL_TYPE_FCP || + ipl_info.type == IPL_TYPE_FCP_DUMP) { + from_cssid = 0; + from_ssid = ipl_info.data.fcp.dev_id.ssid; + from = ipl_info.data.fcp.dev_id.devno; + } else { + continue; + } + to_cssid = from_cssid; + to_ssid = from_ssid; + to = from; + } else if (strcmp(parm, "condev") == 0) { + if (console_devno == -1) + continue; + + from_cssid = to_cssid = 0; + from_ssid = to_ssid = 0; + from = to = console_devno; } else { rc = parse_busid(strsep(&parm, "-"), &from_cssid, &from_ssid, &from, msgtrigger); @@ -236,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf) parm = strsep(&buf, " "); - if (strcmp("free", parm) == 0) + if (strcmp("free", parm) == 0) { rc = blacklist_parse_parameters(buf, free, 0); - else if (strcmp("add", parm) == 0) + css_schedule_eval_all_unreg(0); + } else if (strcmp("add", parm) == 0) rc = blacklist_parse_parameters(buf, add, 0); else if (strcmp("purge", parm) == 0) return ccw_purge_blacklisted(); else return -EINVAL; - css_schedule_reprobe(); return rc; } diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 84846c2b96d..e443b0d0b23 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -128,16 +128,16 @@ static ssize_t ccwgroup_online_store(struct device *dev, const char *buf, size_t count) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); unsigned long value; int ret; - if (!dev->driver) - return -EINVAL; - if (!try_module_get(gdrv->driver.owner)) - return -EINVAL; + device_lock(dev); + if (!dev->driver) { + ret = -EINVAL; + goto out; + } - ret = strict_strtoul(buf, 0, &value); + ret = kstrtoul(buf, 0, &value); if (ret) goto out; @@ -148,7 +148,7 @@ static ssize_t ccwgroup_online_store(struct device *dev, else ret = -EINVAL; out: - module_put(gdrv->driver.owner); + device_unlock(dev); return (ret == 0) ? count : ret; } @@ -168,14 +168,12 @@ static ssize_t ccwgroup_online_show(struct device *dev, * Provide an 'ungroup' attribute so the user can remove group devices no * longer needed or accidentially created. 
Saves memory :) */ -static void ccwgroup_ungroup_callback(struct device *dev) +static void ccwgroup_ungroup(struct ccwgroup_device *gdev) { - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - mutex_lock(&gdev->reg_mutex); if (device_is_registered(&gdev->dev)) { __ccwgroup_remove_symlinks(gdev); - device_unregister(dev); + device_unregister(&gdev->dev); __ccwgroup_remove_cdev_refs(gdev); } mutex_unlock(&gdev->reg_mutex); @@ -186,7 +184,7 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev, const char *buf, size_t count) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - int rc; + int rc = 0; /* Prevent concurrent online/offline processing and ungrouping. */ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) @@ -195,15 +193,15 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev, rc = -EINVAL; goto out; } - /* Note that we cannot unregister the device from one of its - * attribute methods, so we have to use this roundabout approach. - */ - rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); + + if (device_remove_file_self(dev, attr)) + ccwgroup_ungroup(gdev); + else + rc = -ENODEV; out: if (rc) { - if (rc != -EAGAIN) - /* Release onoff "lock" when ungrouping failed. */ - atomic_set(&gdev->onoff, 0); + /* Release onoff "lock" when ungrouping failed. */ + atomic_set(&gdev->onoff, 0); return rc; } return count; @@ -224,6 +222,15 @@ static const struct attribute_group *ccwgroup_attr_groups[] = { NULL, }; +static void ccwgroup_ungroup_workfn(struct work_struct *work) +{ + struct ccwgroup_device *gdev = + container_of(work, struct ccwgroup_device, ungroup_work); + + ccwgroup_ungroup(gdev); + put_device(&gdev->dev); +} + static void ccwgroup_release(struct device *dev) { kfree(to_ccwgroupdev(dev)); @@ -323,6 +330,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, atomic_set(&gdev->onoff, 0); mutex_init(&gdev->reg_mutex); mutex_lock(&gdev->reg_mutex); + INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn); gdev->count = num_devices; gdev->dev.bus = &ccwgroup_bus_type; gdev->dev.parent = parent; @@ -404,10 +412,12 @@ EXPORT_SYMBOL(ccwgroup_create_dev); static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct device *dev = data; + struct ccwgroup_device *gdev = to_ccwgroupdev(data); - if (action == BUS_NOTIFY_UNBIND_DRIVER) - device_schedule_callback(dev, ccwgroup_ungroup_callback); + if (action == BUS_NOTIFY_UNBIND_DRIVER) { + get_device(&gdev->dev); + schedule_work(&gdev->ungroup_work); + } return NOTIFY_OK; } @@ -576,11 +586,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) __ccwgroup_match_all))) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - mutex_lock(&gdev->reg_mutex); - __ccwgroup_remove_symlinks(gdev); - device_unregister(dev); - __ccwgroup_remove_cdev_refs(gdev); - mutex_unlock(&gdev->reg_mutex); + ccwgroup_ungroup(gdev); put_device(dev); } driver_unregister(&cdriver->driver); @@ -627,13 +633,7 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev) get_device(&gdev->dev); spin_unlock_irq(cdev->ccwlock); /* Unregister group device. */ - mutex_lock(&gdev->reg_mutex); - if (device_is_registered(&gdev->dev)) { - __ccwgroup_remove_symlinks(gdev); - device_unregister(&gdev->dev); - __ccwgroup_remove_cdev_refs(gdev); - } - mutex_unlock(&gdev->reg_mutex); + ccwgroup_ungroup(gdev); /* Release ccwgroup device reference for local processing. 
*/ put_device(&gdev->dev); } diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 5156264d0c7..07676c22d51 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c @@ -46,7 +46,7 @@ static u16 ccwreq_next_path(struct ccw_device *cdev) goto out; } req->retries = req->maxretries; - req->mask = lpm_adjust(req->mask >>= 1, req->lpm); + req->mask = lpm_adjust(req->mask >> 1, req->lpm); out: return req->mask; } @@ -252,7 +252,7 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) */ void ccw_request_handler(struct ccw_device *cdev) { - struct irb *irb = (struct irb *)&S390_lowcore.irb; + struct irb *irb = &__get_cpu_var(cio_irb); struct ccw_request *req = &cdev->private->req; enum io_status status; int rc = -EOPNOTSUPP; diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 50ad5fdd815..d497aa05a72 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -352,12 +352,48 @@ static ssize_t chp_shared_show(struct device *dev, static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); +static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL); + +static ssize_t chp_chid_external_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL); + static struct attribute *chp_attrs[] = { &dev_attr_status.attr, &dev_attr_configure.attr, &dev_attr_type.attr, &dev_attr_cmg.attr, &dev_attr_shared.attr, + &dev_attr_chid.attr, + &dev_attr_chid_external.attr, NULL, }; static struct attribute_group chp_attr_group = { @@ -377,6 +413,26 @@ static void chp_release(struct device *dev) } /** + * chp_update_desc - update channel-path description + * @chp - channel-path + * + * Update the channel-path description of the specified channel-path. + * Return zero on success, non-zero otherwise. + */ +int chp_update_desc(struct channel_path *chp) +{ + int rc; + + rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc); + if (rc) + return rc; + + rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); + + return rc; +} + +/** * chp_new - register a new channel-path * @chpid - channel-path ID * @@ -403,7 +459,7 @@ int chp_new(struct chp_id chpid) mutex_init(&chp->lock); /* Obtain channel path description and fill it in. */ - ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); + ret = chp_update_desc(chp); if (ret) goto out_free; if ((chp->desc.flags & 0x80) == 0) { @@ -453,7 +509,7 @@ out: * On success return a newly allocated copy of the channel-path description * data associated with the given channel-path ID. Return %NULL on error. 
*/ -void *chp_get_chp_desc(struct chp_id chpid) +struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid) { struct channel_path *chp; struct channel_path_desc *desc; diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index e1399dbee83..4efd5b867cc 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -44,6 +44,7 @@ struct channel_path { struct mutex lock; /* Serialize access to below members. */ int state; struct channel_path_desc desc; + struct channel_path_desc_fmt1 desc_fmt1; /* Channel-measurement related stuff: */ int cmg; int shared; @@ -59,9 +60,10 @@ static inline struct channel_path *chpid_to_chp(struct chp_id chpid) int chp_get_status(struct chp_id chpid); u8 chp_get_sch_opm(struct subchannel *sch); int chp_is_registered(struct chp_id chpid); -void *chp_get_chp_desc(struct chp_id chpid); +struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid); void chp_remove_cmg_attr(struct channel_path *chp); int chp_add_cmg_attr(struct channel_path *chp); +int chp_update_desc(struct channel_path *chp); int chp_new(struct chp_id chpid); void chp_cfg_schedule(struct chp_id chpid, int configure); void chp_cfg_cancel_deconfigure(struct chp_id chpid); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 10729bbcece..e3bf885f4a6 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -20,6 +20,7 @@ #include <asm/chpid.h> #include <asm/chsc.h> #include <asm/crw.h> +#include <asm/isc.h> #include "css.h" #include "cio.h" @@ -54,6 +55,7 @@ int chsc_error_from_response(int response) case 0x0004: return -EOPNOTSUPP; case 0x000b: + case 0x0107: /* "Channel busy" for the op 0x003d */ return -EBUSY; case 0x0100: case 0x0102: @@ -144,6 +146,65 @@ out: return ret; } +/** + * chsc_ssqd() - store subchannel QDIO data (SSQD) + * @schid: id of the subchannel on which SSQD is performed + * @ssqd: request and response block for SSQD + * + * Returns 0 on success. + */ +int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd) +{ + memset(ssqd, 0, sizeof(*ssqd)); + ssqd->request.length = 0x0010; + ssqd->request.code = 0x0024; + ssqd->first_sch = schid.sch_no; + ssqd->last_sch = schid.sch_no; + ssqd->ssid = schid.ssid; + + if (chsc(ssqd)) + return -EIO; + + return chsc_error_from_response(ssqd->response.code); +} +EXPORT_SYMBOL_GPL(chsc_ssqd); + +/** + * chsc_sadc() - set adapter device controls (SADC) + * @schid: id of the subchannel on which SADC is performed + * @scssc: request and response block for SADC + * @summary_indicator_addr: summary indicator address + * @subchannel_indicator_addr: subchannel indicator address + * + * Returns 0 on success. 
+ */ +int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, + u64 summary_indicator_addr, u64 subchannel_indicator_addr) +{ + memset(scssc, 0, sizeof(*scssc)); + scssc->request.length = 0x0fe0; + scssc->request.code = 0x0021; + scssc->operation_code = 0; + + scssc->summary_indicator_addr = summary_indicator_addr; + scssc->subchannel_indicator_addr = subchannel_indicator_addr; + + scssc->ks = PAGE_DEFAULT_KEY >> 4; + scssc->kc = PAGE_DEFAULT_KEY >> 4; + scssc->isc = QDIO_AIRQ_ISC; + scssc->schid = schid; + + /* enable the time delay disablement facility */ + if (css_general_characteristics.aif_tdd) + scssc->word_with_d_bit = 0x10000000; + + if (chsc(scssc)) + return -EIO; + + return chsc_error_from_response(scssc->response.code); +} +EXPORT_SYMBOL_GPL(chsc_sadc); + static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) { spin_lock_irq(sch->lock); @@ -177,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid) for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); } -static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) -{ - struct schib schib; - /* - * We don't know the device yet, but since a path - * may be available now to the device we'll have - * to do recognition again. - * Since we don't have any idea about which chpid - * that beast may be on we'll have to do a stsch - * on all devices, grr... - */ - if (stsch_err(schid, &schib)) - /* We're through */ - return -ENXIO; - - /* Put it on the slow path. */ - css_schedule_eval(schid); - return 0; -} - static int __s390_process_res_acc(struct subchannel *sch, void *data) { spin_lock_irq(sch->lock); @@ -227,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link) * The more information we have (info), the less scanning * will we have to do. 
*/ - for_each_subchannel_staged(__s390_process_res_acc, - s390_process_res_acc_new_sch, link); + for_each_subchannel_staged(__s390_process_res_acc, NULL, link); + css_schedule_reprobe(); } static int @@ -376,7 +417,7 @@ static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) continue; } mutex_lock(&chp->lock); - chsc_determine_base_channel_path_desc(chpid, &chp->desc); + chp_update_desc(chp); mutex_unlock(&chp->lock); } } @@ -433,9 +474,22 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) " failed (rc=%d).\n", ret); } +static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) +{ + int ret; + + CIO_CRW_EVENT(4, "chsc: scm available information\n"); + if (sei_area->rs != 7) + return; + + ret = scm_process_availability_information(); + if (ret) + CIO_CRW_EVENT(0, "chsc: process availability information" + " failed (rc=%d).\n", ret); +} + static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) { -#ifdef CONFIG_PCI switch (sei_area->cc) { case 1: zpci_event_error(sei_area->ccdf); @@ -444,11 +498,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) zpci_event_availability(sei_area->ccdf); break; default: - CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n", + CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", sei_area->cc); break; } -#endif } static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) @@ -470,58 +523,76 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) case 12: /* scm change notification */ chsc_process_sei_scm_change(sei_area); break; + case 14: /* scm available notification */ + chsc_process_sei_scm_avail(sei_area); + break; default: /* other stuff */ - CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", + CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", sei_area->cc); break; } + + /* Check if we might have lost some information. */ + if (sei_area->flags & 0x40) { + CIO_CRW_EVENT(2, "chsc: event overflow\n"); + css_schedule_eval_all(); + } } -static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm) +static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) { - do { + static int ntsm_unsupported; + + while (true) { memset(sei, 0, sizeof(*sei)); sei->request.length = 0x0010; sei->request.code = 0x000e; - sei->ntsm = ntsm; + if (!ntsm_unsupported) + sei->ntsm = ntsm; if (chsc(sei)) break; - if (sei->response.code == 0x0001) { - CIO_CRW_EVENT(2, "chsc: sei successful\n"); + if (sei->response.code != 0x0001) { + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", + sei->response.code, sei->ntsm); - /* Check if we might have lost some information. */ - if (sei->u.nt0_area.flags & 0x40) { - CIO_CRW_EVENT(2, "chsc: event overflow\n"); - css_schedule_eval_all(); + if (sei->response.code == 3 && sei->ntsm) { + /* Fallback for old firmware. 
*/ + ntsm_unsupported = 1; + continue; } + break; + } - switch (sei->nt) { - case 0: - chsc_process_sei_nt0(&sei->u.nt0_area); - break; - case 2: - chsc_process_sei_nt2(&sei->u.nt2_area); - break; - default: - CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n", - sei->nt); - break; - } - } else { - CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", - sei->response.code); + CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); + switch (sei->nt) { + case 0: + chsc_process_sei_nt0(&sei->u.nt0_area); + break; + case 2: + chsc_process_sei_nt2(&sei->u.nt2_area); + break; + default: + CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); break; } - } while (sei->u.nt0_area.flags & 0x80); - return 0; + if (!(sei->u.nt0_area.flags & 0x80)) + break; + } } +/* + * Handle channel subsystem related CRWs. + * Use store event information to find out what's going on. + * + * Note: Access to sei_page is serialized through machine check handler + * thread, so no need for locking. + */ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { - struct chsc_sei *sei; + struct chsc_sei *sei = sei_page; if (overflow) { css_schedule_eval_all(); @@ -531,14 +602,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); - if (!sei_page) - return; - /* Access to sei_page is serialized through machine check handler - * thread, so no need for locking. */ - sei = sei_page; CIO_TRACE_EVENT(2, "prcss"); - __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); + chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); } void chsc_chp_online(struct chp_id chpid) @@ -556,6 +622,7 @@ void chsc_chp_online(struct chp_id chpid) css_wait_for_slow_path(); for_each_subchannel_staged(__s390_process_res_acc, NULL, &link); + css_schedule_reprobe(); } } @@ -590,19 +657,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) return 0; } -static int -__s390_vary_chpid_on(struct subchannel_id schid, void *data) -{ - struct schib schib; - - if (stsch_err(schid, &schib)) - /* We're through */ - return -ENXIO; - /* Put it on the slow path. */ - css_schedule_eval(schid); - return 0; -} - /** * chsc_chp_vary - propagate channel-path vary operation to subchannels * @chpid: channl-path ID @@ -618,10 +672,11 @@ int chsc_chp_vary(struct chp_id chpid, int on) * Redo PathVerification on the devices the chpid connects to */ if (on) { - /* Try to update the channel path descritor. */ - chsc_determine_base_channel_path_desc(chpid, &chp->desc); + /* Try to update the channel path description. 
*/ + chp_update_desc(chp); for_each_subchannel_staged(s390_subchannel_vary_chpid_on, - __s390_vary_chpid_on, &chpid); + NULL, &chpid); + css_schedule_reprobe(); } else for_each_subchannel_staged(s390_subchannel_vary_chpid_off, NULL, &chpid); @@ -812,9 +867,10 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, { struct chsc_response_struct *chsc_resp; struct chsc_scpd *scpd_area; + unsigned long flags; int ret; - spin_lock_irq(&chsc_page_lock); + spin_lock_irqsave(&chsc_page_lock, flags); scpd_area = chsc_page; ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area); if (ret) @@ -822,7 +878,7 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, chsc_resp = (void *)&scpd_area->response; memcpy(desc, &chsc_resp->data, sizeof(*desc)); out: - spin_unlock_irq(&chsc_page_lock); + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } @@ -1160,3 +1216,35 @@ out: return ret; } EXPORT_SYMBOL_GPL(chsc_scm_info); + +/** + * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info. + * @schid: id of the subchannel on which PNSO is performed + * @brinfo_area: request and response block for the operation + * @resume_token: resume token for multiblock response + * @cnc: Boolean change-notification control + * + * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) + * + * Returns 0 on success. + */ +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc) +{ + memset(brinfo_area, 0, sizeof(*brinfo_area)); + brinfo_area->request.length = 0x0030; + brinfo_area->request.code = 0x003d; /* network-subchannel operation */ + brinfo_area->m = schid.m; + brinfo_area->ssid = schid.ssid; + brinfo_area->sch = schid.sch_no; + brinfo_area->cssid = schid.cssid; + brinfo_area->oc = 0; /* Store-network-bridging-information list */ + brinfo_area->resume_token = resume_token; + brinfo_area->n = (cnc != 0); + if (chsc(brinfo_area)) + return -EIO; + return chsc_error_from_response(brinfo_area->response.code); +} +EXPORT_SYMBOL_GPL(chsc_pnso_brinfo); diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 662dab4b93e..76c9b50700b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -7,14 +7,10 @@ #include <asm/chpid.h> #include <asm/chsc.h> #include <asm/schid.h> +#include <asm/qdio.h> #define CHSC_SDA_OC_MSS 0x2 -struct chsc_header { - u16 length; - u16 code; -} __attribute__ ((packed)); - #define NR_MEASUREMENT_CHARS 5 struct cmg_chars { u32 values[NR_MEASUREMENT_CHARS]; @@ -25,17 +21,6 @@ struct cmg_entry { u32 values[NR_MEASUREMENT_ENTRIES]; } __attribute__ ((packed)); -struct channel_path_desc { - u8 flags; - u8 lsn; - u8 desc; - u8 chpid; - u8 swla; - u8 zeroes; - u8 chla; - u8 chpp; -} __attribute__ ((packed)); - struct channel_path_desc_fmt1 { u8 flags; u8 lsn; @@ -43,7 +28,9 @@ struct channel_path_desc_fmt1 { u8 chpid; u32:24; u8 chpp; - u32 unused[3]; + u32 unused[2]; + u16 chid; + u32:16; u16 mdc; u16:13; u8 r:1; @@ -63,7 +50,9 @@ struct css_chsc_char { u32 : 20; u32 scssc : 1; /* bit 107 */ u32 scsscf : 1; /* bit 108 */ - u32 : 19; + u32:7; + u32 pnso:1; /* bit 116 */ + u32:11; }__attribute__((packed)); extern struct css_chsc_char css_chsc_characteristics; @@ -75,6 +64,40 @@ struct chsc_ssd_info { u16 fla[8]; }; +struct chsc_ssqd_area { + struct chsc_header request; + u16:10; + u8 ssid:2; + u8 fmt:4; + u16 first_sch; + u16:16; + u16 last_sch; + u32:32; + struct chsc_header response; + u32:32; + struct 
qdio_ssqd_desc qdio_ssqd; +} __packed; + +struct chsc_scssc_area { + struct chsc_header request; + u16 operation_code; + u16:16; + u32:32; + u32:32; + u64 summary_indicator_addr; + u64 subchannel_indicator_addr; + u32 ks:4; + u32 kc:4; + u32:21; + u32 isc:3; + u32 word_with_d_bit; + u32:32; + struct subchannel_id schid; + u32 reserved[1004]; + struct chsc_header response; + u32:32; +} __packed; + struct chsc_scpd { struct chsc_header request; u32:2; @@ -114,7 +137,9 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, void chsc_chp_online(struct chp_id chpid); void chsc_chp_offline(struct chp_id chpid); int chsc_get_channel_measurement_chars(struct channel_path *chp); - +int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); +int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, + u64 summary_indicator_addr, u64 subchannel_indicator_addr); int chsc_error_from_response(int response); int chsc_siosl(struct subchannel_id schid); @@ -154,10 +179,59 @@ struct chsc_scm_info { int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); +struct chsc_brinfo_resume_token { + u64 t1; + u64 t2; +} __packed; + +struct chsc_brinfo_naihdr { + struct chsc_brinfo_resume_token resume_token; + u32:32; + u32 instance; + u32:24; + u8 naids; + u32 reserved[3]; +} __packed; + +struct chsc_pnso_area { + struct chsc_header request; + u8:2; + u8 m:1; + u8:5; + u8:2; + u8 ssid:2; + u8 fmt:4; + u16 sch; + u8:8; + u8 cssid; + u16:16; + u8 oc; + u32:24; + struct chsc_brinfo_resume_token resume_token; + u32 n:1; + u32:31; + u32 reserved[3]; + struct chsc_header response; + u32:32; + struct chsc_brinfo_naihdr naihdr; + union { + struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0]; + struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0]; + struct qdio_brinfo_entry_l2 l2[0]; + } entries; +} __packed; + +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc); + #ifdef CONFIG_SCM_BUS int scm_update_information(void); +int scm_process_availability_information(void); #else /* CONFIG_SCM_BUS */ -#define scm_update_information() 0 +static inline int scm_update_information(void) { return 0; } +static inline int scm_process_availability_information(void) { return 0; } #endif /* CONFIG_SCM_BUS */ diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index facdf809113..3d22d2a4ce1 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -29,6 +29,10 @@ static debug_info_t *chsc_debug_msg_id; static debug_info_t *chsc_debug_log_id; +static struct chsc_request *on_close_request; +static struct chsc_async_area *on_close_chsc_area; +static DEFINE_MUTEX(on_close_mutex); + #define CHSC_MSG(imp, args...) 
do { \ debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \ } while (0) @@ -54,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) { struct chsc_private *private = dev_get_drvdata(&sch->dev); struct chsc_request *request = private->request; - struct irb *irb = (struct irb *)&S390_lowcore.irb; + struct irb *irb = &__get_cpu_var(cio_irb); CHSC_LOG(4, "irb"); CHSC_LOG_HEX(4, irb, sizeof(*irb)); @@ -169,8 +173,7 @@ static struct css_driver chsc_subchannel_driver = { static int __init chsc_init_dbfs(void) { - chsc_debug_msg_id = debug_register("chsc_msg", 16, 1, - 16 * sizeof(long)); + chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long)); if (!chsc_debug_msg_id) goto out; debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); @@ -258,7 +261,7 @@ static int chsc_async(struct chsc_async_area *chsc_area, CHSC_LOG(2, "schid"); CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); cc = chsc(chsc_area); - sprintf(dbf, "cc:%d", cc); + snprintf(dbf, sizeof(dbf), "cc:%d", cc); CHSC_LOG(2, dbf); switch (cc) { case 0: @@ -287,11 +290,11 @@ static int chsc_async(struct chsc_async_area *chsc_area, return ret; } -static void chsc_log_command(struct chsc_async_area *chsc_area) +static void chsc_log_command(void *chsc_area) { char dbf[10]; - sprintf(dbf, "CHSC:%x", chsc_area->header.code); + snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]); CHSC_LOG(0, dbf); CHSC_LOG_HEX(0, chsc_area, 32); } @@ -355,13 +358,106 @@ static int chsc_ioctl_start(void __user *user_area) if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) ret = -EFAULT; out_free: - sprintf(dbf, "ret:%d", ret); + snprintf(dbf, sizeof(dbf), "ret:%d", ret); CHSC_LOG(0, dbf); kfree(request); free_page((unsigned long)chsc_area); return ret; } +static int chsc_ioctl_on_close_set(void __user *user_area) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (on_close_chsc_area) { + ret = -EBUSY; + goto out_unlock; + } + on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL); + if (!on_close_request) { + ret = -ENOMEM; + goto out_unlock; + } + on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + if (!on_close_chsc_area) { + ret = -ENOMEM; + goto out_free_request; + } + if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free_chsc; + } + ret = 0; + goto out_unlock; + +out_free_chsc: + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; +out_free_request: + kfree(on_close_request); + on_close_request = NULL; +out_unlock: + mutex_unlock(&on_close_mutex); + snprintf(dbf, sizeof(dbf), "ocsret:%d", ret); + CHSC_LOG(0, dbf); + return ret; +} + +static int chsc_ioctl_on_close_remove(void) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (!on_close_chsc_area) { + ret = -ENOENT; + goto out_unlock; + } + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; + kfree(on_close_request); + on_close_request = NULL; + ret = 0; +out_unlock: + mutex_unlock(&on_close_mutex); + snprintf(dbf, sizeof(dbf), "ocrret:%d", ret); + CHSC_LOG(0, dbf); + return ret; +} + +static int chsc_ioctl_start_sync(void __user *user_area) +{ + struct chsc_sync_area *chsc_area; + int ret, ccode; + + chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!chsc_area) + return -ENOMEM; + if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free; + } + if (chsc_area->header.code & 0x4000) { + ret = -EINVAL; + goto out_free; + } + chsc_log_command(chsc_area); + ccode = 
chsc(chsc_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) + ret = -EFAULT; + else + ret = 0; +out_free: + free_page((unsigned long)chsc_area); + return ret; +} + static int chsc_ioctl_info_channel_path(void __user *user_cd) { struct chsc_chp_cd *cd; @@ -795,6 +891,8 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd, switch (cmd) { case CHSC_START: return chsc_ioctl_start(argp); + case CHSC_START_SYNC: + return chsc_ioctl_start_sync(argp); case CHSC_INFO_CHANNEL_PATH: return chsc_ioctl_info_channel_path(argp); case CHSC_INFO_CU: @@ -809,14 +907,60 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd, return chsc_ioctl_chpd(argp); case CHSC_INFO_DCAL: return chsc_ioctl_dcal(argp); + case CHSC_ON_CLOSE_SET: + return chsc_ioctl_on_close_set(argp); + case CHSC_ON_CLOSE_REMOVE: + return chsc_ioctl_on_close_remove(); default: /* unknown ioctl number */ return -ENOIOCTLCMD; } } +static atomic_t chsc_ready_for_use = ATOMIC_INIT(1); + +static int chsc_open(struct inode *inode, struct file *file) +{ + if (!atomic_dec_and_test(&chsc_ready_for_use)) { + atomic_inc(&chsc_ready_for_use); + return -EBUSY; + } + return nonseekable_open(inode, file); +} + +static int chsc_release(struct inode *inode, struct file *filp) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (!on_close_chsc_area) + goto out_unlock; + init_completion(&on_close_request->completion); + CHSC_LOG(0, "on_close"); + chsc_log_command(on_close_chsc_area); + spin_lock_irq(&chsc_lock); + ret = chsc_async(on_close_chsc_area, on_close_request); + spin_unlock_irq(&chsc_lock); + if (ret == -EINPROGRESS) { + wait_for_completion(&on_close_request->completion); + ret = chsc_examine_irb(on_close_request); + } + snprintf(dbf, sizeof(dbf), "relret:%d", ret); + CHSC_LOG(0, dbf); + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; + kfree(on_close_request); + on_close_request = NULL; +out_unlock: + mutex_unlock(&on_close_mutex); + atomic_inc(&chsc_ready_for_use); + return 0; +} + static const struct file_operations chsc_fops = { .owner = THIS_MODULE, - .open = nonseekable_open, + .open = chsc_open, + .release = chsc_release, .unlocked_ioctl = chsc_ioctl, .compat_ioctl = chsc_ioctl, .llseek = no_llseek, diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index c8faf6230b0..2905d8b0ec9 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -18,6 +18,7 @@ #include <linux/device.h> #include <linux/kernel_stat.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <asm/cio.h> #include <asm/delay.h> #include <asm/irq.h> @@ -28,7 +29,7 @@ #include <asm/chpid.h> #include <asm/airq.h> #include <asm/isc.h> -#include <asm/cputime.h> +#include <linux/cputime.h> #include <asm/fcx.h> #include <asm/nmi.h> #include <asm/crw.h> @@ -45,6 +46,9 @@ debug_info_t *cio_debug_msg_id; debug_info_t *cio_debug_trace_id; debug_info_t *cio_debug_crw_id; +DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb); +EXPORT_PER_CPU_SYMBOL(cio_irb); + /* * Function: cio_debug_init * Initializes three debug logs for common I/O: @@ -54,7 +58,7 @@ debug_info_t *cio_debug_crw_id; */ static int __init cio_debug_init(void) { - cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); + cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long)); if (!cio_debug_msg_id) goto out_unregister; debug_register_view(cio_debug_msg_id, &debug_sprintf_view); @@ -64,7 +68,7 @@ static int __init cio_debug_init(void) goto 
out_unregister; debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); debug_set_level(cio_debug_trace_id, 2); - cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); + cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long)); if (!cio_debug_crw_id) goto out_unregister; debug_register_view(cio_debug_crw_id, &debug_sprintf_view); @@ -342,8 +346,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib) */ int cio_commit_config(struct subchannel *sch) { - struct schib schib; int ccode, retry, ret = 0; + struct schib schib; + struct irb irb; if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; @@ -367,7 +372,10 @@ int cio_commit_config(struct subchannel *sch) ret = -EAGAIN; break; case 1: /* status pending */ - return -EBUSY; + ret = -EBUSY; + if (tsch(sch->schid, &irb)) + return ret; + break; case 2: /* busy */ udelay(100); /* allow for recovery */ ret = -EBUSY; @@ -403,7 +411,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib); */ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) { - int retry; int ret; CIO_TRACE_EVENT(2, "ensch"); @@ -418,20 +425,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) sch->config.isc = sch->isc; sch->config.intparm = intparm; - for (retry = 0; retry < 3; retry++) { + ret = cio_commit_config(sch); + if (ret == -EIO) { + /* + * Got a program check in msch. Try without + * the concurrent sense bit the next time. + */ + sch->config.csense = 0; ret = cio_commit_config(sch); - if (ret == -EIO) { - /* - * Got a program check in msch. Try without - * the concurrent sense bit the next time. - */ - sch->config.csense = 0; - } else if (ret == -EBUSY) { - struct irb irb; - if (tsch(sch->schid, &irb) != 0) - break; - } else - break; } CIO_HEX_EVENT(2, &ret, sizeof(ret)); return ret; @@ -444,7 +445,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); */ int cio_disable_subchannel(struct subchannel *sch) { - int retry; int ret; CIO_TRACE_EVENT(2, "dissch"); @@ -456,30 +456,13 @@ int cio_disable_subchannel(struct subchannel *sch) return -ENODEV; sch->config.ena = 0; + ret = cio_commit_config(sch); - for (retry = 0; retry < 3; retry++) { - ret = cio_commit_config(sch); - if (ret == -EBUSY) { - struct irb irb; - if (tsch(sch->schid, &irb) != 0) - break; - } else - break; - } CIO_HEX_EVENT(2, &ret, sizeof(ret)); return ret; } EXPORT_SYMBOL_GPL(cio_disable_subchannel); -int cio_create_sch_lock(struct subchannel *sch) -{ - sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); - if (!sch->lock) - return -ENOMEM; - spin_lock_init(sch->lock); - return 0; -} - static int cio_check_devno_blacklisted(struct subchannel *sch) { if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { @@ -536,32 +519,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) sprintf(dbf_txt, "valsch%x", schid.sch_no); CIO_TRACE_EVENT(4, dbf_txt); - /* Nuke all fields. */ - memset(sch, 0, sizeof(struct subchannel)); - - sch->schid = schid; - if (cio_is_console(schid)) { - sch->lock = cio_get_console_lock(); - } else { - err = cio_create_sch_lock(sch); - if (err) - goto out; - } - mutex_init(&sch->reg_mutex); - /* * The first subchannel that is not-operational (ccode==3) - * indicates that there aren't any more devices available. + * indicates that there aren't any more devices available. * If stsch gets an exception, it means the current subchannel set - * is not valid. + * is not valid. 
*/ - ccode = stsch_err (schid, &sch->schib); + ccode = stsch_err(schid, &sch->schib); if (ccode) { err = (ccode == 3) ? -ENXIO : ccode; goto out; } - /* Copy subchannel type from path management control word. */ sch->st = sch->schib.pmcw.st; + sch->schid = schid; switch (sch->st) { case SUBCHANNEL_TYPE_IO: @@ -578,92 +548,72 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", sch->schid.ssid, sch->schid.sch_no, sch->st); - return 0; out: - if (!cio_is_console(schid)) - kfree(sch->lock); - sch->lock = NULL; return err; } /* - * do_IRQ() handles all normal I/O device IRQ's (the special - * SMP cross-CPU interrupts have their own specific - * handlers). - * + * do_cio_interrupt() handles all normal I/O device IRQ's */ -void __irq_entry do_IRQ(struct pt_regs *regs) +static irqreturn_t do_cio_interrupt(int irq, void *dummy) { struct tpi_info *tpi_info; struct subchannel *sch; struct irb *irb; - struct pt_regs *old_regs; - old_regs = set_irq_regs(regs); - irq_enter(); __this_cpu_write(s390_idle.nohz_delay, 1); - if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) - /* Serve timer interrupts first. */ - clock_comparator_work(); - /* - * Get interrupt information from lowcore - */ - tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; - irb = (struct irb *)&S390_lowcore.irb; - do { - kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL); - if (tpi_info->adapter_IO) { - do_adapter_IO(tpi_info->isc); - continue; - } - sch = (struct subchannel *)(unsigned long)tpi_info->intparm; - if (!sch) { - /* Clear pending interrupt condition. */ - inc_irq_stat(IRQIO_CIO); - tsch(tpi_info->schid, irb); - continue; - } - spin_lock(sch->lock); - /* Store interrupt response block to lowcore. */ - if (tsch(tpi_info->schid, irb) == 0) { - /* Keep subchannel information word up to date. */ - memcpy (&sch->schib.scsw, &irb->scsw, - sizeof (irb->scsw)); - /* Call interrupt handler if there is one. */ - if (sch->driver && sch->driver->irq) - sch->driver->irq(sch); - else - inc_irq_stat(IRQIO_CIO); - } else + tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + irb = &__get_cpu_var(cio_irb); + sch = (struct subchannel *)(unsigned long) tpi_info->intparm; + if (!sch) { + /* Clear pending interrupt condition. */ + inc_irq_stat(IRQIO_CIO); + tsch(tpi_info->schid, irb); + return IRQ_HANDLED; + } + spin_lock(sch->lock); + /* Store interrupt response block to lowcore. */ + if (tsch(tpi_info->schid, irb) == 0) { + /* Keep subchannel information word up to date. */ + memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw)); + /* Call interrupt handler if there is one. */ + if (sch->driver && sch->driver->irq) + sch->driver->irq(sch); + else inc_irq_stat(IRQIO_CIO); - spin_unlock(sch->lock); - /* - * Are more interrupts pending? - * If so, the tpi instruction will update the lowcore - * to hold the info for the next interrupt. - * We don't do this for VM because a tpi drops the cpu - * out of the sie which costs more cycles than it saves. 
- */ - } while (MACHINE_IS_LPAR && tpi(NULL) != 0); - irq_exit(); - set_irq_regs(old_regs); + } else + inc_irq_stat(IRQIO_CIO); + spin_unlock(sch->lock); + + return IRQ_HANDLED; +} + +static struct irqaction io_interrupt = { + .name = "IO", + .handler = do_cio_interrupt, +}; + +void __init init_cio_interrupts(void) +{ + irq_set_chip_and_handler(IO_INTERRUPT, + &dummy_irq_chip, handle_percpu_irq); + setup_irq(IO_INTERRUPT, &io_interrupt); } #ifdef CONFIG_CCW_CONSOLE -static struct subchannel console_subchannel; -static struct io_subchannel_private console_priv; -static int console_subchannel_in_use; +static struct subchannel *console_sch; +static struct lock_class_key console_sch_key; /* * Use cio_tsch to update the subchannel status and call the interrupt handler - * if status had been pending. Called with the console_subchannel lock. + * if status had been pending. Called with the subchannel's lock held. */ -static void cio_tsch(struct subchannel *sch) +void cio_tsch(struct subchannel *sch) { struct irb *irb; int irq_context; - irb = (struct irb *)&S390_lowcore.irb; + irb = &__get_cpu_var(cio_irb); /* Store interrupt response block to lowcore. */ if (tsch(sch->schid, irb) != 0) /* Not status pending or not operational. */ @@ -675,6 +625,7 @@ static void cio_tsch(struct subchannel *sch) local_bh_disable(); irq_enter(); } + kstat_incr_irq_this_cpu(IO_INTERRUPT); if (sch->driver && sch->driver->irq) sch->driver->irq(sch); else @@ -685,135 +636,91 @@ static void cio_tsch(struct subchannel *sch) } } -void *cio_get_console_priv(void) -{ - return &console_priv; -} - -/* - * busy wait for the next interrupt on the console - */ -void wait_cons_dev(void) +static int cio_test_for_console(struct subchannel_id schid, void *data) { - if (!console_subchannel_in_use) - return; - - while (1) { - cio_tsch(&console_subchannel); - if (console_subchannel.schib.scsw.cmd.actl == 0) - break; - udelay_simple(100); - } -} + struct schib schib; -static int -cio_test_for_console(struct subchannel_id schid, void *data) -{ - if (stsch_err(schid, &console_subchannel.schib) != 0) + if (stsch_err(schid, &schib) != 0) return -ENXIO; - if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && - console_subchannel.schib.pmcw.dnv && - (console_subchannel.schib.pmcw.dev == console_devno)) { + if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && + (schib.pmcw.dev == console_devno)) { console_irq = schid.sch_no; return 1; /* found */ } return 0; } - -static int -cio_get_console_sch_no(void) +static int cio_get_console_sch_no(void) { struct subchannel_id schid; - + struct schib schib; + init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */ schid.sch_no = console_irq; - if (stsch_err(schid, &console_subchannel.schib) != 0 || - (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || - !console_subchannel.schib.pmcw.dnv) + if (stsch_err(schid, &schib) != 0 || + (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv) return -1; - console_devno = console_subchannel.schib.pmcw.dev; + console_devno = schib.pmcw.dev; } else if (console_devno != -1) { /* At least the console device number is known. */ for_each_subchannel(cio_test_for_console, NULL); - if (console_irq == -1) - return -1; - } else { - /* unlike in 2.4, we cannot autoprobe here, since - * the channel subsystem is not fully initialized. 
- * With some luck, the HWC console can take over */ - return -1; } return console_irq; } -struct subchannel * -cio_probe_console(void) +struct subchannel *cio_probe_console(void) { - int sch_no, ret; struct subchannel_id schid; + struct subchannel *sch; + int sch_no, ret; - if (xchg(&console_subchannel_in_use, 1) != 0) - return ERR_PTR(-EBUSY); sch_no = cio_get_console_sch_no(); if (sch_no == -1) { - console_subchannel_in_use = 0; pr_warning("No CCW console was found\n"); return ERR_PTR(-ENODEV); } - memset(&console_subchannel, 0, sizeof(struct subchannel)); init_subchannel_id(&schid); schid.sch_no = sch_no; - ret = cio_validate_subchannel(&console_subchannel, schid); - if (ret) { - console_subchannel_in_use = 0; - return ERR_PTR(-ENODEV); - } + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + return sch; - /* - * enable console I/O-interrupt subclass - */ + lockdep_set_class(sch->lock, &console_sch_key); isc_register(CONSOLE_ISC); - console_subchannel.config.isc = CONSOLE_ISC; - console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; - ret = cio_commit_config(&console_subchannel); + sch->config.isc = CONSOLE_ISC; + sch->config.intparm = (u32)(addr_t)sch; + ret = cio_commit_config(sch); if (ret) { isc_unregister(CONSOLE_ISC); - console_subchannel_in_use = 0; + put_device(&sch->dev); return ERR_PTR(ret); } - return &console_subchannel; + console_sch = sch; + return sch; } -void -cio_release_console(void) -{ - console_subchannel.config.intparm = 0; - cio_commit_config(&console_subchannel); - isc_unregister(CONSOLE_ISC); - console_subchannel_in_use = 0; -} - -/* Bah... hack to catch console special sausages. */ -int -cio_is_console(struct subchannel_id schid) +int cio_is_console(struct subchannel_id schid) { - if (!console_subchannel_in_use) + if (!console_sch) return 0; - return schid_equal(&schid, &console_subchannel.schid); + return schid_equal(&schid, &console_sch->schid); } -struct subchannel * -cio_get_console_subchannel(void) +void cio_register_early_subchannels(void) { - if (!console_subchannel_in_use) - return NULL; - return &console_subchannel; + int ret; + + if (!console_sch) + return; + + ret = css_register_subchannel(console_sch); + if (ret) + put_device(&console_sch->dev); } +#endif /* CONFIG_CCW_CONSOLE */ -#endif static int __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { @@ -844,7 +751,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid) struct tpi_info ti; if (tpi(&ti)) { - tsch(ti.schid, (struct irb *)&S390_lowcore.irb); + tsch(ti.schid, &__get_cpu_var(cio_irb)); if (schid_equal(&ti.schid, &schid)) return 0; } @@ -962,9 +869,9 @@ static void css_reset(void) atomic_inc(&chpid_reset_count); } /* Wait for machine check for all channel paths. 
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 4a1ff5c2eb8..a01376ae174 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -102,6 +102,8 @@ struct subchannel {
 	struct schib_config config;
 } __attribute__ ((aligned(8)));

+DECLARE_PER_CPU(struct irb, cio_irb);
+
 #define to_subchannel(n) container_of(n, struct subchannel, dev)

 extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
@@ -121,23 +123,15 @@ extern int cio_commit_config(struct subchannel *sch);
 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
 int cio_tm_intrg(struct subchannel *sch);

-int cio_create_sch_lock(struct subchannel *);
-void do_adapter_IO(u8 isc);
-void do_IRQ(struct pt_regs *);
-
 /* Use with care. */
 #ifdef CONFIG_CCW_CONSOLE
 extern struct subchannel *cio_probe_console(void);
-extern void cio_release_console(void);
 extern int cio_is_console(struct subchannel_id);
-extern struct subchannel *cio_get_console_subchannel(void);
-extern spinlock_t * cio_get_console_lock(void);
-extern void *cio_get_console_priv(void);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
 #else
 #define cio_is_console(schid) 0
-#define cio_get_console_subchannel() NULL
-#define cio_get_console_lock() NULL
-#define cio_get_console_priv() NULL
+static inline void cio_register_early_subchannels(void) {}
 #endif

 #endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c0a86..23054f8fa9f 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -33,7 +33,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
-#include <linux/timex.h>	/* get_clock() */
+#include <linux/timex.h>	/* get_tod_clock() */

 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
 		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
 		memcpy(reference_buf, hw_block, cmb_data->size);
 	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
-	cmb_data->last_update = get_clock();
+	cmb_data->last_update = get_tod_clock();
 	kfree(reference_buf);
 	return 0;
 }
@@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
 			memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
 		cmb_data->last_update = 0;
 	}
-	cdev->private->cmb_start_time = get_clock();
+	cdev->private->cmb_start_time = get_tod_clock();
 	spin_unlock_irq(cdev->ccwlock);
 }
@@ -1182,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev,
 	int ret;
 	unsigned long val;

-	ret = strict_strtoul(buf, 16, &val);
+	ret = kstrtoul(buf, 16, &val);
 	if (ret)
 		return ret;
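
Here and in css.c and device.c below, the long-deprecated strict_strtoul() is replaced by kstrtoul(), which keeps the same calling convention but rejects trailing garbage and reports overflow as an error instead of silently truncating. A sketch of the resulting sysfs-store pattern (the attribute and surrounding names are illustrative):

	static ssize_t enable_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
	{
		unsigned long val;
		int ret;

		ret = kstrtoul(buf, 16, &val);	/* base 16, as in cmb_enable_store */
		if (ret)
			return ret;		/* -EINVAL or -ERANGE */
		/* ... act on val ... */
		return count;
	}
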
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd8b85..0268e5fd59b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -69,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
 	struct cb_data *cb = data;
 	int rc = 0;

-	idset_sch_del(cb->set, sch->schid);
+	if (cb->set)
+		idset_sch_del(cb->set, sch->schid);
 	if (cb->fn_known_sch)
 		rc = cb->fn_known_sch(sch, cb->data);
 	return rc;
@@ -115,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 	cb.fn_known_sch = fn_known;
 	cb.fn_unknown_sch = fn_unknown;

+	if (fn_known && !fn_unknown) {
+		/* Skip idset allocation in case of known-only loop. */
+		cb.set = NULL;
+		return bus_for_each_dev(&css_bus_type, NULL, &cb,
+					call_fn_known_sch);
+	}
+
 	cb.set = idset_sch_new();
 	if (!cb.set)
 		/* fall back to brute force scanning in case of oom */
@@ -137,37 +145,53 @@ out:

 static void css_sch_todo(struct work_struct *work);

-static struct subchannel *
-css_alloc_subchannel(struct subchannel_id schid)
+static int css_sch_create_locks(struct subchannel *sch)
+{
+	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+	if (!sch->lock)
+		return -ENOMEM;
+
+	spin_lock_init(sch->lock);
+	mutex_init(&sch->reg_mutex);
+
+	return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	sch->config.intparm = 0;
+	cio_commit_config(sch);
+	kfree(sch->lock);
+	kfree(sch);
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
 {
 	struct subchannel *sch;
 	int ret;

-	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
-	if (sch == NULL)
+	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+	if (!sch)
 		return ERR_PTR(-ENOMEM);
-	ret = cio_validate_subchannel (sch, schid);
-	if (ret < 0) {
-		kfree(sch);
-		return ERR_PTR(ret);
-	}
+
+	ret = cio_validate_subchannel(sch, schid);
+	if (ret < 0)
+		goto err;
+
+	ret = css_sch_create_locks(sch);
+	if (ret)
+		goto err;
+
 	INIT_WORK(&sch->todo_work, css_sch_todo);
+	sch->dev.release = &css_subchannel_release;
+	device_initialize(&sch->dev);
 	return sch;
-}

-static void
-css_subchannel_release(struct device *dev)
-{
-	struct subchannel *sch;
-
-	sch = to_subchannel(dev);
-	if (!cio_is_console(sch->schid)) {
-		/* Reset intparm to zeroes. */
-		sch->config.intparm = 0;
-		cio_commit_config(sch);
-		kfree(sch->lock);
-		kfree(sch);
-	}
+err:
+	kfree(sch);
+	return ERR_PTR(ret);
 }

 static int css_sch_device_register(struct subchannel *sch)
@@ -177,7 +201,7 @@ static int css_sch_device_register(struct subchannel *sch)
 	mutex_lock(&sch->reg_mutex);
 	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
 		     sch->schid.sch_no);
-	ret = device_register(&sch->dev);
+	ret = device_add(&sch->dev);
 	mutex_unlock(&sch->reg_mutex);
 	return ret;
 }
@@ -228,16 +252,11 @@ void css_update_ssd_info(struct subchannel *sch)
 {
 	int ret;

-	if (cio_is_console(sch->schid)) {
-		/* Console is initialized too early for functions requiring
-		 * memory allocation. */
+	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+	if (ret)
 		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
-	} else {
-		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
-		if (ret)
-			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
-		ssd_register_chpids(&sch->ssd_info);
-	}
+
+	ssd_register_chpids(&sch->ssd_info);
 }

 static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -275,14 +294,13 @@ static const struct attribute_group *default_subch_attr_groups[] = {
 	NULL,
 };

-static int css_register_subchannel(struct subchannel *sch)
+int css_register_subchannel(struct subchannel *sch)
 {
 	int ret;

 	/* Initialize the subchannel structure */
 	sch->dev.parent = &channel_subsystems[0]->device;
 	sch->dev.bus = &css_bus_type;
-	sch->dev.release = &css_subchannel_release;
 	sch->dev.groups = default_subch_attr_groups;
 	/*
 	 * We don't want to generate uevents for I/O subchannels that don't
@@ -314,23 +332,19 @@ static int css_register_subchannel(struct subchannel *sch)
 	return ret;
 }

-int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
 {
-	int ret;
 	struct subchannel *sch;
+	int ret;
+
+	sch = css_alloc_subchannel(schid);
+	if (IS_ERR(sch))
+		return PTR_ERR(sch);

-	if (cio_is_console(schid))
-		sch = cio_get_console_subchannel();
-	else {
-		sch = css_alloc_subchannel(schid);
-		if (IS_ERR(sch))
-			return PTR_ERR(sch);
-	}
 	ret = css_register_subchannel(sch);
-	if (ret) {
-		if (!cio_is_console(schid))
-			put_device(&sch->dev);
-	}
+	if (ret)
+		put_device(&sch->dev);
+
 	return ret;
 }
@@ -540,11 +554,16 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 		case -ENOMEM:
 		case -EIO:
 			/* These should abort looping */
+			spin_lock_irq(&slow_subchannel_lock);
 			idset_sch_del_subseq(slow_subchannel_set, schid);
+			spin_unlock_irq(&slow_subchannel_lock);
 			break;
 		default:
 			rc = 0;
 		}
+		/* Allow scheduling here since the containing loop might
+		 * take a while. */
+		cond_resched();
 	}
 	return rc;
 }
@@ -564,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused)
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }

-static DECLARE_WORK(slow_path_work, css_slow_path_func);
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *cio_work_q;

 void css_schedule_eval(struct subchannel_id schid)
@@ -574,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }

@@ -585,7 +604,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }

@@ -598,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data)
 	return 0;
 }

-static void css_schedule_eval_all_unreg(void)
+void css_schedule_eval_all_unreg(unsigned long delay)
 {
 	unsigned long flags;
 	struct idset *unreg_set;
@@ -616,7 +635,7 @@ static void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
@@ -629,7 +648,8 @@ void css_wait_for_slow_path(void)
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-	css_schedule_eval_all_unreg();
+	/* Schedule with a delay to allow merging of subsequent calls. */
+	css_schedule_eval_all_unreg(1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
@@ -734,7 +754,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
 	int ret;
 	unsigned long val;

-	ret = strict_strtoul(buf, 16, &val);
+	ret = kstrtoul(buf, 16, &val);
 	if (ret)
 		return ret;
 	mutex_lock(&css->mutex);
@@ -770,7 +790,7 @@ static int __init setup_css(int nr)
 	css->pseudo_subchannel->dev.release = css_subchannel_release;
 	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
 	mutex_init(&css->pseudo_subchannel->reg_mutex);
-	ret = cio_create_sch_lock(css->pseudo_subchannel);
+	ret = css_sch_create_locks(css->pseudo_subchannel);
 	if (ret) {
 		kfree(css->pseudo_subchannel);
 		return ret;
@@ -780,7 +800,7 @@ static int __init setup_css(int nr)
 	css->cssid = nr;
 	dev_set_name(&css->device, "css%x", nr);
 	css->device.release = channel_subsystem_release;
-	tod_high = (u32) (get_clock() >> 32);
+	tod_high = (u32) (get_tod_clock() >> 32);
 	css_generate_pgid(css, tod_high);
 	return 0;
 }
@@ -870,8 +890,7 @@ static struct notifier_block css_power_notifier = {

 /*
  * Now that the driver core is running, we can setup our channel subsystem.
- * The struct subchannel's are created during probing (except for the
- * static console subchannel).
+ * The struct subchannel's are created during probing.
  */
 static int __init css_bus_init(void)
 {
@@ -1050,6 +1069,8 @@ int css_complete_work(void)
  */
 static int __init channel_subsystem_init_sync(void)
 {
+	/* Register subchannels which are already in use. */
+	cio_register_early_subchannels();
 	/* Start initial subchannel evaluation. */
 	css_schedule_eval_all();
 	css_complete_work();
@@ -1065,9 +1086,8 @@ void channel_subsystem_reinit(void)
 	chsc_enable_facility(CHSC_SDA_OC_MSS);
 	chp_id_for_each(&chpid) {
 		chp = chpid_to_chp(chpid);
-		if (!chp)
-			continue;
-		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+		if (chp)
+			chp_update_desc(chp);
 	}
 }
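
Turning slow_path_work into a delayed work item costs nothing on the existing paths (a zero delay behaves like queue_work()) but lets css_schedule_reprobe() pass 1 * HZ so that bursts of reprobe requests collapse into a single evaluation pass. The pattern in isolation (all names illustrative):

	static void eval_fn(struct work_struct *work)
	{
		/* drain the accumulated idset here */
	}
	static DECLARE_DELAYED_WORK(eval_work, eval_fn);

	static void schedule_eval(struct workqueue_struct *wq, bool merge)
	{
		/* 0 = run as soon as possible; 1 * HZ = wait so callers merge */
		queue_delayed_work(wq, &eval_work, merge ? 1 * HZ : 0);
	}
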
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4af3dfe70ef..2c9107e2025 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -101,7 +101,8 @@ extern int css_driver_register(struct css_driver *);
 extern void css_driver_unregister(struct css_driver *);

 extern void css_sch_device_unregister(struct subchannel *);
-extern int css_probe_device(struct subchannel_id);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
 extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
 extern int max_ssid;
@@ -109,7 +110,6 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 			       int (*fn_unknown)(struct subchannel_id, void *),
 			       void *data);
 extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern void css_reiterate_subchannels(void);
 void css_update_ssd_info(struct subchannel *sch);

 struct channel_subsystem {
@@ -130,11 +130,10 @@ struct channel_subsystem {

 extern struct channel_subsystem *channel_subsystems[];

-void channel_subsystem_reinit(void);
-
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
 int css_complete_work(void);

 int sch_is_pseudo_sch(struct subchannel *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7cd5c6812ac..dfef5e63cb7 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -19,6 +19,7 @@
 #include <linux/list.h>
 #include <linux/device.h>
 #include <linux/workqueue.h>
+#include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/kernel_stat.h>
@@ -43,6 +44,10 @@ static DEFINE_SPINLOCK(recovery_lock);
 static int recovery_phase;
 static const unsigned long recovery_delay[] = { 3, 30, 300 };

+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
 /******************* bus type handling ***********************/

 /* The Linux driver model distinguishes between a bus type and
@@ -127,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
 	return ret;
 }

-static struct bus_type ccw_bus_type;
-
 static void io_subchannel_irq(struct subchannel *);
 static int io_subchannel_probe(struct subchannel *);
 static int io_subchannel_remove(struct subchannel *);
@@ -137,8 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
 static int io_subchannel_chp_event(struct subchannel *,
 				   struct chp_link *, int);
 static void recovery_func(unsigned long data);
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;

 static struct css_device_id io_subchannel_ids[] = {
 	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -191,10 +192,7 @@ int __init io_subchannel_init(void)
 {
 	int ret;

-	init_waitqueue_head(&ccw_device_init_wq);
-	atomic_set(&ccw_device_init_count, 0);
 	setup_timer(&recovery_timer, recovery_func, 0);
-
 	ret = bus_register(&ccw_bus_type);
 	if (ret)
 		return ret;
@@ -335,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev)
 		if (ret != 0)
 			return ret;
 	}
-	cdev->online = 0;
 	spin_lock_irq(cdev->ccwlock);
 	sch = to_subchannel(cdev->dev.parent);
+	cdev->online = 0;
 	/* Wait until a final state or DISCONNECTED is reached */
 	while (!dev_fsm_final_state(cdev) &&
 	       cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -448,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev)
 		ret = cdev->drv->set_online(cdev);
 	if (ret)
 		goto rollback;
+
+	spin_lock_irq(cdev->ccwlock);
 	cdev->online = 1;
+	spin_unlock_irq(cdev->ccwlock);
 	return 0;

 rollback:
@@ -548,17 +549,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 	if (!dev_fsm_final_state(cdev) &&
 	    cdev->private->state != DEV_STATE_DISCONNECTED) {
 		ret = -EAGAIN;
-		goto out_onoff;
+		goto out;
 	}
 	/* Prevent conflict between pending work and on-/offline processing.*/
 	if (work_pending(&cdev->private->todo_work)) {
 		ret = -EAGAIN;
-		goto out_onoff;
-	}
-
-	if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
-		ret = -EINVAL;
-		goto out_onoff;
+		goto out;
 	}
 	if (!strncmp(buf, "force\n", count)) {
 		force = 1;
@@ -566,10 +562,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 		ret = 0;
 	} else {
 		force = 0;
-		ret = strict_strtoul(buf, 16, &i);
+		ret = kstrtoul(buf, 16, &i);
 	}
 	if (ret)
 		goto out;
+
+	device_lock(dev);
 	switch (i) {
 	case 0:
 		ret = online_store_handle_offline(cdev);
@@ -580,10 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 	default:
 		ret = -EINVAL;
 	}
+	device_unlock(dev);
+
 out:
-	if (cdev->drv)
-		module_put(cdev->drv->driver.owner);
-out_onoff:
 	atomic_set(&cdev->private->onoff, 0);
 	return (ret < 0) ? ret : count;
 }
@@ -632,6 +629,14 @@ initiate_logging(struct device *dev, struct device_attribute *attr,
 	return count;
 }

+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "%02x\n", sch->vpm);
+}
+
 static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
 static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -640,11 +645,13 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
 static DEVICE_ATTR(online, 0644, online_show, online_store);
 static DEVICE_ATTR(availability, 0444, available_show, NULL);
 static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);

 static struct attribute *io_subchannel_attrs[] = {
 	&dev_attr_chpids.attr,
 	&dev_attr_pimpampom.attr,
 	&dev_attr_logging.attr,
+	&dev_attr_vpm.attr,
 	NULL,
 };

@@ -671,18 +678,11 @@ static const struct attribute_group *ccwdev_attr_groups[] = {
 	NULL,
 };

-/* this is a simple abstraction for device_register that sets the
- * correct bus type and adds the bus specific files */
-static int ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_add(struct ccw_device *cdev)
 {
 	struct device *dev = &cdev->dev;
-	int ret;

 	dev->bus = &ccw_bus_type;
-	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
-			   cdev->private->dev_id.devno);
-	if (ret)
-		return ret;
 	return device_add(dev);
 }
@@ -757,22 +757,46 @@ static void ccw_device_todo(struct work_struct *work);
 static int io_subchannel_initialize_dev(struct subchannel *sch,
 					struct ccw_device *cdev)
 {
-	cdev->private->cdev = cdev;
-	cdev->private->int_class = IRQIO_CIO;
-	atomic_set(&cdev->private->onoff, 0);
+	struct ccw_device_private *priv = cdev->private;
+	int ret;
+
+	priv->cdev = cdev;
+	priv->int_class = IRQIO_CIO;
+	priv->state = DEV_STATE_NOT_OPER;
+	priv->dev_id.devno = sch->schib.pmcw.dev;
+	priv->dev_id.ssid = sch->schid.ssid;
+	priv->schid = sch->schid;
+
+	INIT_WORK(&priv->todo_work, ccw_device_todo);
+	INIT_LIST_HEAD(&priv->cmb_list);
+	init_waitqueue_head(&priv->wait_q);
+	init_timer(&priv->timer);
+
+	atomic_set(&priv->onoff, 0);
+	cdev->ccwlock = sch->lock;
 	cdev->dev.parent = &sch->dev;
 	cdev->dev.release = ccw_device_release;
-	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
 	cdev->dev.groups = ccwdev_attr_groups;
 	/* Do first half of device_register. */
 	device_initialize(&cdev->dev);
+	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+			   cdev->private->dev_id.devno);
+	if (ret)
+		goto out_put;
 	if (!get_device(&sch->dev)) {
-		/* Release reference from device_initialize(). */
-		put_device(&cdev->dev);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_put;
 	}
-	cdev->private->flags.initialized = 1;
+	priv->flags.initialized = 1;
+	spin_lock_irq(sch->lock);
+	sch_set_cdev(sch, cdev);
+	spin_unlock_irq(sch->lock);
 	return 0;
+
+out_put:
+	/* Release reference from device_initialize(). */
+	put_device(&cdev->dev);
+	return ret;
 }

 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
@@ -851,7 +875,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
 		dev_set_uevent_suppress(&sch->dev, 0);
 		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
 	/* make it known to the system */
-	ret = ccw_device_register(cdev);
+	ret = ccw_device_add(cdev);
 	if (ret) {
 		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
 			      cdev->private->dev_id.ssid,
@@ -916,26 +940,11 @@ io_subchannel_recog_done(struct ccw_device *cdev)

 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 {
-	struct ccw_device_private *priv;
-
-	cdev->ccwlock = sch->lock;
-
-	/* Init private data. */
-	priv = cdev->private;
-	priv->dev_id.devno = sch->schib.pmcw.dev;
-	priv->dev_id.ssid = sch->schid.ssid;
-	priv->schid = sch->schid;
-	priv->state = DEV_STATE_NOT_OPER;
-	INIT_LIST_HEAD(&priv->cmb_list);
-	init_waitqueue_head(&priv->wait_q);
-	init_timer(&priv->timer);
-
 	/* Increase counter of devices currently in recognition. */
 	atomic_inc(&ccw_device_init_count);

 	/* Start async. device sensing. */
 	spin_lock_irq(sch->lock);
-	sch_set_cdev(sch, cdev);
 	ccw_device_recognition(cdev);
 	spin_unlock_irq(sch->lock);
 }
@@ -1076,19 +1085,14 @@ static int io_subchannel_probe(struct subchannel *sch)
 		dev_set_uevent_suppress(&sch->dev, 0);
 		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
 		cdev = sch_get_cdev(sch);
-		cdev->dev.groups = ccwdev_attr_groups;
-		device_initialize(&cdev->dev);
-		cdev->private->flags.initialized = 1;
-		ccw_device_register(cdev);
-		/*
-		 * Check if the device is already online. If it is
-		 * the reference count needs to be corrected since we
-		 * didn't obtain a reference in ccw_device_set_online.
-		 */
-		if (cdev->private->state != DEV_STATE_NOT_OPER &&
-		    cdev->private->state != DEV_STATE_OFFLINE &&
-		    cdev->private->state != DEV_STATE_BOXED)
-			get_device(&cdev->dev);
+		rc = ccw_device_add(cdev);
+		if (rc) {
+			/* Release online reference. */
+			put_device(&cdev->dev);
+			goto out_schedule;
+		}
+		if (atomic_dec_and_test(&ccw_device_init_count))
+			wake_up(&ccw_device_init_wq);
 		return 0;
 	}
 	io_subchannel_init_fields(sch);
@@ -1569,89 +1573,121 @@ out:
 	return rc;
 }

-#ifdef CONFIG_CCW_CONSOLE
-static struct ccw_device console_cdev;
-static struct ccw_device_private console_private;
-static int console_cdev_in_use;
-
-static DEFINE_SPINLOCK(ccw_console_lock);
-
-spinlock_t * cio_get_console_lock(void)
+static void ccw_device_set_int_class(struct ccw_device *cdev)
 {
-	return &ccw_console_lock;
+	struct ccw_driver *cdrv = cdev->drv;
+
+	/* Note: we interpret class 0 in this context as an uninitialized
+	 * field since it translates to a non-I/O interrupt class. */
+	if (cdrv->int_class != 0)
+		cdev->private->int_class = cdrv->int_class;
+	else
+		cdev->private->int_class = IRQIO_CIO;
 }

-static int ccw_device_console_enable(struct ccw_device *cdev,
-				     struct subchannel *sch)
+#ifdef CONFIG_CCW_CONSOLE
+int __init ccw_device_enable_console(struct ccw_device *cdev)
 {
-	struct io_subchannel_private *io_priv = cio_get_console_priv();
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	int rc;

-	/* Attach subchannel private data. */
-	memset(io_priv, 0, sizeof(*io_priv));
-	set_io_private(sch, io_priv);
+	if (!cdev->drv || !cdev->handler)
+		return -EINVAL;
+
 	io_subchannel_init_fields(sch);
 	rc = cio_commit_config(sch);
 	if (rc)
 		return rc;
 	sch->driver = &io_subchannel_driver;
-	/* Initialize the ccw_device structure. */
-	cdev->dev.parent= &sch->dev;
-	sch_set_cdev(sch, cdev);
 	io_subchannel_recog(cdev, sch);
 	/* Now wait for the async. recognition to come to an end. */
 	spin_lock_irq(cdev->ccwlock);
 	while (!dev_fsm_final_state(cdev))
-		wait_cons_dev();
-	rc = -EIO;
-	if (cdev->private->state != DEV_STATE_OFFLINE)
+		ccw_device_wait_idle(cdev);
+
+	/* Hold on to an extra reference while device is online. */
+	get_device(&cdev->dev);
+	rc = ccw_device_online(cdev);
+	if (rc)
 		goto out_unlock;
-	ccw_device_online(cdev);
+
 	while (!dev_fsm_final_state(cdev))
-		wait_cons_dev();
-	if (cdev->private->state != DEV_STATE_ONLINE)
-		goto out_unlock;
-	rc = 0;
+		ccw_device_wait_idle(cdev);
+
+	if (cdev->private->state == DEV_STATE_ONLINE)
+		cdev->online = 1;
+	else
+		rc = -EIO;
 out_unlock:
 	spin_unlock_irq(cdev->ccwlock);
+	if (rc) /* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
 	return rc;
 }

-struct ccw_device *
-ccw_device_probe_console(void)
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
 {
+	struct io_subchannel_private *io_priv;
+	struct ccw_device *cdev;
 	struct subchannel *sch;
-	int ret;

-	if (xchg(&console_cdev_in_use, 1) != 0)
-		return ERR_PTR(-EBUSY);
 	sch = cio_probe_console();
-	if (IS_ERR(sch)) {
-		console_cdev_in_use = 0;
-		return (void *) sch;
+	if (IS_ERR(sch))
+		return ERR_CAST(sch);
+
+	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+	if (!io_priv) {
+		put_device(&sch->dev);
+		return ERR_PTR(-ENOMEM);
 	}
-	memset(&console_cdev, 0, sizeof(struct ccw_device));
-	memset(&console_private, 0, sizeof(struct ccw_device_private));
-	console_cdev.private = &console_private;
-	console_private.cdev = &console_cdev;
-	console_private.int_class = IRQIO_CIO;
-	ret = ccw_device_console_enable(&console_cdev, sch);
-	if (ret) {
-		cio_release_console();
-		console_cdev_in_use = 0;
-		return ERR_PTR(ret);
+	set_io_private(sch, io_priv);
+	cdev = io_subchannel_create_ccwdev(sch);
+	if (IS_ERR(cdev)) {
+		put_device(&sch->dev);
+		kfree(io_priv);
+		return cdev;
+	}
+	cdev->drv = drv;
+	ccw_device_set_int_class(cdev);
+	return cdev;
+}
+
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct io_subchannel_private *io_priv = to_io_private(sch);
+
+	set_io_private(sch, NULL);
+	put_device(&sch->dev);
+	put_device(&cdev->dev);
+	kfree(io_priv);
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with device lock being held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	while (1) {
+		cio_tsch(sch);
+		if (sch->schib.scsw.cmd.actl == 0)
+			break;
+		udelay_simple(100);
 	}
-	console_cdev.online = 1;
-	return &console_cdev;
 }

 static int ccw_device_pm_restore(struct device *dev);

-int ccw_device_force_console(void)
+int ccw_device_force_console(struct ccw_device *cdev)
 {
-	if (!console_cdev_in_use)
-		return -ENODEV;
-	return ccw_device_pm_restore(&console_cdev.dev);
+	return ccw_device_pm_restore(&cdev->dev);
 }
 EXPORT_SYMBOL_GPL(ccw_device_force_console);
 #endif
@@ -1710,15 +1746,8 @@ ccw_device_probe (struct device *dev)
 	int ret;

 	cdev->drv = cdrv; /* to let the driver call _set_online */
-	/* Note: we interpret class 0 in this context as an uninitialized
-	 * field since it translates to a non-I/O interrupt class. */
-	if (cdrv->int_class != 0)
-		cdev->private->int_class = cdrv->int_class;
-	else
-		cdev->private->int_class = IRQIO_CIO;
-
+	ccw_device_set_int_class(cdev);
 	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
-
 	if (ret) {
 		cdev->drv = NULL;
 		cdev->private->int_class = IRQIO_CIO;
@@ -1728,8 +1757,7 @@ ccw_device_probe (struct device *dev)
 	return 0;
 }

-static int
-ccw_device_remove (struct device *dev)
+static int ccw_device_remove(struct device *dev)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct ccw_driver *cdrv = cdev->drv;
@@ -1737,9 +1765,10 @@ ccw_device_remove (struct device *dev)

 	if (cdrv->remove)
 		cdrv->remove(cdev);
+
+	spin_lock_irq(cdev->ccwlock);
 	if (cdev->online) {
 		cdev->online = 0;
-		spin_lock_irq(cdev->ccwlock);
 		ret = ccw_device_offline(cdev);
 		spin_unlock_irq(cdev->ccwlock);
 		if (ret == 0)
@@ -1752,10 +1781,12 @@ ccw_device_remove (struct device *dev)
 			      cdev->private->dev_id.devno);
 		/* Give up reference obtained in ccw_device_set_online(). */
 		put_device(&cdev->dev);
+		spin_lock_irq(cdev->ccwlock);
 	}
 	ccw_device_set_timeout(cdev, 0);
 	cdev->drv = NULL;
 	cdev->private->int_class = IRQIO_CIO;
+	spin_unlock_irq(cdev->ccwlock);
 	return 0;
 }
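
The old console path relied on static console_cdev/console_private objects guarded by console_cdev_in_use; the new one allocates everything through the regular subchannel and ccw-device machinery. A rough sketch of the calling sequence a console driver would follow, assuming an interrupt handler my_console_irq() (the driver struct and handler are illustrative, not part of the patch):

	static void my_console_irq(struct ccw_device *cdev, unsigned long intparm,
				   struct irb *irb);
	static struct ccw_driver my_console_driver;	/* illustrative */

	static int __init my_console_setup(void)
	{
		struct ccw_device *cdev;
		int rc;

		cdev = ccw_device_create_console(&my_console_driver);
		if (IS_ERR(cdev))
			return PTR_ERR(cdev);

		/* ccw_device_enable_console() requires drv and handler set. */
		cdev->handler = my_console_irq;
		rc = ccw_device_enable_console(cdev);
		if (rc) {
			ccw_device_destroy_console(cdev);
			return rc;
		}
		return 0;
	}
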
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 7d4ecb65db0..8d1d2987317 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -81,8 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
 		cdev->private->state == DEV_STATE_BOXED);
 }

-extern wait_queue_head_t ccw_device_init_wq;
-extern atomic_t ccw_device_init_count;
 int __init io_subchannel_init(void);

 void io_subchannel_recog_done(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00095a..0bc902b3cd8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
 	cc = stsch_err(sch->schid, &schib);

 	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
-	       "device information:\n", get_clock());
+	       "device information:\n", get_tod_clock());
 	printk(KERN_WARNING "cio: orb:\n");
 	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
 		       orb, sizeof(*orb), 0);
@@ -739,7 +739,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	struct irb *irb;
 	int is_cmd;

-	irb = (struct irb *)&S390_lowcore.irb;
+	irb = &__get_cpu_var(cio_irb);
 	is_cmd = !scsw_is_tm(&irb->scsw);
 	/* Check for unsolicited interrupt. */
 	if (!scsw_is_solicited(&irb->scsw)) {
@@ -805,7 +805,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct irb *irb;

-	irb = (struct irb *)&S390_lowcore.irb;
+	irb = &__get_cpu_var(cio_irb);
 	/* Check for unsolicited interrupt. */
 	if (scsw_stctl(&irb->scsw) ==
 	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
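
All interrupt-time users now read the interrupt response block from the per-CPU cio_irb variable (declared in cio.h earlier in this patch) instead of casting into the lowcore. A sketch of the access pattern (inspect_irb() is illustrative):

	static void inspect_irb(void)
	{
		struct irb *irb = &__get_cpu_var(cio_irb);

		if (scsw_is_tm(&irb->scsw)) {
			/* transport-mode status pending */
		}
	}
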
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c77b6e06bf6..f3c417943da 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -563,14 +563,23 @@ out_unlock:
 	return rc;
 }

-void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
+/**
+ * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
+ * @cdev: device to obtain the descriptor for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel path. Return %NULL on error.
+ */
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
+						  int chp_idx)
 {
 	struct subchannel *sch;
 	struct chp_id chpid;

 	sch = to_subchannel(cdev->dev.parent);
 	chp_id_init(&chpid);
-	chpid.id = sch->schib.pmcw.chpid[chp_no];
+	chpid.id = sch->schib.pmcw.chpid[chp_idx];
 	return chp_get_chp_desc(chpid);
 }

@@ -704,9 +713,9 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
 int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-	struct channel_path_desc_fmt1 desc;
+	struct channel_path *chp;
 	struct chp_id chpid;
-	int mdc = 0, ret, i;
+	int mdc = 0, i;

 	/* Adjust requested path mask to excluded varied off paths. */
 	if (mask)
@@ -719,14 +728,20 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
 		if (!(mask & (0x80 >> i)))
 			continue;
 		chpid.id = sch->schib.pmcw.chpid[i];
-		ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
-		if (ret)
-			return ret;
-		if (!desc.f)
+		chp = chpid_to_chp(chpid);
+		if (!chp)
+			continue;
+
+		mutex_lock(&chp->lock);
+		if (!chp->desc_fmt1.f) {
+			mutex_unlock(&chp->lock);
 			return 0;
-		if (!desc.r)
+		}
+		if (!chp->desc_fmt1.r)
 			mdc = 1;
-		mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
+		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+			    chp->desc_fmt1.mdc;
+		mutex_unlock(&chp->lock);
 	}

 	return mdc;
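
Since ccw_device_get_chp_desc() now hands back a typed, newly allocated struct channel_path_desc rather than a void pointer, the caller owns the copy and must free it. Expected usage (the surrounding function is a hypothetical caller):

	static void show_first_chpid(struct ccw_device *cdev)
	{
		struct channel_path_desc *desc;

		desc = ccw_device_get_chp_desc(cdev, 0);	/* chpid index 0 */
		if (!desc)
			return;
		/* ... inspect the descriptor fields ... */
		kfree(desc);	/* caller frees the allocated copy */
	}
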
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c..37ada05e82a 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
 #define PGID_RETRIES	256
 #define PGID_TIMEOUT	(10 * HZ)

+static void verify_start(struct ccw_device *cdev);
+
 /*
  * Process path verification data and report result.
  */
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;

-	/* Adjust lpm. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	nop_build_cp(cdev);
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;

-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		sch->vpm |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	nop_do(cdev);
 	return;
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
 	req->cp = cp;
 }

+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	if (rc) {
+		/* We don't know the path groups' state. Abort. */
+		verify_done(cdev, rc);
+		return;
+	}
+	/*
+	 * Path groups have been reset. Restart path verification but
+	 * leave paths in path_noirq_mask out.
+	 */
+	cdev->private->flags.pgid_unknown = 0;
+	verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
+		      cdev->private->path_noirq_mask);
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam;
+	req->callback = pgid_wipeout_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
+
 /*
  * Perform establish/resign SET PGID on a single path.
  */
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
 	return;

 out_nopath:
+	if (cdev->private->flags.pgid_unknown) {
+		/* At least one SPID could be partially done. */
+		pgid_wipeout_start(cdev);
+		return;
+	}
 	verify_done(cdev, sch->vpm ? 0 : -EACCES);
 }

-static void verify_start(struct ccw_device *cdev);
-
 /*
  * Process SET PGID request result for a single path.
  */
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
 	case 0:
 		sch->vpm |= req->lpm & sch->opm;
 		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
 	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
 		break;
 	case -EOPNOTSUPP:
 		if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc)
 	else {
 		donepm = pgid_to_donepm(cdev);
 		sch->vpm = donepm & sch->opm;
-		cdev->private->pgid_todo_mask &= ~donepm;
 		cdev->private->pgid_reset_mask |= reset;
+		cdev->private->pgid_todo_mask &=
+			~(donepm | cdev->private->path_noirq_mask);
 		pgid_fill(cdev, pgid);
 	}
 out:
@@ -341,6 +404,10 @@ out:
 		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
 	switch (rc) {
 	case 0:
+		if (cdev->private->flags.pgid_unknown) {
+			pgid_wipeout_start(cdev);
+			return;
+		}
 		/* Anything left to do? */
 		if (cdev->private->pgid_todo_mask == 0) {
 			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
+	int ret;

-	/* Adjust lpm if paths are not set in pam. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	snid_build_cp(cdev);
@@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
 	return;

 out_nopath:
-	snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+	if (cdev->private->pgid_valid_mask)
+		ret = 0;
+	else if (cdev->private->path_noirq_mask)
+		ret = -ETIME;
+	else
+		ret = -EACCES;
+	snid_done(cdev, ret);
 }

 /*
@@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
 {
 	struct ccw_request *req = &cdev->private->req;

-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		cdev->private->pgid_valid_mask |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	snid_do(cdev);
 	return;
@@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev)

 	sch->vpm = 0;
 	sch->lpm = sch->schib.pmcw.pam;
+
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+	cdev->private->path_notoper_mask = 0;
+
 	/* Initialize request data. */
 	memset(req, 0, sizeof(*req));
 	req->timeout = PGID_TIMEOUT;
@@ -459,14 +551,8 @@ static void verify_start(struct ccw_device *cdev)
  */
 void ccw_device_verify_start(struct ccw_device *cdev)
 {
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
 	CIO_TRACE_EVENT(4, "vrfy");
 	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
-	/* Initialize PGID data. */
-	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
-	cdev->private->pgid_valid_mask = 0;
-	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
 	/*
 	 * Initialize pathgroup and multipath state with target values.
 	 * They may change in the course of path verification.
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
 	cdev->private->flags.pgroup = cdev->private->options.pgroup;
 	cdev->private->flags.mpath = cdev->private->options.mpath;
 	cdev->private->flags.doverify = 0;
+	cdev->private->path_noirq_mask = 0;
 	verify_start(cdev);
 }
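
All of the masks touched here (pam, lpm, vpm and the new path_noirq_mask/path_notoper_mask) share one convention: bit 0x80 stands for the channel path in schib.pmcw.chpid[0], and the verification requests walk toward the least significant bit with req->lpm >>= 1. The same shape appears forward in ccw_device_get_mdc() in device_ops.c above; in isolation (the function is illustrative):

	static void for_each_set_path(struct subchannel *sch, u8 mask)
	{
		int i;

		for (i = 0; i < 8; i++) {
			if (!(mask & (0x80 >> i)))
				continue;
			/* path i corresponds to sch->schib.pmcw.chpid[i] */
		}
	}
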
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index d9eddcba7e8..c4f7bf3e24c 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -6,6 +6,7 @@
  */

 #include <linux/kernel_stat.h>
+#include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
@@ -42,7 +43,7 @@ static debug_info_t *eadm_debug;

 static void EADM_LOG_HEX(int level, void *data, int length)
 {
-	if (level > eadm_debug->level)
+	if (!debug_level_enabled(eadm_debug, level))
 		return;
 	while (length > 0) {
 		debug_event(eadm_debug, level, data, length);
@@ -133,7 +134,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 {
 	struct eadm_private *private = get_eadm_private(sch);
 	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
-	struct irb *irb = (struct irb *)&S390_lowcore.irb;
+	struct irb *irb = &__get_cpu_var(cio_irb);
 	int error = 0;

 	EADM_LOG(6, "irq");
@@ -159,6 +160,9 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 	}
 	scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
 	private->state = EADM_IDLE;
+
+	if (private->completion)
+		complete(private->completion);
 }

 static struct subchannel *eadm_get_idle_sch(void)
@@ -186,7 +190,7 @@ static struct subchannel *eadm_get_idle_sch(void)
 	return NULL;
 }

-static int eadm_start_aob(struct aob *aob)
+int eadm_start_aob(struct aob *aob)
 {
 	struct eadm_private *private;
 	struct subchannel *sch;
@@ -214,6 +218,7 @@ out_unlock:

 	return ret;
 }
+EXPORT_SYMBOL_GPL(eadm_start_aob);

 static int eadm_subchannel_probe(struct subchannel *sch)
 {
@@ -255,13 +260,32 @@ out:

 static void eadm_quiesce(struct subchannel *sch)
 {
+	struct eadm_private *private = get_eadm_private(sch);
+	DECLARE_COMPLETION_ONSTACK(completion);
 	int ret;

+	spin_lock_irq(sch->lock);
+	if (private->state != EADM_BUSY)
+		goto disable;
+
+	if (eadm_subchannel_clear(sch))
+		goto disable;
+
+	private->completion = &completion;
+	spin_unlock_irq(sch->lock);
+
+	wait_for_completion_io(&completion);
+
+	spin_lock_irq(sch->lock);
+	private->completion = NULL;
+
+disable:
+	eadm_subchannel_set_timeout(sch, 0);
 	do {
-		spin_lock_irq(sch->lock);
 		ret = cio_disable_subchannel(sch);
-		spin_unlock_irq(sch->lock);
 	} while (ret == -EBUSY);
+
+	spin_unlock_irq(sch->lock);
 }

 static int eadm_subchannel_remove(struct subchannel *sch)
@@ -357,11 +381,6 @@ static struct css_driver eadm_subchannel_driver = {
 	.restore = eadm_subchannel_restore,
 };

-static struct eadm_ops eadm_ops = {
-	.eadm_start = eadm_start_aob,
-	.owner = THIS_MODULE,
-};
-
 static int __init eadm_sch_init(void)
 {
 	int ret;
@@ -381,7 +400,6 @@ static int __init eadm_sch_init(void)
 	if (ret)
 		goto cleanup;

-	register_eadm_ops(&eadm_ops);
 	return ret;

 cleanup:
@@ -392,7 +410,6 @@ cleanup:

 static void __exit eadm_sch_exit(void)
 {
-	unregister_eadm_ops(&eadm_ops);
 	css_driver_unregister(&eadm_subchannel_driver);
 	isc_unregister(EADM_SCH_ISC);
 	debug_unregister(eadm_debug);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
index 2779be09398..9664e4653f9 100644
--- a/drivers/s390/cio/eadm_sch.h
+++ b/drivers/s390/cio/eadm_sch.h
@@ -1,6 +1,7 @@
 #ifndef EADM_SCH_H
 #define EADM_SCH_H

+#include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/timer.h>
 #include <linux/list.h>
@@ -9,9 +10,10 @@
 struct eadm_private {
 	union orb orb;
 	enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+	struct completion *completion;
+	struct subchannel *sch;
 	struct timer_list timer;
 	struct list_head head;
-	struct subchannel *sch;
 } __aligned(8);

 #define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 65d13e38803..5a999084a22 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -17,7 +17,7 @@ struct idset {

 static inline unsigned long bitmap_size(int num_ssid, int num_id)
 {
-	return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+	return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
 }

 static struct idset *idset_new(int num_ssid, int num_id)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253dfcc1b..b108f4a5c7d 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -126,6 +126,10 @@ struct ccw_device_private {
 	u8 pgid_valid_mask;	/* mask of valid PGIDs */
 	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
 	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
+	u8 path_noirq_mask;	/* mask of paths for which no irq was
+				   received */
+	u8 path_notoper_mask;	/* mask of paths which were found
+				   not operable */
 	u8 path_gone_mask;	/* mask of paths, that became unavailable */
 	u8 path_new_mask;	/* mask of paths, that became available */
 	struct {
@@ -145,6 +149,7 @@ struct ccw_device_private {
 		unsigned int resuming:1;    /* recognition while resume */
 		unsigned int pgroup:1;	    /* pathgroup is set up */
 		unsigned int mpath:1;	    /* multipathing is set up */
+		unsigned int pgid_unknown:1;/* unknown pgid state */
 		unsigned int initialized:1; /* set if initial reference held */
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 5132554d791..a563e4c0059 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -140,40 +140,6 @@ struct siga_flag {
 	u8:3;
 } __attribute__ ((packed));

-struct chsc_ssqd_area {
-	struct chsc_header request;
-	u16:10;
-	u8 ssid:2;
-	u8 fmt:4;
-	u16 first_sch;
-	u16:16;
-	u16 last_sch;
-	u32:32;
-	struct chsc_header response;
-	u32:32;
-	struct qdio_ssqd_desc qdio_ssqd;
-} __attribute__ ((packed));
-
-struct scssc_area {
-	struct chsc_header request;
-	u16 operation_code;
-	u16:16;
-	u32:32;
-	u32:32;
-	u64 summary_indicator_addr;
-	u64 subchannel_indicator_addr;
-	u32 ks:4;
-	u32 kc:4;
-	u32:21;
-	u32 isc:3;
-	u32 word_with_d_bit;
-	u32:32;
-	struct subchannel_id schid;
-	u32 reserved[1004];
-	struct chsc_header response;
-	u32:32;
-} __attribute__ ((packed));
-
 struct qdio_dev_perf_stat {
 	unsigned int adapter_int;
 	unsigned int qdio_int;
@@ -393,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
 #define need_siga_sync_out_after_pci(q)	\
 	(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))

-#define for_each_input_queue(irq_ptr, q, i)	\
-	for (i = 0, q = irq_ptr->input_qs[0];	\
-		i < irq_ptr->nr_input_qs;	\
-		q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i)	\
-	for (i = 0, q = irq_ptr->output_qs[0];	\
-		i < irq_ptr->nr_output_qs;	\
-		q = irq_ptr->output_qs[++i])
+#define for_each_input_queue(irq_ptr, q, i)		\
+	for (i = 0; i < irq_ptr->nr_input_qs &&		\
+		({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i)		\
+	for (i = 0; i < irq_ptr->nr_output_qs &&	\
+		({ q = irq_ptr->output_qs[i]; 1; }); i++)

 #define prev_buf(bufnr)	\
 	((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
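
The rewritten iterators test the queue count before the statement expression assigns q, so a setup with nr_input_qs == 0 never dereferences input_qs[0] (the old macros performed that array read unconditionally). Usage is unchanged; a sketch (scan_queues() and handle_queue() are illustrative):

	static void handle_queue(struct qdio_q *q);

	static void scan_queues(struct qdio_irq *irq_ptr)
	{
		struct qdio_q *q;
		int i;

		for_each_input_queue(irq_ptr, q, i)
			handle_queue(q);	/* never runs when nr_input_qs == 0 */
	}
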
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e6e0d31c02a..f1f3baa8e6e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 #include <asm/debug.h>
 #include "qdio_debug.h"
 #include "qdio.h"
@@ -16,11 +17,51 @@ debug_info_t *qdio_dbf_error;

 static struct dentry *debugfs_root;
 #define QDIO_DEBUGFS_NAME_LEN	10
+#define QDIO_DBF_NAME_LEN	20

-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+struct qdio_dbf_entry {
+	char dbf_name[QDIO_DBF_NAME_LEN];
+	debug_info_t *dbf_info;
+	struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+	struct qdio_dbf_entry *entry;
+	debug_info_t *rc = NULL;
+
+	mutex_lock(&qdio_dbf_list_mutex);
+	list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+		if (strcmp(entry->dbf_name, name) == 0) {
+			rc = entry->dbf_info;
+			break;
+		}
+	}
+	mutex_unlock(&qdio_dbf_list_mutex);
+	return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+	struct qdio_dbf_entry *entry, *tmp;
+
+	mutex_lock(&qdio_dbf_list_mutex);
+	list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+		list_del(&entry->dbf_list);
+		debug_unregister(entry->dbf_info);
+		kfree(entry);
+	}
+	mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
 		       struct qdio_irq *irq_ptr)
 {
-	char text[20];
+	char text[QDIO_DBF_NAME_LEN];
+	struct qdio_dbf_entry *new_entry;

 	DBF_EVENT("qfmt:%1d", init_data->q_format);
 	DBF_HEX(init_data->adapter_name, 8);
@@ -38,11 +79,34 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
 	DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);

 	/* allocate trace view for the interface */
-	snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
-	irq_ptr->debug_area = debug_register(text, 2, 1, 16);
-	debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
-	debug_set_level(irq_ptr->debug_area, DBF_WARN);
-	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+	snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+		 dev_name(&init_data->cdev->dev));
+	irq_ptr->debug_area = qdio_get_dbf_entry(text);
+	if (irq_ptr->debug_area)
+		DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+	else {
+		irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+		if (!irq_ptr->debug_area)
+			return -ENOMEM;
+		if (debug_register_view(irq_ptr->debug_area,
+					&debug_hex_ascii_view)) {
+			debug_unregister(irq_ptr->debug_area);
+			return -ENOMEM;
+		}
+		debug_set_level(irq_ptr->debug_area, DBF_WARN);
+		DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+		new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+		if (!new_entry) {
+			debug_unregister(irq_ptr->debug_area);
+			return -ENOMEM;
+		}
+		strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+		new_entry->dbf_info = irq_ptr->debug_area;
+		mutex_lock(&qdio_dbf_list_mutex);
+		list_add(&new_entry->dbf_list, &qdio_dbf_list);
+		mutex_unlock(&qdio_dbf_list_mutex);
+	}
+	return 0;
 }

 static int qstat_show(struct seq_file *m, void *v)
@@ -128,7 +192,7 @@ static int qstat_show(struct seq_file *m, void *v)
 static int qstat_seq_open(struct inode *inode, struct file *filp)
 {
 	return single_open(filp, qstat_show,
-			   filp->f_path.dentry->d_inode->i_private);
+			   file_inode(filp)->i_private);
 }

 static const struct file_operations debugfs_fops = {
@@ -221,10 +285,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
 static int qperf_seq_open(struct inode *inode, struct file *filp)
 {
 	return single_open(filp, qperf_show,
-			   filp->f_path.dentry->d_inode->i_private);
+			   file_inode(filp)->i_private);
 }

-static struct file_operations debugfs_perf_fops = {
+static const struct file_operations debugfs_perf_fops = {
 	.owner	 = THIS_MODULE,
 	.open	 = qperf_seq_open,
 	.read	 = seq_read,
@@ -232,7 +296,8 @@ static struct file_operations debugfs_perf_fops = {
 	.llseek  = seq_lseek,
 	.release = single_release,
 };
-static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
+
+static void setup_debugfs_entry(struct qdio_q *q)
 {
 	char name[QDIO_DEBUGFS_NAME_LEN];

@@ -263,12 +328,12 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
 	irq_ptr->debugfs_perf = NULL;

 	for_each_input_queue(irq_ptr, q, i)
-		setup_debugfs_entry(q, cdev);
+		setup_debugfs_entry(q);
 	for_each_output_queue(irq_ptr, q, i)
-		setup_debugfs_entry(q, cdev);
+		setup_debugfs_entry(q);
 }

-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
 {
 	struct qdio_q *q;
 	int i;
@@ -299,6 +364,7 @@ int __init qdio_debug_init(void)

 void qdio_debug_exit(void)
 {
+	qdio_clear_dbf_list();
 	debugfs_remove(debugfs_root);
 	if (qdio_dbf_setup)
 		debug_unregister(qdio_dbf_setup);
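
Two behavioural notes on the rework above: trace areas are now kept on qdio_dbf_list keyed by name, so re-establishing a queue pair reuses the existing area instead of leaking a registration (areas are only freed in qdio_debug_exit() via qdio_clear_dbf_list()); and level checks go through debug_level_enabled() rather than peeking at the dbf's level field. The guard pattern in isolation (trace_state() and names are illustrative):

	static void trace_state(debug_info_t *dbf, int state)
	{
		/* Skip the snprintf entirely when the level is filtered out. */
		if (debug_level_enabled(dbf, DBF_INFO)) {
			char txt[32];

			snprintf(txt, sizeof(txt), "act:%d", state);
			debug_text_event(dbf, DBF_INFO, txt);
		}
	}
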
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 7f8b973da29..f33ce857761 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -16,12 +16,6 @@
 extern debug_info_t *qdio_dbf_setup;
 extern debug_info_t *qdio_dbf_error;

-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-	return (level <= dbf_grp->level);
-}
-
 #define DBF_ERR		3	/* error conditions */
 #define DBF_WARN	4	/* warning conditions */
 #define DBF_INFO	6	/* informational */
@@ -65,7 +59,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
 #define DBF_DEV_EVENT(level, device, text...) \
 	do { \
 		char debug_buffer[QDIO_DBF_LEN]; \
-		if (qdio_dbf_passes(device->debug_area, level)) { \
+		if (debug_level_enabled(device->debug_area, level)) { \
 			snprintf(debug_buffer, QDIO_DBF_LEN, text); \
 			debug_text_event(device->debug_area, level, debug_buffer); \
 		} \
@@ -81,12 +75,11 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
 	}
 }

-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
 		       struct qdio_irq *irq_ptr);
 void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
 			      struct ccw_device *cdev);
-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
-				 struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
 int qdio_debug_init(void);
 void qdio_debug_exit(void);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d3461f2..848e3b64ea6 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
 		retries++;

 		if (!start_time) {
-			start_time = get_clock();
+			start_time = get_tod_clock_fast();
 			goto again;
 		}
-		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
 			goto again;
 	}
 	if (retries) {
@@ -409,17 +409,16 @@ static inline void qdio_stop_polling(struct qdio_q *q)
 		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 }

-static inline void account_sbals(struct qdio_q *q, int count)
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
 {
-	int pos = 0;
+	int pos;

 	q->q_stats.nr_sbal_total += count;
 	if (count == QDIO_MAX_BUFFERS_MASK) {
 		q->q_stats.nr_sbals[7]++;
 		return;
 	}
-	while (count >>= 1)
-		pos++;
+	pos = ilog2(count);
 	q->q_stats.nr_sbals[pos]++;
 }

@@ -504,7 +503,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;

-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock_fast();

 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -528,7 +527,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	case SLSB_P_INPUT_PRIMED:
 		inbound_primed(q, count);
 		q->first_to_check = add_buf(q->first_to_check, count);
-		if (atomic_sub(count, &q->nr_buf_used) == 0)
+		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
@@ -563,7 +562,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 	if (bufnr != q->last_move) {
 		q->last_move = bufnr;
 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-			q->u.in.timestamp = get_clock();
+			q->u.in.timestamp = get_tod_clock();
 		return 1;
 	} else
 		return 0;
@@ -595,7 +594,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	 * At this point we know, that inbound first_to_check
 	 * has (probably) not moved (see qdio_inbound_processing).
 	 */
-	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 			      q->first_to_check);
 		return 1;
@@ -608,50 +607,6 @@ static inline int contains_aobs(struct qdio_q *q)
 	return !q->is_input_q && q->u.out.use_cq;
 }

-static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
-				  int i, struct qaob *aob)
-{
-	int tmp;
-
-	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
-		      (unsigned long) virt_to_phys(aob));
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
-		      (unsigned long) aob->res0[0]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
-		      (unsigned long) aob->res0[1]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
-		      (unsigned long) aob->res0[2]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
-		      (unsigned long) aob->res0[3]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
-		      (unsigned long) aob->res0[4]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
-		      (unsigned long) aob->res0[5]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
-	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
-	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
-	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
-	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
-	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
-		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
-			      (unsigned long) aob->sba[tmp]);
-		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
-			      (unsigned long) q->sbal[i]->element[tmp].addr);
-		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
-		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
-			      q->sbal[i]->element[tmp].length);
-	}
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
-	for (tmp = 0; tmp < 2; ++tmp) {
-		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
-			      (unsigned long) aob->res4[tmp]);
-	}
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
-}
-
 static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
 {
 	unsigned char state = 0;
@@ -772,7 +727,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;

-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock_fast();

 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
@@ -1040,7 +995,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 		}
 	}

-	if (!pci_out_supported(q))
+	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
 		return;

 	for_each_output_queue(irq_ptr, q, i) {
@@ -1226,7 +1181,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
 	tiqdio_remove_input_queues(irq_ptr);
 	qdio_shutdown_queues(cdev);
-	qdio_shutdown_debug_entries(irq_ptr, cdev);
+	qdio_shutdown_debug_entries(irq_ptr);

 	/* cleanup subchannel */
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
@@ -1278,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
 		return -ENODEV;

 	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
 	mutex_lock(&irq_ptr->setup_mutex);

-	if (irq_ptr->debug_area != NULL) {
-		debug_unregister(irq_ptr->debug_area);
-		irq_ptr->debug_area = NULL;
-	}
+	irq_ptr->debug_area = NULL;
 	cdev->private->qdio_data = NULL;
 	mutex_unlock(&irq_ptr->setup_mutex);

@@ -1320,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
 		goto out_err;

 	mutex_init(&irq_ptr->setup_mutex);
-	qdio_allocate_dbf(init_data, irq_ptr);
+	if (qdio_allocate_dbf(init_data, irq_ptr))
+		goto out_rel;

 	/*
 	 * Allocate a page for the chsc calls in qdio_establish.
@@ -1541,7 +1495,7 @@ static inline int buf_in_between(int bufnr, int start, int count)
 static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 			  int bufnr, int count)
 {
-	int used, diff;
+	int diff;

 	qperf_inc(q, inbound_call);

@@ -1574,7 +1528,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,

 set:
 	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
-	used = atomic_add_return(count, &q->nr_buf_used) - count;
+	atomic_add(count, &q->nr_buf_used);

 	if (need_siga_in(q))
 		return qdio_siga_input(q);
@@ -1796,6 +1750,97 @@ int qdio_stop_irq(struct ccw_device *cdev, int nr)
 }
 EXPORT_SYMBOL(qdio_stop_irq);

+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid:	Subchannel ID.
+ * @cnc:	Boolean Change-Notification Control
+ * @response:	Response code will be stored at this address
+ * @cb:		Callback function will be executed for each element
+ *		of the address list
+ * @priv:	Pointer passed from the caller to qdio_pnso_brinfo();
+ *		handed unchanged to @cb as its first argument, along with:
+ * @type:	Type of the address entry passed to the callback
+ * @entry:	Entry containing the address of the specified type
+ *
+ * Performs "Store-network-bridging-information list" operation and calls
+ * the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+		int cnc, u16 *response,
+		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+				void *entry),
+		void *priv)
+{
+	struct chsc_pnso_area *rr;
+	int rc;
+	u32 prev_instance = 0;
+	int isfirstblock = 1;
+	int i, size, elems;
+
+	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+	if (rr == NULL)
+		return -ENOMEM;
+	do {
+		/* on the first iteration, naihdr.resume_token will be zero */
+		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+		if (rc != 0 && rc != -EBUSY)
+			goto out;
+		if (rr->response.code != 1) {
+			rc = -EIO;
+			continue;
+		} else
+			rc = 0;
+
+		if (cb == NULL)
+			continue;
+
+		size = rr->naihdr.naids;
+		elems = (rr->response.length -
+				sizeof(struct chsc_header) -
+				sizeof(struct chsc_brinfo_naihdr)) /
+				size;
+
+		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+			/* Inform the caller that they need to scrap */
+			/* the data that was already reported via cb */
+			rc = -EAGAIN;
+			break;
+		}
+		isfirstblock = 0;
+		prev_instance = rr->naihdr.instance;
+		for (i = 0; i < elems; i++)
+			switch (size) {
+			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+				(*cb)(priv, l3_ipv6_addr,
+						&rr->entries.l3_ipv6[i]);
+				break;
+			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+				(*cb)(priv, l3_ipv4_addr,
+						&rr->entries.l3_ipv4[i]);
+				break;
+			case sizeof(struct qdio_brinfo_entry_l2):
+				(*cb)(priv, l2_addr_lnid,
+						&rr->entries.l2[i]);
+				break;
+			default:
+				WARN_ON_ONCE(1);
+				rc = -EIO;
+				goto out;
+			}
+	} while (rr->response.code == 0x0107 ||  /* channel busy */
+		  (rr->response.code == 1 && /* list stored */
+		   /* resume token is non-zero => list incomplete */
+		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+	(*response) = rr->response.code;
+
+out:
+	free_page((unsigned long)rr);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
+
 static int __init init_QDIO(void)
 {
 	int rc;
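
A sketch of how a caller might consume qdio_pnso_brinfo(): the callback runs once per address entry, and an -EAGAIN return means the instance number changed mid-list, so entries already reported must be discarded and the call retried (fetch_bridge_info() and brinfo_cb() are illustrative callers, not part of the patch):

	static void brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
			      void *entry)
	{
		switch (type) {
		case l2_addr_lnid:	/* entry: struct qdio_brinfo_entry_l2 */
			break;
		case l3_ipv4_addr:	/* entry: struct qdio_brinfo_entry_l3_ipv4 */
		case l3_ipv6_addr:	/* entry: struct qdio_brinfo_entry_l3_ipv6 */
			break;
		default:
			break;
		}
	}

	static int fetch_bridge_info(struct subchannel_id schid)
	{
		u16 response;
		int rc;

		do {
			/* cnc=1 requests further change notifications */
			rc = qdio_pnso_brinfo(schid, 1, &response,
					      brinfo_cb, NULL);
		} while (rc == -EAGAIN);	/* list changed while being read */

		return rc;	/* response holds the raw CHSC response code */
	}
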
a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 16ecd35b8e5..f5f4a91fab4 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -254,40 +254,31 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, int rc; DBF_EVENT("getssqd:%4x", schid->sch_no); - if (irq_ptr != NULL) - ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; - else + if (!irq_ptr) { ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL); - memset(ssqd, 0, PAGE_SIZE); - - ssqd->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0024, - }; - ssqd->first_sch = schid->sch_no; - ssqd->last_sch = schid->sch_no; - ssqd->ssid = schid->ssid; - - if (chsc(ssqd)) - return -EIO; - rc = chsc_error_from_response(ssqd->response.code); + if (!ssqd) + return -ENOMEM; + } else { + ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + } + + rc = chsc_ssqd(*schid, ssqd); if (rc) - return rc; + goto out; if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || (ssqd->qdio_ssqd.sch != schid->sch_no)) - return -EINVAL; - - if (irq_ptr != NULL) - memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, - sizeof(struct qdio_ssqd_desc)); - else { - memcpy(data, &ssqd->qdio_ssqd, - sizeof(struct qdio_ssqd_desc)); + rc = -EINVAL; + + if (!rc) + memcpy(data, &ssqd->qdio_ssqd, sizeof(*data)); + +out: + if (!irq_ptr) free_page((unsigned long)ssqd); - } - return 0; + + return rc; } void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) @@ -295,7 +286,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) unsigned char qdioac; int rc; - rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL); + rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc); if (rc) { DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%x", rc); diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index bde5255200d..5d06253c2a7 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -36,8 +36,13 @@ struct indicator_t { static LIST_HEAD(tiq_list); static DEFINE_MUTEX(tiq_list_lock); -/* adapter local summary indicator */ -static u8 *tiqdio_alsi; +/* Adapter interrupt definitions */ +static void tiqdio_thinint_handler(struct airq_struct *airq); + +static struct airq_struct tiqdio_airq = { + .handler = tiqdio_thinint_handler, + .isc = QDIO_AIRQ_ISC, +}; static struct indicator_t *q_indicators; @@ -176,7 +181,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) * @alsi: pointer to adapter local summary indicator * @data: NULL */ -static void tiqdio_thinint_handler(void *alsi, void *data) +static void tiqdio_thinint_handler(struct airq_struct *airq) { u32 si_used = clear_shared_ind(); struct qdio_q *q; @@ -208,51 +213,31 @@ static void tiqdio_thinint_handler(void *alsi, void *data) static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) { - struct scssc_area *scssc_area; + struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; + u64 summary_indicator_addr, subchannel_indicator_addr; int rc; - scssc_area = (struct scssc_area *)irq_ptr->chsc_page; - memset(scssc_area, 0, PAGE_SIZE); - if (reset) { - scssc_area->summary_indicator_addr = 0; - scssc_area->subchannel_indicator_addr = 0; + summary_indicator_addr = 0; + subchannel_indicator_addr = 0; } else { - scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi); - scssc_area->subchannel_indicator_addr = - virt_to_phys(irq_ptr->dsci); + summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr); + 
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index bde5255200d..5d06253c2a7 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -36,8 +36,13 @@ struct indicator_t {
 static LIST_HEAD(tiq_list);
 static DEFINE_MUTEX(tiq_list_lock);
 
-/* adapter local summary indicator */
-static u8 *tiqdio_alsi;
+/* Adapter interrupt definitions */
+static void tiqdio_thinint_handler(struct airq_struct *airq);
+
+static struct airq_struct tiqdio_airq = {
+	.handler = tiqdio_thinint_handler,
+	.isc = QDIO_AIRQ_ISC,
+};
 
 static struct indicator_t *q_indicators;
 
@@ -176,7 +181,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
  * @alsi: pointer to adapter local summary indicator
  * @data: NULL
  */
-static void tiqdio_thinint_handler(void *alsi, void *data)
+static void tiqdio_thinint_handler(struct airq_struct *airq)
 {
 	u32 si_used = clear_shared_ind();
 	struct qdio_q *q;
@@ -208,51 +213,31 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
 {
-	struct scssc_area *scssc_area;
+	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
+	u64 summary_indicator_addr, subchannel_indicator_addr;
 	int rc;
 
-	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
-	memset(scssc_area, 0, PAGE_SIZE);
-
 	if (reset) {
-		scssc_area->summary_indicator_addr = 0;
-		scssc_area->subchannel_indicator_addr = 0;
+		summary_indicator_addr = 0;
+		subchannel_indicator_addr = 0;
 	} else {
-		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
-		scssc_area->subchannel_indicator_addr =
-			virt_to_phys(irq_ptr->dsci);
+		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
+		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
 	}
 
-	scssc_area->request = (struct chsc_header) {
-		.length = 0x0fe0,
-		.code = 0x0021,
-	};
-	scssc_area->operation_code = 0;
-	scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
-	scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
-	scssc_area->isc = QDIO_AIRQ_ISC;
-	scssc_area->schid = irq_ptr->schid;
-
-	/* enable the time delay disablement facility */
-	if (css_general_characteristics.aif_tdd)
-		scssc_area->word_with_d_bit = 0x10000000;
-
-	rc = chsc(scssc_area);
-	if (rc)
-		return -EIO;
-
-	rc = chsc_error_from_response(scssc_area->response.code);
+	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+		       subchannel_indicator_addr);
 	if (rc) {
 		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
-			  scssc_area->response.code);
-		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
-		return rc;
+			  scssc->response.code);
+		goto out;
 	}
 
 	DBF_EVENT("setscind");
-	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
-	DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
-	return 0;
+	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
+	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
+out:
+	return rc;
 }
 
 /* allocate non-shared indicators and shared indicator */
@@ -272,14 +257,12 @@ void tiqdio_free_memory(void)
 
 int __init tiqdio_register_thinints(void)
 {
-	isc_register(QDIO_AIRQ_ISC);
-	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
-						      NULL, QDIO_AIRQ_ISC);
-	if (IS_ERR(tiqdio_alsi)) {
-		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
-		tiqdio_alsi = NULL;
-		isc_unregister(QDIO_AIRQ_ISC);
-		return -ENOMEM;
+	int rc;
+
+	rc = register_adapter_interrupt(&tiqdio_airq);
+	if (rc) {
+		DBF_EVENT("RTI:%x", rc);
+		return rc;
 	}
 	return 0;
 }
@@ -312,9 +295,5 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
 void __exit tiqdio_unregister_thinints(void)
 {
 	WARN_ON(!list_empty(&tiq_list));
-
-	if (tiqdio_alsi) {
-		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
-		isc_unregister(QDIO_AIRQ_ISC);
-	}
+	unregister_adapter_interrupt(&tiqdio_airq);
 }
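
Usage sketch for the adapter-interrupt conversion above: a consumer fills in .handler and .isc of an airq_struct and registers it; the core allocates the local summary indicator (lsi_ptr), which the driver then programs into the hardware (here via chsc_sadc). qdio uses a single static descriptor; a driver with per-device state would more typically embed the airq_struct and recover it in the handler. All names below other than the airq API are invented:

	struct my_card {
		struct airq_struct airq;
		/* driver private data */
	};

	static void my_thinint_handler(struct airq_struct *airq)
	{
		struct my_card *card = container_of(airq, struct my_card, airq);

		/* runs because (*airq->lsi_ptr & airq->lsi_mask) was set;
		 * kick this card's queue processing here */
		(void)card;
	}

	static int my_setup(struct my_card *card)
	{
		card->airq.handler = my_thinint_handler;
		card->airq.isc = QDIO_AIRQ_ISC;	/* isc choice illustrative */
		return register_adapter_interrupt(&card->airq);
	}
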
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index bcf20f3aa51..15268edc54a 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -15,8 +15,6 @@
 #include "chsc.h"
 
 static struct device *scm_root;
-static struct eadm_ops *eadm_ops;
-static DEFINE_MUTEX(eadm_ops_mutex);
 
 #define to_scm_dev(n) container_of(n, struct scm_device, dev)
 #define to_scm_drv(d) container_of(d, struct scm_driver, drv)
@@ -73,49 +71,6 @@ void scm_driver_unregister(struct scm_driver *scmdrv)
 }
 EXPORT_SYMBOL_GPL(scm_driver_unregister);
 
-int scm_get_ref(void)
-{
-	int ret = 0;
-
-	mutex_lock(&eadm_ops_mutex);
-	if (!eadm_ops || !try_module_get(eadm_ops->owner))
-		ret = -ENOENT;
-	mutex_unlock(&eadm_ops_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(scm_get_ref);
-
-void scm_put_ref(void)
-{
-	mutex_lock(&eadm_ops_mutex);
-	module_put(eadm_ops->owner);
-	mutex_unlock(&eadm_ops_mutex);
-}
-EXPORT_SYMBOL_GPL(scm_put_ref);
-
-void register_eadm_ops(struct eadm_ops *ops)
-{
-	mutex_lock(&eadm_ops_mutex);
-	eadm_ops = ops;
-	mutex_unlock(&eadm_ops_mutex);
-}
-EXPORT_SYMBOL_GPL(register_eadm_ops);
-
-void unregister_eadm_ops(struct eadm_ops *ops)
-{
-	mutex_lock(&eadm_ops_mutex);
-	eadm_ops = NULL;
-	mutex_unlock(&eadm_ops_mutex);
-}
-EXPORT_SYMBOL_GPL(unregister_eadm_ops);
-
-int scm_start_aob(struct aob *aob)
-{
-	return eadm_ops->eadm_start(aob);
-}
-EXPORT_SYMBOL_GPL(scm_start_aob);
-
 void scm_irq_handler(struct aob *aob, int error)
 {
 	struct aob_rq_header *aobrq = (void *) aob->request.data;
@@ -211,7 +166,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
 		goto out;
 	scmdrv = to_scm_drv(scmdev->dev.driver);
 	if (changed && scmdrv->notify)
-		scmdrv->notify(scmdev);
+		scmdrv->notify(scmdev, SCM_CHANGE);
 out:
 	device_unlock(&scmdev->dev);
 	if (changed)
@@ -297,6 +252,22 @@ int scm_update_information(void)
 	return ret;
 }
 
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+	struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+	struct scm_device *scmdev = to_scm_dev(dev);
+
+	if (dev->driver && scmdrv->notify)
+		scmdrv->notify(scmdev, SCM_AVAIL);
+
+	return 0;
+}
+
+int scm_process_availability_information(void)
+{
+	return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
 static int __init scm_init(void)
 {
 	int ret;
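
Usage sketch for the extended notify() callback above: one driver hook now covers both attribute changes (SCM_CHANGE, raised by scmdev_update()) and increments becoming available again (SCM_AVAIL, raised via scm_process_availability_information()). The driver name below is invented, and the event type name is assumed from context, not shown in this patch:

	static void my_scm_notify(struct scm_device *scmdev,
				  enum scm_event event)	/* type name assumed */
	{
		switch (event) {
		case SCM_CHANGE:
			/* attributes changed: re-read rank/state */
			break;
		case SCM_AVAIL:
			/* increment usable again: resume queued I/O */
			break;
		}
	}

	static struct scm_driver my_scm_driver = {
		.drv	= { .name = "my_scm" },
		.notify	= my_scm_notify,
		/* .probe/.remove omitted */
	};
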
