author     Ingo Molnar <mingo@elte.hu>   2010-03-09 17:11:53 +0100
committer  Ingo Molnar <mingo@elte.hu>   2010-03-09 17:11:53 +0100
commit     548b84166917d6f5e2296123b85ad24aecd3801d
tree       0ab0300e23a02df0fe3c0579627e4998bb122c00 /drivers/s390/cio
parent     cfb581bcd4f8c158c6f2b48bf5e232bb9e6855c0
parent     57d54889cd00db2752994b389ba714138652e60c
Merge commit 'v2.6.34-rc1' into perf/urgent
Conflicts:
tools/perf/util/probe-event.c
Merge reason: Pick up -rc1 and resolve the conflict as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/ccwreq.c          2
-rw-r--r--  drivers/s390/cio/chsc.c            2
-rw-r--r--  drivers/s390/cio/chsc_sch.c        4
-rw-r--r--  drivers/s390/cio/cio.c            14
-rw-r--r--  drivers/s390/cio/crw.c            29
-rw-r--r--  drivers/s390/cio/css.c            79
-rw-r--r--  drivers/s390/cio/css.h             5
-rw-r--r--  drivers/s390/cio/device.c        165
-rw-r--r--  drivers/s390/cio/device.h          3
-rw-r--r--  drivers/s390/cio/device_fsm.c     43
-rw-r--r--  drivers/s390/cio/qdio.h           92
-rw-r--r--  drivers/s390/cio/qdio_debug.c     24
-rw-r--r--  drivers/s390/cio/qdio_main.c      31
-rw-r--r--  drivers/s390/cio/qdio_setup.c     20
-rw-r--r--  drivers/s390/cio/qdio_thinint.c    4
15 files changed, 334 insertions(+), 183 deletions(-)
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 7a28a3029a3..37df42af05e 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c @@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) */ void ccw_request_handler(struct ccw_device *cdev) { + struct irb *irb = (struct irb *)&S390_lowcore.irb; struct ccw_request *req = &cdev->private->req; - struct irb *irb = (struct irb *) __LC_IRB; enum io_status status; int rc = -EOPNOTSUPP; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 1ecd3e56764..4038f5b4f14 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) secm_area->request.length = 0x0050; secm_area->request.code = 0x0016; - secm_area->key = PAGE_DEFAULT_KEY; + secm_area->key = PAGE_DEFAULT_KEY >> 4; secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index c84ac944307..852612f5dba 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) { struct chsc_private *private = sch->private; struct chsc_request *request = private->request; - struct irb *irb = (struct irb *)__LC_IRB; + struct irb *irb = (struct irb *)&S390_lowcore.irb; CHSC_LOG(4, "irb"); CHSC_LOG_HEX(4, irb, sizeof(*irb)); @@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area, int ret = -ENODEV; char dbf[10]; - chsc_area->header.key = PAGE_DEFAULT_KEY; + chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; while ((sch = chsc_get_next_subchannel(sch))) { spin_lock(sch->lock); private = sch->private; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 126f240715a..f736cdcf08a 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs) /* * Get interrupt information from lowcore */ - tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; - irb = (struct irb *) __LC_IRB; + tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; + irb = (struct irb *)&S390_lowcore.irb; do { kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; /* @@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs) * We don't do this for VM because a tpi drops the cpu * out of the sie which costs more cycles than it saves. */ - } while (!MACHINE_IS_VM && tpi (NULL) != 0); + } while (MACHINE_IS_LPAR && tpi(NULL) != 0); irq_exit(); set_irq_regs(old_regs); } @@ -682,10 +682,10 @@ static int cio_tpi(void) struct irb *irb; int irq_context; - tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; + tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; if (tpi(NULL) != 1) return 0; - irb = (struct irb *) __LC_IRB; + irb = (struct irb *)&S390_lowcore.irb; /* Store interrupt response block to lowcore. */ if (tsch(tpi_info->schid, irb) != 0) /* Not status pending or not operational. 
*/ @@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid) struct tpi_info ti; if (tpi(&ti)) { - tsch(ti.schid, (struct irb *)__LC_IRB); + tsch(ti.schid, (struct irb *)&S390_lowcore.irb); if (schid_equal(&ti.schid, &schid)) return 0; } @@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) struct subchannel_id schid; struct schib schib; - schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; + schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; if (!schid.one) return -ENODEV; if (stsch(schid, &schib)) diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c index d157665d0e7..425f741a280 100644 --- a/drivers/s390/cio/crw.c +++ b/drivers/s390/cio/crw.c @@ -8,15 +8,16 @@ * Heiko Carstens <heiko.carstens@de.ibm.com>, */ -#include <linux/semaphore.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/init.h> +#include <linux/wait.h> #include <asm/crw.h> -static struct semaphore crw_semaphore; static DEFINE_MUTEX(crw_handler_mutex); static crw_handler_t crw_handlers[NR_RSCS]; +static atomic_t crw_nr_req = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q); /** * crw_register_handler() - register a channel report word handler @@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc) static int crw_collect_info(void *unused) { struct crw crw[2]; - int ccode; + int ccode, signal; unsigned int chain; - int ignore; repeat: - ignore = down_interruptible(&crw_semaphore); + signal = wait_event_interruptible(crw_handler_wait_q, + atomic_read(&crw_nr_req) > 0); + if (unlikely(signal)) + atomic_inc(&crw_nr_req); chain = 0; while (1) { crw_handler_t handler; @@ -122,25 +125,23 @@ repeat: /* chain is always 0 or 1 here. */ chain = crw[chain].chn ? chain + 1 : 0; } + if (atomic_dec_and_test(&crw_nr_req)) + wake_up(&crw_handler_wait_q); goto repeat; return 0; } void crw_handle_channel_report(void) { - up(&crw_semaphore); + atomic_inc(&crw_nr_req); + wake_up(&crw_handler_wait_q); } -/* - * Separate initcall needed for semaphore initialization since - * crw_handle_channel_report might be called before crw_machine_check_init. - */ -static int __init crw_init_semaphore(void) +void crw_wait_for_channel_report(void) { - init_MUTEX_LOCKED(&crw_semaphore); - return 0; + crw_handle_channel_report(); + wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0); } -pure_initcall(crw_init_semaphore); /* * Machine checks for the channel subsystem must be enabled diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 7679aee6fa1..2769da54f2b 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -18,6 +18,7 @@ #include <linux/list.h> #include <linux/reboot.h> #include <linux/suspend.h> +#include <linux/proc_fs.h> #include <asm/isc.h> #include <asm/crw.h> @@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) if (!get_device(&sch->dev)) return; sch->todo = todo; - if (!queue_work(slow_path_wq, &sch->todo_work)) { + if (!queue_work(cio_work_q, &sch->todo_work)) { /* Already queued, release workqueue ref. 
*/ put_device(&sch->dev); } @@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused) } static DECLARE_WORK(slow_path_work, css_slow_path_func); -struct workqueue_struct *slow_path_wq; +struct workqueue_struct *cio_work_q; void css_schedule_eval(struct subchannel_id schid) { @@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_sch_add(slow_subchannel_set, schid); atomic_set(&css_eval_scheduled, 1); - queue_work(slow_path_wq, &slow_path_work); + queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -563,7 +564,7 @@ void css_schedule_eval_all(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_fill(slow_subchannel_set); atomic_set(&css_eval_scheduled, 1); - queue_work(slow_path_wq, &slow_path_work); + queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_add_set(slow_subchannel_set, unreg_set); atomic_set(&css_eval_scheduled, 1); - queue_work(slow_path_wq, &slow_path_work); + queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); idset_free(unreg_set); } void css_wait_for_slow_path(void) { - flush_workqueue(slow_path_wq); + flush_workqueue(cio_work_q); } /* Schedule reprobing of all unregistered subchannels. */ @@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void) ret = css_bus_init(); if (ret) return ret; - + cio_work_q = create_singlethread_workqueue("cio"); + if (!cio_work_q) { + ret = -ENOMEM; + goto out_bus; + } ret = io_subchannel_init(); if (ret) - css_bus_cleanup(); + goto out_wq; return ret; +out_wq: + destroy_workqueue(cio_work_q); +out_bus: + css_bus_cleanup(); + return ret; } subsys_initcall(channel_subsystem_init); @@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused) struct css_driver *cssdrv = to_cssdriver(drv); if (cssdrv->settle) - cssdrv->settle(); + return cssdrv->settle(); return 0; } +int css_complete_work(void) +{ + int ret; + + /* Wait for the evaluation of subchannels to finish. */ + ret = wait_event_interruptible(css_eval_wq, + atomic_read(&css_eval_scheduled) == 0); + if (ret) + return -EINTR; + flush_workqueue(cio_work_q); + /* Wait for the subchannel type specific initialization to finish */ + return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); +} + + /* * Wait for the initialization of devices to finish, to make sure we are * done with our setup if the search for the root device starts. @@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void) { /* Start initial subchannel evaluation. */ css_schedule_eval_all(); - /* Wait for the evaluation of subchannels to finish. */ - wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); - /* Wait for the subchannel type specific initialization to finish */ - return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); + css_complete_work(); + return 0; } subsys_initcall_sync(channel_subsystem_init_sync); +#ifdef CONFIG_PROC_FS +static ssize_t cio_settle_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret; + + /* Handle pending CRW's. */ + crw_wait_for_channel_report(); + ret = css_complete_work(); + + return ret ? 
ret : count; +} + +static const struct file_operations cio_settle_proc_fops = { + .write = cio_settle_write, +}; + +static int __init cio_settle_init(void) +{ + struct proc_dir_entry *entry; + + entry = proc_create("cio_settle", S_IWUSR, NULL, + &cio_settle_proc_fops); + if (!entry) + return -ENOMEM; + return 0; +} +device_initcall(cio_settle_init); +#endif /*CONFIG_PROC_FS*/ + int sch_is_pseudo_sch(struct subchannel *sch) { return sch == to_css(sch->dev.parent)->pseudo_subchannel; diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index fe84b92cde6..7e37886de23 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -95,7 +95,7 @@ struct css_driver { int (*freeze)(struct subchannel *); int (*thaw) (struct subchannel *); int (*restore)(struct subchannel *); - void (*settle)(void); + int (*settle)(void); const char *name; }; @@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[]; /* Helper functions to build lists for the slow path. */ void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval_all(void); +int css_complete_work(void); int sch_is_pseudo_sch(struct subchannel *); struct schib; int css_sch_is_valid(struct schib *); -extern struct workqueue_struct *slow_path_wq; +extern struct workqueue_struct *cio_work_q; void css_wait_for_slow_path(void); void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); #endif diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index a6c7d5426fb..6d229f3523a 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int); static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, int); static void recovery_func(unsigned long data); -struct workqueue_struct *ccw_device_work; wait_queue_head_t ccw_device_init_wq; atomic_t ccw_device_init_count; @@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch) return 0; } -static void io_subchannel_settle(void) +static int io_subchannel_settle(void) { - wait_event(ccw_device_init_wq, - atomic_read(&ccw_device_init_count) == 0); - flush_workqueue(ccw_device_work); + int ret; + + ret = wait_event_interruptible(ccw_device_init_wq, + atomic_read(&ccw_device_init_count) == 0); + if (ret) + return -EINTR; + flush_workqueue(cio_work_q); + return 0; } static struct css_driver io_subchannel_driver = { @@ -188,27 +192,13 @@ int __init io_subchannel_init(void) atomic_set(&ccw_device_init_count, 0); setup_timer(&recovery_timer, recovery_func, 0); - ccw_device_work = create_singlethread_workqueue("cio"); - if (!ccw_device_work) - return -ENOMEM; - slow_path_wq = create_singlethread_workqueue("kslowcrw"); - if (!slow_path_wq) { - ret = -ENOMEM; - goto out_err; - } - if ((ret = bus_register (&ccw_bus_type))) - goto out_err; - + ret = bus_register(&ccw_bus_type); + if (ret) + return ret; ret = css_driver_register(&io_subchannel_driver); if (ret) - goto out_err; + bus_unregister(&ccw_bus_type); - return 0; -out_err: - if (ccw_device_work) - destroy_workqueue(ccw_device_work); - if (slow_path_wq) - destroy_workqueue(slow_path_wq); return ret; } @@ -774,7 +764,7 @@ static void sch_create_and_recog_new_device(struct subchannel *sch) static void io_subchannel_register(struct ccw_device *cdev) { struct subchannel *sch; - int ret; + int ret, adjust_init_count = 1; unsigned long flags; sch = to_subchannel(cdev->dev.parent); @@ -803,6 +793,7 @@ static void io_subchannel_register(struct ccw_device *cdev) cdev->private->dev_id.ssid, 
cdev->private->dev_id.devno); } + adjust_init_count = 0; goto out; } /* @@ -828,7 +819,7 @@ out: cdev->private->flags.recog_done = 1; wake_up(&cdev->private->wait_q); out_err: - if (atomic_dec_and_test(&ccw_device_init_count)) + if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); } @@ -1348,7 +1339,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) /* Not operational. */ if (!cdev) return IO_SCH_UNREG; - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG; return IO_SCH_ORPH_UNREG; } @@ -1356,12 +1347,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) if (!cdev) return IO_SCH_ATTACH; if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG_ATTACH; return IO_SCH_ORPH_ATTACH; } if ((sch->schib.pmcw.pam & sch->opm) == 0) { - if (!ccw_device_notify(cdev, CIO_NO_PATH)) + if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) return IO_SCH_UNREG; return IO_SCH_DISC; } @@ -1410,6 +1401,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) rc = 0; goto out_unlock; case IO_SCH_VERIFY: + if (cdev->private->flags.resuming == 1) { + if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) { + ccw_device_set_notoper(cdev); + break; + } + } /* Trigger path verification. */ io_subchannel_verify(sch); rc = 0; @@ -1448,7 +1445,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) break; case IO_SCH_UNREG_ATTACH: /* Unregister ccw device. */ - ccw_device_unregister(cdev); + if (!cdev->private->flags.resuming) + ccw_device_unregister(cdev); break; default: break; @@ -1457,7 +1455,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) switch (action) { case IO_SCH_ORPH_UNREG: case IO_SCH_UNREG: - css_sch_device_unregister(sch); + if (!cdev || !cdev->private->flags.resuming) + css_sch_device_unregister(sch); break; case IO_SCH_ORPH_ATTACH: case IO_SCH_UNREG_ATTACH: @@ -1779,26 +1778,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); - if (cio_is_console(sch->schid)) - goto out; + spin_lock_irq(sch->lock); + if (cio_is_console(sch->schid)) { + cio_enable_subchannel(sch, (u32)(addr_t)sch); + goto out_unlock; + } /* * While we were sleeping, devices may have gone or become * available again. Kick re-detection. */ - spin_lock_irq(sch->lock); cdev->private->flags.resuming = 1; + css_schedule_eval(sch->schid); + spin_unlock_irq(sch->lock); + css_complete_work(); + + /* cdev may have been moved to a different subchannel. 
*/ + sch = to_subchannel(cdev->dev.parent); + spin_lock_irq(sch->lock); + if (cdev->private->state != DEV_STATE_ONLINE && + cdev->private->state != DEV_STATE_OFFLINE) + goto out_unlock; + ccw_device_recognition(cdev); spin_unlock_irq(sch->lock); wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || cdev->private->state == DEV_STATE_DISCONNECTED); -out: + spin_lock_irq(sch->lock); + +out_unlock: cdev->private->flags.resuming = 0; + spin_unlock_irq(sch->lock); } static int resume_handle_boxed(struct ccw_device *cdev) { cdev->private->state = DEV_STATE_BOXED; - if (ccw_device_notify(cdev, CIO_BOXED)) + if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK) return 0; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); return -ENODEV; @@ -1807,7 +1822,7 @@ static int resume_handle_boxed(struct ccw_device *cdev) static int resume_handle_disc(struct ccw_device *cdev) { cdev->private->state = DEV_STATE_DISCONNECTED; - if (ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK) return 0; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); return -ENODEV; @@ -1816,40 +1831,31 @@ static int resume_handle_disc(struct ccw_device *cdev) static int ccw_device_pm_restore(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); - struct subchannel *sch = to_subchannel(cdev->dev.parent); - int ret = 0, cm_enabled; + struct subchannel *sch; + int ret = 0; __ccw_device_pm_restore(cdev); + sch = to_subchannel(cdev->dev.parent); spin_lock_irq(sch->lock); - if (cio_is_console(sch->schid)) { - cio_enable_subchannel(sch, (u32)(addr_t)sch); - spin_unlock_irq(sch->lock); + if (cio_is_console(sch->schid)) goto out_restore; - } - cdev->private->flags.donotify = 0; + /* check recognition results */ switch (cdev->private->state) { case DEV_STATE_OFFLINE: + case DEV_STATE_ONLINE: + cdev->private->flags.donotify = 0; break; case DEV_STATE_BOXED: ret = resume_handle_boxed(cdev); - spin_unlock_irq(sch->lock); if (ret) - goto out; + goto out_unlock; goto out_restore; - case DEV_STATE_DISCONNECTED: - goto out_disc_unlock; default: - goto out_unreg_unlock; - } - /* check if the device id has changed */ - if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { - CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno " - "changed from %04x to %04x)\n", - sch->schid.ssid, sch->schid.sch_no, - cdev->private->dev_id.devno, - sch->schib.pmcw.dev); - goto out_unreg_unlock; + ret = resume_handle_disc(cdev); + if (ret) + goto out_unlock; + goto out_restore; } /* check if the device type has changed */ if (!ccw_device_test_sense_data(cdev)) { @@ -1858,24 +1864,30 @@ static int ccw_device_pm_restore(struct device *dev) ret = -ENODEV; goto out_unlock; } - if (!cdev->online) { - ret = 0; + if (!cdev->online) goto out_unlock; - } - ret = ccw_device_online(cdev); - if (ret) - goto out_disc_unlock; - cm_enabled = cdev->private->cmb != NULL; + if (ccw_device_online(cdev)) { + ret = resume_handle_disc(cdev); + if (ret) + goto out_unlock; + goto out_restore; + } spin_unlock_irq(sch->lock); - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - if (cdev->private->state != DEV_STATE_ONLINE) { - spin_lock_irq(sch->lock); - goto out_disc_unlock; + spin_lock_irq(sch->lock); + + if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) { + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + ret = -ENODEV; + goto out_unlock; } - if (cm_enabled) { + + /* reenable cmf, if needed */ + if (cdev->private->cmb) { + spin_unlock_irq(sch->lock); ret = ccw_set_cmf(cdev, 1); + spin_lock_irq(sch->lock); if (ret) { 
CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " "(rc=%d)\n", cdev->private->dev_id.ssid, @@ -1885,21 +1897,11 @@ static int ccw_device_pm_restore(struct device *dev) } out_restore: + spin_unlock_irq(sch->lock); if (cdev->online && cdev->drv && cdev->drv->restore) ret = cdev->drv->restore(cdev); -out: return ret; -out_disc_unlock: - ret = resume_handle_disc(cdev); - spin_unlock_irq(sch->lock); - if (ret) - return ret; - goto out_restore; - -out_unreg_unlock: - ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); - ret = -ENODEV; out_unlock: spin_unlock_irq(sch->lock); return ret; @@ -2028,7 +2030,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) /* Get workqueue ref. */ if (!get_device(&cdev->dev)) return; - if (!queue_work(slow_path_wq, &cdev->private->todo_work)) { + if (!queue_work(cio_work_q, &cdev->private->todo_work)) { /* Already queued, release workqueue ref. */ put_device(&cdev->dev); } @@ -2041,5 +2043,4 @@ EXPORT_SYMBOL(ccw_driver_register); EXPORT_SYMBOL(ccw_driver_unregister); EXPORT_SYMBOL(get_ccwdev_by_busid); EXPORT_SYMBOL(ccw_bus_type); -EXPORT_SYMBOL(ccw_device_work); EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index bcfe13e4263..379de2d1ec4 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -4,7 +4,7 @@ #include <asm/ccwdev.h> #include <asm/atomic.h> #include <linux/wait.h> - +#include <linux/notifier.h> #include "io_sch.h" /* @@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev) cdev->private->state == DEV_STATE_BOXED); } -extern struct workqueue_struct *ccw_device_work; extern wait_queue_head_t ccw_device_init_wq; extern atomic_t ccw_device_init_count; int __init io_subchannel_init(void); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index ae760658a13..c56ab94612f 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) } } +/** + * ccw_device_notify() - inform the device's driver about an event + * @cdev: device for which an event occured + * @event: event that occurred + * + * Returns: + * -%EINVAL if the device is offline or has no driver. + * -%EOPNOTSUPP if the device's driver has no notifier registered. + * %NOTIFY_OK if the driver wants to keep the device. + * %NOTIFY_BAD if the driver doesn't want to keep the device. + */ int ccw_device_notify(struct ccw_device *cdev, int event) { + int ret = -EINVAL; + if (!cdev->drv) - return 0; + goto out; if (!cdev->online) - return 0; + goto out; CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, event); - return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; + if (!cdev->drv->notify) { + ret = -EOPNOTSUPP; + goto out; + } + if (cdev->drv->notify(cdev, event)) + ret = NOTIFY_OK; + else + ret = NOTIFY_BAD; +out: + return ret; } static void ccw_device_oper_notify(struct ccw_device *cdev) { - if (ccw_device_notify(cdev, CIO_OPER)) { + if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { /* Reenable channel measurements, if needed. 
*/ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); return; @@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state) case DEV_STATE_BOXED: CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", cdev->private->dev_id.devno, sch->schid.sch_no); - if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) + if (cdev->online && + ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); cdev->private->flags.donotify = 0; break; case DEV_STATE_NOT_OPER: CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", cdev->private->dev_id.devno, sch->schid.sch_no); - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); else ccw_device_set_disconnected(cdev); @@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state) CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " "%04x\n", cdev->private->dev_id.devno, sch->schid.sch_no); - if (!ccw_device_notify(cdev, CIO_NO_PATH)) + if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); else ccw_device_set_disconnected(cdev); @@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev) static void ccw_device_generic_notoper(struct ccw_device *cdev, enum dev_event dev_event) { - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); else ccw_device_set_disconnected(cdev); @@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) struct irb *irb; int is_cmd; - irb = (struct irb *) __LC_IRB; + irb = (struct irb *)&S390_lowcore.irb; is_cmd = !scsw_is_tm(&irb->scsw); /* Check for unsolicited interrupt. */ if (!scsw_is_solicited(&irb->scsw)) { @@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; - irb = (struct irb *) __LC_IRB; + irb = (struct irb *)&S390_lowcore.irb; /* Check for unsolicited interrupt. */ if (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 44f2f6a97f3..48aa0647432 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -208,18 +208,27 @@ struct qdio_dev_perf_stat { unsigned int eqbs_partial; unsigned int sqbs; unsigned int sqbs_partial; +} ____cacheline_aligned; + +struct qdio_queue_perf_stat { + /* + * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. + * Since max. 127 SBALs are scanned reuse entry for 128 as queue full + * aka 127 SBALs found. + */ + unsigned int nr_sbals[8]; + unsigned int nr_sbal_error; + unsigned int nr_sbal_nop; + unsigned int nr_sbal_total; }; struct qdio_input_q { /* input buffer acknowledgement flag */ int polling; - /* first ACK'ed buffer */ int ack_start; - /* how much sbals are acknowledged with qebsm */ int ack_count; - /* last time of noticing incoming data */ u64 timestamp; }; @@ -227,40 +236,27 @@ struct qdio_input_q { struct qdio_output_q { /* PCIs are enabled for the queue */ int pci_out_enabled; - /* IQDIO: output multiple buffers (enhanced SIGA) */ int use_enh_siga; - /* timer to check for more outbound work */ struct timer_list timer; }; +/* + * Note on cache alignment: grouped slsb and write mostly data at the beginning + * sbal[] is read-only and starts on a new cacheline followed by read mostly. 
+ */ struct qdio_q { struct slsb slsb; + union { struct qdio_input_q in; struct qdio_output_q out; } u; - /* queue number */ - int nr; - - /* bitmask of queue number */ - int mask; - - /* input or output queue */ - int is_input_q; - - /* list of thinint input queues */ - struct list_head entry; - - /* upper-layer program handler */ - qdio_handler_t (*handler); - /* * inbound: next buffer the program should check for - * outbound: next buffer to check for having been processed - * by the card + * outbound: next buffer to check if adapter processed it */ int first_to_check; @@ -273,16 +269,32 @@ struct qdio_q { /* number of buffers in use by the adapter */ atomic_t nr_buf_used; - struct qdio_irq *irq_ptr; - struct dentry *debugfs_q; - struct tasklet_struct tasklet; - /* error condition during a data transfer */ unsigned int qdio_error; - struct sl *sl; - struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; + struct tasklet_struct tasklet; + struct qdio_queue_perf_stat q_stats; + + struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned; + + /* queue number */ + int nr; + + /* bitmask of queue number */ + int mask; + + /* input or output queue */ + int is_input_q; + + /* list of thinint input queues */ + struct list_head entry; + /* upper-layer program handler */ + qdio_handler_t (*handler); + + struct dentry *debugfs_q; + struct qdio_irq *irq_ptr; + struct sl *sl; /* * Warning: Leave this member at the end so it won't be cleared in * qdio_fill_qs. A page is allocated under this pointer and used for @@ -317,12 +329,8 @@ struct qdio_irq { struct qdio_ssqd_desc ssqd_desc; void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); - struct qdio_dev_perf_stat perf_stat; int perf_stat_enabled; - /* - * Warning: Leave these members together at the end so they won't be - * cleared in qdio_setup_irq. 
- */ + struct qdr *qdr; unsigned long chsc_page; @@ -331,6 +339,7 @@ struct qdio_irq { debug_info_t *debug_area; struct mutex setup_mutex; + struct qdio_dev_perf_stat perf_stat; }; /* helper functions */ @@ -341,9 +350,20 @@ struct qdio_irq { (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ css_general_characteristics.aif_osa) -#define qperf(qdev,attr) qdev->perf_stat.attr -#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \ - q->irq_ptr->perf_stat.attr++ +#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) + +#define qperf_inc(__q, __attr) \ +({ \ + struct qdio_irq *qdev = (__q)->irq_ptr; \ + if (qdev->perf_stat_enabled) \ + (qdev->perf_stat.__attr)++; \ +}) + +static inline void account_sbals_error(struct qdio_q *q, int count) +{ + q->q_stats.nr_sbal_error += count; + q->q_stats.nr_sbal_total += count; +} /* the highest iqdio queue is used for multicast */ static inline int multicast_outbound(struct qdio_q *q) diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f49761ff9a0..6ce83f56d53 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -33,7 +33,6 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data, DBF_HEX(&init_data->input_handler, sizeof(void *)); DBF_HEX(&init_data->output_handler, sizeof(void *)); DBF_HEX(&init_data->int_parm, sizeof(long)); - DBF_HEX(&init_data->flags, sizeof(long)); DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *)); DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *)); DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); @@ -60,7 +59,7 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); seq_printf(m, "polling: %d ack start: %d ack count: %d\n", q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); - seq_printf(m, "slsb buffer states:\n"); + seq_printf(m, "SBAL states:\n"); seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { @@ -97,6 +96,20 @@ static int qstat_show(struct seq_file *m, void *v) } seq_printf(m, "\n"); seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); + + seq_printf(m, "\nSBAL statistics:"); + if (!q->irq_ptr->perf_stat_enabled) { + seq_printf(m, " disabled\n"); + return 0; + } + + seq_printf(m, "\n1 2.. 4.. 8.. " + "16.. 32.. 64.. 
127\n"); + for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++) + seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]); + seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n", + q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop, + q->q_stats.nr_sbal_total); return 0; } @@ -181,9 +194,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, { struct seq_file *seq = file->private_data; struct qdio_irq *irq_ptr = seq->private; + struct qdio_q *q; unsigned long val; char buf[8]; - int ret; + int ret, i; if (!irq_ptr) return 0; @@ -201,6 +215,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, case 0: irq_ptr->perf_stat_enabled = 0; memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); + for_each_input_queue(irq_ptr, q, i) + memset(&q->q_stats, 0, sizeof(q->q_stats)); + for_each_output_queue(irq_ptr, q, i) + memset(&q->q_stats, 0, sizeof(q->q_stats)); break; case 1: irq_ptr->perf_stat_enabled = 1; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 62b654af923..4f8f7431177 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q) set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } +static inline void account_sbals(struct qdio_q *q, int count) +{ + int pos = 0; + + q->q_stats.nr_sbal_total += count; + if (count == QDIO_MAX_BUFFERS_MASK) { + q->q_stats.nr_sbals[7]++; + return; + } + while (count >>= 1) + pos++; + q->q_stats.nr_sbals[pos]++; +} + static void announce_buffer_error(struct qdio_q *q, int count) { q->qdio_error |= QDIO_ERROR_SLSB_STATE; @@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) q->first_to_check = add_buf(q->first_to_check, count); if (atomic_sub(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); + if (q->irq_ptr->perf_stat_enabled) + account_sbals(q, count); break; case SLSB_P_INPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); + if (q->irq_ptr->perf_stat_enabled) + account_sbals_error(q, count); break; case SLSB_CU_INPUT_EMPTY: case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_ACK: + if (q->irq_ptr->perf_stat_enabled) + q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); break; default: @@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q) if ((bufnr != q->last_move) || q->qdio_error) { q->last_move = bufnr; - if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) + if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) q->u.in.timestamp = get_usecs(); return 1; } else @@ -568,10 +588,11 @@ static void qdio_kick_handler(struct qdio_q *q) if (q->is_input_q) { qperf_inc(q, inbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); - } else + } else { qperf_inc(q, outbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", start, count); + } q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, q->irq_ptr->int_parm); @@ -643,15 +664,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) atomic_sub(count, &q->nr_buf_used); q->first_to_check = add_buf(q->first_to_check, count); + if (q->irq_ptr->perf_stat_enabled) + account_sbals(q, count); break; case SLSB_P_OUTPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, 
count); atomic_sub(count, &q->nr_buf_used); + if (q->irq_ptr->perf_stat_enabled) + account_sbals_error(q, count); break; case SLSB_CU_OUTPUT_PRIMED: /* the adapter has not fetched the output yet */ + if (q->irq_ptr->perf_stat_enabled) + q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); break; case SLSB_P_OUTPUT_NOT_INIT: diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 8c2dea5fa2b..7f4a7546514 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, irq_ptr->qdr->qdf0[i + nr].slsba = (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; - irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; - irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; - irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; - irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4; } static void setup_qdr(struct qdio_irq *irq_ptr, @@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr, irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; - irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4; for (i = 0; i < qdio_init->no_input_qs; i++) __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); @@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data) struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; int rc; - memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); + memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); + memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); + memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw)); + memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc)); + memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); + + irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL; + irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0; + /* wipes qib.ac, required by ar7063 */ memset(irq_ptr->qdr, 0, sizeof(struct qdr)); diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 091d904d318..9942c1031b2 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) .code = 0x0021, }; scssc_area->operation_code = 0; - scssc_area->ks = PAGE_DEFAULT_KEY; - scssc_area->kc = PAGE_DEFAULT_KEY; + scssc_area->ks = PAGE_DEFAULT_KEY >> 4; + scssc_area->kc = PAGE_DEFAULT_KEY >> 4; scssc_area->isc = QDIO_AIRQ_ISC; scssc_area->schid = irq_ptr->schid; |