Diffstat (limited to 'drivers/s390/char')
-rw-r--r--  drivers/s390/char/Makefile        |   6
-rw-r--r--  drivers/s390/char/con3215.c       |   8
-rw-r--r--  drivers/s390/char/con3270.c       |  13
-rw-r--r--  drivers/s390/char/fs3270.c        |   7
-rw-r--r--  drivers/s390/char/monwriter.c     |   2
-rw-r--r--  drivers/s390/char/raw3270.c       |  38
-rw-r--r--  drivers/s390/char/raw3270.h       |   3
-rw-r--r--  drivers/s390/char/sclp.c          | 184
-rw-r--r--  drivers/s390/char/sclp.h          |  20
-rw-r--r--  drivers/s390/char/sclp_cmd.c      | 212
-rw-r--r--  drivers/s390/char/sclp_con.c      |  31
-rw-r--r--  drivers/s390/char/sclp_config.c   |   2
-rw-r--r--  drivers/s390/char/sclp_ctl.c      | 144
-rw-r--r--  drivers/s390/char/sclp_early.c    | 315
-rw-r--r--  drivers/s390/char/sclp_sdias.c    |  78
-rw-r--r--  drivers/s390/char/sclp_sdias.h    |  46
-rw-r--r--  drivers/s390/char/sclp_vt220.c    |  55
-rw-r--r--  drivers/s390/char/tape_class.c    |   2
-rw-r--r--  drivers/s390/char/tape_std.c      |   3
-rw-r--r--  drivers/s390/char/tty3270.c       |  17
-rw-r--r--  drivers/s390/char/vmlogrdr.c      |   4
-rw-r--r--  drivers/s390/char/vmur.c          |   6
-rw-r--r--  drivers/s390/char/vmwatchdog.c    | 337
-rw-r--r--  drivers/s390/char/zcore.c         |  92
24 files changed, 929 insertions, 696 deletions
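One notable addition in this series is the new /dev/sclp ioctl interface (drivers/s390/char/sclp_ctl.c in the diff below). The following user-space sketch is an illustration only, not part of the patch: it assumes the SCLP_CTL_SCCB ioctl number and the struct sclp_ctl_sccb layout (cmdw plus a 64-bit sccb pointer, both visible in the driver code) are exported via <asm/sclp_ctl.h>, and 0x00400002 is used solely because it is one of the two command words the driver whitelists.

/*
 * Hypothetical example: issue one SCLP request through /dev/sclp.
 * Assumes <asm/sclp_ctl.h> provides SCLP_CTL_SCCB and struct sclp_ctl_sccb.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/sclp_ctl.h>

int main(void)
{
	/* SCCB buffer: one page, starting with the 8-byte sccb_header;
	 * the driver requires 8 <= header.length <= 4096. */
	static uint8_t sccb[4096] __attribute__((aligned(4096)));
	struct sclp_ctl_sccb ctl;
	int fd, rc;

	memset(sccb, 0, sizeof(sccb));
	*(uint16_t *)sccb = sizeof(sccb);	/* sccb_header.length */

	memset(&ctl, 0, sizeof(ctl));
	ctl.cmdw = 0x00400002;			/* one of the whitelisted command words */
	ctl.sccb = (uint64_t)(unsigned long)sccb;

	fd = open("/dev/sclp", O_RDWR);
	if (fd < 0) {
		perror("open /dev/sclp");
		return 1;
	}
	rc = ioctl(fd, SCLP_CTL_SCCB, &ctl);
	if (rc)
		perror("SCLP_CTL_SCCB");
	else
		/* response code sits at offset 6 of the sccb_header */
		printf("response code: 0x%04x\n", *(uint16_t *)(sccb + 6));
	close(fd);
	return rc ? 1 : 0;
}

The driver copies the SCCB back to user space after completion, so the response code and any returned data can be read directly from the same buffer.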
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index f3c32520744..78b6ace7edc 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,8 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o + sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ + sclp_early.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o @@ -18,7 +19,6 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o -obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o obj-$(CONFIG_VMCP) += vmcp.o @@ -32,4 +32,4 @@ obj-$(CONFIG_MONWRITER) += monwriter.o obj-$(CONFIG_S390_VMUR) += vmur.o zcore_mod-objs := sclp_sdias.o zcore.o -obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o +obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index eb5d22795c4..5af7f0bd612 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -922,7 +922,7 @@ static int __init con3215_init(void) raw3215_freelist = req; } - cdev = ccw_device_probe_console(); + cdev = ccw_device_create_console(&raw3215_ccw_driver); if (IS_ERR(cdev)) return -ENODEV; @@ -932,6 +932,12 @@ static int __init con3215_init(void) cdev->handler = raw3215_irq; raw->flags |= RAW3215_FIXED; + if (ccw_device_enable_console(cdev)) { + ccw_device_destroy_console(cdev); + raw3215_free_info(raw); + raw3215[0] = NULL; + return -ENODEV; + } /* Request the console irq */ if (raw3215_startup(raw) != 0) { diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 699fd3e363d..75ffe9980c3 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -7,6 +7,7 @@ * Copyright IBM Corp. 2003, 2009 */ +#include <linux/module.h> #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -30,6 +31,9 @@ static struct raw3270_fn con3270_fn; +static bool auto_update = 1; +module_param(auto_update, bool, 0); + /* * Main 3270 console view data structure. 
*/ @@ -204,6 +208,8 @@ con3270_update(struct con3270 *cp) struct string *s, *n; int rc; + if (!auto_update && !raw3270_view_active(&cp->view)) + return; if (cp->view.dev) raw3270_activate_view(&cp->view); @@ -529,6 +535,7 @@ con3270_flush(void) if (!cp->view.dev) return; raw3270_pm_unfreeze(&cp->view); + raw3270_activate_view(&cp->view); spin_lock_irqsave(&cp->view.lock, flags); con3270_wait_write(cp); cp->nr_up = 0; @@ -576,7 +583,6 @@ static struct console con3270 = { static int __init con3270_init(void) { - struct ccw_device *cdev; struct raw3270 *rp; void *cbuf; int i; @@ -591,10 +597,7 @@ con3270_init(void) cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); } - cdev = ccw_device_probe_console(); - if (IS_ERR(cdev)) - return -ENODEV; - rp = raw3270_setup_console(cdev); + rp = raw3270_setup_console(); if (IS_ERR(rp)) return PTR_ERR(rp); diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 96e52bf7593..71e97473801 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -524,20 +524,20 @@ static const struct file_operations fs3270_fops = { .llseek = no_llseek, }; -void fs3270_create_cb(int minor) +static void fs3270_create_cb(int minor) { __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops); device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), NULL, "3270/tub%d", minor); } -void fs3270_destroy_cb(int minor) +static void fs3270_destroy_cb(int minor) { device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor)); __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub"); } -struct raw3270_notifier fs3270_notifier = +static struct raw3270_notifier fs3270_notifier = { .create = fs3270_create_cb, .destroy = fs3270_destroy_cb, @@ -564,6 +564,7 @@ static void __exit fs3270_exit(void) { raw3270_unregister_notifier(&fs3270_notifier); + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0)); __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); } diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 4600aa10a1c..668b32b0dc1 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -60,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) struct appldata_product_id id; int rc; - strcpy(id.prod_nr, "LNXAPPL"); + strncpy(id.prod_nr, "LNXAPPL", 7); id.prod_fn = myhdr->applid; id.record_nr = myhdr->record_num; id.version_nr = myhdr->version; diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 24a08e8f19e..220acb4cbee 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -276,6 +276,15 @@ __raw3270_start(struct raw3270 *rp, struct raw3270_view *view, } int +raw3270_view_active(struct raw3270_view *view) +{ + struct raw3270 *rp = view->dev; + + return rp && rp->view == view && + !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags); +} + +int raw3270_start(struct raw3270_view *view, struct raw3270_request *rq) { unsigned long flags; @@ -615,14 +624,15 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data) if (rp->state != RAW3270_STATE_RESET) return; - if (rq && rq->rc) { + if (rq->rc) { /* Reset command failed. 
*/ rp->state = RAW3270_STATE_INIT; - } else if (0 && MACHINE_IS_VM) { + } else if (MACHINE_IS_VM) { raw3270_size_device_vm(rp); raw3270_size_device_done(rp); } else raw3270_writesf_readpart(rp); + memset(&rp->init_reset, 0, sizeof(rp->init_reset)); } static int @@ -630,9 +640,10 @@ __raw3270_reset_device(struct raw3270 *rp) { int rc; + /* Check if reset is already pending */ + if (rp->init_reset.view) + return -EBUSY; /* Store reset data stream to init_data/init_reset */ - memset(&rp->init_reset, 0, sizeof(rp->init_reset)); - memset(&rp->init_data, 0, sizeof(rp->init_data)); rp->init_data[0] = TW_KR; rp->init_reset.ccw.cmd_code = TC_EWRITEA; rp->init_reset.ccw.flags = CCW_FLAG_SLI; @@ -776,22 +787,37 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) } #ifdef CONFIG_TN3270_CONSOLE +/* Tentative definition - see below for actual definition. */ +static struct ccw_driver raw3270_ccw_driver; + /* * Setup 3270 device configured as console. */ -struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) +struct raw3270 __init *raw3270_setup_console(void) { + struct ccw_device *cdev; unsigned long flags; struct raw3270 *rp; char *ascebc; int rc; + cdev = ccw_device_create_console(&raw3270_ccw_driver); + if (IS_ERR(cdev)) + return ERR_CAST(cdev); + rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); ascebc = kzalloc(256, GFP_KERNEL); rc = raw3270_setup_device(cdev, rp, ascebc); if (rc) return ERR_PTR(rc); set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags); + + rc = ccw_device_enable_console(cdev); + if (rc) { + ccw_device_destroy_console(cdev); + return ERR_PTR(rc); + } + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); do { __raw3270_reset_device(rp); @@ -826,7 +852,7 @@ raw3270_create_device(struct ccw_device *cdev) char *ascebc; int rc; - rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); + rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); if (!rp) return ERR_PTR(-ENOMEM); ascebc = kmalloc(256, GFP_KERNEL); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 7b73ff8c1bd..e1e41c2861f 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -173,6 +173,7 @@ int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *); int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *); int raw3270_reset(struct raw3270_view *); struct raw3270_view *raw3270_view(struct raw3270_view *); +int raw3270_view_active(struct raw3270_view *); /* Reference count inliner for view structures. 
*/ static inline void @@ -190,7 +191,7 @@ raw3270_put_view(struct raw3270_view *view) wake_up(&raw3270_wait_queue); } -struct raw3270 *raw3270_setup_console(struct ccw_device *cdev); +struct raw3270 *raw3270_setup_console(void); void raw3270_wait_cons_dev(struct raw3270 *); /* Notifier for device addition/removal */ diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index bd6871bf545..c316051d9bd 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -50,16 +50,50 @@ static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); /* Suspend request */ static DECLARE_COMPLETION(sclp_request_queue_flushed); +/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */ +int sclp_console_pages = SCLP_CONSOLE_PAGES; +/* Flag to indicate if buffer pages are dropped on buffer full condition */ +int sclp_console_drop = 0; +/* Number of times the console dropped buffer pages */ +unsigned long sclp_console_full; + static void sclp_suspend_req_cb(struct sclp_req *req, void *data) { complete(&sclp_request_queue_flushed); } +static int __init sclp_setup_console_pages(char *str) +{ + int pages, rc; + + rc = kstrtoint(str, 0, &pages); + if (!rc && pages >= SCLP_CONSOLE_PAGES) + sclp_console_pages = pages; + return 1; +} + +__setup("sclp_con_pages=", sclp_setup_console_pages); + +static int __init sclp_setup_console_drop(char *str) +{ + int drop, rc; + + rc = kstrtoint(str, 0, &drop); + if (!rc && drop) + sclp_console_drop = 1; + return 1; +} + +__setup("sclp_con_drop=", sclp_setup_console_drop); + static struct sclp_req sclp_suspend_req; /* Timer for request retries. */ static struct timer_list sclp_request_timer; +/* Timer for queued requests. */ +static struct timer_list sclp_queue_timer; + /* Internal state: is the driver initialized? */ static volatile enum sclp_init_state_t { sclp_init_state_uninitialized, @@ -117,14 +151,19 @@ static int sclp_init(void); int sclp_service_call(sclp_cmdw_t command, void *sccb) { - int cc; + int cc = 4; /* Initialize for program check handling */ asm volatile( - " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ - " ipm %0\n" - " srl %0,28" - : "=&d" (cc) : "d" (command), "a" (__pa(sccb)) + "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ + "1: ipm %0\n" + " srl %0,28\n" + "2:\n" + EX_TABLE(0b, 2b) + EX_TABLE(1b, 2b) + : "+&d" (cc) : "d" (command), "a" (__pa(sccb)) : "cc", "memory"); + if (cc == 4) + return -EINVAL; if (cc == 3) return -EIO; if (cc == 2) @@ -179,6 +218,76 @@ sclp_request_timeout(unsigned long data) sclp_process_queue(); } +/* + * Returns the expire value in jiffies of the next pending request timeout, + * if any. Needs to be called with sclp_lock. + */ +static unsigned long __sclp_req_queue_find_next_timeout(void) +{ + unsigned long expires_next = 0; + struct sclp_req *req; + + list_for_each_entry(req, &sclp_req_queue, list) { + if (!req->queue_expires) + continue; + if (!expires_next || + (time_before(req->queue_expires, expires_next))) + expires_next = req->queue_expires; + } + return expires_next; +} + +/* + * Returns expired request, if any, and removes it from the list. 
+ */ +static struct sclp_req *__sclp_req_queue_remove_expired_req(void) +{ + unsigned long flags, now; + struct sclp_req *req; + + spin_lock_irqsave(&sclp_lock, flags); + now = jiffies; + /* Don't need list_for_each_safe because we break out after list_del */ + list_for_each_entry(req, &sclp_req_queue, list) { + if (!req->queue_expires) + continue; + if (time_before_eq(req->queue_expires, now)) { + if (req->status == SCLP_REQ_QUEUED) { + req->status = SCLP_REQ_QUEUED_TIMEOUT; + list_del(&req->list); + goto out; + } + } + } + req = NULL; +out: + spin_unlock_irqrestore(&sclp_lock, flags); + return req; +} + +/* + * Timeout handler for queued requests. Removes request from list and + * invokes callback. This timer can be set per request in situations where + * waiting too long would be harmful to the system, e.g. during SE reboot. + */ +static void sclp_req_queue_timeout(unsigned long data) +{ + unsigned long flags, expires_next; + struct sclp_req *req; + + do { + req = __sclp_req_queue_remove_expired_req(); + if (req && req->callback) + req->callback(req, req->callback_data); + } while (req); + + spin_lock_irqsave(&sclp_lock, flags); + expires_next = __sclp_req_queue_find_next_timeout(); + if (expires_next) + mod_timer(&sclp_queue_timer, expires_next); + spin_unlock_irqrestore(&sclp_lock, flags); +} + /* Try to start a request. Return zero if the request was successfully * started or if it will be started at a later time. Return non-zero otherwise. * Called while sclp_lock is locked. */ @@ -281,6 +390,13 @@ sclp_add_request(struct sclp_req *req) req->start_count = 0; list_add_tail(&req->list, &sclp_req_queue); rc = 0; + if (req->queue_timeout) { + req->queue_expires = jiffies + req->queue_timeout * HZ; + if (!timer_pending(&sclp_queue_timer) || + time_after(sclp_queue_timer.expires, req->queue_expires)) + mod_timer(&sclp_queue_timer, req->queue_expires); + } else + req->queue_expires = 0; /* Start if request is first in list */ if (sclp_running_state == sclp_running_state_idle && req->list.prev == &sclp_req_queue) { @@ -450,7 +566,7 @@ sclp_sync_wait(void) timeout = 0; if (timer_pending(&sclp_request_timer)) { /* Get timeout TOD value */ - timeout = get_tod_clock() + + timeout = get_tod_clock_fast() + sclp_tod_from_jiffies(sclp_request_timer.expires - jiffies); } @@ -472,7 +588,7 @@ sclp_sync_wait(void) while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ if (timer_pending(&sclp_request_timer) && - get_tod_clock() > timeout && + get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer)) sclp_request_timer.function(sclp_request_timer.data); cpu_relax(); @@ -856,7 +972,7 @@ sclp_check_interface(void) spin_lock_irqsave(&sclp_lock, flags); /* Prepare init mask command */ - rc = register_external_interrupt(0x2401, sclp_check_handler); + rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); if (rc) { spin_unlock_irqrestore(&sclp_lock, flags); return rc; @@ -874,12 +990,12 @@ sclp_check_interface(void) spin_unlock_irqrestore(&sclp_lock, flags); /* Enable service-signal interruption - needs to happen * with IRQs enabled. */ - service_subclass_irq_register(); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); /* Wait for signal from interrupt or timeout */ sclp_sync_wait(); /* Disable service-signal interruption - needs to happen * with IRQs enabled. 
*/ - service_subclass_irq_unregister(); + irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); spin_lock_irqsave(&sclp_lock, flags); del_timer(&sclp_request_timer); if (sclp_init_req.status == SCLP_REQ_DONE && @@ -889,7 +1005,7 @@ sclp_check_interface(void) } else rc = -EBUSY; } - unregister_external_interrupt(0x2401, sclp_check_handler); + unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); spin_unlock_irqrestore(&sclp_lock, flags); return rc; } @@ -1013,11 +1129,47 @@ static const struct dev_pm_ops sclp_pm_ops = { .restore = sclp_restore, }; +static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf) +{ + return sprintf(buf, "%i\n", sclp_console_pages); +} + +static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL); + +static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf) +{ + return sprintf(buf, "%i\n", sclp_console_drop); +} + +static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL); + +static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf) +{ + return sprintf(buf, "%lu\n", sclp_console_full); +} + +static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL); + +static struct attribute *sclp_drv_attrs[] = { + &driver_attr_con_pages.attr, + &driver_attr_con_drop.attr, + &driver_attr_con_full.attr, + NULL, +}; +static struct attribute_group sclp_drv_attr_group = { + .attrs = sclp_drv_attrs, +}; +static const struct attribute_group *sclp_drv_attr_groups[] = { + &sclp_drv_attr_group, + NULL, +}; + static struct platform_driver sclp_pdrv = { .driver = { .name = "sclp", .owner = THIS_MODULE, .pm = &sclp_pm_ops, + .groups = sclp_drv_attr_groups, }, }; @@ -1041,6 +1193,8 @@ sclp_init(void) INIT_LIST_HEAD(&sclp_reg_list); list_add(&sclp_state_change_event.list, &sclp_reg_list); init_timer(&sclp_request_timer); + init_timer(&sclp_queue_timer); + sclp_queue_timer.function = sclp_req_queue_timeout; /* Check interface */ spin_unlock_irqrestore(&sclp_lock, flags); rc = sclp_check_interface(); @@ -1052,14 +1206,14 @@ sclp_init(void) if (rc) goto fail_init_state_uninitialized; /* Register interrupt handler */ - rc = register_external_interrupt(0x2401, sclp_interrupt_handler); + rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler); if (rc) goto fail_unregister_reboot_notifier; sclp_init_state = sclp_init_state_initialized; spin_unlock_irqrestore(&sclp_lock, flags); /* Enable service-signal external interruption - needs to happen with * IRQs enabled. */ - service_subclass_irq_register(); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); sclp_init_mask(1); return 0; @@ -1096,10 +1250,12 @@ static __init int sclp_initcall(void) rc = platform_driver_register(&sclp_pdrv); if (rc) return rc; + sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); - rc = IS_ERR(sclp_pdev) ? 
PTR_ERR(sclp_pdev) : 0; + rc = PTR_ERR_OR_ZERO(sclp_pdev); if (rc) goto fail_platform_driver_unregister; + rc = atomic_notifier_chain_register(&panic_notifier_list, &sclp_on_panic_nb); if (rc) diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 25bcd4c0ed8..a68b5ec7d04 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -15,7 +15,7 @@ /* maximum number of pages concerning our own memory management */ #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) -#define MAX_CONSOLE_PAGES 6 +#define SCLP_CONSOLE_PAGES 6 #define EVTYP_OPCMD 0x01 #define EVTYP_MSG 0x02 @@ -99,6 +99,7 @@ struct init_sccb { } __attribute__((packed)); extern u64 sclp_facilities; + #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) @@ -132,6 +133,11 @@ struct sclp_req { /* Callback that is called after reaching final status. */ void (*callback)(struct sclp_req *, void *data); void *callback_data; + int queue_timeout; /* request queue timeout (sec), set by + caller of sclp_add_request(), if + needed */ + /* Internal fields */ + unsigned long queue_expires; /* request queue timeout (jiffies) */ }; #define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */ @@ -139,6 +145,9 @@ struct sclp_req { #define SCLP_REQ_RUNNING 0x02 /* request is currently running */ #define SCLP_REQ_DONE 0x03 /* request is completed successfully */ #define SCLP_REQ_FAILED 0x05 /* request is finally failed */ +#define SCLP_REQ_QUEUED_TIMEOUT 0x06 /* request on queue timed out */ + +#define SCLP_QUEUE_INTERVAL 5 /* timeout interval for request queue */ /* function pointers that a high level driver has to use for registration */ /* of some routines it wants to be called from the low level driver */ @@ -171,10 +180,19 @@ int sclp_remove_processed(struct sccb_header *sccb); int sclp_deactivate(void); int sclp_reactivate(void); int sclp_service_call(sclp_cmdw_t command, void *sccb); +int sclp_sync_request(sclp_cmdw_t command, void *sccb); +int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout); int sclp_sdias_init(void); void sclp_sdias_exit(void); +extern int sclp_console_pages; +extern int sclp_console_drop; +extern unsigned long sclp_console_full; +extern u8 sclp_fac84; +extern unsigned long long sclp_rzm; +extern unsigned long long sclp_rnmax; + /* useful inlines */ /* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */ diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index bf07c3a188d..6e14999f9e8 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -28,166 +28,6 @@ #include "sclp.h" -#define SCLP_CMDW_READ_SCP_INFO 0x00020001 -#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 - -struct read_info_sccb { - struct sccb_header header; /* 0-7 */ - u16 rnmax; /* 8-9 */ - u8 rnsize; /* 10 */ - u8 _reserved0[24 - 11]; /* 11-15 */ - u8 loadparm[8]; /* 24-31 */ - u8 _reserved1[48 - 32]; /* 32-47 */ - u64 facilities; /* 48-55 */ - u8 _reserved2[84 - 56]; /* 56-83 */ - u8 fac84; /* 84 */ - u8 fac85; /* 85 */ - u8 _reserved3[91 - 86]; /* 86-90 */ - u8 flags; /* 91 */ - u8 _reserved4[100 - 92]; /* 92-99 */ - u32 rnsize2; /* 100-103 */ - u64 rnmax2; /* 104-111 */ - u8 _reserved5[4096 - 112]; /* 112-4095 */ -} __attribute__((packed, aligned(PAGE_SIZE))); - -static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE); -static struct read_info_sccb __initdata 
early_read_info_sccb; -static int __initdata early_read_info_sccb_valid; - -u64 sclp_facilities; -static u8 sclp_fac84; -static unsigned long long rzm; -static unsigned long long rnmax; - -static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) -{ - int rc; - - __ctl_set_bit(0, 9); - rc = sclp_service_call(cmd, sccb); - if (rc) - goto out; - __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | - PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); - local_irq_disable(); -out: - /* Contents of the sccb might have changed. */ - barrier(); - __ctl_clear_bit(0, 9); - return rc; -} - -static void __init sclp_read_info_early(void) -{ - int rc; - int i; - struct read_info_sccb *sccb; - sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, - SCLP_CMDW_READ_SCP_INFO}; - - sccb = &early_read_info_sccb; - for (i = 0; i < ARRAY_SIZE(commands); i++) { - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->header.function_code = 0x80; - sccb->header.control_mask[2] = 0x80; - rc = sclp_cmd_sync_early(commands[i], sccb); - } while (rc == -EBUSY); - - if (rc) - break; - if (sccb->header.response_code == 0x10) { - early_read_info_sccb_valid = 1; - break; - } - if (sccb->header.response_code != 0x1f0) - break; - } -} - -static void __init sclp_event_mask_early(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - int rc; - - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->mask_length = sizeof(sccb_mask_t); - rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); - } while (rc == -EBUSY); -} - -void __init sclp_facilities_detect(void) -{ - struct read_info_sccb *sccb; - - sclp_read_info_early(); - if (!early_read_info_sccb_valid) - return; - - sccb = &early_read_info_sccb; - sclp_facilities = sccb->facilities; - sclp_fac84 = sccb->fac84; - if (sccb->fac85 & 0x02) - S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; - rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; - rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; - rzm <<= 20; - - sclp_event_mask_early(); -} - -bool __init sclp_has_linemode(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)) - return 1; - return 0; -} - -bool __init sclp_has_vt220(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) - return 1; - return 0; -} - -unsigned long long sclp_get_rnmax(void) -{ - return rnmax; -} - -unsigned long long sclp_get_rzm(void) -{ - return rzm; -} - -/* - * This function will be called after sclp_facilities_detect(), which gets - * called from early.c code. Therefore the sccb should have valid contents. 
- */ -void __init sclp_get_ipl_info(struct sclp_ipl_info *info) -{ - struct read_info_sccb *sccb; - - if (!early_read_info_sccb_valid) - return; - sccb = &early_read_info_sccb; - info->is_valid = 1; - if (sccb->flags & 0x2) - info->has_dump = 1; - memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); -} - static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; @@ -195,7 +35,12 @@ static void sclp_sync_callback(struct sclp_req *req, void *data) complete(completion); } -static int do_sync_request(sclp_cmdw_t cmd, void *sccb) +int sclp_sync_request(sclp_cmdw_t cmd, void *sccb) +{ + return sclp_sync_request_timeout(cmd, sccb, 0); +} + +int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout) { struct completion completion; struct sclp_req *request; @@ -204,6 +49,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb) request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return -ENOMEM; + if (timeout) + request->queue_timeout = timeout; request->command = cmd; request->sccb = sccb; request->status = SCLP_REQ_FILLED; @@ -270,7 +117,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb); + rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb, + SCLP_QUEUE_INTERVAL); if (rc) goto out; if (sccb->header.response_code != 0x0010) { @@ -304,7 +152,7 @@ static int do_cpu_configure(sclp_cmdw_t cmd) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(cmd, sccb); + rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL); if (rc) goto out; switch (sccb->header.response_code) { @@ -354,14 +202,14 @@ struct assign_storage_sccb { int arch_get_memory_phys_device(unsigned long start_pfn) { - if (!rzm) + if (!sclp_rzm) return 0; - return PFN_PHYS(start_pfn) >> ilog2(rzm); + return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm); } static unsigned long long rn2addr(u16 rn) { - return (unsigned long long) (rn - 1) * rzm; + return (unsigned long long) (rn - 1) * sclp_rzm; } static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) @@ -374,7 +222,7 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) return -ENOMEM; sccb->header.length = PAGE_SIZE; sccb->rn = rn; - rc = do_sync_request(cmd, sccb); + rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL); if (rc) goto out; switch (sccb->header.response_code) { @@ -402,7 +250,7 @@ static int sclp_assign_storage(u16 rn) if (rc) return rc; start = rn2addr(rn); - storage_key_init_range(start, start + rzm); + storage_key_init_range(start, start + sclp_rzm); return 0; } @@ -429,7 +277,8 @@ static int sclp_attach_storage(u8 id) if (!sccb) return -ENOMEM; sccb->header.length = PAGE_SIZE; - rc = do_sync_request(0x00080001 | id << 8, sccb); + rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb, + SCLP_QUEUE_INTERVAL); if (rc) goto out; switch (sccb->header.response_code) { @@ -460,7 +309,7 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size, istart = rn2addr(incr->rn); if (start + size - 1 < istart) break; - if (start > istart + rzm - 1) + if (start > istart + sclp_rzm - 1) continue; if (online) rc |= sclp_assign_storage(incr->rn); @@ -524,7 +373,7 @@ static void __init add_memory_merged(u16 rn) if (!first_rn) goto skip_add; start = rn2addr(first_rn); - size = (unsigned long long ) num * rzm; + size = (unsigned long long) num * sclp_rzm; if (start >= VMEM_MAX_PHYS) goto skip_add; if (start + size > 
VMEM_MAX_PHYS) @@ -572,7 +421,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) } if (!assigned) new_incr->rn = last_rn + 1; - if (new_incr->rn > rnmax) { + if (new_incr->rn > sclp_rnmax) { kfree(new_incr); return; } @@ -615,8 +464,6 @@ static int __init sclp_detect_standby_memory(void) if (OLDMEM_BASE) /* No standby memory in kdump mode */ return 0; - if (!early_read_info_sccb_valid) - return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; rc = -ENOMEM; @@ -627,7 +474,7 @@ static int __init sclp_detect_standby_memory(void) for (id = 0; id <= sclp_max_storage_id; id++) { memset(sccb, 0, PAGE_SIZE); sccb->header.length = PAGE_SIZE; - rc = do_sync_request(0x00040001 | id << 8, sccb); + rc = sclp_sync_request(0x00040001 | id << 8, sccb); if (rc) goto out; switch (sccb->header.response_code) { @@ -659,7 +506,7 @@ static int __init sclp_detect_standby_memory(void) } if (rc || list_empty(&sclp_mem_list)) goto out; - for (i = 1; i <= rnmax - assigned; i++) + for (i = 1; i <= sclp_rnmax - assigned; i++) insert_increment(0, 1, 0); rc = register_memory_notifier(&sclp_mem_nb); if (rc) @@ -668,7 +515,7 @@ static int __init sclp_detect_standby_memory(void) if (rc) goto out; sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); - rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; + rc = PTR_ERR_OR_ZERO(sclp_pdev); if (rc) goto out_driver; sclp_add_standby_memory(); @@ -714,7 +561,7 @@ static int do_pci_configure(sclp_cmdw_t cmd, u32 fid) sccb->header.length = PAGE_SIZE; sccb->atype = SCLP_RECONFIG_PCI_ATPYE; sccb->aid = fid; - rc = do_sync_request(cmd, sccb); + rc = sclp_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { @@ -771,7 +618,7 @@ static int do_chp_configure(sclp_cmdw_t cmd) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(cmd, sccb); + rc = sclp_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { @@ -846,7 +693,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); + rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); if (rc) goto out; if (sccb->header.response_code != 0x0010) { @@ -862,3 +709,8 @@ out: free_page((unsigned long) sccb); return rc; } + +bool sclp_has_sprp(void) +{ + return !!(sclp_fac84 & 0x2); +} diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index ecf45c54f8c..5880def98fc 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -130,6 +130,31 @@ sclp_console_timeout(unsigned long data) } /* + * Drop oldest console buffer if sclp_con_drop is set + */ +static int +sclp_console_drop_buffer(void) +{ + struct list_head *list; + struct sclp_buffer *buffer; + void *page; + + if (!sclp_console_drop) + return 0; + list = sclp_con_outqueue.next; + if (sclp_con_queue_running) + /* The first element is in I/O */ + list = list->next; + if (list == &sclp_con_outqueue) + return 0; + list_del(list); + buffer = list_entry(list, struct sclp_buffer, list); + page = sclp_unmake_buffer(buffer); + list_add_tail((struct list_head *) page, &sclp_con_pages); + return 1; +} + +/* * Writes the given message to S390 system console */ static void @@ -150,9 +175,13 @@ sclp_console_write(struct console *console, const char *message, do { /* make sure we have a console output buffer */ if (sclp_conbuf == NULL) { + if (list_empty(&sclp_con_pages)) + 
sclp_console_full++; while (list_empty(&sclp_con_pages)) { if (sclp_con_suspended) goto out; + if (sclp_console_drop_buffer()) + break; spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); spin_lock_irqsave(&sclp_con_lock, flags); @@ -297,7 +326,7 @@ sclp_console_init(void) return rc; /* Allocate pages for output buffering */ INIT_LIST_HEAD(&sclp_con_pages); - for (i = 0; i < MAX_CONSOLE_PAGES; i++) { + for (i = 0; i < sclp_console_pages; i++) { page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); list_add_tail(page, &sclp_con_pages); } diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 444d36183a2..94415620747 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -32,7 +32,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) struct device *dev; s390_adjust_jiffies(); - pr_warning("cpu capability changed.\n"); + pr_info("CPU capability may have changed\n"); get_online_cpus(); for_each_online_cpu(cpu) { dev = get_cpu_device(cpu); diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c new file mode 100644 index 00000000000..648cb86afd4 --- /dev/null +++ b/drivers/s390/char/sclp_ctl.c @@ -0,0 +1,144 @@ +/* + * IOCTL interface for SCLP + * + * Copyright IBM Corp. 2012 + * + * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com> + */ + +#include <linux/compat.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/ioctl.h> +#include <linux/fs.h> +#include <asm/compat.h> +#include <asm/sclp_ctl.h> +#include <asm/sclp.h> + +#include "sclp.h" + +/* + * Supported command words + */ +static unsigned int sclp_ctl_sccb_wlist[] = { + 0x00400002, + 0x00410002, +}; + +/* + * Check if command word is supported + */ +static int sclp_ctl_cmdw_supported(unsigned int cmdw) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) { + if (cmdw == sclp_ctl_sccb_wlist[i]) + return 1; + } + return 0; +} + +static void __user *u64_to_uptr(u64 value) +{ + if (is_compat_task()) + return compat_ptr(value); + else + return (void __user *)(unsigned long)value; +} + +/* + * Start SCLP request + */ +static int sclp_ctl_ioctl_sccb(void __user *user_area) +{ + struct sclp_ctl_sccb ctl_sccb; + struct sccb_header *sccb; + int rc; + + if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) + return -EFAULT; + if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw)) + return -EOPNOTSUPP; + sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) { + rc = -EFAULT; + goto out_free; + } + if (sccb->length > PAGE_SIZE || sccb->length < 8) + return -EINVAL; + if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) { + rc = -EFAULT; + goto out_free; + } + rc = sclp_sync_request(ctl_sccb.cmdw, sccb); + if (rc) + goto out_free; + if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length)) + rc = -EFAULT; +out_free: + free_page((unsigned long) sccb); + return rc; +} + +/* + * SCLP SCCB ioctl function + */ +static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + void __user *argp; + + if (is_compat_task()) + argp = compat_ptr(arg); + else + argp = (void __user *) arg; + switch (cmd) { + case SCLP_CTL_SCCB: + return sclp_ctl_ioctl_sccb(argp); + default: /* unknown ioctl number */ + return -ENOTTY; + } +} + +/* + * File operations + */ +static const struct file_operations sclp_ctl_fops = { + .owner = 
THIS_MODULE, + .open = nonseekable_open, + .unlocked_ioctl = sclp_ctl_ioctl, + .compat_ioctl = sclp_ctl_ioctl, + .llseek = no_llseek, +}; + +/* + * Misc device definition + */ +static struct miscdevice sclp_ctl_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sclp", + .fops = &sclp_ctl_fops, +}; + +/* + * Register sclp_ctl misc device + */ +static int __init sclp_ctl_init(void) +{ + return misc_register(&sclp_ctl_device); +} +module_init(sclp_ctl_init); + +/* + * Deregister sclp_ctl misc device + */ +static void __exit sclp_ctl_exit(void) +{ + misc_deregister(&sclp_ctl_device); +} +module_exit(sclp_ctl_exit); diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c new file mode 100644 index 00000000000..1918d9dff45 --- /dev/null +++ b/drivers/s390/char/sclp_early.c @@ -0,0 +1,315 @@ +/* + * SCLP early driver + * + * Copyright IBM Corp. 2013 + */ + +#define KMSG_COMPONENT "sclp_early" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <asm/ctl_reg.h> +#include <asm/sclp.h> +#include <asm/ipl.h> +#include "sclp_sdias.h" +#include "sclp.h" + +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 + +struct read_info_sccb { + struct sccb_header header; /* 0-7 */ + u16 rnmax; /* 8-9 */ + u8 rnsize; /* 10 */ + u8 _reserved0[16 - 11]; /* 11-15 */ + u16 ncpurl; /* 16-17 */ + u16 cpuoff; /* 18-19 */ + u8 _reserved7[24 - 20]; /* 20-23 */ + u8 loadparm[8]; /* 24-31 */ + u8 _reserved1[48 - 32]; /* 32-47 */ + u64 facilities; /* 48-55 */ + u8 _reserved2a[76 - 56]; /* 56-75 */ + u32 ibc; /* 76-79 */ + u8 _reserved2b[84 - 80]; /* 80-83 */ + u8 fac84; /* 84 */ + u8 fac85; /* 85 */ + u8 _reserved3[91 - 86]; /* 86-90 */ + u8 flags; /* 91 */ + u8 _reserved4[100 - 92]; /* 92-99 */ + u32 rnsize2; /* 100-103 */ + u64 rnmax2; /* 104-111 */ + u8 _reserved5[120 - 112]; /* 112-119 */ + u16 hcpua; /* 120-121 */ + u8 _reserved6[4096 - 122]; /* 122-4095 */ +} __packed __aligned(PAGE_SIZE); + +static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata; +static unsigned int sclp_con_has_vt220 __initdata; +static unsigned int sclp_con_has_linemode __initdata; +static unsigned long sclp_hsa_size; +static unsigned int sclp_max_cpu; +static struct sclp_ipl_info sclp_ipl_info; +static unsigned char sclp_siif; +static u32 sclp_ibc; + +u64 sclp_facilities; +u8 sclp_fac84; +unsigned long long sclp_rzm; +unsigned long long sclp_rnmax; + +static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + __ctl_set_bit(0, 9); + rc = sclp_service_call(cmd, sccb); + if (rc) + goto out; + __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | + PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); + local_irq_disable(); +out: + /* Contents of the sccb might have changed. 
*/ + barrier(); + __ctl_clear_bit(0, 9); + return rc; +} + +static int __init sclp_read_info_early(struct read_info_sccb *sccb) +{ + int rc, i; + sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, + SCLP_CMDW_READ_SCP_INFO}; + + for (i = 0; i < ARRAY_SIZE(commands); i++) { + do { + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->header.function_code = 0x80; + sccb->header.control_mask[2] = 0x80; + rc = sclp_cmd_sync_early(commands[i], sccb); + } while (rc == -EBUSY); + + if (rc) + break; + if (sccb->header.response_code == 0x10) + return 0; + if (sccb->header.response_code != 0x1f0) + break; + } + return -EIO; +} + +static void __init sclp_facilities_detect(struct read_info_sccb *sccb) +{ + struct sclp_cpu_entry *cpue; + u16 boot_cpu_address, cpu; + + if (sclp_read_info_early(sccb)) + return; + + sclp_facilities = sccb->facilities; + sclp_fac84 = sccb->fac84; + if (sccb->fac85 & 0x02) + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; + sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; + sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; + sclp_rzm <<= 20; + sclp_ibc = sccb->ibc; + + if (!sccb->hcpua) { + if (MACHINE_IS_VM) + sclp_max_cpu = 64; + else + sclp_max_cpu = sccb->ncpurl; + } else { + sclp_max_cpu = sccb->hcpua + 1; + } + + boot_cpu_address = stap(); + cpue = (void *)sccb + sccb->cpuoff; + for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) { + if (boot_cpu_address != cpue->address) + continue; + sclp_siif = cpue->siif; + break; + } + + /* Save IPL information */ + sclp_ipl_info.is_valid = 1; + if (sccb->flags & 0x2) + sclp_ipl_info.has_dump = 1; + memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN); +} + +bool __init sclp_has_linemode(void) +{ + return !!sclp_con_has_linemode; +} + +bool __init sclp_has_vt220(void) +{ + return !!sclp_con_has_vt220; +} + +unsigned long long sclp_get_rnmax(void) +{ + return sclp_rnmax; +} + +unsigned long long sclp_get_rzm(void) +{ + return sclp_rzm; +} + +unsigned int sclp_get_max_cpu(void) +{ + return sclp_max_cpu; +} + +int sclp_has_siif(void) +{ + return sclp_siif; +} +EXPORT_SYMBOL(sclp_has_siif); + +unsigned int sclp_get_ibc(void) +{ + return sclp_ibc; +} +EXPORT_SYMBOL(sclp_get_ibc); + +/* + * This function will be called after sclp_facilities_detect(), which gets + * called from early.c code. The sclp_facilities_detect() function retrieves + * and saves the IPL information. 
+ */ +void __init sclp_get_ipl_info(struct sclp_ipl_info *info) +{ + *info = sclp_ipl_info; +} + +static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + do { + rc = sclp_cmd_sync_early(cmd, sccb); + } while (rc == -EBUSY); + + if (rc) + return -EIO; + if (((struct sccb_header *) sccb)->response_code != 0x0020) + return -EIO; + return 0; +} + +static void __init sccb_init_eq_size(struct sdias_sccb *sccb) +{ + memset(sccb, 0, sizeof(*sccb)); + + sccb->hdr.length = sizeof(*sccb); + sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb->evbuf.hdr.type = EVTYP_SDIAS; + sccb->evbuf.event_qual = SDIAS_EQ_SIZE; + sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; + sccb->evbuf.event_id = 4712; + sccb->evbuf.dbs = 1; +} + +static int __init sclp_set_event_mask(struct init_sccb *sccb, + unsigned long receive_mask, + unsigned long send_mask) +{ + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->mask_length = sizeof(sccb_mask_t); + sccb->receive_mask = receive_mask; + sccb->send_mask = send_mask; + return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); +} + +static long __init sclp_hsa_size_init(struct sdias_sccb *sccb) +{ + sccb_init_eq_size(sccb); + if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) + return -EIO; + if (sccb->evbuf.blk_cnt == 0) + return 0; + return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE; +} + +static long __init sclp_hsa_copy_wait(struct sccb_header *sccb) +{ + memset(sccb, 0, PAGE_SIZE); + sccb->length = PAGE_SIZE; + if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb)) + return -EIO; + if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0) + return 0; + return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE; +} + +unsigned long sclp_get_hsa_size(void) +{ + return sclp_hsa_size; +} + +static void __init sclp_hsa_size_detect(void *sccb) +{ + long size; + + /* First try synchronous interface (LPAR) */ + if (sclp_set_event_mask(sccb, 0, 0x40000010)) + return; + size = sclp_hsa_size_init(sccb); + if (size < 0) + return; + if (size != 0) + goto out; + /* Then try asynchronous interface (z/VM) */ + if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010)) + return; + size = sclp_hsa_size_init(sccb); + if (size < 0) + return; + size = sclp_hsa_copy_wait(sccb); + if (size < 0) + return; +out: + sclp_hsa_size = size; +} + +static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb) +{ + if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + return 1; +} + +static void __init sclp_console_detect(struct init_sccb *sccb) +{ + if (sccb->header.response_code != 0x20) + return; + + if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + sclp_con_has_vt220 = 1; + + if (sclp_con_check_linemode(sccb)) + sclp_con_has_linemode = 1; +} + +void __init sclp_early_detect(void) +{ + void *sccb = &sccb_early; + + sclp_facilities_detect(sccb); + sclp_hsa_size_detect(sccb); + + /* Turn off SCLP event notifications. Also save remote masks in the + * sccb. These are sufficient to detect sclp console capabilities. + */ + sclp_set_event_mask(sccb, 0, 0); + sclp_console_detect(sccb); +} diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index b1032931a1c..561a0414b35 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -1,7 +1,7 @@ /* - * Sclp "store data in absolut storage" + * SCLP "store data in absolute storage" * - * Copyright IBM Corp. 
2003, 2007 + * Copyright IBM Corp. 2003, 2013 * Author(s): Michael Holzheu */ @@ -14,6 +14,7 @@ #include <asm/debug.h> #include <asm/ipl.h> +#include "sclp_sdias.h" #include "sclp.h" #include "sclp_rw.h" @@ -22,46 +23,12 @@ #define SDIAS_RETRIES 300 #define SDIAS_SLEEP_TICKS 50 -#define EQ_STORE_DATA 0x0 -#define EQ_SIZE 0x1 -#define DI_FCP_DUMP 0x0 -#define ASA_SIZE_32 0x0 -#define ASA_SIZE_64 0x1 -#define EVSTATE_ALL_STORED 0x0 -#define EVSTATE_NO_DATA 0x3 -#define EVSTATE_PART_STORED 0x10 - static struct debug_info *sdias_dbf; static struct sclp_register sclp_sdias_register = { .send_mask = EVTYP_SDIAS_MASK, }; -struct sdias_evbuf { - struct evbuf_header hdr; - u8 event_qual; - u8 data_id; - u64 reserved2; - u32 event_id; - u16 reserved3; - u8 asa_size; - u8 event_status; - u32 reserved4; - u32 blk_cnt; - u64 asa; - u32 reserved5; - u32 fbn; - u32 reserved6; - u32 lbn; - u16 reserved7; - u16 dbs; -} __attribute__((packed)); - -struct sdias_sccb { - struct sccb_header hdr; - struct sdias_evbuf evbuf; -} __attribute__((packed)); - static struct sdias_sccb sccb __attribute__((aligned(4096))); static struct sdias_evbuf sdias_evbuf; @@ -148,8 +115,8 @@ int sclp_sdias_blk_count(void) sccb.hdr.length = sizeof(sccb); sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; - sccb.evbuf.event_qual = EQ_SIZE; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_SIZE; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; sccb.evbuf.dbs = 1; @@ -208,13 +175,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; sccb.evbuf.hdr.flags = 0; - sccb.evbuf.event_qual = EQ_STORE_DATA; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; #ifdef CONFIG_64BIT - sccb.evbuf.asa_size = ASA_SIZE_64; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; #else - sccb.evbuf.asa_size = ASA_SIZE_32; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32; #endif sccb.evbuf.event_status = 0; sccb.evbuf.blk_cnt = nr_blks; @@ -240,20 +207,19 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) } switch (sdias_evbuf.event_status) { - case EVSTATE_ALL_STORED: - TRACE("all stored\n"); - break; - case EVSTATE_PART_STORED: - TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); - break; - case EVSTATE_NO_DATA: - TRACE("no data\n"); - /* fall through */ - default: - pr_err("Error from SCLP while copying hsa. " - "Event status = %x\n", - sdias_evbuf.event_status); - rc = -EIO; + case SDIAS_EVSTATE_ALL_STORED: + TRACE("all stored\n"); + break; + case SDIAS_EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); + break; + case SDIAS_EVSTATE_NO_DATA: + TRACE("no data\n"); + /* fall through */ + default: + pr_err("Error from SCLP while copying hsa. Event status = %x\n", + sdias_evbuf.event_status); + rc = -EIO; } out: mutex_unlock(&sdias_mutex); diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h new file mode 100644 index 00000000000..f2431c41415 --- /dev/null +++ b/drivers/s390/char/sclp_sdias.h @@ -0,0 +1,46 @@ +/* + * SCLP "store data in absolute storage" + * + * Copyright IBM Corp. 
2003, 2013 + */ + +#ifndef SCLP_SDIAS_H +#define SCLP_SDIAS_H + +#include "sclp.h" + +#define SDIAS_EQ_STORE_DATA 0x0 +#define SDIAS_EQ_SIZE 0x1 +#define SDIAS_DI_FCP_DUMP 0x0 +#define SDIAS_ASA_SIZE_32 0x0 +#define SDIAS_ASA_SIZE_64 0x1 +#define SDIAS_EVSTATE_ALL_STORED 0x0 +#define SDIAS_EVSTATE_NO_DATA 0x3 +#define SDIAS_EVSTATE_PART_STORED 0x10 + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __packed; + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __packed; + +#endif /* SCLP_SDIAS_H */ diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 5aaaa2ec8df..b9a9f721716 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -97,13 +97,16 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg, static int __sclp_vt220_emit(struct sclp_vt220_request *request); static void sclp_vt220_emit_current(void); -/* Registration structure for our interest in SCLP event buffers */ +/* Registration structure for SCLP output event buffers */ static struct sclp_register sclp_vt220_register = { .send_mask = EVTYP_VT220MSG_MASK, + .pm_event_fn = sclp_vt220_pm_event_fn, +}; + +/* Registration structure for SCLP input event buffers */ +static struct sclp_register sclp_vt220_register_input = { .receive_mask = EVTYP_VT220MSG_MASK, - .state_change_fn = NULL, .receiver_fn = sclp_vt220_receiver_fn, - .pm_event_fn = sclp_vt220_pm_event_fn, }; @@ -362,6 +365,31 @@ sclp_vt220_timeout(unsigned long data) #define BUFFER_MAX_DELAY HZ/20 +/* + * Drop oldest console buffer if sclp_con_drop is set + */ +static int +sclp_vt220_drop_buffer(void) +{ + struct list_head *list; + struct sclp_vt220_request *request; + void *page; + + if (!sclp_console_drop) + return 0; + list = sclp_vt220_outqueue.next; + if (sclp_vt220_queue_running) + /* The first element is in I/O */ + list = list->next; + if (list == &sclp_vt220_outqueue) + return 0; + list_del(list); + request = list_entry(list, struct sclp_vt220_request, list); + page = request->sclp_req.sccb; + list_add_tail((struct list_head *) page, &sclp_vt220_empty); + return 1; +} + /* * Internal implementation of the write function. 
Write COUNT bytes of data * from memory at BUF @@ -390,12 +418,16 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, do { /* Create an sclp output buffer if none exists yet */ if (sclp_vt220_current_request == NULL) { + if (list_empty(&sclp_vt220_empty)) + sclp_console_full++; while (list_empty(&sclp_vt220_empty)) { - spin_unlock_irqrestore(&sclp_vt220_lock, flags); if (may_fail || sclp_vt220_suspended) goto out; - else - sclp_sync_wait(); + if (sclp_vt220_drop_buffer()) + break; + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + + sclp_sync_wait(); spin_lock_irqsave(&sclp_vt220_lock, flags); } page = (void *) sclp_vt220_empty.next; @@ -428,8 +460,8 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY; add_timer(&sclp_vt220_timer); } - spin_unlock_irqrestore(&sclp_vt220_lock, flags); out: + spin_unlock_irqrestore(&sclp_vt220_lock, flags); return overall_written; } @@ -686,9 +718,14 @@ static int __init sclp_vt220_tty_init(void) rc = tty_register_driver(driver); if (rc) goto out_init; + rc = sclp_register(&sclp_vt220_register_input); + if (rc) + goto out_reg; sclp_vt220_driver = driver; return 0; +out_reg: + tty_unregister_driver(driver); out_init: __sclp_vt220_cleanup(); out_driver: @@ -801,9 +838,7 @@ sclp_vt220_con_init(void) { int rc; - if (!CONSOLE_IS_SCLP) - return 0; - rc = __sclp_vt220_init(MAX_CONSOLE_PAGES); + rc = __sclp_vt220_init(sclp_console_pages); if (rc) return rc; /* Attach linux console */ diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index 54b3c79203f..91c3c642c76 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -77,7 +77,7 @@ struct tape_class_device *register_tape_dev( tcd->class_device = device_create(tape_class, device, tcd->char_device->dev, NULL, "%s", tcd->device_name); - rc = IS_ERR(tcd->class_device) ? 
PTR_ERR(tcd->class_device) : 0; + rc = PTR_RET(tcd->class_device); if (rc) goto fail_with_cdev; rc = sysfs_create_link( diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 981a99fd8d4..3478e19ae19 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c @@ -78,7 +78,8 @@ tape_std_assign(struct tape_device *device) rc = tape_do_io_interruptible(device, request); - del_timer(&timeout); + del_timer_sync(&timeout); + destroy_timer_on_stack(&timeout); if (rc != 0) { DBF_EVENT(3, "%08x: assign failed - device might be busy\n", diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index cee69dac3e1..e91b89dc6d1 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -125,10 +125,7 @@ static void tty3270_resize_work(struct work_struct *work); */ static void tty3270_set_timer(struct tty3270 *tp, int expires) { - if (expires == 0) - del_timer(&tp->timer); - else - mod_timer(&tp->timer, jiffies + expires); + mod_timer(&tp->timer, jiffies + expires); } /* @@ -744,7 +741,6 @@ tty3270_free_view(struct tty3270 *tp) { int pages; - del_timer_sync(&tp->timer); kbd_free(tp->kbd); raw3270_request_free(tp->kreset); raw3270_request_free(tp->read); @@ -810,7 +806,7 @@ static void tty3270_resize_work(struct work_struct *work) struct winsize ws; screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); - if (!screen) + if (IS_ERR(screen)) return; /* Switch to new output size */ spin_lock_bh(&tp->view.lock); @@ -877,6 +873,7 @@ tty3270_free(struct raw3270_view *view) { struct tty3270 *tp = container_of(view, struct tty3270, view); + del_timer_sync(&tp->timer); tty3270_free_screen(tp->screen, tp->view.rows); tty3270_free_view(tp); } @@ -942,7 +939,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) return rc; } - tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); + tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols); if (IS_ERR(tp->screen)) { rc = PTR_ERR(tp->screen); raw3270_put_view(&tp->view); @@ -1845,17 +1842,17 @@ static const struct tty_operations tty3270_ops = { .set_termios = tty3270_set_termios }; -void tty3270_create_cb(int minor) +static void tty3270_create_cb(int minor) { tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); } -void tty3270_destroy_cb(int minor) +static void tty3270_destroy_cb(int minor) { tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); } -struct raw3270_notifier tty3270_notifier = +static struct raw3270_notifier tty3270_notifier = { .create = tty3270_create_cb, .destroy = tty3270_destroy_cb, diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 9b3a24e8d3a..a8848db7b09 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) int ret; dev_num = iminor(inode); - if (dev_num > MAXMINOR) + if (dev_num >= MAXMINOR) return -ENODEV; logptr = &sys_ser[dev_num]; @@ -761,7 +761,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (dev) { - dev_set_name(dev, priv->internal_name); + dev_set_name(dev, "%s", priv->internal_name); dev->bus = &iucv_bus; dev->parent = iucv_root; dev->driver = &vmlogrdr_driver; diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index c180e3135b3..0efb27f6f19 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -89,7 +89,7 @@ static DEFINE_MUTEX(vmur_mutex); * urd 
references: * - ur_probe gets a urd reference, ur_remove drops the reference * dev_get_drvdata(&cdev->dev) - * - ur_open gets a urd reference, ur_relase drops the reference + * - ur_open gets a urd reference, ur_release drops the reference * (urf->urd) * * cdev references: @@ -922,8 +922,8 @@ static int ur_set_online(struct ccw_device *cdev) goto fail_free_cdev; } - urd->device = device_create(vmur_class, NULL, urd->char_device->dev, - NULL, "%s", node_id); + urd->device = device_create(vmur_class, &cdev->dev, + urd->char_device->dev, NULL, "%s", node_id); if (IS_ERR(urd->device)) { rc = PTR_ERR(urd->device); TRACE("ur_set_online: device_create rc=%d\n", rc); diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c deleted file mode 100644 index e9b72311e25..00000000000 --- a/drivers/s390/char/vmwatchdog.c +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Watchdog implementation based on z/VM Watchdog Timer API - * - * Copyright IBM Corp. 2004, 2009 - * - * The user space watchdog daemon can use this driver as - * /dev/vmwatchdog to have z/VM execute the specified CP - * command when the timeout expires. The default command is - * "IPL", which which cause an immediate reboot. - */ -#define KMSG_COMPONENT "vmwatchdog" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include <linux/init.h> -#include <linux/fs.h> -#include <linux/kernel.h> -#include <linux/miscdevice.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/slab.h> -#include <linux/suspend.h> -#include <linux/watchdog.h> - -#include <asm/ebcdic.h> -#include <asm/io.h> -#include <asm/uaccess.h> - -#define MAX_CMDLEN 240 -#define MIN_INTERVAL 15 -static char vmwdt_cmd[MAX_CMDLEN] = "IPL"; -static bool vmwdt_conceal; - -static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT; - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); -MODULE_DESCRIPTION("z/VM Watchdog Timer"); -module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644); -MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers"); -module_param_named(conceal, vmwdt_conceal, bool, 0644); -MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog " - " is active"); -module_param_named(nowayout, vmwdt_nowayout, bool, 0); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started" - " (default=CONFIG_WATCHDOG_NOWAYOUT)"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - -static unsigned int vmwdt_interval = 60; -static unsigned long vmwdt_is_open; -static int vmwdt_expect_close; - -static DEFINE_MUTEX(vmwdt_mutex); - -#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */ -#define VMWDT_RUNNING 1 /* The watchdog is armed */ - -enum vmwdt_func { - /* function codes */ - wdt_init = 0, - wdt_change = 1, - wdt_cancel = 2, - /* flags */ - wdt_conceal = 0x80000000, -}; - -static int __diag288(enum vmwdt_func func, unsigned int timeout, - char *cmd, size_t len) -{ - register unsigned long __func asm("2") = func; - register unsigned long __timeout asm("3") = timeout; - register unsigned long __cmdp asm("4") = virt_to_phys(cmd); - register unsigned long __cmdl asm("5") = len; - int err; - - err = -EINVAL; - asm volatile( - " diag %1,%3,0x288\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "+d" (err) : "d"(__func), "d"(__timeout), - "d"(__cmdp), "d"(__cmdl) : "1", "cc"); - return err; -} - -static int vmwdt_keepalive(void) -{ - /* we allocate new memory every time to avoid having - * to track the state. 
static allocation is not an - * option since that might not be contiguous in real - * storage in case of a modular build */ - static char *ebc_cmd; - size_t len; - int ret; - unsigned int func; - - ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL); - if (!ebc_cmd) - return -ENOMEM; - - len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN); - ASCEBC(ebc_cmd, MAX_CMDLEN); - EBC_TOUPPER(ebc_cmd, MAX_CMDLEN); - - func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; - set_bit(VMWDT_RUNNING, &vmwdt_is_open); - ret = __diag288(func, vmwdt_interval, ebc_cmd, len); - WARN_ON(ret != 0); - kfree(ebc_cmd); - return ret; -} - -static int vmwdt_disable(void) -{ - int ret = __diag288(wdt_cancel, 0, "", 0); - WARN_ON(ret != 0); - clear_bit(VMWDT_RUNNING, &vmwdt_is_open); - return ret; -} - -static int __init vmwdt_probe(void) -{ - /* there is no real way to see if the watchdog is supported, - * so we try initializing it with a NOP command ("BEGIN") - * that won't cause any harm even if the following disable - * fails for some reason */ - static char __initdata ebc_begin[] = { - 194, 197, 199, 201, 213 - }; - if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) - return -EINVAL; - return vmwdt_disable(); -} - -static int vmwdt_open(struct inode *i, struct file *f) -{ - int ret; - if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) - return -EBUSY; - ret = vmwdt_keepalive(); - if (ret) - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - return ret ? ret : nonseekable_open(i, f); -} - -static int vmwdt_close(struct inode *i, struct file *f) -{ - if (vmwdt_expect_close == 42) - vmwdt_disable(); - vmwdt_expect_close = 0; - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - return 0; -} - -static struct watchdog_info vmwdt_info = { - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, - .firmware_version = 0, - .identity = "z/VM Watchdog Timer", -}; - -static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - case WDIOC_GETSUPPORT: - if (copy_to_user((void __user *)arg, &vmwdt_info, - sizeof(vmwdt_info))) - return -EFAULT; - return 0; - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - return put_user(0, (int __user *)arg); - case WDIOC_GETTEMP: - return -EINVAL; - case WDIOC_SETOPTIONS: - { - int options, ret; - if (get_user(options, (int __user *)arg)) - return -EFAULT; - ret = -EINVAL; - if (options & WDIOS_DISABLECARD) { - ret = vmwdt_disable(); - if (ret) - return ret; - } - if (options & WDIOS_ENABLECARD) { - ret = vmwdt_keepalive(); - } - return ret; - } - case WDIOC_GETTIMEOUT: - return put_user(vmwdt_interval, (int __user *)arg); - case WDIOC_SETTIMEOUT: - { - int interval; - if (get_user(interval, (int __user *)arg)) - return -EFAULT; - if (interval < MIN_INTERVAL) - return -EINVAL; - vmwdt_interval = interval; - } - return vmwdt_keepalive(); - case WDIOC_KEEPALIVE: - return vmwdt_keepalive(); - } - return -EINVAL; -} - -static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg) -{ - int rc; - - mutex_lock(&vmwdt_mutex); - rc = __vmwdt_ioctl(cmd, arg); - mutex_unlock(&vmwdt_mutex); - return (long) rc; -} - -static ssize_t vmwdt_write(struct file *f, const char __user *buf, - size_t count, loff_t *ppos) -{ - if(count) { - if (!vmwdt_nowayout) { - size_t i; - - /* note: just in case someone wrote the magic character - * five months ago... 
*/ - vmwdt_expect_close = 0; - - for (i = 0; i != count; i++) { - char c; - if (get_user(c, buf+i)) - return -EFAULT; - if (c == 'V') - vmwdt_expect_close = 42; - } - } - /* someone wrote to us, we should restart timer */ - vmwdt_keepalive(); - } - return count; -} - -static int vmwdt_resume(void) -{ - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - return NOTIFY_DONE; -} - -/* - * It makes no sense to go into suspend while the watchdog is running. - * Depending on the memory size, the watchdog might trigger, while we - * are still saving the memory. - * We reuse the open flag to ensure that suspend and watchdog open are - * exclusive operations - */ -static int vmwdt_suspend(void) -{ - if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { - pr_err("The system cannot be suspended while the watchdog" - " is in use\n"); - return notifier_from_errno(-EBUSY); - } - if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - pr_err("The system cannot be suspended while the watchdog" - " is running\n"); - return notifier_from_errno(-EBUSY); - } - return NOTIFY_DONE; -} - -/* - * This function is called for suspend and resume. - */ -static int vmwdt_power_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - switch (event) { - case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: - return vmwdt_resume(); - case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: - return vmwdt_suspend(); - default: - return NOTIFY_DONE; - } -} - -static struct notifier_block vmwdt_power_notifier = { - .notifier_call = vmwdt_power_event, -}; - -static const struct file_operations vmwdt_fops = { - .open = &vmwdt_open, - .release = &vmwdt_close, - .unlocked_ioctl = &vmwdt_ioctl, - .write = &vmwdt_write, - .owner = THIS_MODULE, - .llseek = noop_llseek, -}; - -static struct miscdevice vmwdt_dev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &vmwdt_fops, -}; - -static int __init vmwdt_init(void) -{ - int ret; - - ret = vmwdt_probe(); - if (ret) - return ret; - ret = register_pm_notifier(&vmwdt_power_notifier); - if (ret) - return ret; - /* - * misc_register() has to be the last action in module_init(), because - * file operations will be available right after this. - */ - ret = misc_register(&vmwdt_dev); - if (ret) { - unregister_pm_notifier(&vmwdt_power_notifier); - return ret; - } - return 0; -} -module_init(vmwdt_init); - -static void __exit vmwdt_exit(void) -{ - unregister_pm_notifier(&vmwdt_power_notifier); - misc_deregister(&vmwdt_dev); -} -module_exit(vmwdt_exit); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 9e5e14686e7..1884653e447 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -17,6 +17,8 @@ #include <linux/miscdevice.h> #include <linux/debugfs.h> #include <linux/module.h> +#include <linux/memblock.h> + #include <asm/asm-offsets.h> #include <asm/ipl.h> #include <asm/sclp.h> @@ -30,8 +32,8 @@ #define TRACE(x...) 
debug_sprintf_event(zcore_dbf, 1, x) -#define TO_USER 0 -#define TO_KERNEL 1 +#define TO_USER 1 +#define TO_KERNEL 0 #define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */ enum arch_id { @@ -73,7 +75,7 @@ static struct ipl_parameter_block *ipl_block; * @count: Size of buffer, which should be copied * @mode: Either TO_KERNEL or TO_USER */ -static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) +int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) { int offs, blk_num; static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); @@ -151,7 +153,7 @@ static int __init init_cpu_info(enum arch_id arch) /* get info for boot cpu from lowcore, stored in the HSA */ - sa = kmalloc(sizeof(*sa), GFP_KERNEL); + sa = dump_save_area_create(0); if (!sa) return -ENOMEM; if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { @@ -159,7 +161,6 @@ static int __init init_cpu_info(enum arch_id arch) kfree(sa); return -EIO; } - zfcpdump_save_areas[0] = sa; return 0; } @@ -246,24 +247,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len) static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) { unsigned long end; - int i = 0; + int i; if (count == 0) return 0; end = start + count; - while (zfcpdump_save_areas[i]) { + for (i = 0; i < dump_save_areas.count; i++) { unsigned long cp_start, cp_end; /* copy range */ unsigned long sa_start, sa_end; /* save area range */ unsigned long prefix; unsigned long sa_off, len, buf_off; + struct save_area *save_area = dump_save_areas.areas[i]; - prefix = zfcpdump_save_areas[i]->pref_reg; + prefix = save_area->pref_reg; sa_start = prefix + sys_info.sa_base; sa_end = prefix + sys_info.sa_base + sys_info.sa_size; if ((end < sa_start) || (start > sa_end)) - goto next; + continue; cp_start = max(start, sa_start); cp_end = min(end, sa_end); @@ -272,10 +274,8 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) len = cp_end - cp_start; TRACE("copy_lc for: %lx\n", start); - if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len)) + if (copy_lc(buf + buf_off, save_area, sa_off, len)) return -EFAULT; -next: - i++; } return 0; } @@ -330,9 +330,9 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, mem_offs = 0; /* Copy from HSA data */ - if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { - size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE - - mem_start)); + if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) { + size = min((count - hdr_count), + (size_t) (sclp_get_hsa_size() - mem_start)); rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); if (rc) goto fail; @@ -413,33 +413,24 @@ static ssize_t zcore_memmap_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, filp->private_data, - MEMORY_CHUNKS * CHUNK_INFO_SIZE); + memblock.memory.cnt * CHUNK_INFO_SIZE); } static int zcore_memmap_open(struct inode *inode, struct file *filp) { - int i; + struct memblock_region *reg; char *buf; - struct mem_chunk *chunk_array; + int i = 0; - chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), - GFP_KERNEL); - if (!chunk_array) - return -ENOMEM; - detect_memory_layout(chunk_array, 0); - buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL); + buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL); if (!buf) { - kfree(chunk_array); return -ENOMEM; } - for (i = 0; i < MEMORY_CHUNKS; i++) { - sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx 
%016llx ", - (unsigned long long) chunk_array[i].addr, - (unsigned long long) chunk_array[i].size); - if (chunk_array[i].size == 0) - break; + for_each_memblock(memory, reg) { + sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ", + (unsigned long long) reg->base, + (unsigned long long) reg->size); } - kfree(chunk_array); filp->private_data = buf; return nonseekable_open(inode, filp); } @@ -492,7 +483,7 @@ static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, static char str[18]; if (hsa_available) - snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE); + snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size()); else snprintf(str, sizeof(str), "0\n"); return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); @@ -586,38 +577,21 @@ static int __init sys_info_init(enum arch_id arch, unsigned long mem_end) static int __init check_sdias(void) { - int rc, act_hsa_size; - - rc = sclp_sdias_blk_count(); - if (rc < 0) { + if (!sclp_get_hsa_size()) { TRACE("Could not determine HSA size\n"); - return rc; - } - act_hsa_size = (rc - 1) * PAGE_SIZE; - if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { - TRACE("HSA size too small: %i\n", act_hsa_size); - return -EINVAL; + return -ENODEV; } return 0; } static int __init get_mem_info(unsigned long *mem, unsigned long *end) { - int i; - struct mem_chunk *chunk_array; + struct memblock_region *reg; - chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), - GFP_KERNEL); - if (!chunk_array) - return -ENOMEM; - detect_memory_layout(chunk_array, 0); - for (i = 0; i < MEMORY_CHUNKS; i++) { - if (chunk_array[i].size == 0) - break; - *mem += chunk_array[i].size; - *end = max(*end, chunk_array[i].addr + chunk_array[i].size); + for_each_memblock(memory, reg) { + *mem += reg->size; + *end = max_t(unsigned long, *end, reg->base + reg->size); } - kfree(chunk_array); return 0; } @@ -637,8 +611,8 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr, hdr->num_pages = mem_size / PAGE_SIZE; hdr->tod = get_tod_clock(); get_cpu_id(&hdr->cpu_id); - for (i = 0; zfcpdump_save_areas[i]; i++) { - prefix = zfcpdump_save_areas[i]->pref_reg; + for (i = 0; i < dump_save_areas.count; i++) { + prefix = dump_save_areas.areas[i]->pref_reg; hdr->real_cpu_cnt++; if (!prefix) continue; @@ -664,7 +638,7 @@ static int __init zcore_reipl_init(void) ipl_block = (void *) __get_free_page(GFP_KERNEL); if (!ipl_block) return -ENOMEM; - if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) + if (ipib_info.ipib < sclp_get_hsa_size()) rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); else rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); |

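The deleted vmwatchdog.c implemented the standard Linux watchdog character-device protocol: opening the misc device (registered as "watchdog") arms the timer, any write is a keepalive, a written 'V' sets vmwdt_expect_close so that a subsequent close disarms the timer, and WDIOC_SETTIMEOUT/WDIOC_KEEPALIVE are handled in __vmwdt_ioctl(). A minimal user-space sketch of that protocol, for reference against the removed code; the device node and the 60-second timeout are illustrative, and whatever replaces this driver is outside the scope of this diff:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 60;	/* illustrative; the removed driver required >= MIN_INTERVAL (15s) */
	int fd = open("/dev/watchdog", O_WRONLY);	/* opening arms the watchdog */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* handled by the WDIOC_SETTIMEOUT case above */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* explicit ping */
	write(fd, "V", 1);			/* magic character: allow a clean close */
	close(fd);				/* with 'V' seen, the driver cancels the timer */
	return 0;
}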