Diffstat (limited to 'drivers/s390/char/sclp.c')
 drivers/s390/char/sclp.c | 211 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 177 insertions(+), 34 deletions(-)
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 35cc4686b99..c316051d9bd 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -7,6 +7,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
@@ -19,15 +20,12 @@
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include "sclp.h"
#define SCLP_HEADER "sclp: "
-/* Structure for register_early_external_interrupt. */
-static ext_int_info_t ext_int_info_hwc;
-
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);
@@ -52,16 +50,50 @@ static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);
+/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
+int sclp_console_pages = SCLP_CONSOLE_PAGES;
+/* Flag to indicate if buffer pages are dropped on buffer full condition */
+int sclp_console_drop = 0;
+/* Number of times the console dropped buffer pages */
+unsigned long sclp_console_full;
+
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
complete(&sclp_request_queue_flushed);
}
+static int __init sclp_setup_console_pages(char *str)
+{
+ int pages, rc;
+
+ rc = kstrtoint(str, 0, &pages);
+ if (!rc && pages >= SCLP_CONSOLE_PAGES)
+ sclp_console_pages = pages;
+ return 1;
+}
+
+__setup("sclp_con_pages=", sclp_setup_console_pages);
+
+static int __init sclp_setup_console_drop(char *str)
+{
+ int drop, rc;
+
+ rc = kstrtoint(str, 0, &drop);
+ if (!rc && drop)
+ sclp_console_drop = 1;
+ return 1;
+}
+
+__setup("sclp_con_drop=", sclp_setup_console_drop);
+
static struct sclp_req sclp_suspend_req;
/* Timer for request retries. */
static struct timer_list sclp_request_timer;
+/* Timer for queued requests. */
+static struct timer_list sclp_queue_timer;
+
/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
sclp_init_state_uninitialized,
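
The three globals added above are consumed by the console front ends (sclp_con.c and sclp_vt220.c): sclp_console_pages sizes the output buffer pool, sclp_console_drop selects the policy on a buffer-full condition, and sclp_console_full counts how often that policy fired. A minimal sketch of the implied policy follows; sclp_conbuf_alloc() and sclp_oldest_buffer() are hypothetical stand-ins, not functions from this patch.

/* Hedged sketch: how a console writer might honor the new globals.
 * sclp_conbuf_alloc() and sclp_oldest_buffer() are illustrative only. */
static void *sclp_console_get_page(void)
{
	void *page = sclp_conbuf_alloc();	/* pool of sclp_console_pages pages */

	if (page)
		return page;
	sclp_console_full++;			/* account the buffer-full event */
	if (sclp_console_drop)
		return NULL;			/* drop the new output */
	return sclp_oldest_buffer();		/* else recycle the oldest buffer */
}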
@@ -119,14 +151,19 @@ static int sclp_init(void);
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
- int cc;
+ int cc = 4; /* Initialize for program check handling */
asm volatile(
- " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
- " ipm %0\n"
- " srl %0,28"
- : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
+ "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
+ "1: ipm %0\n"
+ " srl %0,28\n"
+ "2:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ : "+&d" (cc) : "d" (command), "a" (__pa(sccb))
: "cc", "memory");
+ if (cc == 4)
+ return -EINVAL;
if (cc == 3)
return -EIO;
if (cc == 2)
@@ -181,6 +218,76 @@ sclp_request_timeout(unsigned long data)
sclp_process_queue();
}
+/*
+ * Returns the expire value in jiffies of the next pending request timeout,
+ * if any. Must be called with sclp_lock held.
+ */
+static unsigned long __sclp_req_queue_find_next_timeout(void)
+{
+ unsigned long expires_next = 0;
+ struct sclp_req *req;
+
+ list_for_each_entry(req, &sclp_req_queue, list) {
+ if (!req->queue_expires)
+ continue;
+ if (!expires_next ||
+ (time_before(req->queue_expires, expires_next)))
+ expires_next = req->queue_expires;
+ }
+ return expires_next;
+}
+
+/*
+ * Returns expired request, if any, and removes it from the list.
+ */
+static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
+{
+ unsigned long flags, now;
+ struct sclp_req *req;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ now = jiffies;
+ /* Don't need list_for_each_safe because we break out after list_del */
+ list_for_each_entry(req, &sclp_req_queue, list) {
+ if (!req->queue_expires)
+ continue;
+ if (time_before_eq(req->queue_expires, now)) {
+ if (req->status == SCLP_REQ_QUEUED) {
+ req->status = SCLP_REQ_QUEUED_TIMEOUT;
+ list_del(&req->list);
+ goto out;
+ }
+ }
+ }
+ req = NULL;
+out:
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return req;
+}
+
+/*
+ * Timeout handler for queued requests. Removes request from list and
+ * invokes callback. This timer can be set per request in situations where
+ * waiting too long would be harmful to the system, e.g. during SE reboot.
+ */
+static void sclp_req_queue_timeout(unsigned long data)
+{
+ unsigned long flags, expires_next;
+ struct sclp_req *req;
+
+ do {
+ req = __sclp_req_queue_remove_expired_req();
+ if (req && req->callback)
+ req->callback(req, req->callback_data);
+ } while (req);
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ expires_next = __sclp_req_queue_find_next_timeout();
+ if (expires_next)
+ mod_timer(&sclp_queue_timer, expires_next);
+ spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
/* Try to start a request. Return zero if the request was successfully
* started or if it will be started at a later time. Return non-zero otherwise.
* Called while sclp_lock is locked. */
@@ -283,6 +390,13 @@ sclp_add_request(struct sclp_req *req)
req->start_count = 0;
list_add_tail(&req->list, &sclp_req_queue);
rc = 0;
+ if (req->queue_timeout) {
+ req->queue_expires = jiffies + req->queue_timeout * HZ;
+ if (!timer_pending(&sclp_queue_timer) ||
+ time_after(sclp_queue_timer.expires, req->queue_expires))
+ mod_timer(&sclp_queue_timer, req->queue_expires);
+ } else
+ req->queue_expires = 0;
/* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) {
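
From a caller's perspective, the new timeout is opt-in: set queue_timeout (in seconds) before sclp_add_request(), and if the request is still queued when it expires it is completed with status SCLP_REQ_QUEUED_TIMEOUT. A hypothetical caller; my_submit() and my_callback() are illustrative names only:

/* Hypothetical user of the new per-request queue timeout. */
static void my_callback(struct sclp_req *req, void *data)
{
	if (req->status == SCLP_REQ_QUEUED_TIMEOUT)
		pr_warn("sclp request timed out on the queue\n");
}

static void my_submit(struct sclp_req *req)
{
	req->queue_timeout = 5;		/* seconds; 0 leaves the timeout off */
	req->callback = my_callback;
	if (sclp_add_request(req))	/* immediate failure: complete by hand */
		my_callback(req, req->callback_data);
}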
@@ -336,7 +450,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
reg->receiver_fn(evbuf);
spin_lock_irqsave(&sclp_lock, flags);
} else if (reg == NULL)
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
}
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
@@ -395,13 +509,14 @@ __sclp_find_req(u32 sccb)
/* Handler for external interruption. Perform request post-processing.
* Prepare read event data request if necessary. Start processing of next
* request on queue. */
-static void sclp_interrupt_handler(unsigned int ext_int_code,
+static void sclp_interrupt_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct sclp_req *req;
u32 finished_sccb;
u32 evbuf_pending;
+ inc_irq_stat(IRQEXT_SCP);
spin_lock(&sclp_lock);
finished_sccb = param32 & 0xfffffff8;
evbuf_pending = param32 & 0x3;
@@ -451,7 +566,7 @@ sclp_sync_wait(void)
timeout = 0;
if (timer_pending(&sclp_request_timer)) {
/* Get timeout TOD value */
- timeout = get_clock() +
+ timeout = get_tod_clock_fast() +
sclp_tod_from_jiffies(sclp_request_timer.expires -
jiffies);
}
@@ -473,7 +588,7 @@ sclp_sync_wait(void)
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
if (timer_pending(&sclp_request_timer) &&
- get_clock() > timeout &&
+ get_tod_clock_fast() > timeout &&
del_timer(&sclp_request_timer))
sclp_request_timer.function(sclp_request_timer.data);
cpu_relax();
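
get_tod_clock_fast() replaces get_clock() in this busy-wait loop; it stores the TOD clock with STCKF and skips the serialization of the plain store, which is cheaper when polled repeatedly. The comparison stays in TOD units because sclp_tod_from_jiffies() converts the remaining jiffies; for reference, it is defined in sclp.h essentially as below (quoted from memory, so treat it as an approximation):

/* Approximate jiffies-to-TOD conversion: bit 51 of the TOD clock ticks
 * once per microsecond, so one second is roughly 2^32 TOD units. */
static inline u64 sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}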
@@ -655,16 +770,6 @@ sclp_remove_processed(struct sccb_header *sccb)
EXPORT_SYMBOL(sclp_remove_processed);
-struct init_sccb {
- struct sccb_header header;
- u16 _reserved;
- u16 mask_length;
- sccb_mask_t receive_mask;
- sccb_mask_t send_mask;
- sccb_mask_t sclp_receive_mask;
- sccb_mask_t sclp_send_mask;
-} __attribute__((packed));
-
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
@@ -819,11 +924,12 @@ EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
* request state to done. */
-static void sclp_check_handler(unsigned int ext_int_code,
+static void sclp_check_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
u32 finished_sccb;
+ inc_irq_stat(IRQEXT_SCP);
finished_sccb = param32 & 0xfffffff8;
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
@@ -866,8 +972,7 @@ sclp_check_interface(void)
spin_lock_irqsave(&sclp_lock, flags);
/* Prepare init mask command */
- rc = register_early_external_interrupt(0x2401, sclp_check_handler,
- &ext_int_info_hwc);
+ rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
if (rc) {
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
@@ -885,12 +990,12 @@ sclp_check_interface(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_set_bit(0, 9);
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_clear_bit(0,9);
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -900,8 +1005,7 @@ sclp_check_interface(void)
} else
rc = -EBUSY;
}
- unregister_early_external_interrupt(0x2401, sclp_check_handler,
- &ext_int_info_hwc);
+ unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
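
The conversion from ctl_set_bit(0, 9)/ctl_clear_bit(0, 9) to irq_subclass_register()/irq_subclass_unregister() keeps the same effect, gating service-signal external interrupts via control register 0 bit 9, but the helpers reference-count the subclass so independent users cannot clobber each other. The general pattern, sketched with a hypothetical handler my_handler():

/* Sketch of the registration pattern used above; my_handler is hypothetical. */
static int my_enable_service_signal(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, my_handler);
	if (rc)
		return rc;
	/* Per the comments above, must run with IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	return 0;
}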
@@ -1025,11 +1129,47 @@ static const struct dev_pm_ops sclp_pm_ops = {
.restore = sclp_restore,
};
+static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
+{
+ return sprintf(buf, "%i\n", sclp_console_pages);
+}
+
+static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);
+
+static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
+{
+ return sprintf(buf, "%i\n", sclp_console_drop);
+}
+
+static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);
+
+static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
+{
+ return sprintf(buf, "%lu\n", sclp_console_full);
+}
+
+static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);
+
+static struct attribute *sclp_drv_attrs[] = {
+ &driver_attr_con_pages.attr,
+ &driver_attr_con_drop.attr,
+ &driver_attr_con_full.attr,
+ NULL,
+};
+static struct attribute_group sclp_drv_attr_group = {
+ .attrs = sclp_drv_attrs,
+};
+static const struct attribute_group *sclp_drv_attr_groups[] = {
+ &sclp_drv_attr_group,
+ NULL,
+};
+
static struct platform_driver sclp_pdrv = {
.driver = {
.name = "sclp",
.owner = THIS_MODULE,
.pm = &sclp_pm_ops,
+ .groups = sclp_drv_attr_groups,
},
};
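
Wiring the attribute group into the driver's .groups means the files are created and removed together with driver registration, avoiding the races of adding attributes after the fact. Assuming the standard platform-driver sysfs layout, the read-only files should appear as /sys/bus/platform/drivers/sclp/con_pages, con_drop and con_full. A trivial userspace check (hypothetical, not part of the patch):

#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/bus/platform/drivers/sclp/con_full", "r");

	if (!f) {
		perror("con_full");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("console pages dropped: %s", line);
	fclose(f);
	return 0;
}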
@@ -1053,6 +1193,8 @@ sclp_init(void)
INIT_LIST_HEAD(&sclp_reg_list);
list_add(&sclp_state_change_event.list, &sclp_reg_list);
init_timer(&sclp_request_timer);
+ init_timer(&sclp_queue_timer);
+ sclp_queue_timer.function = sclp_req_queue_timeout;
/* Check interface */
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface();
@@ -1064,15 +1206,14 @@ sclp_init(void)
if (rc)
goto fail_init_state_uninitialized;
/* Register interrupt handler */
- rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
- &ext_int_info_hwc);
+ rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
if (rc)
goto fail_unregister_reboot_notifier;
sclp_init_state = sclp_init_state_initialized;
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
- ctl_set_bit(0, 9);
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
sclp_init_mask(1);
return 0;
@@ -1109,10 +1250,12 @@ static __init int sclp_initcall(void)
rc = platform_driver_register(&sclp_pdrv);
if (rc)
return rc;
+
sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
- rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+ rc = PTR_ERR_OR_ZERO(sclp_pdev);
if (rc)
goto fail_platform_driver_unregister;
+
rc = atomic_notifier_chain_register(&panic_notifier_list,
&sclp_on_panic_nb);
if (rc)