Diffstat (limited to 'drivers/s390/char/sclp.c')
-rw-r--r--  drivers/s390/char/sclp.c  471
1 file changed, 401 insertions(+), 70 deletions(-)
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 4377e93a43d..c316051d9bd 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1,13 +1,13 @@
/*
- * drivers/s390/char/sclp.c
- * core function to access sclp interface
+ * core function to access sclp interface
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Peschke <mpeschke@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
@@ -16,16 +16,16 @@
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
#include <asm/types.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include "sclp.h"
#define SCLP_HEADER "sclp: "
-/* Structure for register_early_external_interrupt. */
-static ext_int_info_t ext_int_info_hwc;
-
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);
@@ -47,9 +47,53 @@ static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
+int sclp_console_pages = SCLP_CONSOLE_PAGES;
+/* Flag to indicate if buffer pages are dropped on buffer full condition */
+int sclp_console_drop = 0;
+/* Number of times the console dropped buffer pages */
+unsigned long sclp_console_full;
+
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+ complete(&sclp_request_queue_flushed);
+}
+
+static int __init sclp_setup_console_pages(char *str)
+{
+ int pages, rc;
+
+ rc = kstrtoint(str, 0, &pages);
+ if (!rc && pages >= SCLP_CONSOLE_PAGES)
+ sclp_console_pages = pages;
+ return 1;
+}
+
+__setup("sclp_con_pages=", sclp_setup_console_pages);
+
+static int __init sclp_setup_console_drop(char *str)
+{
+ int drop, rc;
+
+ rc = kstrtoint(str, 0, &drop);
+ if (!rc && drop)
+ sclp_console_drop = 1;
+ return 1;
+}
+
+__setup("sclp_con_drop=", sclp_setup_console_drop);
+
+static struct sclp_req sclp_suspend_req;
+
/* Timer for request retries. */
static struct timer_list sclp_request_timer;
+/* Timer for queued requests. */
+static struct timer_list sclp_queue_timer;
+
/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
sclp_init_state_uninitialized,
@@ -84,6 +128,12 @@ static volatile enum sclp_mask_state_t {
sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+ sclp_suspend_state_running,
+ sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
/* Maximum retry counts */
#define SCLP_INIT_RETRY 3
#define SCLP_MASK_RETRY 3
@@ -101,14 +151,19 @@ static int sclp_init(void);
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
- int cc;
+ int cc = 4; /* Initialize for program check handling */
asm volatile(
- " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
- " ipm %0\n"
- " srl %0,28"
- : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
+ "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
+ "1: ipm %0\n"
+ " srl %0,28\n"
+ "2:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ : "+&d" (cc) : "d" (command), "a" (__pa(sccb))
: "cc", "memory");
+ if (cc == 4)
+ return -EINVAL;
if (cc == 3)
return -EIO;
if (cc == 2)
@@ -163,6 +218,76 @@ sclp_request_timeout(unsigned long data)
sclp_process_queue();
}
+/*
+ * Returns the expire value in jiffies of the next pending request timeout,
+ * if any. Must be called with sclp_lock held.
+ */
+static unsigned long __sclp_req_queue_find_next_timeout(void)
+{
+ unsigned long expires_next = 0;
+ struct sclp_req *req;
+
+ list_for_each_entry(req, &sclp_req_queue, list) {
+ if (!req->queue_expires)
+ continue;
+ if (!expires_next ||
+ (time_before(req->queue_expires, expires_next)))
+ expires_next = req->queue_expires;
+ }
+ return expires_next;
+}
+
+/*
+ * Returns expired request, if any, and removes it from the list.
+ */
+static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
+{
+ unsigned long flags, now;
+ struct sclp_req *req;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ now = jiffies;
+ /* Don't need list_for_each_safe because we break out after list_del */
+ list_for_each_entry(req, &sclp_req_queue, list) {
+ if (!req->queue_expires)
+ continue;
+ if (time_before_eq(req->queue_expires, now)) {
+ if (req->status == SCLP_REQ_QUEUED) {
+ req->status = SCLP_REQ_QUEUED_TIMEOUT;
+ list_del(&req->list);
+ goto out;
+ }
+ }
+ }
+ req = NULL;
+out:
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return req;
+}
+
+/*
+ * Timeout handler for queued requests. Removes request from list and
+ * invokes callback. This timer can be set per request in situations where
+ * waiting too long would be harmful to the system, e.g. during SE reboot.
+ */
+static void sclp_req_queue_timeout(unsigned long data)
+{
+ unsigned long flags, expires_next;
+ struct sclp_req *req;
+
+ do {
+ req = __sclp_req_queue_remove_expired_req();
+ if (req && req->callback)
+ req->callback(req, req->callback_data);
+ } while (req);
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ expires_next = __sclp_req_queue_find_next_timeout();
+ if (expires_next)
+ mod_timer(&sclp_queue_timer, expires_next);
+ spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
/* Try to start a request. Return zero if the request was successfully
* started or if it will be started at a later time. Return non-zero otherwise.
* Called while sclp_lock is locked. */
@@ -178,7 +303,7 @@ __sclp_start_request(struct sclp_req *req)
req->start_count++;
if (rc == 0) {
- /* Sucessfully started request */
+ /* Successfully started request */
req->status = SCLP_REQ_RUNNING;
sclp_running_state = sclp_running_state_running;
__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
@@ -211,6 +336,8 @@ sclp_process_queue(void)
del_timer(&sclp_request_timer);
while (!list_empty(&sclp_req_queue)) {
req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+ if (!req->sccb)
+ goto do_post;
rc = __sclp_start_request(req);
if (rc == 0)
break;
@@ -222,6 +349,7 @@ sclp_process_queue(void)
sclp_request_timeout, 0);
break;
}
+do_post:
/* Post-processing for aborted request */
list_del(&req->list);
if (req->callback) {
@@ -233,6 +361,19 @@ sclp_process_queue(void)
spin_unlock_irqrestore(&sclp_lock, flags);
}
+static int __sclp_can_add_request(struct sclp_req *req)
+{
+ if (req == &sclp_suspend_req || req == &sclp_init_req)
+ return 1;
+ if (sclp_suspend_state != sclp_suspend_state_running)
+ return 0;
+ if (sclp_init_state != sclp_init_state_initialized)
+ return 0;
+ if (sclp_activation_state != sclp_activation_state_active)
+ return 0;
+ return 1;
+}
+
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
@@ -241,9 +382,7 @@ sclp_add_request(struct sclp_req *req)
int rc;
spin_lock_irqsave(&sclp_lock, flags);
- if ((sclp_init_state != sclp_init_state_initialized ||
- sclp_activation_state != sclp_activation_state_active) &&
- req != &sclp_init_req) {
+ if (!__sclp_can_add_request(req)) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EIO;
}
@@ -251,13 +390,26 @@ sclp_add_request(struct sclp_req *req)
req->start_count = 0;
list_add_tail(&req->list, &sclp_req_queue);
rc = 0;
+ if (req->queue_timeout) {
+ req->queue_expires = jiffies + req->queue_timeout * HZ;
+ if (!timer_pending(&sclp_queue_timer) ||
+ time_after(sclp_queue_timer.expires, req->queue_expires))
+ mod_timer(&sclp_queue_timer, req->queue_expires);
+ } else
+ req->queue_expires = 0;
/* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) {
+ if (!req->sccb) {
+ list_del(&req->list);
+ rc = -ENODATA;
+ goto out;
+ }
rc = __sclp_start_request(req);
if (rc)
list_del(&req->list);
}
+out:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
@@ -298,7 +450,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
reg->receiver_fn(evbuf);
spin_lock_irqsave(&sclp_lock, flags);
} else if (reg == NULL)
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
}
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
@@ -357,16 +509,17 @@ __sclp_find_req(u32 sccb)
/* Handler for external interruption. Perform request post-processing.
* Prepare read event data request if necessary. Start processing of next
* request on queue. */
-static void
-sclp_interrupt_handler(__u16 code)
+static void sclp_interrupt_handler(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
{
struct sclp_req *req;
u32 finished_sccb;
u32 evbuf_pending;
+ inc_irq_stat(IRQEXT_SCP);
spin_lock(&sclp_lock);
- finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
- evbuf_pending = S390_lowcore.ext_params & 0x3;
+ finished_sccb = param32 & 0xfffffff8;
+ evbuf_pending = param32 & 0x3;
if (finished_sccb) {
del_timer(&sclp_request_timer);
sclp_running_state = sclp_running_state_reset_pending;
@@ -413,7 +566,7 @@ sclp_sync_wait(void)
timeout = 0;
if (timer_pending(&sclp_request_timer)) {
/* Get timeout TOD value */
- timeout = get_clock() +
+ timeout = get_tod_clock_fast() +
sclp_tod_from_jiffies(sclp_request_timer.expires -
jiffies);
}
@@ -430,12 +583,12 @@ sclp_sync_wait(void)
cr0_sync &= 0xffff00a0;
cr0_sync |= 0x00000200;
__ctl_load(cr0_sync, 0, 0);
- __raw_local_irq_stosm(0x01);
+ __arch_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
if (timer_pending(&sclp_request_timer) &&
- get_clock() > timeout &&
+ get_tod_clock_fast() > timeout &&
del_timer(&sclp_request_timer))
sclp_request_timer.function(sclp_request_timer.data);
cpu_relax();
@@ -560,6 +713,7 @@ sclp_register(struct sclp_register *reg)
/* Trigger initial state change callback */
reg->sclp_receive_mask = 0;
reg->sclp_send_mask = 0;
+ reg->pm_event_posted = 0;
list_add(&reg->list, &sclp_reg_list);
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_init_mask(1);
@@ -616,16 +770,6 @@ sclp_remove_processed(struct sccb_header *sccb)
EXPORT_SYMBOL(sclp_remove_processed);
-struct init_sccb {
- struct sccb_header header;
- u16 _reserved;
- u16 mask_length;
- sccb_mask_t receive_mask;
- sccb_mask_t send_mask;
- sccb_mask_t sclp_receive_mask;
- sccb_mask_t sclp_send_mask;
-} __attribute__((packed));
-
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
@@ -780,12 +924,13 @@ EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
* request state to done. */
-static void
-sclp_check_handler(__u16 code)
+static void sclp_check_handler(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
{
u32 finished_sccb;
- finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
+ inc_irq_stat(IRQEXT_SCP);
+ finished_sccb = param32 & 0xfffffff8;
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
return;
@@ -827,8 +972,7 @@ sclp_check_interface(void)
spin_lock_irqsave(&sclp_lock, flags);
/* Prepare init mask command */
- rc = register_early_external_interrupt(0x2401, sclp_check_handler,
- &ext_int_info_hwc);
+ rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
if (rc) {
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
@@ -846,12 +990,12 @@ sclp_check_interface(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_set_bit(0, 9);
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_clear_bit(0,9);
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -861,8 +1005,7 @@ sclp_check_interface(void)
} else
rc = -EBUSY;
}
- unregister_early_external_interrupt(0x2401, sclp_check_handler,
- &ext_int_info_hwc);
+ unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
@@ -880,63 +1023,251 @@ static struct notifier_block sclp_reboot_notifier = {
.notifier_call = sclp_reboot_event
};
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+ struct sclp_register *reg;
+ unsigned long flags;
+
+ if (!rollback) {
+ spin_lock_irqsave(&sclp_lock, flags);
+ list_for_each_entry(reg, &sclp_reg_list, list)
+ reg->pm_event_posted = 0;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ }
+ do {
+ spin_lock_irqsave(&sclp_lock, flags);
+ list_for_each_entry(reg, &sclp_reg_list, list) {
+ if (rollback && reg->pm_event_posted)
+ goto found;
+ if (!rollback && !reg->pm_event_posted)
+ goto found;
+ }
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return;
+found:
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ if (reg->pm_event_fn)
+ reg->pm_event_fn(reg, sclp_pm_event);
+ reg->pm_event_posted = rollback ? 0 : 1;
+ } while (1);
+}
+
+/*
+ * Susend/resume callbacks for platform device
+ */
+
+static int sclp_freeze(struct device *dev)
+{
+ unsigned long flags;
+ int rc;
+
+ sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_suspend_state = sclp_suspend_state_suspended;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+
+ /* Init suspend data */
+ memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+ sclp_suspend_req.callback = sclp_suspend_req_cb;
+ sclp_suspend_req.status = SCLP_REQ_FILLED;
+ init_completion(&sclp_request_queue_flushed);
+
+ rc = sclp_add_request(&sclp_suspend_req);
+ if (rc == 0)
+ wait_for_completion(&sclp_request_queue_flushed);
+ else if (rc != -ENODATA)
+ goto fail_thaw;
+
+ rc = sclp_deactivate();
+ if (rc)
+ goto fail_thaw;
+ return 0;
+
+fail_thaw:
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_suspend_state = sclp_suspend_state_running;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+ return rc;
+}
+
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+ unsigned long flags;
+ int rc;
+
+ rc = sclp_reactivate();
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_suspend_state = sclp_suspend_state_running;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+
+ sclp_pm_event(event, 0);
+ return 0;
+}
+
+static int sclp_thaw(struct device *dev)
+{
+ return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+static int sclp_restore(struct device *dev)
+{
+ return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static const struct dev_pm_ops sclp_pm_ops = {
+ .freeze = sclp_freeze,
+ .thaw = sclp_thaw,
+ .restore = sclp_restore,
+};
+
+static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
+{
+ return sprintf(buf, "%i\n", sclp_console_pages);
+}
+
+static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);
+
+static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
+{
+ return sprintf(buf, "%i\n", sclp_console_drop);
+}
+
+static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);
+
+static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
+{
+ return sprintf(buf, "%lu\n", sclp_console_full);
+}
+
+static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);
+
+static struct attribute *sclp_drv_attrs[] = {
+ &driver_attr_con_pages.attr,
+ &driver_attr_con_drop.attr,
+ &driver_attr_con_full.attr,
+ NULL,
+};
+static struct attribute_group sclp_drv_attr_group = {
+ .attrs = sclp_drv_attrs,
+};
+static const struct attribute_group *sclp_drv_attr_groups[] = {
+ &sclp_drv_attr_group,
+ NULL,
+};
+
+static struct platform_driver sclp_pdrv = {
+ .driver = {
+ .name = "sclp",
+ .owner = THIS_MODULE,
+ .pm = &sclp_pm_ops,
+ .groups = sclp_drv_attr_groups,
+ },
+};
+
+static struct platform_device *sclp_pdev;
+
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
static int
sclp_init(void)
{
unsigned long flags;
- int rc;
+ int rc = 0;
spin_lock_irqsave(&sclp_lock, flags);
/* Check for previous or running initialization */
- if (sclp_init_state != sclp_init_state_uninitialized) {
- spin_unlock_irqrestore(&sclp_lock, flags);
- return 0;
- }
+ if (sclp_init_state != sclp_init_state_uninitialized)
+ goto fail_unlock;
sclp_init_state = sclp_init_state_initializing;
/* Set up variables */
INIT_LIST_HEAD(&sclp_req_queue);
INIT_LIST_HEAD(&sclp_reg_list);
list_add(&sclp_state_change_event.list, &sclp_reg_list);
init_timer(&sclp_request_timer);
+ init_timer(&sclp_queue_timer);
+ sclp_queue_timer.function = sclp_req_queue_timeout;
/* Check interface */
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface();
spin_lock_irqsave(&sclp_lock, flags);
- if (rc) {
- sclp_init_state = sclp_init_state_uninitialized;
- spin_unlock_irqrestore(&sclp_lock, flags);
- return rc;
- }
+ if (rc)
+ goto fail_init_state_uninitialized;
/* Register reboot handler */
rc = register_reboot_notifier(&sclp_reboot_notifier);
- if (rc) {
- sclp_init_state = sclp_init_state_uninitialized;
- spin_unlock_irqrestore(&sclp_lock, flags);
- return rc;
- }
+ if (rc)
+ goto fail_init_state_uninitialized;
/* Register interrupt handler */
- rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
- &ext_int_info_hwc);
- if (rc) {
- unregister_reboot_notifier(&sclp_reboot_notifier);
- sclp_init_state = sclp_init_state_uninitialized;
- spin_unlock_irqrestore(&sclp_lock, flags);
- return rc;
- }
+ rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
+ if (rc)
+ goto fail_unregister_reboot_notifier;
sclp_init_state = sclp_init_state_initialized;
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
- ctl_set_bit(0, 9);
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
sclp_init_mask(1);
return 0;
+
+fail_unregister_reboot_notifier:
+ unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+ sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return rc;
+}
+
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ if (sclp_suspend_state == sclp_suspend_state_suspended)
+ sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+ return NOTIFY_OK;
}
+static struct notifier_block sclp_on_panic_nb = {
+ .notifier_call = sclp_panic_notify,
+ .priority = SCLP_PANIC_PRIO,
+};
+
static __init int sclp_initcall(void)
{
+ int rc;
+
+ rc = platform_driver_register(&sclp_pdrv);
+ if (rc)
+ return rc;
+
+ sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+ rc = PTR_ERR_OR_ZERO(sclp_pdev);
+ if (rc)
+ goto fail_platform_driver_unregister;
+
+ rc = atomic_notifier_chain_register(&panic_notifier_list,
+ &sclp_on_panic_nb);
+ if (rc)
+ goto fail_platform_device_unregister;
+
return sclp_init();
+
+fail_platform_device_unregister:
+ platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+ platform_driver_unregister(&sclp_pdrv);
+ return rc;
}
arch_initcall(sclp_initcall);
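
The per-request queue timeout introduced above is driven entirely by the caller: sclp_add_request() converts queue_timeout (in seconds) to a jiffies deadline, and sclp_req_queue_timeout() later removes an expired request with status SCLP_REQ_QUEUED_TIMEOUT and invokes its callback. A minimal caller sketch, assuming the usual struct sclp_req fields from sclp.h (command, sccb, status, callback, callback_data) plus the new queue_timeout member; MY_SCLP_CMD, my_request_done() and my_issue_request() are illustrative placeholders, not part of the patch:

static void my_request_done(struct sclp_req *req, void *data)
{
	if (req->status == SCLP_REQ_QUEUED_TIMEOUT)
		pr_warn("sclp request timed out while queued\n"); /* removed by sclp_req_queue_timeout() */
	else
		complete((struct completion *) data);             /* normal completion path */
}

static int my_issue_request(void *sccb, struct completion *done)
{
	/* Must stay valid until the callback runs, hence not on the stack. */
	static struct sclp_req req;

	memset(&req, 0, sizeof(req));
	req.command = MY_SCLP_CMD;     /* placeholder command word */
	req.sccb = sccb;               /* page-aligned SCCB, as elsewhere in this file */
	req.status = SCLP_REQ_FILLED;
	req.callback = my_request_done;
	req.callback_data = done;
	req.queue_timeout = 60;        /* seconds; sclp_add_request() converts to jiffies */

	return sclp_add_request(&req); /* -EIO while suspended or before sclp_init() */
}

Requests without a queue_timeout (the default, zero) keep the old behaviour and stay queued indefinitely; only time-critical callers, e.g. during an SE reboot, need to set it.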