Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/dumpstack.c  12
-rw-r--r--  arch/s390/kernel/irq.c        64
-rw-r--r--  arch/s390/kernel/sclp.S        2
-rw-r--r--  arch/s390/kernel/smp.c        27
4 files changed, 84 insertions, 21 deletions
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 29829747725..87acc38f73c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
static void show_trace(struct task_struct *task, unsigned long *stack)
{
+ const unsigned long frame_size =
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
register unsigned long __r15 asm ("15");
unsigned long sp;
@@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
sp = task ? task->thread.ksp : __r15;
printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
- sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
- S390_lowcore.panic_stack);
+ sp = __show_trace(sp,
+ S390_lowcore.panic_stack + frame_size - 4096,
+ S390_lowcore.panic_stack + frame_size);
#endif
- sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
+ sp = __show_trace(sp,
+ S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+ S390_lowcore.async_stack + frame_size);
if (task)
__show_trace(sp, (unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);
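
Both limits passed to __show_trace() above are shifted by the same frame_size bias, which implies the lowcore stack fields hold a pointer frame_size bytes below the true stack top. A minimal sketch of the implied bounds check, assuming that layout (the helper name and exact comparisons are illustrative, not part of this file):

static int sp_within_stack(unsigned long sp, unsigned long lowcore_sp,
			   unsigned long frame_size, unsigned long size)
{
	/* Recover the true stack top by adding the frame_size bias back. */
	unsigned long top = lowcore_sp + frame_size;

	/* The usable range spans one stack size below that top. */
	return sp > top - size && sp <= top;
}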
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f7fb58903f6..408e866ae54 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -311,3 +311,67 @@ void measurement_alert_subclass_unregister(void)
spin_unlock(&ma_subclass_lock);
}
EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+
+void synchronize_irq(unsigned int irq)
+{
+ /*
+ * Not needed, the handler is protected by a lock and IRQs that occur
+ * after the handler is deleted are just NOPs.
+ */
+}
+EXPORT_SYMBOL_GPL(synchronize_irq);
+
+#ifndef CONFIG_PCI
+
+/* Only PCI devices have dynamically-defined IRQ handlers */
+
+int request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(free_irq);
+
+void enable_irq(unsigned int irq)
+{
+ WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(enable_irq);
+
+void disable_irq(unsigned int irq)
+{
+ WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(disable_irq);
+
+#endif /* !CONFIG_PCI */
+
+void disable_irq_nosync(unsigned int irq)
+{
+ disable_irq(irq);
+}
+EXPORT_SYMBOL_GPL(disable_irq_nosync);
+
+unsigned long probe_irq_on(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_on);
+
+int probe_irq_off(unsigned long val)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_off);
+
+unsigned int probe_irq_mask(unsigned long val)
+{
+ return val;
+}
+EXPORT_SYMBOL_GPL(probe_irq_mask);
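
With the stubs above, common code on a !CONFIG_PCI build still links against the generic IRQ API; request_irq() simply reports failure instead of registering a handler. A hedged usage sketch (the IRQ number, names and handler below are illustrative, not taken from this change):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_setup(void)
{
	int rc;

	/* Without CONFIG_PCI the request_irq() stub always returns -EINVAL. */
	rc = request_irq(42, demo_handler, 0, "demo", NULL);
	if (rc)
		return rc;

	free_irq(42, NULL);
	return 0;
}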
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index b6506ee32a3..29bd7bec417 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -225,7 +225,7 @@ _sclp_print:
ahi %r2,1
ltr %r0,%r0 # end of string?
jz .LfinalizemtoS4
- chi %r0,0x15 # end of line (NL)?
+ chi %r0,0x0a # end of line (NL)?
jz .LfinalizemtoS4
stc %r0,0(%r6,%r7) # copy to mto
la %r11,0(%r6,%r7)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 05674b66900..4f977d0d25c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -428,34 +428,27 @@ void smp_stop_cpu(void)
* This is the main routine where commands issued by other
* cpus are handled.
*/
-static void do_ext_call_interrupt(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
+static void smp_handle_ext_call(void)
{
unsigned long bits;
- int cpu;
-
- cpu = smp_processor_id();
- if (ext_code.code == 0x1202)
- inc_irq_stat(IRQEXT_EXC);
- else
- inc_irq_stat(IRQEXT_EMS);
- /*
- * handle bit signal external calls
- */
- bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
+ /* handle bit signal external calls */
+ bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
if (test_bit(ec_stop_cpu, &bits))
smp_stop_cpu();
-
if (test_bit(ec_schedule, &bits))
scheduler_ipi();
-
if (test_bit(ec_call_function, &bits))
generic_smp_call_function_interrupt();
-
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
+}
+
+static void do_ext_call_interrupt(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
+{
+ inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
+ smp_handle_ext_call();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -760,6 +753,8 @@ int __cpu_disable(void)
{
unsigned long cregs[16];
+ /* Handle possible pending IPIs */
+ smp_handle_ext_call();
set_cpu_online(smp_processor_id(), false);
/* Disable pseudo page faults on this cpu. */
pfault_fini();