author     David S. Miller <davem@davemloft.net>  2010-04-14 02:04:29 -0700
committer  David S. Miller <davem@davemloft.net>  2010-04-14 02:04:29 -0700
commit     ec687886de00e1e63f3d821ccade9a61590408ed
tree       093e303475754d30b679bd35ae9cc5522832057e /arch
parent     035df35d968323f6f463c8789553e8589efcbcd4

sparc64: Run NMIs on the hardirq stack.
Otherwise we can overflow the main stack with the function tracer
enabled.
Signed-off-by: David S. Miller <davem@davemloft.net>
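
In outline, the fix brackets the NMI handler with a switch onto the per-cpu
hardirq stack, using the two helpers this patch moves from irq_64.c into
kstack.h so that nmi.c can share them. A condensed sketch of the patched
perfctr_irq() (simplified from the diff below, not the verbatim source):

/* Shape of the patched NMI handler; the watchdog bookkeeping and
 * notifier details are elided.
 */
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	void *orig_sp;

	clear_softint(1 << irq);

	nmi_enter();

	/* Switch %sp to this cpu's dedicated hardirq stack, saving the
	 * interrupted stack pointer.  Everything below, including any
	 * frames the function tracer injects, now consumes hardirq-stack
	 * space instead of the task's possibly nearly-full kernel stack.
	 */
	orig_sp = set_hardirq_stack();

	/* ... notify_die(), NMI watchdog check, PCR reprogramming ... */

	/* Put the interrupted stack pointer back before leaving. */
	restore_hardirq_stack(orig_sp);

	nmi_exit();
}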
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/kernel/irq_64.c   19
-rw-r--r--  arch/sparc/kernel/kstack.h   19
-rw-r--r--  arch/sparc/kernel/nmi.c       7
3 files changed, 27 insertions, 18 deletions
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 2b04c722cc3..830d70a3e20 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -47,6 +47,7 @@
 
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS	(IMAP_INR + 1)
 
@@ -713,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-	if (orig_sp < sp ||
-	    orig_sp > (sp + THREAD_SIZE)) {
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
-		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-	}
-
-	return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
 void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283d1c0..53dfb92e09f 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
 
 }
 
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 #endif /* _KSTACK_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 75a3d1a2535..a4bd7ba74c8 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* We don't have a real NMI on sparc64, but we can fake one
  * up using profiling counter overflow interrupts and interrupt
  * levels.
@@ -92,6 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
@@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 
 	nmi_enter();
 
+	orig_sp = set_hardirq_stack();
+
 	if (notify_die(DIE_NMI, "nmi", regs, 0,
 		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
 		touched = 1;
@@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		pcr_ops->write(pcr_enable);
 	}
 
+	restore_hardirq_stack(orig_sp);
+
 	nmi_exit();
 }
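
Two details in the moved helpers are worth noting. They are marked
always_inline because they rewrite %sp with inline assembly: performed as a
real call on sparc64, the switch could be undone or corrupted when the
callee's register window is restored on return, so it has to happen in the
NMI handler's own frame. And the new stack pointer is set to
THREAD_SIZE - 192 - STACK_BIAS above the stack base, which appears to
reserve the usual 192-byte sparc64 register-window save area at the top of
the new stack while accounting for the 64-bit ABI's biased stack addressing
(STACK_BIAS).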