Diffstat (limited to 'arch/sh/kernel/irq.c')

 -rw-r--r--  arch/sh/kernel/irq.c | 254
 1 file changed, 123 insertions(+), 131 deletions(-)
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index c7ebd6aec95..65a1ecd77f9 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -11,11 +11,16 @@
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
-#include <asm/irq.h>
+#include <linux/ftrace.h>
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
+#include <asm/machvec.h>
 #include <asm/uaccess.h>
 #include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
+#include <cpu/mmu_context.h>
+
+atomic_t irq_err_count;
 
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
@@ -24,44 +29,30 @@
  */
 void ack_bad_irq(unsigned int irq)
 {
+	atomic_inc(&irq_err_count);
 	printk("unexpected IRQ trap at vector %02x\n", irq);
 }
 
 #if defined(CONFIG_PROC_FS)
-int show_interrupts(struct seq_file *p, void *v)
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_puts(p, "           ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%d       ", j);
-		seq_putc(p, '\n');
-	}
+	int j;
+
+	seq_printf(p, "%*s: ", prec, "NMI");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
+	seq_printf(p, "  Non-maskable interrupts\n");
+
+	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 
-	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
-		if (!action)
-			goto unlock;
-		seq_printf(p, "%3d: ", i);
-		seq_printf(p, "%10u ", kstat_irqs(i));
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
-		seq_printf(p, "  %s", action->name);
-
-		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-		seq_putc(p, '\n');
-unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	}
 	return 0;
 }
 #endif
 
-#ifdef CONFIG_4KSTACKS
+#ifdef CONFIG_IRQSTACKS
 /*
  * per-CPU IRQ handling contexts (thread information and stack)
  */
@@ -70,57 +61,16 @@ union irq_ctx {
 	u32 stack[THREAD_SIZE/sizeof(u32)];
 };
 
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
-#endif
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
 
-asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
-		      unsigned long r6, unsigned long r7,
-		      struct pt_regs regs)
+static inline void handle_one_irq(unsigned int irq)
 {
-	int irq = r4;
-#ifdef CONFIG_4KSTACKS
 	union irq_ctx *curctx, *irqctx;
-#endif
 
-	irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 1KB free? */
-	{
-		long sp;
-
-		__asm__ __volatile__ ("and r15, %0" :
-					"=r" (sp) : "0" (THREAD_SIZE - 1));
-
-		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-			printk("do_IRQ: stack overflow: %ld\n",
-			       sp - sizeof(struct thread_info));
-			dump_stack();
-		}
-	}
-#endif
-#ifdef CONFIG_CPU_HAS_INTEVT
-	__asm__ __volatile__ (
-#ifdef CONFIG_CPU_HAS_SR_RB
-		"stc	r2_bank, %0\n\t"
-#else
-		"mov.l	@%1, %0\n\t"
-#endif
-		"shlr2	%0\n\t"
-		"shlr2	%0\n\t"
-		"shlr	%0\n\t"
-		"add	#-16, %0\n\t"
-		: "=z" (irq), "=r" (r4)
-		: "1" (INTEVT)
-		: "memory"
-	);
-#endif
-
-	irq = irq_demux(irq);
-
-#ifdef CONFIG_4KSTACKS
 	curctx = (union irq_ctx *)current_thread_info();
 	irqctx = hardirq_ctx[smp_processor_id()];
@@ -137,41 +87,31 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
 		irqctx->tinfo.task = curctx->tinfo.task;
 		irqctx->tinfo.previous_sp = current_stack_pointer;
 
+		/*
+		 * Copy the softirq bits in preempt_count so that the
+		 * softirq checks work in the hardirq context.
+		 */
+		irqctx->tinfo.preempt_count =
+			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
 		__asm__ __volatile__ (
 			"mov	%0, r4		\n"
-			"mov	%1, r5		\n"
-			"mov	r15, r9		\n"
-			"jsr	@%2		\n"
+			"mov	r15, r8		\n"
+			"jsr	@%1		\n"
 			/* switch to the irq stack */
-			" mov	%3, r15		\n"
+			" mov	%2, r15		\n"
 			/* restore the stack (ring zero) */
-			"mov	r9, r15		\n"
+			"mov	r8, r15		\n"
 			: /* no outputs */
-			: "r" (irq), "r" (&regs), "r" (__do_IRQ), "r" (isp)
-			/* XXX: A somewhat excessive clobber list? -PFM */
+			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
 			: "memory", "r0", "r1", "r2", "r3", "r4",
 			  "r5", "r6", "r7", "r8", "t", "pr"
 		);
 	} else
-#endif
-		__do_IRQ(irq, &regs);
-
-	irq_exit();
-
-	return 1;
+		generic_handle_irq(irq);
 }
 
-#ifdef CONFIG_4KSTACKS
-/*
- * These should really be __section__(".bss.page_aligned") as well, but
- * gcc's 3.0 and earlier don't handle that correctly.
- */
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__aligned__(THREAD_SIZE)));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__aligned__(THREAD_SIZE)));
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
@@ -195,7 +135,7 @@ void irq_ctx_init(int cpu)
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.preempt_count	= SOFTIRQ_OFFSET;
+	irqctx->tinfo.preempt_count	= 0;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
 
 	softirq_ctx[cpu] = irqctx;
@@ -209,45 +149,97 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-extern asmlinkage void __do_softirq(void);
-
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
 
-	local_irq_save(flags);
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
 
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
+	__asm__ __volatile__ (
+		"mov	r15, r9		\n"
+		"jsr	@%0		\n"
+		/* switch to the softirq stack */
+		" mov	%1, r15		\n"
+		/* restore the thread stack */
+		"mov	r9, r15		\n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
+#endif
 
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
-		__asm__ __volatile__ (
-			"mov	r15, r9		\n"
-			"jsr	@%0		\n"
-			/* switch to the softirq stack */
-			" mov	%1, r15		\n"
-			/* restore the thread stack */
-			"mov	r9, r15		\n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			/* XXX: A somewhat excessive clobber list? -PFM */
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
+	irq_enter();
+
+	irq = irq_demux(irq_lookup(irq));
+
+	if (irq != NO_IRQ_IGNORE) {
+		handle_one_irq(irq);
+		irq_finish(irq);
 	}
 
-	local_irq_restore(flags);
+	irq_exit();
+
+	set_irq_regs(old_regs);
+
+	return IRQ_HANDLED;
+}
+
+void __init init_IRQ(void)
+{
+	plat_irq_setup();
+
+	/* Perform the machine specific initialisation */
+	if (sh_mv.mv_init_irq)
+		sh_mv.mv_init_irq();
+
+	intc_finalize();
+
+	irq_ctx_init(smp_processor_id());
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	unsigned int irq, cpu = smp_processor_id();
+
+	for_each_active_irq(irq) {
+		struct irq_data *data = irq_get_irq_data(irq);
+
+		if (data->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(data->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
+				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+						    irq, cpu);
+
+				cpumask_setall(data->affinity);
+			}
+			irq_set_affinity(irq, data->affinity);
		}
	}
 }
-EXPORT_SYMBOL(do_softirq);
 #endif
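For reference, the format strings in the new arch_show_interrupts() ("%*s: ", "%10u " per online CPU, then a label) produce /proc/interrupts rows shaped like the following on a two-CPU machine. The counts shown are invented placeholders, not real output:

NMI:          0          0   Non-maskable interrupts
ERR:          0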
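The CONFIG_IRQSTACKS machinery is easiest to follow in isolation: irq_ctx_init() carves one context per CPU out of the THREAD_SIZE-aligned static arrays, and handle_one_irq()/do_softirq_own_stack() take the initial stack pointer to be the first address past that context, since SH stacks grow downward. Below is a minimal userspace sketch of the same arithmetic; NR_CPUS and THREAD_SIZE are placeholder values here, not the kernel's configured ones.

#include <stdio.h>

#define NR_CPUS		4	/* placeholder */
#define THREAD_SIZE	8192	/* placeholder; configuration dependent */

/* Mirrors hardirq_stack[] above: per-CPU stacks packed back to back,
 * aligned so a thread_info lookup of the form sp & ~(THREAD_SIZE - 1)
 * lands on the start of the current slice. */
static char hardirq_stack[NR_CPUS * THREAD_SIZE]
	__attribute__((aligned(THREAD_SIZE)));

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* irq_ctx_init(): the cpu'th THREAD_SIZE slice */
		char *ctx = &hardirq_stack[cpu * THREAD_SIZE];
		/* handle_one_irq(): isp is one past the context, and the
		 * stack grows down from there */
		char *isp = ctx + THREAD_SIZE;

		printf("cpu%d: ctx=%p isp=%p\n", cpu, (void *)ctx, (void *)isp);
	}
	return 0;
}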
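The preempt_count merge added to handle_one_irq() is the subtle part of the patch: the borrowed IRQ stack carries its own thread_info, so without the copy the interrupted task's softirq nesting would be invisible to in_softirq()-style checks while running on that stack. A standalone sketch of the mask arithmetic follows; the 8-bits-at-shift-8 SOFTIRQ_MASK layout mirrors the kernel's <linux/preempt.h> of this era, while the surrounding scaffolding is hypothetical userspace code, not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define SOFTIRQ_SHIFT	8
#define SOFTIRQ_MASK	(0xffU << SOFTIRQ_SHIFT)	/* softirq count bits */

/* Same expression as in handle_one_irq() above: keep the IRQ context's
 * own bits, but import the interrupted task's softirq bits. */
static uint32_t merge_softirq_bits(uint32_t irqctx_count, uint32_t curctx_count)
{
	return (irqctx_count & ~SOFTIRQ_MASK) | (curctx_count & SOFTIRQ_MASK);
}

int main(void)
{
	uint32_t task_count = 1U << SOFTIRQ_SHIFT;	/* task was in softirq */
	uint32_t irq_count  = 0;			/* fresh IRQ context */

	/* prints 0x100: the softirq nesting follows us onto the IRQ stack */
	printf("merged preempt_count: %#x\n",
	       merge_softirq_bits(irq_count, task_count));
	return 0;
}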
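The deleted CONFIG_CPU_HAS_INTEVT assembly in the old do_IRQ() decoded the hardware event register into an IRQ number: two shlr2 plus one shlr shift right by five (the 0x20 stride between SH event codes), and add #-16 subtracts the sixteen leading non-IRQ slots. The new do_IRQ() instead receives a ready-made vector and maps it through irq_lookup()/irq_demux(). A C equivalent of the old decode, for reference; the helper name is mine, not the kernel's:

#include <stdio.h>

/* Equivalent of the removed asm: (INTEVT >> 5) - 16 */
static unsigned int intevt_to_irq(unsigned long intevt)
{
	return (unsigned int)((intevt >> 5) - 16);
}

int main(void)
{
	/* e.g. event code 0x400 maps to IRQ 16 */
	printf("INTEVT 0x400 -> irq %u\n", intevt_to_irq(0x400));
	return 0;
}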
