Diffstat (limited to 'arch/sh/kernel/irq.c')
-rw-r--r--  arch/sh/kernel/irq.c  190
1 file changed, 75 insertions, 115 deletions
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index d2d41d04665..65a1ecd77f9 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,6 +12,8 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
@@ -33,9 +35,9 @@ void ack_bad_irq(unsigned int irq)
 
 #if defined(CONFIG_PROC_FS)
 /*
- * /proc/interrupts printing:
+ * /proc/interrupts printing for arch specific interrupts
  */
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	int j;
 
@@ -48,58 +50,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 
 	return 0;
 }
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	unsigned long flags, any_count = 0;
-	int i = *(loff_t *)v, j, prec;
-	struct irqaction *action;
-	struct irq_desc *desc;
-
-	if (i > nr_irqs)
-		return 0;
-
-	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
-		j *= 10;
-
-	if (i == nr_irqs)
-		return show_other_interrupts(p, prec);
-
-	if (i == 0) {
-		seq_printf(p, "%*s", prec + 8, "");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d", j);
-		seq_putc(p, '\n');
-	}
-
-	desc = irq_to_desc(i);
-	if (!desc)
-		return 0;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	for_each_online_cpu(j)
-		any_count |= kstat_irqs_cpu(i, j);
-	action = desc->action;
-	if (!action && !any_count)
-		goto out;
-
-	seq_printf(p, "%*d: ", prec, i);
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-	seq_printf(p, " %14s", desc->chip->name);
-	seq_printf(p, "-%-8s", desc->name);
-
-	if (action) {
-		seq_printf(p, " %s", action->name);
-		while ((action = action->next) != NULL)
-			seq_printf(p, ", %s", action->name);
-	}
-
-	seq_putc(p, '\n');
-out:
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
-}
 #endif
 
 #ifdef CONFIG_IRQSTACKS
@@ -113,19 +63,14 @@ union irq_ctx {
 
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
 
-asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-#ifdef CONFIG_IRQSTACKS
 	union irq_ctx *curctx, *irqctx;
-#endif
 
-	irq_enter();
-	irq = irq_demux(irq);
-
-#ifdef CONFIG_IRQSTACKS
 	curctx = (union irq_ctx *)current_thread_info();
 	irqctx = hardirq_ctx[smp_processor_id()];
 
@@ -164,20 +109,9 @@ asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
 			"r5", "r6", "r7", "r8", "t", "pr"
 		);
 	} else
-#endif
 		generic_handle_irq(irq);
-
-	irq_exit();
-
-	set_irq_regs(old_regs);
-	return 1;
 }
 
-#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
@@ -215,71 +149,97 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+	__asm__ __volatile__ (
+		"mov	r15, r9		\n"
+		"jsr	@%0		\n"
+		/* switch to the softirq stack */
+		" mov	%1, r15		\n"
+		/* restore the thread stack */
+		"mov	r9, r15		\n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
+#endif
 
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+	irq_enter();
 
-		__asm__ __volatile__ (
-			"mov	r15, r9		\n"
-			"jsr	@%0		\n"
-			/* switch to the softirq stack */
-			" mov	%1, r15		\n"
-			/* restore the thread stack */
-			"mov	r9, r15		\n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
+	irq = irq_demux(irq_lookup(irq));
 
-		/*
-		 * Shouldnt happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
+	if (irq != NO_IRQ_IGNORE) {
+		handle_one_irq(irq);
+		irq_finish(irq);
 	}
 
-	local_irq_restore(flags);
+	irq_exit();
+
+	set_irq_regs(old_regs);
+
+	return IRQ_HANDLED;
 }
-#endif
 
 void __init init_IRQ(void)
 {
 	plat_irq_setup();
 
-	/*
-	 * Pin any of the legacy IRQ vectors that haven't already been
-	 * grabbed by the platform
-	 */
-	reserve_irq_legacy();
-
 	/* Perform the machine specific initialisation */
 	if (sh_mv.mv_init_irq)
 		sh_mv.mv_init_irq();
 
+	intc_finalize();
+
 	irq_ctx_init(smp_processor_id());
 }
 
-#ifdef CONFIG_SPARSE_IRQ
-int __init arch_probe_nr_irqs(void)
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
 {
-	nr_irqs = sh_mv.mv_nr_irqs;
-	return 0;
+	unsigned int irq, cpu = smp_processor_id();
+
+	for_each_active_irq(irq) {
+		struct irq_data *data = irq_get_irq_data(irq);
+
+		if (data->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(data->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
+				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
						    irq, cpu);
+
+				cpumask_setall(data->affinity);
+			}
+			irq_set_affinity(irq, data->affinity);
+		}
+	}
 }
 #endif
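Note on the refactor above: do_IRQ() no longer open-codes the IRQ stack handling. It now calls handle_one_irq(), which performs the inline-assembly stack switch when CONFIG_IRQSTACKS is set and degenerates to a plain generic_handle_irq() call otherwise. In the assembly shown in do_softirq_own_stack(), "jsr @%0" calls __do_softirq while the instruction in its delay slot (" mov %1, r15") has already pointed r15 at the dedicated per-CPU stack; "mov r9, r15" restores the interrupted stack once the call returns. The userspace sketch below mimics that shape, with swapcontext(3) standing in for the r15 swap. It is an illustration only, not kernel code, and irq_stack/handler are names invented for the example:

/*
 * Userspace analogy of the patch's stack switch: run a handler on a
 * dedicated stack, then resume the interrupted context.  The kernel
 * does this with "jsr @%0" plus a delay-slot "mov %1, r15"; here the
 * obsolescent but still available ucontext API plays that role.
 */
#include <stdio.h>
#include <ucontext.h>

#define IRQ_STACK_SIZE	(64 * 1024)

static ucontext_t thread_ctx;	/* the interrupted "thread" context */
static ucontext_t irq_ctx;	/* context bound to the private stack */

static void handler(void)
{
	int marker;
	/* &marker lives inside the dedicated stack, not main()'s */
	printf("handler running near %p\n", (void *)&marker);
}

int main(void)
{
	static char irq_stack[IRQ_STACK_SIZE];	/* cf. hardirq_stack[] */
	int marker;

	printf("main running near    %p\n", (void *)&marker);

	getcontext(&irq_ctx);
	irq_ctx.uc_stack.ss_sp = irq_stack;
	irq_ctx.uc_stack.ss_size = sizeof(irq_stack);
	irq_ctx.uc_link = &thread_ctx;		/* "mov r9, r15" on return */
	makecontext(&irq_ctx, handler, 0);

	/* switch stacks, run the handler, come back, as handle_one_irq() does */
	swapcontext(&thread_ctx, &irq_ctx);
	puts("back on the original stack");
	return 0;
}

The two printed addresses land in different regions, confirming the handler ran on the private stack; the kernel version additionally copies the task pointer and the saved stack pointer into the irq_ctx thread_info so stack unwinders can walk across the switch.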

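A note on the new migrate_irqs() hunk: when a CPU is taken down, every active IRQ homed on that CPU (data->node == cpu) is retargeted with cpumask_any_and(data->affinity, cpu_online_mask). A return value >= nr_cpu_ids means the affinity mask and the online mask no longer intersect, so the patch widens the affinity with cpumask_setall() before calling irq_set_affinity(), logging the fallback through pr_info_ratelimited() (hence the new linux/ratelimit.h include). Below is a toy model of just that mask arithmetic; the bitmask typedef, pick_target() and NR_TOY_CPUS are invented for the example:

/* Toy model of migrate_irqs()'s affinity fallback, one bit per CPU. */
#include <stdio.h>

#define NR_TOY_CPUS	8

typedef unsigned long cpumask_t;	/* toy stand-in for struct cpumask */

/* rough analogue of cpumask_any_and(): any CPU in both masks, or -1 */
static int pick_target(cpumask_t affinity, cpumask_t online)
{
	cpumask_t usable = affinity & online;

	if (!usable)
		return -1;		/* the "newcpu >= nr_cpu_ids" case */
	return __builtin_ctzl(usable);	/* lowest set bit counts as "any" */
}

int main(void)
{
	cpumask_t online = 0x0d;	/* CPUs 0, 2, 3 up; CPU 1 going down */
	cpumask_t affinity = 0x02;	/* an IRQ bound exclusively to CPU 1 */

	int target = pick_target(affinity, online);
	if (target < 0) {
		/* mirror of cpumask_setall(): allow any CPU, then retry */
		affinity = (1UL << NR_TOY_CPUS) - 1;
		target = pick_target(affinity, online);
		printf("IRQ no longer affine to CPU1, moved to CPU%d\n",
		       target);
	} else {
		printf("IRQ moved to CPU%d\n", target);
	}
	return 0;
}

Choosing the lowest set bit is only a stand-in; cpumask_any_and() promises some CPU from the intersection, not any particular one.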