Diffstat (limited to 'arch/xtensa/kernel/irq.c')
| -rw-r--r-- | arch/xtensa/kernel/irq.c | 183 |
1 file changed, 110 insertions, 73 deletions
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4340ee076bd..3eee94f621e 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -4,7 +4,7 @@
  * Xtensa built-in interrupt controller and some generic functions copied
  * from i386.
  *
- * Copyright (C) 2002 - 2006 Tensilica, Inc.
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
  * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  *
  *
@@ -18,31 +18,27 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kernel_stat.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
+#include <linux/irqchip/xtensa-pic.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <asm/mxregs.h>
 #include <asm/uaccess.h>
 #include <asm/platform.h>
 
-static unsigned int cached_irq_mask;
-
 atomic_t irq_err_count;
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-
-asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (irq >= NR_IRQS) {
+	if (hwirq >= NR_IRQS) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, irq);
+				__func__, hwirq);
 	}
 
-	irq_enter();
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{
@@ -57,95 +53,136 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
 	}
 #endif
 	generic_handle_irq(irq);
-
-	irq_exit();
-	set_irq_regs(old_regs);
 }
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
+#ifdef CONFIG_SMP
+	show_ipi_list(p, prec);
+#endif
 	seq_printf(p, "%*s: ", prec, "ERR");
 	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
 	return 0;
 }
 
-static void xtensa_irq_mask(struct irq_data *d)
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+		unsigned long int_irq, unsigned long ext_irq,
+		unsigned long *out_hwirq, unsigned int *out_type)
 {
-	cached_irq_mask &= ~(1 << d->irq);
-	set_sr (cached_irq_mask, INTENABLE);
+	if (WARN_ON(intsize < 1 || intsize > 2))
+		return -EINVAL;
+	if (intsize == 2 && intspec[1] == 1) {
+		int_irq = xtensa_map_ext_irq(ext_irq);
+		if (int_irq < XCHAL_NUM_INTERRUPTS)
+			*out_hwirq = int_irq;
+		else
+			return -EINVAL;
+	} else {
+		*out_hwirq = int_irq;
+	}
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
 }
 
-static void xtensa_irq_unmask(struct irq_data *d)
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+		irq_hw_number_t hw)
 {
-	cached_irq_mask |= 1 << d->irq;
-	set_sr (cached_irq_mask, INTENABLE);
+	struct irq_chip *irq_chip = d->host_data;
+	u32 mask = 1 << hw;
+
+	if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_simple_irq, "level");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_edge_irq, "edge");
+		irq_clear_status_flags(irq, IRQ_LEVEL);
+	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_level_irq, "level");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+	} else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_percpu_irq, "timer");
+		irq_clear_status_flags(irq, IRQ_LEVEL);
+	} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+		/* XCHAL_INTTYPE_MASK_NMI */
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_level_irq, "level");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+	}
+
+	return 0;
 }
 
-static void xtensa_irq_enable(struct irq_data *d)
+unsigned xtensa_map_ext_irq(unsigned ext_irq)
 {
-	variant_irq_enable(d->irq);
-	xtensa_irq_unmask(d->irq);
-}
+	unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+		XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
+	unsigned i;
 
-static void xtensa_irq_disable(struct irq_data *d)
-{
-	xtensa_irq_mask(d->irq);
-	variant_irq_disable(d->irq);
+	for (i = 0; mask; ++i, mask >>= 1) {
+		if ((mask & 1) && ext_irq-- == 0)
+			return i;
+	}
+	return XCHAL_NUM_INTERRUPTS;
 }
 
-static void xtensa_irq_ack(struct irq_data *d)
+unsigned xtensa_get_ext_irq_no(unsigned irq)
 {
-	set_sr(1 << d->irq, INTCLEAR);
+	unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+		XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
+		((1u << irq) - 1);
+	return hweight32(mask);
 }
 
-static int xtensa_irq_retrigger(struct irq_data *d)
+void __init init_IRQ(void)
 {
-	set_sr (1 << d->irq, INTSET);
-	return 1;
-}
-
+#ifdef CONFIG_OF
+	irqchip_init();
+#else
+#ifdef CONFIG_HAVE_SMP
+	xtensa_mx_init_legacy(NULL);
+#else
+	xtensa_pic_init_legacy(NULL);
+#endif
+#endif
 
-static struct irq_chip xtensa_irq_chip = {
-	.name		= "xtensa",
-	.irq_enable	= xtensa_irq_enable,
-	.irq_disable	= xtensa_irq_disable,
-	.irq_mask	= xtensa_irq_mask,
-	.irq_unmask	= xtensa_irq_unmask,
-	.irq_ack	= xtensa_irq_ack,
-	.irq_retrigger	= xtensa_irq_retrigger,
-};
+#ifdef CONFIG_SMP
+	ipi_init();
+#endif
+	variant_init_irq();
+}
 
-void __init init_IRQ(void)
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
 {
-	int index;
-
-	for (index = 0; index < XTENSA_NR_IRQS; index++) {
-		int mask = 1 << index;
+	unsigned int i, cpu = smp_processor_id();
 
-		if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
-			irq_set_chip_and_handler(index, &xtensa_irq_chip,
-				handle_simple_irq);
+	for_each_active_irq(i) {
+		struct irq_data *data = irq_get_irq_data(i);
+		unsigned int newcpu;
 
-		else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
-			irq_set_chip_and_handler(index, &xtensa_irq_chip,
-				handle_edge_irq);
+		if (irqd_is_per_cpu(data))
+			continue;
 
-		else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
-			irq_set_chip_and_handler(index, &xtensa_irq_chip,
-				handle_level_irq);
+		if (!cpumask_test_cpu(cpu, data->affinity))
+			continue;
 
-		else if (mask & XCHAL_INTTYPE_MASK_TIMER)
-			irq_set_chip_and_handler(index, &xtensa_irq_chip,
-				handle_edge_irq);
+		newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
 
-		else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
-		     /* XCHAL_INTTYPE_MASK_NMI */
+		if (newcpu >= nr_cpu_ids) {
+			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+					    i, cpu);
 
-			irq_set_chip_and_handler(index, &xtensa_irq_chip,
-				handle_level_irq);
+			cpumask_setall(data->affinity);
+		}
+		irq_set_affinity(i, data->affinity);
 	}
-
-	cached_irq_mask = 0;
-
-	variant_init_irq();
 }
+#endif /* CONFIG_HOTPLUG_CPU */
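For reference, xtensa_map_ext_irq() and xtensa_get_ext_irq_no() in the diff above are inverse mappings between an external interrupt pin index and a hardware interrupt number, both derived by bit-scanning the external-interrupt type masks. Below is a minimal user-space sketch of the same arithmetic (not kernel code): EXT_IRQ_MASK and NUM_INTERRUPTS are hypothetical stand-ins for the core's XCHAL_INTTYPE_MASK_EXTERN_* and XCHAL_NUM_INTERRUPTS configuration constants, and __builtin_popcount() stands in for the kernel's hweight32().

/*
 * Sketch of the ext-irq mapping helpers added above.
 * EXT_IRQ_MASK is a made-up example; real values come from the
 * core's XCHAL_INTTYPE_MASK_EXTERN_EDGE/_LEVEL configuration.
 */
#include <stdio.h>

#define NUM_INTERRUPTS	32		/* stand-in for XCHAL_NUM_INTERRUPTS */
#define EXT_IRQ_MASK	0x000000f0u	/* hwirqs 4..7 wired to external pins */

/* n-th set bit of EXT_IRQ_MASK -> hardware interrupt number */
static unsigned map_ext_irq(unsigned ext_irq)
{
	unsigned mask = EXT_IRQ_MASK;
	unsigned i;

	for (i = 0; mask; ++i, mask >>= 1)
		if ((mask & 1) && ext_irq-- == 0)
			return i;
	return NUM_INTERRUPTS;		/* no such external pin */
}

/* hardware interrupt number -> external pin index (the inverse) */
static unsigned get_ext_irq_no(unsigned irq)
{
	/* count the external hwirqs below this one, as hweight32() would */
	return __builtin_popcount(EXT_IRQ_MASK & ((1u << irq) - 1));
}

int main(void)
{
	unsigned e;

	for (e = 0; e < 4; ++e) {
		unsigned hw = map_ext_irq(e);
		printf("ext %u -> hwirq %u -> ext %u\n",
		       e, hw, get_ext_irq_no(hw));
	}
	return 0;
}

With this example mask, external pins 0..3 map to hwirqs 4..7 and back again; xtensa_irq_domain_xlate() relies on exactly this round trip when the second cell of an interrupt specifier marks the IRQ as external.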
