Diffstat (limited to 'drivers/irqchip')
35 files changed, 6994 insertions, 308 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index a350969e5ef..bbb746e3550 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -10,6 +10,11 @@ config ARM_GIC config GIC_NON_BANKED bool +config ARM_NVIC + bool + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + config ARM_VIC bool select IRQ_DOMAIN @@ -25,6 +30,47 @@ config ARM_VIC_NR The maximum number of VICs available in the system, for power management. +config BRCMSTB_L2_IRQ + bool + depends on ARM + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + +config DW_APB_ICTL + bool + select IRQ_DOMAIN + +config IMGPDC_IRQ + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + +config CLPS711X_IRQCHIP + bool + depends on ARCH_CLPS711X + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER + select SPARSE_IRQ + default y + +config ORION_IRQCHIP + bool + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER + +config RENESAS_INTC_IRQPIN + bool + select IRQ_DOMAIN + +config RENESAS_IRQC + bool + select IRQ_DOMAIN + +config TB10X_IRQC + bool + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + config VERSATILE_FPGA_IRQ bool select IRQ_DOMAIN @@ -33,3 +79,15 @@ config VERSATILE_FPGA_IRQ_NR int default 4 depends on VERSATILE_FPGA_IRQ + +config XTENSA_MX + bool + select IRQ_DOMAIN + +config IRQ_CROSSBAR + bool + help + Support for a CROSSBAR ip that preceeds the main interrupt controller. + The primary irqchip invokes the crossbar's callback which inturn allocates + a free irq and configures the IP. Thus the peripheral interrupts are + routed to one of the free irqchip interrupt lines. diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 98e3b87bdf1..62a13e5ef98 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -2,10 +2,31 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o +obj-$(CONFIG_ARCH_MMP) += irq-mmp.o +obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o +obj-$(CONFIG_ARCH_MXS) += irq-mxs.o +obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o +obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o obj-$(CONFIG_METAG) += irq-metag-ext.o obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o -obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o +obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o +obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o +obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o +obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o +obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o +obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o +obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o +obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o +obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o +obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o +obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o +obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o +obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o +obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o +obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o +obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o +obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index 04d86a9803f..f8636a650cf 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c @@ -12,12 +12,11 @@ #include <linux/export.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/slab.h> #include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> #include 
<linux/of_address.h> #include <linux/of_irq.h> -#include <asm/mach/irq.h> - -#include <plat/cpu.h> #include "irqchip.h" @@ -25,16 +24,18 @@ #define COMBINER_ENABLE_CLEAR 0x4 #define COMBINER_INT_STATUS 0xC +#define IRQ_IN_COMBINER 8 + static DEFINE_SPINLOCK(irq_controller_lock); struct combiner_chip_data { - unsigned int irq_offset; + unsigned int hwirq_offset; unsigned int irq_mask; void __iomem *base; + unsigned int parent_irq; }; static struct irq_domain *combiner_irq_domain; -static struct combiner_chip_data combiner_data[MAX_COMBINER_NR]; static inline void __iomem *combiner_base(struct irq_data *data) { @@ -75,11 +76,11 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) if (status == 0) goto out; - combiner_irq = __ffs(status); + combiner_irq = chip_data->hwirq_offset + __ffs(status); + cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); - cascade_irq = combiner_irq + (chip_data->irq_offset & ~31); - if (unlikely(cascade_irq >= NR_IRQS)) - do_bad_IRQ(cascade_irq, desc); + if (unlikely(!cascade_irq)) + handle_bad_irq(irq, desc); else generic_handle_irq(cascade_irq); @@ -87,42 +88,51 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) chained_irq_exit(chip, desc); } -static struct irq_chip combiner_chip = { - .name = "COMBINER", - .irq_mask = combiner_mask_irq, - .irq_unmask = combiner_unmask_irq, -}; - -static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq) +#ifdef CONFIG_SMP +static int combiner_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) { - unsigned int max_nr; + struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d); + struct irq_chip *chip = irq_get_chip(chip_data->parent_irq); + struct irq_data *data = irq_get_irq_data(chip_data->parent_irq); - if (soc_is_exynos5250()) - max_nr = EXYNOS5_MAX_COMBINER_NR; + if (chip && chip->irq_set_affinity) + return chip->irq_set_affinity(data, mask_val, force); else - max_nr = EXYNOS4_MAX_COMBINER_NR; + return -EINVAL; +} +#endif - if (combiner_nr >= max_nr) - BUG(); - if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0) +static struct irq_chip combiner_chip = { + .name = "COMBINER", + .irq_mask = combiner_mask_irq, + .irq_unmask = combiner_unmask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = combiner_set_affinity, +#endif +}; + +static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data, + unsigned int irq) +{ + if (irq_set_handler_data(irq, combiner_data) != 0) BUG(); irq_set_chained_handler(irq, combiner_handle_cascade_irq); } -static void __init combiner_init_one(unsigned int combiner_nr, - void __iomem *base) +static void __init combiner_init_one(struct combiner_chip_data *combiner_data, + unsigned int combiner_nr, + void __iomem *base, unsigned int irq) { - combiner_data[combiner_nr].base = base; - combiner_data[combiner_nr].irq_offset = irq_find_mapping( - combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER); - combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3); + combiner_data->base = base; + combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER; + combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3); + combiner_data->parent_irq = irq; /* Disable all interrupts */ - __raw_writel(combiner_data[combiner_nr].irq_mask, - base + COMBINER_ENABLE_CLEAR); + __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR); } -#ifdef CONFIG_OF static int combiner_irq_domain_xlate(struct irq_domain *d, struct 
device_node *controller, const u32 *intspec, unsigned int intsize, @@ -135,25 +145,17 @@ static int combiner_irq_domain_xlate(struct irq_domain *d, if (intsize < 2) return -EINVAL; - *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1]; + *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1]; *out_type = 0; return 0; } -#else -static int combiner_irq_domain_xlate(struct irq_domain *d, - struct device_node *controller, - const u32 *intspec, unsigned int intsize, - unsigned long *out_hwirq, - unsigned int *out_type) -{ - return -EINVAL; -} -#endif static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { + struct combiner_chip_data *combiner_data = d->host_data; + irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq); irq_set_chip_data(irq, &combiner_data[hw >> 3]); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); @@ -166,54 +168,43 @@ static struct irq_domain_ops combiner_irq_domain_ops = { .map = combiner_irq_domain_map, }; -void __init combiner_init(void __iomem *combiner_base, - struct device_node *np) +static void __init combiner_init(void __iomem *combiner_base, + struct device_node *np, + unsigned int max_nr) { - int i, irq, irq_base; - unsigned int max_nr, nr_irq; - - if (np) { - if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) { - pr_warning("%s: number of combiners not specified, " - "setting default as %d.\n", - __func__, EXYNOS4_MAX_COMBINER_NR); - max_nr = EXYNOS4_MAX_COMBINER_NR; - } - } else { - max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR : - EXYNOS4_MAX_COMBINER_NR; - } - nr_irq = max_nr * MAX_IRQ_IN_COMBINER; + int i, irq; + unsigned int nr_irq; + struct combiner_chip_data *combiner_data; - irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0); - if (IS_ERR_VALUE(irq_base)) { - irq_base = COMBINER_IRQ(0, 0); - pr_warning("%s: irq desc alloc failed. 
Continuing with %d as linux irq base\n", __func__, irq_base); + nr_irq = max_nr * IRQ_IN_COMBINER; + + combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL); + if (!combiner_data) { + pr_warning("%s: could not allocate combiner data\n", __func__); + return; } - combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0, - &combiner_irq_domain_ops, &combiner_data); + combiner_irq_domain = irq_domain_add_linear(np, nr_irq, + &combiner_irq_domain_ops, combiner_data); if (WARN_ON(!combiner_irq_domain)) { pr_warning("%s: irq domain init failed\n", __func__); return; } for (i = 0; i < max_nr; i++) { - combiner_init_one(i, combiner_base + (i >> 2) * 0x10); - irq = IRQ_SPI(i); -#ifdef CONFIG_OF - if (np) - irq = irq_of_parse_and_map(np, i); -#endif - combiner_cascade_irq(i, irq); + irq = irq_of_parse_and_map(np, i); + + combiner_init_one(&combiner_data[i], i, + combiner_base + (i >> 2) * 0x10, irq); + combiner_cascade_irq(&combiner_data[i], irq); } } -#ifdef CONFIG_OF static int __init combiner_of_init(struct device_node *np, struct device_node *parent) { void __iomem *combiner_base; + unsigned int max_nr = 20; combiner_base = of_iomap(np, 0); if (!combiner_base) { @@ -221,10 +212,15 @@ static int __init combiner_of_init(struct device_node *np, return -ENXIO; } - combiner_init(combiner_base, np); + if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) { + pr_info("%s: number of combiners not specified, " + "setting default as %d.\n", + __func__, max_nr); + } + + combiner_init(combiner_base, np, max_nr); return 0; } IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner", combiner_of_init); -#endif diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c new file mode 100644 index 00000000000..574aba0eba4 --- /dev/null +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -0,0 +1,541 @@ +/* + * Marvell Armada 370 and Armada XP SoC IRQ handling + * + * Copyright (C) 2012 Marvell + * + * Lior Amsalem <alior@marvell.com> + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * Ben Dooks <ben.dooks@codethink.co.uk> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/cpu.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/irqdomain.h> +#include <linux/slab.h> +#include <linux/msi.h> +#include <asm/mach/arch.h> +#include <asm/exception.h> +#include <asm/smp_plat.h> +#include <asm/mach/irq.h> + +#include "irqchip.h" + +/* Interrupt Controller Registers Map */ +#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) +#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C) + +#define ARMADA_370_XP_INT_CONTROL (0x00) +#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) +#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) +#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) +#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF + +#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) +#define ARMADA_375_PPI_CAUSE (0x10) + +#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4) +#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc) +#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8) + +#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) + +#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) + +#define IPI_DOORBELL_START (0) +#define IPI_DOORBELL_END (8) +#define IPI_DOORBELL_MASK 0xFF +#define PCI_MSI_DOORBELL_START (16) +#define PCI_MSI_DOORBELL_NR (16) +#define PCI_MSI_DOORBELL_END (32) +#define PCI_MSI_DOORBELL_MASK 0xFFFF0000 + +static void __iomem *per_cpu_int_base; +static void __iomem *main_int_base; +static struct irq_domain *armada_370_xp_mpic_domain; +#ifdef CONFIG_PCI_MSI +static struct irq_domain *armada_370_xp_msi_domain; +static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); +static DEFINE_MUTEX(msi_used_lock); +static phys_addr_t msi_doorbell_addr; +#endif + +/* + * In SMP mode: + * For shared global interrupts, mask/unmask global enable bit + * For CPU interrupts, mask/unmask the calling CPU's bit + */ +static void armada_370_xp_irq_mask(struct irq_data *d) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hwirq, main_int_base + + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); + else + writel(hwirq, per_cpu_int_base + + ARMADA_370_XP_INT_SET_MASK_OFFS); +} + +static void armada_370_xp_irq_unmask(struct irq_data *d) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hwirq, main_int_base + + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + else + writel(hwirq, per_cpu_int_base + + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); +} + +#ifdef CONFIG_PCI_MSI + +static int armada_370_xp_alloc_msi(void) +{ + int hwirq; + + mutex_lock(&msi_used_lock); + hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR); + if (hwirq >= PCI_MSI_DOORBELL_NR) + hwirq = -ENOSPC; + else + set_bit(hwirq, msi_used); + mutex_unlock(&msi_used_lock); + + return hwirq; +} + +static void armada_370_xp_free_msi(int hwirq) +{ + mutex_lock(&msi_used_lock); + if (!test_bit(hwirq, msi_used)) + pr_err("trying to free unused MSI#%d\n", hwirq); + else + clear_bit(hwirq, msi_used); + mutex_unlock(&msi_used_lock); +} + +static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, + struct pci_dev *pdev, + struct msi_desc *desc) +{ + struct msi_msg msg; + int virq, hwirq; + + hwirq = armada_370_xp_alloc_msi(); + if (hwirq < 0) + return hwirq; + + virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq); + if (!virq) { + armada_370_xp_free_msi(hwirq); + return -EINVAL; + } + + 
irq_set_msi_desc(virq, desc); + + msg.address_lo = msi_doorbell_addr; + msg.address_hi = 0; + msg.data = 0xf00 | (hwirq + 16); + + write_msi_msg(virq, &msg); + return 0; +} + +static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, + unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + unsigned long hwirq = d->hwirq; + + irq_dispose_mapping(irq); + armada_370_xp_free_msi(hwirq); +} + +static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev, + int nvec, int type) +{ + /* We support MSI, but not MSI-X */ + if (type == PCI_CAP_ID_MSI) + return 0; + return -EINVAL; +} + +static struct irq_chip armada_370_xp_msi_irq_chip = { + .name = "armada_370_xp_msi_irq", + .irq_enable = unmask_msi_irq, + .irq_disable = mask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, +}; + +static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, + handle_simple_irq); + set_irq_flags(virq, IRQF_VALID); + + return 0; +} + +static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { + .map = armada_370_xp_msi_map, +}; + +static int armada_370_xp_msi_init(struct device_node *node, + phys_addr_t main_int_phys_base) +{ + struct msi_chip *msi_chip; + u32 reg; + int ret; + + msi_doorbell_addr = main_int_phys_base + + ARMADA_370_XP_SW_TRIG_INT_OFFS; + + msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL); + if (!msi_chip) + return -ENOMEM; + + msi_chip->setup_irq = armada_370_xp_setup_msi_irq; + msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; + msi_chip->check_device = armada_370_xp_check_msi_device; + msi_chip->of_node = node; + + armada_370_xp_msi_domain = + irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR, + &armada_370_xp_msi_irq_ops, + NULL); + if (!armada_370_xp_msi_domain) { + kfree(msi_chip); + return -ENOMEM; + } + + ret = of_pci_msi_chip_add(msi_chip); + if (ret < 0) { + irq_domain_remove(armada_370_xp_msi_domain); + kfree(msi_chip); + return ret; + } + + reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS) + | PCI_MSI_DOORBELL_MASK; + + writel(reg, per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + + /* Unmask IPI interrupt */ + writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + + return 0; +} +#else +static inline int armada_370_xp_msi_init(struct device_node *node, + phys_addr_t main_int_phys_base) +{ + return 0; +} +#endif + +#ifdef CONFIG_SMP +static DEFINE_RAW_SPINLOCK(irq_controller_lock); + +static int armada_xp_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long reg, mask; + int cpu; + + /* Select a single core from the affinity mask which is online */ + cpu = cpumask_any_and(mask_val, cpu_online_mask); + mask = 1UL << cpu_logical_map(cpu); + + raw_spin_lock(&irq_controller_lock); + reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); + reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask; + writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); + raw_spin_unlock(&irq_controller_lock); + + return 0; +} +#endif + +static struct irq_chip armada_370_xp_irq_chip = { + .name = "armada_370_xp_irq", + .irq_mask = armada_370_xp_irq_mask, + .irq_mask_ack = armada_370_xp_irq_mask, + .irq_unmask = armada_370_xp_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = armada_xp_set_affinity, +#endif +}; + +static int armada_370_xp_mpic_irq_map(struct irq_domain *h, + unsigned int 
virq, irq_hw_number_t hw) +{ + armada_370_xp_irq_mask(irq_get_irq_data(virq)); + if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hw, per_cpu_int_base + + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + else + writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + irq_set_status_flags(virq, IRQ_LEVEL); + + if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { + irq_set_percpu_devid(virq); + irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, + handle_percpu_devid_irq); + + } else { + irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, + handle_level_irq); + } + set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); + + return 0; +} + +#ifdef CONFIG_SMP +static void armada_mpic_send_doorbell(const struct cpumask *mask, + unsigned int irq) +{ + int cpu; + unsigned long map = 0; + + /* Convert our logical CPU mask into a physical one. */ + for_each_cpu(cpu, mask) + map |= 1 << cpu_logical_map(cpu); + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + dsb(); + + /* submit softirq */ + writel((map << 8) | irq, main_int_base + + ARMADA_370_XP_SW_TRIG_INT_OFFS); +} + +static void armada_xp_mpic_smp_cpu_init(void) +{ + u32 control; + int nr_irqs, i; + + control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); + nr_irqs = (control >> 2) & 0x3ff; + + for (i = 0; i < nr_irqs; i++) + writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); + + /* Clear pending IPIs */ + writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + + /* Enable first 8 IPIs */ + writel(IPI_DOORBELL_MASK, per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + + /* Unmask IPI interrupt */ + writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); +} + +static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + armada_xp_mpic_smp_cpu_init(); + return NOTIFY_OK; +} + +static struct notifier_block armada_370_xp_mpic_cpu_notifier = { + .notifier_call = armada_xp_mpic_secondary_init, + .priority = 100, +}; + +#endif /* CONFIG_SMP */ + +static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { + .map = armada_370_xp_mpic_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +#ifdef CONFIG_PCI_MSI +static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained) +{ + u32 msimask, msinr; + + msimask = readl_relaxed(per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) + & PCI_MSI_DOORBELL_MASK; + + writel(~msimask, per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + + for (msinr = PCI_MSI_DOORBELL_START; + msinr < PCI_MSI_DOORBELL_END; msinr++) { + int irq; + + if (!(msimask & BIT(msinr))) + continue; + + irq = irq_find_mapping(armada_370_xp_msi_domain, + msinr - 16); + + if (is_chained) + generic_handle_irq(irq); + else + handle_IRQ(irq, regs); + } +} +#else +static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} +#endif + +static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq, + struct irq_desc *desc) +{ + struct irq_chip *chip = irq_get_chip(irq); + unsigned long irqmap, irqn; + unsigned int cascade_irq; + + chained_irq_enter(chip, desc); + + irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE); + + if (irqmap & BIT(0)) { + armada_370_xp_handle_msi_irq(NULL, true); + irqmap &= ~BIT(0); + } + + for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) { + cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn); + generic_handle_irq(cascade_irq); + } + + chained_irq_exit(chip, 
desc); +} + +static void __exception_irq_entry +armada_370_xp_handle_irq(struct pt_regs *regs) +{ + u32 irqstat, irqnr; + + do { + irqstat = readl_relaxed(per_cpu_int_base + + ARMADA_370_XP_CPU_INTACK_OFFS); + irqnr = irqstat & 0x3FF; + + if (irqnr > 1022) + break; + + if (irqnr > 1) { + irqnr = irq_find_mapping(armada_370_xp_mpic_domain, + irqnr); + handle_IRQ(irqnr, regs); + continue; + } + + /* MSI handling */ + if (irqnr == 1) + armada_370_xp_handle_msi_irq(regs, false); + +#ifdef CONFIG_SMP + /* IPI Handling */ + if (irqnr == 0) { + u32 ipimask, ipinr; + + ipimask = readl_relaxed(per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) + & IPI_DOORBELL_MASK; + + writel(~ipimask, per_cpu_int_base + + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + + /* Handle all pending doorbells */ + for (ipinr = IPI_DOORBELL_START; + ipinr < IPI_DOORBELL_END; ipinr++) { + if (ipimask & (0x1 << ipinr)) + handle_IPI(ipinr, regs); + } + continue; + } +#endif + + } while (1); +} + +static int __init armada_370_xp_mpic_of_init(struct device_node *node, + struct device_node *parent) +{ + struct resource main_int_res, per_cpu_int_res; + int parent_irq, nr_irqs, i; + u32 control; + + BUG_ON(of_address_to_resource(node, 0, &main_int_res)); + BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res)); + + BUG_ON(!request_mem_region(main_int_res.start, + resource_size(&main_int_res), + node->full_name)); + BUG_ON(!request_mem_region(per_cpu_int_res.start, + resource_size(&per_cpu_int_res), + node->full_name)); + + main_int_base = ioremap(main_int_res.start, + resource_size(&main_int_res)); + BUG_ON(!main_int_base); + + per_cpu_int_base = ioremap(per_cpu_int_res.start, + resource_size(&per_cpu_int_res)); + BUG_ON(!per_cpu_int_base); + + control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); + nr_irqs = (control >> 2) & 0x3ff; + + for (i = 0; i < nr_irqs; i++) + writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); + + armada_370_xp_mpic_domain = + irq_domain_add_linear(node, nr_irqs, + &armada_370_xp_mpic_irq_ops, NULL); + + BUG_ON(!armada_370_xp_mpic_domain); + +#ifdef CONFIG_SMP + armada_xp_mpic_smp_cpu_init(); +#endif + + armada_370_xp_msi_init(node, main_int_res.start); + + parent_irq = irq_of_parse_and_map(node, 0); + if (parent_irq <= 0) { + irq_set_default_host(armada_370_xp_mpic_domain); + set_handle_irq(armada_370_xp_handle_irq); +#ifdef CONFIG_SMP + set_smp_cross_call(armada_mpic_send_doorbell); + register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); +#endif + } else { + irq_set_chained_handler(parent_irq, + armada_370_xp_mpic_handle_cascade_irq); + } + + return 0; +} + +IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init); diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index 16c78f1c5ef..5916d6cdafa 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -49,9 +49,11 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/irqdomain.h> -#include <linux/irqchip/bcm2835.h> #include <asm/exception.h> +#include <asm/mach/irq.h> + +#include "irqchip.h" /* Put the bank and irq (32 bits) into the hwirq */ #define MAKE_HWIRQ(b, n) ((b << 5) | (n)) @@ -93,6 +95,8 @@ struct armctrl_ic { }; static struct armctrl_ic intc __read_mostly; +static void __exception_irq_entry bcm2835_handle_irq( + struct pt_regs *regs); static void armctrl_mask_irq(struct irq_data *d) { @@ -164,17 +168,9 @@ static int __init armctrl_of_init(struct device_node *node, set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } } - return 0; -} - 
-static struct of_device_id irq_of_match[] __initconst = { - { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }, - { } -}; -void __init bcm2835_init_irq(void) -{ - of_irq_init(irq_of_match); + set_handle_irq(bcm2835_handle_irq); + return 0; } /* @@ -200,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs, handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); } -asmlinkage void __exception_irq_entry bcm2835_handle_irq( +static void __exception_irq_entry bcm2835_handle_irq( struct pt_regs *regs) { u32 stat, irq; @@ -222,3 +218,5 @@ asmlinkage void __exception_irq_entry bcm2835_handle_irq( } } } + +IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c new file mode 100644 index 00000000000..c15c840987d --- /dev/null +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -0,0 +1,202 @@ +/* + * Generic Broadcom Set Top Box Level 2 Interrupt controller driver + * + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/irqdomain.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> + +#include <asm/mach/irq.h> + +#include "irqchip.h" + +/* Register offsets in the L2 interrupt controller */ +#define CPU_STATUS 0x00 +#define CPU_SET 0x04 +#define CPU_CLEAR 0x08 +#define CPU_MASK_STATUS 0x0c +#define CPU_MASK_SET 0x10 +#define CPU_MASK_CLEAR 0x14 + +/* L2 intc private data structure */ +struct brcmstb_l2_intc_data { + int parent_irq; + void __iomem *base; + struct irq_domain *domain; + bool can_wake; + u32 saved_mask; /* for suspend/resume */ +}; + +static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) +{ + struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + u32 status; + + chained_irq_enter(chip, desc); + + status = __raw_readl(b->base + CPU_STATUS) & + ~(__raw_readl(b->base + CPU_MASK_STATUS)); + + if (status == 0) { + do_bad_IRQ(irq, desc); + goto out; + } + + do { + irq = ffs(status) - 1; + /* ack at our level */ + __raw_writel(1 << irq, b->base + CPU_CLEAR); + status &= ~(1 << irq); + generic_handle_irq(irq_find_mapping(b->domain, irq)); + } while (status); +out: + chained_irq_exit(chip, desc); +} + +static void brcmstb_l2_intc_suspend(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct brcmstb_l2_intc_data *b = gc->private; + + irq_gc_lock(gc); + /* Save the current mask */ + b->saved_mask = __raw_readl(b->base + CPU_MASK_STATUS); + + if (b->can_wake) { + /* Program the wakeup mask */ + __raw_writel(~gc->wake_active, b->base + CPU_MASK_SET); + __raw_writel(gc->wake_active, b->base + CPU_MASK_CLEAR); + } + 
irq_gc_unlock(gc); +} + +static void brcmstb_l2_intc_resume(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct brcmstb_l2_intc_data *b = gc->private; + + irq_gc_lock(gc); + /* Clear unmasked non-wakeup interrupts */ + __raw_writel(~b->saved_mask & ~gc->wake_active, b->base + CPU_CLEAR); + + /* Restore the saved mask */ + __raw_writel(b->saved_mask, b->base + CPU_MASK_SET); + __raw_writel(~b->saved_mask, b->base + CPU_MASK_CLEAR); + irq_gc_unlock(gc); +} + +int __init brcmstb_l2_intc_of_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + struct brcmstb_l2_intc_data *data; + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->base = of_iomap(np, 0); + if (!data->base) { + pr_err("failed to remap intc L2 registers\n"); + ret = -ENOMEM; + goto out_free; + } + + /* Disable all interrupts by default */ + __raw_writel(0xffffffff, data->base + CPU_MASK_SET); + __raw_writel(0xffffffff, data->base + CPU_CLEAR); + + data->parent_irq = irq_of_parse_and_map(np, 0); + if (data->parent_irq < 0) { + pr_err("failed to find parent interrupt\n"); + ret = data->parent_irq; + goto out_unmap; + } + + data->domain = irq_domain_add_linear(np, 32, + &irq_generic_chip_ops, NULL); + if (!data->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + /* Allocate a single Generic IRQ chip for this node */ + ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, + np->full_name, handle_edge_irq, clr, 0, 0); + if (ret) { + pr_err("failed to allocate generic irq chip\n"); + goto out_free_domain; + } + + /* Set the IRQ chaining logic */ + irq_set_handler_data(data->parent_irq, data); + irq_set_chained_handler(data->parent_irq, brcmstb_l2_intc_irq_handle); + + gc = irq_get_domain_generic_chip(data->domain, 0); + gc->reg_base = data->base; + gc->private = data; + ct = gc->chip_types; + + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->regs.ack = CPU_CLEAR; + + ct->chip.irq_mask = irq_gc_mask_disable_reg; + ct->regs.disable = CPU_MASK_SET; + + ct->chip.irq_unmask = irq_gc_unmask_enable_reg; + ct->regs.enable = CPU_MASK_CLEAR; + + ct->chip.irq_suspend = brcmstb_l2_intc_suspend; + ct->chip.irq_resume = brcmstb_l2_intc_resume; + + if (of_property_read_bool(np, "brcm,irq-can-wake")) { + data->can_wake = true; + /* This IRQ chip can wake the system, set all child interrupts + * in wake_enabled mask + */ + gc->wake_enabled = 0xffffffff; + ct->chip.irq_set_wake = irq_gc_set_wake; + } + + pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", + data->base, data->parent_irq); + + return 0; + +out_free_domain: + irq_domain_remove(data->domain); +out_unmap: + iounmap(data->base); +out_free: + kfree(data); + return ret; +} +IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init); diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c new file mode 100644 index 00000000000..33340dc97d1 --- /dev/null +++ b/drivers/irqchip/irq-clps711x.c @@ -0,0 +1,243 @@ +/* + * CLPS711X IRQ driver + * + * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/slab.h> + +#include <asm/exception.h> +#include <asm/mach/irq.h> + +#include "irqchip.h" + +#define CLPS711X_INTSR1 (0x0240) +#define CLPS711X_INTMR1 (0x0280) +#define CLPS711X_BLEOI (0x0600) +#define CLPS711X_MCEOI (0x0640) +#define CLPS711X_TEOI (0x0680) +#define CLPS711X_TC1EOI (0x06c0) +#define CLPS711X_TC2EOI (0x0700) +#define CLPS711X_RTCEOI (0x0740) +#define CLPS711X_UMSEOI (0x0780) +#define CLPS711X_COEOI (0x07c0) +#define CLPS711X_INTSR2 (0x1240) +#define CLPS711X_INTMR2 (0x1280) +#define CLPS711X_SRXEOF (0x1600) +#define CLPS711X_KBDEOI (0x1700) +#define CLPS711X_INTSR3 (0x2240) +#define CLPS711X_INTMR3 (0x2280) + +static const struct { +#define CLPS711X_FLAG_EN (1 << 0) +#define CLPS711X_FLAG_FIQ (1 << 1) + unsigned int flags; + phys_addr_t eoi; +} clps711x_irqs[] = { + [1] = { CLPS711X_FLAG_FIQ, CLPS711X_BLEOI, }, + [3] = { CLPS711X_FLAG_FIQ, CLPS711X_MCEOI, }, + [4] = { CLPS711X_FLAG_EN, CLPS711X_COEOI, }, + [5] = { CLPS711X_FLAG_EN, }, + [6] = { CLPS711X_FLAG_EN, }, + [7] = { CLPS711X_FLAG_EN, }, + [8] = { CLPS711X_FLAG_EN, CLPS711X_TC1EOI, }, + [9] = { CLPS711X_FLAG_EN, CLPS711X_TC2EOI, }, + [10] = { CLPS711X_FLAG_EN, CLPS711X_RTCEOI, }, + [11] = { CLPS711X_FLAG_EN, CLPS711X_TEOI, }, + [12] = { CLPS711X_FLAG_EN, }, + [13] = { CLPS711X_FLAG_EN, }, + [14] = { CLPS711X_FLAG_EN, CLPS711X_UMSEOI, }, + [15] = { CLPS711X_FLAG_EN, CLPS711X_SRXEOF, }, + [16] = { CLPS711X_FLAG_EN, CLPS711X_KBDEOI, }, + [17] = { CLPS711X_FLAG_EN, }, + [18] = { CLPS711X_FLAG_EN, }, + [28] = { CLPS711X_FLAG_EN, }, + [29] = { CLPS711X_FLAG_EN, }, + [32] = { CLPS711X_FLAG_FIQ, }, +}; + +static struct { + void __iomem *base; + void __iomem *intmr[3]; + void __iomem *intsr[3]; + struct irq_domain *domain; + struct irq_domain_ops ops; +} *clps711x_intc; + +static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs) +{ + u32 irqnr, irqstat; + + do { + irqstat = readw_relaxed(clps711x_intc->intmr[0]) & + readw_relaxed(clps711x_intc->intsr[0]); + if (irqstat) { + irqnr = irq_find_mapping(clps711x_intc->domain, + fls(irqstat) - 1); + handle_IRQ(irqnr, regs); + } + + irqstat = readw_relaxed(clps711x_intc->intmr[1]) & + readw_relaxed(clps711x_intc->intsr[1]); + if (irqstat) { + irqnr = irq_find_mapping(clps711x_intc->domain, + fls(irqstat) - 1 + 16); + handle_IRQ(irqnr, regs); + } + } while (irqstat); +} + +static void clps711x_intc_eoi(struct irq_data *d) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hwirq].eoi); +} + +static void clps711x_intc_mask(struct irq_data *d) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + void __iomem *intmr = clps711x_intc->intmr[hwirq / 16]; + u32 tmp; + + tmp = readl_relaxed(intmr); + tmp &= ~(1 << (hwirq % 16)); + writel_relaxed(tmp, intmr); +} + +static void clps711x_intc_unmask(struct irq_data *d) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + void __iomem *intmr = clps711x_intc->intmr[hwirq / 16]; + u32 tmp; + + tmp = readl_relaxed(intmr); + tmp |= 1 << (hwirq % 16); + writel_relaxed(tmp, intmr); +} + +static struct irq_chip clps711x_intc_chip = { + .name = "clps711x-intc", + .irq_eoi = clps711x_intc_eoi, + .irq_mask = clps711x_intc_mask, + .irq_unmask = clps711x_intc_unmask, +}; + +static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + irq_flow_handler_t handler = handle_level_irq; + 
unsigned int flags = IRQF_VALID | IRQF_PROBE; + + if (!clps711x_irqs[hw].flags) + return 0; + + if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) { + handler = handle_bad_irq; + flags |= IRQF_NOAUTOEN; + } else if (clps711x_irqs[hw].eoi) { + handler = handle_fasteoi_irq; + } + + /* Clear down pending interrupt */ + if (clps711x_irqs[hw].eoi) + writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi); + + irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler); + set_irq_flags(virq, flags); + + return 0; +} + +static int __init _clps711x_intc_init(struct device_node *np, + phys_addr_t base, resource_size_t size) +{ + int err; + + clps711x_intc = kzalloc(sizeof(*clps711x_intc), GFP_KERNEL); + if (!clps711x_intc) + return -ENOMEM; + + clps711x_intc->base = ioremap(base, size); + if (!clps711x_intc->base) { + err = -ENOMEM; + goto out_kfree; + } + + clps711x_intc->intsr[0] = clps711x_intc->base + CLPS711X_INTSR1; + clps711x_intc->intmr[0] = clps711x_intc->base + CLPS711X_INTMR1; + clps711x_intc->intsr[1] = clps711x_intc->base + CLPS711X_INTSR2; + clps711x_intc->intmr[1] = clps711x_intc->base + CLPS711X_INTMR2; + clps711x_intc->intsr[2] = clps711x_intc->base + CLPS711X_INTSR3; + clps711x_intc->intmr[2] = clps711x_intc->base + CLPS711X_INTMR3; + + /* Mask all interrupts */ + writel_relaxed(0, clps711x_intc->intmr[0]); + writel_relaxed(0, clps711x_intc->intmr[1]); + writel_relaxed(0, clps711x_intc->intmr[2]); + + err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id()); + if (IS_ERR_VALUE(err)) + goto out_iounmap; + + clps711x_intc->ops.map = clps711x_intc_irq_map; + clps711x_intc->ops.xlate = irq_domain_xlate_onecell; + clps711x_intc->domain = + irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs), + 0, 0, &clps711x_intc->ops, NULL); + if (!clps711x_intc->domain) { + err = -ENOMEM; + goto out_irqfree; + } + + irq_set_default_host(clps711x_intc->domain); + set_handle_irq(clps711x_irqh); + +#ifdef CONFIG_FIQ + init_FIQ(0); +#endif + + return 0; + +out_irqfree: + irq_free_descs(0, ARRAY_SIZE(clps711x_irqs)); + +out_iounmap: + iounmap(clps711x_intc->base); + +out_kfree: + kfree(clps711x_intc); + + return err; +} + +void __init clps711x_intc_init(phys_addr_t base, resource_size_t size) +{ + BUG_ON(_clps711x_intc_init(NULL, base, size)); +} + +#ifdef CONFIG_IRQCHIP +static int __init clps711x_intc_init_dt(struct device_node *np, + struct device_node *parent) +{ + struct resource res; + int err; + + err = of_address_to_resource(np, 0, &res); + if (err) + return err; + + return _clps711x_intc_init(np, res.start, resource_size(&res)); +} +IRQCHIP_DECLARE(clps711x, "cirrus,clps711x-intc", clps711x_intc_init_dt); +#endif diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c new file mode 100644 index 00000000000..3d15d16a708 --- /dev/null +++ b/drivers/irqchip/irq-crossbar.c @@ -0,0 +1,208 @@ +/* + * drivers/irqchip/irq-crossbar.c + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com + * Author: Sricharan R <r.sricharan@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/slab.h> +#include <linux/irqchip/arm-gic.h> + +#define IRQ_FREE -1 +#define GIC_IRQ_START 32 + +/* + * @int_max: maximum number of supported interrupts + * @irq_map: array of interrupts to crossbar number mapping + * @crossbar_base: crossbar base address + * @register_offsets: offsets for each irq number + */ +struct crossbar_device { + uint int_max; + uint *irq_map; + void __iomem *crossbar_base; + int *register_offsets; + void (*write) (int, int); +}; + +static struct crossbar_device *cb; + +static inline void crossbar_writel(int irq_no, int cb_no) +{ + writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline void crossbar_writew(int irq_no, int cb_no) +{ + writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline void crossbar_writeb(int irq_no, int cb_no) +{ + writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline int allocate_free_irq(int cb_no) +{ + int i; + + for (i = 0; i < cb->int_max; i++) { + if (cb->irq_map[i] == IRQ_FREE) { + cb->irq_map[i] = cb_no; + return i; + } + } + + return -ENODEV; +} + +static int crossbar_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]); + return 0; +} + +static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq; + + if (hw > GIC_IRQ_START) + cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE; +} + +static int crossbar_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + unsigned long ret; + + ret = allocate_free_irq(intspec[1]); + + if (IS_ERR_VALUE(ret)) + return ret; + + *out_hwirq = ret + GIC_IRQ_START; + return 0; +} + +const struct irq_domain_ops routable_irq_domain_ops = { + .map = crossbar_domain_map, + .unmap = crossbar_domain_unmap, + .xlate = crossbar_domain_xlate +}; + +static int __init crossbar_of_init(struct device_node *node) +{ + int i, size, max, reserved = 0, entry; + const __be32 *irqsr; + + cb = kzalloc(sizeof(*cb), GFP_KERNEL); + + if (!cb) + return -ENOMEM; + + cb->crossbar_base = of_iomap(node, 0); + if (!cb->crossbar_base) + goto err1; + + of_property_read_u32(node, "ti,max-irqs", &max); + cb->irq_map = kzalloc(max * sizeof(int), GFP_KERNEL); + if (!cb->irq_map) + goto err2; + + cb->int_max = max; + + for (i = 0; i < max; i++) + cb->irq_map[i] = IRQ_FREE; + + /* Get and mark reserved irqs */ + irqsr = of_get_property(node, "ti,irqs-reserved", &size); + if (irqsr) { + size /= sizeof(__be32); + + for (i = 0; i < size; i++) { + of_property_read_u32_index(node, + "ti,irqs-reserved", + i, &entry); + if (entry > max) { + pr_err("Invalid reserved entry\n"); + goto err3; + } + cb->irq_map[entry] = 0; + } + } + + cb->register_offsets = kzalloc(max * sizeof(int), GFP_KERNEL); + if (!cb->register_offsets) + goto err3; + + of_property_read_u32(node, "ti,reg-size", &size); + + switch (size) { + case 1: + cb->write = crossbar_writeb; + break; + case 2: + cb->write = crossbar_writew; + break; + case 4: + cb->write = crossbar_writel; + break; + default: + pr_err("Invalid reg-size property\n"); + goto err4; + break; + } + + /* + * Register offsets are not linear because of the + * reserved irqs. so find and store the offsets once. 
+ */ + for (i = 0; i < max; i++) { + if (!cb->irq_map[i]) + continue; + + cb->register_offsets[i] = reserved; + reserved += size; + } + + register_routable_domain_ops(&routable_irq_domain_ops); + return 0; + +err4: + kfree(cb->register_offsets); +err3: + kfree(cb->irq_map); +err2: + iounmap(cb->crossbar_base); +err1: + kfree(cb); + return -ENOMEM; +} + +static const struct of_device_id crossbar_match[] __initconst = { + { .compatible = "ti,irq-crossbar" }, + {} +}; + +int __init irqcrossbar_init(void) +{ + struct device_node *np; + np = of_find_matching_node(NULL, crossbar_match); + if (!np) + return -ENODEV; + + crossbar_of_init(np); + return 0; +} diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c new file mode 100644 index 00000000000..31e231e1f56 --- /dev/null +++ b/drivers/irqchip/irq-dw-apb-ictl.c @@ -0,0 +1,150 @@ +/* + * Synopsys DW APB ICTL irqchip driver. + * + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * + * based on GPL'ed 2.6 kernel sources + * (c) Marvell International Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include "irqchip.h" + +#define APB_INT_ENABLE_L 0x00 +#define APB_INT_ENABLE_H 0x04 +#define APB_INT_MASK_L 0x08 +#define APB_INT_MASK_H 0x0c +#define APB_INT_FINALSTATUS_L 0x30 +#define APB_INT_FINALSTATUS_H 0x34 + +static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_get_chip(irq); + struct irq_chip_generic *gc = irq_get_handler_data(irq); + struct irq_domain *d = gc->private; + u32 stat; + int n; + + chained_irq_enter(chip, desc); + + for (n = 0; n < gc->num_ct; n++) { + stat = readl_relaxed(gc->reg_base + + APB_INT_FINALSTATUS_L + 4 * n); + while (stat) { + u32 hwirq = ffs(stat) - 1; + generic_handle_irq(irq_find_mapping(d, + gc->irq_base + hwirq + 32 * n)); + stat &= ~(1 << hwirq); + } + } + + chained_irq_exit(chip, desc); +} + +static int __init dw_apb_ictl_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + struct resource r; + struct irq_domain *domain; + struct irq_chip_generic *gc; + void __iomem *iobase; + int ret, nrirqs, irq; + u32 reg; + + /* Map the parent interrupt for the chained handler */ + irq = irq_of_parse_and_map(np, 0); + if (irq <= 0) { + pr_err("%s: unable to parse irq\n", np->full_name); + return -EINVAL; + } + + ret = of_address_to_resource(np, 0, &r); + if (ret) { + pr_err("%s: unable to get resource\n", np->full_name); + return ret; + } + + if (!request_mem_region(r.start, resource_size(&r), np->full_name)) { + pr_err("%s: unable to request mem region\n", np->full_name); + return -ENOMEM; + } + + iobase = ioremap(r.start, resource_size(&r)); + if (!iobase) { + pr_err("%s: unable to map resource\n", np->full_name); + ret = -ENOMEM; + goto err_release; + } + + /* + * DW IP can be configured to allow 2-64 irqs. We can determine + * the number of irqs supported by writing into enable register + * and look for bits not set, as corresponding flip-flops will + * have been removed by sythesis tool. 
+ */ + + /* mask and enable all interrupts */ + writel(~0, iobase + APB_INT_MASK_L); + writel(~0, iobase + APB_INT_MASK_H); + writel(~0, iobase + APB_INT_ENABLE_L); + writel(~0, iobase + APB_INT_ENABLE_H); + + reg = readl(iobase + APB_INT_ENABLE_H); + if (reg) + nrirqs = 32 + fls(reg); + else + nrirqs = fls(readl(iobase + APB_INT_ENABLE_L)); + + domain = irq_domain_add_linear(np, nrirqs, + &irq_generic_chip_ops, NULL); + if (!domain) { + pr_err("%s: unable to add irq domain\n", np->full_name); + ret = -ENOMEM; + goto err_unmap; + } + + ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1, + np->name, handle_level_irq, clr, 0, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("%s: unable to alloc irq domain gc\n", np->full_name); + goto err_unmap; + } + + gc = irq_get_domain_generic_chip(domain, 0); + gc->private = domain; + gc->reg_base = iobase; + + gc->chip_types[0].regs.mask = APB_INT_MASK_L; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; + + if (nrirqs > 32) { + gc->chip_types[1].regs.mask = APB_INT_MASK_H; + gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit; + gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit; + } + + irq_set_handler_data(irq, gc); + irq_set_chained_handler(irq, dw_apb_ictl_handler); + + return 0; + +err_unmap: + iounmap(iobase); +err_release: + release_mem_region(r.start, resource_size(&r)); + return ret; +} +IRQCHIP_DECLARE(dw_apb_ictl, + "snps,dw-apb-ictl", dw_apb_ictl_init); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 644d7246842..7c131cf7cc1 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/list.h> #include <linux/smp.h> +#include <linux/cpu.h> #include <linux/cpu_pm.h> #include <linux/cpumask.h> #include <linux/io.h> @@ -38,18 +39,19 @@ #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/slab.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqchip/arm-gic.h> +#include <asm/cputype.h> #include <asm/irq.h> #include <asm/exception.h> #include <asm/smp_plat.h> -#include <asm/mach/irq.h> #include "irqchip.h" union gic_base { void __iomem *common_base; - void __percpu __iomem **percpu_base; + void __percpu * __iomem *percpu_base; }; struct gic_chip_data { @@ -127,7 +129,7 @@ static inline void gic_set_base_accessor(struct gic_chip_data *data, #else #define gic_data_dist_base(d) ((d)->dist_base.common_base) #define gic_data_cpu_base(d) ((d)->cpu_base.common_base) -#define gic_set_base_accessor(d,f) +#define gic_set_base_accessor(d, f) #endif static inline void __iomem *gic_dist_base(struct irq_data *d) @@ -236,7 +238,8 @@ static int gic_retrigger(struct irq_data *d) if (gic_arch_extn.irq_retrigger) return gic_arch_extn.irq_retrigger(d); - return -ENXIO; + /* the genirq layer expects 0 if we can't retrigger in hardware */ + return 0; } #ifdef CONFIG_SMP @@ -244,17 +247,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); - unsigned int shift = (gic_irq(d) % 4) * 8; - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu, shift = (gic_irq(d) % 4) * 8; u32 val, mask, bit; + if (!force) + cpu = cpumask_any_and(mask_val, cpu_online_mask); + else + cpu = cpumask_first(mask_val); + if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; + raw_spin_lock(&irq_controller_lock); mask = 0xff << 
shift; bit = gic_cpu_map[cpu] << shift; - - raw_spin_lock(&irq_controller_lock); val = readl_relaxed(reg) & ~mask; writel_relaxed(val | bit, reg); raw_spin_unlock(&irq_controller_lock); @@ -278,7 +284,7 @@ static int gic_set_wake(struct irq_data *d, unsigned int on) #define gic_set_wake NULL #endif -static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u32 irqstat, irqnr; struct gic_chip_data *gic = &gic_data[0]; @@ -286,7 +292,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs do { irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); - irqnr = irqstat & ~0x1c00; + irqnr = irqstat & GICC_IAR_INT_ID_MASK; if (likely(irqnr > 15 && irqnr < 1021)) { irqnr = irq_find_mapping(gic->domain, irqnr); @@ -323,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); if (unlikely(gic_irq < 32 || gic_irq > 1020)) - do_bad_IRQ(cascade_irq, desc); + handle_bad_irq(cascade_irq, desc); else generic_handle_irq(cascade_irq); @@ -412,7 +418,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic) writel_relaxed(1, base + GIC_DIST_CTRL); } -static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) +static void gic_cpu_init(struct gic_chip_data *gic) { void __iomem *dist_base = gic_data_dist_base(gic); void __iomem *base = gic_data_cpu_base(gic); @@ -451,6 +457,12 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) writel_relaxed(1, base + GIC_CPU_CTRL); } +void gic_cpu_if_down(void) +{ + void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); + writel_relaxed(0, cpu_base + GIC_CPU_CTRL); +} + #ifdef CONFIG_CPU_PM /* * Saves the GIC distributor registers during suspend or idle. Must be called @@ -641,26 +653,170 @@ static void __init gic_pm_init(struct gic_chip_data *gic) #endif #ifdef CONFIG_SMP -void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) { int cpu; - unsigned long map = 0; + unsigned long flags, map = 0; + + raw_spin_lock_irqsave(&irq_controller_lock, flags); /* Convert our logical CPU mask into a physical one. */ for_each_cpu(cpu, mask) - map |= 1 << cpu_logical_map(cpu); + map |= gic_cpu_map[cpu]; /* * Ensure that stores to Normal memory are visible to the - * other CPUs before issuing the IPI. + * other CPUs before they observe us issuing the IPI. */ - dsb(); + dmb(ishst); /* this always happens on GIC0 */ writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); + + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } #endif +#ifdef CONFIG_BL_SWITCHER +/* + * gic_send_sgi - send a SGI directly to given CPU interface number + * + * cpu_id: the ID for the destination CPU interface + * irq: the IPI number to send a SGI for + */ +void gic_send_sgi(unsigned int cpu_id, unsigned int irq) +{ + BUG_ON(cpu_id >= NR_GIC_CPU_IF); + cpu_id = 1 << cpu_id; + /* this always happens on GIC0 */ + writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); +} + +/* + * gic_get_cpu_id - get the CPU interface ID for the specified CPU + * + * @cpu: the logical CPU number to get the GIC ID for. + * + * Return the CPU interface ID for the given logical CPU number, + * or -1 if the CPU number is too large or the interface ID is + * unknown (more than one bit set). 
+ */ +int gic_get_cpu_id(unsigned int cpu) +{ + unsigned int cpu_bit; + + if (cpu >= NR_GIC_CPU_IF) + return -1; + cpu_bit = gic_cpu_map[cpu]; + if (cpu_bit & (cpu_bit - 1)) + return -1; + return __ffs(cpu_bit); +} + +/* + * gic_migrate_target - migrate IRQs to another CPU interface + * + * @new_cpu_id: the CPU target ID to migrate IRQs to + * + * Migrate all peripheral interrupts with a target matching the current CPU + * to the interface corresponding to @new_cpu_id. The CPU interface mapping + * is also updated. Targets to other CPU interfaces are unchanged. + * This must be called with IRQs locally disabled. + */ +void gic_migrate_target(unsigned int new_cpu_id) +{ + unsigned int cur_cpu_id, gic_irqs, gic_nr = 0; + void __iomem *dist_base; + int i, ror_val, cpu = smp_processor_id(); + u32 val, cur_target_mask, active_mask; + + if (gic_nr >= MAX_GIC_NR) + BUG(); + + dist_base = gic_data_dist_base(&gic_data[gic_nr]); + if (!dist_base) + return; + gic_irqs = gic_data[gic_nr].gic_irqs; + + cur_cpu_id = __ffs(gic_cpu_map[cpu]); + cur_target_mask = 0x01010101 << cur_cpu_id; + ror_val = (cur_cpu_id - new_cpu_id) & 31; + + raw_spin_lock(&irq_controller_lock); + + /* Update the target interface for this logical CPU */ + gic_cpu_map[cpu] = 1 << new_cpu_id; + + /* + * Find all the peripheral interrupts targetting the current + * CPU interface and migrate them to the new CPU interface. + * We skip DIST_TARGET 0 to 7 as they are read-only. + */ + for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) { + val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); + active_mask = val & cur_target_mask; + if (active_mask) { + val &= ~active_mask; + val |= ror32(active_mask, ror_val); + writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4); + } + } + + raw_spin_unlock(&irq_controller_lock); + + /* + * Now let's migrate and clear any potential SGIs that might be + * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET + * is a banked register, we can only forward the SGI using + * GIC_DIST_SOFTINT. The original SGI source is lost but Linux + * doesn't use that information anyway. + * + * For the same reason we do not adjust SGI source information + * for previously sent SGIs by us to other CPUs either. + */ + for (i = 0; i < 16; i += 4) { + int j; + val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i); + if (!val) + continue; + writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i); + for (j = i; j < i + 4; j++) { + if (val & 0xff) + writel_relaxed((1 << (new_cpu_id + 16)) | j, + dist_base + GIC_DIST_SOFTINT); + val >>= 8; + } + } +} + +/* + * gic_get_sgir_physaddr - get the physical address for the SGI register + * + * REturn the physical address of the SGI register to be used + * by some early assembly code when the kernel is not yet available. 
+ */ +static unsigned long gic_dist_physaddr; + +unsigned long gic_get_sgir_physaddr(void) +{ + if (!gic_dist_physaddr) + return 0; + return gic_dist_physaddr + GIC_DIST_SOFTINT; +} + +void __init gic_init_physaddr(struct device_node *node) +{ + struct resource res; + if (of_address_to_resource(node, 0, &res) == 0) { + gic_dist_physaddr = res.start; + pr_info("GIC physical location is %#lx\n", gic_dist_physaddr); + } +} + +#else +#define gic_init_physaddr(node) do { } while (0) +#endif + static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { @@ -673,16 +829,25 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_chip_and_handler(irq, &gic_chip, handle_fasteoi_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + + gic_routable_irq_domain_ops->map(d, irq, hw); } irq_set_chip_data(irq, d->host_data); return 0; } +static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) +{ + gic_routable_irq_domain_ops->unmap(d, irq); +} + static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *controller, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { + unsigned long ret = 0; + if (d->of_node != controller) return -EINVAL; if (intsize < 3) @@ -692,18 +857,78 @@ static int gic_irq_domain_xlate(struct irq_domain *d, *out_hwirq = intspec[1] + 16; /* For SPIs, we need to add 16 more to get the GIC irq ID number */ - if (!intspec[0]) - *out_hwirq += 16; + if (!intspec[0]) { + ret = gic_routable_irq_domain_ops->xlate(d, controller, + intspec, + intsize, + out_hwirq, + out_type); + + if (IS_ERR_VALUE(ret)) + return ret; + } *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; - return 0; + + return ret; } -const struct irq_domain_ops gic_irq_domain_ops = { +#ifdef CONFIG_SMP +static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + gic_cpu_init(&gic_data[0]); + return NOTIFY_OK; +} + +/* + * Notifier for enabling the GIC CPU interface. Set an arbitrarily high + * priority because the GIC needs to be up before the ARM generic timers. 
+ */ +static struct notifier_block gic_cpu_notifier = { + .notifier_call = gic_secondary_init, + .priority = 100, +}; +#endif + +static const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, + .unmap = gic_irq_domain_unmap, .xlate = gic_irq_domain_xlate, }; +/* Default functions for routable irq domain */ +static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + return 0; +} + +static void gic_routable_irq_domain_unmap(struct irq_domain *d, + unsigned int irq) +{ +} + +static int gic_routable_irq_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + *out_hwirq += 16; + return 0; +} + +const struct irq_domain_ops gic_default_routable_irq_domain_ops = { + .map = gic_routable_irq_domain_map, + .unmap = gic_routable_irq_domain_unmap, + .xlate = gic_routable_irq_domain_xlate, +}; + +const struct irq_domain_ops *gic_routable_irq_domain_ops = + &gic_default_routable_irq_domain_ops; + void __init gic_init_bases(unsigned int gic_nr, int irq_start, void __iomem *dist_base, void __iomem *cpu_base, u32 percpu_offset, struct device_node *node) @@ -711,6 +936,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, irq_hw_number_t hwirq_base; struct gic_chip_data *gic; int gic_irqs, irq_base, i; + int nr_routable_irqs; BUG_ON(gic_nr >= MAX_GIC_NR); @@ -729,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, } for_each_possible_cpu(cpu) { - unsigned long offset = percpu_offset * cpu_logical_map(cpu); + u32 mpidr = cpu_logical_map(cpu); + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); + unsigned long offset = percpu_offset * core_id; *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; } @@ -776,22 +1004,35 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic->gic_irqs = gic_irqs; gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); - if (IS_ERR_VALUE(irq_base)) { - WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", - irq_start); - irq_base = irq_start; + + if (of_property_read_u32(node, "arm,routable-irqs", + &nr_routable_irqs)) { + irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, + numa_node_id()); + if (IS_ERR_VALUE(irq_base)) { + WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", + irq_start); + irq_base = irq_start; + } + + gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, + hwirq_base, &gic_irq_domain_ops, gic); + } else { + gic->domain = irq_domain_add_linear(node, nr_routable_irqs, + &gic_irq_domain_ops, + gic); } - gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); + if (WARN_ON(!gic->domain)) return; + if (gic_nr == 0) { #ifdef CONFIG_SMP - set_smp_cross_call(gic_raise_softirq); + set_smp_cross_call(gic_raise_softirq); + register_cpu_notifier(&gic_cpu_notifier); #endif - - set_handle_irq(gic_handle_irq); + set_handle_irq(gic_handle_irq); + } gic_chip.flags |= gic_arch_extn.flags; gic_dist_init(gic); @@ -799,17 +1040,11 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic_pm_init(gic); } -void __cpuinit gic_secondary_init(unsigned int gic_nr) -{ - BUG_ON(gic_nr >= MAX_GIC_NR); - - gic_cpu_init(&gic_data[gic_nr]); -} - #ifdef CONFIG_OF -static int gic_cnt __initdata = 0; +static int 
gic_cnt __initdata; -int __init gic_of_init(struct device_node *node, struct device_node *parent) +static int __init +gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *cpu_base; void __iomem *dist_base; @@ -829,6 +1064,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent) percpu_offset = 0; gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); + if (!gic_cnt) + gic_init_physaddr(node); if (parent) { irq = irq_of_parse_and_map(node, 0); @@ -837,8 +1074,10 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent) gic_cnt++; return 0; } +IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init); IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); +IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c new file mode 100644 index 00000000000..8071c2eb024 --- /dev/null +++ b/drivers/irqchip/irq-imgpdc.c @@ -0,0 +1,499 @@ +/* + * IMG PowerDown Controller (PDC) + * + * Copyright 2010-2013 Imagination Technologies Ltd. + * + * Exposes the syswake and PDC peripheral wake interrupts to the system. + * + */ + +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +/* PDC interrupt register numbers */ + +#define PDC_IRQ_STATUS 0x310 +#define PDC_IRQ_ENABLE 0x314 +#define PDC_IRQ_CLEAR 0x318 +#define PDC_IRQ_ROUTE 0x31c +#define PDC_SYS_WAKE_BASE 0x330 +#define PDC_SYS_WAKE_STRIDE 0x8 +#define PDC_SYS_WAKE_CONFIG_BASE 0x334 +#define PDC_SYS_WAKE_CONFIG_STRIDE 0x8 + +/* PDC interrupt register field masks */ + +#define PDC_IRQ_SYS3 0x08 +#define PDC_IRQ_SYS2 0x04 +#define PDC_IRQ_SYS1 0x02 +#define PDC_IRQ_SYS0 0x01 +#define PDC_IRQ_ROUTE_WU_EN_SYS3 0x08000000 +#define PDC_IRQ_ROUTE_WU_EN_SYS2 0x04000000 +#define PDC_IRQ_ROUTE_WU_EN_SYS1 0x02000000 +#define PDC_IRQ_ROUTE_WU_EN_SYS0 0x01000000 +#define PDC_IRQ_ROUTE_WU_EN_WD 0x00040000 +#define PDC_IRQ_ROUTE_WU_EN_IR 0x00020000 +#define PDC_IRQ_ROUTE_WU_EN_RTC 0x00010000 +#define PDC_IRQ_ROUTE_EXT_EN_SYS3 0x00000800 +#define PDC_IRQ_ROUTE_EXT_EN_SYS2 0x00000400 +#define PDC_IRQ_ROUTE_EXT_EN_SYS1 0x00000200 +#define PDC_IRQ_ROUTE_EXT_EN_SYS0 0x00000100 +#define PDC_IRQ_ROUTE_EXT_EN_WD 0x00000004 +#define PDC_IRQ_ROUTE_EXT_EN_IR 0x00000002 +#define PDC_IRQ_ROUTE_EXT_EN_RTC 0x00000001 +#define PDC_SYS_WAKE_RESET 0x00000010 +#define PDC_SYS_WAKE_INT_MODE 0x0000000e +#define PDC_SYS_WAKE_INT_MODE_SHIFT 1 +#define PDC_SYS_WAKE_PIN_VAL 0x00000001 + +/* PDC interrupt constants */ + +#define PDC_SYS_WAKE_INT_LOW 0x0 +#define PDC_SYS_WAKE_INT_HIGH 0x1 +#define PDC_SYS_WAKE_INT_DOWN 0x2 +#define PDC_SYS_WAKE_INT_UP 0x3 +#define PDC_SYS_WAKE_INT_CHANGE 0x6 +#define PDC_SYS_WAKE_INT_NONE 0x4 + +/** + * struct pdc_intc_priv - private pdc interrupt data. + * @nr_perips: Number of peripheral interrupt signals. + * @nr_syswakes: Number of syswake signals. + * @perip_irqs: List of peripheral IRQ numbers handled. + * @syswake_irq: Shared PDC syswake IRQ number. + * @domain: IRQ domain for PDC peripheral and syswake IRQs. + * @pdc_base: Base of PDC registers. + * @irq_route: Cached version of PDC_IRQ_ROUTE register. 
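Editorial aside, not part of the commit: the PDC_SYS_WAKE_* constants above place one configuration register per syswake at a fixed stride, and the driver later read-modify-writes the INT_MODE field inside it. A small illustration of that offset and field arithmetic, with an arbitrary syswake index:

#include <stdio.h>

#define PDC_SYS_WAKE_BASE		0x330
#define PDC_SYS_WAKE_STRIDE		0x8
#define PDC_SYS_WAKE_INT_MODE		0x0000000e
#define PDC_SYS_WAKE_INT_MODE_SHIFT	1
#define PDC_SYS_WAKE_INT_UP		0x3

int main(void)
{
	unsigned int syswake = 2;
	unsigned int regoff = PDC_SYS_WAKE_BASE + syswake * PDC_SYS_WAKE_STRIDE;
	unsigned int reg = 0;	/* value that would have been read back */

	reg &= ~PDC_SYS_WAKE_INT_MODE;
	reg |= PDC_SYS_WAKE_INT_UP << PDC_SYS_WAKE_INT_MODE_SHIFT;
	printf("syswake %u config at 0x%x, value 0x%x\n", syswake, regoff, reg);
	return 0;
}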
+ * @lock: Lock to protect the PDC syswake registers and the cached + * values of those registers in this struct. + */ +struct pdc_intc_priv { + unsigned int nr_perips; + unsigned int nr_syswakes; + unsigned int *perip_irqs; + unsigned int syswake_irq; + struct irq_domain *domain; + void __iomem *pdc_base; + + u32 irq_route; + raw_spinlock_t lock; +}; + +static void pdc_write(struct pdc_intc_priv *priv, unsigned int reg_offs, + unsigned int data) +{ + iowrite32(data, priv->pdc_base + reg_offs); +} + +static unsigned int pdc_read(struct pdc_intc_priv *priv, + unsigned int reg_offs) +{ + return ioread32(priv->pdc_base + reg_offs); +} + +/* Generic IRQ callbacks */ + +#define SYS0_HWIRQ 8 + +static unsigned int hwirq_is_syswake(irq_hw_number_t hw) +{ + return hw >= SYS0_HWIRQ; +} + +static unsigned int hwirq_to_syswake(irq_hw_number_t hw) +{ + return hw - SYS0_HWIRQ; +} + +static irq_hw_number_t syswake_to_hwirq(unsigned int syswake) +{ + return SYS0_HWIRQ + syswake; +} + +static struct pdc_intc_priv *irqd_to_priv(struct irq_data *data) +{ + return (struct pdc_intc_priv *)data->domain->host_data; +} + +/* + * perip_irq_mask() and perip_irq_unmask() use IRQ_ROUTE which also contains + * wake bits, therefore we cannot use the generic irqchip mask callbacks as they + * cache the mask. + */ + +static void perip_irq_mask(struct irq_data *data) +{ + struct pdc_intc_priv *priv = irqd_to_priv(data); + + raw_spin_lock(&priv->lock); + priv->irq_route &= ~data->mask; + pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route); + raw_spin_unlock(&priv->lock); +} + +static void perip_irq_unmask(struct irq_data *data) +{ + struct pdc_intc_priv *priv = irqd_to_priv(data); + + raw_spin_lock(&priv->lock); + priv->irq_route |= data->mask; + pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route); + raw_spin_unlock(&priv->lock); +} + +static int syswake_irq_set_type(struct irq_data *data, unsigned int flow_type) +{ + struct pdc_intc_priv *priv = irqd_to_priv(data); + unsigned int syswake = hwirq_to_syswake(data->hwirq); + unsigned int irq_mode; + unsigned int soc_sys_wake_regoff, soc_sys_wake; + + /* translate to syswake IRQ mode */ + switch (flow_type) { + case IRQ_TYPE_EDGE_BOTH: + irq_mode = PDC_SYS_WAKE_INT_CHANGE; + break; + case IRQ_TYPE_EDGE_RISING: + irq_mode = PDC_SYS_WAKE_INT_UP; + break; + case IRQ_TYPE_EDGE_FALLING: + irq_mode = PDC_SYS_WAKE_INT_DOWN; + break; + case IRQ_TYPE_LEVEL_HIGH: + irq_mode = PDC_SYS_WAKE_INT_HIGH; + break; + case IRQ_TYPE_LEVEL_LOW: + irq_mode = PDC_SYS_WAKE_INT_LOW; + break; + default: + return -EINVAL; + } + + raw_spin_lock(&priv->lock); + + /* set the IRQ mode */ + soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + syswake*PDC_SYS_WAKE_STRIDE; + soc_sys_wake = pdc_read(priv, soc_sys_wake_regoff); + soc_sys_wake &= ~PDC_SYS_WAKE_INT_MODE; + soc_sys_wake |= irq_mode << PDC_SYS_WAKE_INT_MODE_SHIFT; + pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake); + + /* and update the handler */ + irq_setup_alt_chip(data, flow_type); + + raw_spin_unlock(&priv->lock); + + return 0; +} + +/* applies to both peripheral and syswake interrupts */ +static int pdc_irq_set_wake(struct irq_data *data, unsigned int on) +{ + struct pdc_intc_priv *priv = irqd_to_priv(data); + irq_hw_number_t hw = data->hwirq; + unsigned int mask = (1 << 16) << hw; + unsigned int dst_irq; + + raw_spin_lock(&priv->lock); + if (on) + priv->irq_route |= mask; + else + priv->irq_route &= ~mask; + pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route); + raw_spin_unlock(&priv->lock); + + /* control the destination IRQ wakeup too for standby mode */ + if 
(hwirq_is_syswake(hw)) + dst_irq = priv->syswake_irq; + else + dst_irq = priv->perip_irqs[hw]; + irq_set_irq_wake(dst_irq, on); + + return 0; +} + +static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc) +{ + struct pdc_intc_priv *priv; + unsigned int i, irq_no; + + priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc); + + /* find the peripheral number */ + for (i = 0; i < priv->nr_perips; ++i) + if (irq == priv->perip_irqs[i]) + goto found; + + /* should never get here */ + return; +found: + + /* pass on the interrupt */ + irq_no = irq_linear_revmap(priv->domain, i); + generic_handle_irq(irq_no); +} + +static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc) +{ + struct pdc_intc_priv *priv; + unsigned int syswake, irq_no; + unsigned int status; + + priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc); + + status = pdc_read(priv, PDC_IRQ_STATUS) & + pdc_read(priv, PDC_IRQ_ENABLE); + status &= (1 << priv->nr_syswakes) - 1; + + for (syswake = 0; status; status >>= 1, ++syswake) { + /* Has this sys_wake triggered? */ + if (!(status & 1)) + continue; + + irq_no = irq_linear_revmap(priv->domain, + syswake_to_hwirq(syswake)); + generic_handle_irq(irq_no); + } +} + +static void pdc_intc_setup(struct pdc_intc_priv *priv) +{ + int i; + unsigned int soc_sys_wake_regoff; + unsigned int soc_sys_wake; + + /* + * Mask all syswake interrupts before routing, or we could receive an + * interrupt before we're ready to handle it. + */ + pdc_write(priv, PDC_IRQ_ENABLE, 0); + + /* + * Enable routing of all syswakes + * Disable all wake sources + */ + priv->irq_route = ((PDC_IRQ_ROUTE_EXT_EN_SYS0 << priv->nr_syswakes) - + PDC_IRQ_ROUTE_EXT_EN_SYS0); + pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route); + + /* Initialise syswake IRQ */ + for (i = 0; i < priv->nr_syswakes; ++i) { + /* set the IRQ mode to none */ + soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + i*PDC_SYS_WAKE_STRIDE; + soc_sys_wake = PDC_SYS_WAKE_INT_NONE + << PDC_SYS_WAKE_INT_MODE_SHIFT; + pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake); + } +} + +static int pdc_intc_probe(struct platform_device *pdev) +{ + struct pdc_intc_priv *priv; + struct device_node *node = pdev->dev.of_node; + struct resource *res_regs; + struct irq_chip_generic *gc; + unsigned int i; + int irq, ret; + u32 val; + + if (!node) + return -ENOENT; + + /* Get registers */ + res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_regs == NULL) { + dev_err(&pdev->dev, "cannot find registers resource\n"); + return -ENOENT; + } + + /* Allocate driver data */ + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "cannot allocate device data\n"); + return -ENOMEM; + } + raw_spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); + + /* Ioremap the registers */ + priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start, + res_regs->end - res_regs->start); + if (!priv->pdc_base) + return -EIO; + + /* Get number of peripherals */ + ret = of_property_read_u32(node, "num-perips", &val); + if (ret) { + dev_err(&pdev->dev, "No num-perips node property found\n"); + return -EINVAL; + } + if (val > SYS0_HWIRQ) { + dev_err(&pdev->dev, "num-perips (%u) out of range\n", val); + return -EINVAL; + } + priv->nr_perips = val; + + /* Get number of syswakes */ + ret = of_property_read_u32(node, "num-syswakes", &val); + if (ret) { + dev_err(&pdev->dev, "No num-syswakes node property found\n"); + return -EINVAL; + } + if (val > SYS0_HWIRQ) { + dev_err(&pdev->dev, "num-syswakes (%u) out of 
range\n", val); + return -EINVAL; + } + priv->nr_syswakes = val; + + /* Get peripheral IRQ numbers */ + priv->perip_irqs = devm_kzalloc(&pdev->dev, 4 * priv->nr_perips, + GFP_KERNEL); + if (!priv->perip_irqs) { + dev_err(&pdev->dev, "cannot allocate perip IRQ list\n"); + return -ENOMEM; + } + for (i = 0; i < priv->nr_perips; ++i) { + irq = platform_get_irq(pdev, 1 + i); + if (irq < 0) { + dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i); + return irq; + } + priv->perip_irqs[i] = irq; + } + /* check if too many were provided */ + if (platform_get_irq(pdev, 1 + i) >= 0) { + dev_err(&pdev->dev, "surplus perip IRQs detected\n"); + return -EINVAL; + } + + /* Get syswake IRQ number */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "cannot find syswake IRQ\n"); + return irq; + } + priv->syswake_irq = irq; + + /* Set up an IRQ domain */ + priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops, + priv); + if (unlikely(!priv->domain)) { + dev_err(&pdev->dev, "cannot add IRQ domain\n"); + return -ENOMEM; + } + + /* + * Set up 2 generic irq chips with 2 chip types. + * The first one for peripheral irqs (only 1 chip type used) + * The second one for syswake irqs (edge and level chip types) + */ + ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc", + handle_level_irq, 0, 0, + IRQ_GC_INIT_NESTED_LOCK); + if (ret) + goto err_generic; + + /* peripheral interrupt chip */ + + gc = irq_get_domain_generic_chip(priv->domain, 0); + gc->unused = ~(BIT(priv->nr_perips) - 1); + gc->reg_base = priv->pdc_base; + /* + * IRQ_ROUTE contains wake bits, so we can't use the generic versions as + * they cache the mask + */ + gc->chip_types[0].regs.mask = PDC_IRQ_ROUTE; + gc->chip_types[0].chip.irq_mask = perip_irq_mask; + gc->chip_types[0].chip.irq_unmask = perip_irq_unmask; + gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake; + + /* syswake interrupt chip */ + + gc = irq_get_domain_generic_chip(priv->domain, 8); + gc->unused = ~(BIT(priv->nr_syswakes) - 1); + gc->reg_base = priv->pdc_base; + + /* edge interrupts */ + gc->chip_types[0].type = IRQ_TYPE_EDGE_BOTH; + gc->chip_types[0].handler = handle_edge_irq; + gc->chip_types[0].regs.ack = PDC_IRQ_CLEAR; + gc->chip_types[0].regs.mask = PDC_IRQ_ENABLE; + gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_set_type = syswake_irq_set_type; + gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake; + /* for standby we pass on to the shared syswake IRQ */ + gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND; + + /* level interrupts */ + gc->chip_types[1].type = IRQ_TYPE_LEVEL_MASK; + gc->chip_types[1].handler = handle_level_irq; + gc->chip_types[1].regs.ack = PDC_IRQ_CLEAR; + gc->chip_types[1].regs.mask = PDC_IRQ_ENABLE; + gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; + gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types[1].chip.irq_set_type = syswake_irq_set_type; + gc->chip_types[1].chip.irq_set_wake = pdc_irq_set_wake; + /* for standby we pass on to the shared syswake IRQ */ + gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND; + + /* Set up the hardware to enable interrupt routing */ + pdc_intc_setup(priv); + + /* Setup chained handlers for the peripheral IRQs */ + for (i = 0; i < priv->nr_perips; ++i) { + irq = priv->perip_irqs[i]; + irq_set_handler_data(irq, priv); + 
irq_set_chained_handler(irq, pdc_intc_perip_isr); + } + + /* Setup chained handler for the syswake IRQ */ + irq_set_handler_data(priv->syswake_irq, priv); + irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr); + + dev_info(&pdev->dev, + "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n", + priv->nr_perips, + priv->nr_syswakes); + + return 0; +err_generic: + irq_domain_remove(priv->domain); + return ret; +} + +static int pdc_intc_remove(struct platform_device *pdev) +{ + struct pdc_intc_priv *priv = platform_get_drvdata(pdev); + + irq_domain_remove(priv->domain); + return 0; +} + +static const struct of_device_id pdc_intc_match[] = { + { .compatible = "img,pdc-intc" }, + {} +}; + +static struct platform_driver pdc_intc_driver = { + .driver = { + .name = "pdc-intc", + .of_match_table = pdc_intc_match, + }, + .probe = pdc_intc_probe, + .remove = pdc_intc_remove, +}; + +static int __init pdc_intc_init(void) +{ + return platform_driver_register(&pdc_intc_driver); +} +core_initcall(pdc_intc_init); diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c index 92c41ab4dbf..2cb474ad880 100644 --- a/drivers/irqchip/irq-metag-ext.c +++ b/drivers/irqchip/irq-metag-ext.c @@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data, * one cpu (the interrupt code doesn't support it), so we just * pick the first cpu we find in 'cpumask'. */ - cpu = cpumask_any(cpumask); + cpu = cpumask_any_and(cpumask, cpu_online_mask); thread = cpu_2_hwthread_id[cpu]; metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c index 8e94d7a3b20..c16c186d97d 100644 --- a/drivers/irqchip/irq-metag.c +++ b/drivers/irqchip/irq-metag.c @@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data, * one cpu (the interrupt code doesn't support it), so we just * pick the first cpu we find in 'cpumask'. */ - cpu = cpumask_any(cpumask); + cpu = cpumask_any_and(cpumask, cpu_online_mask); thread = cpu_2_hwthread_id[cpu]; metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c new file mode 100644 index 00000000000..1c3e2c9b46b --- /dev/null +++ b/drivers/irqchip/irq-mmp.c @@ -0,0 +1,493 @@ +/* + * linux/arch/arm/mach-mmp/irq.c + * + * Generic IRQ handling, GPIO IRQ demultiplexing, etc. + * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd. + * + * Author: Bin Yang <bin.yang@marvell.com> + * Haojian Zhuang <haojian.zhuang@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
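Editorial aside, not part of the commit: the two metag hunks above replace cpumask_any() with cpumask_any_and() against cpu_online_mask, so the selected hardware thread is guaranteed to be online rather than merely requested. A toy model of the difference, using made-up bitmasks in place of struct cpumask:

#include <stdio.h>
#include <stdint.h>

/* stand-in for cpumask_first()/cpumask_any(): lowest set bit, or -1 */
static int mask_first(uint32_t mask)
{
	return mask ? __builtin_ctz(mask) : -1;
}

int main(void)
{
	uint32_t requested = 0x0c;	/* affinity asks for CPUs 2 and 3 */
	uint32_t online   = 0x0b;	/* CPUs 0, 1 and 3 are online */

	/* old behaviour: first requested CPU, even if it is offline */
	printf("cpumask_any     -> CPU %d\n", mask_first(requested));
	/* new behaviour: first CPU that is both requested and online */
	printf("cpumask_any_and -> CPU %d\n", mask_first(requested & online));
	return 0;
}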
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <asm/exception.h> +#include <asm/hardirq.h> + +#include "irqchip.h" + +#define MAX_ICU_NR 16 + +#define PJ1_INT_SEL 0x10c +#define PJ4_INT_SEL 0x104 + +/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */ +#define SEL_INT_PENDING (1 << 6) +#define SEL_INT_NUM_MASK 0x3f + +struct icu_chip_data { + int nr_irqs; + unsigned int virq_base; + unsigned int cascade_irq; + void __iomem *reg_status; + void __iomem *reg_mask; + unsigned int conf_enable; + unsigned int conf_disable; + unsigned int conf_mask; + unsigned int clr_mfp_irq_base; + unsigned int clr_mfp_hwirq; + struct irq_domain *domain; +}; + +struct mmp_intc_conf { + unsigned int conf_enable; + unsigned int conf_disable; + unsigned int conf_mask; +}; + +static void __iomem *mmp_icu_base; +static struct icu_chip_data icu_data[MAX_ICU_NR]; +static int max_icu_nr; + +extern void mmp2_clear_pmic_int(void); + +static void icu_mask_ack_irq(struct irq_data *d) +{ + struct irq_domain *domain = d->domain; + struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; + int hwirq; + u32 r; + + hwirq = d->irq - data->virq_base; + if (data == &icu_data[0]) { + r = readl_relaxed(mmp_icu_base + (hwirq << 2)); + r &= ~data->conf_mask; + r |= data->conf_disable; + writel_relaxed(r, mmp_icu_base + (hwirq << 2)); + } else { +#ifdef CONFIG_CPU_MMP2 + if ((data->virq_base == data->clr_mfp_irq_base) + && (hwirq == data->clr_mfp_hwirq)) + mmp2_clear_pmic_int(); +#endif + r = readl_relaxed(data->reg_mask) | (1 << hwirq); + writel_relaxed(r, data->reg_mask); + } +} + +static void icu_mask_irq(struct irq_data *d) +{ + struct irq_domain *domain = d->domain; + struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; + int hwirq; + u32 r; + + hwirq = d->irq - data->virq_base; + if (data == &icu_data[0]) { + r = readl_relaxed(mmp_icu_base + (hwirq << 2)); + r &= ~data->conf_mask; + r |= data->conf_disable; + writel_relaxed(r, mmp_icu_base + (hwirq << 2)); + } else { + r = readl_relaxed(data->reg_mask) | (1 << hwirq); + writel_relaxed(r, data->reg_mask); + } +} + +static void icu_unmask_irq(struct irq_data *d) +{ + struct irq_domain *domain = d->domain; + struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; + int hwirq; + u32 r; + + hwirq = d->irq - data->virq_base; + if (data == &icu_data[0]) { + r = readl_relaxed(mmp_icu_base + (hwirq << 2)); + r &= ~data->conf_mask; + r |= data->conf_enable; + writel_relaxed(r, mmp_icu_base + (hwirq << 2)); + } else { + r = readl_relaxed(data->reg_mask) & ~(1 << hwirq); + writel_relaxed(r, data->reg_mask); + } +} + +struct irq_chip icu_irq_chip = { + .name = "icu_irq", + .irq_mask = icu_mask_irq, + .irq_mask_ack = icu_mask_ack_irq, + .irq_unmask = icu_unmask_irq, +}; + +static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc) +{ + struct irq_domain *domain; + struct icu_chip_data *data; + int i; + unsigned long mask, status, n; + + for (i = 1; i < max_icu_nr; i++) { + if (irq == icu_data[i].cascade_irq) { + domain = icu_data[i].domain; + data = (struct icu_chip_data *)domain->host_data; + break; + } + } + if (i >= max_icu_nr) { + pr_err("Spurious irq %d in MMP INTC\n", irq); + return; + } + + mask = readl_relaxed(data->reg_mask); + while (1) { + status = readl_relaxed(data->reg_status) & ~mask; + if (status == 0) + break; + for_each_set_bit(n, 
&status, BITS_PER_LONG) { + generic_handle_irq(icu_data[i].virq_base + n); + } + } +} + +static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); + set_irq_flags(irq, IRQF_VALID); + return 0; +} + +static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + *out_hwirq = intspec[0]; + return 0; +} + +const struct irq_domain_ops mmp_irq_domain_ops = { + .map = mmp_irq_domain_map, + .xlate = mmp_irq_domain_xlate, +}; + +static struct mmp_intc_conf mmp_conf = { + .conf_enable = 0x51, + .conf_disable = 0x0, + .conf_mask = 0x7f, +}; + +static struct mmp_intc_conf mmp2_conf = { + .conf_enable = 0x20, + .conf_disable = 0x0, + .conf_mask = 0x7f, +}; + +static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) +{ + int irq, hwirq; + + hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL); + if (!(hwirq & SEL_INT_PENDING)) + return; + hwirq &= SEL_INT_NUM_MASK; + irq = irq_find_mapping(icu_data[0].domain, hwirq); + handle_IRQ(irq, regs); +} + +static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs) +{ + int irq, hwirq; + + hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL); + if (!(hwirq & SEL_INT_PENDING)) + return; + hwirq &= SEL_INT_NUM_MASK; + irq = irq_find_mapping(icu_data[0].domain, hwirq); + handle_IRQ(irq, regs); +} + +/* MMP (ARMv5) */ +void __init icu_init_irq(void) +{ + int irq; + + max_icu_nr = 1; + mmp_icu_base = ioremap(0xd4282000, 0x1000); + icu_data[0].conf_enable = mmp_conf.conf_enable; + icu_data[0].conf_disable = mmp_conf.conf_disable; + icu_data[0].conf_mask = mmp_conf.conf_mask; + icu_data[0].nr_irqs = 64; + icu_data[0].virq_base = 0; + icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, + &irq_domain_simple_ops, + &icu_data[0]); + for (irq = 0; irq < 64; irq++) { + icu_mask_irq(irq_get_irq_data(irq)); + irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); + set_irq_flags(irq, IRQF_VALID); + } + irq_set_default_host(icu_data[0].domain); + set_handle_irq(mmp_handle_irq); +} + +/* MMP2 (ARMv7) */ +void __init mmp2_init_icu(void) +{ + int irq, end; + + max_icu_nr = 8; + mmp_icu_base = ioremap(0xd4282000, 0x1000); + icu_data[0].conf_enable = mmp2_conf.conf_enable; + icu_data[0].conf_disable = mmp2_conf.conf_disable; + icu_data[0].conf_mask = mmp2_conf.conf_mask; + icu_data[0].nr_irqs = 64; + icu_data[0].virq_base = 0; + icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, + &irq_domain_simple_ops, + &icu_data[0]); + icu_data[1].reg_status = mmp_icu_base + 0x150; + icu_data[1].reg_mask = mmp_icu_base + 0x168; + icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base + + icu_data[0].nr_irqs; + icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */ + icu_data[1].nr_irqs = 2; + icu_data[1].cascade_irq = 4; + icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs; + icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs, + icu_data[1].virq_base, 0, + &irq_domain_simple_ops, + &icu_data[1]); + icu_data[2].reg_status = mmp_icu_base + 0x154; + icu_data[2].reg_mask = mmp_icu_base + 0x16c; + icu_data[2].nr_irqs = 2; + icu_data[2].cascade_irq = 5; + icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs; + icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs, + icu_data[2].virq_base, 0, + &irq_domain_simple_ops, + &icu_data[2]); + icu_data[3].reg_status = 
mmp_icu_base + 0x180; + icu_data[3].reg_mask = mmp_icu_base + 0x17c; + icu_data[3].nr_irqs = 3; + icu_data[3].cascade_irq = 9; + icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs; + icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs, + icu_data[3].virq_base, 0, + &irq_domain_simple_ops, + &icu_data[3]); + icu_data[4].reg_status = mmp_icu_base + 0x158; + icu_data[4].reg_mask = mmp_icu_base + 0x170; + icu_data[4].nr_irqs = 5; + icu_data[4].cascade_irq = 17; + icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs; + icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs, + icu_data[4].virq_base, 0, + &irq_domain_simple_ops, + &icu_data[4]); + icu_data[5].reg_status = mmp_icu_base + 0x15c; + icu_data[5].reg_mask = mmp_icu_base + 0x174; + icu_data[5].nr_irqs = 15; + icu_data[5].cascade_irq = 35; + icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs; + icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs, + icu_data[5].virq_base, 0, + &irq_domain_simple_ops, + &icu_data[5]); + icu_data[6].reg_status = mmp_icu_base + 0x160; + icu_data[6].reg_mask = mmp_icu_base + 0x178; + icu_data[6].nr_irqs = 2; + icu_data[6].cascade_irq = 51; + icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs; + icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs, + icu_data[6].virq_base, 0, + &irq_domain_simple_ops, + &icu_data[6]); + icu_data[7].reg_status = mmp_icu_base + 0x188; + icu_data[7].reg_mask = mmp_icu_base + 0x184; + icu_data[7].nr_irqs = 2; + icu_data[7].cascade_irq = 55; + icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs; + icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs, + icu_data[7].virq_base, 0, + &irq_domain_simple_ops, + &icu_data[7]); + end = icu_data[7].virq_base + icu_data[7].nr_irqs; + for (irq = 0; irq < end; irq++) { + icu_mask_irq(irq_get_irq_data(irq)); + if (irq == icu_data[1].cascade_irq || + irq == icu_data[2].cascade_irq || + irq == icu_data[3].cascade_irq || + irq == icu_data[4].cascade_irq || + irq == icu_data[5].cascade_irq || + irq == icu_data[6].cascade_irq || + irq == icu_data[7].cascade_irq) { + irq_set_chip(irq, &icu_irq_chip); + irq_set_chained_handler(irq, icu_mux_irq_demux); + } else { + irq_set_chip_and_handler(irq, &icu_irq_chip, + handle_level_irq); + } + set_irq_flags(irq, IRQF_VALID); + } + irq_set_default_host(icu_data[0].domain); + set_handle_irq(mmp2_handle_irq); +} + +#ifdef CONFIG_OF +static int __init mmp_init_bases(struct device_node *node) +{ + int ret, nr_irqs, irq, i = 0; + + ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs); + if (ret) { + pr_err("Not found mrvl,intc-nr-irqs property\n"); + return ret; + } + + mmp_icu_base = of_iomap(node, 0); + if (!mmp_icu_base) { + pr_err("Failed to get interrupt controller register\n"); + return -ENOMEM; + } + + icu_data[0].virq_base = 0; + icu_data[0].domain = irq_domain_add_linear(node, nr_irqs, + &mmp_irq_domain_ops, + &icu_data[0]); + for (irq = 0; irq < nr_irqs; irq++) { + ret = irq_create_mapping(icu_data[0].domain, irq); + if (!ret) { + pr_err("Failed to mapping hwirq\n"); + goto err; + } + if (!irq) + icu_data[0].virq_base = ret; + } + icu_data[0].nr_irqs = nr_irqs; + return 0; +err: + if (icu_data[0].virq_base) { + for (i = 0; i < irq; i++) + irq_dispose_mapping(icu_data[0].virq_base + i); + } + irq_domain_remove(icu_data[0].domain); + iounmap(mmp_icu_base); + return -EINVAL; +} + +static int __init mmp_of_init(struct device_node *node, + 
struct device_node *parent) +{ + int ret; + + ret = mmp_init_bases(node); + if (ret < 0) + return ret; + + icu_data[0].conf_enable = mmp_conf.conf_enable; + icu_data[0].conf_disable = mmp_conf.conf_disable; + icu_data[0].conf_mask = mmp_conf.conf_mask; + irq_set_default_host(icu_data[0].domain); + set_handle_irq(mmp_handle_irq); + max_icu_nr = 1; + return 0; +} +IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init); + +static int __init mmp2_of_init(struct device_node *node, + struct device_node *parent) +{ + int ret; + + ret = mmp_init_bases(node); + if (ret < 0) + return ret; + + icu_data[0].conf_enable = mmp2_conf.conf_enable; + icu_data[0].conf_disable = mmp2_conf.conf_disable; + icu_data[0].conf_mask = mmp2_conf.conf_mask; + irq_set_default_host(icu_data[0].domain); + set_handle_irq(mmp2_handle_irq); + max_icu_nr = 1; + return 0; +} +IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init); + +static int __init mmp2_mux_of_init(struct device_node *node, + struct device_node *parent) +{ + struct resource res; + int i, ret, irq, j = 0; + u32 nr_irqs, mfp_irq; + + if (!parent) + return -ENODEV; + + i = max_icu_nr; + ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", + &nr_irqs); + if (ret) { + pr_err("Not found mrvl,intc-nr-irqs property\n"); + return -EINVAL; + } + ret = of_address_to_resource(node, 0, &res); + if (ret < 0) { + pr_err("Not found reg property\n"); + return -EINVAL; + } + icu_data[i].reg_status = mmp_icu_base + res.start; + ret = of_address_to_resource(node, 1, &res); + if (ret < 0) { + pr_err("Not found reg property\n"); + return -EINVAL; + } + icu_data[i].reg_mask = mmp_icu_base + res.start; + icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0); + if (!icu_data[i].cascade_irq) + return -EINVAL; + + icu_data[i].virq_base = 0; + icu_data[i].domain = irq_domain_add_linear(node, nr_irqs, + &mmp_irq_domain_ops, + &icu_data[i]); + for (irq = 0; irq < nr_irqs; irq++) { + ret = irq_create_mapping(icu_data[i].domain, irq); + if (!ret) { + pr_err("Failed to mapping hwirq\n"); + goto err; + } + if (!irq) + icu_data[i].virq_base = ret; + } + icu_data[i].nr_irqs = nr_irqs; + if (!of_property_read_u32(node, "mrvl,clr-mfp-irq", + &mfp_irq)) { + icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base; + icu_data[i].clr_mfp_hwirq = mfp_irq; + } + irq_set_chained_handler(icu_data[i].cascade_irq, + icu_mux_irq_demux); + max_icu_nr++; + return 0; +err: + if (icu_data[i].virq_base) { + for (j = 0; j < irq; j++) + irq_dispose_mapping(icu_data[i].virq_base + j); + } + irq_domain_remove(icu_data[i].domain); + return -EINVAL; +} +IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init); +#endif diff --git a/drivers/irqchip/irq-moxart.c b/drivers/irqchip/irq-moxart.c new file mode 100644 index 00000000000..00b3cc908f7 --- /dev/null +++ b/drivers/irqchip/irq-moxart.c @@ -0,0 +1,117 @@ +/* + * MOXA ART SoCs IRQ chip driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
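Editorial aside, not part of the commit: the MMP init paths above follow a pattern common in drivers of this era, creating a linear domain and then eagerly mapping every hwirq while remembering the first virq as the base, with irq_dispose_mapping() unwinding on failure. A condensed sketch of that pattern; the name example_map_all and its arguments are hypothetical, only the kernel calls (which would need <linux/irqdomain.h> and <linux/errno.h>) are taken from the driver:

static int example_map_all(struct device_node *node, unsigned int nr_irqs,
			   const struct irq_domain_ops *ops, void *host_data,
			   unsigned int *virq_base)
{
	struct irq_domain *domain;
	unsigned int hwirq, virq;

	domain = irq_domain_add_linear(node, nr_irqs, ops, host_data);
	if (!domain)
		return -ENOMEM;

	for (hwirq = 0; hwirq < nr_irqs; hwirq++) {
		virq = irq_create_mapping(domain, hwirq);
		if (!virq) {
			/* unwind the mappings created so far */
			while (hwirq--)
				irq_dispose_mapping(*virq_base + hwirq);
			irq_domain_remove(domain);
			return -EINVAL;
		}
		if (!hwirq)
			*virq_base = virq;	/* first mapping becomes the base */
	}
	return 0;
}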
+ */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> + +#include <asm/exception.h> + +#include "irqchip.h" + +#define IRQ_SOURCE_REG 0 +#define IRQ_MASK_REG 0x04 +#define IRQ_CLEAR_REG 0x08 +#define IRQ_MODE_REG 0x0c +#define IRQ_LEVEL_REG 0x10 +#define IRQ_STATUS_REG 0x14 + +#define FIQ_SOURCE_REG 0x20 +#define FIQ_MASK_REG 0x24 +#define FIQ_CLEAR_REG 0x28 +#define FIQ_MODE_REG 0x2c +#define FIQ_LEVEL_REG 0x30 +#define FIQ_STATUS_REG 0x34 + + +struct moxart_irq_data { + void __iomem *base; + struct irq_domain *domain; + unsigned int interrupt_mask; +}; + +static struct moxart_irq_data intc; + +static void __exception_irq_entry handle_irq(struct pt_regs *regs) +{ + u32 irqstat; + int hwirq; + + irqstat = readl(intc.base + IRQ_STATUS_REG); + + while (irqstat) { + hwirq = ffs(irqstat) - 1; + handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); + irqstat &= ~(1 << hwirq); + } +} + +static int __init moxart_of_intc_init(struct device_node *node, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + int ret; + struct irq_chip_generic *gc; + + intc.base = of_iomap(node, 0); + if (!intc.base) { + pr_err("%s: unable to map IC registers\n", + node->full_name); + return -EINVAL; + } + + intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, + intc.base); + if (!intc.domain) { + pr_err("%s: unable to create IRQ domain\n", node->full_name); + return -EINVAL; + } + + ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1, + "MOXARTINTC", handle_edge_irq, + clr, 0, IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("%s: could not allocate generic chip\n", + node->full_name); + irq_domain_remove(intc.domain); + return -EINVAL; + } + + ret = of_property_read_u32(node, "interrupt-mask", + &intc.interrupt_mask); + if (ret) + pr_err("%s: could not read interrupt-mask DT property\n", + node->full_name); + + gc = irq_get_domain_generic_chip(intc.domain, 0); + + gc->reg_base = intc.base; + gc->chip_types[0].regs.mask = IRQ_MASK_REG; + gc->chip_types[0].regs.ack = IRQ_CLEAR_REG; + gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + + writel(0, intc.base + IRQ_MASK_REG); + writel(0xffffffff, intc.base + IRQ_CLEAR_REG); + + writel(intc.interrupt_mask, intc.base + IRQ_MODE_REG); + writel(intc.interrupt_mask, intc.base + IRQ_LEVEL_REG); + + set_handle_irq(handle_irq); + + return 0; +} +IRQCHIP_DECLARE(moxa_moxart_ic, "moxa,moxart-ic", moxart_of_intc_init); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c new file mode 100644 index 00000000000..4044ff28766 --- /dev/null +++ b/drivers/irqchip/irq-mxs.c @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
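Editorial aside, not part of the commit: the MOXA ART flow handler above drains IRQ_STATUS_REG lowest bit first, remapping each hardware bit through the domain before handling it. Stripped of the kernel plumbing, the loop reduces to the following, with an invented status value and a stand-in for generic interrupt handling:

#include <stdio.h>
#include <stdint.h>

static void fake_handle(unsigned int hwirq)
{
	printf("dispatch hwirq %u\n", hwirq);
}

int main(void)
{
	uint32_t irqstat = 0x00000212;	/* pretend IRQ_STATUS_REG readout */

	while (irqstat) {
		unsigned int hwirq = __builtin_ffs(irqstat) - 1;

		fake_handle(hwirq);		/* lowest pending bit first */
		irqstat &= ~(1u << hwirq);
	}
	return 0;
}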
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/stmp_device.h> +#include <asm/exception.h> + +#include "irqchip.h" + +#define HW_ICOLL_VECTOR 0x0000 +#define HW_ICOLL_LEVELACK 0x0010 +#define HW_ICOLL_CTRL 0x0020 +#define HW_ICOLL_STAT_OFFSET 0x0070 +#define HW_ICOLL_INTERRUPTn_SET(n) (0x0124 + (n) * 0x10) +#define HW_ICOLL_INTERRUPTn_CLR(n) (0x0128 + (n) * 0x10) +#define BM_ICOLL_INTERRUPTn_ENABLE 0x00000004 +#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1 + +#define ICOLL_NUM_IRQS 128 + +static void __iomem *icoll_base; +static struct irq_domain *icoll_domain; + +static void icoll_ack_irq(struct irq_data *d) +{ + /* + * The Interrupt Collector is able to prioritize irqs. + * Currently only level 0 is used. So acking can use + * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally. + */ + __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0, + icoll_base + HW_ICOLL_LEVELACK); +} + +static void icoll_mask_irq(struct irq_data *d) +{ + __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, + icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq)); +} + +static void icoll_unmask_irq(struct irq_data *d) +{ + __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, + icoll_base + HW_ICOLL_INTERRUPTn_SET(d->hwirq)); +} + +static struct irq_chip mxs_icoll_chip = { + .irq_ack = icoll_ack_irq, + .irq_mask = icoll_mask_irq, + .irq_unmask = icoll_unmask_irq, +}; + +asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) +{ + u32 irqnr; + + irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); + __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); + irqnr = irq_find_mapping(icoll_domain, irqnr); + handle_IRQ(irqnr, regs); +} + +static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); + set_irq_flags(virq, IRQF_VALID); + + return 0; +} + +static struct irq_domain_ops icoll_irq_domain_ops = { + .map = icoll_irq_domain_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int __init icoll_of_init(struct device_node *np, + struct device_node *interrupt_parent) +{ + icoll_base = of_iomap(np, 0); + WARN_ON(!icoll_base); + + /* + * Interrupt Collector reset, which initializes the priority + * for each irq to level 0. + */ + stmp_reset_block(icoll_base + HW_ICOLL_CTRL); + + icoll_domain = irq_domain_add_linear(np, ICOLL_NUM_IRQS, + &icoll_irq_domain_ops, NULL); + return icoll_domain ? 0 : -ENODEV; +} +IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init); diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c new file mode 100644 index 00000000000..70bdf6edb7b --- /dev/null +++ b/drivers/irqchip/irq-nvic.c @@ -0,0 +1,117 @@ +/* + * drivers/irq/irq-nvic.c + * + * Copyright (C) 2008 ARM Limited, All Rights Reserved. + * Copyright (C) 2013 Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
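Editorial aside, not part of the commit: the icoll mask and unmask callbacks above lean on the SET/CLR mirror registers laid out at a 0x10 stride per interrupt, always writing the same enable bit. A quick illustration of the addresses they touch, for an arbitrary hwirq:

#include <stdio.h>

#define HW_ICOLL_INTERRUPTn_SET(n)	(0x0124 + (n) * 0x10)
#define HW_ICOLL_INTERRUPTn_CLR(n)	(0x0128 + (n) * 0x10)
#define BM_ICOLL_INTERRUPTn_ENABLE	0x00000004

int main(void)
{
	unsigned int hwirq = 45;

	/* unmask: set the enable bit through the per-irq SET register */
	printf("unmask irq %u: write 0x%x to offset 0x%x\n", hwirq,
	       BM_ICOLL_INTERRUPTn_ENABLE, HW_ICOLL_INTERRUPTn_SET(hwirq));
	/* mask: clear the same bit through the matching CLR register */
	printf("mask   irq %u: write 0x%x to offset 0x%x\n", hwirq,
	       BM_ICOLL_INTERRUPTn_ENABLE, HW_ICOLL_INTERRUPTn_CLR(hwirq));
	return 0;
}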
+ * + * Support for the Nested Vectored Interrupt Controller found on the + * ARMv7-M CPUs (Cortex-M3/M4) + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> + +#include <asm/v7m.h> +#include <asm/exception.h> + +#include "irqchip.h" + +#define NVIC_ISER 0x000 +#define NVIC_ICER 0x080 +#define NVIC_IPR 0x300 + +#define NVIC_MAX_BANKS 16 +/* + * Each bank handles 32 irqs. Only the 16th (= last) bank handles only + * 16 irqs. + */ +#define NVIC_MAX_IRQ ((NVIC_MAX_BANKS - 1) * 32 + 16) + +static struct irq_domain *nvic_irq_domain; + +asmlinkage void __exception_irq_entry +nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs) +{ + unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq); + + handle_IRQ(irq, regs); +} + +static void nvic_eoi(struct irq_data *d) +{ + /* + * This is a no-op as end of interrupt is signaled by the exception + * return sequence. + */ +} + +static int __init nvic_of_init(struct device_node *node, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + unsigned int irqs, i, ret, numbanks; + void __iomem *nvic_base; + + numbanks = (readl_relaxed(V7M_SCS_ICTR) & + V7M_SCS_ICTR_INTLINESNUM_MASK) + 1; + + nvic_base = of_iomap(node, 0); + if (!nvic_base) { + pr_warn("unable to map nvic registers\n"); + return -ENOMEM; + } + + irqs = numbanks * 32; + if (irqs > NVIC_MAX_IRQ) + irqs = NVIC_MAX_IRQ; + + nvic_irq_domain = + irq_domain_add_linear(node, irqs, &irq_generic_chip_ops, NULL); + if (!nvic_irq_domain) { + pr_warn("Failed to allocate irq domain\n"); + return -ENOMEM; + } + + ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1, + "nvic_irq", handle_fasteoi_irq, + clr, 0, IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_warn("Failed to allocate irq chips\n"); + irq_domain_remove(nvic_irq_domain); + return ret; + } + + for (i = 0; i < numbanks; ++i) { + struct irq_chip_generic *gc; + + gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i); + gc->reg_base = nvic_base + 4 * i; + gc->chip_types[0].regs.enable = NVIC_ISER; + gc->chip_types[0].regs.disable = NVIC_ICER; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; + gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; + gc->chip_types[0].chip.irq_eoi = nvic_eoi; + + /* disable interrupts */ + writel_relaxed(~0, gc->reg_base + NVIC_ICER); + } + + /* Set priority on all interrupts */ + for (i = 0; i < irqs; i += 4) + writel_relaxed(0, nvic_base + NVIC_IPR + i); + + return 0; +} +IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init); diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c new file mode 100644 index 00000000000..34d18b48bb7 --- /dev/null +++ b/drivers/irqchip/irq-orion.c @@ -0,0 +1,208 @@ +/* + * Marvell Orion SoCs IRQ chip driver. + * + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
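Editorial aside, not part of the commit: the NVIC setup above carves the interrupt space into 32-IRQ banks, one generic chip per bank, and selects the bank purely through the reg_base offset of 4 * bank while the enable/disable registers keep their fixed NVIC_ISER/NVIC_ICER offsets. The per-hwirq arithmetic reduces to the following, with an arbitrary hwirq:

#include <stdio.h>

#define NVIC_ISER	0x000
#define NVIC_ICER	0x080

int main(void)
{
	unsigned int hwirq = 70;
	unsigned int bank = hwirq / 32;		/* generic chip index */
	unsigned int bit = hwirq % 32;		/* bit within that bank */

	printf("hwirq %u: enable  at base + 0x%x, bit %u\n",
	       hwirq, 4 * bank + NVIC_ISER, bit);
	printf("hwirq %u: disable at base + 0x%x, bit %u\n",
	       hwirq, 4 * bank + NVIC_ICER, bit);
	return 0;
}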
+ */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <asm/exception.h> +#include <asm/mach/irq.h> + +#include "irqchip.h" + +/* + * Orion SoC main interrupt controller + */ +#define ORION_IRQS_PER_CHIP 32 + +#define ORION_IRQ_CAUSE 0x00 +#define ORION_IRQ_MASK 0x04 +#define ORION_IRQ_FIQ_MASK 0x08 +#define ORION_IRQ_ENDP_MASK 0x0c + +static struct irq_domain *orion_irq_domain; + +static void +__exception_irq_entry orion_handle_irq(struct pt_regs *regs) +{ + struct irq_domain_chip_generic *dgc = orion_irq_domain->gc; + int n, base = 0; + + for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) { + struct irq_chip_generic *gc = + irq_get_domain_generic_chip(orion_irq_domain, base); + u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) & + gc->mask_cache; + while (stat) { + u32 hwirq = __fls(stat); + u32 irq = irq_find_mapping(orion_irq_domain, + gc->irq_base + hwirq); + handle_IRQ(irq, regs); + stat &= ~(1 << hwirq); + } + } +} + +static int __init orion_irq_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + int n, ret, base, num_chips = 0; + struct resource r; + + /* count number of irq chips by valid reg addresses */ + while (of_address_to_resource(np, num_chips, &r) == 0) + num_chips++; + + orion_irq_domain = irq_domain_add_linear(np, + num_chips * ORION_IRQS_PER_CHIP, + &irq_generic_chip_ops, NULL); + if (!orion_irq_domain) + panic("%s: unable to add irq domain\n", np->name); + + ret = irq_alloc_domain_generic_chips(orion_irq_domain, + ORION_IRQS_PER_CHIP, 1, np->name, + handle_level_irq, clr, 0, + IRQ_GC_INIT_MASK_CACHE); + if (ret) + panic("%s: unable to alloc irq domain gc\n", np->name); + + for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) { + struct irq_chip_generic *gc = + irq_get_domain_generic_chip(orion_irq_domain, base); + + of_address_to_resource(np, n, &r); + + if (!request_mem_region(r.start, resource_size(&r), np->name)) + panic("%s: unable to request mem region %d", + np->name, n); + + gc->reg_base = ioremap(r.start, resource_size(&r)); + if (!gc->reg_base) + panic("%s: unable to map resource %d", np->name, n); + + gc->chip_types[0].regs.mask = ORION_IRQ_MASK; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + + /* mask all interrupts */ + writel(0, gc->reg_base + ORION_IRQ_MASK); + } + + set_handle_irq(orion_handle_irq); + return 0; +} +IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init); + +/* + * Orion SoC bridge interrupt controller + */ +#define ORION_BRIDGE_IRQ_CAUSE 0x00 +#define ORION_BRIDGE_IRQ_MASK 0x04 + +static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct irq_domain *d = irq_get_handler_data(irq); + + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0); + u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) & + gc->mask_cache; + + while (stat) { + u32 hwirq = __fls(stat); + + generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq)); + stat &= ~(1 << hwirq); + } +} + +/* + * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register. + * To avoid interrupt events on stale irqs, we clear them before unmask. 
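Editorial aside, not part of the commit: orion_handle_irq() above walks one generic chip per block of 32 interrupts and drains each masked cause word highest bit first via __fls(). The same walk in a standalone sketch, with invented status words and a stand-in for the IRQ dispatch:

#include <stdio.h>
#include <stdint.h>

#define ORION_IRQS_PER_CHIP 32

static void fake_handle(unsigned int hwirq)
{
	printf("dispatch hwirq %u\n", hwirq);
}

int main(void)
{
	/* pretend CAUSE & mask_cache readouts for two register banks */
	uint32_t stat[2] = { 0x00000005, 0x80000000 };
	int n, base;

	for (n = 0, base = 0; n < 2; n++, base += ORION_IRQS_PER_CHIP) {
		uint32_t pending = stat[n];

		while (pending) {
			unsigned int hwirq = 31 - __builtin_clz(pending);

			fake_handle(base + hwirq);	/* highest bit first */
			pending &= ~(1u << hwirq);
		}
	}
	return 0;
}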
+ */ +static unsigned int orion_bridge_irq_startup(struct irq_data *d) +{ + struct irq_chip_type *ct = irq_data_get_chip_type(d); + + ct->chip.irq_ack(d); + ct->chip.irq_unmask(d); + return 0; +} + +static int __init orion_bridge_irq_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + struct resource r; + struct irq_domain *domain; + struct irq_chip_generic *gc; + int ret, irq, nrirqs = 32; + + /* get optional number of interrupts provided */ + of_property_read_u32(np, "marvell,#interrupts", &nrirqs); + + domain = irq_domain_add_linear(np, nrirqs, + &irq_generic_chip_ops, NULL); + if (!domain) { + pr_err("%s: unable to add irq domain\n", np->name); + return -ENOMEM; + } + + ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, + handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("%s: unable to alloc irq domain gc\n", np->name); + return ret; + } + + ret = of_address_to_resource(np, 0, &r); + if (ret) { + pr_err("%s: unable to get resource\n", np->name); + return ret; + } + + if (!request_mem_region(r.start, resource_size(&r), np->name)) { + pr_err("%s: unable to request mem region\n", np->name); + return -ENOMEM; + } + + /* Map the parent interrupt for the chained handler */ + irq = irq_of_parse_and_map(np, 0); + if (irq <= 0) { + pr_err("%s: unable to parse irq\n", np->name); + return -EINVAL; + } + + gc = irq_get_domain_generic_chip(domain, 0); + gc->reg_base = ioremap(r.start, resource_size(&r)); + if (!gc->reg_base) { + pr_err("%s: unable to map resource\n", np->name); + return -ENOMEM; + } + + gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE; + gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK; + gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup; + gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + + /* mask and clear all interrupts */ + writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK); + writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE); + + irq_set_handler_data(irq, domain); + irq_set_chained_handler(irq, orion_bridge_irq_handler); + + return 0; +} +IRQCHIP_DECLARE(orion_bridge_intc, + "marvell,orion-bridge-intc", orion_bridge_irq_init); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c new file mode 100644 index 00000000000..3ee78f02e5d --- /dev/null +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -0,0 +1,556 @@ +/* + * Renesas INTC External IRQ Pin Driver + * + * Copyright (C) 2013 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/platform_data/irq-renesas-intc-irqpin.h> + +#define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */ + +#define INTC_IRQPIN_REG_SENSE 0 /* ICRn */ +#define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */ +#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */ +#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */ +#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */ +#define INTC_IRQPIN_REG_NR 5 + +/* INTC external IRQ PIN hardware register access: + * + * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*) + * PRIO is read-write 32-bit with 4-bits per IRQ (**) + * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***) + * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***) + * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***) + * + * (*) May be accessed by more than one driver instance - lock needed + * (**) Read-modify-write access by one driver instance - lock needed + * (***) Accessed by one driver instance only - no locking needed + */ + +struct intc_irqpin_iomem { + void __iomem *iomem; + unsigned long (*read)(void __iomem *iomem); + void (*write)(void __iomem *iomem, unsigned long data); + int width; +}; + +struct intc_irqpin_irq { + int hw_irq; + int requested_irq; + int domain_irq; + struct intc_irqpin_priv *p; +}; + +struct intc_irqpin_priv { + struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR]; + struct intc_irqpin_irq irq[INTC_IRQPIN_MAX]; + struct renesas_intc_irqpin_config config; + unsigned int number_of_irqs; + struct platform_device *pdev; + struct irq_chip irq_chip; + struct irq_domain *irq_domain; + bool shared_irqs; + u8 shared_irq_mask; +}; + +static unsigned long intc_irqpin_read32(void __iomem *iomem) +{ + return ioread32(iomem); +} + +static unsigned long intc_irqpin_read8(void __iomem *iomem) +{ + return ioread8(iomem); +} + +static void intc_irqpin_write32(void __iomem *iomem, unsigned long data) +{ + iowrite32(data, iomem); +} + +static void intc_irqpin_write8(void __iomem *iomem, unsigned long data) +{ + iowrite8(data, iomem); +} + +static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p, + int reg) +{ + struct intc_irqpin_iomem *i = &p->iomem[reg]; + + return i->read(i->iomem); +} + +static inline void intc_irqpin_write(struct intc_irqpin_priv *p, + int reg, unsigned long data) +{ + struct intc_irqpin_iomem *i = &p->iomem[reg]; + + i->write(i->iomem, data); +} + +static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p, + int reg, int hw_irq) +{ + return BIT((p->iomem[reg].width - 1) - hw_irq); +} + +static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p, + int reg, int hw_irq) +{ + intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq)); +} + +static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */ + +static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p, + int reg, int shift, + int width, int value) +{ + unsigned long flags; + unsigned long tmp; + + raw_spin_lock_irqsave(&intc_irqpin_lock, 
flags); + + tmp = intc_irqpin_read(p, reg); + tmp &= ~(((1 << width) - 1) << shift); + tmp |= value << shift; + intc_irqpin_write(p, reg, tmp); + + raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags); +} + +static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, + int irq, int do_mask) +{ + /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */ + int bitfield_width = 4; + int shift = 32 - (irq + 1) * bitfield_width; + + intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, + shift, bitfield_width, + do_mask ? 0 : (1 << bitfield_width) - 1); +} + +static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) +{ + /* The SENSE register is assumed to be 32-bit. */ + int bitfield_width = p->config.sense_bitfield_width; + int shift = 32 - (irq + 1) * bitfield_width; + + dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); + + if (value >= (1 << bitfield_width)) + return -EINVAL; + + intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift, + bitfield_width, value); + return 0; +} + +static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str) +{ + dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n", + str, i->requested_irq, i->hw_irq, i->domain_irq); +} + +static void intc_irqpin_irq_enable(struct irq_data *d) +{ + struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + intc_irqpin_dbg(&p->irq[hw_irq], "enable"); + intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq); +} + +static void intc_irqpin_irq_disable(struct irq_data *d) +{ + struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + intc_irqpin_dbg(&p->irq[hw_irq], "disable"); + intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq); +} + +static void intc_irqpin_shared_irq_enable(struct irq_data *d) +{ + struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + intc_irqpin_dbg(&p->irq[hw_irq], "shared enable"); + intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq); + + p->shared_irq_mask &= ~BIT(hw_irq); +} + +static void intc_irqpin_shared_irq_disable(struct irq_data *d) +{ + struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + intc_irqpin_dbg(&p->irq[hw_irq], "shared disable"); + intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq); + + p->shared_irq_mask |= BIT(hw_irq); +} + +static void intc_irqpin_irq_enable_force(struct irq_data *d) +{ + struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + int irq = p->irq[irqd_to_hwirq(d)].requested_irq; + + intc_irqpin_irq_enable(d); + + /* enable interrupt through parent interrupt controller, + * assumes non-shared interrupt with 1:1 mapping + * needed for busted IRQs on some SoCs like sh73a0 + */ + irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq)); +} + +static void intc_irqpin_irq_disable_force(struct irq_data *d) +{ + struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + int irq = p->irq[irqd_to_hwirq(d)].requested_irq; + + /* disable interrupt through parent interrupt controller, + * assumes non-shared interrupt with 1:1 mapping + * needed for busted IRQs on some SoCs like sh73a0 + */ + irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq)); + intc_irqpin_irq_disable(d); +} + +#define INTC_IRQ_SENSE_VALID 0x10 +#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID) + +static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = { + [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00), + [IRQ_TYPE_EDGE_RISING] = 
INTC_IRQ_SENSE(0x01), + [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02), + [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03), + [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04), +}; + +static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type) +{ + unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK]; + struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); + + if (!(value & INTC_IRQ_SENSE_VALID)) + return -EINVAL; + + return intc_irqpin_set_sense(p, irqd_to_hwirq(d), + value ^ INTC_IRQ_SENSE_VALID); +} + +static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id) +{ + struct intc_irqpin_irq *i = dev_id; + struct intc_irqpin_priv *p = i->p; + unsigned long bit; + + intc_irqpin_dbg(i, "demux1"); + bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq); + + if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) { + intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit); + intc_irqpin_dbg(i, "demux2"); + generic_handle_irq(i->domain_irq); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id) +{ + struct intc_irqpin_priv *p = dev_id; + unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE); + irqreturn_t status = IRQ_NONE; + int k; + + for (k = 0; k < 8; k++) { + if (reg_source & BIT(7 - k)) { + if (BIT(k) & p->shared_irq_mask) + continue; + + status |= intc_irqpin_irq_handler(irq, &p->irq[k]); + } + } + + return status; +} + +static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct intc_irqpin_priv *p = h->host_data; + + p->irq[hw].domain_irq = virq; + p->irq[hw].hw_irq = hw; + + intc_irqpin_dbg(&p->irq[hw], "map"); + irq_set_chip_data(virq, h->host_data); + irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); + set_irq_flags(virq, IRQF_VALID); /* kill me now */ + return 0; +} + +static struct irq_domain_ops intc_irqpin_irq_domain_ops = { + .map = intc_irqpin_irq_domain_map, + .xlate = irq_domain_xlate_twocell, +}; + +static int intc_irqpin_probe(struct platform_device *pdev) +{ + struct renesas_intc_irqpin_config *pdata = pdev->dev.platform_data; + struct intc_irqpin_priv *p; + struct intc_irqpin_iomem *i; + struct resource *io[INTC_IRQPIN_REG_NR]; + struct resource *irq; + struct irq_chip *irq_chip; + void (*enable_fn)(struct irq_data *d); + void (*disable_fn)(struct irq_data *d); + const char *name = dev_name(&pdev->dev); + int ref_irq; + int ret; + int k; + + p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); + if (!p) { + dev_err(&pdev->dev, "failed to allocate driver data\n"); + ret = -ENOMEM; + goto err0; + } + + /* deal with driver instance configuration */ + if (pdata) { + memcpy(&p->config, pdata, sizeof(*pdata)); + } else { + of_property_read_u32(pdev->dev.of_node, "sense-bitfield-width", + &p->config.sense_bitfield_width); + p->config.control_parent = of_property_read_bool(pdev->dev.of_node, + "control-parent"); + } + if (!p->config.sense_bitfield_width) + p->config.sense_bitfield_width = 4; /* default to 4 bits */ + + p->pdev = pdev; + platform_set_drvdata(pdev, p); + + /* get hold of manadatory IOMEM */ + for (k = 0; k < INTC_IRQPIN_REG_NR; k++) { + io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k); + if (!io[k]) { + dev_err(&pdev->dev, "not enough IOMEM resources\n"); + ret = -EINVAL; + goto err0; + } + } + + /* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */ + for (k = 0; k < INTC_IRQPIN_MAX; k++) { + irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); + if (!irq) + 
break; + + p->irq[k].p = p; + p->irq[k].requested_irq = irq->start; + } + + p->number_of_irqs = k; + if (p->number_of_irqs < 1) { + dev_err(&pdev->dev, "not enough IRQ resources\n"); + ret = -EINVAL; + goto err0; + } + + /* ioremap IOMEM and setup read/write callbacks */ + for (k = 0; k < INTC_IRQPIN_REG_NR; k++) { + i = &p->iomem[k]; + + switch (resource_size(io[k])) { + case 1: + i->width = 8; + i->read = intc_irqpin_read8; + i->write = intc_irqpin_write8; + break; + case 4: + i->width = 32; + i->read = intc_irqpin_read32; + i->write = intc_irqpin_write32; + break; + default: + dev_err(&pdev->dev, "IOMEM size mismatch\n"); + ret = -EINVAL; + goto err0; + } + + i->iomem = devm_ioremap_nocache(&pdev->dev, io[k]->start, + resource_size(io[k])); + if (!i->iomem) { + dev_err(&pdev->dev, "failed to remap IOMEM\n"); + ret = -ENXIO; + goto err0; + } + } + + /* mask all interrupts using priority */ + for (k = 0; k < p->number_of_irqs; k++) + intc_irqpin_mask_unmask_prio(p, k, 1); + + /* clear all pending interrupts */ + intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0); + + /* scan for shared interrupt lines */ + ref_irq = p->irq[0].requested_irq; + p->shared_irqs = true; + for (k = 1; k < p->number_of_irqs; k++) { + if (ref_irq != p->irq[k].requested_irq) { + p->shared_irqs = false; + break; + } + } + + /* use more severe masking method if requested */ + if (p->config.control_parent) { + enable_fn = intc_irqpin_irq_enable_force; + disable_fn = intc_irqpin_irq_disable_force; + } else if (!p->shared_irqs) { + enable_fn = intc_irqpin_irq_enable; + disable_fn = intc_irqpin_irq_disable; + } else { + enable_fn = intc_irqpin_shared_irq_enable; + disable_fn = intc_irqpin_shared_irq_disable; + } + + irq_chip = &p->irq_chip; + irq_chip->name = name; + irq_chip->irq_mask = disable_fn; + irq_chip->irq_unmask = enable_fn; + irq_chip->irq_enable = enable_fn; + irq_chip->irq_disable = disable_fn; + irq_chip->irq_set_type = intc_irqpin_irq_set_type; + irq_chip->flags = IRQCHIP_SKIP_SET_WAKE; + + p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, + p->number_of_irqs, + p->config.irq_base, + &intc_irqpin_irq_domain_ops, p); + if (!p->irq_domain) { + ret = -ENXIO; + dev_err(&pdev->dev, "cannot initialize irq domain\n"); + goto err0; + } + + if (p->shared_irqs) { + /* request one shared interrupt */ + if (devm_request_irq(&pdev->dev, p->irq[0].requested_irq, + intc_irqpin_shared_irq_handler, + IRQF_SHARED, name, p)) { + dev_err(&pdev->dev, "failed to request low IRQ\n"); + ret = -ENOENT; + goto err1; + } + } else { + /* request interrupts one by one */ + for (k = 0; k < p->number_of_irqs; k++) { + if (devm_request_irq(&pdev->dev, + p->irq[k].requested_irq, + intc_irqpin_irq_handler, + 0, name, &p->irq[k])) { + dev_err(&pdev->dev, + "failed to request low IRQ\n"); + ret = -ENOENT; + goto err1; + } + } + } + + /* unmask all interrupts on prio level */ + for (k = 0; k < p->number_of_irqs; k++) + intc_irqpin_mask_unmask_prio(p, k, 0); + + dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); + + /* warn in case of mismatch if irq base is specified */ + if (p->config.irq_base) { + if (p->config.irq_base != p->irq[0].domain_irq) + dev_warn(&pdev->dev, "irq base mismatch (%d/%d)\n", + p->config.irq_base, p->irq[0].domain_irq); + } + + return 0; + +err1: + irq_domain_remove(p->irq_domain); +err0: + return ret; +} + +static int intc_irqpin_remove(struct platform_device *pdev) +{ + struct intc_irqpin_priv *p = platform_get_drvdata(pdev); + + irq_domain_remove(p->irq_domain); + + return 0; +} + +static const 
struct of_device_id intc_irqpin_dt_ids[] = { + { .compatible = "renesas,intc-irqpin", }, + {}, +}; +MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); + +static struct platform_driver intc_irqpin_device_driver = { + .probe = intc_irqpin_probe, + .remove = intc_irqpin_remove, + .driver = { + .name = "renesas_intc_irqpin", + .of_match_table = intc_irqpin_dt_ids, + .owner = THIS_MODULE, + } +}; + +static int __init intc_irqpin_init(void) +{ + return platform_driver_register(&intc_irqpin_device_driver); +} +postcore_initcall(intc_irqpin_init); + +static void __exit intc_irqpin_exit(void) +{ + platform_driver_unregister(&intc_irqpin_device_driver); +} +module_exit(intc_irqpin_exit); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c new file mode 100644 index 00000000000..8777065012a --- /dev/null +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -0,0 +1,302 @@ +/* + * Renesas IRQC Driver + * + * Copyright (C) 2013 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/platform_data/irq-renesas-irqc.h> + +#define IRQC_IRQ_MAX 32 /* maximum 32 interrupts per driver instance */ + +#define IRQC_REQ_STS 0x00 +#define IRQC_EN_STS 0x04 +#define IRQC_EN_SET 0x08 +#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10)) +#define DETECT_STATUS 0x100 +#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04)) + +struct irqc_irq { + int hw_irq; + int requested_irq; + int domain_irq; + struct irqc_priv *p; +}; + +struct irqc_priv { + void __iomem *iomem; + void __iomem *cpu_int_base; + struct irqc_irq irq[IRQC_IRQ_MAX]; + struct renesas_irqc_config config; + unsigned int number_of_irqs; + struct platform_device *pdev; + struct irq_chip irq_chip; + struct irq_domain *irq_domain; +}; + +static void irqc_dbg(struct irqc_irq *i, char *str) +{ + dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n", + str, i->requested_irq, i->hw_irq, i->domain_irq); +} + +static void irqc_irq_enable(struct irq_data *d) +{ + struct irqc_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + irqc_dbg(&p->irq[hw_irq], "enable"); + iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_SET); +} + +static void irqc_irq_disable(struct irq_data *d) +{ + struct irqc_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + + irqc_dbg(&p->irq[hw_irq], "disable"); + iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS); +} + +static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { + [IRQ_TYPE_LEVEL_LOW] = 0x01, + [IRQ_TYPE_LEVEL_HIGH] = 
0x02, + [IRQ_TYPE_EDGE_FALLING] = 0x04, /* Synchronous */ + [IRQ_TYPE_EDGE_RISING] = 0x08, /* Synchronous */ + [IRQ_TYPE_EDGE_BOTH] = 0x0c, /* Synchronous */ +}; + +static int irqc_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct irqc_priv *p = irq_data_get_irq_chip_data(d); + int hw_irq = irqd_to_hwirq(d); + unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK]; + unsigned long tmp; + + irqc_dbg(&p->irq[hw_irq], "sense"); + + if (!value) + return -EINVAL; + + tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq)); + tmp &= ~0x3f; + tmp |= value; + iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq)); + return 0; +} + +static irqreturn_t irqc_irq_handler(int irq, void *dev_id) +{ + struct irqc_irq *i = dev_id; + struct irqc_priv *p = i->p; + unsigned long bit = BIT(i->hw_irq); + + irqc_dbg(i, "demux1"); + + if (ioread32(p->iomem + DETECT_STATUS) & bit) { + iowrite32(bit, p->iomem + DETECT_STATUS); + irqc_dbg(i, "demux2"); + generic_handle_irq(i->domain_irq); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct irqc_priv *p = h->host_data; + + p->irq[hw].domain_irq = virq; + p->irq[hw].hw_irq = hw; + + irqc_dbg(&p->irq[hw], "map"); + irq_set_chip_data(virq, h->host_data); + irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); + set_irq_flags(virq, IRQF_VALID); /* kill me now */ + return 0; +} + +static struct irq_domain_ops irqc_irq_domain_ops = { + .map = irqc_irq_domain_map, + .xlate = irq_domain_xlate_twocell, +}; + +static int irqc_probe(struct platform_device *pdev) +{ + struct renesas_irqc_config *pdata = pdev->dev.platform_data; + struct irqc_priv *p; + struct resource *io; + struct resource *irq; + struct irq_chip *irq_chip; + const char *name = dev_name(&pdev->dev); + int ret; + int k; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + dev_err(&pdev->dev, "failed to allocate driver data\n"); + ret = -ENOMEM; + goto err0; + } + + /* deal with driver instance configuration */ + if (pdata) + memcpy(&p->config, pdata, sizeof(*pdata)); + + p->pdev = pdev; + platform_set_drvdata(pdev, p); + + /* get hold of manadatory IOMEM */ + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!io) { + dev_err(&pdev->dev, "not enough IOMEM resources\n"); + ret = -EINVAL; + goto err1; + } + + /* allow any number of IRQs between 1 and IRQC_IRQ_MAX */ + for (k = 0; k < IRQC_IRQ_MAX; k++) { + irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); + if (!irq) + break; + + p->irq[k].p = p; + p->irq[k].requested_irq = irq->start; + } + + p->number_of_irqs = k; + if (p->number_of_irqs < 1) { + dev_err(&pdev->dev, "not enough IRQ resources\n"); + ret = -EINVAL; + goto err1; + } + + /* ioremap IOMEM and setup read/write callbacks */ + p->iomem = ioremap_nocache(io->start, resource_size(io)); + if (!p->iomem) { + dev_err(&pdev->dev, "failed to remap IOMEM\n"); + ret = -ENXIO; + goto err2; + } + + p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ + + irq_chip = &p->irq_chip; + irq_chip->name = name; + irq_chip->irq_mask = irqc_irq_disable; + irq_chip->irq_unmask = irqc_irq_enable; + irq_chip->irq_set_type = irqc_irq_set_type; + irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND; + + p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, + p->number_of_irqs, + p->config.irq_base, + &irqc_irq_domain_ops, p); + if (!p->irq_domain) { + ret = -ENXIO; + dev_err(&pdev->dev, "cannot initialize irq domain\n"); + goto err2; + } + + /* request interrupts one by 
one */ + for (k = 0; k < p->number_of_irqs; k++) { + if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, + 0, name, &p->irq[k])) { + dev_err(&pdev->dev, "failed to request IRQ\n"); + ret = -ENOENT; + goto err3; + } + } + + dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); + + /* warn in case of mismatch if irq base is specified */ + if (p->config.irq_base) { + if (p->config.irq_base != p->irq[0].domain_irq) + dev_warn(&pdev->dev, "irq base mismatch (%d/%d)\n", + p->config.irq_base, p->irq[0].domain_irq); + } + + return 0; +err3: + while (--k >= 0) + free_irq(p->irq[k].requested_irq, &p->irq[k]); + + irq_domain_remove(p->irq_domain); +err2: + iounmap(p->iomem); +err1: + kfree(p); +err0: + return ret; +} + +static int irqc_remove(struct platform_device *pdev) +{ + struct irqc_priv *p = platform_get_drvdata(pdev); + int k; + + for (k = 0; k < p->number_of_irqs; k++) + free_irq(p->irq[k].requested_irq, &p->irq[k]); + + irq_domain_remove(p->irq_domain); + iounmap(p->iomem); + kfree(p); + return 0; +} + +static const struct of_device_id irqc_dt_ids[] = { + { .compatible = "renesas,irqc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, irqc_dt_ids); + +static struct platform_driver irqc_device_driver = { + .probe = irqc_probe, + .remove = irqc_remove, + .driver = { + .name = "renesas_irqc", + .of_match_table = irqc_dt_ids, + .owner = THIS_MODULE, + } +}; + +static int __init irqc_init(void) +{ + return platform_driver_register(&irqc_device_driver); +} +postcore_initcall(irqc_init); + +static void __exit irqc_exit(void) +{ + platform_driver_unregister(&irqc_device_driver); +} +module_exit(irqc_exit); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("Renesas IRQC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c new file mode 100644 index 00000000000..78a6accd205 --- /dev/null +++ b/drivers/irqchip/irq-s3c24xx.c @@ -0,0 +1,1354 @@ +/* + * S3C24XX IRQ handling + * + * Copyright (c) 2003-2004 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * Copyright (c) 2012 Heiko Stuebner <heiko@sntech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ + +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/device.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> + +#include <asm/exception.h> +#include <asm/mach/irq.h> + +#include <mach/regs-irq.h> +#include <mach/regs-gpio.h> + +#include <plat/cpu.h> +#include <plat/regs-irqtype.h> +#include <plat/pm.h> + +#include "irqchip.h" + +#define S3C_IRQTYPE_NONE 0 +#define S3C_IRQTYPE_EINT 1 +#define S3C_IRQTYPE_EDGE 2 +#define S3C_IRQTYPE_LEVEL 3 + +struct s3c_irq_data { + unsigned int type; + unsigned long offset; + unsigned long parent_irq; + + /* data gets filled during init */ + struct s3c_irq_intc *intc; + unsigned long sub_bits; + struct s3c_irq_intc *sub_intc; +}; + +/* + * Sructure holding the controller data + * @reg_pending register holding pending irqs + * @reg_intpnd special register intpnd in main intc + * @reg_mask mask register + * @domain irq_domain of the controller + * @parent parent controller for ext and sub irqs + * @irqs irq-data, always s3c_irq_data[32] + */ +struct s3c_irq_intc { + void __iomem *reg_pending; + void __iomem *reg_intpnd; + void __iomem *reg_mask; + struct irq_domain *domain; + struct s3c_irq_intc *parent; + struct s3c_irq_data *irqs; +}; + +/* + * Array holding pointers to the global controller structs + * [0] ... main_intc + * [1] ... sub_intc + * [2] ... main_intc2 on s3c2416 + */ +static struct s3c_irq_intc *s3c_intc[3]; + +static void s3c_irq_mask(struct irq_data *data) +{ + struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); + struct s3c_irq_intc *intc = irq_data->intc; + struct s3c_irq_intc *parent_intc = intc->parent; + struct s3c_irq_data *parent_data; + unsigned long mask; + unsigned int irqno; + + mask = __raw_readl(intc->reg_mask); + mask |= (1UL << irq_data->offset); + __raw_writel(mask, intc->reg_mask); + + if (parent_intc) { + parent_data = &parent_intc->irqs[irq_data->parent_irq]; + + /* check to see if we need to mask the parent IRQ + * The parent_irq is always in main_intc, so the hwirq + * for find_mapping does not need an offset in any case. 
+ */ + if ((mask & parent_data->sub_bits) == parent_data->sub_bits) { + irqno = irq_find_mapping(parent_intc->domain, + irq_data->parent_irq); + s3c_irq_mask(irq_get_irq_data(irqno)); + } + } +} + +static void s3c_irq_unmask(struct irq_data *data) +{ + struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); + struct s3c_irq_intc *intc = irq_data->intc; + struct s3c_irq_intc *parent_intc = intc->parent; + unsigned long mask; + unsigned int irqno; + + mask = __raw_readl(intc->reg_mask); + mask &= ~(1UL << irq_data->offset); + __raw_writel(mask, intc->reg_mask); + + if (parent_intc) { + irqno = irq_find_mapping(parent_intc->domain, + irq_data->parent_irq); + s3c_irq_unmask(irq_get_irq_data(irqno)); + } +} + +static inline void s3c_irq_ack(struct irq_data *data) +{ + struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); + struct s3c_irq_intc *intc = irq_data->intc; + unsigned long bitval = 1UL << irq_data->offset; + + __raw_writel(bitval, intc->reg_pending); + if (intc->reg_intpnd) + __raw_writel(bitval, intc->reg_intpnd); +} + +static int s3c_irq_type(struct irq_data *data, unsigned int type) +{ + switch (type) { + case IRQ_TYPE_NONE: + break; + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_EDGE_BOTH: + irq_set_handler(data->irq, handle_edge_irq); + break; + case IRQ_TYPE_LEVEL_LOW: + case IRQ_TYPE_LEVEL_HIGH: + irq_set_handler(data->irq, handle_level_irq); + break; + default: + pr_err("No such irq type %d", type); + return -EINVAL; + } + + return 0; +} + +static int s3c_irqext_type_set(void __iomem *gpcon_reg, + void __iomem *extint_reg, + unsigned long gpcon_offset, + unsigned long extint_offset, + unsigned int type) +{ + unsigned long newvalue = 0, value; + + /* Set the GPIO to external interrupt mode */ + value = __raw_readl(gpcon_reg); + value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset); + __raw_writel(value, gpcon_reg); + + /* Set the external interrupt to pointed trigger type */ + switch (type) + { + case IRQ_TYPE_NONE: + pr_warn("No edge setting!\n"); + break; + + case IRQ_TYPE_EDGE_RISING: + newvalue = S3C2410_EXTINT_RISEEDGE; + break; + + case IRQ_TYPE_EDGE_FALLING: + newvalue = S3C2410_EXTINT_FALLEDGE; + break; + + case IRQ_TYPE_EDGE_BOTH: + newvalue = S3C2410_EXTINT_BOTHEDGE; + break; + + case IRQ_TYPE_LEVEL_LOW: + newvalue = S3C2410_EXTINT_LOWLEV; + break; + + case IRQ_TYPE_LEVEL_HIGH: + newvalue = S3C2410_EXTINT_HILEV; + break; + + default: + pr_err("No such irq type %d", type); + return -EINVAL; + } + + value = __raw_readl(extint_reg); + value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset); + __raw_writel(value, extint_reg); + + return 0; +} + +static int s3c_irqext_type(struct irq_data *data, unsigned int type) +{ + void __iomem *extint_reg; + void __iomem *gpcon_reg; + unsigned long gpcon_offset, extint_offset; + + if ((data->hwirq >= 4) && (data->hwirq <= 7)) { + gpcon_reg = S3C2410_GPFCON; + extint_reg = S3C24XX_EXTINT0; + gpcon_offset = (data->hwirq) * 2; + extint_offset = (data->hwirq) * 4; + } else if ((data->hwirq >= 8) && (data->hwirq <= 15)) { + gpcon_reg = S3C2410_GPGCON; + extint_reg = S3C24XX_EXTINT1; + gpcon_offset = (data->hwirq - 8) * 2; + extint_offset = (data->hwirq - 8) * 4; + } else if ((data->hwirq >= 16) && (data->hwirq <= 23)) { + gpcon_reg = S3C2410_GPGCON; + extint_reg = S3C24XX_EXTINT2; + gpcon_offset = (data->hwirq - 8) * 2; + extint_offset = (data->hwirq - 16) * 4; + } else { + return -EINVAL; + } + + return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset, + 
extint_offset, type); +} + +static int s3c_irqext0_type(struct irq_data *data, unsigned int type) +{ + void __iomem *extint_reg; + void __iomem *gpcon_reg; + unsigned long gpcon_offset, extint_offset; + + if ((data->hwirq >= 0) && (data->hwirq <= 3)) { + gpcon_reg = S3C2410_GPFCON; + extint_reg = S3C24XX_EXTINT0; + gpcon_offset = (data->hwirq) * 2; + extint_offset = (data->hwirq) * 4; + } else { + return -EINVAL; + } + + return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset, + extint_offset, type); +} + +static struct irq_chip s3c_irq_chip = { + .name = "s3c", + .irq_ack = s3c_irq_ack, + .irq_mask = s3c_irq_mask, + .irq_unmask = s3c_irq_unmask, + .irq_set_type = s3c_irq_type, + .irq_set_wake = s3c_irq_wake +}; + +static struct irq_chip s3c_irq_level_chip = { + .name = "s3c-level", + .irq_mask = s3c_irq_mask, + .irq_unmask = s3c_irq_unmask, + .irq_ack = s3c_irq_ack, + .irq_set_type = s3c_irq_type, +}; + +static struct irq_chip s3c_irqext_chip = { + .name = "s3c-ext", + .irq_mask = s3c_irq_mask, + .irq_unmask = s3c_irq_unmask, + .irq_ack = s3c_irq_ack, + .irq_set_type = s3c_irqext_type, + .irq_set_wake = s3c_irqext_wake +}; + +static struct irq_chip s3c_irq_eint0t4 = { + .name = "s3c-ext0", + .irq_ack = s3c_irq_ack, + .irq_mask = s3c_irq_mask, + .irq_unmask = s3c_irq_unmask, + .irq_set_wake = s3c_irq_wake, + .irq_set_type = s3c_irqext0_type, +}; + +static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); + struct s3c_irq_intc *intc = irq_data->intc; + struct s3c_irq_intc *sub_intc = irq_data->sub_intc; + unsigned long src; + unsigned long msk; + unsigned int n; + unsigned int offset; + + /* we're using individual domains for the non-dt case + * and one big domain for the dt case where the subintc + * starts at hwirq number 32. + */ + offset = (intc->domain->of_node) ? 32 : 0; + + chained_irq_enter(chip, desc); + + src = __raw_readl(sub_intc->reg_pending); + msk = __raw_readl(sub_intc->reg_mask); + + src &= ~msk; + src &= irq_data->sub_bits; + + while (src) { + n = __ffs(src); + src &= ~(1 << n); + irq = irq_find_mapping(sub_intc->domain, offset + n); + generic_handle_irq(irq); + } + + chained_irq_exit(chip, desc); +} + +static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, + struct pt_regs *regs, int intc_offset) +{ + int pnd; + int offset; + int irq; + + pnd = __raw_readl(intc->reg_intpnd); + if (!pnd) + return false; + + /* non-dt machines use individual domains */ + if (!intc->domain->of_node) + intc_offset = 0; + + /* We have a problem that the INTOFFSET register does not always + * show one interrupt. Occasionally we get two interrupts through + * the prioritiser, and this causes the INTOFFSET register to show + * what looks like the logical-or of the two interrupt numbers. + * + * Thanks to Klaus, Shannon, et al for helping to debug this problem + */ + offset = __raw_readl(intc->reg_intpnd + 4); + + /* Find the bit manually, when the offset is wrong. + * The pending register only ever contains the one bit of the next + * interrupt to handle. 
+ */ + if (!(pnd & (1 << offset))) + offset = __ffs(pnd); + + irq = irq_find_mapping(intc->domain, intc_offset + offset); + handle_IRQ(irq, regs); + return true; +} + +asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs) +{ + do { + if (likely(s3c_intc[0])) + if (s3c24xx_handle_intc(s3c_intc[0], regs, 0)) + continue; + + if (s3c_intc[2]) + if (s3c24xx_handle_intc(s3c_intc[2], regs, 64)) + continue; + + break; + } while (1); +} + +#ifdef CONFIG_FIQ +/** + * s3c24xx_set_fiq - set the FIQ routing + * @irq: IRQ number to route to FIQ on processor. + * @on: Whether to route @irq to the FIQ, or to remove the FIQ routing. + * + * Change the state of the IRQ to FIQ routing depending on @irq and @on. If + * @on is true, the @irq is checked to see if it can be routed and the + * interrupt controller updated to route the IRQ. If @on is false, the FIQ + * routing is cleared, regardless of which @irq is specified. + */ +int s3c24xx_set_fiq(unsigned int irq, bool on) +{ + u32 intmod; + unsigned offs; + + if (on) { + offs = irq - FIQ_START; + if (offs > 31) + return -EINVAL; + + intmod = 1 << offs; + } else { + intmod = 0; + } + + __raw_writel(intmod, S3C2410_INTMOD); + return 0; +} + +EXPORT_SYMBOL_GPL(s3c24xx_set_fiq); +#endif + +static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct s3c_irq_intc *intc = h->host_data; + struct s3c_irq_data *irq_data = &intc->irqs[hw]; + struct s3c_irq_intc *parent_intc; + struct s3c_irq_data *parent_irq_data; + unsigned int irqno; + + /* attach controller pointer to irq_data */ + irq_data->intc = intc; + irq_data->offset = hw; + + parent_intc = intc->parent; + + /* set handler and flags */ + switch (irq_data->type) { + case S3C_IRQTYPE_NONE: + return 0; + case S3C_IRQTYPE_EINT: + /* On the S3C2412, the EINT0to3 have a parent irq + * but need the s3c_irq_eint0t4 chip + */ + if (parent_intc && (!soc_is_s3c2412() || hw >= 4)) + irq_set_chip_and_handler(virq, &s3c_irqext_chip, + handle_edge_irq); + else + irq_set_chip_and_handler(virq, &s3c_irq_eint0t4, + handle_edge_irq); + break; + case S3C_IRQTYPE_EDGE: + if (parent_intc || intc->reg_pending == S3C2416_SRCPND2) + irq_set_chip_and_handler(virq, &s3c_irq_level_chip, + handle_edge_irq); + else + irq_set_chip_and_handler(virq, &s3c_irq_chip, + handle_edge_irq); + break; + case S3C_IRQTYPE_LEVEL: + if (parent_intc) + irq_set_chip_and_handler(virq, &s3c_irq_level_chip, + handle_level_irq); + else + irq_set_chip_and_handler(virq, &s3c_irq_chip, + handle_level_irq); + break; + default: + pr_err("irq-s3c24xx: unsupported irqtype %d\n", irq_data->type); + return -EINVAL; + } + + irq_set_chip_data(virq, irq_data); + + set_irq_flags(virq, IRQF_VALID); + + if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) { + if (irq_data->parent_irq > 31) { + pr_err("irq-s3c24xx: parent irq %lu is out of range\n", + irq_data->parent_irq); + goto err; + } + + parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; + parent_irq_data->sub_intc = intc; + parent_irq_data->sub_bits |= (1UL << hw); + + /* attach the demuxer to the parent irq */ + irqno = irq_find_mapping(parent_intc->domain, + irq_data->parent_irq); + if (!irqno) { + pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n", + irq_data->parent_irq); + goto err; + } + irq_set_chained_handler(irqno, s3c_irq_demux); + } + + return 0; + +err: + set_irq_flags(virq, 0); + + /* the only error can result from bad mapping data*/ + return -EINVAL; +} + +static struct irq_domain_ops s3c24xx_irq_ops = { + .map = 
s3c24xx_irq_map, + .xlate = irq_domain_xlate_twocell, +}; + +static void s3c24xx_clear_intc(struct s3c_irq_intc *intc) +{ + void __iomem *reg_source; + unsigned long pend; + unsigned long last; + int i; + + /* if intpnd is set, read the next pending irq from there */ + reg_source = intc->reg_intpnd ? intc->reg_intpnd : intc->reg_pending; + + last = 0; + for (i = 0; i < 4; i++) { + pend = __raw_readl(reg_source); + + if (pend == 0 || pend == last) + break; + + __raw_writel(pend, intc->reg_pending); + if (intc->reg_intpnd) + __raw_writel(pend, intc->reg_intpnd); + + pr_info("irq: clearing pending status %08x\n", (int)pend); + last = pend; + } +} + +static struct s3c_irq_intc * __init s3c24xx_init_intc(struct device_node *np, + struct s3c_irq_data *irq_data, + struct s3c_irq_intc *parent, + unsigned long address) +{ + struct s3c_irq_intc *intc; + void __iomem *base = (void *)0xf6000000; /* static mapping */ + int irq_num; + int irq_start; + int ret; + + intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL); + if (!intc) + return ERR_PTR(-ENOMEM); + + intc->irqs = irq_data; + + if (parent) + intc->parent = parent; + + /* select the correct data for the controller. + * Need to hard code the irq num start and offset + * to preserve the static mapping for now + */ + switch (address) { + case 0x4a000000: + pr_debug("irq: found main intc\n"); + intc->reg_pending = base; + intc->reg_mask = base + 0x08; + intc->reg_intpnd = base + 0x10; + irq_num = 32; + irq_start = S3C2410_IRQ(0); + break; + case 0x4a000018: + pr_debug("irq: found subintc\n"); + intc->reg_pending = base + 0x18; + intc->reg_mask = base + 0x1c; + irq_num = 29; + irq_start = S3C2410_IRQSUB(0); + break; + case 0x4a000040: + pr_debug("irq: found intc2\n"); + intc->reg_pending = base + 0x40; + intc->reg_mask = base + 0x48; + intc->reg_intpnd = base + 0x50; + irq_num = 8; + irq_start = S3C2416_IRQ(0); + break; + case 0x560000a4: + pr_debug("irq: found eintc\n"); + base = (void *)0xfd000000; + + intc->reg_mask = base + 0xa4; + intc->reg_pending = base + 0xa8; + irq_num = 24; + irq_start = S3C2410_IRQ(32); + break; + default: + pr_err("irq: unsupported controller address\n"); + ret = -EINVAL; + goto err; + } + + /* now that all the data is complete, init the irq-domain */ + s3c24xx_clear_intc(intc); + intc->domain = irq_domain_add_legacy(np, irq_num, irq_start, + 0, &s3c24xx_irq_ops, + intc); + if (!intc->domain) { + pr_err("irq: could not create irq-domain\n"); + ret = -EINVAL; + goto err; + } + + set_handle_irq(s3c24xx_handle_irq); + + return intc; + +err: + kfree(intc); + return ERR_PTR(ret); +} + +static struct s3c_irq_data init_eint[32] = { + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 
5 }, /* EINT15 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */ +}; + +#ifdef CONFIG_CPU_S3C2410 +static struct s3c_irq_data init_s3c2410base[32] = { + { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ + { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ + { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ +}; + +static struct s3c_irq_data init_s3c2410subint[32] = { + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ +}; + +void __init s3c2410_init_irq(void) +{ +#ifdef CONFIG_FIQ + init_FIQ(FIQ_START); +#endif + + s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2410base[0], NULL, + 0x4a000000); + if (IS_ERR(s3c_intc[0])) { + pr_err("irq: could not create main interrupt controller\n"); + return; + } + + s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2410subint[0], + s3c_intc[0], 0x4a000018); + s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); +} +#endif + +#ifdef CONFIG_CPU_S3C2412 +static struct s3c_irq_data init_s3c2412base[32] = { + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT0 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT1 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT2 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT3 */ 
+ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ + { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* SDI/CF */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ + { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ +}; + +static struct s3c_irq_data init_s3c2412eint[32] = { + { .type = S3C_IRQTYPE_EINT, .parent_irq = 0 }, /* EINT0 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 1 }, /* EINT1 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 2 }, /* EINT2 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 3 }, /* EINT3 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */ + { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */ +}; + +static struct s3c_irq_data init_s3c2412subint[32] = { + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ + { .type = 
S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ + { .type = S3C_IRQTYPE_NONE, }, + { .type = S3C_IRQTYPE_NONE, }, + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* SDI */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* CF */ +}; + +void __init s3c2412_init_irq(void) +{ + pr_info("S3C2412: IRQ Support\n"); + +#ifdef CONFIG_FIQ + init_FIQ(FIQ_START); +#endif + + s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2412base[0], NULL, + 0x4a000000); + if (IS_ERR(s3c_intc[0])) { + pr_err("irq: could not create main interrupt controller\n"); + return; + } + + s3c24xx_init_intc(NULL, &init_s3c2412eint[0], s3c_intc[0], 0x560000a4); + s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2412subint[0], + s3c_intc[0], 0x4a000018); +} +#endif + +#ifdef CONFIG_CPU_S3C2416 +static struct s3c_irq_data init_s3c2416base[32] = { + { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ + { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */ + { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */ + { .type = S3C_IRQTYPE_NONE, }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* NAND */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ + { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ + { .type = S3C_IRQTYPE_NONE, }, + { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ +}; + +static struct s3c_irq_data init_s3c2416subint[32] = { + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */ + { 
.type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ +}; + +static struct s3c_irq_data init_s3c2416_second[32] = { + { .type = S3C_IRQTYPE_EDGE }, /* 2D */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE }, /* PCM0 */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_EDGE }, /* I2S0 */ +}; + +void __init s3c2416_init_irq(void) +{ + pr_info("S3C2416: IRQ Support\n"); + +#ifdef CONFIG_FIQ + init_FIQ(FIQ_START); +#endif + + s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2416base[0], NULL, + 0x4a000000); + if (IS_ERR(s3c_intc[0])) { + pr_err("irq: could not create main interrupt controller\n"); + return; + } + + s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); + s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2416subint[0], + s3c_intc[0], 0x4a000018); + + s3c_intc[2] = s3c24xx_init_intc(NULL, &init_s3c2416_second[0], + NULL, 0x4a000040); +} + +#endif + +#ifdef CONFIG_CPU_S3C2440 +static struct s3c_irq_data init_s3c2440base[32] = { + { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ + { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ + { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ + { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ +}; + +static struct s3c_irq_data init_s3c2440subint[32] = { + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ + { .type = S3C_IRQTYPE_LEVEL, 
.parent_irq = 15 }, /* UART2-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ +}; + +void __init s3c2440_init_irq(void) +{ + pr_info("S3C2440: IRQ Support\n"); + +#ifdef CONFIG_FIQ + init_FIQ(FIQ_START); +#endif + + s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2440base[0], NULL, + 0x4a000000); + if (IS_ERR(s3c_intc[0])) { + pr_err("irq: could not create main interrupt controller\n"); + return; + } + + s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); + s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2440subint[0], + s3c_intc[0], 0x4a000018); +} +#endif + +#ifdef CONFIG_CPU_S3C2442 +static struct s3c_irq_data init_s3c2442base[32] = { + { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ + { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ + { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ + { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ +}; + +static struct s3c_irq_data init_s3c2442subint[32] = { + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ +}; + +void __init s3c2442_init_irq(void) +{ + pr_info("S3C2442: IRQ Support\n"); + +#ifdef 
CONFIG_FIQ + init_FIQ(FIQ_START); +#endif + + s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2442base[0], NULL, + 0x4a000000); + if (IS_ERR(s3c_intc[0])) { + pr_err("irq: could not create main interrupt controller\n"); + return; + } + + s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); + s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2442subint[0], + s3c_intc[0], 0x4a000018); +} +#endif + +#ifdef CONFIG_CPU_S3C2443 +static struct s3c_irq_data init_s3c2443base[32] = { + { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ + { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ + { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ + { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ + { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */ + { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */ + { .type = S3C_IRQTYPE_EDGE, }, /* CFON */ + { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* NAND */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ + { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ + { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ + { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ + { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ + { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ +}; + + +static struct s3c_irq_data init_s3c2443subint[32] = { + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ + { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ + { .type = S3C_IRQTYPE_NONE }, /* reserved */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD1 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq 
= 18 }, /* UART3-RX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ + { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ +}; + +void __init s3c2443_init_irq(void) +{ + pr_info("S3C2443: IRQ Support\n"); + +#ifdef CONFIG_FIQ + init_FIQ(FIQ_START); +#endif + + s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2443base[0], NULL, + 0x4a000000); + if (IS_ERR(s3c_intc[0])) { + pr_err("irq: could not create main interrupt controller\n"); + return; + } + + s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); + s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2443subint[0], + s3c_intc[0], 0x4a000018); +} +#endif + +#ifdef CONFIG_OF +static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + unsigned int ctrl_num = hw / 32; + unsigned int intc_hw = hw % 32; + struct s3c_irq_intc *intc = s3c_intc[ctrl_num]; + struct s3c_irq_intc *parent_intc = intc->parent; + struct s3c_irq_data *irq_data = &intc->irqs[intc_hw]; + + /* attach controller pointer to irq_data */ + irq_data->intc = intc; + irq_data->offset = intc_hw; + + if (!parent_intc) + irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_edge_irq); + else + irq_set_chip_and_handler(virq, &s3c_irq_level_chip, + handle_edge_irq); + + irq_set_chip_data(virq, irq_data); + + set_irq_flags(virq, IRQF_VALID); + + return 0; +} + +/* Translate our of irq notation + * format: <ctrl_num ctrl_irq parent_irq type> + */ +static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type) +{ + struct s3c_irq_intc *intc; + struct s3c_irq_intc *parent_intc; + struct s3c_irq_data *irq_data; + struct s3c_irq_data *parent_irq_data; + int irqno; + + if (WARN_ON(intsize < 4)) + return -EINVAL; + + if (intspec[0] > 2 || !s3c_intc[intspec[0]]) { + pr_err("controller number %d invalid\n", intspec[0]); + return -EINVAL; + } + intc = s3c_intc[intspec[0]]; + + *out_hwirq = intspec[0] * 32 + intspec[2]; + *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK; + + parent_intc = intc->parent; + if (parent_intc) { + irq_data = &intc->irqs[intspec[2]]; + irq_data->parent_irq = intspec[1]; + parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; + parent_irq_data->sub_intc = intc; + parent_irq_data->sub_bits |= (1UL << intspec[2]); + + /* parent_intc is always s3c_intc[0], so no offset */ + irqno = irq_create_mapping(parent_intc->domain, intspec[1]); + if (irqno < 0) { + pr_err("irq: could not map parent interrupt\n"); + return irqno; + } + + irq_set_chained_handler(irqno, s3c_irq_demux); + } + + return 0; +} + +static struct irq_domain_ops s3c24xx_irq_ops_of = { + .map = s3c24xx_irq_map_of, + .xlate = s3c24xx_irq_xlate_of, +}; + +struct s3c24xx_irq_of_ctrl { + char *name; + unsigned long offset; + struct s3c_irq_intc **handle; + struct s3c_irq_intc **parent; + struct irq_domain_ops *ops; +}; + +static int __init s3c_init_intc_of(struct device_node *np, + struct device_node *interrupt_parent, + struct s3c24xx_irq_of_ctrl *s3c_ctrl, int num_ctrl) +{ + struct s3c_irq_intc *intc; + struct s3c24xx_irq_of_ctrl *ctrl; + struct irq_domain *domain; + void __iomem *reg_base; + int i; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("irq-s3c24xx: could not map irq registers\n"); + return -EINVAL; + } + + domain = irq_domain_add_linear(np, num_ctrl * 32, + &s3c24xx_irq_ops_of, NULL); 
+ if (!domain) { + pr_err("irq: could not create irq-domain\n"); + return -EINVAL; + } + + for (i = 0; i < num_ctrl; i++) { + ctrl = &s3c_ctrl[i]; + + pr_debug("irq: found controller %s\n", ctrl->name); + + intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL); + if (!intc) + return -ENOMEM; + + intc->domain = domain; + intc->irqs = kzalloc(sizeof(struct s3c_irq_data) * 32, + GFP_KERNEL); + if (!intc->irqs) { + kfree(intc); + return -ENOMEM; + } + + if (ctrl->parent) { + intc->reg_pending = reg_base + ctrl->offset; + intc->reg_mask = reg_base + ctrl->offset + 0x4; + + if (*(ctrl->parent)) { + intc->parent = *(ctrl->parent); + } else { + pr_warn("irq: parent of %s missing\n", + ctrl->name); + kfree(intc->irqs); + kfree(intc); + continue; + } + } else { + intc->reg_pending = reg_base + ctrl->offset; + intc->reg_mask = reg_base + ctrl->offset + 0x08; + intc->reg_intpnd = reg_base + ctrl->offset + 0x10; + } + + s3c24xx_clear_intc(intc); + s3c_intc[i] = intc; + } + + set_handle_irq(s3c24xx_handle_irq); + + return 0; +} + +static struct s3c24xx_irq_of_ctrl s3c2410_ctrl[] = { + { + .name = "intc", + .offset = 0, + }, { + .name = "subintc", + .offset = 0x18, + .parent = &s3c_intc[0], + } +}; + +int __init s3c2410_init_intc_of(struct device_node *np, + struct device_node *interrupt_parent) +{ + return s3c_init_intc_of(np, interrupt_parent, + s3c2410_ctrl, ARRAY_SIZE(s3c2410_ctrl)); +} +IRQCHIP_DECLARE(s3c2410_irq, "samsung,s3c2410-irq", s3c2410_init_intc_of); + +static struct s3c24xx_irq_of_ctrl s3c2416_ctrl[] = { + { + .name = "intc", + .offset = 0, + }, { + .name = "subintc", + .offset = 0x18, + .parent = &s3c_intc[0], + }, { + .name = "intc2", + .offset = 0x40, + } +}; + +int __init s3c2416_init_intc_of(struct device_node *np, + struct device_node *interrupt_parent) +{ + return s3c_init_intc_of(np, interrupt_parent, + s3c2416_ctrl, ARRAY_SIZE(s3c2416_ctrl)); +} +IRQCHIP_DECLARE(s3c2416_irq, "samsung,s3c2416-irq", s3c2416_init_intc_of); +#endif diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c new file mode 100644 index 00000000000..5e54f6d71e7 --- /dev/null +++ b/drivers/irqchip/irq-sirfsoc.c @@ -0,0 +1,130 @@ +/* + * interrupt controller support for CSR SiRFprimaII + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
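Aside: the s3c24xx OF support above ends by registering its entry points with IRQCHIP_DECLARE(), which is the pattern most of the new drivers in this series follow (linear irq domain created from a device node, init function hooked into the irqchip table). A minimal sketch of that pattern, with invented "acme" names and the in-tree dummy chip standing in for a real struct irq_chip:

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "irqchip.h"	/* local header providing IRQCHIP_DECLARE() */

static struct irq_domain *acme_domain;

/* Map every hwirq onto the dummy chip with a level-type flow handler;
 * a real driver would supply its own struct irq_chip here. */
static int acme_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops acme_irq_ops = {
	.map	= acme_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int __init acme_irq_init(struct device_node *np,
				struct device_node *parent)
{
	void __iomem *base = of_iomap(np, 0);

	if (!base)
		return -ENOMEM;

	acme_domain = irq_domain_add_linear(np, 32, &acme_irq_ops, base);
	return acme_domain ? 0 : -ENOMEM;
}
IRQCHIP_DECLARE(acme_intc, "acme,example-intc", acme_irq_init);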
+ */ + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/irqdomain.h> +#include <linux/syscore_ops.h> +#include <asm/mach/irq.h> +#include <asm/exception.h> +#include "irqchip.h" + +#define SIRFSOC_INT_RISC_MASK0 0x0018 +#define SIRFSOC_INT_RISC_MASK1 0x001C +#define SIRFSOC_INT_RISC_LEVEL0 0x0020 +#define SIRFSOC_INT_RISC_LEVEL1 0x0024 +#define SIRFSOC_INIT_IRQ_ID 0x0038 + +#define SIRFSOC_NUM_IRQS 64 + +static struct irq_domain *sirfsoc_irqdomain; + +static __init void +sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) +{ + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + int ret; + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + unsigned int set = IRQ_LEVEL; + + ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc", + handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE); + + gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start); + gc->reg_base = base; + ct = gc->chip_types; + ct->chip.irq_mask = irq_gc_mask_clr_bit; + ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->regs.mask = SIRFSOC_INT_RISC_MASK0; +} + +static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) +{ + void __iomem *base = sirfsoc_irqdomain->host_data; + u32 irqstat, irqnr; + + irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID); + irqnr = irq_find_mapping(sirfsoc_irqdomain, irqstat & 0xff); + + handle_IRQ(irqnr, regs); +} + +static int __init sirfsoc_irq_init(struct device_node *np, + struct device_node *parent) +{ + void __iomem *base = of_iomap(np, 0); + if (!base) + panic("unable to map intc cpu registers\n"); + + sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS, + &irq_generic_chip_ops, base); + + sirfsoc_alloc_gc(base, 0, 32); + sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); + + writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0); + writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1); + + writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK0); + writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK1); + + set_handle_irq(sirfsoc_handle_irq); + + return 0; +} +IRQCHIP_DECLARE(sirfsoc_intc, "sirf,prima2-intc", sirfsoc_irq_init); + +struct sirfsoc_irq_status { + u32 mask0; + u32 mask1; + u32 level0; + u32 level1; +}; + +static struct sirfsoc_irq_status sirfsoc_irq_st; + +static int sirfsoc_irq_suspend(void) +{ + void __iomem *base = sirfsoc_irqdomain->host_data; + + sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0); + sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1); + sirfsoc_irq_st.level0 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL0); + sirfsoc_irq_st.level1 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL1); + + return 0; +} + +static void sirfsoc_irq_resume(void) +{ + void __iomem *base = sirfsoc_irqdomain->host_data; + + writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0); + writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1); + writel_relaxed(sirfsoc_irq_st.level0, base + SIRFSOC_INT_RISC_LEVEL0); + writel_relaxed(sirfsoc_irq_st.level1, base + SIRFSOC_INT_RISC_LEVEL1); +} + +static struct syscore_ops sirfsoc_irq_syscore_ops = { + .suspend = sirfsoc_irq_suspend, + .resume = sirfsoc_irq_resume, +}; + +static int __init sirfsoc_irq_pm_init(void) +{ + if (!sirfsoc_irqdomain) + return 0; + + register_syscore_ops(&sirfsoc_irq_syscore_ops); + return 0; +} +device_initcall(sirfsoc_irq_pm_init); diff --git a/drivers/irqchip/irq-sun4i.c 
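Aside: the SiRFSoC driver above keeps its mask and level registers alive across suspend through syscore_ops rather than a struct device, since the root interrupt controller has no device to bind a driver to. A stripped-down sketch of that idiom, with an invented register offset and acme_* names:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#define ACME_INTC_MASK		0x18	/* invented offset */

static void __iomem *acme_intc_base;	/* assumed set during controller probe */
static u32 acme_saved_mask;

static int acme_intc_suspend(void)
{
	acme_saved_mask = readl_relaxed(acme_intc_base + ACME_INTC_MASK);
	return 0;
}

static void acme_intc_resume(void)
{
	writel_relaxed(acme_saved_mask, acme_intc_base + ACME_INTC_MASK);
}

static struct syscore_ops acme_intc_syscore_ops = {
	.suspend	= acme_intc_suspend,
	.resume		= acme_intc_resume,
};

static int __init acme_intc_pm_init(void)
{
	if (!acme_intc_base)	/* controller absent: nothing to do */
		return 0;
	register_syscore_ops(&acme_intc_syscore_ops);
	return 0;
}
device_initcall(acme_intc_pm_init);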
b/drivers/irqchip/irq-sun4i.c new file mode 100644 index 00000000000..6fcef4a95a1 --- /dev/null +++ b/drivers/irqchip/irq-sun4i.c @@ -0,0 +1,161 @@ +/* + * Allwinner A1X SoCs IRQ chip driver. + * + * Copyright (C) 2012 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * Based on code from + * Allwinner Technology Co., Ltd. <www.allwinnertech.com> + * Benn Huang <benn@allwinnertech.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <asm/exception.h> +#include <asm/mach/irq.h> + +#include "irqchip.h" + +#define SUN4I_IRQ_VECTOR_REG 0x00 +#define SUN4I_IRQ_PROTECTION_REG 0x08 +#define SUN4I_IRQ_NMI_CTRL_REG 0x0c +#define SUN4I_IRQ_PENDING_REG(x) (0x10 + 0x4 * x) +#define SUN4I_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x) +#define SUN4I_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x) +#define SUN4I_IRQ_MASK_REG(x) (0x50 + 0x4 * x) + +static void __iomem *sun4i_irq_base; +static struct irq_domain *sun4i_irq_domain; + +static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs); + +static void sun4i_irq_ack(struct irq_data *irqd) +{ + unsigned int irq = irqd_to_hwirq(irqd); + + if (irq != 0) + return; /* Only IRQ 0 / the ENMI needs to be acked */ + + writel(BIT(0), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)); +} + +static void sun4i_irq_mask(struct irq_data *irqd) +{ + unsigned int irq = irqd_to_hwirq(irqd); + unsigned int irq_off = irq % 32; + int reg = irq / 32; + u32 val; + + val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); + writel(val & ~(1 << irq_off), + sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); +} + +static void sun4i_irq_unmask(struct irq_data *irqd) +{ + unsigned int irq = irqd_to_hwirq(irqd); + unsigned int irq_off = irq % 32; + int reg = irq / 32; + u32 val; + + val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); + writel(val | (1 << irq_off), + sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); +} + +static struct irq_chip sun4i_irq_chip = { + .name = "sun4i_irq", + .irq_eoi = sun4i_irq_ack, + .irq_mask = sun4i_irq_mask, + .irq_unmask = sun4i_irq_unmask, + .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED, +}; + +static int sun4i_irq_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq); + set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); + + return 0; +} + +static struct irq_domain_ops sun4i_irq_ops = { + .map = sun4i_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int __init sun4i_of_init(struct device_node *node, + struct device_node *parent) +{ + sun4i_irq_base = of_iomap(node, 0); + if (!sun4i_irq_base) + panic("%s: unable to map IC registers\n", + node->full_name); + + /* Disable all interrupts */ + writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0)); + writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1)); + writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2)); + + /* Unmask all the interrupts, ENABLE_REG(x) is used for masking */ + writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0)); + writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1)); + writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2)); + + /* Clear all the pending interrupts */ + writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)); + writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(1)); + 
writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(2)); + + /* Enable protection mode */ + writel(0x01, sun4i_irq_base + SUN4I_IRQ_PROTECTION_REG); + + /* Configure the external interrupt source type */ + writel(0x00, sun4i_irq_base + SUN4I_IRQ_NMI_CTRL_REG); + + sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32, + &sun4i_irq_ops, NULL); + if (!sun4i_irq_domain) + panic("%s: unable to create IRQ domain\n", node->full_name); + + set_handle_irq(sun4i_handle_irq); + + return 0; +} +IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_of_init); + +static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs) +{ + u32 irq, hwirq; + + /* + * hwirq == 0 can mean one of 3 things: + * 1) no more irqs pending + * 2) irq 0 pending + * 3) spurious irq + * So if we immediately get a reading of 0, check the irq-pending reg + * to differentiate between 2 and 3. We only do this once to avoid + * the extra check in the common case of 1 happening after having + * read the vector-reg once. + */ + hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; + if (hwirq == 0 && + !(readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)) & BIT(0))) + return; + + do { + irq = irq_find_mapping(sun4i_irq_domain, hwirq); + handle_IRQ(irq, regs); + hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; + } while (hwirq != 0); +} diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c new file mode 100644 index 00000000000..12f547a44ae --- /dev/null +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -0,0 +1,208 @@ +/* + * Allwinner A20/A31 SoCs NMI IRQ chip driver. + * + * Carlo Caione <carlo.caione@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied.
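Aside: sun4i_handle_irq() above has the usual shape of an ARM root-controller flow handler installed with set_handle_irq(): read a hardware vector/status register, translate the hwirq through the irq domain, and hand the Linux IRQ to the core. A generic, hypothetical sketch of just that loop (invented register layout, same header set as the driver above; only the sun4i code needs the extra hwirq == 0 disambiguation):

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#include <asm/exception.h>
#include <asm/mach/irq.h>

#define ACME_STAT_REG	0x00	/* invented: reads 0 when nothing is pending */

static void __iomem *acme_base;
static struct irq_domain *acme_domain;

static void __exception_irq_entry acme_handle_irq(struct pt_regs *regs)
{
	u32 hwirq;

	/* Drain pending sources until the status register reads 0 */
	while ((hwirq = readl(acme_base + ACME_STAT_REG)))
		handle_IRQ(irq_find_mapping(acme_domain, hwirq), regs);
}

/* Installed once from the init path, exactly as above:
 *	set_handle_irq(acme_handle_irq);
 */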
+ */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/irqchip/chained_irq.h> +#include "irqchip.h" + +#define SUNXI_NMI_SRC_TYPE_MASK 0x00000003 + +enum { + SUNXI_SRC_TYPE_LEVEL_LOW = 0, + SUNXI_SRC_TYPE_EDGE_FALLING, + SUNXI_SRC_TYPE_LEVEL_HIGH, + SUNXI_SRC_TYPE_EDGE_RISING, +}; + +struct sunxi_sc_nmi_reg_offs { + u32 ctrl; + u32 pend; + u32 enable; +}; + +static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = { + .ctrl = 0x00, + .pend = 0x04, + .enable = 0x08, +}; + +static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = { + .ctrl = 0x00, + .pend = 0x04, + .enable = 0x34, +}; + +static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, + u32 val) +{ + irq_reg_writel(val, gc->reg_base + off); +} + +static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) +{ + return irq_reg_readl(gc->reg_base + off); +} + +static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) +{ + struct irq_domain *domain = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_get_chip(irq); + unsigned int virq = irq_find_mapping(domain, 0); + + chained_irq_enter(chip, desc); + generic_handle_irq(virq); + chained_irq_exit(chip, desc); +} + +static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = gc->chip_types; + u32 src_type_reg; + u32 ctrl_off = ct->regs.type; + unsigned int src_type; + unsigned int i; + + irq_gc_lock(gc); + + switch (flow_type & IRQF_TRIGGER_MASK) { + case IRQ_TYPE_EDGE_FALLING: + src_type = SUNXI_SRC_TYPE_EDGE_FALLING; + break; + case IRQ_TYPE_EDGE_RISING: + src_type = SUNXI_SRC_TYPE_EDGE_RISING; + break; + case IRQ_TYPE_LEVEL_HIGH: + src_type = SUNXI_SRC_TYPE_LEVEL_HIGH; + break; + case IRQ_TYPE_NONE: + case IRQ_TYPE_LEVEL_LOW: + src_type = SUNXI_SRC_TYPE_LEVEL_LOW; + break; + default: + irq_gc_unlock(gc); + pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", + __func__, data->irq); + return -EBADR; + } + + irqd_set_trigger_type(data, flow_type); + irq_setup_alt_chip(data, flow_type); + + for (i = 0; i <= gc->num_ct; i++, ct++) + if (ct->type & flow_type) + ctrl_off = ct->regs.type; + + src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off); + src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK; + src_type_reg |= src_type; + sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg); + + irq_gc_unlock(gc); + + return IRQ_SET_MASK_OK; +} + +static int __init sunxi_sc_nmi_irq_init(struct device_node *node, + struct sunxi_sc_nmi_reg_offs *reg_offs) +{ + struct irq_domain *domain; + struct irq_chip_generic *gc; + unsigned int irq; + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + int ret; + + + domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); + if (!domain) { + pr_err("%s: Could not register interrupt domain.\n", node->name); + return -ENOMEM; + } + + ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name, + handle_fasteoi_irq, clr, 0, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("%s: Could not allocate generic interrupt chip.\n", + node->name); + goto fail_irqd_remove; + } + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) { + pr_err("%s: unable to parse irq\n", node->name); + ret = -EINVAL; + goto fail_irqd_remove; + } + + gc = irq_get_domain_generic_chip(domain, 0); 
+ gc->reg_base = of_iomap(node, 0); + if (!gc->reg_base) { + pr_err("%s: unable to map resource\n", node->name); + ret = -ENOMEM; + goto fail_irqd_remove; + } + + gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit; + gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type; + gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED; + gc->chip_types[0].regs.ack = reg_offs->pend; + gc->chip_types[0].regs.mask = reg_offs->enable; + gc->chip_types[0].regs.type = reg_offs->ctrl; + + gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + gc->chip_types[1].chip.name = gc->chip_types[0].chip.name; + gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; + gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type; + gc->chip_types[1].regs.ack = reg_offs->pend; + gc->chip_types[1].regs.mask = reg_offs->enable; + gc->chip_types[1].regs.type = reg_offs->ctrl; + gc->chip_types[1].handler = handle_edge_irq; + + sunxi_sc_nmi_write(gc, reg_offs->enable, 0); + sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1); + + irq_set_handler_data(irq, domain); + irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq); + + return 0; + +fail_irqd_remove: + irq_domain_remove(domain); + + return ret; +} + +static int __init sun6i_sc_nmi_irq_init(struct device_node *node, + struct device_node *parent) +{ + return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs); +} +IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init); + +static int __init sun7i_sc_nmi_irq_init(struct device_node *node, + struct device_node *parent) +{ + return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs); +} +IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init); diff --git a/drivers/irqchip/irq-sunxi.c b/drivers/irqchip/irq-sunxi.c deleted file mode 100644 index 10974fa4265..00000000000 --- a/drivers/irqchip/irq-sunxi.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Allwinner A1X SoCs IRQ chip driver. - * - * Copyright (C) 2012 Maxime Ripard - * - * Maxime Ripard <maxime.ripard@free-electrons.com> - * - * Based on code from - * Allwinner Technology Co., Ltd. <www.allwinnertech.com> - * Benn Huang <benn@allwinnertech.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
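Aside: sunxi_sc_nmi_handle_irq() above is a textbook chained handler for a controller that sits behind a parent interrupt: it brackets the demux with chained_irq_enter()/chained_irq_exit() so the parent chip is acked and eoi'd correctly. A hypothetical minimal version, assuming a single child interrupt at hwirq 0 as in the NMI case:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

static void acme_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/* only one child source behind this parent line (hwirq 0) */
	generic_handle_irq(irq_find_mapping(domain, 0));
	chained_irq_exit(chip, desc);
}

/* Wired up at init time, as the driver above does:
 *	irq_set_handler_data(parent_irq, domain);
 *	irq_set_chained_handler(parent_irq, acme_cascade_handler);
 */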
- */ - -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> - -#include <linux/irqchip/sunxi.h> - -#define SUNXI_IRQ_VECTOR_REG 0x00 -#define SUNXI_IRQ_PROTECTION_REG 0x08 -#define SUNXI_IRQ_NMI_CTRL_REG 0x0c -#define SUNXI_IRQ_PENDING_REG(x) (0x10 + 0x4 * x) -#define SUNXI_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x) -#define SUNXI_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x) -#define SUNXI_IRQ_MASK_REG(x) (0x50 + 0x4 * x) - -static void __iomem *sunxi_irq_base; -static struct irq_domain *sunxi_irq_domain; - -void sunxi_irq_ack(struct irq_data *irqd) -{ - unsigned int irq = irqd_to_hwirq(irqd); - unsigned int irq_off = irq % 32; - int reg = irq / 32; - u32 val; - - val = readl(sunxi_irq_base + SUNXI_IRQ_PENDING_REG(reg)); - writel(val | (1 << irq_off), - sunxi_irq_base + SUNXI_IRQ_PENDING_REG(reg)); -} - -static void sunxi_irq_mask(struct irq_data *irqd) -{ - unsigned int irq = irqd_to_hwirq(irqd); - unsigned int irq_off = irq % 32; - int reg = irq / 32; - u32 val; - - val = readl(sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg)); - writel(val & ~(1 << irq_off), - sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg)); -} - -static void sunxi_irq_unmask(struct irq_data *irqd) -{ - unsigned int irq = irqd_to_hwirq(irqd); - unsigned int irq_off = irq % 32; - int reg = irq / 32; - u32 val; - - val = readl(sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg)); - writel(val | (1 << irq_off), - sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg)); -} - -static struct irq_chip sunxi_irq_chip = { - .name = "sunxi_irq", - .irq_ack = sunxi_irq_ack, - .irq_mask = sunxi_irq_mask, - .irq_unmask = sunxi_irq_unmask, -}; - -static int sunxi_irq_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw) -{ - irq_set_chip_and_handler(virq, &sunxi_irq_chip, - handle_level_irq); - set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); - - return 0; -} - -static struct irq_domain_ops sunxi_irq_ops = { - .map = sunxi_irq_map, - .xlate = irq_domain_xlate_onecell, -}; - -static int __init sunxi_of_init(struct device_node *node, - struct device_node *parent) -{ - sunxi_irq_base = of_iomap(node, 0); - if (!sunxi_irq_base) - panic("%s: unable to map IC registers\n", - node->full_name); - - /* Disable all interrupts */ - writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(0)); - writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(1)); - writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(2)); - - /* Mask all the interrupts */ - writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(0)); - writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(1)); - writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(2)); - - /* Clear all the pending interrupts */ - writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(0)); - writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(1)); - writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(2)); - - /* Enable protection mode */ - writel(0x01, sunxi_irq_base + SUNXI_IRQ_PROTECTION_REG); - - /* Configure the external interrupt source type */ - writel(0x00, sunxi_irq_base + SUNXI_IRQ_NMI_CTRL_REG); - - sunxi_irq_domain = irq_domain_add_linear(node, 3 * 32, - &sunxi_irq_ops, NULL); - if (!sunxi_irq_domain) - panic("%s: unable to create IRQ domain\n", node->full_name); - - return 0; -} - -static struct of_device_id sunxi_irq_dt_ids[] __initconst = { - { .compatible = "allwinner,sunxi-ic", .data = sunxi_of_init }, - { } -}; - -void __init sunxi_init_irq(void) -{ - of_irq_init(sunxi_irq_dt_ids); -} - -asmlinkage void __exception_irq_entry sunxi_handle_irq(struct pt_regs *regs) -{ - u32 irq, 
hwirq; - - hwirq = readl(sunxi_irq_base + SUNXI_IRQ_VECTOR_REG) >> 2; - while (hwirq != 0) { - irq = irq_find_mapping(sunxi_irq_domain, hwirq); - handle_IRQ(irq, regs); - hwirq = readl(sunxi_irq_base + SUNXI_IRQ_VECTOR_REG) >> 2; - } -} diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c new file mode 100644 index 00000000000..7c44c99bf1f --- /dev/null +++ b/drivers/irqchip/irq-tb10x.c @@ -0,0 +1,195 @@ +/* + * Abilis Systems interrupt controller driver + * + * Copyright (C) Abilis Systems 2012 + * + * Author: Christian Ruppert <christian.ruppert@abilis.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/bitops.h> +#include "irqchip.h" + +#define AB_IRQCTL_INT_ENABLE 0x00 +#define AB_IRQCTL_INT_STATUS 0x04 +#define AB_IRQCTL_SRC_MODE 0x08 +#define AB_IRQCTL_SRC_POLARITY 0x0C +#define AB_IRQCTL_INT_MODE 0x10 +#define AB_IRQCTL_INT_POLARITY 0x14 +#define AB_IRQCTL_INT_FORCE 0x18 + +#define AB_IRQCTL_MAXIRQ 32 + +static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg, + u32 val) +{ + irq_reg_writel(val, gc->reg_base + reg); +} + +static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) +{ + return irq_reg_readl(gc->reg_base + reg); +} + +static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + uint32_t im, mod, pol; + + im = data->mask; + + irq_gc_lock(gc); + + mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im; + pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im; + + switch (flow_type & IRQF_TRIGGER_MASK) { + case IRQ_TYPE_EDGE_FALLING: + pol ^= im; + break; + case IRQ_TYPE_LEVEL_HIGH: + mod ^= im; + break; + case IRQ_TYPE_NONE: + flow_type = IRQ_TYPE_LEVEL_LOW; + case IRQ_TYPE_LEVEL_LOW: + mod ^= im; + pol ^= im; + break; + case IRQ_TYPE_EDGE_RISING: + break; + default: + irq_gc_unlock(gc); + pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", + __func__, data->irq); + return -EBADR; + } + + irqd_set_trigger_type(data, flow_type); + irq_setup_alt_chip(data, flow_type); + + ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod); + ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol); + ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im); + + irq_gc_unlock(gc); + + return IRQ_SET_MASK_OK; +} + +static void tb10x_irq_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_domain *domain = irq_desc_get_handler_data(desc); + + generic_handle_irq(irq_find_mapping(domain, irq)); +} + +static int __init of_tb10x_init_irq(struct device_node *ictl, + struct device_node *parent) +{ + int i, ret, nrirqs = of_irq_count(ictl); + struct resource mem; + struct irq_chip_generic *gc; + struct 
irq_domain *domain; + void __iomem *reg_base; + + if (of_address_to_resource(ictl, 0, &mem)) { + pr_err("%s: No registers declared in DeviceTree.\n", + ictl->name); + return -EINVAL; + } + + if (!request_mem_region(mem.start, resource_size(&mem), + ictl->name)) { + pr_err("%s: Request mem region failed.\n", ictl->name); + return -EBUSY; + } + + reg_base = ioremap(mem.start, resource_size(&mem)); + if (!reg_base) { + ret = -EBUSY; + pr_err("%s: ioremap failed.\n", ictl->name); + goto ioremap_fail; + } + + domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ, + &irq_generic_chip_ops, NULL); + if (!domain) { + ret = -ENOMEM; + pr_err("%s: Could not register interrupt domain.\n", + ictl->name); + goto irq_domain_add_fail; + } + + ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ, + 2, ictl->name, handle_level_irq, + IRQ_NOREQUEST, IRQ_NOPROBE, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("%s: Could not allocate generic interrupt chip.\n", + ictl->name); + goto gc_alloc_fail; + } + + gc = domain->gc->gc[0]; + gc->reg_base = reg_base; + + gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_set_type = tb10x_irq_set_type; + gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE; + + gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + gc->chip_types[1].chip.name = gc->chip_types[0].chip.name; + gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; + gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types[1].chip.irq_set_type = tb10x_irq_set_type; + gc->chip_types[1].regs.ack = AB_IRQCTL_INT_STATUS; + gc->chip_types[1].regs.mask = AB_IRQCTL_INT_ENABLE; + gc->chip_types[1].handler = handle_edge_irq; + + for (i = 0; i < nrirqs; i++) { + unsigned int irq = irq_of_parse_and_map(ictl, i); + + irq_set_handler_data(irq, domain); + irq_set_chained_handler(irq, tb10x_irq_cascade); + } + + ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0); + ab_irqctl_writereg(gc, AB_IRQCTL_INT_MODE, 0); + ab_irqctl_writereg(gc, AB_IRQCTL_INT_POLARITY, 0); + ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, ~0UL); + + return 0; + +gc_alloc_fail: + irq_domain_remove(domain); +irq_domain_add_fail: + iounmap(reg_base); +ioremap_fail: + release_mem_region(mem.start, resource_size(&mem)); + return ret; +} +IRQCHIP_DECLARE(tb10x_intc, "abilis,tb10x-ictl", of_tb10x_init_irq); diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 9dbd82b716d..3ae2bb8d9cf 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/exception.h> #include <asm/mach/irq.h> @@ -119,7 +120,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, /* Skip invalid IRQs, only register handlers for the real ones */ if (!(f->valid & BIT(hwirq))) - return -ENOTSUPP; + return -EPERM; irq_set_chip_data(irq, f); irq_set_chip_and_handler(irq, &f->chip, handle_level_irq); @@ -139,7 +140,7 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, int i; if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) { - pr_err("%s: too few FPGA IRQ controllers, increase CONFIG_PLAT_VERSATILE_FPGA_IRQ_NR\n", __func__); + pr_err("%s: too few FPGA IRQ controllers, increase CONFIG_VERSATILE_FPGA_IRQ_NR\n", __func__); return; } f = 
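Aside: the TB10x driver above (like the sunxi NMI driver earlier) allocates one generic chip with two irq_chip_type entries, so that irq_setup_alt_chip() called from .irq_set_type can flip between the level and edge register/handler sets. The skeleton of that setup, with invented names and register offsets and everything else mirroring the calls used above:

#include <linux/irq.h>
#include <linux/irqdomain.h>

#define ACME_ENABLE	0x00	/* invented register offsets */
#define ACME_ACK	0x04

static int __init acme_gc_setup(struct irq_domain *domain,
				void __iomem *reg_base)
{
	struct irq_chip_generic *gc;
	int ret;

	/* 32 irqs per chip, 2 chip types: [0] = level, [1] = edge */
	ret = irq_alloc_domain_generic_chips(domain, 32, 2, "acme",
					     handle_level_irq,
					     IRQ_NOREQUEST, IRQ_NOPROBE,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		return ret;

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = reg_base;

	gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
	gc->chip_types[0].regs.mask = ACME_ENABLE;

	gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
	gc->chip_types[1].regs.ack = ACME_ACK;
	gc->chip_types[1].regs.mask = ACME_ENABLE;
	gc->chip_types[1].handler = handle_edge_irq;

	return 0;
}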
&fpga_irq_devices[fpga_irq_id]; @@ -167,8 +168,12 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, f->used_irqs++; } - pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs\n", + pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs", fpga_irq_id, name, base, f->used_irqs); + if (parent_irq != -1) + pr_cont(", parent IRQ: %d\n", parent_irq); + else + pr_cont("\n"); fpga_irq_id++; } @@ -180,6 +185,7 @@ int __init fpga_irq_of_init(struct device_node *node, void __iomem *base; u32 clear_mask; u32 valid_mask; + int parent_irq; if (WARN_ON(!node)) return -ENODEV; @@ -193,7 +199,12 @@ int __init fpga_irq_of_init(struct device_node *node, if (of_property_read_u32(node, "valid-mask", &valid_mask)) valid_mask = 0; - fpga_irq_init(base, node->name, 0, -1, valid_mask, node); + /* Some chips are cascaded from a parent IRQ */ + parent_irq = irq_of_parse_and_map(node, 0); + if (!parent_irq) + parent_irq = -1; + + fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); writel(clear_mask, base + IRQ_ENABLE_CLEAR); writel(clear_mask, base + FIQ_ENABLE_CLEAR); diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 3cf97aaebe4..7d35287f9e9 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -23,6 +23,8 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> @@ -33,7 +35,7 @@ #include <linux/irqchip/arm-vic.h> #include <asm/exception.h> -#include <asm/mach/irq.h> +#include <asm/irq.h> #include "irqchip.h" @@ -56,6 +58,7 @@ /** * struct vic_device - VIC PM device + * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0. * @irq: The IRQ number for the base of the VIC. * @base: The register base for the VIC. * @valid_sources: A bitmask of valid interrupts @@ -196,7 +199,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq, /* Skip invalid IRQs, only register handlers for the real ones */ if (!(v->valid_sources & (1 << hwirq))) - return -ENOTSUPP; + return -EPERM; irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); irq_set_chip_data(irq, v->base); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); @@ -223,11 +226,27 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) return handled; } +static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) +{ + u32 stat, hwirq; + struct irq_chip *host_chip = irq_desc_get_chip(desc); + struct vic_device *vic = irq_desc_get_handler_data(desc); + + chained_irq_enter(host_chip, desc); + + while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { + hwirq = ffs(stat) - 1; + generic_handle_irq(irq_find_mapping(vic->domain, hwirq)); + } + + chained_irq_exit(host_chip, desc); +} + /* * Keep iterating over all registered VIC's until there are no pending * interrupts. */ -static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs) { int i, handled; @@ -245,6 +264,7 @@ static struct irq_domain_ops vic_irqdomain_ops = { /** * vic_register() - Register a VIC. * @base: The base address of the VIC. + * @parent_irq: The parent IRQ if cascaded, else 0. * @irq: The base IRQ for the VIC. * @valid_sources: bitmask of valid interrupts * @resume_sources: bitmask of interrupts allowed for resume sources. 
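Aside: the -ENOTSUPP to -EPERM change in the FPGA and VIC .map callbacks above is about holes in a valid-mask: as of this series the irqdomain core suppresses its "didn't like hwirq" warning when .map returns -EPERM, so missing sources are skipped quietly instead of being reported as errors. The general shape of such a callback, with a hypothetical per-controller structure:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct acme_fpga_irq_data {		/* invented, stands in for the real per-chip data */
	void __iomem *base;
	u32 valid;			/* bitmask of implemented sources */
	struct irq_chip chip;
};

static int acme_fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hwirq)
{
	struct acme_fpga_irq_data *f = d->host_data;

	/* Hole in the valid mask: refuse quietly with -EPERM */
	if (!(f->valid & BIT(hwirq)))
		return -EPERM;

	irq_set_chip_data(irq, f);
	irq_set_chip_and_handler(irq, &f->chip, handle_level_irq);
	return 0;
}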
@@ -256,7 +276,8 @@ static struct irq_domain_ops vic_irqdomain_ops = { * * This also configures the IRQ domain for the VIC. */ -static void __init vic_register(void __iomem *base, unsigned int irq, +static void __init vic_register(void __iomem *base, unsigned int parent_irq, + unsigned int irq, u32 valid_sources, u32 resume_sources, struct device_node *node) { @@ -272,15 +293,25 @@ static void __init vic_register(void __iomem *base, unsigned int irq, v->base = base; v->valid_sources = valid_sources; v->resume_sources = resume_sources; - v->irq = irq; set_handle_irq(vic_handle_irq); vic_id++; + + if (parent_irq) { + irq_set_handler_data(parent_irq, v); + irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded); + } + v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, &vic_irqdomain_ops, v); /* create an IRQ mapping for each valid IRQ */ for (i = 0; i < fls(valid_sources); i++) if (valid_sources & (1 << i)) irq_create_mapping(v->domain, i); + /* If no base IRQ was passed, figure out our allocated base */ + if (irq) + v->irq = irq; + else + v->irq = irq_find_mapping(v->domain, 0); } static void vic_ack_irq(struct irq_data *d) @@ -408,10 +439,10 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start, writel(32, base + VIC_PL190_DEF_VECT_ADDR); } - vic_register(base, irq_start, vic_sources, 0, node); + vic_register(base, 0, irq_start, vic_sources, 0, node); } -void __init __vic_init(void __iomem *base, int irq_start, +void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node) { @@ -448,7 +479,7 @@ void __init __vic_init(void __iomem *base, int irq_start, vic_init2(base); - vic_register(base, irq_start, vic_sources, resume_sources, node); + vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node); } /** @@ -461,13 +492,37 @@ void __init __vic_init(void __iomem *base, int irq_start, void __init vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources) { - __vic_init(base, irq_start, vic_sources, resume_sources, NULL); + __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL); +} + +/** + * vic_init_cascaded() - initialise a cascaded vectored interrupt controller + * @base: iomem base address + * @parent_irq: the parent IRQ we're cascaded off + * @irq_start: starting interrupt number, must be muliple of 32 + * @vic_sources: bitmask of interrupt sources to allow + * @resume_sources: bitmask of interrupt sources to allow for resume + * + * This returns the base for the new interrupts or negative on error. 
+ */ +int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq, + u32 vic_sources, u32 resume_sources) +{ + struct vic_device *v; + + v = &vic_devices[vic_id]; + __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL); + /* Return out acquired base */ + return v->irq; } +EXPORT_SYMBOL_GPL(vic_init_cascaded); #ifdef CONFIG_OF int __init vic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *regs; + u32 interrupt_mask = ~0; + u32 wakeup_mask = ~0; if (WARN(parent, "non-root VICs are not supported")) return -EINVAL; @@ -476,10 +531,13 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent) if (WARN_ON(!regs)) return -EIO; + of_property_read_u32(node, "valid-mask", &interrupt_mask); + of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask); + /* * Passing 0 as first IRQ makes the simple domain allocate descriptors */ - __vic_init(regs, 0, ~0, ~0, node); + __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node); return 0; } diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c new file mode 100644 index 00000000000..eb6e91efdec --- /dev/null +++ b/drivers/irqchip/irq-vt8500.c @@ -0,0 +1,260 @@ +/* + * arch/arm/mach-vt8500/irq.c + * + * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz> + * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * This file is copied and modified from the original irq.c provided by + * Alexey Charkov. Minor changes have been made for Device Tree Support. 
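Aside: the new vic_init_cascaded() above is intended for platform code that has a secondary VIC hanging off another interrupt controller; it passes irq_start = 0 so the simple domain allocates descriptors and then returns the allocated Linux IRQ base (or a negative value per its kerneldoc). A hypothetical caller, assuming the matching prototype is made visible through <linux/irqchip/arm-vic.h> and using an invented parent line number:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/irqchip/arm-vic.h>

#define BOARD_VIC2_PARENT_IRQ	64	/* invented parent line */

static int board_vic2_base;

static void __init board_init_secondary_vic(void __iomem *vic2_base)
{
	board_vic2_base = vic_init_cascaded(vic2_base, BOARD_VIC2_PARENT_IRQ,
					    ~0, 0);
	if (board_vic2_base < 0)
		pr_err("VIC2: cascaded init failed (%d)\n", board_vic2_base);
}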
+ */ + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/interrupt.h> +#include <linux/bitops.h> + +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> + +#include <asm/irq.h> +#include <asm/exception.h> +#include <asm/mach/irq.h> + +#include "irqchip.h" + +#define VT8500_ICPC_IRQ 0x20 +#define VT8500_ICPC_FIQ 0x24 +#define VT8500_ICDC 0x40 /* Destination Control 64*u32 */ +#define VT8500_ICIS 0x80 /* Interrupt status, 16*u32 */ + +/* ICPC */ +#define ICPC_MASK 0x3F +#define ICPC_ROTATE BIT(6) + +/* IC_DCTR */ +#define ICDC_IRQ 0x00 +#define ICDC_FIQ 0x01 +#define ICDC_DSS0 0x02 +#define ICDC_DSS1 0x03 +#define ICDC_DSS2 0x04 +#define ICDC_DSS3 0x05 +#define ICDC_DSS4 0x06 +#define ICDC_DSS5 0x07 + +#define VT8500_INT_DISABLE 0 +#define VT8500_INT_ENABLE BIT(3) + +#define VT8500_TRIGGER_HIGH 0 +#define VT8500_TRIGGER_RISING BIT(5) +#define VT8500_TRIGGER_FALLING BIT(6) +#define VT8500_EDGE ( VT8500_TRIGGER_RISING \ + | VT8500_TRIGGER_FALLING) + +/* vt8500 has 1 intc, wm8505 and wm8650 have 2 */ +#define VT8500_INTC_MAX 2 + +struct vt8500_irq_data { + void __iomem *base; /* IO Memory base address */ + struct irq_domain *domain; /* Domain for this controller */ +}; + +/* Global variable for accessing io-mem addresses */ +static struct vt8500_irq_data intc[VT8500_INTC_MAX]; +static u32 active_cnt = 0; + +static void vt8500_irq_mask(struct irq_data *d) +{ + struct vt8500_irq_data *priv = d->domain->host_data; + void __iomem *base = priv->base; + void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4); + u8 edge, dctr; + u32 status; + + edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE; + if (edge) { + status = readl(stat_reg); + + status |= (1 << (d->hwirq & 0x1f)); + writel(status, stat_reg); + } else { + dctr = readb(base + VT8500_ICDC + d->hwirq); + dctr &= ~VT8500_INT_ENABLE; + writeb(dctr, base + VT8500_ICDC + d->hwirq); + } +} + +static void vt8500_irq_unmask(struct irq_data *d) +{ + struct vt8500_irq_data *priv = d->domain->host_data; + void __iomem *base = priv->base; + u8 dctr; + + dctr = readb(base + VT8500_ICDC + d->hwirq); + dctr |= VT8500_INT_ENABLE; + writeb(dctr, base + VT8500_ICDC + d->hwirq); +} + +static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct vt8500_irq_data *priv = d->domain->host_data; + void __iomem *base = priv->base; + u8 dctr; + + dctr = readb(base + VT8500_ICDC + d->hwirq); + dctr &= ~VT8500_EDGE; + + switch (flow_type) { + case IRQF_TRIGGER_LOW: + return -EINVAL; + case IRQF_TRIGGER_HIGH: + dctr |= VT8500_TRIGGER_HIGH; + __irq_set_handler_locked(d->irq, handle_level_irq); + break; + case IRQF_TRIGGER_FALLING: + dctr |= VT8500_TRIGGER_FALLING; + __irq_set_handler_locked(d->irq, handle_edge_irq); + break; + case IRQF_TRIGGER_RISING: + dctr |= VT8500_TRIGGER_RISING; + __irq_set_handler_locked(d->irq, handle_edge_irq); + break; + } + writeb(dctr, base + VT8500_ICDC + d->hwirq); + + return 0; +} + +static struct irq_chip vt8500_irq_chip = { + .name = "vt8500", + .irq_ack = vt8500_irq_mask, + .irq_mask = vt8500_irq_mask, + .irq_unmask = vt8500_irq_unmask, + .irq_set_type = vt8500_irq_set_type, +}; + +static void __init vt8500_init_irq_hw(void __iomem *base) +{ + u32 i; + + /* Enable rotating priority for IRQ */ + writel(ICPC_ROTATE, base + VT8500_ICPC_IRQ); + writel(0x00, base + VT8500_ICPC_FIQ); + + /* Disable all interrupts and route them to IRQ */ + for (i = 0; i < 64; i++) + writeb(VT8500_INT_DISABLE | ICDC_IRQ, 
base + VT8500_ICDC + i); +} + +static int vt8500_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq); + set_irq_flags(virq, IRQF_VALID); + + return 0; +} + +static struct irq_domain_ops vt8500_irq_domain_ops = { + .map = vt8500_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) +{ + u32 stat, i; + int irqnr, virq; + void __iomem *base; + + /* Loop through each active controller */ + for (i=0; i<active_cnt; i++) { + base = intc[i].base; + irqnr = readl_relaxed(base) & 0x3F; + /* + Highest Priority register default = 63, so check that this + is a real interrupt by checking the status register + */ + if (irqnr == 63) { + stat = readl_relaxed(base + VT8500_ICIS + 4); + if (!(stat & BIT(31))) + continue; + } + + virq = irq_find_mapping(intc[i].domain, irqnr); + handle_IRQ(virq, regs); + } +} + +static int __init vt8500_irq_init(struct device_node *node, + struct device_node *parent) +{ + int irq, i; + struct device_node *np = node; + + if (active_cnt == VT8500_INTC_MAX) { + pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n", + __func__); + goto out; + } + + intc[active_cnt].base = of_iomap(np, 0); + intc[active_cnt].domain = irq_domain_add_linear(node, 64, + &vt8500_irq_domain_ops, &intc[active_cnt]); + + if (!intc[active_cnt].base) { + pr_err("%s: Unable to map IO memory\n", __func__); + goto out; + } + + if (!intc[active_cnt].domain) { + pr_err("%s: Unable to add irq domain!\n", __func__); + goto out; + } + + set_handle_irq(vt8500_handle_irq); + + vt8500_init_irq_hw(intc[active_cnt].base); + + pr_info("vt8500-irq: Added interrupt controller\n"); + + active_cnt++; + + /* check if this is a slaved controller */ + if (of_irq_count(np) != 0) { + /* check that we have the correct number of interrupts */ + if (of_irq_count(np) != 8) { + pr_err("%s: Incorrect IRQ map for slaved controller\n", + __func__); + return -EINVAL; + } + + for (i = 0; i < 8; i++) { + irq = irq_of_parse_and_map(np, i); + enable_irq(irq); + } + + pr_info("vt8500-irq: Enabled slave->parent interrupts\n"); + } +out: + return 0; +} + +IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init); diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c new file mode 100644 index 00000000000..e1c2f963289 --- /dev/null +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -0,0 +1,164 @@ +/* + * Xtensa MX interrupt distributor + * + * Copyright (C) 2002 - 2013 Tensilica, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/of.h> + +#include <asm/mxregs.h> + +#include "irqchip.h" + +#define HW_IRQ_IPI_COUNT 2 +#define HW_IRQ_MX_BASE 2 +#define HW_IRQ_EXTERN_BASE 3 + +static DEFINE_PER_CPU(unsigned int, cached_irq_mask); + +static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + if (hw < HW_IRQ_IPI_COUNT) { + struct irq_chip *irq_chip = d->host_data; + irq_set_chip_and_handler_name(irq, irq_chip, + handle_percpu_irq, "ipi"); + irq_set_status_flags(irq, IRQ_LEVEL); + return 0; + } + return xtensa_irq_map(d, irq, hw); +} + +/* + * Device Tree IRQ specifier translation function which works with one or + * two cell bindings. First cell value maps directly to the hwirq number. 
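Aside: vt8500_irq_set_type() above changes not only the hardware trigger bits but also the flow handler, using __irq_set_handler_locked() because .irq_set_type runs with the descriptor lock already held. The core of that pattern, with the hardware write left as a stub and acme_* names invented:

#include <linux/irq.h>

static int acme_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_HIGH:
		__irq_set_handler_locked(d->irq, handle_level_irq);
		break;
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_EDGE_FALLING:
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		break;
	default:
		return -EINVAL;	/* e.g. level-low unsupported, as above */
	}

	/* program the controller's per-irq trigger bits here */

	return 0;
}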
+ * Second cell if present specifies whether hwirq number is external (1) or + * internal (0). + */ +static int xtensa_mx_irq_domain_xlate(struct irq_domain *d, + struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + return xtensa_irq_domain_xlate(intspec, intsize, + intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE, + out_hwirq, out_type); +} + +static const struct irq_domain_ops xtensa_mx_irq_domain_ops = { + .xlate = xtensa_mx_irq_domain_xlate, + .map = xtensa_mx_irq_map, +}; + +void secondary_init_irq(void) +{ + __this_cpu_write(cached_irq_mask, + XCHAL_INTTYPE_MASK_EXTERN_EDGE | + XCHAL_INTTYPE_MASK_EXTERN_LEVEL); + set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE | + XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable); +} + +static void xtensa_mx_irq_mask(struct irq_data *d) +{ + unsigned int mask = 1u << d->hwirq; + + if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - + HW_IRQ_MX_BASE), MIENG); + } else { + mask = __this_cpu_read(cached_irq_mask) & ~mask; + __this_cpu_write(cached_irq_mask, mask); + set_sr(mask, intenable); + } +} + +static void xtensa_mx_irq_unmask(struct irq_data *d) +{ + unsigned int mask = 1u << d->hwirq; + + if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - + HW_IRQ_MX_BASE), MIENGSET); + } else { + mask |= __this_cpu_read(cached_irq_mask); + __this_cpu_write(cached_irq_mask, mask); + set_sr(mask, intenable); + } +} + +static void xtensa_mx_irq_enable(struct irq_data *d) +{ + variant_irq_enable(d->hwirq); + xtensa_mx_irq_unmask(d); +} + +static void xtensa_mx_irq_disable(struct irq_data *d) +{ + xtensa_mx_irq_mask(d); + variant_irq_disable(d->hwirq); +} + +static void xtensa_mx_irq_ack(struct irq_data *d) +{ + set_sr(1 << d->hwirq, intclear); +} + +static int xtensa_mx_irq_retrigger(struct irq_data *d) +{ + set_sr(1 << d->hwirq, intset); + return 1; +} + +static int xtensa_mx_irq_set_affinity(struct irq_data *d, + const struct cpumask *dest, bool force) +{ + unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask); + + set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE)); + return 0; + +} + +static struct irq_chip xtensa_mx_irq_chip = { + .name = "xtensa-mx", + .irq_enable = xtensa_mx_irq_enable, + .irq_disable = xtensa_mx_irq_disable, + .irq_mask = xtensa_mx_irq_mask, + .irq_unmask = xtensa_mx_irq_unmask, + .irq_ack = xtensa_mx_irq_ack, + .irq_retrigger = xtensa_mx_irq_retrigger, + .irq_set_affinity = xtensa_mx_irq_set_affinity, +}; + +int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) +{ + struct irq_domain *root_domain = + irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + &xtensa_mx_irq_domain_ops, + &xtensa_mx_irq_chip); + irq_set_default_host(root_domain); + secondary_init_irq(); + return 0; +} + +static int __init xtensa_mx_init(struct device_node *np, + struct device_node *interrupt_parent) +{ + struct irq_domain *root_domain = + irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops, + &xtensa_mx_irq_chip); + irq_set_default_host(root_domain); + secondary_init_irq(); + return 0; +} +IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init); diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c new file mode 100644 index 00000000000..7d71126d1ce --- /dev/null +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -0,0 +1,108 @@ +/* + * Xtensa built-in interrupt controller + * + * Copyright (C) 2002 
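Aside: the xtensa MX code above keeps a per-CPU copy of the enable mask in cached_irq_mask and updates it with __this_cpu_read()/__this_cpu_write(), which is safe here because mask/unmask run with interrupts disabled on the local CPU. The cache-maintenance half of that, separated from the xtensa-specific set_sr() write and using invented names:

#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, acme_cached_irq_mask);

static void acme_irq_mask(struct irq_data *d)
{
	unsigned int mask;

	mask = __this_cpu_read(acme_cached_irq_mask) & ~(1u << d->hwirq);
	__this_cpu_write(acme_cached_irq_mask, mask);
	/* write 'mask' to this CPU's interrupt-enable register here */
}

static void acme_irq_unmask(struct irq_data *d)
{
	unsigned int mask;

	mask = __this_cpu_read(acme_cached_irq_mask) | (1u << d->hwirq);
	__this_cpu_write(acme_cached_irq_mask, mask);
	/* write 'mask' to this CPU's interrupt-enable register here */
}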
- 2013 Tensilica, Inc. + * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Chris Zankel <chris@zankel.net> + * Kevin Chea + */ + +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/of.h> + +#include "irqchip.h" + +unsigned int cached_irq_mask; + +/* + * Device Tree IRQ specifier translation function which works with one or + * two cell bindings. First cell value maps directly to the hwirq number. + * Second cell if present specifies whether hwirq number is external (1) or + * internal (0). + */ +static int xtensa_pic_irq_domain_xlate(struct irq_domain *d, + struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + return xtensa_irq_domain_xlate(intspec, intsize, + intspec[0], intspec[0], + out_hwirq, out_type); +} + +static const struct irq_domain_ops xtensa_irq_domain_ops = { + .xlate = xtensa_pic_irq_domain_xlate, + .map = xtensa_irq_map, +}; + +static void xtensa_irq_mask(struct irq_data *d) +{ + cached_irq_mask &= ~(1 << d->hwirq); + set_sr(cached_irq_mask, intenable); +} + +static void xtensa_irq_unmask(struct irq_data *d) +{ + cached_irq_mask |= 1 << d->hwirq; + set_sr(cached_irq_mask, intenable); +} + +static void xtensa_irq_enable(struct irq_data *d) +{ + variant_irq_enable(d->hwirq); + xtensa_irq_unmask(d); +} + +static void xtensa_irq_disable(struct irq_data *d) +{ + xtensa_irq_mask(d); + variant_irq_disable(d->hwirq); +} + +static void xtensa_irq_ack(struct irq_data *d) +{ + set_sr(1 << d->hwirq, intclear); +} + +static int xtensa_irq_retrigger(struct irq_data *d) +{ + set_sr(1 << d->hwirq, intset); + return 1; +} + +static struct irq_chip xtensa_irq_chip = { + .name = "xtensa", + .irq_enable = xtensa_irq_enable, + .irq_disable = xtensa_irq_disable, + .irq_mask = xtensa_irq_mask, + .irq_unmask = xtensa_irq_unmask, + .irq_ack = xtensa_irq_ack, + .irq_retrigger = xtensa_irq_retrigger, +}; + +int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) +{ + struct irq_domain *root_domain = + irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + &xtensa_irq_domain_ops, &xtensa_irq_chip); + irq_set_default_host(root_domain); + return 0; +} + +static int __init xtensa_pic_init(struct device_node *np, + struct device_node *interrupt_parent) +{ + struct irq_domain *root_domain = + irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops, + &xtensa_irq_chip); + irq_set_default_host(root_domain); + return 0; +} +IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init); diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c new file mode 100644 index 00000000000..ceb3a4318f7 --- /dev/null +++ b/drivers/irqchip/irq-zevio.c @@ -0,0 +1,127 @@ +/* + * linux/drivers/irqchip/irq-zevio.c + * + * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <asm/mach/irq.h> +#include <asm/exception.h> + +#include "irqchip.h" + +#define IO_STATUS 0x000 +#define IO_RAW_STATUS 0x004 +#define IO_ENABLE 0x008 +#define IO_DISABLE 0x00C +#define IO_CURRENT 0x020 +#define IO_RESET 0x028 +#define IO_MAX_PRIOTY 0x02C + +#define IO_IRQ_BASE 0x000 +#define IO_FIQ_BASE 0x100 + +#define IO_INVERT_SEL 0x200 +#define IO_STICKY_SEL 0x204 +#define IO_PRIORITY_SEL 0x300 + +#define MAX_INTRS 32 +#define FIQ_START MAX_INTRS + +static struct irq_domain *zevio_irq_domain; +static void __iomem *zevio_irq_io; + +static void zevio_irq_ack(struct irq_data *irqd) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd); + struct irq_chip_regs *regs = + &container_of(irqd->chip, struct irq_chip_type, chip)->regs; + + readl(gc->reg_base + regs->ack); +} + +static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs) +{ + int irqnr; + + while (readl(zevio_irq_io + IO_STATUS)) { + irqnr = readl(zevio_irq_io + IO_CURRENT); + irqnr = irq_find_mapping(zevio_irq_domain, irqnr); + handle_IRQ(irqnr, regs); + }; +} + +static void __init zevio_init_irq_base(void __iomem *base) +{ + /* Disable all interrupts */ + writel(~0, base + IO_DISABLE); + + /* Accept interrupts of all priorities */ + writel(0xF, base + IO_MAX_PRIOTY); + + /* Reset existing interrupts */ + readl(base + IO_RESET); +} + +static int __init zevio_of_init(struct device_node *node, + struct device_node *parent) +{ + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + struct irq_chip_generic *gc; + int ret; + + if (WARN_ON(zevio_irq_io || zevio_irq_domain)) + return -EBUSY; + + zevio_irq_io = of_iomap(node, 0); + BUG_ON(!zevio_irq_io); + + /* Do not invert interrupt status bits */ + writel(~0, zevio_irq_io + IO_INVERT_SEL); + + /* Disable sticky interrupts */ + writel(0, zevio_irq_io + IO_STICKY_SEL); + + /* We don't use IRQ priorities. Set each IRQ to highest priority. 
*/ + memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32)); + + /* Init IRQ and FIQ */ + zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE); + zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE); + + zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS, + &irq_generic_chip_ops, NULL); + BUG_ON(!zevio_irq_domain); + + ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1, + "zevio_intc", handle_level_irq, + clr, 0, IRQ_GC_INIT_MASK_CACHE); + BUG_ON(ret); + + gc = irq_get_domain_generic_chip(zevio_irq_domain, 0); + gc->reg_base = zevio_irq_io; + gc->chip_types[0].chip.irq_ack = zevio_irq_ack; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; + gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; + gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE; + gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE; + gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE; + gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET; + + set_handle_irq(zevio_handle_irq); + + pr_info("TI-NSPIRE classic IRQ controller\n"); + return 0; +} + +IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init); diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c index f496afce29d..0fe2f718d81 100644 --- a/drivers/irqchip/irqchip.c +++ b/drivers/irqchip/irqchip.c @@ -10,8 +10,7 @@ #include <linux/init.h> #include <linux/of_irq.h> - -#include "irqchip.h" +#include <linux/irqchip.h> /* * This special of_device_id is the sentinel at the end of the @@ -20,11 +19,11 @@ * special section. */ static const struct of_device_id -irqchip_of_match_end __used __section(__irqchip_of_end); +irqchip_of_match_end __used __section(__irqchip_of_table_end); -extern struct of_device_id __irqchip_begin[]; +extern struct of_device_id __irqchip_of_table[]; void __init irqchip_init(void) { - of_irq_init(__irqchip_begin); + of_irq_init(__irqchip_of_table); } diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h index e445ba2d6ad..0f6486d4f1b 100644 --- a/drivers/irqchip/irqchip.h +++ b/drivers/irqchip/irqchip.h @@ -11,6 +11,8 @@ #ifndef _IRQCHIP_H #define _IRQCHIP_H +#include <linux/of.h> + /* * This macro must be used by the different irqchip drivers to declare * the association between their DT compatible string and their @@ -21,9 +23,6 @@ * @compstr: compatible string of the irqchip driver * @fn: initialization function */ -#define IRQCHIP_DECLARE(name,compstr,fn) \ - static const struct of_device_id irqchip_of_match_##name \ - __used __section(__irqchip_of_table) \ - = { .compatible = compstr, .data = fn } +#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) #endif diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 8527743b5ce..6ce6bd3441b 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -5,7 +5,7 @@ * Viresh Kumar <viresh.linux@gmail.com> * * Copyright (C) 2012 ST Microelectronics - * Shiraz Hashim <shiraz.hashim@st.com> + * Shiraz Hashim <shiraz.linux.kernel@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = { }; static struct spear_shirq spear320_shirq_ras3 = { - .irq_nr = 3, + .irq_nr = 7, .irq_bit_off = 0, .invalid_irq = 1, .regs = { |
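Aside: unlike the mask-register controllers earlier in the series, the zevio block above has a write-one-to-enable / write-one-to-disable register pair, so it uses irq_gc_mask_disable_reg()/irq_gc_unmask_enable_reg(), which write the cached bit to regs.disable and regs.enable respectively. A small hypothetical configuration of that pair, with invented offsets:

#include <linux/init.h>
#include <linux/irq.h>

#define ACME_ENABLE_SET		0x08	/* invented: write 1 to enable */
#define ACME_ENABLE_CLR		0x0c	/* invented: write 1 to disable */

static void __init acme_gc_mask_setup(struct irq_chip_generic *gc)
{
	struct irq_chip_type *ct = &gc->chip_types[0];

	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->regs.enable = ACME_ENABLE_SET;
	ct->regs.disable = ACME_ENABLE_CLR;
}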