Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/Kconfig           85
-rw-r--r--  kernel/irq/Makefile           5
-rw-r--r--  kernel/irq/autoprobe.c      119
-rw-r--r--  kernel/irq/chip.c           886
-rw-r--r--  kernel/irq/debug.h           45
-rw-r--r--  kernel/irq/devres.c          68
-rw-r--r--  kernel/irq/dummychip.c       61
-rw-r--r--  kernel/irq/generic-chip.c   591
-rw-r--r--  kernel/irq/handle.c         337
-rw-r--r--  kernel/irq/internals.h      208
-rw-r--r--  kernel/irq/irqdesc.c        554
-rw-r--r--  kernel/irq/irqdomain.c      711
-rw-r--r--  kernel/irq/manage.c        1736
-rw-r--r--  kernel/irq/migration.c       65
-rw-r--r--  kernel/irq/pm.c             130
-rw-r--r--  kernel/irq/proc.c           429
-rw-r--r--  kernel/irq/resend.c          41
-rw-r--r--  kernel/irq/settings.h       156
-rw-r--r--  kernel/irq/spurious.c       365
19 files changed, 5446 insertions, 1146 deletions
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
new file mode 100644
index 00000000000..d269cecdfbf
--- /dev/null
+++ b/kernel/irq/Kconfig
@@ -0,0 +1,85 @@
+menu "IRQ subsystem"
+# Options selectable by the architecture code
+
+# Make sparse irq Kconfig switch below available
+config MAY_HAVE_SPARSE_IRQ
+ bool
+
+# Legacy support, required for itanic
+config GENERIC_IRQ_LEGACY
+ bool
+
+# Enable the generic irq autoprobe mechanism
+config GENERIC_IRQ_PROBE
+ bool
+
+# Use the generic /proc/interrupts implementation
+config GENERIC_IRQ_SHOW
+ bool
+
+# Print level/edge extra information
+config GENERIC_IRQ_SHOW_LEVEL
+ bool
+
+# Facility to allocate a hardware interrupt. This is legacy support
+# and should not be used in new code. Use irq domains instead.
+config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
+ bool
+
+# Support for delayed migration from interrupt context
+config GENERIC_PENDING_IRQ
+ bool
+
+# Alpha specific irq affinity mechanism
+config AUTO_IRQ_AFFINITY
+ bool
+
+# Tasklet based software resend for pending interrupts on enable_irq()
+config HARDIRQS_SW_RESEND
+ bool
+
+# Preflow handler support for fasteoi (sparc64)
+config IRQ_PREFLOW_FASTEOI
+ bool
+
+# Edge style eoi based handler (cell)
+config IRQ_EDGE_EOI_HANDLER
+ bool
+
+# Generic configurable interrupt chip implementation
+config GENERIC_IRQ_CHIP
+ bool
+ select IRQ_DOMAIN
+
+# Generic irq_domain hw <--> linux irq number translation
+config IRQ_DOMAIN
+ bool
+
+config IRQ_DOMAIN_DEBUG
+ bool "Expose hardware/virtual IRQ mapping via debugfs"
+ depends on IRQ_DOMAIN && DEBUG_FS
+ help
+ This option will show the mapping relationship between hardware irq
+ numbers and Linux irq numbers. The mapping is exposed via debugfs
+ in the file "irq_domain_mapping".
+
+ If you don't know what this means you don't need it.
+
+# Support forced irq threading
+config IRQ_FORCED_THREADING
+ bool
+
+config SPARSE_IRQ
+ bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
+ ---help---
+
+ Sparse irq numbering is useful for distro kernels that want
+ to define a high CONFIG_NR_CPUS value but still want to have
+ low kernel memory footprint on smaller machines.
+
+ ( Sparse irqs can also be beneficial on NUMA boxes, as they spread
+ out the interrupt descriptors in a more NUMA-friendly way. )
+
+ If you don't know what to do here, say N.
+
+endmenu
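
All of these switches except SPARSE_IRQ and IRQ_DOMAIN_DEBUG are invisible to the
user; an architecture opts in by selecting them from its own Kconfig. A minimal
sketch for a hypothetical architecture symbol FOO (the particular selections are
just an example set, not a recommendation):

config FOO
	bool
	default y
	select GENERIC_IRQ_PROBE
	select GENERIC_IRQ_SHOW
	select HARDIRQS_SW_RESEND
	select MAY_HAVE_SPARSE_IRQ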
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 681c52dbfe2..fff17381f0a 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,8 @@
-obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
+obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
+obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
+obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 533068cfb60..0119b9d467a 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -10,13 +10,14 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/async.h>
#include "internals.h"
/*
* Autodetection depends on the fact that any interrupt that
 * comes in on an unassigned handler will get stuck with
- * "IRQ_WAITING" cleared and the interrupt disabled.
+ * "IRQS_WAITING" cleared and the interrupt disabled.
*/
static DEFINE_MUTEX(probing_active);
@@ -30,34 +31,31 @@ static DEFINE_MUTEX(probing_active);
unsigned long probe_irq_on(void)
{
struct irq_desc *desc;
- unsigned long mask;
- unsigned int i;
+ unsigned long mask = 0;
+ int i;
+ /*
+ * quiesce the kernel, or at least the asynchronous portion
+ */
+ async_synchronize_full();
mutex_lock(&probing_active);
/*
* something may have generated an irq long ago and we want to
* flush such a longstanding irq before considering it as spurious.
*/
- for (i = NR_IRQS-1; i > 0; i--) {
- desc = irq_desc + i;
-
- spin_lock_irq(&desc->lock);
- if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
- /*
- * An old-style architecture might still have
- * the handle_bad_irq handler there:
- */
- compat_irq_chip_set_default_handler(desc);
-
+ for_each_irq_desc_reverse(i, desc) {
+ raw_spin_lock_irq(&desc->lock);
+ if (!desc->action && irq_settings_can_probe(desc)) {
/*
* Some chips need to know about probing in
* progress:
*/
- if (desc->chip->set_type)
- desc->chip->set_type(i, IRQ_TYPE_PROBE);
- desc->chip->startup(i);
+ if (desc->irq_data.chip->irq_set_type)
+ desc->irq_data.chip->irq_set_type(&desc->irq_data,
+ IRQ_TYPE_PROBE);
+ irq_startup(desc, false);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
@@ -68,16 +66,14 @@ unsigned long probe_irq_on(void)
* (we must startup again here because if a longstanding irq
* happened in the previous stage, it may have masked itself)
*/
- for (i = NR_IRQS-1; i > 0; i--) {
- desc = irq_desc + i;
-
- spin_lock_irq(&desc->lock);
- if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
- desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->chip->startup(i))
- desc->status |= IRQ_PENDING;
+ for_each_irq_desc_reverse(i, desc) {
+ raw_spin_lock_irq(&desc->lock);
+ if (!desc->action && irq_settings_can_probe(desc)) {
+ desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
+ if (irq_startup(desc, false))
+ desc->istate |= IRQS_PENDING;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
@@ -88,24 +84,19 @@ unsigned long probe_irq_on(void)
/*
* Now filter out any obviously spurious interrupts
*/
- mask = 0;
- for (i = 0; i < NR_IRQS; i++) {
- unsigned int status;
+ for_each_irq_desc(i, desc) {
+ raw_spin_lock_irq(&desc->lock);
- desc = irq_desc + i;
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
+ if (desc->istate & IRQS_AUTODETECT) {
/* It triggered already - consider it spurious. */
- if (!(status & IRQ_WAITING)) {
- desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ if (!(desc->istate & IRQS_WAITING)) {
+ desc->istate &= ~IRQS_AUTODETECT;
+ irq_shutdown(desc);
} else
if (i < 32)
mask |= 1 << i;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
return mask;
@@ -126,25 +117,20 @@ EXPORT_SYMBOL(probe_irq_on);
*/
unsigned int probe_irq_mask(unsigned long val)
{
- unsigned int mask;
+ unsigned int mask = 0;
+ struct irq_desc *desc;
int i;
- mask = 0;
- for (i = 0; i < NR_IRQS; i++) {
- struct irq_desc *desc = irq_desc + i;
- unsigned int status;
-
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- if (i < 16 && !(status & IRQ_WAITING))
+ for_each_irq_desc(i, desc) {
+ raw_spin_lock_irq(&desc->lock);
+ if (desc->istate & IRQS_AUTODETECT) {
+ if (i < 16 && !(desc->istate & IRQS_WAITING))
mask |= 1 << i;
- desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->istate &= ~IRQS_AUTODETECT;
+ irq_shutdown(desc);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
@@ -171,29 +157,26 @@ EXPORT_SYMBOL(probe_irq_mask);
*/
int probe_irq_off(unsigned long val)
{
- int i, irq_found = 0, nr_irqs = 0;
-
- for (i = 0; i < NR_IRQS; i++) {
- struct irq_desc *desc = irq_desc + i;
- unsigned int status;
+ int i, irq_found = 0, nr_of_irqs = 0;
+ struct irq_desc *desc;
- spin_lock_irq(&desc->lock);
- status = desc->status;
+ for_each_irq_desc(i, desc) {
+ raw_spin_lock_irq(&desc->lock);
- if (status & IRQ_AUTODETECT) {
- if (!(status & IRQ_WAITING)) {
- if (!nr_irqs)
+ if (desc->istate & IRQS_AUTODETECT) {
+ if (!(desc->istate & IRQS_WAITING)) {
+ if (!nr_of_irqs)
irq_found = i;
- nr_irqs++;
+ nr_of_irqs++;
}
- desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->istate &= ~IRQS_AUTODETECT;
+ irq_shutdown(desc);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
- if (nr_irqs > 1)
+ if (nr_of_irqs > 1)
irq_found = -irq_found;
return irq_found;
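
For context, the driver-facing side of this API is unchanged by the rework: a
driver brackets a self-generated test interrupt between probe_irq_on() and
probe_irq_off(). A minimal sketch, where struct foo_dev and foo_trigger_irq()
are hypothetical stand-ins for whatever device poke raises the interrupt:

#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int foo_find_irq(struct foo_dev *foo)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm autodetection */
	foo_trigger_irq(foo);		/* make the device assert its irq */
	mdelay(10);			/* let the interrupt arrive */
	irq = probe_irq_off(mask);	/* >0: the irq, 0: none, <0: several */

	return irq > 0 ? irq : -ENODEV;
}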
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fdb3fbe2b0c..a2b28a2fd7b 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -16,280 +16,330 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
-#include "internals.h"
-
-/**
- * dynamic_irq_init - initialize a dynamically allocated irq
- * @irq: irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
-{
- struct irq_desc *desc;
- unsigned long flags;
-
- if (irq >= NR_IRQS) {
- printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
- WARN_ON(1);
- return;
- }
-
- /* Ensure we don't have left over values from a previous use of this irq */
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
- desc->status = IRQ_DISABLED;
- desc->chip = &no_irq_chip;
- desc->handle_irq = handle_bad_irq;
- desc->depth = 1;
- desc->msi_desc = NULL;
- desc->handler_data = NULL;
- desc->chip_data = NULL;
- desc->action = NULL;
- desc->irq_count = 0;
- desc->irqs_unhandled = 0;
-#ifdef CONFIG_SMP
- desc->affinity = CPU_MASK_ALL;
-#endif
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/**
- * dynamic_irq_cleanup - cleanup a dynamically allocated irq
- * @irq: irq number to initialize
- */
-void dynamic_irq_cleanup(unsigned int irq)
-{
- struct irq_desc *desc;
- unsigned long flags;
-
- if (irq >= NR_IRQS) {
- printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
- WARN_ON(1);
- return;
- }
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
- if (desc->action) {
- spin_unlock_irqrestore(&desc->lock, flags);
- printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
- irq);
- WARN_ON(1);
- return;
- }
- desc->msi_desc = NULL;
- desc->handler_data = NULL;
- desc->chip_data = NULL;
- desc->handle_irq = handle_bad_irq;
- desc->chip = &no_irq_chip;
- spin_unlock_irqrestore(&desc->lock, flags);
-}
+#include <trace/events/irq.h>
+#include "internals.h"
/**
- * set_irq_chip - set the irq chip for an irq
+ * irq_set_chip - set the irq chip for an irq
* @irq: irq number
* @chip: pointer to irq chip description structure
*/
-int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
- struct irq_desc *desc;
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
- if (irq >= NR_IRQS) {
- printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
- WARN_ON(1);
+ if (!desc)
return -EINVAL;
- }
if (!chip)
chip = &no_irq_chip;
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
- irq_chip_set_defaults(chip);
- desc->chip = chip;
- spin_unlock_irqrestore(&desc->lock, flags);
-
+ desc->irq_data.chip = chip;
+ irq_put_desc_unlock(desc, flags);
+ /*
+ * For !CONFIG_SPARSE_IRQ make the irq show up in
+ * allocated_irqs.
+ */
+ irq_mark_irq(irq);
return 0;
}
-EXPORT_SYMBOL(set_irq_chip);
+EXPORT_SYMBOL(irq_set_chip);
/**
- * set_irq_type - set the irq type for an irq
+ * irq_set_type - set the irq trigger type for an irq
* @irq: irq number
- * @type: interrupt type - see include/linux/interrupt.h
+ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/
-int set_irq_type(unsigned int irq, unsigned int type)
+int irq_set_irq_type(unsigned int irq, unsigned int type)
{
- struct irq_desc *desc;
unsigned long flags;
- int ret = -ENXIO;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+ int ret = 0;
- if (irq >= NR_IRQS) {
- printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
- return -ENODEV;
- }
+ if (!desc)
+ return -EINVAL;
- desc = irq_desc + irq;
- if (desc->chip->set_type) {
- spin_lock_irqsave(&desc->lock, flags);
- ret = desc->chip->set_type(irq, type);
- spin_unlock_irqrestore(&desc->lock, flags);
- }
+ type &= IRQ_TYPE_SENSE_MASK;
+ ret = __irq_set_trigger(desc, irq, type);
+ irq_put_desc_busunlock(desc, flags);
return ret;
}
-EXPORT_SYMBOL(set_irq_type);
+EXPORT_SYMBOL(irq_set_irq_type);
/**
- * set_irq_data - set irq type data for an irq
+ * irq_set_handler_data - set irq handler data for an irq
* @irq: Interrupt number
* @data: Pointer to interrupt specific data
*
* Set the hardware irq controller data for an irq
*/
-int set_irq_data(unsigned int irq, void *data)
+int irq_set_handler_data(unsigned int irq, void *data)
{
- struct irq_desc *desc;
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
- if (irq >= NR_IRQS) {
- printk(KERN_ERR
- "Trying to install controller data for IRQ%d\n", irq);
+ if (!desc)
return -EINVAL;
- }
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
- desc->handler_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ desc->irq_data.handler_data = data;
+ irq_put_desc_unlock(desc, flags);
return 0;
}
-EXPORT_SYMBOL(set_irq_data);
+EXPORT_SYMBOL(irq_set_handler_data);
/**
- * set_irq_data - set irq type data for an irq
- * @irq: Interrupt number
- * @entry: Pointer to MSI descriptor data
+ * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
+ * @irq_base: Interrupt number base
+ * @irq_offset: Interrupt number offset
+ * @entry: Pointer to MSI descriptor data
*
- * Set the hardware irq controller data for an irq
+ * Set the MSI descriptor entry for an irq at offset
*/
-int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
+ struct msi_desc *entry)
{
- struct irq_desc *desc;
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
- if (irq >= NR_IRQS) {
- printk(KERN_ERR
- "Trying to install msi data for IRQ%d\n", irq);
+ if (!desc)
return -EINVAL;
- }
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
- desc->msi_desc = entry;
- if (entry)
- entry->irq = irq;
- spin_unlock_irqrestore(&desc->lock, flags);
+ desc->irq_data.msi_desc = entry;
+ if (entry && !irq_offset)
+ entry->irq = irq_base;
+ irq_put_desc_unlock(desc, flags);
return 0;
}
/**
- * set_irq_chip_data - set irq chip data for an irq
+ * irq_set_msi_desc - set MSI descriptor data for an irq
+ * @irq: Interrupt number
+ * @entry: Pointer to MSI descriptor data
+ *
+ * Set the MSI descriptor entry for an irq
+ */
+int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
+{
+ return irq_set_msi_desc_off(irq, 0, entry);
+}
+
+/**
+ * irq_set_chip_data - set irq chip data for an irq
* @irq: Interrupt number
* @data: Pointer to chip specific data
*
* Set the hardware irq chip data for an irq
*/
-int set_irq_chip_data(unsigned int irq, void *data)
+int irq_set_chip_data(unsigned int irq, void *data)
{
- struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
- if (irq >= NR_IRQS || !desc->chip) {
- printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
+ if (!desc)
return -EINVAL;
+ desc->irq_data.chip_data = data;
+ irq_put_desc_unlock(desc, flags);
+ return 0;
+}
+EXPORT_SYMBOL(irq_set_chip_data);
+
+struct irq_data *irq_get_irq_data(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ return desc ? &desc->irq_data : NULL;
+}
+EXPORT_SYMBOL_GPL(irq_get_irq_data);
+
+static void irq_state_clr_disabled(struct irq_desc *desc)
+{
+ irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
+}
+
+static void irq_state_set_disabled(struct irq_desc *desc)
+{
+ irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
+}
+
+static void irq_state_clr_masked(struct irq_desc *desc)
+{
+ irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
+}
+
+static void irq_state_set_masked(struct irq_desc *desc)
+{
+ irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
+}
+
+int irq_startup(struct irq_desc *desc, bool resend)
+{
+ int ret = 0;
+
+ irq_state_clr_disabled(desc);
+ desc->depth = 0;
+
+ if (desc->irq_data.chip->irq_startup) {
+ ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+ irq_state_clr_masked(desc);
+ } else {
+ irq_enable(desc);
}
+ if (resend)
+ check_irq_resend(desc, desc->irq_data.irq);
+ return ret;
+}
- spin_lock_irqsave(&desc->lock, flags);
- desc->chip_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+void irq_shutdown(struct irq_desc *desc)
+{
+ irq_state_set_disabled(desc);
+ desc->depth = 1;
+ if (desc->irq_data.chip->irq_shutdown)
+ desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+ else if (desc->irq_data.chip->irq_disable)
+ desc->irq_data.chip->irq_disable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ irq_state_set_masked(desc);
+}
- return 0;
+void irq_enable(struct irq_desc *desc)
+{
+ irq_state_clr_disabled(desc);
+ if (desc->irq_data.chip->irq_enable)
+ desc->irq_data.chip->irq_enable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ irq_state_clr_masked(desc);
}
-EXPORT_SYMBOL(set_irq_chip_data);
-/*
- * default enable function
+/**
+ * irq_disable - Mark interrupt disabled
+ * @desc: irq descriptor which should be disabled
+ *
+ * If the chip does not implement the irq_disable callback, we
+ * use a lazy disable approach. That means we mark the interrupt
+ * disabled, but leave the hardware unmasked. That's an
+ * optimization because we avoid the hardware access for the
+ * common case where no interrupt happens after we marked it
+ * disabled. If an interrupt happens, then the interrupt flow
+ * handler masks the line at the hardware level and marks it
+ * pending.
*/
-static void default_enable(unsigned int irq)
+void irq_disable(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_desc + irq;
+ irq_state_set_disabled(desc);
+ if (desc->irq_data.chip->irq_disable) {
+ desc->irq_data.chip->irq_disable(&desc->irq_data);
+ irq_state_set_masked(desc);
+ }
+}
- desc->chip->unmask(irq);
- desc->status &= ~IRQ_MASKED;
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+ if (desc->irq_data.chip->irq_enable)
+ desc->irq_data.chip->irq_enable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ cpumask_set_cpu(cpu, desc->percpu_enabled);
}
-/*
- * default disable function
- */
-static void default_disable(unsigned int irq)
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
+ if (desc->irq_data.chip->irq_disable)
+ desc->irq_data.chip->irq_disable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
-/*
- * default startup function
- */
-static unsigned int default_startup(unsigned int irq)
+static inline void mask_ack_irq(struct irq_desc *desc)
{
- irq_desc[irq].chip->enable(irq);
+ if (desc->irq_data.chip->irq_mask_ack)
+ desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
+ else {
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ if (desc->irq_data.chip->irq_ack)
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+ }
+ irq_state_set_masked(desc);
+}
- return 0;
+void mask_irq(struct irq_desc *desc)
+{
+ if (desc->irq_data.chip->irq_mask) {
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ irq_state_set_masked(desc);
+ }
}
-/*
- * default shutdown function
- */
-static void default_shutdown(unsigned int irq)
+void unmask_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_desc + irq;
+ if (desc->irq_data.chip->irq_unmask) {
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ irq_state_clr_masked(desc);
+ }
+}
+
+void unmask_threaded_irq(struct irq_desc *desc)
+{
+ struct irq_chip *chip = desc->irq_data.chip;
- desc->chip->mask(irq);
- desc->status |= IRQ_MASKED;
+ if (chip->flags & IRQCHIP_EOI_THREADED)
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_unmask) {
+ chip->irq_unmask(&desc->irq_data);
+ irq_state_clr_masked(desc);
+ }
}
/*
- * Fixup enable/disable function pointers
+ * handle_nested_irq - Handle a nested irq from an irq thread
+ * @irq: the interrupt number
+ *
+ * Handle interrupts which are nested into a threaded interrupt
+ * handler. The handler function is called inside the calling
+ * thread's context.
*/
-void irq_chip_set_defaults(struct irq_chip *chip)
-{
- if (!chip->enable)
- chip->enable = default_enable;
- if (!chip->disable)
- chip->disable = default_disable;
- if (!chip->startup)
- chip->startup = default_startup;
- /*
- * We use chip->disable, when the user provided its own. When
- * we have default_disable set for chip->disable, then we need
- * to use default_shutdown, otherwise the irq line is not
- * disabled on free_irq():
- */
- if (!chip->shutdown)
- chip->shutdown = chip->disable != default_disable ?
- chip->disable : default_shutdown;
- if (!chip->name)
- chip->name = chip->typename;
- if (!chip->end)
- chip->end = dummy_irq_chip.end;
+void handle_nested_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ irqreturn_t action_ret;
+
+ might_sleep();
+
+ raw_spin_lock_irq(&desc->lock);
+
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ action = desc->action;
+ if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+ desc->istate |= IRQS_PENDING;
+ goto out_unlock;
+ }
+
+ irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+ raw_spin_unlock_irq(&desc->lock);
+
+ action_ret = action->thread_fn(action->irq, action->dev_id);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
+
+ raw_spin_lock_irq(&desc->lock);
+ irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+
+out_unlock:
+ raw_spin_unlock_irq(&desc->lock);
}
+EXPORT_SYMBOL_GPL(handle_nested_irq);
-static inline void mask_ack_irq(struct irq_desc *desc, int irq)
+static bool irq_check_poll(struct irq_desc *desc)
{
- if (desc->chip->mask_ack)
- desc->chip->mask_ack(irq);
- else {
- desc->chip->mask(irq);
- desc->chip->ack(irq);
- }
+ if (!(desc->istate & IRQS_POLL_INPROGRESS))
+ return false;
+ return irq_wait_for_poll(desc);
}
/**
@@ -307,32 +357,43 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
- struct irqaction *action;
- irqreturn_t action_ret;
- const unsigned int cpu = smp_processor_id();
+ raw_spin_lock(&desc->lock);
- spin_lock(&desc->lock);
+ if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
+ if (!irq_check_poll(desc))
+ goto out_unlock;
- if (unlikely(desc->status & IRQ_INPROGRESS))
- goto out_unlock;
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
- kstat_cpu(cpu).irqs[irq]++;
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ kstat_incr_irqs_this_cpu(irq, desc);
- action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+ desc->istate |= IRQS_PENDING;
goto out_unlock;
+ }
- desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
-
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ handle_irq_event(desc);
- spin_lock(&desc->lock);
- desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_simple_irq);
+
+/*
+ * Called unconditionally from handle_level_irq() and only for oneshot
+ * interrupts from handle_fasteoi_irq()
+ */
+static void cond_unmask_irq(struct irq_desc *desc)
+{
+ /*
+ * We need to unmask in the following cases:
+ * - Standard level irq (IRQF_ONESHOT is not set)
+ * - Oneshot irq which did not wake the thread (caused by a
+ * spurious interrupt or a primary handler handling it
+ * completely).
+ */
+ if (!irqd_irq_disabled(&desc->irq_data) &&
+ irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
+ unmask_irq(desc);
}
/**
@@ -348,39 +409,63 @@ out_unlock:
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
- unsigned int cpu = smp_processor_id();
- struct irqaction *action;
- irqreturn_t action_ret;
+ raw_spin_lock(&desc->lock);
+ mask_ack_irq(desc);
- spin_lock(&desc->lock);
- mask_ack_irq(desc, irq);
+ if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
+ if (!irq_check_poll(desc))
+ goto out_unlock;
- if (unlikely(desc->status & IRQ_INPROGRESS))
- goto out_unlock;
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
- kstat_cpu(cpu).irqs[irq]++;
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ kstat_incr_irqs_this_cpu(irq, desc);
/*
 * If it's disabled or no action is available,
* keep it masked and get out of here
*/
- action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+ desc->istate |= IRQS_PENDING;
goto out_unlock;
+ }
- desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ handle_irq_event(desc);
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ cond_unmask_irq(desc);
- spin_lock(&desc->lock);
- desc->status &= ~IRQ_INPROGRESS;
- if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
- desc->chip->unmask(irq);
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_level_irq);
+
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void preflow_handler(struct irq_desc *desc)
+{
+ if (desc->preflow_handler)
+ desc->preflow_handler(&desc->irq_data);
+}
+#else
+static inline void preflow_handler(struct irq_desc *desc) { }
+#endif
+
+static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
+{
+ if (!(desc->istate & IRQS_ONESHOT)) {
+ chip->irq_eoi(&desc->irq_data);
+ return;
+ }
+ /*
+ * We need to unmask in the following cases:
+ * - Oneshot irq which did not wake the thread (caused by a
+ * spurious interrupt or a primary handler handling it
+ * completely).
+ */
+ if (!irqd_irq_disabled(&desc->irq_data) &&
+ irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
+ chip->irq_eoi(&desc->irq_data);
+ unmask_irq(desc);
+ } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
+ chip->irq_eoi(&desc->irq_data);
+ }
}
/**
@@ -396,44 +481,41 @@ out_unlock:
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
- unsigned int cpu = smp_processor_id();
- struct irqaction *action;
- irqreturn_t action_ret;
+ struct irq_chip *chip = desc->irq_data.chip;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
- if (unlikely(desc->status & IRQ_INPROGRESS))
- goto out;
+ if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
+ if (!irq_check_poll(desc))
+ goto out;
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
- kstat_cpu(cpu).irqs[irq]++;
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ kstat_incr_irqs_this_cpu(irq, desc);
/*
 * If it's disabled or no action is available,
* then mask it and get out of here:
*/
- action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
- desc->status |= IRQ_PENDING;
- if (desc->chip->mask)
- desc->chip->mask(irq);
+ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+ desc->istate |= IRQS_PENDING;
+ mask_irq(desc);
goto out;
}
- desc->status |= IRQ_INPROGRESS;
- desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ if (desc->istate & IRQS_ONESHOT)
+ mask_irq(desc);
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ preflow_handler(desc);
+ handle_irq_event(desc);
- spin_lock(&desc->lock);
- desc->status &= ~IRQ_INPROGRESS;
-out:
- desc->chip->eoi(irq);
+ cond_unmask_eoi_irq(desc, chip);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
+ return;
+out:
+ if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+ chip->irq_eoi(&desc->irq_data);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -442,10 +524,10 @@ out:
* @desc: the interrupt description structure for this irq
*
 * Interrupt occurs on the falling and/or rising edge of a hardware
- * signal. The occurence is latched into the irq controller hardware
+ * signal. The occurrence is latched into the irq controller hardware
* and must be acked in order to be reenabled. After the ack another
* interrupt can happen on the same source even before the first one
- * is handled by the assosiacted event handler. If this happens it
+ * is handled by the associated event handler. If this happens it
* might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
@@ -455,38 +537,30 @@ out:
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
- const unsigned int cpu = smp_processor_id();
-
- spin_lock(&desc->lock);
-
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+ raw_spin_lock(&desc->lock);
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
 * If we're currently running this IRQ, or it's disabled,
* we shouldn't process the IRQ. Mark it pending, handle
* the necessary masking and go out
*/
- if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
- !desc->action)) {
- desc->status |= (IRQ_PENDING | IRQ_MASKED);
- mask_ack_irq(desc, irq);
- goto out_unlock;
+ if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+ irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+ if (!irq_check_poll(desc)) {
+ desc->istate |= IRQS_PENDING;
+ mask_ack_irq(desc);
+ goto out_unlock;
+ }
}
-
- kstat_cpu(cpu).irqs[irq]++;
+ kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
- desc->chip->ack(irq);
-
- /* Mark the IRQ currently in progress.*/
- desc->status |= IRQ_INPROGRESS;
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
do {
- struct irqaction *action = desc->action;
- irqreturn_t action_ret;
-
- if (unlikely(!action)) {
- desc->chip->mask(irq);
+ if (unlikely(!desc->action)) {
+ mask_irq(desc);
goto out_unlock;
}
@@ -495,29 +569,69 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
* one, we could have masked the irq.
 * Re-enable it, if it was not disabled in the meantime.
*/
- if (unlikely((desc->status &
- (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
- (IRQ_PENDING | IRQ_MASKED))) {
- desc->chip->unmask(irq);
- desc->status &= ~IRQ_MASKED;
+ if (unlikely(desc->istate & IRQS_PENDING)) {
+ if (!irqd_irq_disabled(&desc->irq_data) &&
+ irqd_irq_masked(&desc->irq_data))
+ unmask_irq(desc);
}
- desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ handle_irq_event(desc);
- } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+ } while ((desc->istate & IRQS_PENDING) &&
+ !irqd_irq_disabled(&desc->irq_data));
- desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
+EXPORT_SYMBOL(handle_edge_irq);
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
- * handle_percpu_IRQ - Per CPU local irq handler
+ * handle_edge_eoi_irq - edge eoi type IRQ handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Similar to handle_edge_irq above, but using eoi and without the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ raw_spin_lock(&desc->lock);
+
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ /*
+ * If we're currently running this IRQ, or it's disabled,
+ * we shouldn't process the IRQ. Mark it pending, handle
+ * the necessary masking and go out
+ */
+ if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+ irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+ if (!irq_check_poll(desc)) {
+ desc->istate |= IRQS_PENDING;
+ goto out_eoi;
+ }
+ }
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ do {
+ if (unlikely(!desc->action))
+ goto out_eoi;
+
+ handle_irq_event(desc);
+
+ } while ((desc->istate & IRQS_PENDING) &&
+ !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+ chip->irq_eoi(&desc->irq_data);
+ raw_spin_unlock(&desc->lock);
+}
+#endif
+
+/**
+ * handle_percpu_irq - Per CPU local irq handler
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
@@ -526,120 +640,180 @@ out_unlock:
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
- irqreturn_t action_ret;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
- kstat_this_cpu.irqs[irq]++;
+ kstat_incr_irqs_this_cpu(irq, desc);
- if (desc->chip->ack)
- desc->chip->ack(irq);
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
- action_ret = handle_IRQ_event(irq, desc->action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ handle_irq_event_percpu(desc, desc->action);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above but with the following extras:
+ *
+ * action->percpu_dev_id is a pointer to percpu variables which
+ * contain the real device id for the cpu on which this handler is
+ * called
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irqaction *action = desc->action;
+ void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+ irqreturn_t res;
+
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
- if (desc->chip->eoi)
- desc->chip->eoi(irq);
+ trace_irq_handler_entry(irq, action);
+ res = action->handler(irq, dev_id);
+ trace_irq_handler_exit(irq, action, res);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
}
void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
- struct irq_desc *desc;
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
- if (irq >= NR_IRQS) {
- printk(KERN_ERR
- "Trying to install type control for IRQ%d\n", irq);
+ if (!desc)
return;
- }
-
- desc = irq_desc + irq;
- if (!handle)
+ if (!handle) {
handle = handle_bad_irq;
- else if (desc->chip == &no_irq_chip) {
- printk(KERN_WARNING "Trying to install %sinterrupt handler "
- "for IRQ%d\n", is_chained ? "chained " : "", irq);
- /*
- * Some ARM implementations install a handler for really dumb
- * interrupt hardware without setting an irq_chip. This worked
- * with the ARM no_irq_chip but the check in setup_irq would
- * prevent us to setup the interrupt at all. Switch it to
- * dummy_irq_chip for easy transition.
- */
- desc->chip = &dummy_irq_chip;
+ } else {
+ if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
+ goto out;
}
- spin_lock_irqsave(&desc->lock, flags);
-
/* Uninstall? */
if (handle == handle_bad_irq) {
- if (desc->chip != &no_irq_chip)
- mask_ack_irq(desc, irq);
- desc->status |= IRQ_DISABLED;
+ if (desc->irq_data.chip != &no_irq_chip)
+ mask_ack_irq(desc);
+ irq_state_set_disabled(desc);
desc->depth = 1;
}
desc->handle_irq = handle;
desc->name = name;
if (handle != handle_bad_irq && is_chained) {
- desc->status &= ~IRQ_DISABLED;
- desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
- desc->depth = 0;
- desc->chip->unmask(irq);
+ irq_settings_set_noprobe(desc);
+ irq_settings_set_norequest(desc);
+ irq_settings_set_nothread(desc);
+ irq_startup(desc, true);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+out:
+ irq_put_desc_busunlock(desc, flags);
}
+EXPORT_SYMBOL_GPL(__irq_set_handler);
void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handle)
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle, const char *name)
{
- set_irq_chip(irq, chip);
- __set_irq_handler(irq, handle, 0, NULL);
+ irq_set_chip(irq, chip);
+ __irq_set_handler(irq, handle, 0, name);
}
+EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
-void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handle, const char *name)
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
- set_irq_chip(irq, chip);
- __set_irq_handler(irq, handle, 0, name);
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+ if (!desc)
+ return;
+ irq_settings_clr_and_set(desc, clr, set);
+
+ irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+ IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+ if (irq_settings_has_no_balance_set(desc))
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ if (irq_settings_is_per_cpu(desc))
+ irqd_set(&desc->irq_data, IRQD_PER_CPU);
+ if (irq_settings_can_move_pcntxt(desc))
+ irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
+ if (irq_settings_is_level(desc))
+ irqd_set(&desc->irq_data, IRQD_LEVEL);
+
+ irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
+
+ irq_put_desc_unlock(desc, flags);
}
+EXPORT_SYMBOL_GPL(irq_modify_status);
-void __init set_irq_noprobe(unsigned int irq)
+/**
+ * irq_cpu_online - Invoke all irq_cpu_online functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_online()
+ * for each.
+ */
+void irq_cpu_online(void)
{
struct irq_desc *desc;
+ struct irq_chip *chip;
unsigned long flags;
+ unsigned int irq;
- if (irq >= NR_IRQS) {
- printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
+ for_each_active_irq(irq) {
+ desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
- return;
- }
+ raw_spin_lock_irqsave(&desc->lock, flags);
- desc = irq_desc + irq;
+ chip = irq_data_get_irq_chip(&desc->irq_data);
+ if (chip && chip->irq_cpu_online &&
+ (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+ !irqd_irq_disabled(&desc->irq_data)))
+ chip->irq_cpu_online(&desc->irq_data);
- spin_lock_irqsave(&desc->lock, flags);
- desc->status |= IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
}
-void __init set_irq_probe(unsigned int irq)
+/**
+ * irq_cpu_offline - Invoke all irq_cpu_offline functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_offline()
+ * for each.
+ */
+void irq_cpu_offline(void)
{
struct irq_desc *desc;
+ struct irq_chip *chip;
unsigned long flags;
+ unsigned int irq;
- if (irq >= NR_IRQS) {
- printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
+ for_each_active_irq(irq) {
+ desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
- return;
- }
+ raw_spin_lock_irqsave(&desc->lock, flags);
- desc = irq_desc + irq;
+ chip = irq_data_get_irq_chip(&desc->irq_data);
+ if (chip && chip->irq_cpu_offline &&
+ (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+ !irqd_irq_disabled(&desc->irq_data)))
+ chip->irq_cpu_offline(&desc->irq_data);
- spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
}
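
Taken together, the renamed setters above are what an irqchip driver now calls
when wiring up a line. A minimal sketch under hypothetical names (the foo_chip
callbacks and the priv cookie are made up, not part of this patch):

#include <linux/irq.h>

static void foo_mask(struct irq_data *d)   { /* write chip's mask register */ }
static void foo_unmask(struct irq_data *d) { /* write chip's unmask register */ }

static struct irq_chip foo_chip = {
	.name		= "FOO",
	.irq_mask	= foo_mask,
	.irq_unmask	= foo_unmask,
};

static void foo_init_irq(unsigned int irq, void *priv)
{
	irq_set_chip_and_handler_name(irq, &foo_chip, handle_level_irq, "level");
	irq_set_chip_data(irq, priv);
	/* level-type line; make it requestable but keep it out of autoprobe */
	irq_modify_status(irq, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
}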
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
new file mode 100644
index 00000000000..e75e29e4434
--- /dev/null
+++ b/kernel/irq/debug.h
@@ -0,0 +1,45 @@
+/*
+ * Debugging printout:
+ */
+
+#include <linux/kallsyms.h>
+
+#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
+#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+/* FIXME */
+#define ___PD(f) do { } while (0)
+
+static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
+ irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
+ printk("->handle_irq(): %p, ", desc->handle_irq);
+ print_symbol("%s\n", (unsigned long)desc->handle_irq);
+ printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
+ print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
+ printk("->action(): %p\n", desc->action);
+ if (desc->action) {
+ printk("->action->handler(): %p, ", desc->action->handler);
+ print_symbol("%s\n", (unsigned long)desc->action->handler);
+ }
+
+ ___P(IRQ_LEVEL);
+ ___P(IRQ_PER_CPU);
+ ___P(IRQ_NOPROBE);
+ ___P(IRQ_NOREQUEST);
+ ___P(IRQ_NOTHREAD);
+ ___P(IRQ_NOAUTOEN);
+
+ ___PS(IRQS_AUTODETECT);
+ ___PS(IRQS_REPLAY);
+ ___PS(IRQS_WAITING);
+ ___PS(IRQS_PENDING);
+
+ ___PD(IRQS_INPROGRESS);
+ ___PD(IRQS_DISABLED);
+ ___PD(IRQS_MASKED);
+}
+
+#undef ___P
+#undef ___PS
+#undef ___PD
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 6d9204f3a37..1ef0606797c 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -1,6 +1,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/gfp.h>
/*
* Device resource management aware IRQ request/free implementation.
@@ -25,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
}
/**
- * devm_request_irq - allocate an interrupt line for a managed device
+ * devm_request_threaded_irq - allocate an interrupt line for a managed device
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ * for devices which handle everything in @handler
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
@@ -39,11 +42,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
* automatically freed on driver detach.
*
* If an IRQ allocated with this function needs to be freed
- * separately, dev_free_irq() must be used.
+ * separately, devm_free_irq() must be used.
*/
-int devm_request_irq(struct device *dev, unsigned int irq,
- irq_handler_t handler, unsigned long irqflags,
- const char *devname, void *dev_id)
+int devm_request_threaded_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, irq_handler_t thread_fn,
+ unsigned long irqflags, const char *devname,
+ void *dev_id)
{
struct irq_devres *dr;
int rc;
@@ -53,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq,
if (!dr)
return -ENOMEM;
- rc = request_irq(irq, handler, irqflags, devname, dev_id);
+ rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
+ dev_id);
if (rc) {
devres_free(dr);
return rc;
@@ -65,7 +70,52 @@ int devm_request_irq(struct device *dev, unsigned int irq,
return 0;
}
-EXPORT_SYMBOL(devm_request_irq);
+EXPORT_SYMBOL(devm_request_threaded_irq);
+
+/**
+ * devm_request_any_context_irq - allocate an interrupt line for a managed device
+ * @dev: device to request interrupt for
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ * for devices which handle everything in @handler
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as
+ * request_any_context_irq(). IRQs requested with this function will be
+ * automatically freed on driver detach.
+ *
+ * If an IRQ allocated with this function needs to be freed
+ * separately, devm_free_irq() must be used.
+ */
+int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ struct irq_devres *dr;
+ int rc;
+
+ dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
+ GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ dr->irq = irq;
+ dr->dev_id = dev_id;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_request_any_context_irq);
/**
* devm_free_irq - free an interrupt
@@ -76,14 +126,14 @@ EXPORT_SYMBOL(devm_request_irq);
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as free_irq().
* This function instead of free_irq() should be used to manually
- * free IRQs allocated with dev_request_irq().
+ * free IRQs allocated with devm_request_irq().
*/
void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
{
struct irq_devres match_data = { irq, dev_id };
- free_irq(irq, dev_id);
WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
&match_data));
+ free_irq(irq, dev_id);
}
EXPORT_SYMBOL(devm_free_irq);
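
A usage sketch of the new threaded variant, as a driver's probe() would call it
(the foo_* handlers and the platform device details are hypothetical):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	/* hard irq context: quick check, defer the real work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	/* irq thread context: may sleep, e.g. talk to a slow bus */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* released automatically on driver detach, no free_irq() needed */
	return devm_request_threaded_irq(&pdev->dev, irq, foo_hardirq,
					 foo_thread, IRQF_ONESHOT,
					 "foo", pdev);
}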
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
new file mode 100644
index 00000000000..988dc58e884
--- /dev/null
+++ b/kernel/irq/dummychip.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
+ *
+ * This file contains the dummy interrupt chip implementation
+ */
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/export.h>
+
+#include "internals.h"
+
+/*
+ * What should we do if we get a hw irq event on an illegal vector?
+ * Each architecture has to answer this itself.
+ */
+static void ack_bad(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
+
+ print_irq_desc(data->irq, desc);
+ ack_bad_irq(data->irq);
+}
+
+/*
+ * NOP functions
+ */
+static void noop(struct irq_data *data) { }
+
+static unsigned int noop_ret(struct irq_data *data)
+{
+ return 0;
+}
+
+/*
+ * Generic no controller implementation
+ */
+struct irq_chip no_irq_chip = {
+ .name = "none",
+ .irq_startup = noop_ret,
+ .irq_shutdown = noop,
+ .irq_enable = noop,
+ .irq_disable = noop,
+ .irq_ack = ack_bad,
+};
+
+/*
+ * Generic dummy implementation which can be used for
+ * real dumb interrupt sources
+ */
+struct irq_chip dummy_irq_chip = {
+ .name = "dummy",
+ .irq_startup = noop_ret,
+ .irq_shutdown = noop,
+ .irq_enable = noop,
+ .irq_disable = noop,
+ .irq_ack = noop,
+ .irq_mask = noop,
+ .irq_unmask = noop,
+};
+EXPORT_SYMBOL_GPL(dummy_irq_chip);
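
With the implicit fallback gone from __irq_set_handler(), platforms whose
interrupt sources have no per-line hardware to mask or ack must now install the
dummy chip explicitly. A one-line sketch, with virq a hypothetical
already-allocated interrupt number:

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);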
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
new file mode 100644
index 00000000000..452d6f2ba21
--- /dev/null
+++ b/kernel/irq/generic-chip.c
@@ -0,0 +1,591 @@
+/*
+ * Library implementing the most common irq chip callback functions
+ *
+ * Copyright (C) 2011, Thomas Gleixner
+ */
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/syscore_ops.h>
+
+#include "internals.h"
+
+static LIST_HEAD(gc_list);
+static DEFINE_RAW_SPINLOCK(gc_lock);
+
+/**
+ * irq_gc_noop - NOOP function
+ * @d: irq_data
+ */
+void irq_gc_noop(struct irq_data *d)
+{
+}
+
+/**
+ * irq_gc_mask_disable_reg - Mask chip via disable register
+ * @d: irq_data
+ *
+ * Chip has separate enable/disable registers instead of a single mask
+ * register.
+ */
+void irq_gc_mask_disable_reg(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.disable);
+ *ct->mask_cache &= ~mask;
+ irq_gc_unlock(gc);
+}
+
+/**
+ * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
+ * @d: irq_data
+ *
+ * Chip has a single mask register. Values of this register are cached
+ * and protected by gc->lock
+ */
+void irq_gc_mask_set_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ *ct->mask_cache |= mask;
+ irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
+ irq_gc_unlock(gc);
+}
+EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
+
+/**
+ * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
+ * @d: irq_data
+ *
+ * Chip has a single mask register. Values of this register are cached
+ * and protected by gc->lock
+ */
+void irq_gc_mask_clr_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ *ct->mask_cache &= ~mask;
+ irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
+ irq_gc_unlock(gc);
+}
+EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
+
+/**
+ * irq_gc_unmask_enable_reg - Unmask chip via enable register
+ * @d: irq_data
+ *
+ * Chip has separate enable/disable registers instead of a single mask
+ * register.
+ */
+void irq_gc_unmask_enable_reg(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.enable);
+ *ct->mask_cache |= mask;
+ irq_gc_unlock(gc);
+}
+
+/**
+ * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
+ * @d: irq_data
+ */
+void irq_gc_ack_set_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
+ irq_gc_unlock(gc);
+}
+EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
+
+/**
+ * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
+ * @d: irq_data
+ */
+void irq_gc_ack_clr_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = ~d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
+ irq_gc_unlock(gc);
+}
+
+/**
+ * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
+ * @d: irq_data
+ */
+void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.mask);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
+ irq_gc_unlock(gc);
+}
+
+/**
+ * irq_gc_eoi - EOI interrupt
+ * @d: irq_data
+ */
+void irq_gc_eoi(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.eoi);
+ irq_gc_unlock(gc);
+}
+
+/**
+ * irq_gc_set_wake - Set/clr wake bit for an interrupt
+ * @d: irq_data
+ * @on: Indicates whether the wake bit should be set or cleared
+ *
+ * For chips where the wake from suspend functionality is not
+ * configured in a separate register and the wakeup active state is
+ * just stored in a bitmask.
+ */
+int irq_gc_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 mask = d->mask;
+
+ if (!(mask & gc->wake_enabled))
+ return -EINVAL;
+
+ irq_gc_lock(gc);
+ if (on)
+ gc->wake_active |= mask;
+ else
+ gc->wake_active &= ~mask;
+ irq_gc_unlock(gc);
+ return 0;
+}
+
+static void
+irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+ int num_ct, unsigned int irq_base,
+ void __iomem *reg_base, irq_flow_handler_t handler)
+{
+ raw_spin_lock_init(&gc->lock);
+ gc->num_ct = num_ct;
+ gc->irq_base = irq_base;
+ gc->reg_base = reg_base;
+ gc->chip_types->chip.name = name;
+ gc->chip_types->handler = handler;
+}
+
+/**
+ * irq_alloc_generic_chip - Allocate a generic chip and initialize it
+ * @name: Name of the irq chip
+ * @num_ct:	Number of irq_chip_type instances associated with this chip
+ * @irq_base: Interrupt base nr for this chip
+ * @reg_base: Register base address (virtual)
+ * @handler: Default flow handler associated with this chip
+ *
+ * Returns an initialized irq_chip_generic structure. The chip defaults
+ * to the primary (index 0) irq_chip_type and @handler
+ */
+struct irq_chip_generic *
+irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
+ void __iomem *reg_base, irq_flow_handler_t handler)
+{
+ struct irq_chip_generic *gc;
+ unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+
+ gc = kzalloc(sz, GFP_KERNEL);
+ if (gc) {
+ irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
+ handler);
+ }
+ return gc;
+}
+EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
+
+static void
+irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
+{
+ struct irq_chip_type *ct = gc->chip_types;
+ u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
+ int i;
+
+ for (i = 0; i < gc->num_ct; i++) {
+ if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
+ mskptr = &ct[i].mask_cache_priv;
+ mskreg = ct[i].regs.mask;
+ }
+ ct[i].mask_cache = mskptr;
+ if (flags & IRQ_GC_INIT_MASK_CACHE)
+ *mskptr = irq_reg_readl(gc->reg_base + mskreg);
+ }
+}
+
+/**
+ * irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
+ * @d: irq domain for which to allocate chips
+ * @irqs_per_chip: Number of interrupts each chip handles
+ * @num_ct:	Number of irq_chip_type instances associated with this chip
+ * @name: Name of the irq chip
+ * @handler: Default flow handler associated with these chips
+ * @clr: IRQ_* bits to clear in the mapping function
+ * @set: IRQ_* bits to set in the mapping function
+ * @gcflags: Generic chip specific setup flags
+ */
+int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
+ int num_ct, const char *name,
+ irq_flow_handler_t handler,
+ unsigned int clr, unsigned int set,
+ enum irq_gc_flags gcflags)
+{
+ struct irq_domain_chip_generic *dgc;
+ struct irq_chip_generic *gc;
+ int numchips, sz, i;
+ unsigned long flags;
+ void *tmp;
+
+ if (d->gc)
+ return -EBUSY;
+
+ numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
+ if (!numchips)
+ return -EINVAL;
+
+ /* Allocate a pointer, generic chip and chiptypes for each chip */
+ sz = sizeof(*dgc) + numchips * sizeof(gc);
+ sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
+
+ tmp = dgc = kzalloc(sz, GFP_KERNEL);
+ if (!dgc)
+ return -ENOMEM;
+ dgc->irqs_per_chip = irqs_per_chip;
+ dgc->num_chips = numchips;
+ dgc->irq_flags_to_set = set;
+ dgc->irq_flags_to_clear = clr;
+ dgc->gc_flags = gcflags;
+ d->gc = dgc;
+
+ /* Calc pointer to the first generic chip */
+ tmp += sizeof(*dgc) + numchips * sizeof(gc);
+ for (i = 0; i < numchips; i++) {
+ /* Store the pointer to the generic chip */
+ dgc->gc[i] = gc = tmp;
+ irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
+ NULL, handler);
+ gc->domain = d;
+ raw_spin_lock_irqsave(&gc_lock, flags);
+ list_add_tail(&gc->list, &gc_list);
+ raw_spin_unlock_irqrestore(&gc_lock, flags);
+ /* Calc pointer to the next generic chip */
+ tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+ }
+ d->name = name;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
+
+/**
+ * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
+ * @d: irq domain pointer
+ * @hw_irq: Hardware interrupt number
+ */
+struct irq_chip_generic *
+irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
+{
+ struct irq_domain_chip_generic *dgc = d->gc;
+ int idx;
+
+ if (!dgc)
+ return NULL;
+ idx = hw_irq / dgc->irqs_per_chip;
+ if (idx >= dgc->num_chips)
+ return NULL;
+ return dgc->gc[idx];
+}
+EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
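
A sketch of the intended call sequence for the domain variant, assuming a
hypothetical 32-input controller (the register offsets, names and device-tree
node are made up for illustration):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int __init foo_intc_init(struct device_node *np, void __iomem *base)
{
	struct irq_chip_generic *gc;
	struct irq_domain *d;
	int ret;

	d = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, NULL);
	if (!d)
		return -ENOMEM;

	ret = irq_alloc_domain_generic_chips(d, 32, 1, "FOO_INTC",
					     handle_level_irq, 0, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		return ret;

	gc = irq_get_domain_generic_chip(d, 0);
	gc->reg_base = base;
	gc->chip_types[0].regs.mask	  = 0x04;	/* hypothetical offset */
	gc->chip_types[0].regs.ack	  = 0x08;	/* hypothetical offset */
	gc->chip_types[0].chip.irq_mask	  = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_ack	  = irq_gc_ack_set_bit;
	return 0;
}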
+
+/*
+ * Separate lockdep class for interrupt chip which can nest irq_desc
+ * lock.
+ */
+static struct lock_class_key irq_nested_lock_class;
+
+/*
+ * irq_map_generic_chip - Map a generic chip for an irq domain
+ */
+static int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ struct irq_data *data = irq_get_irq_data(virq);
+ struct irq_domain_chip_generic *dgc = d->gc;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ struct irq_chip *chip;
+ unsigned long flags;
+ int idx;
+
+ if (!d->gc)
+ return -ENODEV;
+
+ idx = hw_irq / dgc->irqs_per_chip;
+ if (idx >= dgc->num_chips)
+ return -EINVAL;
+ gc = dgc->gc[idx];
+
+ idx = hw_irq % dgc->irqs_per_chip;
+
+ if (test_bit(idx, &gc->unused))
+ return -ENOTSUPP;
+
+ if (test_bit(idx, &gc->installed))
+ return -EBUSY;
+
+ ct = gc->chip_types;
+ chip = &ct->chip;
+
+ /* We only init the cache for the first mapping of a generic chip */
+ if (!gc->installed) {
+ raw_spin_lock_irqsave(&gc->lock, flags);
+ irq_gc_init_mask_cache(gc, dgc->gc_flags);
+ raw_spin_unlock_irqrestore(&gc->lock, flags);
+ }
+
+ /* Mark the interrupt as installed */
+ set_bit(idx, &gc->installed);
+
+ if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
+ irq_set_lockdep_class(virq, &irq_nested_lock_class);
+
+ if (chip->irq_calc_mask)
+ chip->irq_calc_mask(data);
+ else
+ data->mask = 1 << idx;
+
+ irq_set_chip_and_handler(virq, chip, ct->handler);
+ irq_set_chip_data(virq, gc);
+ irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
+ return 0;
+}
+
+struct irq_domain_ops irq_generic_chip_ops = {
+ .map = irq_map_generic_chip,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
+
+/**
+ * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
+ * @gc: Generic irq chip holding all data
+ * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
+ * @flags: Flags for initialization
+ * @clr: IRQ_* bits to clear
+ * @set: IRQ_* bits to set
+ *
+ * Set up max. 32 interrupts starting from gc->irq_base. Note, this
+ * initializes all interrupts to the primary irq_chip_type and its
+ * associated handler.
+ */
+void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ enum irq_gc_flags flags, unsigned int clr,
+ unsigned int set)
+{
+ struct irq_chip_type *ct = gc->chip_types;
+ struct irq_chip *chip = &ct->chip;
+ unsigned int i;
+
+ raw_spin_lock(&gc_lock);
+ list_add_tail(&gc->list, &gc_list);
+ raw_spin_unlock(&gc_lock);
+
+ irq_gc_init_mask_cache(gc, flags);
+
+ for (i = gc->irq_base; msk; msk >>= 1, i++) {
+ if (!(msk & 0x01))
+ continue;
+
+ if (flags & IRQ_GC_INIT_NESTED_LOCK)
+ irq_set_lockdep_class(i, &irq_nested_lock_class);
+
+ if (!(flags & IRQ_GC_NO_MASK)) {
+ struct irq_data *d = irq_get_irq_data(i);
+
+ if (chip->irq_calc_mask)
+ chip->irq_calc_mask(d);
+ else
+ d->mask = 1 << (i - gc->irq_base);
+ }
+ irq_set_chip_and_handler(i, chip, ct->handler);
+ irq_set_chip_data(i, gc);
+ irq_modify_status(i, clr, set);
+ }
+ gc->irq_cnt = i - gc->irq_base;
+}
+EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
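For the legacy, non-domain path the counterpart is irq_alloc_generic_chip() followed by this function. A minimal sketch, assuming 16 interrupts at a fixed irq base and made-up register offsets:

	static void __init foo_legacy_init(void __iomem *base)
	{
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_alloc_generic_chip("FOO", 1, 64, base, handle_level_irq);
		if (!gc)
			return;

		ct = gc->chip_types;
		ct->regs.mask = 0x10;			/* hypothetical */
		ct->regs.ack = 0x14;			/* hypothetical */
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_set_bit;
		ct->chip.irq_unmask = irq_gc_mask_clr_bit;

		irq_setup_generic_chip(gc, IRQ_MSK(16), IRQ_GC_INIT_MASK_CACHE,
				       IRQ_NOREQUEST, 0);
	}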
+
+/**
+ * irq_setup_alt_chip - Switch to alternative chip
+ * @d: irq_data for this interrupt
+ * @type: Flow type to be initialized
+ *
+ * Only to be called from chip->irq_set_type() callbacks.
+ */
+int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = gc->chip_types;
+ unsigned int i;
+
+ for (i = 0; i < gc->num_ct; i++, ct++) {
+ if (ct->type & type) {
+ d->chip = &ct->chip;
+ irq_data_to_desc(d)->handle_irq = ct->handler;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
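Since the helper walks gc->chip_types and matches on ct->type, a chip set up with separate level and edge irq_chip_type entries can implement its irq_set_type() callback as little more than this sketch:

	static int foo_irq_set_type(struct irq_data *d, unsigned int type)
	{
		/* Switch chip and handler to the irq_chip_type matching 'type' */
		return irq_setup_alt_chip(d, type);
	}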
+
+/**
+ * irq_remove_generic_chip - Remove a chip
+ * @gc: Generic irq chip holding all data
+ * @msk:	Bitmask holding the irqs to remove, relative to gc->irq_base
+ * @clr: IRQ_* bits to clear
+ * @set: IRQ_* bits to set
+ *
+ * Remove up to 32 interrupts starting from gc->irq_base.
+ */
+void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ unsigned int clr, unsigned int set)
+{
+ unsigned int i = gc->irq_base;
+
+ raw_spin_lock(&gc_lock);
+ list_del(&gc->list);
+ raw_spin_unlock(&gc_lock);
+
+ for (; msk; msk >>= 1, i++) {
+ if (!(msk & 0x01))
+ continue;
+
+ /* Remove handler first. That will mask the irq line */
+ irq_set_handler(i, NULL);
+ irq_set_chip(i, &no_irq_chip);
+ irq_set_chip_data(i, NULL);
+ irq_modify_status(i, clr, set);
+ }
+}
+EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
+
+static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
+{
+ unsigned int virq;
+
+ if (!gc->domain)
+ return irq_get_irq_data(gc->irq_base);
+
+ /*
+	 * We don't know which of the irqs has actually been
+	 * installed. Use the first one.
+ */
+ if (!gc->installed)
+ return NULL;
+
+ virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
+ return virq ? irq_get_irq_data(virq) : NULL;
+}
+
+#ifdef CONFIG_PM
+static int irq_gc_suspend(void)
+{
+ struct irq_chip_generic *gc;
+
+ list_for_each_entry(gc, &gc_list, list) {
+ struct irq_chip_type *ct = gc->chip_types;
+
+ if (ct->chip.irq_suspend) {
+ struct irq_data *data = irq_gc_get_irq_data(gc);
+
+ if (data)
+ ct->chip.irq_suspend(data);
+ }
+ }
+ return 0;
+}
+
+static void irq_gc_resume(void)
+{
+ struct irq_chip_generic *gc;
+
+ list_for_each_entry(gc, &gc_list, list) {
+ struct irq_chip_type *ct = gc->chip_types;
+
+ if (ct->chip.irq_resume) {
+ struct irq_data *data = irq_gc_get_irq_data(gc);
+
+ if (data)
+ ct->chip.irq_resume(data);
+ }
+ }
+}
+#else
+#define irq_gc_suspend NULL
+#define irq_gc_resume NULL
+#endif
+
+static void irq_gc_shutdown(void)
+{
+ struct irq_chip_generic *gc;
+
+ list_for_each_entry(gc, &gc_list, list) {
+ struct irq_chip_type *ct = gc->chip_types;
+
+ if (ct->chip.irq_pm_shutdown) {
+ struct irq_data *data = irq_gc_get_irq_data(gc);
+
+ if (data)
+ ct->chip.irq_pm_shutdown(data);
+ }
+ }
+}
+
+static struct syscore_ops irq_gc_syscore_ops = {
+ .suspend = irq_gc_suspend,
+ .resume = irq_gc_resume,
+ .shutdown = irq_gc_shutdown,
+};
+
+static int __init irq_gc_init_ops(void)
+{
+ register_syscore_ops(&irq_gc_syscore_ops);
+ return 0;
+}
+device_initcall(irq_gc_init_ops);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 5fa6198e913..63548027085 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,11 +11,13 @@
*/
#include <linux/irq.h>
-#include <linux/module.h>
#include <linux/random.h>
+#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <trace/events/irq.h>
+
#include "internals.h"
/**
@@ -25,250 +27,171 @@
*
 * Handles spurious and unhandled IRQs. It also prints a debug message.
*/
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
print_irq_desc(irq, desc);
- kstat_this_cpu.irqs[irq]++;
- ack_bad_irq(irq);
-}
-
-/*
- * Linux has a controller-independent interrupt architecture.
- * Every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * The code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic or
- * having to touch the generic code.
- *
- * Controller mappings for all interrupt sources:
- */
-struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
- [0 ... NR_IRQS-1] = {
- .status = IRQ_DISABLED,
- .chip = &no_irq_chip,
- .handle_irq = handle_bad_irq,
- .depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
- }
-};
-
-/*
- * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
- */
-static void ack_bad(unsigned int irq)
-{
- print_irq_desc(irq, irq_desc + irq);
+ kstat_incr_irqs_this_cpu(irq, desc);
ack_bad_irq(irq);
}
/*
- * NOP functions
- */
-static void noop(unsigned int irq)
-{
-}
-
-static unsigned int noop_ret(unsigned int irq)
-{
- return 0;
-}
-
-/*
- * Generic no controller implementation
- */
-struct irq_chip no_irq_chip = {
- .name = "none",
- .startup = noop_ret,
- .shutdown = noop,
- .enable = noop,
- .disable = noop,
- .ack = ack_bad,
- .end = noop,
-};
-
-/*
- * Generic dummy implementation which can be used for
- * real dumb interrupt sources
- */
-struct irq_chip dummy_irq_chip = {
- .name = "dummy",
- .startup = noop_ret,
- .shutdown = noop,
- .enable = noop,
- .disable = noop,
- .ack = noop,
- .mask = noop,
- .unmask = noop,
- .end = noop,
-};
-
-/*
* Special, empty irq handler:
*/
irqreturn_t no_action(int cpl, void *dev_id)
{
return IRQ_NONE;
}
+EXPORT_SYMBOL_GPL(no_action);
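no_action is useful when a line must be claimed although all the work is done by the flow handler or elsewhere; a sketch, with an arbitrary irq number and name:

	if (request_irq(irq, no_action, IRQF_NO_THREAD, "foo-tick", NULL))
		pr_err("foo: could not claim irq %u\n", irq);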
-/**
- * handle_IRQ_event - irq action chain handler
- * @irq: the interrupt number
- * @action: the interrupt action chain for this irq
- *
- * Handles the action chain of an irq event
- */
-irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
+static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
- irqreturn_t ret, retval = IRQ_NONE;
- unsigned int status = 0;
-
- handle_dynamic_tick(action);
-
- if (!(action->flags & IRQF_DISABLED))
- local_irq_enable_in_hardirq();
+ if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
+ return;
- do {
- ret = action->handler(irq, action->dev_id);
- if (ret == IRQ_HANDLED)
- status |= action->flags;
- retval |= ret;
- action = action->next;
- } while (action);
-
- if (status & IRQF_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
- local_irq_disable();
-
- return retval;
+ printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
+	       "but no thread function available.\n", irq, action->name);
}
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-/**
- * __do_IRQ - original all in one highlevel IRQ handler
- * @irq: the interrupt number
- *
- * __do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * This is the original x86 implementation which is used for every
- * interrupt type.
- */
-unsigned int __do_IRQ(unsigned int irq)
+void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
- struct irq_desc *desc = irq_desc + irq;
- struct irqaction *action;
- unsigned int status;
-
- kstat_this_cpu.irqs[irq]++;
- if (CHECK_IRQ_PER_CPU(desc->status)) {
- irqreturn_t action_ret;
-
- /*
- * No locking required for CPU-local interrupts:
- */
- if (desc->chip->ack)
- desc->chip->ack(irq);
- if (likely(!(desc->status & IRQ_DISABLED))) {
- action_ret = handle_IRQ_event(irq, desc->action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
- }
- desc->chip->end(irq);
- return 1;
- }
-
- spin_lock(&desc->lock);
- if (desc->chip->ack)
- desc->chip->ack(irq);
/*
- * REPLAY is when Linux resends an IRQ that was dropped earlier
- * WAITING is used by probe to mark irqs that are being tested
+ * In case the thread crashed and was killed we just pretend that
+ * we handled the interrupt. The hardirq handler has disabled the
+ * device interrupt, so no irq storm is lurking.
*/
- status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
- status |= IRQ_PENDING; /* we _want_ to handle it */
+ if (action->thread->flags & PF_EXITING)
+ return;
/*
- * If the IRQ is disabled for whatever reason, we cannot
- * use the action we have.
+ * Wake up the handler thread for this action. If the
+ * RUNTHREAD bit is already set, nothing to do.
*/
- action = NULL;
- if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
- action = desc->action;
- status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
- }
- desc->status = status;
+ if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ return;
/*
- * If there is no IRQ handler or it was disabled, exit early.
- * Since we set PENDING, if another processor is handling
- * a different instance of this same irq, the other processor
- * will take care of it.
+ * It's safe to OR the mask lockless here. We have only two
+ * places which write to threads_oneshot: This code and the
+ * irq thread.
+ *
+ * This code is the hard irq context and can never run on two
+ * cpus in parallel. If it ever does we have more serious
+ * problems than this bitmask.
+ *
+ * The irq threads of this irq which clear their "running" bit
+ * in threads_oneshot are serialized via desc->lock against
+ * each other and they are serialized against this code by
+ * IRQS_INPROGRESS.
+ *
+ * Hard irq handler:
+ *
+ * spin_lock(desc->lock);
+ * desc->state |= IRQS_INPROGRESS;
+ * spin_unlock(desc->lock);
+ * set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+ * desc->threads_oneshot |= mask;
+ * spin_lock(desc->lock);
+ * desc->state &= ~IRQS_INPROGRESS;
+ * spin_unlock(desc->lock);
+ *
+ * irq thread:
+ *
+ * again:
+ * spin_lock(desc->lock);
+ * if (desc->state & IRQS_INPROGRESS) {
+ * spin_unlock(desc->lock);
+ * while(desc->state & IRQS_INPROGRESS)
+ * cpu_relax();
+ * goto again;
+ * }
+ * if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ * desc->threads_oneshot &= ~mask;
+ * spin_unlock(desc->lock);
+ *
+ * So either the thread waits for us to clear IRQS_INPROGRESS
+ * or we are waiting in the flow handler for desc->lock to be
+ * released before we reach this point. The thread also checks
+ * IRQTF_RUNTHREAD under desc->lock. If set it leaves
+ * threads_oneshot untouched and runs the thread another time.
*/
- if (unlikely(!action))
- goto out;
+ desc->threads_oneshot |= action->thread_mask;
/*
- * Edge triggered interrupts need to remember
- * pending events.
- * This applies to any hw interrupts that allow a second
- * instance of the same irq to arrive while we are in do_IRQ
- * or in the handler. But the code here only handles the _second_
- * instance of the irq, not the third or fourth. So it is mostly
- * useful for irq hardware that does not mask cleanly in an
- * SMP environment.
+ * We increment the threads_active counter in case we wake up
+ * the irq thread. The irq thread decrements the counter when
+ * it returns from the handler or in the exit path and wakes
+ * up waiters which are stuck in synchronize_irq() when the
+ * active count becomes zero. synchronize_irq() is serialized
+ * against this code (hard irq handler) via IRQS_INPROGRESS
+ * like the finalize_oneshot() code. See comment above.
*/
- for (;;) {
- irqreturn_t action_ret;
+ atomic_inc(&desc->threads_active);
- spin_unlock(&desc->lock);
+ wake_up_process(action->thread);
+}
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+irqreturn_t
+handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+{
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
- spin_lock(&desc->lock);
- if (likely(!(desc->status & IRQ_PENDING)))
+ do {
+ irqreturn_t res;
+
+ trace_irq_handler_entry(irq, action);
+ res = action->handler(irq, action->dev_id);
+ trace_irq_handler_exit(irq, action, res);
+
+		if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n",
+ irq, action->handler))
+ local_irq_disable();
+
+ switch (res) {
+ case IRQ_WAKE_THREAD:
+ /*
+ * Catch drivers which return WAKE_THREAD but
+ * did not set up a thread function
+ */
+ if (unlikely(!action->thread_fn)) {
+ warn_no_thread(irq, action);
+ break;
+ }
+
+ __irq_wake_thread(desc, action);
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
+ flags |= action->flags;
break;
- desc->status &= ~IRQ_PENDING;
- }
- desc->status &= ~IRQ_INPROGRESS;
-out:
- /*
- * The ->end() handler has to deal with interrupts which got
- * disabled while the handler was running.
- */
- desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ default:
+ break;
+ }
- return 1;
-}
-#endif
+ retval |= res;
+ action = action->next;
+ } while (action);
-#ifdef CONFIG_TRACE_IRQFLAGS
+ add_interrupt_randomness(irq, flags);
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
+ return retval;
+}
-void early_init_irq_lock_class(void)
+irqreturn_t handle_irq_event(struct irq_desc *desc)
{
- int i;
+ struct irqaction *action = desc->action;
+ irqreturn_t ret;
- for (i = 0; i < NR_IRQS; i++)
- lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
-}
+ desc->istate &= ~IRQS_PENDING;
+ irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+ raw_spin_unlock(&desc->lock);
+
+ ret = handle_irq_event_percpu(desc, action);
-#endif
+ raw_spin_lock(&desc->lock);
+ irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+ return ret;
+}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 08a849a2244..099ea2e0eb8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -1,64 +1,196 @@
/*
* IRQ subsystem internal functions and variables:
+ *
+ * Do not ever include this file from anything other than
+ * kernel/irq/. Do not even think about using any information outside
+ * of this file for your non-core code.
*/
+#include <linux/irqdesc.h>
+#include <linux/kernel_stat.h>
-extern int noirqdebug;
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
+#else
+# define IRQ_BITMAP_BITS NR_IRQS
+#endif
+
+#define istate core_internal_state__do_not_mess_with_it
+
+extern bool noirqdebug;
+
+/*
+ * Bits used by threaded handlers:
+ * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
+ * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
+ * IRQTF_AFFINITY - irq thread is requested to adjust affinity
+ * IRQTF_FORCED_THREAD - irq action is force threaded
+ */
+enum {
+ IRQTF_RUNTHREAD,
+ IRQTF_WARNED,
+ IRQTF_AFFINITY,
+ IRQTF_FORCED_THREAD,
+};
-/* Set default functions for irq_chip structures: */
-extern void irq_chip_set_defaults(struct irq_chip *chip);
+/*
+ * Bit masks for desc->core_internal_state__do_not_mess_with_it
+ *
+ * IRQS_AUTODETECT - autodetection in progress
+ * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
+ * detection
+ * IRQS_POLL_INPROGRESS - polling in progress
+ * IRQS_ONESHOT - irq is not unmasked in primary handler
+ * IRQS_REPLAY - irq is replayed
+ * IRQS_WAITING - irq is waiting
+ * IRQS_PENDING - irq is pending and replayed later
+ * IRQS_SUSPENDED - irq is suspended
+ */
+enum {
+ IRQS_AUTODETECT = 0x00000001,
+ IRQS_SPURIOUS_DISABLED = 0x00000002,
+ IRQS_POLL_INPROGRESS = 0x00000008,
+ IRQS_ONESHOT = 0x00000020,
+ IRQS_REPLAY = 0x00000040,
+ IRQS_WAITING = 0x00000080,
+ IRQS_PENDING = 0x00000200,
+ IRQS_SUSPENDED = 0x00000800,
+};
+
+#include "debug.h"
+#include "settings.h"
+
+#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
-/* Set default handler: */
-extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
+extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
+ unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+
+extern int irq_startup(struct irq_desc *desc, bool resend);
+extern void irq_shutdown(struct irq_desc *desc);
+extern void irq_enable(struct irq_desc *desc);
+extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
+extern void mask_irq(struct irq_desc *desc);
+extern void unmask_irq(struct irq_desc *desc);
+extern void unmask_threaded_irq(struct irq_desc *desc);
+
+#ifdef CONFIG_SPARSE_IRQ
+static inline void irq_mark_irq(unsigned int irq) { }
+#else
+extern void irq_mark_irq(unsigned int irq);
+#endif
+
+extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
+irqreturn_t handle_irq_event(struct irq_desc *desc);
+
+/* Resending of interrupts: */
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+bool irq_wait_for_poll(struct irq_desc *desc);
+void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
#ifdef CONFIG_PROC_FS
-extern void register_irq_proc(unsigned int irq);
+extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
+extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
-static inline void register_irq_proc(unsigned int irq) { }
+static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
+static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif
+extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
+
+extern void irq_set_thread_affinity(struct irq_desc *desc);
+
+extern int irq_do_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force);
+
+/* Inline functions for support of irq chips on slow busses */
+static inline void chip_bus_lock(struct irq_desc *desc)
+{
+ if (unlikely(desc->irq_data.chip->irq_bus_lock))
+ desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
+}
+
+static inline void chip_bus_sync_unlock(struct irq_desc *desc)
+{
+ if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
+ desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
+}
+
+#define _IRQ_DESC_CHECK (1 << 0)
+#define _IRQ_DESC_PERCPU (1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
+struct irq_desc *
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+ unsigned int check);
+void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
+
+static inline struct irq_desc *
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
+{
+ return __irq_get_desc_lock(irq, flags, true, check);
+}
+
+static inline void
+irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
+{
+ __irq_put_desc_unlock(desc, flags, true);
+}
+
+static inline struct irq_desc *
+irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
+{
+ return __irq_get_desc_lock(irq, flags, false, check);
+}
+
+static inline void
+irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
+{
+ __irq_put_desc_unlock(desc, flags, false);
+}
+
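The lock/unlock helpers pair up as in this sketch of a typical core-code accessor (the function and the state bit chosen are purely illustrative):

	static int irq_example_set_oneshot(unsigned int irq)
	{
		unsigned long flags;
		struct irq_desc *desc;

		desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
		if (!desc)
			return -EINVAL;
		desc->istate |= IRQS_ONESHOT;	/* illustrative only */
		irq_put_desc_busunlock(desc, flags);
		return 0;
	}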
/*
- * Debugging printout:
+ * Manipulation functions for irq_data.state
*/
+static inline void irqd_set_move_pending(struct irq_data *d)
+{
+ d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
+}
-#include <linux/kallsyms.h>
+static inline void irqd_clr_move_pending(struct irq_data *d)
+{
+ d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
+}
-#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+static inline void irqd_clear(struct irq_data *d, unsigned int mask)
+{
+ d->state_use_accessors &= ~mask;
+}
-static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
- printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
- irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
- printk("->handle_irq(): %p, ", desc->handle_irq);
- print_symbol("%s\n", (unsigned long)desc->handle_irq);
- printk("->chip(): %p, ", desc->chip);
- print_symbol("%s\n", (unsigned long)desc->chip);
- printk("->action(): %p\n", desc->action);
- if (desc->action) {
- printk("->action->handler(): %p, ", desc->action->handler);
- print_symbol("%s\n", (unsigned long)desc->action->handler);
- }
-
- P(IRQ_INPROGRESS);
- P(IRQ_DISABLED);
- P(IRQ_PENDING);
- P(IRQ_REPLAY);
- P(IRQ_AUTODETECT);
- P(IRQ_WAITING);
- P(IRQ_LEVEL);
- P(IRQ_MASKED);
-#ifdef CONFIG_IRQ_PER_CPU
- P(IRQ_PER_CPU);
-#endif
- P(IRQ_NOPROBE);
- P(IRQ_NOREQUEST);
- P(IRQ_NOAUTOEN);
+ d->state_use_accessors |= mask;
}
-#undef P
+static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
+{
+ return d->state_use_accessors & mask;
+}
+static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
+{
+ __this_cpu_inc(*desc->kstat_irqs);
+ __this_cpu_inc(kstat.irqs_sum);
+}
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
new file mode 100644
index 00000000000..1487a123db5
--- /dev/null
+++ b/kernel/irq/irqdesc.c
@@ -0,0 +1,554 @@
+/*
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
+ *
+ * This file contains the interrupt descriptor management code
+ *
+ * Detailed information is available in Documentation/DocBook/genericirq
+ *
+ */
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/radix-tree.h>
+#include <linux/bitmap.h>
+
+#include "internals.h"
+
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+
+#if defined(CONFIG_SMP)
+static void __init init_irq_default_affinity(void)
+{
+ alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+ cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
+{
+ if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+ return -ENOMEM;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+ free_cpumask_var(desc->irq_data.affinity);
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+static void desc_smp_init(struct irq_desc *desc, int node)
+{
+ desc->irq_data.node = node;
+ cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_clear(desc->pending_mask);
+#endif
+}
+
+static inline int desc_node(struct irq_desc *desc)
+{
+ return desc->irq_data.node;
+}
+
+#else
+static inline int
+alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
+static inline void desc_smp_init(struct irq_desc *desc, int node) { }
+static inline int desc_node(struct irq_desc *desc) { return 0; }
+#endif
+
+static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
+ struct module *owner)
+{
+ int cpu;
+
+ desc->irq_data.irq = irq;
+ desc->irq_data.chip = &no_irq_chip;
+ desc->irq_data.chip_data = NULL;
+ desc->irq_data.handler_data = NULL;
+ desc->irq_data.msi_desc = NULL;
+ irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
+ irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
+ desc->handle_irq = handle_bad_irq;
+ desc->depth = 1;
+ desc->irq_count = 0;
+ desc->irqs_unhandled = 0;
+ desc->name = NULL;
+ desc->owner = owner;
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
+ desc_smp_init(desc, node);
+}
+
+int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL_GPL(nr_irqs);
+
+static DEFINE_MUTEX(sparse_irq_lock);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
+
+#ifdef CONFIG_SPARSE_IRQ
+
+static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
+
+static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
+{
+ radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return radix_tree_lookup(&irq_desc_tree, irq);
+}
+EXPORT_SYMBOL(irq_to_desc);
+
+static void delete_irq_desc(unsigned int irq)
+{
+ radix_tree_delete(&irq_desc_tree, irq);
+}
+
+#ifdef CONFIG_SMP
+static void free_masks(struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ free_cpumask_var(desc->pending_mask);
+#endif
+ free_cpumask_var(desc->irq_data.affinity);
+}
+#else
+static inline void free_masks(struct irq_desc *desc) { }
+#endif
+
+static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
+{
+ struct irq_desc *desc;
+ gfp_t gfp = GFP_KERNEL;
+
+ desc = kzalloc_node(sizeof(*desc), gfp, node);
+ if (!desc)
+ return NULL;
+ /* allocate based on nr_cpu_ids */
+ desc->kstat_irqs = alloc_percpu(unsigned int);
+ if (!desc->kstat_irqs)
+ goto err_desc;
+
+ if (alloc_masks(desc, gfp, node))
+ goto err_kstat;
+
+ raw_spin_lock_init(&desc->lock);
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+
+ desc_set_defaults(irq, desc, node, owner);
+
+ return desc;
+
+err_kstat:
+ free_percpu(desc->kstat_irqs);
+err_desc:
+ kfree(desc);
+ return NULL;
+}
+
+static void free_desc(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ unregister_irq_proc(irq, desc);
+
+ mutex_lock(&sparse_irq_lock);
+ delete_irq_desc(irq);
+ mutex_unlock(&sparse_irq_lock);
+
+ free_masks(desc);
+ free_percpu(desc->kstat_irqs);
+ kfree(desc);
+}
+
+static int alloc_descs(unsigned int start, unsigned int cnt, int node,
+ struct module *owner)
+{
+ struct irq_desc *desc;
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ desc = alloc_desc(start + i, node, owner);
+ if (!desc)
+ goto err;
+ mutex_lock(&sparse_irq_lock);
+ irq_insert_desc(start + i, desc);
+ mutex_unlock(&sparse_irq_lock);
+ }
+ return start;
+
+err:
+ for (i--; i >= 0; i--)
+ free_desc(start + i);
+
+ mutex_lock(&sparse_irq_lock);
+ bitmap_clear(allocated_irqs, start, cnt);
+ mutex_unlock(&sparse_irq_lock);
+ return -ENOMEM;
+}
+
+static int irq_expand_nr_irqs(unsigned int nr)
+{
+ if (nr > IRQ_BITMAP_BITS)
+ return -ENOMEM;
+ nr_irqs = nr;
+ return 0;
+}
+
+int __init early_irq_init(void)
+{
+ int i, initcnt, node = first_online_node;
+ struct irq_desc *desc;
+
+ init_irq_default_affinity();
+
+ /* Let arch update nr_irqs and return the nr of preallocated irqs */
+ initcnt = arch_probe_nr_irqs();
+ printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
+
+ if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+ nr_irqs = IRQ_BITMAP_BITS;
+
+ if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+ initcnt = IRQ_BITMAP_BITS;
+
+ if (initcnt > nr_irqs)
+ nr_irqs = initcnt;
+
+ for (i = 0; i < initcnt; i++) {
+ desc = alloc_desc(i, node, NULL);
+ set_bit(i, allocated_irqs);
+ irq_insert_desc(i, desc);
+ }
+ return arch_early_irq_init();
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
+ [0 ... NR_IRQS-1] = {
+ .handle_irq = handle_bad_irq,
+ .depth = 1,
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ }
+};
+
+int __init early_irq_init(void)
+{
+ int count, i, node = first_online_node;
+ struct irq_desc *desc;
+
+ init_irq_default_affinity();
+
+ printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
+ desc = irq_desc;
+ count = ARRAY_SIZE(irq_desc);
+
+ for (i = 0; i < count; i++) {
+ desc[i].kstat_irqs = alloc_percpu(unsigned int);
+ alloc_masks(&desc[i], GFP_KERNEL, node);
+ raw_spin_lock_init(&desc[i].lock);
+ lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+ desc_set_defaults(i, &desc[i], node, NULL);
+ }
+ return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+EXPORT_SYMBOL(irq_to_desc);
+
+static void free_desc(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ desc_set_defaults(irq, desc, desc_node(desc), NULL);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+ struct module *owner)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ struct irq_desc *desc = irq_to_desc(start + i);
+
+ desc->owner = owner;
+ }
+ return start;
+}
+
+static int irq_expand_nr_irqs(unsigned int nr)
+{
+ return -ENOMEM;
+}
+
+void irq_mark_irq(unsigned int irq)
+{
+ mutex_lock(&sparse_irq_lock);
+ bitmap_set(allocated_irqs, irq, 1);
+ mutex_unlock(&sparse_irq_lock);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_LEGACY
+void irq_init_desc(unsigned int irq)
+{
+ free_desc(irq);
+}
+#endif
+
+#endif /* !CONFIG_SPARSE_IRQ */
+
+/**
+ * generic_handle_irq - Invoke the handler for a particular irq
+ * @irq: The irq number to handle
+ *
+ */
+int generic_handle_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc)
+ return -EINVAL;
+ generic_handle_irq_desc(irq, desc);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(generic_handle_irq);
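The typical caller is a chained demultiplex handler; a sketch for a hypothetical cascaded controller with a fixed irq base (foo_base, FOO_PENDING and foo_irq_base are assumptions):

	static void foo_demux(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		u32 pending = readl(foo_base + FOO_PENDING);

		chained_irq_enter(chip, desc);
		while (pending) {
			unsigned int bit = __ffs(pending);

			generic_handle_irq(foo_irq_base + bit);
			pending &= ~BIT(bit);
		}
		chained_irq_exit(chip, desc);
	}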
+
+/* Dynamic interrupt handling */
+
+/**
+ * irq_free_descs - free irq descriptors
+ * @from: Start of descriptor range
+ * @cnt: Number of consecutive irqs to free
+ */
+void irq_free_descs(unsigned int from, unsigned int cnt)
+{
+ int i;
+
+ if (from >= nr_irqs || (from + cnt) > nr_irqs)
+ return;
+
+ for (i = 0; i < cnt; i++)
+ free_desc(from + i);
+
+ mutex_lock(&sparse_irq_lock);
+ bitmap_clear(allocated_irqs, from, cnt);
+ mutex_unlock(&sparse_irq_lock);
+}
+EXPORT_SYMBOL_GPL(irq_free_descs);
+
+/**
+ * irq_alloc_descs - allocate and initialize a range of irq descriptors
+ * @irq: Allocate for specific irq number if irq >= 0
+ * @from: Start the search from this irq number
+ * @cnt: Number of consecutive irqs to allocate.
+ * @node: Preferred node on which the irq descriptor should be allocated
+ * @owner: Owning module (can be NULL)
+ *
+ * Returns the first irq number or error code
+ */
+int __ref
+__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+ struct module *owner)
+{
+ int start, ret;
+
+ if (!cnt)
+ return -EINVAL;
+
+ if (irq >= 0) {
+ if (from > irq)
+ return -EINVAL;
+ from = irq;
+ } else {
+ /*
+ * For interrupts which are freely allocated the
+ * architecture can force a lower bound to the @from
+ * argument. x86 uses this to exclude the GSI space.
+ */
+ from = arch_dynirq_lower_bound(from);
+ }
+
+ mutex_lock(&sparse_irq_lock);
+
+ start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
+ from, cnt, 0);
+ ret = -EEXIST;
+	if (irq >= 0 && start != irq)
+ goto err;
+
+ if (start + cnt > nr_irqs) {
+ ret = irq_expand_nr_irqs(start + cnt);
+ if (ret)
+ goto err;
+ }
+
+ bitmap_set(allocated_irqs, start, cnt);
+ mutex_unlock(&sparse_irq_lock);
+ return alloc_descs(start, cnt, node, owner);
+
+err:
+ mutex_unlock(&sparse_irq_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__irq_alloc_descs);
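Callers normally reach this through the irq_alloc_descs()/irq_alloc_desc_from() wrappers in <linux/irq.h>; a sketch of the usual pattern:

	/* Four dynamically numbered descriptors on the local node */
	int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());

	if (irq >= 0) {
		/* ... use irq .. irq + 3, then tear down: */
		irq_free_descs(irq, 4);
	}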
+
+#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
+/**
+ * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
+ * @cnt: number of interrupts to allocate
+ * @node: node on which to allocate
+ *
+ * Returns an interrupt number > 0, or 0 if the allocation fails.
+ */
+unsigned int irq_alloc_hwirqs(int cnt, int node)
+{
+ int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
+
+ if (irq < 0)
+ return 0;
+
+ for (i = irq; cnt > 0; i++, cnt--) {
+ if (arch_setup_hwirq(i, node))
+ goto err;
+ irq_clear_status_flags(i, _IRQ_NOREQUEST);
+ }
+ return irq;
+
+err:
+ for (i--; i >= irq; i--) {
+ irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
+ arch_teardown_hwirq(i);
+ }
+ irq_free_descs(irq, cnt);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
+
+/**
+ * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
+ * @from: Free from irq number
+ * @cnt: number of interrupts to free
+ *
+ */
+void irq_free_hwirqs(unsigned int from, int cnt)
+{
+ int i, j;
+
+ for (i = from, j = cnt; j > 0; i++, j--) {
+ irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
+ arch_teardown_hwirq(i);
+ }
+ irq_free_descs(from, cnt);
+}
+EXPORT_SYMBOL_GPL(irq_free_hwirqs);
+#endif
+
+/**
+ * irq_get_next_irq - get next allocated irq number
+ * @offset: where to start the search
+ *
+ * Returns next irq number after offset or nr_irqs if none is found.
+ */
+unsigned int irq_get_next_irq(unsigned int offset)
+{
+ return find_next_bit(allocated_irqs, nr_irqs, offset);
+}
+
+struct irq_desc *
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+ unsigned int check)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+ if (check & _IRQ_DESC_CHECK) {
+ if ((check & _IRQ_DESC_PERCPU) &&
+ !irq_settings_is_per_cpu_devid(desc))
+ return NULL;
+
+ if (!(check & _IRQ_DESC_PERCPU) &&
+ irq_settings_is_per_cpu_devid(desc))
+ return NULL;
+ }
+
+ if (bus)
+ chip_bus_lock(desc);
+ raw_spin_lock_irqsave(&desc->lock, *flags);
+ }
+ return desc;
+}
+
+void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+{
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ if (bus)
+ chip_bus_sync_unlock(desc);
+}
+
+int irq_set_percpu_devid(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc)
+ return -EINVAL;
+
+ if (desc->percpu_enabled)
+ return -EINVAL;
+
+ desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+ if (!desc->percpu_enabled)
+ return -ENOMEM;
+
+ irq_set_percpu_devid_flags(irq);
+ return 0;
+}
+
+void kstat_incr_irq_this_cpu(unsigned int irq)
+{
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+}
+
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ return desc && desc->kstat_irqs ?
+ *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+}
+
+unsigned int kstat_irqs(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int cpu;
+ int sum = 0;
+
+ if (!desc || !desc->kstat_irqs)
+ return 0;
+ for_each_possible_cpu(cpu)
+ sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+ return sum;
+}
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
new file mode 100644
index 00000000000..eb5e10e32e0
--- /dev/null
+++ b/kernel/irq/irqdomain.c
@@ -0,0 +1,711 @@
+#define pr_fmt(fmt) "irq: " fmt
+
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/topology.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+
+static LIST_HEAD(irq_domain_list);
+static DEFINE_MUTEX(irq_domain_mutex);
+
+static DEFINE_MUTEX(revmap_trees_mutex);
+static struct irq_domain *irq_default_domain;
+
+/**
+ * __irq_domain_add() - Allocate a new irq_domain data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @size: Size of linear map; 0 for radix mapping only
+ * @hwirq_max: Maximum number of interrupts supported by controller
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+ * direct mapping
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initializes an irq_domain structure.
+ * Returns pointer to IRQ domain, or NULL on failure.
+ */
+struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+ irq_hw_number_t hwirq_max, int direct_max,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain;
+
+ domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
+ GFP_KERNEL, of_node_to_nid(of_node));
+ if (WARN_ON(!domain))
+ return NULL;
+
+ /* Fill structure */
+ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+ domain->ops = ops;
+ domain->host_data = host_data;
+ domain->of_node = of_node_get(of_node);
+ domain->hwirq_max = hwirq_max;
+ domain->revmap_size = size;
+ domain->revmap_direct_max_irq = direct_max;
+
+ mutex_lock(&irq_domain_mutex);
+ list_add(&domain->link, &irq_domain_list);
+ mutex_unlock(&irq_domain_mutex);
+
+ pr_debug("Added domain %s\n", domain->name);
+ return domain;
+}
+EXPORT_SYMBOL_GPL(__irq_domain_add);
+
+/**
+ * irq_domain_remove() - Remove an irq domain.
+ * @domain: domain to remove
+ *
+ * This routine is used to remove an irq domain. The caller must ensure
+ * that all mappings within the domain have been disposed of prior to
+ * calling this routine.
+ */
+void irq_domain_remove(struct irq_domain *domain)
+{
+ mutex_lock(&irq_domain_mutex);
+
+ /*
+ * radix_tree_delete() takes care of destroying the root
+ * node when all entries are removed. Shout if there are
+ * any mappings left.
+ */
+ WARN_ON(domain->revmap_tree.height);
+
+ list_del(&domain->link);
+
+ /*
+ * If the going away domain is the default one, reset it.
+ */
+ if (unlikely(irq_default_domain == domain))
+ irq_set_default_host(NULL);
+
+ mutex_unlock(&irq_domain_mutex);
+
+ pr_debug("Removed domain %s\n", domain->name);
+
+ of_node_put(domain->of_node);
+ kfree(domain);
+}
+EXPORT_SYMBOL_GPL(irq_domain_remove);
+
+/**
+ * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in mapping
+ * @first_irq: first number of irq block assigned to the domain,
+ * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
+ * pre-map all of the irqs in the domain to virqs starting at first_irq.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates an irq_domain, and optionally if first_irq is positive then also
+ * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
+ *
+ * This is intended to implement the expected behaviour for most
+ * interrupt controllers. If device tree is used, then first_irq will be 0 and
+ * irqs get mapped dynamically on the fly. However, if the controller requires
+ * static virq assignments (non-DT boot) then it will set that up correctly.
+ */
+struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain;
+
+ domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
+ if (!domain)
+ return NULL;
+
+ if (first_irq > 0) {
+ if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
+			/* attempt to allocate irq_descs */
+ int rc = irq_alloc_descs(first_irq, first_irq, size,
+ of_node_to_nid(of_node));
+ if (rc < 0)
+ pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ first_irq);
+ }
+ irq_domain_associate_many(domain, first_irq, 0, size);
+ }
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(irq_domain_add_simple);
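A sketch of a DT interrupt controller registering itself through this helper (the size and foo_domain_ops are assumptions):

	static int __init foo_intc_init(struct device_node *np,
					struct device_node *parent)
	{
		struct irq_domain *d;

		/* 32 hwirqs, virqs assigned on the fly (first_irq == 0) */
		d = irq_domain_add_simple(np, 32, 0, &foo_domain_ops, NULL);
		return d ? 0 : -ENOMEM;
	}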
+
+/**
+ * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in legacy mapping
+ * @first_irq: first number of irq block assigned to the domain
+ * @first_hwirq: first hwirq number to use for the translation. Should normally
+ * be '0', but a positive integer can be used if the effective
+ * hwirqs numbering does not begin at zero.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: the map() callback will be called before this function returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller).
+ */
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain;
+
+ domain = __irq_domain_add(of_node, first_hwirq + size,
+ first_hwirq + size, 0, ops, host_data);
+ if (!domain)
+ return NULL;
+
+ irq_domain_associate_many(domain, first_irq, first_hwirq, size);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
+
+/**
+ * irq_find_host() - Locates a domain for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+struct irq_domain *irq_find_host(struct device_node *node)
+{
+ struct irq_domain *h, *found = NULL;
+ int rc;
+
+ /* We might want to match the legacy controller last since
+ * it might potentially be set to match all interrupts in
+	 * the absence of a device node. This isn't a problem yet,
+	 * though...
+ */
+ mutex_lock(&irq_domain_mutex);
+ list_for_each_entry(h, &irq_domain_list, link) {
+ if (h->ops->match)
+ rc = h->ops->match(h, node);
+ else
+ rc = (h->of_node != NULL) && (h->of_node == node);
+
+ if (rc) {
+ found = h;
+ break;
+ }
+ }
+ mutex_unlock(&irq_domain_mutex);
+ return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+/**
+ * irq_set_default_host() - Set a "default" irq domain
+ * @domain: default domain pointer
+ *
+ * For convenience, it's possible to set a "default" domain that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+void irq_set_default_host(struct irq_domain *domain)
+{
+ pr_debug("Default domain set to @0x%p\n", domain);
+
+ irq_default_domain = domain;
+}
+EXPORT_SYMBOL_GPL(irq_set_default_host);
+
+static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+{
+ struct irq_data *irq_data = irq_get_irq_data(irq);
+ irq_hw_number_t hwirq;
+
+ if (WARN(!irq_data || irq_data->domain != domain,
+ "virq%i doesn't exist; cannot disassociate\n", irq))
+ return;
+
+ hwirq = irq_data->hwirq;
+ irq_set_status_flags(irq, IRQ_NOREQUEST);
+
+ /* remove chip and handler */
+ irq_set_chip_and_handler(irq, NULL, NULL);
+
+ /* Make sure it's completed */
+ synchronize_irq(irq);
+
+ /* Tell the PIC about it */
+ if (domain->ops->unmap)
+ domain->ops->unmap(domain, irq);
+ smp_mb();
+
+ irq_data->domain = NULL;
+ irq_data->hwirq = 0;
+
+ /* Clear reverse map for this hwirq */
+ if (hwirq < domain->revmap_size) {
+ domain->linear_revmap[hwirq] = 0;
+ } else {
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_delete(&domain->revmap_tree, hwirq);
+ mutex_unlock(&revmap_trees_mutex);
+ }
+}
+
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_data *irq_data = irq_get_irq_data(virq);
+ int ret;
+
+ if (WARN(hwirq >= domain->hwirq_max,
+ "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
+ return -EINVAL;
+ if (WARN(!irq_data, "error: virq%i is not allocated", virq))
+ return -EINVAL;
+ if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
+ return -EINVAL;
+
+ mutex_lock(&irq_domain_mutex);
+ irq_data->hwirq = hwirq;
+ irq_data->domain = domain;
+ if (domain->ops->map) {
+ ret = domain->ops->map(domain, virq, hwirq);
+ if (ret != 0) {
+ /*
+ * If map() returns -EPERM, this interrupt is protected
+ * by the firmware or some other service and shall not
+ * be mapped. Don't bother telling the user about it.
+ */
+ if (ret != -EPERM) {
+ pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
+ domain->name, hwirq, virq, ret);
+ }
+ irq_data->domain = NULL;
+ irq_data->hwirq = 0;
+ mutex_unlock(&irq_domain_mutex);
+ return ret;
+ }
+
+ /* If not already assigned, give the domain the chip's name */
+ if (!domain->name && irq_data->chip)
+ domain->name = irq_data->chip->name;
+ }
+
+ if (hwirq < domain->revmap_size) {
+ domain->linear_revmap[hwirq] = virq;
+ } else {
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+ mutex_unlock(&revmap_trees_mutex);
+ }
+ mutex_unlock(&irq_domain_mutex);
+
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_associate);
+
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count)
+{
+ int i;
+
+ pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
+ of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+
+ for (i = 0; i < count; i++) {
+ irq_domain_associate(domain, irq_base + i, hwirq_base + i);
+ }
+}
+EXPORT_SYMBOL_GPL(irq_domain_associate_many);
+
+/**
+ * irq_create_direct_mapping() - Allocate an irq for direct mapping
+ * @domain: domain to allocate the irq for or NULL for default domain
+ *
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux irq as the hardware interrupt number. It still uses the linear
+ * or radix tree to store the mapping, but the irq controller can optimize
+ * the revmap path by using the hwirq directly.
+ */
+unsigned int irq_create_direct_mapping(struct irq_domain *domain)
+{
+ unsigned int virq;
+
+ if (domain == NULL)
+ domain = irq_default_domain;
+
+ virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
+ if (!virq) {
+ pr_debug("create_direct virq allocation failed\n");
+ return 0;
+ }
+ if (virq >= domain->revmap_direct_max_irq) {
+ pr_err("ERROR: no free irqs available below %i maximum\n",
+ domain->revmap_direct_max_irq);
+ irq_free_desc(virq);
+ return 0;
+ }
+ pr_debug("create_direct obtained virq %d\n", virq);
+
+ if (irq_domain_associate(domain, virq, virq)) {
+ irq_free_desc(virq);
+ return 0;
+ }
+
+ return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+
+/**
+ * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * irq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ */
+unsigned int irq_create_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ unsigned int hint;
+ int virq;
+
+ pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+
+	/* Look for default domain if necessary */
+ if (domain == NULL)
+ domain = irq_default_domain;
+ if (domain == NULL) {
+ WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
+ return 0;
+ }
+ pr_debug("-> using domain @%p\n", domain);
+
+ /* Check if mapping already exists */
+ virq = irq_find_mapping(domain, hwirq);
+ if (virq) {
+ pr_debug("-> existing mapping on virq %d\n", virq);
+ return virq;
+ }
+
+ /* Allocate a virtual interrupt number */
+ hint = hwirq % nr_irqs;
+ if (hint == 0)
+ hint++;
+ virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
+ if (virq <= 0)
+ virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
+ if (virq <= 0) {
+ pr_debug("-> virq allocation failed\n");
+ return 0;
+ }
+
+ if (irq_domain_associate(domain, virq, hwirq)) {
+ irq_free_desc(virq);
+ return 0;
+ }
+
+ pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
+ hwirq, of_node_full_name(domain->of_node), virq);
+
+ return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
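A sketch of the common calling pattern (foo_domain and foo_handler are assumptions):

	unsigned int virq = irq_create_mapping(foo_domain, 5);

	if (virq && request_irq(virq, foo_handler, 0, "foo", NULL))
		pr_err("foo: request_irq for hwirq 5 failed\n");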
+
+/**
+ * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
+ * @domain: domain owning the interrupt range
+ * @irq_base: beginning of linux IRQ range
+ * @hwirq_base: beginning of hardware IRQ range
+ * @count: Number of interrupts to map
+ *
+ * This routine is used for allocating and mapping a range of hardware
+ * irqs to linux irqs where the linux irq numbers are at pre-defined
+ * locations. For use by controllers that already have static mappings
+ * to insert in to the domain.
+ *
+ * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
+ * domain insertion.
+ *
+ * 0 is returned upon success, while any failure to establish a static
+ * mapping is treated as an error.
+ */
+int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count)
+{
+ int ret;
+
+ ret = irq_alloc_descs(irq_base, irq_base, count,
+ of_node_to_nid(domain->of_node));
+ if (unlikely(ret < 0))
+ return ret;
+
+ irq_domain_associate_many(domain, irq_base, hwirq_base, count);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
+
+unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
+{
+ struct irq_domain *domain;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ unsigned int virq;
+
+ domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
+ if (!domain) {
+		pr_warn("no irq domain found for %s!\n",
+ of_node_full_name(irq_data->np));
+ return 0;
+ }
+
+ /* If domain has no translation, then we assume interrupt line */
+ if (domain->ops->xlate == NULL)
+ hwirq = irq_data->args[0];
+ else {
+ if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
+ irq_data->args_count, &hwirq, &type))
+ return 0;
+ }
+
+ /* Create mapping */
+ virq = irq_create_mapping(domain, hwirq);
+ if (!virq)
+ return virq;
+
+ /* Set type if specified and different than the current one */
+ if (type != IRQ_TYPE_NONE &&
+ type != irq_get_trigger_type(virq))
+ irq_set_irq_type(virq, type);
+ return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+/**
+ * irq_dispose_mapping() - Unmap an interrupt
+ * @virq: linux irq number of the interrupt to unmap
+ */
+void irq_dispose_mapping(unsigned int virq)
+{
+ struct irq_data *irq_data = irq_get_irq_data(virq);
+ struct irq_domain *domain;
+
+ if (!virq || !irq_data)
+ return;
+
+ domain = irq_data->domain;
+ if (WARN_ON(domain == NULL))
+ return;
+
+ irq_domain_disassociate(domain, virq);
+ irq_free_desc(virq);
+}
+EXPORT_SYMBOL_GPL(irq_dispose_mapping);
+
+/**
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ */
+unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ struct irq_data *data;
+
+	/* Look for default domain if necessary */
+ if (domain == NULL)
+ domain = irq_default_domain;
+ if (domain == NULL)
+ return 0;
+
+ if (hwirq < domain->revmap_direct_max_irq) {
+ data = irq_get_irq_data(hwirq);
+ if (data && (data->domain == domain) && (data->hwirq == hwirq))
+ return hwirq;
+ }
+
+ /* Check if the hwirq is in the linear revmap. */
+ if (hwirq < domain->revmap_size)
+ return domain->linear_revmap[hwirq];
+
+ rcu_read_lock();
+ data = radix_tree_lookup(&domain->revmap_tree, hwirq);
+ rcu_read_unlock();
+ return data ? data->irq : 0;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
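The usual fast path is a flow handler translating a pending hardware status bit back to a virq (a sketch; the foo_* names are assumptions):

	u32 stat = readl(foo_base + FOO_STAT);

	while (stat) {
		irq_hw_number_t hwirq = __ffs(stat);

		generic_handle_irq(irq_find_mapping(foo_domain, hwirq));
		stat &= ~BIT(hwirq);
	}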
+
+#ifdef CONFIG_IRQ_DOMAIN_DEBUG
+static int virq_debug_show(struct seq_file *m, void *private)
+{
+ unsigned long flags;
+ struct irq_desc *desc;
+ struct irq_domain *domain;
+ struct radix_tree_iter iter;
+ void *data, **slot;
+ int i;
+
+ seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
+ "name", "mapped", "linear-max", "direct-max", "devtree-node");
+ mutex_lock(&irq_domain_mutex);
+ list_for_each_entry(domain, &irq_domain_list, link) {
+ int count = 0;
+ radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
+ count++;
+ seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
+ domain == irq_default_domain ? '*' : ' ', domain->name,
+ domain->revmap_size + count, domain->revmap_size,
+ domain->revmap_direct_max_irq,
+ domain->of_node ? of_node_full_name(domain->of_node) : "");
+ }
+ mutex_unlock(&irq_domain_mutex);
+
+ seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
+ "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+ "active", "type", "domain");
+
+ for (i = 1; i < nr_irqs; i++) {
+ desc = irq_to_desc(i);
+ if (!desc)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ domain = desc->irq_data.domain;
+
+ if (domain) {
+ struct irq_chip *chip;
+ int hwirq = desc->irq_data.hwirq;
+ bool direct;
+
+ seq_printf(m, "%5d ", i);
+ seq_printf(m, "0x%05x ", hwirq);
+
+ chip = irq_desc_get_chip(desc);
+ seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
+
+ data = irq_desc_get_chip_data(desc);
+ seq_printf(m, data ? "0x%p " : " %p ", data);
+
+ seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
+ direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
+ seq_printf(m, "%6s%-8s ",
+ (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
+ direct ? "(DIRECT)" : "");
+ seq_printf(m, "%s\n", desc->irq_data.domain->name);
+ }
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ return 0;
+}
+
+static int virq_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, virq_debug_show, inode->i_private);
+}
+
+static const struct file_operations virq_debug_fops = {
+ .open = virq_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init irq_debugfs_init(void)
+{
+ if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
+ NULL, &virq_debug_fops) == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+__initcall(irq_debugfs_init);
+#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
+
+/**
+ * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with one cell
+ * bindings where the cell value maps directly to the hwirq number.
+ */
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
+
+/**
+ * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with two cell
+ * bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ */
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 2))
+ return -EINVAL;
+ *out_hwirq = intspec[0];
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
+
+/**
+ * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with either one
+ * or two cell bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ *
+ * Note: don't use this function unless your interrupt controller explicitly
+ * supports both one and two cell bindings. For the majority of controllers
+ * the _onecell() or _twocell() variants above should be used.
+ */
+int irq_domain_xlate_onetwocell(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+ *out_hwirq = intspec[0];
+ *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
+
+const struct irq_domain_ops irq_domain_simple_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
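/*
 * Editor's sketch, not part of this patch: a minimal, hypothetical
 * controller driver wiring one of the generic helpers above into its
 * irq_domain_ops. All "foo_*" names (including foo_irq_chip) are
 * invented for illustration.
 */
static int foo_domain_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
	.map	= foo_domain_map,
	.xlate	= irq_domain_xlate_twocell,	/* two-cell DT bindings */
};

/*
 * Registered from the controller's init path, e.g.:
 *	domain = irq_domain_add_linear(np, FOO_NR_IRQS, &foo_domain_ops, priv);
 */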
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 438a0146428..3dc6a61bf06 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -7,32 +7,34 @@
* This file contains driver APIs to the irq subsystem.
*/
+#define pr_fmt(fmt) "genirq: " fmt
+
#include <linux/irq.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/task_work.h>
#include "internals.h"
-#ifdef CONFIG_SMP
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
-/**
- * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
- * @irq: interrupt number to wait for
- *
- * This function waits for any pending IRQ handlers for this interrupt
- * to complete before returning. If you use this function while
- * holding a resource the IRQ handler may need you will deadlock.
- *
- * This function may be called - with care - from IRQ context.
- */
-void synchronize_irq(unsigned int irq)
+static int __init setup_forced_irqthreads(char *arg)
{
- struct irq_desc *desc = irq_desc + irq;
- unsigned int status;
+ force_irqthreads = true;
+ return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
- if (irq >= NR_IRQS)
- return;
+static void __synchronize_hardirq(struct irq_desc *desc)
+{
+ bool inprogress;
do {
unsigned long flags;
@@ -41,19 +43,72 @@ void synchronize_irq(unsigned int irq)
* Wait until we're out of the critical section. This might
* give the wrong answer due to the lack of memory barriers.
*/
- while (desc->status & IRQ_INPROGRESS)
+ while (irqd_irq_inprogress(&desc->irq_data))
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
- spin_lock_irqsave(&desc->lock, flags);
- status = desc->status;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ inprogress = irqd_irq_inprogress(&desc->irq_data);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
- } while (status & IRQ_INPROGRESS);
+ } while (inprogress);
+}
+
+/**
+ * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
+ * @irq: interrupt number to wait for
+ *
+ * This function waits for any pending hard IRQ handlers for this
+ * interrupt to complete before returning. If you use this
+ * function while holding a resource the IRQ handler may need you
+ * will deadlock. It does not take associated threaded handlers
+ * into account.
+ *
+ * Do not use this for shutdown scenarios where you must be sure
+ * that all parts (hardirq and threaded handler) have completed.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void synchronize_hardirq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc)
+ __synchronize_hardirq(desc);
+}
+EXPORT_SYMBOL(synchronize_hardirq);
+
+/**
+ * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+ * @irq: interrupt number to wait for
+ *
+ * This function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void synchronize_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+ __synchronize_hardirq(desc);
+ /*
+ * We made sure that no hardirq handler is
+ * running. Now verify that no threaded handlers are
+ * active.
+ */
+ wait_event(desc->wait_for_threads,
+ !atomic_read(&desc->threads_active));
+ }
}
EXPORT_SYMBOL(synchronize_irq);
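/*
 * Editor's usage sketch (hypothetical helper, not part of this patch):
 * a typical quiesce sequence showing the two synchronization levels.
 * Note that disable_irq() already performs the synchronize_irq() step
 * internally; the explicit calls here are purely illustrative.
 */
static void foo_quiesce(unsigned int irq)
{
	disable_irq_nosync(irq);
	/* Waits for the hard irq handler only; a threaded handler
	 * may still be running afterwards: */
	synchronize_hardirq(irq);
	/* Waits for both the hard irq handler and the irq thread: */
	synchronize_irq(irq);
}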
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
/**
* irq_can_set_affinity - Check if the affinity of a given irq can be set
* @irq: Interrupt to check
@@ -61,41 +116,296 @@ EXPORT_SYMBOL(synchronize_irq);
*/
int irq_can_set_affinity(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
- if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
- !desc->chip->set_affinity)
+ if (!desc || !irqd_can_balance(&desc->irq_data) ||
+ !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
return 0;
return 1;
}
/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
+ * irq_set_thread_affinity - Notify irq threads to adjust affinity
+ * @desc: irq descriptor which has affinity changed
*
+ * We just set IRQTF_AFFINITY and delegate the affinity setting
+ * to the interrupt thread itself. We can not call
+ * set_cpus_allowed_ptr() here as we hold desc->lock and this
+ * code can be called from hard interrupt context.
*/
-int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+void irq_set_thread_affinity(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_desc + irq;
-
- if (!desc->chip->set_affinity)
- return -EINVAL;
+ struct irqaction *action = desc->action;
- set_balance_irq_affinity(irq, cpumask);
+ while (action) {
+ if (action->thread)
+ set_bit(IRQTF_AFFINITY, &action->thread_flags);
+ action = action->next;
+ }
+}
#ifdef CONFIG_GENERIC_PENDING_IRQ
- set_pending_irq(irq, cpumask);
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+ return irqd_can_move_in_process_context(data);
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+ return irqd_is_setaffinity_pending(data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+ cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+ cpumask_copy(mask, desc->pending_mask);
+}
#else
- desc->affinity = cpumask;
- desc->chip->set_affinity(irq, cpumask);
+static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
+static inline bool irq_move_pending(struct irq_data *data) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
+
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ int ret;
+
+ ret = chip->irq_set_affinity(data, mask, force);
+ switch (ret) {
+ case IRQ_SET_MASK_OK:
+ cpumask_copy(data->affinity, mask);
+ case IRQ_SET_MASK_OK_NOCOPY:
+ irq_set_thread_affinity(desc);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ struct irq_desc *desc = irq_data_to_desc(data);
+ int ret = 0;
+
+ if (!chip || !chip->irq_set_affinity)
+ return -EINVAL;
+
+ if (irq_can_move_pcntxt(data)) {
+ ret = irq_do_set_affinity(data, mask, force);
+ } else {
+ irqd_set_move_pending(data);
+ irq_copy_pending(desc, mask);
+ }
+
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
+ schedule_work(&desc->affinity_notify->work);
+ }
+ irqd_set(data, IRQD_AFFINITY_SET);
+
+ return ret;
+}
+
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return ret;
+}
+
+int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+
+ if (!desc)
+ return -EINVAL;
+ desc->affinity_hint = m;
+ irq_put_desc_unlock(desc, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
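/*
 * Editor's sketch (not part of this patch): a multi-queue driver
 * publishing hints so userspace irqbalance can see the preferred CPU
 * per vector. "irqs"/"nr_queues" are hypothetical. The hint cpumask
 * must stay valid until cleared with irq_set_affinity_hint(irq, NULL)
 * before free_irq().
 */
static void foo_set_queue_hints(unsigned int *irqs, int nr_queues)
{
	int q;

	for (q = 0; q < nr_queues; q++)
		irq_set_affinity_hint(irqs[q],
				      cpumask_of(q % num_online_cpus()));
}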
+
+static void irq_affinity_notify(struct work_struct *work)
+{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+
+ if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ goto out;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (irq_move_pending(&desc->irq_data))
+ irq_get_pending(cpumask, desc);
+ else
+ cpumask_copy(cpumask, desc->irq_data.affinity);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ notify->notify(notify, cpumask);
+
+ free_cpumask_var(cpumask);
+out:
+ kref_put(&notify->kref, notify->release);
+}
+
+/**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+ * @notify: Context for notification, or %NULL to disable
+ * notification. Function pointers must be initialised;
+ * the other fields will be initialised by this function.
+ *
+ * Must be called in process context. Notification may only be enabled
+ * after the IRQ is allocated and must be disabled before the IRQ is
+ * freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_affinity_notify *old_notify;
+ unsigned long flags;
+
+ /* The release function is promised process context */
+ might_sleep();
+
+ if (!desc)
+ return -EINVAL;
+
+ /* Complete initialisation of *notify */
+ if (notify) {
+ notify->irq = irq;
+ kref_init(&notify->kref);
+ INIT_WORK(&notify->work, irq_affinity_notify);
+ }
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ old_notify = desc->affinity_notify;
+ desc->affinity_notify = notify;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ if (old_notify)
+ kref_put(&old_notify->kref, old_notify->release);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
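/*
 * Editor's sketch (not part of this patch): registering an affinity
 * notifier. Only .notify and .release must be filled in before the
 * call; the remaining fields are initialised by the function above.
 * All "foo_*" names are hypothetical.
 */
static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	/* e.g. re-point per-queue state at the CPUs in @mask */
}

static void foo_affinity_release(struct kref *ref)
{
	/* drop the reference on the object embedding the notifier */
}

static struct irq_affinity_notify foo_notify = {
	.notify		= foo_affinity_changed,
	.release	= foo_affinity_release,
};

/*
 * Enable with irq_set_affinity_notifier(irq, &foo_notify) and disable
 * with irq_set_affinity_notifier(irq, NULL) before free_irq().
 */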
+
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+{
+ struct cpumask *set = irq_default_affinity;
+ int node = desc->irq_data.node;
+
+ /* Excludes PER_CPU and NO_BALANCE interrupts */
+ if (!irq_can_set_affinity(irq))
+ return 0;
+
+ /*
+ * Preserve a userspace affinity setup, but make sure that
+ * one of the targets is online.
+ */
+ if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+ if (cpumask_intersects(desc->irq_data.affinity,
+ cpu_online_mask))
+ set = desc->irq_data.affinity;
+ else
+ irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+ }
+
+ cpumask_and(mask, cpu_online_mask, set);
+ if (node != NUMA_NO_NODE) {
+ const struct cpumask *nodemask = cpumask_of_node(node);
+
+ /* make sure at least one of the cpus in nodemask is online */
+ if (cpumask_intersects(mask, nodemask))
+ cpumask_and(mask, mask, nodemask);
+ }
+ irq_do_set_affinity(&desc->irq_data, mask, false);
return 0;
}
+#else
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
+{
+ return irq_select_affinity(irq);
+}
+#endif
+
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+ int ret;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ ret = setup_affinity(irq, desc, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return ret;
+}
+#else
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+{
+ return 0;
+}
#endif
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+ if (suspend) {
+ if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
+ return;
+ desc->istate |= IRQS_SUSPENDED;
+ }
+
+ if (!desc->depth++)
+ irq_disable(desc);
+}
+
+static int __disable_irq_nosync(unsigned int irq)
+{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+
+ if (!desc)
+ return -EINVAL;
+ __disable_irq(desc, irq, false);
+ irq_put_desc_busunlock(desc, flags);
+ return 0;
+}
+
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
@@ -109,18 +419,7 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
*/
void disable_irq_nosync(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
- return;
-
- spin_lock_irqsave(&desc->lock, flags);
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->chip->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
+ __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -138,17 +437,44 @@ EXPORT_SYMBOL(disable_irq_nosync);
*/
void disable_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
-
- if (irq >= NR_IRQS)
- return;
-
- disable_irq_nosync(irq);
- if (desc->action)
+ if (!__disable_irq_nosync(irq))
synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
+{
+ if (resume) {
+ if (!(desc->istate & IRQS_SUSPENDED)) {
+ if (!desc->action)
+ return;
+ if (!(desc->action->flags & IRQF_FORCE_RESUME))
+ return;
+ /* Pretend that it got disabled ! */
+ desc->depth++;
+ }
+ desc->istate &= ~IRQS_SUSPENDED;
+ }
+
+ switch (desc->depth) {
+ case 0:
+ err_out:
+ WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+ break;
+ case 1: {
+ if (desc->istate & IRQS_SUSPENDED)
+ goto err_out;
+ /* Prevent probing on this irq: */
+ irq_settings_set_noprobe(desc);
+ irq_enable(desc);
+ check_irq_resend(desc, irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ }
+}
+
/**
* enable_irq - enable handling of an irq
* @irq: Interrupt to enable
@@ -157,39 +483,42 @@ EXPORT_SYMBOL(disable_irq);
* matches the last disable, processing of interrupts on this
* IRQ line is re-enabled.
*
- * This function may be called from IRQ context.
+ * This function may be called from IRQ context only when
+ * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
*/
void enable_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
- if (irq >= NR_IRQS)
+ if (!desc)
return;
+ if (WARN(!desc->irq_data.chip,
+ KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+ goto out;
- spin_lock_irqsave(&desc->lock, flags);
- switch (desc->depth) {
- case 0:
- printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
- WARN_ON(1);
- break;
- case 1: {
- unsigned int status = desc->status & ~IRQ_DISABLED;
-
- /* Prevent probing on this irq: */
- desc->status = status | IRQ_NOPROBE;
- check_irq_resend(desc, irq);
- /* fall-through */
- }
- default:
- desc->depth--;
- }
- spin_unlock_irqrestore(&desc->lock, flags);
+ __enable_irq(desc, irq, false);
+out:
+ irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
+static int set_irq_wake_real(unsigned int irq, unsigned int on)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int ret = -ENXIO;
+
+ if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
+ return 0;
+
+ if (desc->irq_data.chip->irq_set_wake)
+ ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
+
+ return ret;
+}
+
/**
- * set_irq_wake - control irq power management wakeup
+ * irq_set_irq_wake - control irq power management wakeup
* @irq: interrupt to control
* @on: enable/disable power management wakeup
*
@@ -200,38 +529,41 @@ EXPORT_SYMBOL(enable_irq);
* Wakeup mode lets this IRQ wake the system from sleep
* states like "suspend to RAM".
*/
-int set_irq_wake(unsigned int irq, unsigned int on)
+int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
- struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
- int ret = -ENXIO;
- int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+ int ret = 0;
+
+ if (!desc)
+ return -EINVAL;
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
- spin_lock_irqsave(&desc->lock, flags);
if (on) {
- if (desc->wake_depth++ == 0)
- desc->status |= IRQ_WAKEUP;
- else
- set_wake = NULL;
+ if (desc->wake_depth++ == 0) {
+ ret = set_irq_wake_real(irq, on);
+ if (ret)
+ desc->wake_depth = 0;
+ else
+ irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
+ }
} else {
if (desc->wake_depth == 0) {
- printk(KERN_WARNING "Unbalanced IRQ %d "
- "wake disable\n", irq);
- WARN_ON(1);
- } else if (--desc->wake_depth == 0)
- desc->status &= ~IRQ_WAKEUP;
- else
- set_wake = NULL;
+ WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
+ } else if (--desc->wake_depth == 0) {
+ ret = set_irq_wake_real(irq, on);
+ if (ret)
+ desc->wake_depth = 1;
+ else
+ irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
+ }
}
- if (set_wake)
- ret = desc->chip->set_wake(irq, on);
- spin_unlock_irqrestore(&desc->lock, flags);
+ irq_put_desc_busunlock(desc, flags);
return ret;
}
-EXPORT_SYMBOL(set_irq_wake);
+EXPORT_SYMBOL(irq_set_irq_wake);
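/*
 * Editor's sketch (not part of this patch): arming a line as a wakeup
 * source across suspend, typically from dev_pm_ops callbacks. "struct
 * foo_priv" and its irq member are hypothetical.
 */
static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(priv->irq, 1);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(priv->irq, 0);
	return 0;
}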
/*
* Internal function that tells the architecture code whether a
@@ -240,170 +572,832 @@ EXPORT_SYMBOL(set_irq_wake);
*/
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
- struct irqaction *action;
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+ int canrequest = 0;
+
+ if (!desc)
+ return 0;
- if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
+ if (irq_settings_can_request(desc)) {
+ if (!desc->action ||
+ irqflags & desc->action->flags & IRQF_SHARED)
+ canrequest = 1;
+ }
+ irq_put_desc_unlock(desc, flags);
+ return canrequest;
+}
+
+int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
+ unsigned long flags)
+{
+ struct irq_chip *chip = desc->irq_data.chip;
+ int ret, unmask = 0;
+
+ if (!chip || !chip->irq_set_type) {
+ /*
+ * IRQF_TRIGGER_* but the PIC does not support multiple
+ * flow-types?
+ */
+ pr_debug("No set_type function for IRQ %d (%s)\n", irq,
+ chip ? (chip->name ? : "unknown") : "unknown");
return 0;
+ }
+
+ flags &= IRQ_TYPE_SENSE_MASK;
+
+ if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+ if (!irqd_irq_masked(&desc->irq_data))
+ mask_irq(desc);
+ if (!irqd_irq_disabled(&desc->irq_data))
+ unmask = 1;
+ }
+
+ /* caller masked out all except trigger mode flags */
+ ret = chip->irq_set_type(&desc->irq_data, flags);
+
+ switch (ret) {
+ case IRQ_SET_MASK_OK:
+ irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+ irqd_set(&desc->irq_data, flags);
+
+ case IRQ_SET_MASK_OK_NOCOPY:
+ flags = irqd_get_trigger_type(&desc->irq_data);
+ irq_settings_set_trigger_mask(desc, flags);
+ irqd_clear(&desc->irq_data, IRQD_LEVEL);
+ irq_settings_clr_level(desc);
+ if (flags & IRQ_TYPE_LEVEL_MASK) {
+ irq_settings_set_level(desc);
+ irqd_set(&desc->irq_data, IRQD_LEVEL);
+ }
+
+ ret = 0;
+ break;
+ default:
+ pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
+ flags, irq, chip->irq_set_type);
+ }
+ if (unmask)
+ unmask_irq(desc);
+ return ret;
+}
+
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq)
+{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+ if (!desc)
+ return -EINVAL;
+
+ desc->parent_irq = parent_irq;
+
+ irq_put_desc_unlock(desc, flags);
+ return 0;
+}
+#endif
+
+/*
+ * Default primary interrupt handler for threaded interrupts. Is
+ * assigned as primary handler when request_threaded_irq is called
+ * with handler == NULL. Useful for oneshot interrupts.
+ */
+static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Primary handler for nested threaded interrupts. Should never be
+ * called.
+ */
+static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
+{
+ WARN(1, "Primary handler called for nested irq %d\n", irq);
+ return IRQ_NONE;
+}
+
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!kthread_should_stop()) {
+
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+ return -1;
+}
+
+/*
+ * Oneshot interrupts keep the irq line masked until the threaded
+ * handler finished. unmask if the interrupt has not been disabled and
+ * is marked MASKED.
+ */
+static void irq_finalize_oneshot(struct irq_desc *desc,
+ struct irqaction *action)
+{
+ if (!(desc->istate & IRQS_ONESHOT))
+ return;
+again:
+ chip_bus_lock(desc);
+ raw_spin_lock_irq(&desc->lock);
+
+ /*
+ * Implausible though it may be, we need to protect ourselves against
+ * the following scenario:
+ *
+ * The thread might finish before the hard interrupt handler
+ * on the other CPU. If we unmask the irq line then the
+ * interrupt can come in again, mask the line and leave due
+ * to IRQS_INPROGRESS, so the irq line stays masked forever.
+ *
+ * This also serializes the state of shared oneshot handlers
+ * versus "desc->threads_onehsot |= action->thread_mask;" in
+ * irq_wake_thread(). See the comment there which explains the
+ * serialization.
+ */
+ if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
+ raw_spin_unlock_irq(&desc->lock);
+ chip_bus_sync_unlock(desc);
+ cpu_relax();
+ goto again;
+ }
- action = irq_desc[irq].action;
- if (action)
- if (irqflags & action->flags & IRQF_SHARED)
- action = NULL;
+ /*
+ * Now check again, whether the thread should run. Otherwise
+ * we would clear the threads_oneshot bit of this thread which
+ * was just set.
+ */
+ if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ goto out_unlock;
+
+ desc->threads_oneshot &= ~action->thread_mask;
- return !action;
+ if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
+ irqd_irq_masked(&desc->irq_data))
+ unmask_threaded_irq(desc);
+
+out_unlock:
+ raw_spin_unlock_irq(&desc->lock);
+ chip_bus_sync_unlock(desc);
}
-void compat_irq_chip_set_default_handler(struct irq_desc *desc)
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
+ cpumask_var_t mask;
+ bool valid = true;
+
+ if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+ return;
+
+ /*
+ * In case we are out of memory we set IRQTF_AFFINITY again and
+ * try again next time
+ */
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ set_bit(IRQTF_AFFINITY, &action->thread_flags);
+ return;
+ }
+
+ raw_spin_lock_irq(&desc->lock);
/*
- * If the architecture still has not overriden
- * the flow handler then zap the default. This
- * should catch incorrect flow-type setting.
+ * This code is triggered unconditionally. Check the affinity
+ * mask pointer. For CONFIG_CPUMASK_OFFSTACK=n this is optimized out.
*/
- if (desc->handle_irq == &handle_bad_irq)
- desc->handle_irq = NULL;
+ if (desc->irq_data.affinity)
+ cpumask_copy(mask, desc->irq_data.affinity);
+ else
+ valid = false;
+ raw_spin_unlock_irq(&desc->lock);
+
+ if (valid)
+ set_cpus_allowed_ptr(current, mask);
+ free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+/*
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static irqreturn_t
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+ irqreturn_t ret;
+
+ local_bh_disable();
+ ret = action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action);
+ local_bh_enable();
+ return ret;
+}
+
+/*
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow buses to
+ * complete.
+ */
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+ struct irqaction *action)
+{
+ irqreturn_t ret;
+
+ ret = action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action);
+ return ret;
+}
+
+static void wake_threads_waitq(struct irq_desc *desc)
+{
+ if (atomic_dec_and_test(&desc->threads_active))
+ wake_up(&desc->wait_for_threads);
+}
+
+static void irq_thread_dtor(struct callback_head *unused)
+{
+ struct task_struct *tsk = current;
+ struct irq_desc *desc;
+ struct irqaction *action;
+
+ if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+ return;
+
+ action = kthread_data(tsk);
+
+ pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+ tsk->comm, tsk->pid, action->irq);
+
+ desc = irq_to_desc(action->irq);
+ /*
+ * If IRQTF_RUNTHREAD is set, we need to decrement
+ * desc->threads_active and wake possible waiters.
+ */
+ if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ wake_threads_waitq(desc);
+
+ /* Prevent a stale desc->threads_oneshot */
+ irq_finalize_oneshot(desc, action);
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+ struct callback_head on_exit_work;
+ struct irqaction *action = data;
+ struct irq_desc *desc = irq_to_desc(action->irq);
+ irqreturn_t (*handler_fn)(struct irq_desc *desc,
+ struct irqaction *action);
+
+ if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+ &action->thread_flags))
+ handler_fn = irq_forced_thread_fn;
+ else
+ handler_fn = irq_thread_fn;
+
+ init_task_work(&on_exit_work, irq_thread_dtor);
+ task_work_add(current, &on_exit_work, false);
+
+ irq_thread_check_affinity(desc, action);
+
+ while (!irq_wait_for_interrupt(action)) {
+ irqreturn_t action_ret;
+
+ irq_thread_check_affinity(desc, action);
+
+ action_ret = handler_fn(desc, action);
+ if (action_ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
+ wake_threads_waitq(desc);
+ }
+
+ /*
+ * This is the regular exit path. __free_irq() is stopping the
+ * thread via kthread_stop() after calling
+ * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+ * oneshot mask bit can be set. We cannot verify that as we
+ * cannot touch the oneshot mask at this point anymore as
+ * __setup_irq() might have given out current's thread_mask
+ * again.
+ */
+ task_work_cancel(current, irq_thread_dtor);
+ return 0;
+}
+
+/**
+ * irq_wake_thread - wake the irq thread for the action identified by dev_id
+ * @irq: Interrupt line
+ * @dev_id: Device identity for which the thread should be woken
+ *
+ */
+void irq_wake_thread(unsigned int irq, void *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ unsigned long flags;
+
+ if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ return;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ for (action = desc->action; action; action = action->next) {
+ if (action->dev_id == dev_id) {
+ if (action->thread)
+ __irq_wake_thread(desc, action);
+ break;
+ }
+ }
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(irq_wake_thread);
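/*
 * Editor's sketch (not part of this patch): kicking the threaded
 * handler without a hardware interrupt, e.g. from a watchdog timer
 * when an interrupt is suspected lost. @dev must be the same dev_id
 * that was passed to request_threaded_irq().
 */
static void foo_watchdog_kick(unsigned int irq, void *dev)
{
	irq_wake_thread(irq, dev);
}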
+
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+ if (!force_irqthreads)
+ return;
+ if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+ return;
+
+ new->flags |= IRQF_ONESHOT;
+
+ if (!new->thread_fn) {
+ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+ new->thread_fn = new->handler;
+ new->handler = irq_default_primary_handler;
+ }
+}
+
+static int irq_request_resources(struct irq_desc *desc)
+{
+ struct irq_data *d = &desc->irq_data;
+ struct irq_chip *c = d->chip;
+
+ return c->irq_request_resources ? c->irq_request_resources(d) : 0;
+}
+
+static void irq_release_resources(struct irq_desc *desc)
+{
+ struct irq_data *d = &desc->irq_data;
+ struct irq_chip *c = d->chip;
+
+ if (c->irq_release_resources)
+ c->irq_release_resources(d);
}
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
*/
-int setup_irq(unsigned int irq, struct irqaction *new)
+static int
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
- struct irq_desc *desc = irq_desc + irq;
- struct irqaction *old, **p;
- const char *old_name = NULL;
- unsigned long flags;
- int shared = 0;
+ struct irqaction *old, **old_ptr;
+ unsigned long flags, thread_mask = 0;
+ int ret, nested, shared = 0;
+ cpumask_var_t mask;
- if (irq >= NR_IRQS)
+ if (!desc)
return -EINVAL;
- if (desc->chip == &no_irq_chip)
+ if (desc->irq_data.chip == &no_irq_chip)
return -ENOSYS;
+ if (!try_module_get(desc->owner))
+ return -ENODEV;
+
+ /*
+ * Check whether the interrupt nests into another interrupt
+ * thread.
+ */
+ nested = irq_settings_is_nested_thread(desc);
+ if (nested) {
+ if (!new->thread_fn) {
+ ret = -EINVAL;
+ goto out_mput;
+ }
+ /*
+ * Replace the primary handler which was provided from
+ * the driver for non nested interrupt handling by the
+ * dummy function which warns when called.
+ */
+ new->handler = irq_nested_primary_handler;
+ } else {
+ if (irq_settings_can_thread(desc))
+ irq_setup_forced_threading(new);
+ }
+
/*
- * Some drivers like serial.c use request_irq() heavily,
- * so we have to be careful not to interfere with a
- * running system.
+ * Create a handler thread when a thread function is supplied
+ * and the interrupt does not nest into another interrupt
+ * thread.
*/
- if (new->flags & IRQF_SAMPLE_RANDOM) {
+ if (new->thread_fn && !nested) {
+ struct task_struct *t;
+ static const struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO/2,
+ };
+
+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+ new->name);
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
+ goto out_mput;
+ }
+
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+
/*
- * This function might sleep, we want to call it first,
- * outside of the atomic block.
- * Yes, this might clear the entropy pool if the wrong
- * driver is attempted to be loaded, without actually
- * installing a new handler, but is this really a problem,
- * only the sysadmin is able to do this.
+ * We keep the reference to the task struct even if
+ * the thread dies, so that the interrupt code never
+ * references an already freed task_struct.
*/
- rand_initialize_irq(irq);
+ get_task_struct(t);
+ new->thread = t;
+ /*
+ * Tell the thread to set its affinity. This is
+ * important for shared interrupt handlers as we do
+ * not invoke setup_affinity() for the secondary
+ * handlers as everything is already set up. Even for
+ * interrupts marked with IRQF_NO_BALANCE this is
+ * correct as we want the thread to move to the cpu(s)
+ * on which the requesting code placed the interrupt.
+ */
+ set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ }
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_thread;
}
/*
+ * Drivers are often written to work w/o knowledge about the
+ * underlying irq chip implementation, so a request for a
+ * threaded irq without a primary hard irq context handler
+ * requires the ONESHOT flag to be set. Some irq chips like
+ * MSI based interrupts are per se one shot safe. Check the
+ * chip flags, so we can avoid the unmask dance at the end of
+ * the threaded handler for those.
+ */
+ if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
+ new->flags &= ~IRQF_ONESHOT;
+
+ /*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&desc->lock, flags);
- p = &desc->action;
- old = *p;
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ old_ptr = &desc->action;
+ old = *old_ptr;
if (old) {
/*
* Can't share interrupts unless both agree to and are
* the same type (level, edge, polarity). So both flag
* fields must have IRQF_SHARED set and the bits which
- * set the trigger type must match.
+ * set the trigger type must match. Also all must
+ * agree on ONESHOT.
*/
if (!((old->flags & new->flags) & IRQF_SHARED) ||
- ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
- old_name = old->name;
+ ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+ ((old->flags ^ new->flags) & IRQF_ONESHOT))
goto mismatch;
- }
-#if defined(CONFIG_IRQ_PER_CPU)
/* All handlers must agree on per-cpuness */
if ((old->flags & IRQF_PERCPU) !=
(new->flags & IRQF_PERCPU))
goto mismatch;
-#endif
/* add new interrupt at end of irq queue */
do {
- p = &old->next;
- old = *p;
+ /*
+ * Or all existing action->thread_mask bits,
+ * so we can find the next zero bit for this
+ * new action.
+ */
+ thread_mask |= old->thread_mask;
+ old_ptr = &old->next;
+ old = *old_ptr;
} while (old);
shared = 1;
}
- *p = new;
+ /*
+ * Setup the thread mask for this irqaction for ONESHOT. For
+ * !ONESHOT irqs the thread mask is 0 so we can avoid a
+ * conditional in irq_wake_thread().
+ */
+ if (new->flags & IRQF_ONESHOT) {
+ /*
+ * Unlikely to have 32 (or, on 64-bit, 64) irqs sharing
+ * one line, but who knows.
+ */
+ if (thread_mask == ~0UL) {
+ ret = -EBUSY;
+ goto out_mask;
+ }
+ /*
+ * The thread_mask for the action is or'ed to
+ * desc->thread_active to indicate that the
+ * IRQF_ONESHOT thread handler has been woken, but not
+ * yet finished. The bit is cleared when a thread
+ * completes. When all threads of a shared interrupt
+ * line have completed desc->threads_active becomes
+ * zero and the interrupt line is unmasked. See
+ * handle.c:irq_wake_thread() for further information.
+ *
+ * If no thread is woken by primary (hard irq context)
+ * interrupt handlers, then desc->threads_active is
+ * also checked for zero to unmask the irq line in the
+ * affected hard irq flow handlers
+ * (handle_[fasteoi|level]_irq).
+ *
+ * The new action gets the first zero bit of
+ * thread_mask assigned. See the loop above which or's
+ * all existing action->thread_mask bits.
+ */
+ new->thread_mask = 1 << ffz(thread_mask);
- /* Exclude IRQ from balancing */
- if (new->flags & IRQF_NOBALANCING)
- desc->status |= IRQ_NO_BALANCING;
+ } else if (new->handler == irq_default_primary_handler &&
+ !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
+ /*
+ * The interrupt was requested with handler = NULL, so
+ * we use the default primary handler for it. But it
+ * does not have the oneshot flag set. In combination
+ * with level interrupts this is deadly, because the
+ * default primary handler just wakes the thread, then
+ * the irq line is re-enabled, but the device still
+ * has the level irq asserted. Rinse and repeat....
+ *
+ * While this works for edge type interrupts, we play
+ * it safe and reject unconditionally because we can't
+ * say for sure which type this interrupt really
+ * has. The type flags are unreliable as the
+ * underlying chip implementation can override them.
+ */
+ pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+ irq);
+ ret = -EINVAL;
+ goto out_mask;
+ }
if (!shared) {
- irq_chip_set_defaults(desc->chip);
+ ret = irq_request_resources(desc);
+ if (ret) {
+ pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
+ new->name, irq, desc->irq_data.chip->name);
+ goto out_mask;
+ }
-#if defined(CONFIG_IRQ_PER_CPU)
- if (new->flags & IRQF_PERCPU)
- desc->status |= IRQ_PER_CPU;
-#endif
+ init_waitqueue_head(&desc->wait_for_threads);
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
- if (desc->chip && desc->chip->set_type)
- desc->chip->set_type(irq,
- new->flags & IRQF_TRIGGER_MASK);
- else
- /*
- * IRQF_TRIGGER_* but the PIC does not support
- * multiple flow-types?
- */
- printk(KERN_WARNING "No IRQF_TRIGGER set_type "
- "function for IRQ %d (%s)\n", irq,
- desc->chip ? desc->chip->name :
- "unknown");
- } else
- compat_irq_chip_set_default_handler(desc);
-
- desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
- IRQ_INPROGRESS);
-
- if (!(desc->status & IRQ_NOAUTOEN)) {
- desc->depth = 0;
- desc->status &= ~IRQ_DISABLED;
- if (desc->chip->startup)
- desc->chip->startup(irq);
- else
- desc->chip->enable(irq);
- } else
+ ret = __irq_set_trigger(desc, irq,
+ new->flags & IRQF_TRIGGER_MASK);
+
+ if (ret)
+ goto out_mask;
+ }
+
+ desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+ IRQS_ONESHOT | IRQS_WAITING);
+ irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+
+ if (new->flags & IRQF_PERCPU) {
+ irqd_set(&desc->irq_data, IRQD_PER_CPU);
+ irq_settings_set_per_cpu(desc);
+ }
+
+ if (new->flags & IRQF_ONESHOT)
+ desc->istate |= IRQS_ONESHOT;
+
+ if (irq_settings_can_autoenable(desc))
+ irq_startup(desc, true);
+ else
/* Undo nested disables: */
desc->depth = 1;
+
+ /* Exclude IRQ from balancing if requested */
+ if (new->flags & IRQF_NOBALANCING) {
+ irq_settings_set_no_balancing(desc);
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
+
+ /* Set default affinity mask once everything is setup */
+ setup_affinity(irq, desc, mask);
+
+ } else if (new->flags & IRQF_TRIGGER_MASK) {
+ unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+ unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+ if (nmsk != omsk)
+ /* hope the handler works with current trigger mode */
+ pr_warning("irq %d uses trigger mode %u; requested %u\n",
+ irq, nmsk, omsk);
}
+
+ new->irq = irq;
+ *old_ptr = new;
+
/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
desc->irqs_unhandled = 0;
- spin_unlock_irqrestore(&desc->lock, flags);
- new->irq = irq;
- register_irq_proc(irq);
+ /*
+ * Check whether we disabled the irq via the spurious handler
+ * before. Reenable it and give it another chance.
+ */
+ if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
+ desc->istate &= ~IRQS_SPURIOUS_DISABLED;
+ __enable_irq(desc, irq, false);
+ }
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ /*
+ * Strictly no need to wake it up, but hung_task complains
+ * when no hard interrupt wakes the thread up.
+ */
+ if (new->thread)
+ wake_up_process(new->thread);
+
+ register_irq_proc(irq, desc);
new->dir = NULL;
register_handler_proc(irq, new);
+ free_cpumask_var(mask);
return 0;
mismatch:
-#ifdef CONFIG_DEBUG_SHIRQ
if (!(new->flags & IRQF_PROBE_SHARED)) {
- printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
- if (old_name)
- printk(KERN_ERR "current handler: %s\n", old_name);
+ pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+ irq, new->flags, new->name, old->flags, old->name);
+#ifdef CONFIG_DEBUG_SHIRQ
dump_stack();
+#endif
+ }
+ ret = -EBUSY;
+
+out_mask:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ free_cpumask_var(mask);
+
+out_thread:
+ if (new->thread) {
+ struct task_struct *t = new->thread;
+
+ new->thread = NULL;
+ kthread_stop(t);
+ put_task_struct(t);
+ }
+out_mput:
+ module_put(desc->owner);
+ return ret;
+}
+
+/**
+ * setup_irq - setup an interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically setup interrupts in the early boot process.
+ */
+int setup_irq(unsigned int irq, struct irqaction *act)
+{
+ int retval;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ return -EINVAL;
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, act);
+ chip_bus_sync_unlock(desc);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(setup_irq);
+
+/*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
+ */
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action, **action_ptr;
+ unsigned long flags;
+
+ WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+ if (!desc)
+ return NULL;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ /*
+ * There can be multiple actions per IRQ descriptor, find the right
+ * one based on the dev_id:
+ */
+ action_ptr = &desc->action;
+ for (;;) {
+ action = *action_ptr;
+
+ if (!action) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return NULL;
+ }
+
+ if (action->dev_id == dev_id)
+ break;
+ action_ptr = &action->next;
+ }
+
+ /* Found it - now remove it from the list of entries: */
+ *action_ptr = action->next;
+
+ /* If this was the last handler, shut down the IRQ line: */
+ if (!desc->action) {
+ irq_shutdown(desc);
+ irq_release_resources(desc);
+ }
+
+#ifdef CONFIG_SMP
+ /* make sure affinity_hint is cleaned up */
+ if (WARN_ON_ONCE(desc->affinity_hint))
+ desc->affinity_hint = NULL;
+#endif
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ unregister_handler_proc(irq, action);
+
+ /* Make sure it's not being used on another CPU: */
+ synchronize_irq(irq);
+
+#ifdef CONFIG_DEBUG_SHIRQ
+ /*
+ * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+ * event to happen even now that it's being freed, so let's make sure that
+ * is so by doing an extra call to the handler ....
+ *
+ * ( We do this after actually deregistering it, to make sure that a
+ * 'real' IRQ doesn't run in parallel with our fake. )
+ */
+ if (action->flags & IRQF_SHARED) {
+ local_irq_save(flags);
+ action->handler(irq, dev_id);
+ local_irq_restore(flags);
}
#endif
- spin_unlock_irqrestore(&desc->lock, flags);
- return -EBUSY;
+
+ if (action->thread) {
+ kthread_stop(action->thread);
+ put_task_struct(action->thread);
+ }
+
+ module_put(desc->owner);
+ return action;
+}
+
+/**
+ * remove_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ __free_irq(irq, act->dev_id);
}
+EXPORT_SYMBOL_GPL(remove_irq);
/**
- * free_irq - free an interrupt
+ * free_irq - free an interrupt allocated with request_irq
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
@@ -418,80 +1412,31 @@ mismatch:
*/
void free_irq(unsigned int irq, void *dev_id)
{
- struct irq_desc *desc;
- struct irqaction **p;
- unsigned long flags;
+ struct irq_desc *desc = irq_to_desc(irq);
- WARN_ON(in_interrupt());
- if (irq >= NR_IRQS)
+ if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return;
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
- p = &desc->action;
- for (;;) {
- struct irqaction *action = *p;
-
- if (action) {
- struct irqaction **pp = p;
-
- p = &action->next;
- if (action->dev_id != dev_id)
- continue;
-
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
-
- /* Currently used only by UML, might disappear one day.*/
-#ifdef CONFIG_IRQ_RELEASE_METHOD
- if (desc->chip->release)
- desc->chip->release(irq, dev_id);
+#ifdef CONFIG_SMP
+ if (WARN_ON(desc->affinity_notify))
+ desc->affinity_notify = NULL;
#endif
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- if (desc->chip->shutdown)
- desc->chip->shutdown(irq);
- else
- desc->chip->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
- unregister_handler_proc(irq, action);
-
- /* Make sure it's not being used on another CPU */
- synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
- /*
- * It's a shared IRQ -- the driver ought to be
- * prepared for it to happen even now it's
- * being freed, so let's make sure.... We do
- * this after actually deregistering it, to
- * make sure that a 'real' IRQ doesn't run in
- * parallel with our fake
- */
- if (action->flags & IRQF_SHARED) {
- local_irq_save(flags);
- action->handler(irq, dev_id);
- local_irq_restore(flags);
- }
-#endif
- kfree(action);
- return;
- }
- printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
-#ifdef CONFIG_DEBUG_SHIRQ
- dump_stack();
-#endif
- spin_unlock_irqrestore(&desc->lock, flags);
- return;
- }
+ chip_bus_lock(desc);
+ kfree(__free_irq(irq, dev_id));
+ chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
/**
- * request_irq - allocate an interrupt line
+ * request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * If NULL and thread_fn != NULL the default
+ * primary handler is installed
+ * @thread_fn: Function called from the irq handler thread
+ * If NULL, no irq thread is created
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
@@ -503,6 +1448,15 @@ EXPORT_SYMBOL(free_irq);
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
+ * If you want to set up a threaded irq handler for your device
+ * then you need to supply @handler and @thread_fn. @handler is
+ * still called in hard interrupt context and has to check
+ * whether the interrupt originates from the device. If yes it
+ * needs to disable the interrupt on the device and return
+ * IRQ_WAKE_THREAD which will wake up the handler thread and run
+ * @thread_fn. This split handler design is necessary to support
+ * shared interrupts.
+ *
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
@@ -513,22 +1467,17 @@ EXPORT_SYMBOL(free_irq);
* Flags:
*
* IRQF_SHARED Interrupt is shared
- * IRQF_DISABLED Disable local interrupts while processing
- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
+ * IRQF_TRIGGER_* Specify active edge(s) or level
*
*/
-int request_irq(unsigned int irq, irq_handler_t handler,
- unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn, unsigned long irqflags,
+ const char *devname, void *dev_id)
{
struct irqaction *action;
+ struct irq_desc *desc;
int retval;
-#ifdef CONFIG_LOCKDEP
- /*
- * Lockdep wants atomic interrupt handlers:
- */
- irqflags |= IRQF_DISABLED;
-#endif
/*
* Sanity-check: shared interrupts must pass in a real dev-ID,
* otherwise we'll have trouble later trying to figure out
@@ -537,46 +1486,287 @@ int request_irq(unsigned int irq, irq_handler_t handler,
*/
if ((irqflags & IRQF_SHARED) && !dev_id)
return -EINVAL;
- if (irq >= NR_IRQS)
- return -EINVAL;
- if (irq_desc[irq].status & IRQ_NOREQUEST)
+
+ desc = irq_to_desc(irq);
+ if (!desc)
return -EINVAL;
- if (!handler)
+
+ if (!irq_settings_can_request(desc) ||
+ WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return -EINVAL;
- action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ if (!handler) {
+ if (!thread_fn)
+ return -EINVAL;
+ handler = irq_default_primary_handler;
+ }
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
+ action->thread_fn = thread_fn;
action->flags = irqflags;
- cpus_clear(action->mask);
action->name = devname;
- action->next = NULL;
action->dev_id = dev_id;
- select_smp_affinity(irq);
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, action);
+ chip_bus_sync_unlock(desc);
-#ifdef CONFIG_DEBUG_SHIRQ
- if (irqflags & IRQF_SHARED) {
+ if (retval)
+ kfree(action);
+
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
+ if (!retval && (irqflags & IRQF_SHARED)) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
* to happen immediately, so let's make sure....
- * We do this before actually registering it, to make sure that
- * a 'real' IRQ doesn't run in parallel with our fake
+ * We disable the irq to make sure that a 'real' IRQ doesn't
+ * run in parallel with our fake.
*/
unsigned long flags;
+ disable_irq(irq);
local_irq_save(flags);
+
handler(irq, dev_id);
+
local_irq_restore(flags);
+ enable_irq(irq);
}
#endif
+ return retval;
+}
+EXPORT_SYMBOL(request_threaded_irq);
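/*
 * Editor's sketch (not part of this patch): the canonical split-handler
 * pattern the kernel-doc above describes. All "foo_*" names are
 * hypothetical; the device is assumed to expose an interrupt status
 * register that can be checked and masked from hard irq context.
 */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	if (!foo_irq_is_mine(priv))	/* shared line: check the source */
		return IRQ_NONE;
	foo_mask_device_irq(priv);	/* quiesce the device ... */
	return IRQ_WAKE_THREAD;		/* ... and defer the real work */
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	foo_handle_events(priv);	/* may sleep, e.g. I2C transfers */
	foo_unmask_device_irq(priv);
	return IRQ_HANDLED;
}

/*
 * ret = request_threaded_irq(irq, foo_hardirq, foo_thread,
 *			      IRQF_SHARED, "foo", priv);
 */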
+
+/**
+ * request_any_context_irq - allocate an interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Threaded handler for threaded interrupts.
+ * @flags: Interrupt type flags
+ * @name: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. It selects either a
+ * hardirq or threaded handling method depending on the
+ * context.
+ *
+ * On failure, it returns a negative value. On success,
+ * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
+ */
+int request_any_context_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, const char *name, void *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ if (irq_settings_is_nested_thread(desc)) {
+ ret = request_threaded_irq(irq, NULL, handler,
+ flags, name, dev_id);
+ return !ret ? IRQC_IS_NESTED : ret;
+ }
+
+ ret = request_irq(irq, handler, flags, name, dev_id);
+ return !ret ? IRQC_IS_HARDIRQ : ret;
+}
+EXPORT_SYMBOL_GPL(request_any_context_irq);
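/*
 * Editor's sketch (not part of this patch): a non-negative return is
 * success; the value tells the caller which context was chosen.
 * "foo_handler" is hypothetical.
 */
static int foo_request(struct device *dev, unsigned int irq, void *priv)
{
	int ret = request_any_context_irq(irq, foo_handler, 0, "foo", priv);

	if (ret < 0)
		return ret;
	dev_dbg(dev, "foo irq runs in %s context\n",
		ret == IRQC_IS_NESTED ? "nested thread" : "hardirq");
	return 0;
}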
+
+void enable_percpu_irq(unsigned int irq, unsigned int type)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+ if (!desc)
+ return;
+
+ type &= IRQ_TYPE_SENSE_MASK;
+ if (type != IRQ_TYPE_NONE) {
+ int ret;
+
+ ret = __irq_set_trigger(desc, irq, type);
+
+ if (ret) {
+ WARN(1, "failed to set type for IRQ%d\n", irq);
+ goto out;
+ }
+ }
+
+ irq_percpu_enable(desc, cpu);
+out:
+ irq_put_desc_unlock(desc, flags);
+}
+EXPORT_SYMBOL_GPL(enable_percpu_irq);
+
+void disable_percpu_irq(unsigned int irq)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+ if (!desc)
+ return;
+
+ irq_percpu_disable(desc, cpu);
+ irq_put_desc_unlock(desc, flags);
+}
+EXPORT_SYMBOL_GPL(disable_percpu_irq);
+
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ unsigned long flags;
+
+ WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+ if (!desc)
+ return NULL;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ action = desc->action;
+ if (!action || action->percpu_dev_id != dev_id) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ goto bad;
+ }
+
+ if (!cpumask_empty(desc->percpu_enabled)) {
+ WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+ irq, cpumask_first(desc->percpu_enabled));
+ goto bad;
+ }
+
+ /* Found it - now remove it from the list of entries: */
+ desc->action = NULL;
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ unregister_handler_proc(irq, action);
+
+ module_put(desc->owner);
+ return action;
+
+bad:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return NULL;
+}
+
+/**
+ * remove_percpu_irq - free a per-cpu interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc && irq_settings_is_per_cpu_devid(desc))
+ __free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ * free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove a percpu interrupt handler. The handler is removed, but
+ * the interrupt line is not disabled. This must be done on each
+ * CPU before calling this function. The function does not return
+ * until any executing interrupts for this IRQ have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc || !irq_settings_is_per_cpu_devid(desc))
+ return;
+
+ chip_bus_lock(desc);
+ kfree(__free_percpu_irq(irq, dev_id));
+ chip_bus_sync_unlock(desc);
+}
+
+/**
+ * setup_percpu_irq - setup a per-cpu interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically setup per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int retval;
+
+ if (!desc || !irq_settings_is_per_cpu_devid(desc))
+ return -EINVAL;
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, act);
+ chip_bus_sync_unlock(desc);
+
+ return retval;
+}
+
+/**
+ * request_percpu_irq - allocate a percpu interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources, but doesn't
+ * automatically enable the interrupt. It has to be done on each
+ * CPU using enable_percpu_irq().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *dev_id)
+{
+ struct irqaction *action;
+ struct irq_desc *desc;
+ int retval;
+
+ if (!dev_id)
+ return -EINVAL;
+
+ desc = irq_to_desc(irq);
+ if (!desc || !irq_settings_can_request(desc) ||
+ !irq_settings_is_per_cpu_devid(desc))
+ return -EINVAL;
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
+ action->name = devname;
+ action->percpu_dev_id = dev_id;
+
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, action);
+ chip_bus_sync_unlock(desc);
- retval = setup_irq(irq, action);
if (retval)
kfree(action);
return retval;
}
-EXPORT_SYMBOL(request_irq);
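/*
 * Editor's sketch (not part of this patch): the request/enable split
 * for per-cpu interrupts. request_percpu_irq() installs the handler
 * once; each CPU then enables its own copy of the line. "struct
 * foo_pcpu_data" and the foo_* helpers are hypothetical.
 */
static DEFINE_PER_CPU(struct foo_pcpu_data, foo_pcpu);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	struct foo_pcpu_data *data = dev_id;	/* this CPU's instance */

	return foo_handle_local_event(data);
}

/*
 * Once, at init:
 *	request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu);
 * On each CPU, e.g. from a hotplug callback:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 * Teardown is the mirror image: disable_percpu_irq() on each CPU, then
 * free_percpu_irq(irq, &foo_pcpu).
 */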
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 77b7acc875c..ca3f4aaff70 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,75 +1,72 @@
#include <linux/irq.h>
+#include <linux/interrupt.h>
-void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
- struct irq_desc *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- desc->status |= IRQ_MOVE_PENDING;
- irq_desc[irq].pending_mask = mask;
- spin_unlock_irqrestore(&desc->lock, flags);
-}
+#include "internals.h"
-void move_masked_irq(int irq)
+void irq_move_masked_irq(struct irq_data *idata)
{
- struct irq_desc *desc = irq_desc + irq;
- cpumask_t tmp;
+ struct irq_desc *desc = irq_data_to_desc(idata);
+ struct irq_chip *chip = idata->chip;
- if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+ if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
return;
/*
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
*/
- if (CHECK_IRQ_PER_CPU(desc->status)) {
+ if (!irqd_can_balance(&desc->irq_data)) {
WARN_ON(1);
return;
}
- desc->status &= ~IRQ_MOVE_PENDING;
+ irqd_clr_move_pending(&desc->irq_data);
- if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
+ if (unlikely(cpumask_empty(desc->pending_mask)))
return;
- if (!desc->chip->set_affinity)
+ if (!chip->irq_set_affinity)
return;
- assert_spin_locked(&desc->lock);
-
- cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
+ assert_raw_spin_locked(&desc->lock);
/*
* If there was a valid mask to work with, please
* do the disable, re-program, enable sequence.
* This is *not* particularly important for level triggered
* but in an edge trigger case, we might be setting rte
- * when an active trigger is comming in. This could
+ * when an active trigger is coming in. This could
* cause some ioapics to mal-function.
* Being paranoid, I guess!
*
* For correct operation this depends on the caller
* masking the irqs.
*/
- if (likely(!cpus_empty(tmp))) {
- desc->chip->set_affinity(irq,tmp);
- }
- cpus_clear(irq_desc[irq].pending_mask);
+ if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+ irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
+
+ cpumask_clear(desc->pending_mask);
}
-void move_native_irq(int irq)
+void irq_move_irq(struct irq_data *idata)
{
- struct irq_desc *desc = irq_desc + irq;
+ bool masked;
- if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+ if (likely(!irqd_is_setaffinity_pending(idata)))
return;
- if (unlikely(desc->status & IRQ_DISABLED))
+ if (unlikely(irqd_irq_disabled(idata)))
return;
- desc->chip->mask(irq);
- move_masked_irq(irq);
- desc->chip->unmask(irq);
+ /*
+ * Be careful vs. already masked interrupts. If this is a
+ * threaded interrupt with ONESHOT set, we can end up with an
+ * interrupt storm.
+ */
+ masked = irqd_irq_masked(idata);
+ if (!masked)
+ idata->chip->irq_mask(idata);
+ irq_move_masked_irq(idata);
+ if (!masked)
+ idata->chip->irq_unmask(idata);
}
-
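
The move hooks are meant to be driven from the architecture's interrupt flow, typically at ack time when the line can be reprogrammed safely. A hedged sketch of how an arch ack callback might use the new entry point (the callback name is an assumption):

#include <linux/irq.h>

static void my_arch_irq_ack(struct irq_data *data)	/* illustrative */
{
	/*
	 * Performs the mask -> irq_move_masked_irq() -> unmask dance
	 * internally, skipping mask/unmask if the line is already
	 * masked (the ONESHOT interrupt-storm case noted above).
	 */
	irq_move_irq(data);

	/* ... followed by the real ack/EOI to the controller ... */
}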
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
new file mode 100644
index 00000000000..abcd6ca86cb
--- /dev/null
+++ b/kernel/irq/pm.c
@@ -0,0 +1,130 @@
+/*
+ * linux/kernel/irq/pm.c
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file contains power management functions related to interrupts.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/syscore_ops.h>
+
+#include "internals.h"
+
+/**
+ * suspend_device_irqs - disable all currently enabled interrupt lines
+ *
+ * During system-wide suspend or hibernation, device drivers need to be
+ * prevented from receiving interrupts; this function is provided for that
+ * purpose.
+ * It marks all interrupt lines in use, except for the timer ones, as disabled
+ * and sets the IRQS_SUSPENDED flag for each of them.
+ */
+void suspend_device_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ __disable_irq(desc, irq, true);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ for_each_irq_desc(irq, desc)
+ if (desc->istate & IRQS_SUSPENDED)
+ synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(suspend_device_irqs);
+
+static void resume_irqs(bool want_early)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc) {
+ unsigned long flags;
+ bool is_early = desc->action &&
+ desc->action->flags & IRQF_EARLY_RESUME;
+
+ if (!is_early && want_early)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ __enable_irq(desc, irq, true);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+/**
+ * irq_pm_syscore_resume - enable interrupt lines early
+ *
+ * Enable all interrupt lines with %IRQF_EARLY_RESUME set.
+ */
+static void irq_pm_syscore_resume(void)
+{
+ resume_irqs(true);
+}
+
+static struct syscore_ops irq_pm_syscore_ops = {
+ .resume = irq_pm_syscore_resume,
+};
+
+static int __init irq_pm_init_ops(void)
+{
+ register_syscore_ops(&irq_pm_syscore_ops);
+ return 0;
+}
+
+device_initcall(irq_pm_init_ops);
+
+/**
+ * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
+ *
+ * Enable all non-%IRQF_EARLY_RESUME interrupt lines previously
+ * disabled by suspend_device_irqs() that have the IRQS_SUSPENDED flag
+ * set as well as those with %IRQF_FORCE_RESUME.
+ */
+void resume_device_irqs(void)
+{
+ resume_irqs(false);
+}
+EXPORT_SYMBOL_GPL(resume_device_irqs);
+
+/**
+ * check_wakeup_irqs - check if any wake-up interrupts are pending
+ */
+int check_wakeup_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc) {
+ /*
+ * Only interrupts which are marked as wakeup source
+ * and have not been disabled before the suspend check
+ * can abort suspend.
+ */
+ if (irqd_is_wakeup_set(&desc->irq_data)) {
+ if (desc->depth == 1 && desc->istate & IRQS_PENDING)
+ return -EBUSY;
+ continue;
+ }
+ /*
+ * Check the non wakeup interrupts whether they need
+ * to be masked before finally going into suspend
+ * state. That's for hardware which has no wakeup
+ * source configuration facility. The chip
+ * implementation indicates that with
+ * IRQCHIP_MASK_ON_SUSPEND.
+ */
+ if (desc->istate & IRQS_SUSPENDED &&
+ irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
+ mask_irq(desc);
+ }
+
+ return 0;
+}
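
Taken together, these helpers bracket the late "noirq" phase of suspend. A simplified sketch of the expected ordering (the enter function is an illustrative stand-in for the platform-specific suspend step; error unwinding is trimmed):

#include <linux/interrupt.h>

static int my_suspend_late(void)	/* hypothetical platform glue */
{
	int error;

	/* Disable all in-use non-timer lines, marking them IRQS_SUSPENDED */
	suspend_device_irqs();

	/* A pending wakeup-armed line aborts the transition with -EBUSY */
	error = check_wakeup_irqs();
	if (!error)
		error = my_platform_enter();	/* illustrative */

	/*
	 * Undo suspend_device_irqs(); lines flagged IRQF_EARLY_RESUME
	 * came back earlier via the syscore resume hook above.
	 */
	resume_device_irqs();
	return error;
}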
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c2f2ccb0549..ac1ba2f1103 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -7,8 +7,11 @@
*/
#include <linux/irq.h>
+#include <linux/gfp.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
#include "internals.h"
@@ -16,23 +19,42 @@ static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP
-static int irq_affinity_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
- struct irq_desc *desc = irq_desc + (long)data;
- cpumask_t *mask = &desc->affinity;
- int len;
+ struct irq_desc *desc = irq_to_desc((long)m->private);
+ const struct cpumask *mask = desc->irq_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (desc->status & IRQ_MOVE_PENDING)
- mask = &desc->pending_mask;
+ if (irqd_is_setaffinity_pending(&desc->irq_data))
+ mask = desc->pending_mask;
#endif
- len = cpumask_scnprintf(page, count, *mask);
+ if (type)
+ seq_cpumask_list(m, mask);
+ else
+ seq_cpumask(m, mask);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
+{
+ struct irq_desc *desc = irq_to_desc((long)m->private);
+ unsigned long flags;
+ cpumask_var_t mask;
- if (count - len < 2)
- return -EINVAL;
- len += sprintf(page + len, "\n");
- return len;
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->affinity_hint)
+ cpumask_copy(mask, desc->affinity_hint);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ seq_cpumask(m, mask);
+ seq_putc(m, '\n');
+ free_cpumask_var(mask);
+
+ return 0;
}
#ifndef is_affinity_mask_valid
@@ -40,63 +62,220 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
#endif
int no_irq_affinity;
-static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
- unsigned int irq = (int)(long)data, full_count = count, err;
- cpumask_t new_value, tmp;
+ return show_irq_affinity(0, m, v);
+}
- if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
- irq_balancing_disabled(irq))
+static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
+{
+ return show_irq_affinity(1, m, v);
+}
+
+
+static ssize_t write_irq_affinity(int type, struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
+ cpumask_var_t new_value;
+ int err;
+
+ if (!irq_can_set_affinity(irq) || no_irq_affinity)
return -EIO;
- err = cpumask_parse_user(buffer, count, new_value);
+ if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (type)
+ err = cpumask_parselist_user(buffer, count, new_value);
+ else
+ err = cpumask_parse_user(buffer, count, new_value);
if (err)
- return err;
+ goto free_cpumask;
- if (!is_affinity_mask_valid(new_value))
- return -EINVAL;
+ if (!is_affinity_mask_valid(new_value)) {
+ err = -EINVAL;
+ goto free_cpumask;
+ }
/*
	 * Do not allow disabling IRQs completely - it's too easy a
* way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
- cpus_and(tmp, new_value, cpu_online_map);
- if (cpus_empty(tmp))
+ if (!cpumask_intersects(new_value, cpu_online_mask)) {
/* Special case for empty set - allow the architecture
code to set default SMP affinity. */
- return select_smp_affinity(irq) ? -EINVAL : full_count;
+ err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
+ } else {
+ irq_set_affinity(irq, new_value);
+ err = count;
+ }
+
+free_cpumask:
+ free_cpumask_var(new_value);
+ return err;
+}
+
+static ssize_t irq_affinity_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ return write_irq_affinity(0, file, buffer, count, pos);
+}
+
+static ssize_t irq_affinity_list_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ return write_irq_affinity(1, file, buffer, count, pos);
+}
+
+static int irq_affinity_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
+}
+
+static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
+}
+
+static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations irq_affinity_proc_fops = {
+ .open = irq_affinity_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = irq_affinity_proc_write,
+};
+
+static const struct file_operations irq_affinity_hint_proc_fops = {
+ .open = irq_affinity_hint_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations irq_affinity_list_proc_fops = {
+ .open = irq_affinity_list_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = irq_affinity_list_proc_write,
+};
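
From userspace the converted files behave like ordinary proc files. A small standalone example that pins IRQ 45 to CPUs 0-1 (the IRQ number is arbitrary; the write is equivalent to echo 3 > /proc/irq/45/smp_affinity from a shell):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/irq/45/smp_affinity", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Hex bitmask "3" = CPU0 | CPU1. A mask that does not intersect
	 * the online CPUs falls back to the default-affinity path. */
	if (write(fd, "3", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

The smp_affinity_list variant accepts range syntax instead ("0-1,4"), which is what the type argument to write_irq_affinity() selects via cpumask_parselist_user().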
+
+static int default_affinity_show(struct seq_file *m, void *v)
+{
+ seq_cpumask(m, irq_default_affinity);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static ssize_t default_affinity_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ cpumask_var_t new_value;
+ int err;
+
+ if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = cpumask_parse_user(buffer, count, new_value);
+ if (err)
+ goto out;
+
+ if (!is_affinity_mask_valid(new_value)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+	 * Do not allow disabling IRQs completely - it's too easy a
+ * way to make the system unusable accidentally :-) At least
+ * one online CPU still has to be targeted.
+ */
+ if (!cpumask_intersects(new_value, cpu_online_mask)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ cpumask_copy(irq_default_affinity, new_value);
+ err = count;
+
+out:
+ free_cpumask_var(new_value);
+ return err;
+}
+
+static int default_affinity_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, default_affinity_show, PDE_DATA(inode));
+}
+
+static const struct file_operations default_affinity_proc_fops = {
+ .open = default_affinity_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = default_affinity_write,
+};
+
+static int irq_node_proc_show(struct seq_file *m, void *v)
+{
+ struct irq_desc *desc = irq_to_desc((long) m->private);
- irq_set_affinity(irq, new_value);
+ seq_printf(m, "%d\n", desc->irq_data.node);
+ return 0;
+}
- return full_count;
+static int irq_node_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}
+static const struct file_operations irq_node_proc_fops = {
+ .open = irq_node_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
-static int irq_spurious_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
- struct irq_desc *d = &irq_desc[(long) data];
- return sprintf(page, "count %u\n"
- "unhandled %u\n"
- "last_unhandled %u ms\n",
- d->irq_count,
- d->irqs_unhandled,
- jiffies_to_msecs(d->last_unhandled));
+ struct irq_desc *desc = irq_to_desc((long) m->private);
+
+ seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
+ desc->irq_count, desc->irqs_unhandled,
+ jiffies_to_msecs(desc->last_unhandled));
+ return 0;
}
+static int irq_spurious_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations irq_spurious_proc_fops = {
+ .open = irq_spurious_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#define MAX_NAMELEN 128
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for (action = desc->action ; action; action = action->next) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
@@ -104,15 +283,16 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
break;
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
char name [MAX_NAMELEN];
+ struct irq_desc *desc = irq_to_desc(irq);
- if (!irq_desc[irq].dir || action->dir || !action->name ||
+ if (!desc->dir || action->dir || !action->name ||
!name_unique(irq, action))
return;
@@ -120,70 +300,185 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
snprintf(name, MAX_NAMELEN, "%s", action->name);
/* create /proc/irq/1234/handler/ */
- action->dir = proc_mkdir(name, irq_desc[irq].dir);
+ action->dir = proc_mkdir(name, desc->dir);
}
#undef MAX_NAMELEN
#define MAX_NAMELEN 10
-void register_irq_proc(unsigned int irq)
+void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
char name [MAX_NAMELEN];
- struct proc_dir_entry *entry;
- if (!root_irq_dir ||
- (irq_desc[irq].chip == &no_irq_chip) ||
- irq_desc[irq].dir)
+ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
return;
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%d", irq);
/* create /proc/irq/1234 */
- irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
+ desc->dir = proc_mkdir(name, root_irq_dir);
+ if (!desc->dir)
+ return;
#ifdef CONFIG_SMP
- {
- /* create /proc/irq/<irq>/smp_affinity */
- entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
-
- if (entry) {
- entry->data = (void *)(long)irq;
- entry->read_proc = irq_affinity_read_proc;
- entry->write_proc = irq_affinity_write_proc;
- }
- }
+ /* create /proc/irq/<irq>/smp_affinity */
+ proc_create_data("smp_affinity", 0644, desc->dir,
+ &irq_affinity_proc_fops, (void *)(long)irq);
+
+ /* create /proc/irq/<irq>/affinity_hint */
+ proc_create_data("affinity_hint", 0444, desc->dir,
+ &irq_affinity_hint_proc_fops, (void *)(long)irq);
+
+ /* create /proc/irq/<irq>/smp_affinity_list */
+ proc_create_data("smp_affinity_list", 0644, desc->dir,
+ &irq_affinity_list_proc_fops, (void *)(long)irq);
+
+ proc_create_data("node", 0444, desc->dir,
+ &irq_node_proc_fops, (void *)(long)irq);
#endif
- entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
- if (entry) {
- entry->data = (void *)(long)irq;
- entry->read_proc = irq_spurious_read;
- }
+ proc_create_data("spurious", 0444, desc->dir,
+ &irq_spurious_proc_fops, (void *)(long)irq);
+}
+
+void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
+{
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || !desc->dir)
+ return;
+#ifdef CONFIG_SMP
+ remove_proc_entry("smp_affinity", desc->dir);
+ remove_proc_entry("affinity_hint", desc->dir);
+ remove_proc_entry("smp_affinity_list", desc->dir);
+ remove_proc_entry("node", desc->dir);
+#endif
+ remove_proc_entry("spurious", desc->dir);
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%u", irq);
+ remove_proc_entry(name, root_irq_dir);
}
#undef MAX_NAMELEN
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
- if (action->dir)
- remove_proc_entry(action->dir->name, irq_desc[irq].dir);
+ proc_remove(action->dir);
+}
+
+static void register_default_affinity_proc(void)
+{
+#ifdef CONFIG_SMP
+ proc_create("irq/default_smp_affinity", 0644, NULL,
+ &default_affinity_proc_fops);
+#endif
}
void init_irq_proc(void)
{
- int i;
+ unsigned int irq;
+ struct irq_desc *desc;
/* create /proc/irq */
root_irq_dir = proc_mkdir("irq", NULL);
if (!root_irq_dir)
return;
+ register_default_affinity_proc();
+
/*
* Create entries for all existing IRQs.
*/
- for (i = 0; i < NR_IRQS; i++)
- register_irq_proc(i);
+ for_each_irq_desc(irq, desc) {
+ if (!desc)
+ continue;
+
+ register_irq_proc(irq, desc);
+ }
}
+#ifdef CONFIG_GENERIC_IRQ_SHOW
+
+int __weak arch_show_interrupts(struct seq_file *p, int prec)
+{
+ return 0;
+}
+
+#ifndef ACTUAL_NR_IRQS
+# define ACTUAL_NR_IRQS nr_irqs
+#endif
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ static int prec;
+
+ unsigned long flags, any_count = 0;
+ int i = *(loff_t *) v, j;
+ struct irqaction *action;
+ struct irq_desc *desc;
+
+ if (i > ACTUAL_NR_IRQS)
+ return 0;
+
+ if (i == ACTUAL_NR_IRQS)
+ return arch_show_interrupts(p, prec);
+
+ /* print header and calculate the width of the first column */
+ if (i == 0) {
+ for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
+ j *= 10;
+
+ seq_printf(p, "%*s", prec + 8, "");
+ for_each_online_cpu(j)
+ seq_printf(p, "CPU%-8d", j);
+ seq_putc(p, '\n');
+ }
+
+ desc = irq_to_desc(i);
+ if (!desc)
+ return 0;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ for_each_online_cpu(j)
+ any_count |= kstat_irqs_cpu(i, j);
+ action = desc->action;
+ if (!action && !any_count)
+ goto out;
+
+ seq_printf(p, "%*d: ", prec, i);
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+
+ if (desc->irq_data.chip) {
+ if (desc->irq_data.chip->irq_print_chip)
+ desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
+ else if (desc->irq_data.chip->name)
+ seq_printf(p, " %8s", desc->irq_data.chip->name);
+ else
+ seq_printf(p, " %8s", "-");
+ } else {
+ seq_printf(p, " %8s", "None");
+ }
+ if (desc->irq_data.domain)
+ seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
+ seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
+#endif
+ if (desc->name)
+ seq_printf(p, "-%-8s", desc->name);
+
+ if (action) {
+ seq_printf(p, " %s", action->name);
+ while ((action = action->next) != NULL)
+ seq_printf(p, ", %s", action->name);
+ }
+
+ seq_putc(p, '\n');
+out:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return 0;
+}
+#endif
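
Every converted file above follows the same seq_file idiom, which is worth seeing in isolation. A minimal hedged template (names are illustrative) of the single_open() pattern this patch adopts throughout:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_proc_show(struct seq_file *m, void *v)
{
	/* m->private carries the cookie handed to proc_create_data() */
	seq_printf(m, "irq %ld\n", (long)m->private);
	return 0;
}

static int my_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_proc_show, PDE_DATA(inode));
}

static const struct file_operations my_proc_fops = {
	.open		= my_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

Registration is then a one-liner, proc_create_data("myfile", 0444, parent, &my_proc_fops, cookie), replacing the old create_proc_entry() plus read_proc/write_proc assignments.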
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index a8046791ba2..9065107f083 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
#ifdef CONFIG_HARDIRQS_SW_RESEND
/* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
/*
* Run software resends of IRQ's
@@ -33,10 +33,10 @@ static void resend_irqs(unsigned long arg)
struct irq_desc *desc;
int irq;
- while (!bitmap_empty(irqs_resend, NR_IRQS)) {
- irq = find_first_bit(irqs_resend, NR_IRQS);
+ while (!bitmap_empty(irqs_resend, nr_irqs)) {
+ irq = find_first_bit(irqs_resend, nr_irqs);
clear_bit(irq, irqs_resend);
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
local_irq_disable();
desc->handle_irq(irq, desc);
local_irq_enable();
@@ -55,24 +55,33 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq)
{
- unsigned int status = desc->status;
-
- /*
- * Make sure the interrupt is enabled, before resending it:
- */
- desc->chip->enable(irq);
-
/*
* We do not resend level type interrupts. Level type
* interrupts are resent by hardware when they are still
- * active.
+ * active. Clear the pending bit so suspend/resume does not
+ * get confused.
*/
- if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
+ if (irq_settings_is_level(desc)) {
+ desc->istate &= ~IRQS_PENDING;
+ return;
+ }
+ if (desc->istate & IRQS_REPLAY)
+ return;
+ if (desc->istate & IRQS_PENDING) {
+ desc->istate &= ~IRQS_PENDING;
+ desc->istate |= IRQS_REPLAY;
- if (!desc->chip || !desc->chip->retrigger ||
- !desc->chip->retrigger(irq)) {
+ if (!desc->irq_data.chip->irq_retrigger ||
+ !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
+ /*
+ * If the interrupt has a parent irq and runs
+ * in the thread context of the parent irq,
+ * retrigger the parent.
+ */
+ if (desc->parent_irq &&
+ irq_settings_is_nested_thread(desc))
+ irq = desc->parent_irq;
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
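
check_irq_resend() prefers a hardware retrigger when the chip offers one, and only falls back to the tasklet-driven software resend when irq_retrigger() is absent or returns 0. A hedged sketch of a chip supplying the callback (the register layout is an assumption for illustration):

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define MY_SET_PENDING	0x08	/* assumed set-pending register offset */

static void __iomem *my_ctrl_base;	/* illustrative controller mapping */

static int my_chip_irq_retrigger(struct irq_data *data)
{
	/* Setting the hwirq bit makes the controller replay the IRQ */
	writel(BIT(data->hwirq), my_ctrl_base + MY_SET_PENDING);
	return 1;	/* non-zero: retriggered in hardware, no SW resend */
}

static struct irq_chip my_chip = {
	.name		= "my-chip",
	.irq_retrigger	= my_chip_irq_retrigger,
};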
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
new file mode 100644
index 00000000000..3320b84cc60
--- /dev/null
+++ b/kernel/irq/settings.h
@@ -0,0 +1,156 @@
+/*
+ * Internal header to deal with irq_desc->status which will be renamed
+ * to irq_desc->settings.
+ */
+enum {
+ _IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
+ _IRQ_PER_CPU = IRQ_PER_CPU,
+ _IRQ_LEVEL = IRQ_LEVEL,
+ _IRQ_NOPROBE = IRQ_NOPROBE,
+ _IRQ_NOREQUEST = IRQ_NOREQUEST,
+ _IRQ_NOTHREAD = IRQ_NOTHREAD,
+ _IRQ_NOAUTOEN = IRQ_NOAUTOEN,
+ _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
+ _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
+ _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+ _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
+ _IRQ_IS_POLLED = IRQ_IS_POLLED,
+ _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+};
+
+#define IRQ_PER_CPU GOT_YOU_MORON
+#define IRQ_NO_BALANCING GOT_YOU_MORON
+#define IRQ_LEVEL GOT_YOU_MORON
+#define IRQ_NOPROBE GOT_YOU_MORON
+#define IRQ_NOREQUEST GOT_YOU_MORON
+#define IRQ_NOTHREAD GOT_YOU_MORON
+#define IRQ_NOAUTOEN GOT_YOU_MORON
+#define IRQ_NESTED_THREAD GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
+#define IRQ_IS_POLLED GOT_YOU_MORON
+#undef IRQF_MODIFY_MASK
+#define IRQF_MODIFY_MASK GOT_YOU_MORON
+
+static inline void
+irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+{
+ desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
+ desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
+}
+
+static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_PER_CPU;
+}
+
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
+static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NO_BALANCING;
+}
+
+static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_NO_BALANCING;
+}
+
+static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline void
+irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
+{
+ desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
+ desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline bool irq_settings_is_level(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_LEVEL;
+}
+
+static inline void irq_settings_clr_level(struct irq_desc *desc)
+{
+ desc->status_use_accessors &= ~_IRQ_LEVEL;
+}
+
+static inline void irq_settings_set_level(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_LEVEL;
+}
+
+static inline bool irq_settings_can_request(struct irq_desc *desc)
+{
+ return !(desc->status_use_accessors & _IRQ_NOREQUEST);
+}
+
+static inline void irq_settings_clr_norequest(struct irq_desc *desc)
+{
+ desc->status_use_accessors &= ~_IRQ_NOREQUEST;
+}
+
+static inline void irq_settings_set_norequest(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NOREQUEST;
+}
+
+static inline bool irq_settings_can_thread(struct irq_desc *desc)
+{
+ return !(desc->status_use_accessors & _IRQ_NOTHREAD);
+}
+
+static inline void irq_settings_clr_nothread(struct irq_desc *desc)
+{
+ desc->status_use_accessors &= ~_IRQ_NOTHREAD;
+}
+
+static inline void irq_settings_set_nothread(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NOTHREAD;
+}
+
+static inline bool irq_settings_can_probe(struct irq_desc *desc)
+{
+ return !(desc->status_use_accessors & _IRQ_NOPROBE);
+}
+
+static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
+{
+ desc->status_use_accessors &= ~_IRQ_NOPROBE;
+}
+
+static inline void irq_settings_set_noprobe(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NOPROBE;
+}
+
+static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
+}
+
+static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
+{
+ return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
+}
+
+static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_NESTED_THREAD;
+}
+
+static inline bool irq_settings_is_polled(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_IS_POLLED;
+}
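
The GOT_YOU_MORON redefinitions poison the raw IRQ_* flag names for every core file that includes this header: code that pokes the status bits directly no longer compiles, so the accessors above become the only way in. For instance, inside the irq core:

static void example(struct irq_desc *desc)
{
	/*
	 * desc->status_use_accessors |= IRQ_LEVEL;
	 * would preprocess to "... |= GOT_YOU_MORON" and fail with
	 * "'GOT_YOU_MORON' undeclared"; the accessor must be used:
	 */
	irq_settings_set_level(desc);
}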
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 088dabbf2d6..e2514b0e439 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -12,83 +12,174 @@
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/timer.h>
+
+#include "internals.h"
static int irqfixup __read_mostly;
+#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
+static void poll_spurious_irqs(unsigned long dummy);
+static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+static int irq_poll_cpu;
+static atomic_t irq_poll_active;
+
+/*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+ if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+ "irq poll in progress on cpu %d for irq %d\n",
+ smp_processor_id(), desc->irq_data.irq))
+ return false;
+
+#ifdef CONFIG_SMP
+ do {
+ raw_spin_unlock(&desc->lock);
+ while (irqd_irq_inprogress(&desc->irq_data))
+ cpu_relax();
+ raw_spin_lock(&desc->lock);
+ } while (irqd_irq_inprogress(&desc->irq_data));
+	/* Might have been disabled in the meantime */
+ return !irqd_irq_disabled(&desc->irq_data) && desc->action;
+#else
+ return false;
+#endif
+}
+
+
/*
* Recovery handler for misrouted interrupts.
*/
-static int misrouted_irq(int irq)
+static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
- int i;
- int ok = 0;
- int work = 0; /* Did we do work for a real IRQ */
+ irqreturn_t ret = IRQ_NONE;
+ struct irqaction *action;
- for (i = 1; i < NR_IRQS; i++) {
- struct irq_desc *desc = irq_desc + i;
- struct irqaction *action;
+ raw_spin_lock(&desc->lock);
- if (i == irq) /* Already tried */
- continue;
+ /*
+	 * PER_CPU, nested thread interrupts and interrupts explicitly
+ * marked polled are excluded from polling.
+ */
+ if (irq_settings_is_per_cpu(desc) ||
+ irq_settings_is_nested_thread(desc) ||
+ irq_settings_is_polled(desc))
+ goto out;
- spin_lock(&desc->lock);
- /* Already running on another processor */
- if (desc->status & IRQ_INPROGRESS) {
- /*
- * Already running: If it is shared get the other
- * CPU to go looking for our mystery interrupt too
- */
- if (desc->action && (desc->action->flags & IRQF_SHARED))
- desc->status |= IRQ_PENDING;
- spin_unlock(&desc->lock);
- continue;
- }
- /* Honour the normal IRQ locking */
- desc->status |= IRQ_INPROGRESS;
- action = desc->action;
- spin_unlock(&desc->lock);
-
- while (action) {
- /* Only shared IRQ handlers are safe to call */
- if (action->flags & IRQF_SHARED) {
- if (action->handler(i, action->dev_id) ==
- IRQ_HANDLED)
- ok = 1;
- }
- action = action->next;
- }
- local_irq_disable();
- /* Now clean up the flags */
- spin_lock(&desc->lock);
- action = desc->action;
+ /*
+ * Do not poll disabled interrupts unless the spurious
+	 * disabled poller asks explicitly.
+ */
+ if (irqd_irq_disabled(&desc->irq_data) && !force)
+ goto out;
+ /*
+ * All handlers must agree on IRQF_SHARED, so we test just the
+ * first.
+ */
+ action = desc->action;
+ if (!action || !(action->flags & IRQF_SHARED) ||
+ (action->flags & __IRQF_TIMER))
+ goto out;
+
+ /* Already running on another processor */
+ if (irqd_irq_inprogress(&desc->irq_data)) {
/*
- * While we were looking for a fixup someone queued a real
- * IRQ clashing with our walk:
- */
- while ((desc->status & IRQ_PENDING) && action) {
- /*
- * Perform real IRQ processing for the IRQ we deferred
- */
- work = 1;
- spin_unlock(&desc->lock);
- handle_IRQ_event(i, action);
- spin_lock(&desc->lock);
- desc->status &= ~IRQ_PENDING;
- }
- desc->status &= ~IRQ_INPROGRESS;
- /*
- * If we did actual work for the real IRQ line we must let the
- * IRQ controller clean up too
+		 * Already running: If it is shared, get the other
+ * CPU to go looking for our mystery interrupt too
*/
- if (work && desc->chip && desc->chip->end)
- desc->chip->end(i);
- spin_unlock(&desc->lock);
+ desc->istate |= IRQS_PENDING;
+ goto out;
+ }
+
+ /* Mark it poll in progress */
+ desc->istate |= IRQS_POLL_INPROGRESS;
+ do {
+ if (handle_irq_event(desc) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ /* Make sure that there is still a valid action */
+ action = desc->action;
+ } while ((desc->istate & IRQS_PENDING) && action);
+ desc->istate &= ~IRQS_POLL_INPROGRESS;
+out:
+ raw_spin_unlock(&desc->lock);
+ return ret == IRQ_HANDLED;
+}
+
+static int misrouted_irq(int irq)
+{
+ struct irq_desc *desc;
+ int i, ok = 0;
+
+ if (atomic_inc_return(&irq_poll_active) != 1)
+ goto out;
+
+ irq_poll_cpu = smp_processor_id();
+
+ for_each_irq_desc(i, desc) {
+ if (!i)
+ continue;
+
+ if (i == irq) /* Already tried */
+ continue;
+
+ if (try_one_irq(i, desc, false))
+ ok = 1;
}
+out:
+ atomic_dec(&irq_poll_active);
/* So the caller can adjust the irq error counts */
return ok;
}
+static void poll_spurious_irqs(unsigned long dummy)
+{
+ struct irq_desc *desc;
+ int i;
+
+ if (atomic_inc_return(&irq_poll_active) != 1)
+ goto out;
+ irq_poll_cpu = smp_processor_id();
+
+ for_each_irq_desc(i, desc) {
+ unsigned int state;
+
+ if (!i)
+ continue;
+
+ /* Racy but it doesn't matter */
+ state = desc->istate;
+ barrier();
+ if (!(state & IRQS_SPURIOUS_DISABLED))
+ continue;
+
+ local_irq_disable();
+ try_one_irq(i, desc, true);
+ local_irq_enable();
+ }
+out:
+ atomic_dec(&irq_poll_active);
+ mod_timer(&poll_spurious_irq_timer,
+ jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+}
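
Note the self-rearming shape: the timer is statically initialized but never started here; note_interrupt() arms it when it disables a stuck line, and each expiry re-arms it, so polling costs nothing until a spurious line actually shows up. The generic pattern, reduced to a hedged sketch:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define MY_POLL_INTERVAL (HZ / 10)	/* matches the 100ms above */

static void my_poll(unsigned long data);
static DEFINE_TIMER(my_poll_timer, my_poll, 0, 0);

static void my_poll(unsigned long data)
{
	/* ... one polling pass ... */

	/* Re-arm: keeps firing every interval once someone has done
	 * an initial mod_timer(&my_poll_timer, ...). */
	mod_timer(&my_poll_timer, jiffies + MY_POLL_INTERVAL);
}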
+
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+ if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+ return 0;
+ return 1;
+}
+
/*
* If 99,900 of the previous 100,000 interrupts have not been handled
* then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -96,17 +187,15 @@ static int misrouted_irq(int irq)
*
* (The other 100-of-100,000 interrupts may have been a correctly
* functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
*/
-
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
struct irqaction *action;
+ unsigned long flags;
- if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+ if (bad_action_ret(action_ret)) {
printk(KERN_ERR "irq event %d: bogus return value %x\n",
irq, action_ret);
} else {
@@ -116,14 +205,23 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
dump_stack();
printk(KERN_ERR "handlers:\n");
+ /*
+ * We need to take desc->lock here. note_interrupt() is called
+	 * w/o desc->lock held, but with the irq marked in progress. We might race
+ * with something else removing an action. It's ok to take
+ * desc->lock here. See synchronize_irq().
+ */
+ raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
while (action) {
- printk(KERN_ERR "[<%p>]", action->handler);
- print_symbol(" (%s)",
- (unsigned long)action->handler);
- printk("\n");
+ printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+ if (action->thread_fn)
+ printk(KERN_CONT " threaded [<%p>] %pf",
+ action->thread_fn, action->thread_fn);
+ printk(KERN_CONT "\n");
action = action->next;
}
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
static void
@@ -137,7 +235,9 @@ report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
}
}
-static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+static inline int
+try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
{
struct irqaction *action;
@@ -170,14 +270,125 @@ static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irq
return action && (action->flags & IRQF_IRQPOLL);
}
+#define SPURIOUS_DEFERRED 0x80000000
+
void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
- if (unlikely(action_ret != IRQ_HANDLED)) {
+ if (desc->istate & IRQS_POLL_INPROGRESS ||
+ irq_settings_is_polled(desc))
+ return;
+
+ if (bad_action_ret(action_ret)) {
+ report_bad_irq(irq, desc, action_ret);
+ return;
+ }
+
+ /*
+ * We cannot call note_interrupt from the threaded handler
+ * because we need to look at the compound of all handlers
+ * (primary and threaded). Aside of that in the threaded
+ * shared case we have no serialization against an incoming
+ * hardware interrupt while we are dealing with a threaded
+ * result.
+ *
+ * So in case a thread is woken, we just note the fact and
+ * defer the analysis to the next hardware interrupt.
+ *
+	 * The threaded handlers store whether they successfully
+ * handled an interrupt and we check whether that number
+ * changed versus the last invocation.
+ *
+ * We could handle all interrupts with the delayed by one
+ * mechanism, but for the non forced threaded case we'd just
+ * add pointless overhead to the straight hardirq interrupts
+ * for the sake of a few lines less code.
+ */
+ if (action_ret & IRQ_WAKE_THREAD) {
+ /*
+ * There is a thread woken. Check whether one of the
+ * shared primary handlers returned IRQ_HANDLED. If
+ * not we defer the spurious detection to the next
+ * interrupt.
+ */
+ if (action_ret == IRQ_WAKE_THREAD) {
+ int handled;
+ /*
+			 * We use bit 31 of threads_handled_last to
+			 * denote the deferred spurious detection
+			 * active. No locking necessary as
+			 * threads_handled_last is only accessed here
+ * and we have the guarantee that hard
+ * interrupts are not reentrant.
+ */
+ if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
+ desc->threads_handled_last |= SPURIOUS_DEFERRED;
+ return;
+ }
+ /*
+ * Check whether one of the threaded handlers
+ * returned IRQ_HANDLED since the last
+ * interrupt happened.
+ *
+ * For simplicity we just set bit 31, as it is
+ * set in threads_handled_last as well. So we
+ * avoid extra masking. And we really do not
+ * care about the high bits of the handled
+ * count. We just care about the count being
+ * different than the one we saw before.
+ */
+ handled = atomic_read(&desc->threads_handled);
+ handled |= SPURIOUS_DEFERRED;
+ if (handled != desc->threads_handled_last) {
+ action_ret = IRQ_HANDLED;
+ /*
+ * Note: We keep the SPURIOUS_DEFERRED
+ * bit set. We are handling the
+ * previous invocation right now.
+ * Keep it for the current one, so the
+ * next hardware interrupt will
+ * account for it.
+ */
+ desc->threads_handled_last = handled;
+ } else {
+ /*
+ * None of the threaded handlers felt
+ * responsible for the last interrupt
+ *
+ * We keep the SPURIOUS_DEFERRED bit
+ * set in threads_handled_last as we
+ * need to account for the current
+ * interrupt as well.
+ */
+ action_ret = IRQ_NONE;
+ }
+ } else {
+ /*
+ * One of the primary handlers returned
+ * IRQ_HANDLED. So we don't care about the
+ * threaded handlers on the same line. Clear
+ * the deferred detection bit.
+ *
+ * In theory we could/should check whether the
+ * deferred bit is set and take the result of
+ * the previous run into account here as
+ * well. But it's really not worth the
+ * trouble. If every other interrupt is
+ * handled we never trigger the spurious
+ * detector. And if this is just the one out
+ * of 100k unhandled ones which is handled
+			 * then we merely delay the spurious detection
+ * by one hard interrupt. Not a real problem.
+ */
+ desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
+ }
+ }
+
+ if (unlikely(action_ret == IRQ_NONE)) {
/*
* If we are seeing only the odd spurious IRQ caused by
* bus asynchronicity then don't eventually trigger an error,
- * otherwise the couter becomes a doomsday timer for otherwise
+ * otherwise the counter becomes a doomsday timer for otherwise
* working systems
*/
if (time_after(jiffies, desc->last_unhandled + HZ/10))
@@ -185,8 +396,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
else
desc->irqs_unhandled++;
desc->last_unhandled = jiffies;
- if (unlikely(action_ret != IRQ_NONE))
- report_bad_irq(irq, desc, action_ret);
}
if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
@@ -209,14 +418,17 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
* Now kill the IRQ
*/
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
- desc->status |= IRQ_DISABLED;
- desc->depth = 1;
- desc->chip->disable(irq);
+ desc->istate |= IRQS_SPURIOUS_DISABLED;
+ desc->depth++;
+ irq_disable(desc);
+
+ mod_timer(&poll_spurious_irq_timer,
+ jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
desc->irqs_unhandled = 0;
}
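
The 99,900-of-100,000 bookkeeping referenced in the comment further up sits in the unchanged lines between these hunks; for orientation, a hedged reconstruction of its shape (constants per that comment):

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		__report_bad_irq(irq, desc, action_ret);
		/* ... then the "Now kill the IRQ" branch shown above */
	}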
-int noirqdebug __read_mostly;
+bool noirqdebug __read_mostly;
int noirqdebug_setup(char *str)
{
@@ -241,7 +453,6 @@ static int __init irqfixup_setup(char *str)
__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
-MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode 2: irqpoll mode");
static int __init irqpoll_setup(char *str)
{