Diffstat (limited to 'arch/arm64/kernel/irq.c')
-rw-r--r--  arch/arm64/kernel/irq.c | 84
1 file changed, 74 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0373c6609ea..0f08dfd69eb 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -25,7 +25,7 @@
 #include <linux/irq.h>
 #include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/of_irq.h>
+#include <linux/irqchip.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
@@ -67,18 +67,82 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
-/*
- * Interrupt controllers supported by the kernel.
- */
-static const struct of_device_id intctrl_of_match[] __initconst = {
-	/* IRQ controllers { .compatible, .data } info to go here */
-	{}
-};
+void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
+{
+	if (handle_arch_irq)
+		return;
+
+	handle_arch_irq = handle_irq;
+}
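For context: set_handle_irq() is how an irqchip driver installs the top-level handler that the IRQ exception path will call. A minimal sketch, assuming a hypothetical driver (the my_irqchip_* names and my_irqchip_domain are invented for illustration):

static struct irq_domain *my_irqchip_domain;	/* hypothetical */

static void __exception_irq_entry my_irqchip_handle_irq(struct pt_regs *regs)
{
	/* Stand-in for reading the pending interrupt number from the
	 * (hypothetical) controller's registers. */
	unsigned int hwirq = my_irqchip_read_pending();

	/* Translate through the driver's irq_domain and dispatch via
	 * the generic handle_IRQ() entry point above. */
	handle_IRQ(irq_find_mapping(my_irqchip_domain, hwirq), regs);
}

static int __init my_irqchip_of_init(struct device_node *node,
				     struct device_node *parent)
{
	/* ...map registers and create my_irqchip_domain... */
	set_handle_irq(my_irqchip_handle_irq);
	return 0;
}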
 void __init init_IRQ(void)
 {
-	of_irq_init(intctrl_of_match);
-
+	irqchip_init();
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
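irqchip_init() works because drivers self-register: IRQCHIP_DECLARE() places an of_device_id entry in a linker table that irqchip_init() hands to of_irq_init(), replacing the per-arch intctrl_of_match table deleted above. The registration for the sketch above would look like this (the compatible string is invented):

IRQCHIP_DECLARE(my_irqchip, "vendor,my-irqchip", my_irqchip_of_init);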
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
+	bool ret = false;
+
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+		ret = true;
+
+	/*
+	 * When using forced irq_set_affinity() we must ensure that the
+	 * CPU being offlined is not present in the affinity mask;
+	 * otherwise it may be selected as the target CPU.
+	 */
+	affinity = cpu_online_mask;
+	c = irq_data_get_irq_chip(d);
+	if (!c->irq_set_affinity)
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
+
+	return ret;
+}
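The cpumask_any_and() test above is the whole "affinity broken" decision: it returns nr_cpu_ids or higher when the two masks have an empty intersection, i.e. when no online CPU is left in the IRQ's affinity. As a standalone illustration (irq_affinity_broken() is an invented helper, not a kernel API):

static bool irq_affinity_broken(const struct cpumask *affinity)
{
	/* cpumask_any_and() picks an arbitrary CPU present in both
	 * masks, or returns >= nr_cpu_ids if none exists. */
	return cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids;
}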
+
+/*
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_irq_desc(i, desc) {
+		bool affinity_broken;
+
+		raw_spin_lock(&desc->lock);
+		affinity_broken = migrate_one_irq(desc);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken)
+			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+					    i, smp_processor_id());
+	}
+
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
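migrate_irqs() is intended to run on the CPU going down, from the architecture's hotplug path, after that CPU has been cleared from cpu_online_mask so the forced retarget above cannot pick it again. A rough sketch of such a caller, modelled on the usual __cpu_disable() flow with platform details elided:

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* ...platform-specific checks elided... */

	/* Mark this CPU offline first, so it is no longer a valid
	 * target when migrate_irqs() retargets interrupts. */
	set_cpu_online(cpu, false);

	/* Move every IRQ currently affine to this CPU elsewhere. */
	migrate_irqs();

	return 0;
}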