Diffstat (limited to 'kernel/irq/migration.c')
-rw-r--r--  kernel/irq/migration.c  65
1 file changed, 31 insertions, 34 deletions
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 77b7acc875c..ca3f4aaff70 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,75 +1,72 @@
 #include <linux/irq.h>
+#include <linux/interrupt.h>
 
-void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-	struct irq_desc *desc = irq_desc + irq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_MOVE_PENDING;
-	irq_desc[irq].pending_mask = mask;
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
+#include "internals.h"
 
-void move_masked_irq(int irq)
+void irq_move_masked_irq(struct irq_data *idata)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	cpumask_t tmp;
+	struct irq_desc *desc = irq_data_to_desc(idata);
+	struct irq_chip *chip = idata->chip;
 
-	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
 		return;
 
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (CHECK_IRQ_PER_CPU(desc->status)) {
+	if (!irqd_can_balance(&desc->irq_data)) {
 		WARN_ON(1);
 		return;
 	}
 
-	desc->status &= ~IRQ_MOVE_PENDING;
+	irqd_clr_move_pending(&desc->irq_data);
 
-	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
-	if (!desc->chip->set_affinity)
+	if (!chip->irq_set_affinity)
 		return;
 
-	assert_spin_locked(&desc->lock);
-
-	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
+	assert_raw_spin_locked(&desc->lock);
 
 	/*
 	 * If there was a valid mask to work with, please
 	 * do the disable, re-program, enable sequence.
 	 * This is *not* particularly important for level triggered
 	 * but in a edge trigger case, we might be setting rte
-	 * when an active trigger is comming in. This could
+	 * when an active trigger is coming in. This could
 	 * cause some ioapics to mal-function.
 	 * Being paranoid i guess!
 	 *
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(!cpus_empty(tmp))) {
-		desc->chip->set_affinity(irq,tmp);
-	}
-	cpus_clear(irq_desc[irq].pending_mask);
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
+
+	cpumask_clear(desc->pending_mask);
 }
 
-void move_native_irq(int irq)
+void irq_move_irq(struct irq_data *idata)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	bool masked;
 
-	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+	if (likely(!irqd_is_setaffinity_pending(idata)))
 		return;
 
-	if (unlikely(desc->status & IRQ_DISABLED))
+	if (unlikely(irqd_irq_disabled(idata)))
 		return;
 
-	desc->chip->mask(irq);
-	move_masked_irq(irq);
-	desc->chip->unmask(irq);
+	/*
+	 * Be careful vs. already masked interrupts. If this is a
+	 * threaded interrupt with ONESHOT set, we can end up with an
+	 * interrupt storm.
+	 */
+	masked = irqd_irq_masked(idata);
+	if (!masked)
+		idata->chip->irq_mask(idata);
+	irq_move_masked_irq(idata);
+	if (!masked)
+		idata->chip->irq_unmask(idata);
 }
-
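
The bulk of this diff is a mechanical conversion from poking desc->status bits (IRQ_MOVE_PENDING, IRQ_DISABLED, CHECK_IRQ_PER_CPU) to small irqd_* accessors on struct irq_data. A minimal standalone sketch of that accessor style, assuming nothing beyond standard C; the names here (fake_irq_data, FAKE_*) are hypothetical stand-ins, not the kernel's real definitions:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the per-interrupt state word. */
	struct fake_irq_data {
		unsigned int state;
	};

	#define FAKE_MOVE_PENDING	(1u << 0)	/* affinity change queued */
	#define FAKE_IRQ_DISABLED	(1u << 1)	/* line administratively off */
	#define FAKE_IRQ_MASKED		(1u << 2)	/* masked at the chip */

	/* Callers never test ->state bits directly; they go through helpers. */
	static inline bool fake_is_setaffinity_pending(const struct fake_irq_data *d)
	{
		return d->state & FAKE_MOVE_PENDING;
	}

	static inline void fake_clr_move_pending(struct fake_irq_data *d)
	{
		d->state &= ~FAKE_MOVE_PENDING;
	}

	static inline bool fake_irq_disabled(const struct fake_irq_data *d)
	{
		return d->state & FAKE_IRQ_DISABLED;
	}

	static inline bool fake_irq_masked(const struct fake_irq_data *d)
	{
		return d->state & FAKE_IRQ_MASKED;
	}

	int main(void)
	{
		/* Mirror the checks irq_move_irq() performs on entry. */
		struct fake_irq_data d = { .state = FAKE_MOVE_PENDING | FAKE_IRQ_MASKED };

		if (fake_is_setaffinity_pending(&d) && !fake_irq_disabled(&d))
			fake_clr_move_pending(&d);

		printf("pending now: %d, masked: %d\n",
		       fake_is_setaffinity_pending(&d), fake_irq_masked(&d));
		return 0;
	}

Hiding the bit layout behind helpers like these is what lets the genirq core relocate or re-encode the state later without touching every call site, which is exactly the churn this patch is absorbing.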
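As for the behavior the two functions implement, here is a self-contained model (plain C, not kernel code; every identifier is invented for illustration): an affinity request is parked in a pending mask, applied at the next interrupt only if it names at least one online CPU, and the mask state the mover found is restored exactly as it was, which is the guard against the ONESHOT interrupt storm mentioned in the new comment.

	#include <stdbool.h>
	#include <stdio.h>

	/* All names here are illustrative; this is not the kernel's API. */
	struct fake_desc {
		unsigned int pending_mask;	/* requested CPU bitmap */
		unsigned int online_mask;	/* CPUs currently online */
		unsigned int affinity;		/* mask programmed into "hardware" */
		bool move_pending;
		bool masked;
	};

	/* Park the request; it is applied at the next interrupt. */
	static void fake_set_affinity_deferred(struct fake_desc *d, unsigned int mask)
	{
		d->pending_mask = mask;
		d->move_pending = true;
	}

	/* Mirrors irq_move_masked_irq(): caller must have masked the line. */
	static void fake_move_masked(struct fake_desc *d)
	{
		if (!d->move_pending)
			return;
		d->move_pending = false;

		/* Reprogram only if the request names at least one online CPU. */
		if (d->pending_mask & d->online_mask)
			d->affinity = d->pending_mask & d->online_mask;

		d->pending_mask = 0;
	}

	/* Mirrors irq_move_irq(): never unmask a line we did not mask. */
	static void fake_move(struct fake_desc *d)
	{
		bool masked;

		if (!d->move_pending)
			return;

		masked = d->masked;	/* already masked, e.g. ONESHOT handler */
		if (!masked)
			d->masked = true;
		fake_move_masked(d);
		if (!masked)
			d->masked = false;
	}

	int main(void)
	{
		struct fake_desc d = { .online_mask = 0x3, .affinity = 0x1 };

		fake_set_affinity_deferred(&d, 0x2);	/* request CPU 1 */
		fake_move(&d);				/* "next interrupt" */
		printf("affinity %#x, masked %d\n", d.affinity, d.masked);
		return 0;
	}

The masked-state guard matters because a threaded ONESHOT handler keeps the line masked until its thread finishes; blindly unmasking in the move path would re-enable the line while the handler thread is still running, letting the device raise the interrupt again immediately, which is the storm the comment warns about.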