diff options
Diffstat (limited to 'kernel/irq/migration.c')
| -rw-r--r-- | kernel/irq/migration.c | 46 | 
1 file changed, 24 insertions, 22 deletions
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 1d254194048..ca3f4aaff70 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -4,23 +4,23 @@  #include "internals.h" -void move_masked_irq(int irq) +void irq_move_masked_irq(struct irq_data *idata)  { -	struct irq_desc *desc = irq_to_desc(irq); -	struct irq_chip *chip = desc->irq_data.chip; +	struct irq_desc *desc = irq_data_to_desc(idata); +	struct irq_chip *chip = idata->chip; -	if (likely(!(desc->status & IRQ_MOVE_PENDING))) +	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))  		return;  	/*  	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.  	 */ -	if (CHECK_IRQ_PER_CPU(desc->status)) { +	if (!irqd_can_balance(&desc->irq_data)) {  		WARN_ON(1);  		return;  	} -	desc->status &= ~IRQ_MOVE_PENDING; +	irqd_clr_move_pending(&desc->irq_data);  	if (unlikely(cpumask_empty(desc->pending_mask)))  		return; @@ -35,36 +35,38 @@ void move_masked_irq(int irq)  	 * do the disable, re-program, enable sequence.  	 * This is *not* particularly important for level triggered  	 * but in a edge trigger case, we might be setting rte -	 * when an active trigger is comming in. This could +	 * when an active trigger is coming in. This could  	 * cause some ioapics to mal-function.  	 * Being paranoid i guess!  	 *  	 * For correct operation this depends on the caller  	 * masking the irqs.  	 
*/ -	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) -		   < nr_cpu_ids)) -		if (!chip->irq_set_affinity(&desc->irq_data, -					    desc->pending_mask, false)) { -			cpumask_copy(desc->irq_data.affinity, desc->pending_mask); -			irq_set_thread_affinity(desc); -		} +	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) +		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);  	cpumask_clear(desc->pending_mask);  } -void move_native_irq(int irq) +void irq_move_irq(struct irq_data *idata)  { -	struct irq_desc *desc = irq_to_desc(irq); +	bool masked; -	if (likely(!(desc->status & IRQ_MOVE_PENDING))) +	if (likely(!irqd_is_setaffinity_pending(idata)))  		return; -	if (unlikely(desc->status & IRQ_DISABLED)) +	if (unlikely(irqd_irq_disabled(idata)))  		return; -	desc->irq_data.chip->irq_mask(&desc->irq_data); -	move_masked_irq(irq); -	desc->irq_data.chip->irq_unmask(&desc->irq_data); +	/* +	 * Be careful vs. already masked interrupts. If this is a +	 * threaded interrupt with ONESHOT set, we can end up with an +	 * interrupt storm. +	 */ +	masked = irqd_irq_masked(idata); +	if (!masked) +		idata->chip->irq_mask(idata); +	irq_move_masked_irq(idata); +	if (!masked) +		idata->chip->irq_unmask(idata);  } -  | 
