Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c	133
1 file changed, 57 insertions, 76 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7a..248ee7e5beb 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -304,7 +304,7 @@ void notrace restore_interrupts(void)
  * being re-enabled and generally sanitized the lazy irq state,
  * and in the latter case it will leave with interrupts hard
  * disabled and marked as such, so the local_irq_enable() call
- * in cpu_idle() will properly re-enable everything.
+ * in arch_cpu_idle() will properly re-enable everything.
  */
 bool prep_irq_for_idle(void)
 {
@@ -354,8 +354,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)

 	seq_printf(p, "%*s: ", prec, "LOC");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
-        seq_printf(p, "  Local timer interrupts\n");
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
+        seq_printf(p, "  Local timer interrupts for timer event device\n");
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
+        seq_printf(p, "  Local timer interrupts for others\n");

 	seq_printf(p, "%*s: ", prec, "SPU");
 	for_each_online_cpu(j)
@@ -389,11 +394,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  */
 u64 arch_irq_stat_cpu(unsigned int cpu)
 {
-	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

 	sum += per_cpu(irq_stat, cpu).pmu_irqs;
 	sum += per_cpu(irq_stat, cpu).mce_exceptions;
 	sum += per_cpu(irq_stat, cpu).spurious_irqs;
+	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
 #ifdef CONFIG_PPC_DOORBELL
 	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
 #endif
@@ -441,50 +447,6 @@ void migrate_irqs(void)
 }
 #endif

-static inline void handle_one_irq(unsigned int irq)
-{
-	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit;
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	if (!desc)
-		return;
-
-	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[smp_processor_id()];
-
-	if (curtp == irqtp) {
-		/* We're already on the irq stack, just handle it */
-		desc->handle_irq(irq, desc);
-		return;
-	}
-
-	saved_sp_limit = current->thread.ksp_limit;
-
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context.
-	 */
-	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-			       (curtp->preempt_count & SOFTIRQ_MASK);
-
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
-
-	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-	current->thread.ksp_limit = saved_sp_limit;
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +463,8 @@ static inline void check_stack_overflow(void)
 #endif
 }

-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned int irq;

 	irq_enter();
@@ -519,18 +480,54 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();

-	/* We can hard enable interrupts now */
+	/* We can hard enable interrupts now to allow perf interrupts */
 	may_hard_irq_enable();

 	/* And finally process it */
-	if (irq != NO_IRQ)
-		handle_one_irq(irq);
-	else
+	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
+	else
+		generic_handle_irq(irq);

 	trace_irq_exit(regs);

 	irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct thread_info *curtp, *irqtp, *sirqtp;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[raw_smp_processor_id()];
+	sirqtp = softirq_ctx[raw_smp_processor_id()];
+
+	/* Already there ? */
+	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
+		__do_irq(regs);
+		set_irq_regs(old_regs);
+		return;
+	}
+
+	/* Prepare the thread_info in the irq stack */
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqtp->preempt_count = curtp->preempt_count;
+
+	/* Switch stack and call */
+	call_do_irq(regs, irqtp);
+
+	/* Restore stack limit */
+	irqtp->task = NULL;
+
+	/* Copy back updates to the thread_info */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+
 	set_irq_regs(old_regs);
 }

@@ -558,8 +555,13 @@ void exc_lvl_ctx_init(void)
 #ifdef CONFIG_PPC64
 		cpu_nr = i;
 #else
+#ifdef CONFIG_SMP
 		cpu_nr = get_hard_smp_processor_id(i);
+#else
+		cpu_nr = 0;
+#endif
 #endif
+
 		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
 		tp = critirq_ctx[cpu_nr];
 		tp->cpu = cpu_nr;
@@ -592,28 +594,22 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = 0;

 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
 	}
 }

-static inline void do_softirq_onstack(void)
+void do_softirq_own_stack(void)
 {
 	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit = current->thread.ksp_limit;

 	curtp = current_thread_info();
 	irqtp = softirq_ctx[smp_processor_id()];
 	irqtp->task = curtp->task;
 	irqtp->flags = 0;
-	current->thread.ksp_limit = (unsigned long)irqtp +
-				    _ALIGN_UP(sizeof(struct thread_info), 16);
 	call_do_softirq(irqtp);
-	current->thread.ksp_limit = saved_sp_limit;
 	irqtp->task = NULL;

 	/* Set any flag that may have been set on the
@@ -623,21 +619,6 @@ static inline void do_softirq_onstack(void)
 		set_bits(irqtp->flags, &curtp->flags);
 }

-void do_softirq(void)
-{
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending())
-		do_softirq_onstack();
-
-	local_irq_restore(flags);
-}
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
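The first two hunks split the old per-CPU timer_irqs counter into timer_irqs_event (timer interrupts that serviced the timer event device) and timer_irqs_others (the rest), and arch_irq_stat_cpu() now sums both, so the per-CPU aggregate reported to /proc/stat is unchanged. As a side effect, /proc/interrupts gains a second LOC row, along these lines (the counts below are invented for illustration):

LOC:    2037541    1935760   Local timer interrupts for timer event device
LOC:       1438       1206   Local timer interrupts for others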

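The core of the rework is the do_IRQ()/__do_irq() split: do_IRQ() only prepares the per-CPU hardirq stack (copies task, flags, and now the whole preempt_count into its thread_info) and switches to it via call_do_irq(), while __do_irq() does the actual fetch and dispatch; the ksp_limit bookkeeping disappears entirely. Copying preempt_count wholesale works because the hardirq count now comes from irq_enter()/irq_exit() inside __do_irq() rather than from the HARDIRQ_OFFSET that irq_ctx_init() used to bake into the irq stack, which is also why that initialization is removed above. The "run the handler on its own stack" idea can be sketched in user space with the glibc ucontext API; this is a minimal illustrative analogy, not kernel code, and every name in it (irq_stack_ctx, handler_on_irq_stack, IRQ_STACK_SIZE) is invented for the sketch:

/*
 * User-space analogy of the do_IRQ()/__do_irq() split: run a handler
 * on a dedicated stack, then return to the interrupted one. The
 * swapcontext() call stands in for the kernel's call_do_irq() helper.
 */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define IRQ_STACK_SIZE	(64 * 1024)

static ucontext_t main_ctx, irq_stack_ctx;

static void handler_on_irq_stack(void)
{
	int marker;	/* lives on the dedicated stack */

	printf("handler: stack near %p\n", (void *)&marker);
	/* returning resumes uc_link, i.e. main_ctx */
}

int main(void)
{
	int marker;	/* lives on the caller's stack */

	getcontext(&irq_stack_ctx);
	irq_stack_ctx.uc_stack.ss_sp = malloc(IRQ_STACK_SIZE);
	if (!irq_stack_ctx.uc_stack.ss_sp)
		return 1;
	irq_stack_ctx.uc_stack.ss_size = IRQ_STACK_SIZE;
	irq_stack_ctx.uc_link = &main_ctx;	/* where to go on return */
	makecontext(&irq_stack_ctx, handler_on_irq_stack, 0);

	printf("caller:  stack near %p\n", (void *)&marker);
	swapcontext(&main_ctx, &irq_stack_ctx);	/* the call_do_irq() step */
	printf("caller:  back on the original stack\n");

	free(irq_stack_ctx.uc_stack.ss_sp);
	return 0;
}

The two printed addresses land in different regions, mirroring what the kernel gets from the switch: the handler's locals consume the dedicated stack instead of the interrupted thread's. The copy-in/copy-out of thread_info fields around call_do_irq() (and the set_bits() of any flags on return) is what keeps current_thread_info() coherent while execution is on the other stack.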