Diffstat (limited to 'arch/powerpc/kernel/irq.c')
 -rw-r--r--  arch/powerpc/kernel/irq.c | 49 +++++++++++++++++++++----------------------------
 1 file changed, 21 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d286a78f8..248ee7e5beb 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -304,7 +304,7 @@ void notrace restore_interrupts(void)
  * being re-enabled and generally sanitized the lazy irq state,
  * and in the latter case it will leave with interrupts hard
  * disabled and marked as such, so the local_irq_enable() call
- * in cpu_idle() will properly re-enable everything.
+ * in arch_cpu_idle() will properly re-enable everything.
  */
 bool prep_irq_for_idle(void)
 {
@@ -354,8 +354,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: ", prec, "LOC");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
-        seq_printf(p, "  Local timer interrupts\n");
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
+        seq_printf(p, "  Local timer interrupts for timer event device\n");
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
+        seq_printf(p, "  Local timer interrupts for others\n");
 
 	seq_printf(p, "%*s: ", prec, "SPU");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
@@ -389,11 +394,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  */
 u64 arch_irq_stat_cpu(unsigned int cpu)
 {
-	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
 	sum += per_cpu(irq_stat, cpu).pmu_irqs;
 	sum += per_cpu(irq_stat, cpu).mce_exceptions;
 	sum += per_cpu(irq_stat, cpu).spurious_irqs;
+	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
 #ifdef CONFIG_PPC_DOORBELL
 	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
 #endif
 
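
The two hunks above read and sum per-CPU counters from irq_stat. As a point of reference, here is a minimal sketch of the layout those hunks assume; the authoritative definition lives in arch/powerpc/include/asm/hardirq.h and may order the fields differently or carry extras:

/* Hedged sketch of the per-CPU interrupt statistics assumed above.
 * Field names match what the hunks reference; the surrounding
 * structure is an approximation, not the exact upstream definition.
 */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int timer_irqs_event;	/* decrementer ticks that serviced the clock event device */
	unsigned int timer_irqs_others;	/* decrementer ticks taken for any other reason */
	unsigned int pmu_irqs;
	unsigned int mce_exceptions;
	unsigned int spurious_irqs;
#ifdef CONFIG_PPC_DOORBELL
	unsigned int doorbell_irqs;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

With the split, /proc/interrupts shows two LOC rows per the hunk above, so timer load from the clock event device can be told apart from decrementer interrupts taken for other reasons.
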
@@ -459,7 +465,6 @@ static inline void check_stack_overflow(void)
 
 void __do_irq(struct pt_regs *regs)
 {
-	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -481,11 +486,8 @@ void __do_irq(struct pt_regs *regs)
 	/* And finally process it */
 	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
-	else {
-		desc = irq_to_desc(irq);
-		if (likely(desc))
-			desc->handle_irq(irq, desc);
-	}
+	else
+		generic_handle_irq(irq);
 
 	trace_irq_exit(regs);
 
@@ -495,14 +497,15 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct thread_info *curtp, *irqtp;
+	struct thread_info *curtp, *irqtp, *sirqtp;
 
 	/* Switch to the irq stack to handle this */
 	curtp = current_thread_info();
 	irqtp = hardirq_ctx[raw_smp_processor_id()];
+	sirqtp = softirq_ctx[raw_smp_processor_id()];
 
 	/* Already there ? */
-	if (unlikely(curtp == irqtp)) {
+	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
 		__do_irq(regs);
 		set_irq_regs(old_regs);
 		return;
@@ -552,8 +555,13 @@ void exc_lvl_ctx_init(void)
 #ifdef CONFIG_PPC64
 		cpu_nr = i;
 #else
+#ifdef CONFIG_SMP
 		cpu_nr = get_hard_smp_processor_id(i);
+#else
+		cpu_nr = 0;
+#endif
 #endif
+
 		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
 		tp = critirq_ctx[cpu_nr];
 		tp->cpu = cpu_nr;
@@ -593,7 +601,7 @@ void irq_ctx_init(void)
 	}
 }
 
-static inline void do_softirq_onstack(void)
+void do_softirq_own_stack(void)
 {
 	struct thread_info *curtp, *irqtp;
 
@@ -611,21 +619,6 @@ static inline void do_softirq_onstack(void)
 		set_bits(irqtp->flags, &curtp->flags);
 }
 
-void do_softirq(void)
-{
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending())
-		do_softirq_onstack();
-
-	local_irq_restore(flags);
-}
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
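
The __do_irq() hunk can drop the open-coded descriptor lookup because generic_handle_irq() performs the same steps. A sketch of that helper, modeled on kernel/irq/irqdesc.c of this era (the exact body may differ in the tree this patch targets):

/* Sketch of the generic helper now called from __do_irq(); it does
 * the same lookup-and-dispatch the removed lines did, plus an error
 * return when no descriptor exists for the number.
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);	/* ends up in desc->handle_irq(irq, desc) */
	return 0;
}

One nuance: the removed code silently ignored a NULL descriptor, while generic_handle_irq() reports it through its return value, which __do_irq() continues to ignore, so behavior is unchanged.
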

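Deleting the local do_softirq() is safe because the generic softirq code now owns that entry point and calls back into the arch hook renamed above. A hedged sketch of the generic caller this relies on, approximating kernel/softirq.c around the v3.13 timeframe (verify against the tree this applies to):

/* Sketch of the generic entry point in kernel/softirq.c that now
 * performs the in_interrupt() and pending checks the removed
 * powerpc copy used to do, then calls the arch-provided
 * do_softirq_own_stack() defined in the last hunk.
 */
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

powerpc's do_softirq_own_stack() keeps the existing switch to softirq_ctx[], so the only change is which layer does the guard checks.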