Diffstat (limited to 'arch/parisc/kernel/irq.c')
-rw-r--r--	arch/parisc/kernel/irq.c	278
1 file changed, 229 insertions, 49 deletions
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 5024f643b3b..cfe056fe7f5 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -27,11 +27,11 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 #include <asm/io.h>
 
 #include <asm/smp.h>
+#include <asm/ldcw.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0;
 */
 static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
 
-static void cpu_mask_irq(unsigned int irq)
+static void cpu_mask_irq(struct irq_data *d)
 {
-	unsigned long eirr_bit = EIEM_MASK(irq);
+	unsigned long eirr_bit = EIEM_MASK(d->irq);
 
 	cpu_eiem &= ~eirr_bit;
 	/* Do nothing on the other CPUs.  If they get this interrupt,
@@ -63,7 +63,7 @@ static void cpu_mask_irq(unsigned int irq)
 	 * then gets disabled */
 }
 
-static void cpu_unmask_irq(unsigned int irq)
+static void __cpu_unmask_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
@@ -75,12 +75,14 @@ static void cpu_unmask_irq(unsigned int irq)
 	smp_send_all_nop();
 }
 
-void no_ack_irq(unsigned int irq) { }
-void no_end_irq(unsigned int irq) { }
+static void cpu_unmask_irq(struct irq_data *d)
+{
+	__cpu_unmask_irq(d->irq);
+}
 
-void cpu_ack_irq(unsigned int irq)
+void cpu_ack_irq(struct irq_data *d)
 {
-	unsigned long mask = EIEM_MASK(irq);
+	unsigned long mask = EIEM_MASK(d->irq);
 	int cpu = smp_processor_id();
 
 	/* Clear in EIEM so we can no longer process */
@@ -93,9 +95,9 @@ void cpu_ack_irq(unsigned int irq)
 	mtctl(mask, 23);
 }
 
-void cpu_eoi_irq(unsigned int irq)
+void cpu_eoi_irq(struct irq_data *d)
 {
-	unsigned long mask = EIEM_MASK(irq);
+	unsigned long mask = EIEM_MASK(d->irq);
 	int cpu = smp_processor_id();
 
 	/* set it in the eiems---it's no longer in process */
@@ -106,53 +108,93 @@ void cpu_eoi_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
 {
 	int cpu_dest;
 
 	/* timer and ipi have to always be received on all CPUs */
-	if (CHECK_IRQ_PER_CPU(irq)) {
-		/* Bad linux design decision.  The mask has already
-		 * been set; we must reset it */
-		cpumask_setall(irq_desc[irq].affinity);
+	if (irqd_is_per_cpu(d))
 		return -EINVAL;
-	}
 
 	/* whatever mask they set, we just allow one CPU */
-	cpu_dest = first_cpu(*dest);
+	cpu_dest = cpumask_first_and(dest, cpu_online_mask);
 
 	return cpu_dest;
 }
 
-static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+				bool force)
 {
 	int cpu_dest;
 
-	cpu_dest = cpu_check_affinity(irq, dest);
+	cpu_dest = cpu_check_affinity(d, dest);
 	if (cpu_dest < 0)
 		return -1;
 
-	cpumask_copy(irq_desc[irq].affinity, dest);
+	cpumask_copy(d->affinity, dest);
 
 	return 0;
 }
 #endif
 
 static struct irq_chip cpu_interrupt_type = {
-	.name		= "CPU",
-	.mask		= cpu_mask_irq,
-	.unmask		= cpu_unmask_irq,
-	.ack		= cpu_ack_irq,
-	.eoi		= cpu_eoi_irq,
+	.name			= "CPU",
+	.irq_mask		= cpu_mask_irq,
+	.irq_unmask		= cpu_unmask_irq,
+	.irq_ack		= cpu_ack_irq,
+	.irq_eoi		= cpu_eoi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity	= cpu_set_affinity_irq,
+	.irq_set_affinity	= cpu_set_affinity_irq,
 #endif
 	/* XXX: Needs to be written.  We managed without it so far, but
 	 * we really ought to write it.
 	 */
-	.retrigger	= NULL,
+	.irq_retrigger	= NULL,
 };
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x)		(&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	seq_printf(p, "%*s: ", prec, "STK");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+	seq_puts(p, "  Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+	seq_printf(p, "%*s: ", prec, "IST");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+	seq_puts(p, "  Interrupt stack usage\n");
+# endif
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "%*s: ", prec, "RES");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_puts(p, "  Rescheduling interrupts\n");
+#endif
+	seq_printf(p, "%*s: ", prec, "UAH");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+	seq_puts(p, "  Unaligned access handler traps\n");
+	seq_printf(p, "%*s: ", prec, "FPA");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+	seq_puts(p, "  Floating point assist traps\n");
+	seq_printf(p, "%*s: ", prec, "TLB");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_puts(p, "  TLB shootdowns\n");
+	return 0;
+}
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -170,10 +212,11 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
+		struct irq_desc *desc = irq_to_desc(i);
 		struct irqaction *action;
 
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto skip;
 		seq_printf(p, "%3d: ", i);
@@ -184,7 +227,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #endif
 
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
 #ifndef PARISC_IRQ_CR16_COUNTS
 		seq_printf(p, "  %s", action->name);
 
@@ -216,9 +259,12 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 
  skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
+	if (i == NR_IRQS)
+		arch_show_interrupts(p, 3);
+
 	return 0;
 }
 
@@ -234,16 +280,16 @@ int show_interrupts(struct seq_file *p, void *v)
 
 int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
 {
-	if (irq_desc[irq].action)
+	if (irq_has_action(irq))
 		return -EBUSY;
-	if (irq_desc[irq].chip != &cpu_interrupt_type)
+	if (irq_get_chip(irq) != &cpu_interrupt_type)
 		return -EBUSY;
 
 	/* for iosapic interrupts */
 	if (type) {
-		set_irq_chip_and_handler(irq, type, handle_level_irq);
-		set_irq_chip_data(irq, data);
-		cpu_unmask_irq(irq);
+		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
+		irq_set_chip_data(irq, data);
+		__cpu_unmask_irq(irq);
 	}
 	return 0;
 }
@@ -292,7 +338,8 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+	struct irq_data *d = irq_get_irq_data(irq);
+	cpumask_copy(d->affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -329,6 +376,131 @@ static inline int eirr_to_irq(unsigned long eirr)
 	return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }
 
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+	volatile unsigned int slock[4];
+	volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+		.slock = { 1,1,1,1 },
+	};
+#endif
+
+
+int sysctl_panic_on_stackoverflow = 1;
+
+static inline void stack_overflow_check(struct pt_regs *regs)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	#define STACK_MARGIN	(256*6)
+
+	/* Our stack starts directly behind the thread_info struct. */
+	unsigned long stack_start = (unsigned long) current_thread_info();
+	unsigned long sp = regs->gr[30];
+	unsigned long stack_usage;
+	unsigned int *last_usage;
+	int cpu = smp_processor_id();
+
+	/* if sr7 != 0, we interrupted a userspace process which we do not want
+	 * to check for stack overflow. We will only check the kernel stack. */
+	if (regs->sr[7])
+		return;
+
+	/* calculate kernel stack usage */
+	stack_usage = sp - stack_start;
+#ifdef CONFIG_IRQSTACKS
+	if (likely(stack_usage <= THREAD_SIZE))
+		goto check_kernel_stack; /* found kernel stack */
+
+	/* check irq stack usage */
+	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+	stack_usage = sp - stack_start;
+
+	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow irq stack "
+		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+	goto panic_check;
+
+check_kernel_stack:
+#endif
+
+	/* check kernel stack usage */
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
+
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
+		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + THREAD_SIZE);
+
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
+	if (sysctl_panic_on_stackoverflow)
+		panic("low stack detected by irq handler - check messages\n");
+#endif
+}
+
+#ifdef CONFIG_IRQSTACKS
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
+
+static void execute_on_irq_stack(void *func, unsigned long param1)
+{
+	union irq_stack_union *union_ptr;
+	unsigned long irq_stack;
+	volatile unsigned int *irq_stack_in_use;
+
+	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+	irq_stack = (unsigned long) &union_ptr->stack;
+	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+			 64); /* align for stack frame usage */
+
+	/* We may be called recursive. If we are already using the irq stack,
+	 * just continue to use it. Use spinlocks to serialize
+	 * the irq stack usage.
+	 */
+	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+	if (!__ldcw(irq_stack_in_use)) {
+		void (*direct_call)(unsigned long p1) = func;
+
+		/* We are using the IRQ stack already.
+		 * Do direct call on current stack. */
+		direct_call(param1);
+		return;
+	}
+
+	/* This is where we switch to the IRQ stack. */
+	call_on_stack(param1, func, irq_stack);
+
+	/* free up irq stack usage. */
+	*irq_stack_in_use = 1;
+}
+
+void do_softirq_own_stack(void)
+{
+	execute_on_irq_stack(__do_softirq, 0);
+}
+#endif /* CONFIG_IRQSTACKS */
+
 /* ONLY called from entry.S:intr_extint() */
 void do_cpu_irq_mask(struct pt_regs *regs)
 {
@@ -336,6 +508,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	unsigned long eirr_val;
 	int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
+	struct irq_desc *desc;
 	cpumask_t dest;
 #endif
 
@@ -349,8 +522,9 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	cpumask_copy(&dest, irq_desc[irq].affinity);
-	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+	desc = irq_to_desc(irq);
+	cpumask_copy(&dest, desc->irq_data.affinity);
+	if (irqd_is_per_cpu(&desc->irq_data) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
 
@@ -361,7 +535,13 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 		goto set_out;
 	}
 #endif
+	stack_overflow_check(regs);
+
+#ifdef CONFIG_IRQSTACKS
+	execute_on_irq_stack(&generic_handle_irq, irq);
+#else
 	generic_handle_irq(irq);
+#endif /* CONFIG_IRQSTACKS */
 
  out:
 	irq_exit();
@@ -376,14 +556,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
-	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
+	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
 	.name = "IPI",
-	.flags = IRQF_DISABLED | IRQF_PERCPU,
+	.flags = IRQF_PERCPU,
 };
 #endif
 
@@ -391,14 +571,14 @@ static void claim_cpu_irqs(void)
 {
 	int i;
 	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
-		set_irq_chip_and_handler(i, &cpu_interrupt_type,
-			handle_level_irq);
+		irq_set_chip_and_handler(i, &cpu_interrupt_type,
+					 handle_percpu_irq);
 	}
 
-	set_irq_handler(TIMER_IRQ, handle_percpu_irq);
+	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
 	setup_irq(TIMER_IRQ, &timer_action);
 #ifdef CONFIG_SMP
-	set_irq_handler(IPI_IRQ, handle_percpu_irq);
+	irq_set_handler(IPI_IRQ, handle_percpu_irq);
 	setup_irq(IPI_IRQ, &ipi_action);
 #endif
 }
@@ -407,14 +587,14 @@ void __init init_IRQ(void)
 {
 	local_irq_disable();	/* PARANOID - should already be disabled */
 	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
-	claim_cpu_irqs();
 #ifdef CONFIG_SMP
-	if (!cpu_eiem)
+	if (!cpu_eiem) {
+		claim_cpu_irqs();
 		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+	}
 #else
+	claim_cpu_irqs();
 	cpu_eiem = EIEM_MASK(TIMER_IRQ);
 #endif
         set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
-
 }
-
