Diffstat (limited to 'arch/sh/kernel/irq.c')
-rw-r--r--	arch/sh/kernel/irq.c	150
1 file changed, 27 insertions(+), 123 deletions(-)
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 68ecbe6c881..65a1ecd77f9 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -13,6 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
@@ -34,9 +35,9 @@ void ack_bad_irq(unsigned int irq)
 
 #if defined(CONFIG_PROC_FS)
 /*
- * /proc/interrupts printing:
+ * /proc/interrupts printing for arch specific interrupts
  */
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	int j;
 
@@ -49,63 +50,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 
 	return 0;
 }
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	unsigned long flags, any_count = 0;
-	int i = *(loff_t *)v, j, prec;
-	struct irqaction *action;
-	struct irq_desc *desc;
-	struct irq_data *data;
-	struct irq_chip *chip;
-
-	if (i > nr_irqs)
-		return 0;
-
-	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
-		j *= 10;
-
-	if (i == nr_irqs)
-		return show_other_interrupts(p, prec);
-
-	if (i == 0) {
-		seq_printf(p, "%*s", prec + 8, "");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d", j);
-		seq_putc(p, '\n');
-	}
-
-	desc = irq_to_desc(i);
-	if (!desc)
-		return 0;
-
-	data = irq_get_irq_data(i);
-	chip = irq_data_get_irq_chip(data);
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	for_each_online_cpu(j)
-		any_count |= kstat_irqs_cpu(i, j);
-	action = desc->action;
-	if (!action && !any_count)
-		goto out;
-
-	seq_printf(p, "%*d: ", prec, i);
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-	seq_printf(p, " %14s", chip->name);
-	seq_printf(p, "-%-8s", desc->name);
-
-	if (action) {
-		seq_printf(p, "  %s", action->name);
-		while ((action = action->next) != NULL)
-			seq_printf(p, ", %s", action->name);
-	}
-
-	seq_putc(p, '\n');
-out:
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
-}
 #endif
 
 #ifdef CONFIG_IRQSTACKS
@@ -205,47 +149,32 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
-
-		__asm__ __volatile__ (
-			"mov	r15, r9		\n"
-			"jsr	@%0		\n"
-			/* switch to the softirq stack */
-			" mov	%1, r15		\n"
-			/* restore the thread stack */
-			"mov	r9, r15		\n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
-
-		/*
-		 * Shouldnt happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+	__asm__ __volatile__ (
+		"mov	r15, r9		\n"
+		"jsr	@%0		\n"
+		/* switch to the softirq stack */
+		" mov	%1, r15		\n"
+		/* restore the thread stack */
+		"mov	r9, r15		\n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
 }
 #else
 static inline void handle_one_irq(unsigned int irq)
@@ -287,28 +216,7 @@ void __init init_IRQ(void)
 	irq_ctx_init(smp_processor_id());
 }
 
-#ifdef CONFIG_SPARSE_IRQ
-int __init arch_probe_nr_irqs(void)
-{
-	nr_irqs = sh_mv.mv_nr_irqs;
-	return NR_IRQS_LEGACY;
-}
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_chip *chip = irq_data_get_irq_chip(data);
-
-	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
-	       irq, data->node, cpu);
-
-	raw_spin_lock_irq(&desc->lock);
-	chip->irq_set_affinity(data, cpumask_of(cpu), false);
-	raw_spin_unlock_irq(&desc->lock);
-}
-
 /*
  * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
  * the affinity settings do not allow other CPUs, force them onto any
@@ -325,16 +233,12 @@ void migrate_irqs(void)
 			unsigned int newcpu = cpumask_any_and(data->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       irq, cpu);
+				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+						    irq, cpu);
 
 				cpumask_setall(data->affinity);
-				newcpu = cpumask_any_and(data->affinity,
-							 cpu_online_mask);
 			}
-
-			route_irq(data, irq, newcpu);
+			irq_set_affinity(irq, data->affinity);
 		}
 	}
 }
