Diffstat (limited to 'arch/mn10300/kernel/irq.c')
-rw-r--r--	arch/mn10300/kernel/irq.c	225
1 files changed, 55 insertions, 170 deletions
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index c2e44597c22..6ab3b73efcf 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -37,8 +37,9 @@ atomic_t irq_err_count;
 /*
  * MN10300 interrupt controller operations
  */
-static void mn10300_cpupic_ack(unsigned int irq)
+static void mn10300_cpupic_ack(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long flags;
 	u16 tmp;
 
@@ -61,13 +62,14 @@ static void __mask_and_set_icr(unsigned int irq,
 	arch_local_irq_restore(flags);
 }
 
-static void mn10300_cpupic_mask(unsigned int irq)
+static void mn10300_cpupic_mask(struct irq_data *d)
 {
-	__mask_and_set_icr(irq, GxICR_LEVEL, 0);
+	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
 }
 
-static void mn10300_cpupic_mask_ack(unsigned int irq)
+static void mn10300_cpupic_mask_ack(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 #ifdef CONFIG_SMP
 	unsigned long flags;
 	u16 tmp;
@@ -85,7 +87,7 @@ static void mn10300_cpupic_mask_ack(unsigned int irq)
 		tmp2 = GxICR(irq);
 
 		irq_affinity_online[irq] =
-			any_online_cpu(*irq_desc[irq].affinity);
+			cpumask_any_and(d->affinity, cpu_online_mask);
 		CROSS_GxICR(irq, irq_affinity_online[irq]) =
 			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
 		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
@@ -97,13 +99,14 @@ static void mn10300_cpupic_mask_ack(unsigned int irq)
 #endif /* CONFIG_SMP */
 }
 
-static void mn10300_cpupic_unmask(unsigned int irq)
+static void mn10300_cpupic_unmask(struct irq_data *d)
 {
-	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
+	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
 }
 
-static void mn10300_cpupic_unmask_clear(unsigned int irq)
+static void mn10300_cpupic_unmask_clear(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	/* the MN10300 PIC latches its interrupt request bit, even after the
 	 * device has ceased to assert its interrupt line and the interrupt
 	 * channel has been disabled in the PIC, so for level-triggered
@@ -121,7 +124,8 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
 	} else {
 		tmp = GxICR(irq);
 
-		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
+							   cpu_online_mask);
 		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
 		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
 	}
@@ -134,60 +138,15 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
 
 #ifdef CONFIG_SMP
 static int
-mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
+			   bool force)
 {
 	unsigned long flags;
-	int err;
 
 	flags = arch_local_cli_save();
-
-	/* check irq no */
-	switch (irq) {
-	case TMJCIRQ:
-	case RESCHEDULE_IPI:
-	case CALL_FUNC_SINGLE_IPI:
-	case LOCAL_TIMER_IPI:
-	case FLUSH_CACHE_IPI:
-	case CALL_FUNCTION_NMI_IPI:
-	case GDB_NMI_IPI:
-#ifdef CONFIG_MN10300_TTYSM0
-	case SC0RXIRQ:
-	case SC0TXIRQ:
-#ifdef CONFIG_MN10300_TTYSM0_TIMER8
-	case TM8IRQ:
-#elif CONFIG_MN10300_TTYSM0_TIMER2
-	case TM2IRQ:
-#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
-#endif /* CONFIG_MN10300_TTYSM0 */
-
-#ifdef CONFIG_MN10300_TTYSM1
-	case SC1RXIRQ:
-	case SC1TXIRQ:
-#ifdef CONFIG_MN10300_TTYSM1_TIMER12
-	case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
-	case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
-	case TM3IRQ:
-#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
-#endif /* CONFIG_MN10300_TTYSM1 */
-
-#ifdef CONFIG_MN10300_TTYSM2
-	case SC2RXIRQ:
-	case SC2TXIRQ:
-	case TM10IRQ:
-#endif /* CONFIG_MN10300_TTYSM2 */
-		err = -1;
-		break;
-
-	default:
-		set_bit(irq, irq_affinity_request);
-		err = 0;
-		break;
-	}
-
+	set_bit(d->irq, irq_affinity_request);
 	arch_local_irq_restore(flags);
-	return err;
+	return 0;
 }
 #endif /* CONFIG_SMP */
 
@@ -202,15 +161,15 @@ mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
  * mask_ack() is provided), and mask_ack() just masks.
  */
 static struct irq_chip mn10300_cpu_pic_level = {
-	.name		= "cpu_l",
-	.disable	= mn10300_cpupic_mask,
-	.enable		= mn10300_cpupic_unmask_clear,
-	.ack		= NULL,
-	.mask		= mn10300_cpupic_mask,
-	.mask_ack	= mn10300_cpupic_mask,
-	.unmask		= mn10300_cpupic_unmask_clear,
+	.name			= "cpu_l",
+	.irq_disable		= mn10300_cpupic_mask,
+	.irq_enable		= mn10300_cpupic_unmask_clear,
+	.irq_ack		= NULL,
+	.irq_mask		= mn10300_cpupic_mask,
+	.irq_mask_ack		= mn10300_cpupic_mask,
+	.irq_unmask		= mn10300_cpupic_unmask_clear,
 #ifdef CONFIG_SMP
-	.set_affinity	= mn10300_cpupic_setaffinity,
+	.irq_set_affinity	= mn10300_cpupic_setaffinity,
 #endif
 };
 
@@ -220,15 +179,15 @@ static struct irq_chip mn10300_cpu_pic_level = {
  * We use the latch clearing function of the PIC as the 'ACK' function.
  */
 static struct irq_chip mn10300_cpu_pic_edge = {
-	.name		= "cpu_e",
-	.disable	= mn10300_cpupic_mask,
-	.enable		= mn10300_cpupic_unmask,
-	.ack		= mn10300_cpupic_ack,
-	.mask		= mn10300_cpupic_mask,
-	.mask_ack	= mn10300_cpupic_mask_ack,
-	.unmask		= mn10300_cpupic_unmask,
+	.name			= "cpu_e",
+	.irq_disable		= mn10300_cpupic_mask,
+	.irq_enable		= mn10300_cpupic_unmask,
+	.irq_ack		= mn10300_cpupic_ack,
+	.irq_mask		= mn10300_cpupic_mask,
+	.irq_mask_ack		= mn10300_cpupic_mask_ack,
+	.irq_unmask		= mn10300_cpupic_unmask,
 #ifdef CONFIG_SMP
-	.set_affinity	= mn10300_cpupic_setaffinity,
+	.irq_set_affinity	= mn10300_cpupic_setaffinity,
 #endif
 };
 
@@ -252,39 +211,13 @@ void set_intr_level(int irq, u16 level)
 	__mask_and_set_icr(irq, GxICR_ENABLE, level);
 }
 
-void mn10300_intc_set_level(unsigned int irq, unsigned int level)
-{
-	set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
-}
-
-void mn10300_intc_clear(unsigned int irq)
-{
-	__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
-}
-
-void mn10300_intc_set(unsigned int irq)
-{
-	__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
-}
-
-void mn10300_intc_enable(unsigned int irq)
-{
-	mn10300_cpupic_unmask(irq);
-}
-
-void mn10300_intc_disable(unsigned int irq)
-{
-	mn10300_cpupic_mask(irq);
-}
-
 /*
  * mark an interrupt to be ACK'd after interrupt handlers have been run rather
  * than before
- * - see Documentation/mn10300/features.txt
  */
 void mn10300_set_lateack_irq_type(int irq)
 {
-	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
+	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
 				 handle_level_irq);
 }
 
@@ -296,12 +229,12 @@ void __init init_IRQ(void)
 	int irq;
 
 	for (irq = 0; irq < NR_IRQS; irq++)
-		if (irq_desc[irq].chip == &no_irq_chip)
+		if (irq_get_chip(irq) == &no_irq_chip)
 			/* due to the PIC latching interrupt requests, even
 			 * when the IRQ is disabled, IRQ_PENDING is superfluous
 			 * and we can use handle_level_irq() for edge-triggered
 			 * interrupts */
-			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
+			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
 						 handle_level_irq);
 
 	unit_init_IRQ();
@@ -356,91 +289,42 @@ asmlinkage void do_IRQ(void)
 /*
  * Display interrupt management information through /proc/interrupts
  */
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j, cpu;
-	struct irqaction *action;
-	unsigned long flags;
-
-	switch (i) {
-		/* display column title bar naming CPUs */
-	case 0:
-		seq_printf(p, "           ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ", j);
-		seq_putc(p, '\n');
-		break;
-
-		/* display information rows, one per active CPU */
-	case 1 ... NR_IRQS - 1:
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-
-		action = irq_desc[i].action;
-		if (action) {
-			seq_printf(p, "%3d: ", i);
-			for_each_present_cpu(cpu)
-				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-
-			if (i < NR_CPU_IRQS)
-				seq_printf(p, " %14s.%u",
-					   irq_desc[i].chip->name,
-					   (GxICR(i) & GxICR_LEVEL) >>
-					   GxICR_LEVEL_SHIFT);
-			else
-				seq_printf(p, " %14s",
-					   irq_desc[i].chip->name);
-
-			seq_printf(p, "  %s", action->name);
-
-			for (action = action->next;
-			     action;
-			     action = action->next)
-				seq_printf(p, ", %s", action->name);
-
-			seq_putc(p, '\n');
-		}
-
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-		break;
-
-		/* polish off with NMI and error counters */
-	case NR_IRQS:
 #ifdef CONFIG_MN10300_WD_TIMER
-		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
-		seq_putc(p, '\n');
-#endif
+	int j;
 
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-		break;
-	}
+	seq_printf(p, "%*s: ", prec, "NMI");
+	for (j = 0; j < NR_CPUS; j++)
+		if (cpu_online(j))
+			seq_printf(p, "%10u ", nmi_count(j));
+	seq_putc(p, '\n');
+#endif
+	seq_printf(p, "%*s: ", prec, "ERR");
+	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
 	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 void migrate_irqs(void)
 {
-	irq_desc_t *desc;
 	int irq;
 	unsigned int self, new;
 	unsigned long flags;
 
 	self = smp_processor_id();
 	for (irq = 0; irq < NR_IRQS; irq++) {
-		desc = irq_desc + irq;
+		struct irq_data *data = irq_get_irq_data(irq);
 
-		if (desc->status == IRQ_PER_CPU)
+		if (irqd_is_per_cpu(data))
 			continue;
 
-		if (cpu_isset(self, irq_desc[irq].affinity) &&
-		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+		if (cpumask_test_cpu(self, &data->affinity) &&
+		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
			int cpu_id;
 
-			cpu_id = first_cpu(cpu_online_map);
-			cpu_set(cpu_id, irq_desc[irq].affinity);
+			cpu_id = cpumask_first(cpu_online_mask);
+			cpumask_set_cpu(cpu_id, &data->affinity);
 		}
 		/* We need to operate irq_affinity_online atomically. */
 		arch_local_cli_save(flags);
@@ -451,7 +335,8 @@ void migrate_irqs(void)
 			GxICR(irq) = x & GxICR_LEVEL;
 			tmp = GxICR(irq);
 
-			new = any_online_cpu(irq_desc[irq].affinity);
+			new = cpumask_any_and(&data->affinity,
+					      cpu_online_mask);
 			irq_affinity_online[irq] = new;
 
 			CROSS_GxICR(irq, new) =
@@ -459,7 +344,7 @@ void migrate_irqs(void)
 			tmp = CROSS_GxICR(irq, new);
 
 			x &= GxICR_LEVEL | GxICR_ENABLE;
-			if (GxICR(irq) & GxICR_REQUEST) {
+			if (GxICR(irq) & GxICR_REQUEST)
 				x |= GxICR_REQUEST | GxICR_DETECT;
 			CROSS_GxICR(irq, new) = x;
 			tmp = CROSS_GxICR(irq, new);
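The hunks above convert the mn10300 CPU PIC from the old irq_chip callbacks, which received a raw IRQ number, to the irq_data-based callbacks (.irq_mask, .irq_unmask, .irq_set_affinity, ...) and to irq_set_chip_and_handler(). For orientation only, a minimal sketch of that pattern follows; it is not part of this commit, and the "foo_pic" chip, its FOO_* constants and its software mask word are hypothetical placeholders (the real callbacks for this platform are the mn10300_cpupic_*() functions shown in the diff).

	/* Illustrative sketch only -- foo_pic and FOO_* are hypothetical. */
	#include <linux/init.h>
	#include <linux/irq.h>

	#define FOO_IRQ_BASE	16	/* hypothetical first IRQ of this chip */
	#define FOO_NR_IRQS	8	/* hypothetical number of IRQs it owns */

	static u32 foo_pic_masked;	/* stand-in for a real mask register */

	static void foo_pic_mask(struct irq_data *d)
	{
		/* the IRQ number now comes from irq_data, not a parameter */
		foo_pic_masked |= 1 << (d->irq - FOO_IRQ_BASE);
	}

	static void foo_pic_unmask(struct irq_data *d)
	{
		foo_pic_masked &= ~(1 << (d->irq - FOO_IRQ_BASE));
	}

	static struct irq_chip foo_pic = {
		.name		= "foo",
		.irq_mask	= foo_pic_mask,		/* was .mask(unsigned int irq) */
		.irq_unmask	= foo_pic_unmask,	/* was .unmask(unsigned int irq) */
	};

	static void __init foo_pic_init(void)
	{
		int irq;

		/* replaces the old set_irq_chip_and_handler() calls */
		for (irq = FOO_IRQ_BASE; irq < FOO_IRQ_BASE + FOO_NR_IRQS; irq++)
			irq_set_chip_and_handler(irq, &foo_pic, handle_level_irq);
	}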
