| author    | Ingo Molnar <mingo@elte.hu>                                        | 2009-03-30 23:53:32 +0200 |
|-----------|--------------------------------------------------------------------|---------------------------|
| committer | Ingo Molnar <mingo@elte.hu>                                        | 2009-03-30 23:53:32 +0200 |
| commit    | 65fb0d23fcddd8697c871047b700c78817bdaa43 (patch)                   |                           |
| tree      | 119e6e5f276622c4c862f6c9b6d795264ba1603a /arch/parisc/kernel/irq.c |                           |
| parent    | 8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff)                    |                           |
| parent    | dfbbe89e197a77f2c8046a51c74e33e35f878080 (diff)                    |                           |
Merge branch 'linus' into cpumask-for-linus
Conflicts:
arch/x86/kernel/cpu/common.c
Diffstat (limited to 'arch/parisc/kernel/irq.c')
-rw-r--r--  arch/parisc/kernel/irq.c | 20
1 file changed, 11 insertions, 9 deletions
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 49482806863..1c740f5cbd6 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -112,7 +112,7 @@ void cpu_end_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 {
 	int cpu_dest;
 
@@ -120,23 +120,25 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision. The mask has already
 		 * been set; we must reset it */
-		cpumask_setall(irq_desc[irq].affinity);
+		cpumask_setall(&irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 
 	/* whatever mask they set, we just allow one CPU */
 	cpu_dest = first_cpu(*dest);
-	*dest = cpumask_of_cpu(cpu_dest);
 
-	return 0;
+	return cpu_dest;
 }
 
 static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-	if (cpu_check_affinity(irq, dest))
+	int cpu_dest;
+
+	cpu_dest = cpu_check_affinity(irq, dest);
+	if (cpu_dest < 0)
 		return;
 
-	cpumask_copy(irq_desc[irq].affinity, dest);
+	cpumask_copy(&irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -183,7 +185,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%3d: ", i);
 #ifdef CONFIG_SMP
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #else
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #endif
@@ -295,7 +297,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+	cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +354,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	cpumask_copy(&dest, irq_desc[irq].affinity);
+	cpumask_copy(&dest, &irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
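The behavioral change in this diff is that cpu_check_affinity() now returns the selected CPU (or a negative errno) instead of rewriting the caller's mask, and cpu_set_affinity_irq() only commits the new affinity when that check succeeds. The sketch below is a stand-alone, user-space illustration of that calling convention, not kernel code; the fake_* names and the unsigned-long bitmask standing in for struct cpumask are invented for the example.

```c
/*
 * Stand-alone sketch (not part of the patch) of the calling convention
 * introduced above: the check function returns the chosen CPU index or a
 * negative errno, and the set function commits the affinity only when the
 * check succeeds.
 */
#include <stdio.h>
#include <errno.h>

#define NR_CPUS 8

static unsigned long irq_affinity[16];	/* stand-in for irq_desc[].affinity */

/* Like the new cpu_check_affinity(): pick exactly one CPU from the mask. */
static int fake_check_affinity(unsigned int irq, const unsigned long *dest)
{
	int cpu;

	/* whatever mask the caller set, allow exactly one CPU */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (*dest & (1UL << cpu))
			return cpu;

	return -EINVAL;	/* empty mask: nothing to pick, reject */
}

/* Like the new cpu_set_affinity_irq(): bail out on a negative return. */
static void fake_set_affinity(unsigned int irq, const unsigned long *dest)
{
	int cpu = fake_check_affinity(irq, dest);

	if (cpu < 0)
		return;			/* leave the stored affinity untouched */

	irq_affinity[irq] = 1UL << cpu;	/* commit only the single chosen CPU */
}

int main(void)
{
	unsigned long want = (1UL << 2) | (1UL << 5);

	fake_set_affinity(3, &want);
	printf("irq 3 affinity mask: 0x%lx\n", irq_affinity[3]);	/* prints 0x4 */
	return 0;
}
```

Returning the CPU index also means the caller's mask is never written through, which is what allows the prototype in the first hunk to take a const struct cpumask * instead of a mutable cpumask_t *.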