Diffstat (limited to 'arch/mips/sibyte/bcm1480')
-rw-r--r--   arch/mips/sibyte/bcm1480/Makefile |   2
-rw-r--r--   arch/mips/sibyte/bcm1480/irq.c    | 107
-rw-r--r--   arch/mips/sibyte/bcm1480/setup.c  |   3
-rw-r--r--   arch/mips/sibyte/bcm1480/smp.c    |  38
4 files changed, 55 insertions, 95 deletions
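
Most of the churn in irq.c below is the switch from the old IRQ-number-based struct irq_chip callbacks (.ack/.mask/.unmask/.end, registered per IRQ with set_irq_chip()) to the irq_data-based callbacks registered with irq_set_chip_and_handler(). As a minimal sketch of that target pattern (the chip name, register and IRQ range here are invented for illustration, not taken from this tree), a level-triggered chip ends up looking roughly like this:

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

/*
 * Hypothetical 32-bit interrupt-mask register; a real driver would use its
 * own layout (the BCM1480 IMR below is 64-bit and per-CPU).
 */
static void __iomem *example_imr;

static void example_mask(struct irq_data *d)
{
        /* Callbacks now receive irq_data instead of a bare IRQ number. */
        writel(readl(example_imr) | BIT(d->irq), example_imr);
}

static void example_unmask(struct irq_data *d)
{
        writel(readl(example_imr) & ~BIT(d->irq), example_imr);
}

static struct irq_chip example_irq_chip = {
        .name           = "EXAMPLE-IMR",
        .irq_mask       = example_mask,
        .irq_mask_ack   = example_mask,         /* masking doubles as the ack here */
        .irq_unmask     = example_unmask,
};

static void __init example_init_irqs(unsigned int count)
{
        unsigned int i;

        /* Replaces the old set_irq_chip(); assumes IRQs 0..count-1 with count <= 32. */
        for (i = 0; i < count; i++)
                irq_set_chip_and_handler(i, &example_irq_chip, handle_level_irq);
}

With handle_level_irq installed, the core flow handler drives the mask_ack/unmask sequencing itself, which is why the removed .end hook needs no replacement.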
diff --git a/arch/mips/sibyte/bcm1480/Makefile b/arch/mips/sibyte/bcm1480/Makefile
index f292f7df0cf..cdc4c56c3e2 100644
--- a/arch/mips/sibyte/bcm1480/Makefile
+++ b/arch/mips/sibyte/bcm1480/Makefile
@@ -1,5 +1,3 @@
 obj-y := setup.o irq.o time.o
 
 obj-$(CONFIG_SMP) += smp.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 12b465d404d..373fbbc8425 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -19,15 +19,14 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/interrupt.h>
+#include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
-#include <linux/slab.h>
 #include <linux/kernel_stat.h>
 
 #include <asm/errno.h>
 #include <asm/irq_regs.h>
 #include <asm/signal.h>
-#include <asm/system.h>
 #include <asm/io.h>
 
 #include <asm/sibyte/bcm1480_regs.h>
@@ -44,42 +43,21 @@
  * for interrupt lines
  */
-
-static void end_bcm1480_irq(unsigned int irq);
-static void enable_bcm1480_irq(unsigned int irq);
-static void disable_bcm1480_irq(unsigned int irq);
-static void ack_bcm1480_irq(unsigned int irq);
-#ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
-#endif
-
 #ifdef CONFIG_PCI
 extern unsigned long ht_eoi_space;
 #endif
 
-static struct irq_chip bcm1480_irq_type = {
-        .name = "BCM1480-IMR",
-        .ack = ack_bcm1480_irq,
-        .mask = disable_bcm1480_irq,
-        .mask_ack = ack_bcm1480_irq,
-        .unmask = enable_bcm1480_irq,
-        .end = end_bcm1480_irq,
-#ifdef CONFIG_SMP
-        .set_affinity = bcm1480_set_affinity
-#endif
-};
-
 /* Store the CPU id (not the logical number) */
 int bcm1480_irq_owner[BCM1480_NR_IRQS];
 
-DEFINE_SPINLOCK(bcm1480_imr_lock);
+static DEFINE_RAW_SPINLOCK(bcm1480_imr_lock);
 
 void bcm1480_mask_irq(int cpu, int irq)
 {
         unsigned long flags, hl_spacing;
         u64 cur_ints;
 
-        spin_lock_irqsave(&bcm1480_imr_lock, flags);
+        raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
         hl_spacing = 0;
         if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
                 hl_spacing = BCM1480_IMR_HL_SPACING;
@@ -88,7 +66,7 @@ void bcm1480_mask_irq(int cpu, int irq)
         cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
         cur_ints |= (((u64) 1) << irq);
         ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
-        spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
+        raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
 }
 
 void bcm1480_unmask_irq(int cpu, int irq)
@@ -96,7 +74,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
         unsigned long flags, hl_spacing;
         u64 cur_ints;
 
-        spin_lock_irqsave(&bcm1480_imr_lock, flags);
+        raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
         hl_spacing = 0;
         if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
                 hl_spacing = BCM1480_IMR_HL_SPACING;
@@ -105,30 +83,25 @@ void bcm1480_unmask_irq(int cpu, int irq)
         cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
         cur_ints &= ~(((u64) 1) << irq);
         ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
-        spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
+        raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
 }
 
 #ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
+                                bool force)
 {
+        unsigned int irq_dirty, irq = d->irq;
         int i = 0, old_cpu, cpu, int_on, k;
         u64 cur_ints;
-        struct irq_desc *desc = irq_desc + irq;
         unsigned long flags;
-        unsigned int irq_dirty;
 
-        if (cpumask_weight(mask) != 1) {
-                printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
-                return;
-        }
-        i = cpumask_first(mask);
+        i = cpumask_first_and(mask, cpu_online_mask);
 
         /* Convert logical CPU to physical CPU */
         cpu = cpu_logical_map(i);
 
         /* Protect against other affinity changers and IMR manipulation */
-        spin_lock_irqsave(&desc->lock, flags);
-        spin_lock(&bcm1480_imr_lock);
+        raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
 
         /* Swizzle each CPU's IMR (but leave the IP selection alone) */
         old_cpu = bcm1480_irq_owner[irq];
@@ -153,29 +126,34 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
                         ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
                 }
         }
-        spin_unlock(&bcm1480_imr_lock);
-        spin_unlock_irqrestore(&desc->lock, flags);
+        raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
+
+        return 0;
 }
 #endif
 
 /*****************************************************************************/
 
-static void disable_bcm1480_irq(unsigned int irq)
+static void disable_bcm1480_irq(struct irq_data *d)
 {
+        unsigned int irq = d->irq;
+
         bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
 }
 
-static void enable_bcm1480_irq(unsigned int irq)
+static void enable_bcm1480_irq(struct irq_data *d)
 {
+        unsigned int irq = d->irq;
+
         bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
 }
 
-static void ack_bcm1480_irq(unsigned int irq)
+static void ack_bcm1480_irq(struct irq_data *d)
 {
+        unsigned int irq_dirty, irq = d->irq;
         u64 pending;
-        unsigned int irq_dirty;
         int k;
 
         /*
@@ -222,21 +200,23 @@ static void ack_bcm1480_irq(unsigned int irq)
         bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
 }
 
-
-static void end_bcm1480_irq(unsigned int irq)
-{
-        if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-                bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
-        }
-}
-
+static struct irq_chip bcm1480_irq_type = {
+        .name = "BCM1480-IMR",
+        .irq_mask_ack = ack_bcm1480_irq,
+        .irq_mask = disable_bcm1480_irq,
+        .irq_unmask = enable_bcm1480_irq,
+#ifdef CONFIG_SMP
+        .irq_set_affinity = bcm1480_set_affinity
+#endif
+};
 
 void __init init_bcm1480_irqs(void)
 {
         int i;
 
         for (i = 0; i < BCM1480_NR_IRQS; i++) {
-                set_irq_chip(i, &bcm1480_irq_type);
+                irq_set_chip_and_handler(i, &bcm1480_irq_type,
+                                         handle_level_irq);
                 bcm1480_irq_owner[i] = 0;
         }
 }
@@ -258,7 +238,7 @@ void __init init_bcm1480_irqs(void)
  * On the second cpu, everything is set to IP5, which is
  * ignored, EXCEPT the mailbox interrupt.  That one is
  * set to IP[2] so it is handled.  This is needed so we
- * can do cross-cpu function calls, as requred by SMP
+ * can do cross-cpu function calls, as required by SMP
  */
 
 #define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
@@ -303,10 +283,10 @@ void __init arch_init_irq(void)
         for (cpu = 0; cpu < 4; cpu++) {
                 __raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
                              (K_BCM1480_INT_MBOX_0_0 << 3)));
-        }
+        }
 
-        /* Clear the mailboxes. The firmware may leave them dirty */
+        /* Clear the mailboxes. The firmware may leave them dirty */
         for (cpu = 0; cpu < 4; cpu++) {
                 __raw_writeq(0xffffffffffffffffULL,
                              IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
@@ -327,7 +307,7 @@ void __init arch_init_irq(void)
 
         /*
          * Note that the timer interrupts are also mapped, but this is
-         * done in bcm1480_time_init(). Also, the profiling driver
+         * done in bcm1480_time_init(). Also, the profiling driver
          * does its own management of IP7.
          */
 
@@ -345,7 +325,7 @@ static inline void dispatch_ip2(void)
 
         /*
          * Default...we've hit an IP[2] interrupt, which means we've got to
-         * check the 1480 interrupt registers to figure out what to do. Need
+         * check the 1480 interrupt registers to figure out what to do. Need
          * to detect which CPU we're on, now that smp_affinity is supported.
          */
         base = A_BCM1480_IMR_MAPPER(cpu);
@@ -367,19 +347,8 @@ asmlinkage void plat_irq_dispatch(void)
         unsigned int cpu = smp_processor_id();
         unsigned int pending;
 
-#ifdef CONFIG_SIBYTE_BCM1480_PROF
-        /* Set compare to count to silence count/compare timer interrupts */
-        write_c0_compare(read_c0_count());
-#endif
-
         pending = read_c0_cause() & read_c0_status();
 
-#ifdef CONFIG_SIBYTE_BCM1480_PROF
-        if (pending & CAUSEF_IP7) /* Cpu performance counter interrupt */
-                sbprof_cpu_intr();
-        else
-#endif
-
         if (pending & CAUSEF_IP4)
                 do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
 #ifdef CONFIG_SMP
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index 05ed92c92b6..8e2e04f7787 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -22,6 +22,7 @@
 #include <linux/string.h>
 
 #include <asm/bootinfo.h>
+#include <asm/cpu.h>
 #include <asm/mipsregs.h>
 #include <asm/io.h>
 #include <asm/sibyte/sb1250.h>
@@ -119,7 +120,7 @@ void __init bcm1480_setup(void)
         uint64_t sys_rev;
         int plldiv;
 
-        sb1_pass = read_c0_prid() & 0xff;
+        sb1_pass = read_c0_prid() & PRID_REV_MASK;
         sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
         soc_type = SYS_SOC_TYPE(sys_rev);
         part_type = G_SYS_PART(sys_rev);
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index dddfda8e829..af7d44edd9a 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 #include <linux/kernel_stat.h>
+#include <linux/sched.h>
 
 #include <asm/mmu_context.h>
 #include <asm/io.h>
@@ -59,7 +60,7 @@ static void *mailbox_0_regs[] = {
 /*
  * SMP init and finish on secondary CPUs
  */
-void __cpuinit bcm1480_smp_init(void)
+void bcm1480_smp_init(void)
 {
         unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
                 STATUSF_IP1 | STATUSF_IP0;
@@ -82,18 +83,19 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
         __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
 }
 
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+                                  unsigned int action)
 {
         unsigned int i;
 
-        for_each_cpu_mask(i, mask)
+        for_each_cpu(i, mask)
                 bcm1480_send_ipi_single(i, action);
 }
 
 /*
  * Code to run on secondary just after probing the CPU
  */
-static void __cpuinit bcm1480_init_secondary(void)
+static void bcm1480_init_secondary(void)
 {
         extern void bcm1480_smp_init(void);
 
@@ -104,7 +106,7 @@ static void __cpuinit bcm1480_init_secondary(void)
  * Do any tidying up before marking online and running the idle
  * loop
  */
-static void __cpuinit bcm1480_smp_finish(void)
+static void bcm1480_smp_finish(void)
 {
         extern void sb1480_clockevent_init(void);
 
@@ -113,17 +115,10 @@
 }
 
 /*
- * Final cleanup after all secondaries booted
- */
-static void bcm1480_cpus_done(void)
-{
-}
-
-/*
  * Setup the PC, SP, and GP of a secondary processor and start it
  * running!
  */
-static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
+static void bcm1480_boot_secondary(int cpu, struct task_struct *idle)
 {
         int retval;
 
@@ -136,7 +131,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -145,14 +140,13 @@ static void __init bcm1480_smp_setup(void)
 {
         int i, num;
 
-        cpus_clear(cpu_possible_map);
-        cpu_set(0, cpu_possible_map);
+        init_cpu_possible(cpumask_of(0));
         __cpu_number_map[0] = 0;
         __cpu_logical_map[0] = 0;
 
         for (i = 1, num = 0; i < NR_CPUS; i++) {
                 if (cfe_cpu_stop(i) == 0) {
-                        cpu_set(i, cpu_possible_map);
+                        set_cpu_possible(i, true);
                         __cpu_number_map[i] = ++num;
                         __cpu_logical_map[num] = i;
                 }
@@ -169,7 +163,6 @@ struct plat_smp_ops bcm1480_smp_ops = {
         .send_ipi_mask = bcm1480_send_ipi_mask,
         .init_secondary = bcm1480_init_secondary,
         .smp_finish = bcm1480_smp_finish,
-        .cpus_done = bcm1480_cpus_done,
         .boot_secondary = bcm1480_boot_secondary,
         .smp_setup = bcm1480_smp_setup,
         .prepare_cpus = bcm1480_prepare_cpus,
@@ -178,19 +171,18 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
         int cpu = smp_processor_id();
+        int irq = K_BCM1480_INT_MBOX_0_0;
         unsigned int action;
 
-        kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+        kstat_incr_irq_this_cpu(irq);
 
         /* Load the mailbox register to figure out what we're supposed to do */
         action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
 
         /* Clear the mailbox to clear the interrupt */
         __raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
 
-        /*
-         * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
-         * interrupt will do the reschedule for us
-         */
+        if (action & SMP_RESCHEDULE_YOURSELF)
+                scheduler_ipi();
 
         if (action & SMP_CALL_FUNCTION)
                 smp_call_function_interrupt();
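
The final smp.c hunk above tracks a generic-kernel change: a reschedule IPI is no longer implied by simply returning from the interrupt, so the mailbox handler now calls scheduler_ipi() itself, and the per-IRQ statistic is bumped through kstat_incr_irq_this_cpu() rather than by poking kstat_this_cpu directly. A distilled sketch of the resulting handler shape (with the mailbox CSR access folded into a hypothetical helper and a placeholder IRQ number) might look like:

#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/smp.h>    /* SMP_RESCHEDULE_YOURSELF, SMP_CALL_FUNCTION on MIPS */

#define EXAMPLE_MBOX_IRQ 42     /* placeholder; the real code uses K_BCM1480_INT_MBOX_0_0 */

/* Hypothetical helper standing in for the real mailbox register read + clear. */
extern unsigned int example_read_and_clear_mailbox(int cpu);

void example_mailbox_interrupt(void)
{
        unsigned int action;

        kstat_incr_irq_this_cpu(EXAMPLE_MBOX_IRQ);

        action = example_read_and_clear_mailbox(smp_processor_id());

        /* Returning from the interrupt no longer implies a reschedule. */
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();

        /* Cross-CPU function-call IPI, dispatched by the MIPS SMP core as in the hunk above. */
        if (action & SMP_CALL_FUNCTION)
                smp_call_function_interrupt();
}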
