Diffstat (limited to 'arch/cris/arch-v32/kernel/smp.c')
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c | 95
1 file changed, 43 insertions, 52 deletions
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 52e16c6436f..0698582467c 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -7,7 +7,7 @@
 #include <asm/mmu_context.h>
 #include <hwregs/asm/mmu_defs_asm.h>
 #include <hwregs/supp_reg.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #include <linux/err.h>
 #include <linux/init.h>
@@ -26,14 +26,12 @@
 #define FLUSH_ALL (void*)0xffffffff
 
 /* Vector of locks used for various atomic operations */
-spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
+spinlock_t cris_atomic_locks[] = {
+	[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
+};
 
 /* CPU masks */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */
@@ -56,8 +54,6 @@ static struct mm_struct* flush_mm;
 static struct vm_area_struct* flush_vma;
 static unsigned long flush_addr;
 
-extern int setup_irq(int, struct irqaction *);
-
 /* Mode registers */
 static unsigned long irq_regs[NR_CPUS] = {
 	regi_irq,
@@ -68,8 +64,7 @@ static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
 static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
 static struct irqaction irq_ipi = {
 	.handler = crisv32_ipi_interrupt,
-	.flags = IRQF_DISABLED,
-	.mask = CPU_MASK_NONE,
+	.flags = 0,
 	.name = "ipi",
 };
 
@@ -86,10 +81,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Mark all possible CPUs as present */
 	for (i = 0; i < max_cpus; i++)
-		cpu_set(i, phys_cpu_present_map);
+		cpumask_set_cpu(i, &phys_cpu_present_map);
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
 {
 	/* PGD pointer has moved after per_cpu initialization so
 	 * update the MMU.
@@ -102,9 +97,9 @@ void __devinit smp_prepare_boot_cpu(void)
 	SUPP_BANK_SEL(2);
 	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
 
-	cpu_set(0, cpu_online_map);
-	cpu_set(0, phys_cpu_present_map);
-	cpu_set(0, cpu_possible_map);
+	set_cpu_online(0, true);
+	cpumask_set_cpu(0, &phys_cpu_present_map);
+	set_cpu_possible(0, true);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -113,16 +108,12 @@
 
 /* Bring one cpu online.*/
 static int __init
-smp_boot_one_cpu(int cpuid)
+smp_boot_one_cpu(int cpuid, struct task_struct *idle)
 {
 	unsigned timeout;
-	struct task_struct *idle;
-	cpumask_t cpu_mask = CPU_MASK_NONE;
-
-	idle = fork_idle(cpuid);
-	if (IS_ERR(idle))
-		panic("SMP: fork failed for CPU:%d", cpuid);
+	cpumask_t cpu_mask;
 
+	cpumask_clear(&cpu_mask);
 	task_thread_info(idle)->cpu = cpuid;
 
 	/* Information to the CPU that is about to boot */
@@ -130,10 +121,10 @@ smp_boot_one_cpu(int cpuid)
 	cpu_now_booting = cpuid;
 
 	/* Kick it */
-	cpu_set(cpuid, cpu_online_map);
-	cpu_set(cpuid, cpu_mask);
+	set_cpu_online(cpuid, true);
+	cpumask_set_cpu(cpuid, &cpu_mask);
 	send_ipi(IPI_BOOT, 0, cpu_mask);
-	cpu_clear(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, false);
 
 	/* Wait for CPU to come online */
 	for (timeout = 0; timeout < 10000; timeout++) {
@@ -146,9 +137,6 @@ smp_boot_one_cpu(int cpuid)
 		barrier();
 	}
 
-	put_task_struct(idle);
-	idle = NULL;
-
 	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
 	return -1;
 }
@@ -157,8 +145,6 @@
  * specific stuff such as the local timer and the MMU. */
 void __init smp_callin(void)
 {
-	extern void cpu_idle(void);
-
 	int cpu = cpu_now_booting;
 	reg_intr_vect_rw_mask vect_mask = {0};
 
@@ -175,14 +161,14 @@ void __init smp_callin(void)
 
 	/* Enable IRQ and idle */
 	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
-	unmask_irq(IPI_INTR_VECT);
-	unmask_irq(TIMER0_INTR_VECT);
+	crisv32_unmask_irq(IPI_INTR_VECT);
+	crisv32_unmask_irq(TIMER0_INTR_VECT);
 	preempt_disable();
 	notify_cpu_starting(cpu);
 	local_irq_enable();
 
-	cpu_set(cpu, cpu_online_map);
-	cpu_idle();
+	set_cpu_online(cpu, true);
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /* Stop execution on this CPU.*/
@@ -211,16 +197,17 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 unsigned long cache_decay_ticks = 1;
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	smp_boot_one_cpu(cpu);
+	smp_boot_one_cpu(cpu, tidle);
 	return cpu_online(cpu) ? 0 : -ENOSYS;
 }
 
 void smp_send_reschedule(int cpu)
 {
-	cpumask_t cpu_mask = CPU_MASK_NONE;
-	cpu_set(cpu, cpu_mask);
+	cpumask_t cpu_mask;
+	cpumask_clear(&cpu_mask);
+	cpumask_set_cpu(cpu, &cpu_mask);
 	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
 }
 
@@ -236,8 +223,8 @@ void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned
 	cpumask_t cpu_mask;
 
 	spin_lock_irqsave(&tlbstate_lock, flags);
-	cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 	flush_mm = mm;
 	flush_vma = vma;
 	flush_addr = addr;
@@ -256,8 +243,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 	__flush_tlb_mm(mm);
 	flush_tlb_common(mm, FLUSH_ALL, 0);
 	/* No more mappings in other CPUs */
-	cpus_clear(mm->cpu_vm_mask);
-	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 }
 
 void flush_tlb_page(struct vm_area_struct *vma,
@@ -282,10 +269,10 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
 	int ret = 0;
 
 	/* Calculate CPUs to send to. */
-	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
+	cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);
 
 	/* Send the IPI. */
-	for_each_cpu_mask(i, cpu_mask)
+	for_each_cpu(i, &cpu_mask)
 	{
 		ipi.vector |= vector;
 		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
@@ -293,7 +280,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
 
 	/* Wait for IPI to finish on other CPUS */
 	if (wait) {
-		for_each_cpu_mask(i, cpu_mask) {
+		for_each_cpu(i, &cpu_mask) {
 			int j;
 			for (j = 0 ; j < 1000; j++) {
 				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
@@ -319,11 +306,12 @@
  */
 int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
-	cpumask_t cpu_mask = CPU_MASK_ALL;
+	cpumask_t cpu_mask;
 	struct call_data_struct data;
 	int ret;
 
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_setall(&cpu_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	WARN_ON(irqs_disabled());
 
@@ -347,15 +335,18 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
 
 	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
 
+	if (ipi.vector & IPI_SCHEDULE) {
+		scheduler_ipi();
+	}
 	if (ipi.vector & IPI_CALL) {
-	 	func(info);
+		func(info);
 	}
 	if (ipi.vector & IPI_FLUSH_TLB) {
-	 	if (flush_mm == FLUSH_ALL)
-	 		__flush_tlb_all();
-	 	else if (flush_vma == FLUSH_ALL)
+		if (flush_mm == FLUSH_ALL)
+			__flush_tlb_all();
+		else if (flush_vma == FLUSH_ALL)
 			__flush_tlb_mm(flush_mm);
-		else
+		else
 			__flush_tlb_page(flush_vma, flush_addr);
 	}
 
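
Most of the churn above is the conversion from the old open-coded cpumask operations (cpu_set, cpu_clear, cpus_and, for_each_cpu_mask, and the global cpu_online_map) to the pointer-based cpumask_*() and set_cpu_online()/set_cpu_possible() helpers. A minimal illustrative sketch of the new-style pattern follows; it is not part of this file, and the helper name build_ipi_mask is hypothetical:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Collect all online CPUs except the calling one, using the
 * struct cpumask pointer API that this diff converts to. */
static void build_ipi_mask(struct cpumask *mask)
{
	cpumask_copy(mask, cpu_online_mask);          /* old: cpu_mask = cpu_online_map;               */
	cpumask_clear_cpu(smp_processor_id(), mask);  /* old: cpu_clear(smp_processor_id(), cpu_mask); */
}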
