Diffstat (limited to 'arch/mips/kernel/smp.c')
-rw-r--r--	arch/mips/kernel/smp.c	167
1 file changed, 101 insertions(+), 66 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6cdca1956b7..9bad52ede90 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -34,21 +34,22 @@
 #include <linux/err.h>
 #include <linux/ftrace.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/idle.h>
 #include <asm/r4k-timer.h>
-#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
-
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
+#include <asm/setup.h>
 
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
+
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
 
 /* Number of TCs (or siblings in Intel speak) per CPU core */
 int smp_num_siblings = 1;
@@ -61,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
+cpumask_t cpu_coherent_mask;
+
 static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
@@ -79,8 +82,9 @@ static inline void set_cpu_sibling_map(int cpu)
 }
 
 struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
 
-__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(struct plat_smp_ops *ops)
 {
 	if (mp_ops)
 		printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -92,17 +96,13 @@
  * First C code run on the secondary CPUs after being started up by
  * the master.
  */
-asmlinkage __cpuinit void start_secondary(void)
+asmlinkage void start_secondary(void)
 {
 	unsigned int cpu;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/* Only do cpu_probe for first TC of CPU */
-	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
-#endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
-	per_cpu_trap_init();
+	per_cpu_trap_init(false);
 	mips_clockevent_init();
 	mp_ops->init_secondary();
 
@@ -116,16 +116,25 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
+	cpu_set(cpu, cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
-	mp_ops->smp_finish();
+	set_cpu_online(cpu, true);
+
 	set_cpu_sibling_map(cpu);
 
 	cpu_set(cpu, cpu_callin_map);
 
-	synchronise_count_slave();
+	synchronise_count_slave(cpu);
 
-	cpu_idle();
+	/*
+	 * irq will be enabled in ->smp_finish(), enabling it too early
+	 * is dangerous.
+	 */
+	WARN_ON_ONCE(!irqs_disabled());
+	mp_ops->smp_finish();
+
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /*
@@ -134,7 +143,6 @@ asmlinkage __cpuinit void start_secondary(void)
 void __irq_entry smp_call_function_interrupt(void)
 {
 	irq_enter();
-	generic_smp_call_function_single_interrupt();
 	generic_smp_call_function_interrupt();
 	irq_exit();
 }
@@ -144,7 +152,7 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	for (;;) {
 		if (cpu_wait)
 			(*cpu_wait)();	/* Wait if available. */
@@ -158,8 +166,6 @@ void smp_send_stop(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	mp_ops->cpus_done();
-	synchronise_count_master();
 }
 
 /* called from main before smp_init() */
@@ -170,46 +176,22 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 #endif
+	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
 }
 
 /* preload SMP state for boot cpu */
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
 {
 	set_cpu_possible(0, true);
 	set_cpu_online(0, true);
 	cpu_set(0, cpu_callin_map);
 }
 
-/*
- * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
- * physical, not logical.
- */
-static struct task_struct *cpu_idle_thread[NR_CPUS];
-
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	struct task_struct *idle;
-
-	/*
-	 * Processor goes to start_secondary(), sets online flag
-	 * The following code is purely to make sure
-	 * Linux can schedule processes on this slave.
-	 */
-	if (!cpu_idle_thread[cpu]) {
-		idle = fork_idle(cpu);
-		cpu_idle_thread[cpu] = idle;
-
-		if (IS_ERR(idle))
-			panic(KERN_ERR "Fork failed for CPU %d", cpu);
-	} else {
-		idle = cpu_idle_thread[cpu];
-		init_idle(idle, cpu);
-	}
-
-	mp_ops->boot_secondary(cpu, idle);
+	mp_ops->boot_secondary(cpu, tidle);
 
 	/*
 	 * Trust is futile.  We should really have timeouts ...
@@ -217,8 +199,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 */
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
-	cpu_set(cpu, cpu_online_map);
-
+	synchronise_count_master(cpu);
 	return 0;
 }
 
@@ -250,13 +231,10 @@ static void flush_tlb_mm_ipi(void *mm)
  *  o collapses to normal function call on UP kernels
  *  o collapses to normal function call on systems with a single shared
  *    primary cache.
- *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
  */
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
-#ifndef CONFIG_MIPS_MT_SMTC
 	smp_call_function(func, info, 1);
-#endif
 }
 
 static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
@@ -289,13 +267,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 
 	local_flush_tlb_mm(mm);
@@ -329,13 +306,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
@@ -376,13 +352,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, vma->vm_mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
 				cpu_context(cpu, vma->vm_mm) = 0;
+		}
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
@@ -402,3 +377,63 @@ void flush_tlb_one(unsigned long vaddr)
 
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
+
+#if defined(CONFIG_KEXEC)
+void (*dump_ipi_function_ptr)(void *) = NULL;
+void dump_send_ipi(void (*dump_ipi_callback)(void *))
+{
+	int i;
+	int cpu = smp_processor_id();
+
+	dump_ipi_function_ptr = dump_ipi_callback;
+	smp_mb();
+	for_each_online_cpu(i)
+		if (i != cpu)
+			mp_ops->send_ipi_single(i, SMP_DUMP);
+
+}
+EXPORT_SYMBOL(dump_send_ipi);
+#endif
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
+static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+
+void tick_broadcast(const struct cpumask *mask)
+{
+	atomic_t *count;
+	struct call_single_data *csd;
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		count = &per_cpu(tick_broadcast_count, cpu);
+		csd = &per_cpu(tick_broadcast_csd, cpu);
+
+		if (atomic_inc_return(count) == 1)
+			smp_call_function_single_async(cpu, csd);
+	}
+}
+
+static void tick_broadcast_callee(void *info)
+{
+	int cpu = smp_processor_id();
+	tick_receive_broadcast();
+	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
+}
+
+static int __init tick_broadcast_init(void)
+{
+	struct call_single_data *csd;
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		csd = &per_cpu(tick_broadcast_csd, cpu);
+		csd->func = tick_broadcast_callee;
+	}
+
+	return 0;
+}
+early_initcall(tick_broadcast_init);
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
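A note on the tick_broadcast() code added at the end of this diff: it coalesces broadcast IPIs with a per-CPU atomic counter, and only the 0 -> 1 transition queues the per-CPU call_single_data. That matters because a csd must not be passed to smp_call_function_single_async() again while a previous request is still in flight; CPUs that have been signalled but not yet serviced only get their counter bumped, and tick_broadcast_callee() resets the counter to re-arm delivery. The program below is a minimal userspace analogue of that pattern, assuming only C11 atomics and POSIX threads; the identifiers (poke, worker, pending) are hypothetical stand-ins for the kernel's IPI machinery, not a reproduction of it.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int pending;	/* plays the role of tick_broadcast_count */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static int done;

/* Analogue of tick_broadcast(): only the 0 -> 1 transition wakes the
 * target; later requests coalesce into the wakeup already in flight. */
static void poke(void)
{
	if (atomic_fetch_add(&pending, 1) == 0) {
		pthread_mutex_lock(&m);
		pthread_cond_signal(&c);	/* the "IPI" */
		pthread_mutex_unlock(&m);
	}
}

/* Analogue of tick_broadcast_callee(): service the request, then reset
 * the counter to zero so the next poke() sends a fresh wakeup. */
static void *worker(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&m);
	while (!done) {
		while (atomic_load(&pending) == 0 && !done)
			pthread_cond_wait(&c, &m);
		if (atomic_load(&pending) != 0)
			printf("serviced %d coalesced poke(s)\n",
			       atomic_exchange(&pending, 0));
	}
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	pthread_create(&t, NULL, worker, NULL);
	for (i = 0; i < 5; i++)
		poke();		/* a burst collapses into one wakeup */
	sleep(1);

	pthread_mutex_lock(&m);
	done = 1;
	pthread_cond_signal(&c);
	pthread_mutex_unlock(&m);
	pthread_join(t, NULL);
	return 0;
}

The kernel code above has the same shape: atomic_inc_return(count) == 1 gates smp_call_function_single_async(), and the atomic_set(..., 0) in the callee is what re-arms the next broadcast.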
