Diffstat (limited to 'arch/mips/kernel/smp.c')
-rw-r--r--	arch/mips/kernel/smp.c	| 329
1 file changed, 120 insertions(+), 209 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9d41dab90a8..9bad52ede90 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -22,6 +22,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
 #include <linux/module.h>
@@ -31,28 +32,24 @@
 #include <linux/cpumask.h>
 #include <linux/cpu.h>
 #include <linux/err.h>
+#include <linux/ftrace.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/idle.h>
+#include <asm/r4k-timer.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
+#include <asm/setup.h>
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
-cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
-int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
-int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 
-EXPORT_SYMBOL(phys_cpu_present_map);
-EXPORT_SYMBOL(cpu_online_map);
+int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
 
-extern void cpu_idle(void);
+int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
 
 /* Number of TCs (or siblings in Intel speak) per CPU core */
 int smp_num_siblings = 1;
@@ -65,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
+cpumask_t cpu_coherent_mask;
+
 static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
@@ -83,11 +82,12 @@ static inline void set_cpu_sibling_map(int cpu)
 }
 
 struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
 
-__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(struct plat_smp_ops *ops)
 {
-	if (ops)
-		printk(KERN_WARNING "Overriding previous set SMP ops\n");
+	if (mp_ops)
+		printk(KERN_WARNING "Overriding previously set SMP ops\n");
 
 	mp_ops = ops;
 }
@@ -96,17 +96,13 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
  * First C code run on the secondary CPUs after being started up by
  * the master.
  */
-asmlinkage __cpuinit void start_secondary(void)
+asmlinkage void start_secondary(void)
 {
 	unsigned int cpu;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/* Only do cpu_probe for first TC of CPU */
-	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
-#endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
-	per_cpu_trap_init();
+	per_cpu_trap_init(false);
 	mips_clockevent_init();
 	mp_ops->init_secondary();
 
@@ -120,153 +116,35 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
-	mp_ops->smp_finish();
-	set_cpu_sibling_map(cpu);
+	cpu_set(cpu, cpu_coherent_mask);
+	notify_cpu_starting(cpu);
 
-	cpu_set(cpu, cpu_callin_map);
+	set_cpu_online(cpu, true);
 
-	cpu_idle();
-}
+	set_cpu_sibling_map(cpu);
 
-DEFINE_SPINLOCK(smp_call_lock);
+	cpu_set(cpu, cpu_callin_map);
 
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- *  <mask>	cpuset_t of all processors to run the function on.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A                               CPU B
- * Disable interrupts
- *                                     smp_call_function()
- *                                     Take call_lock
- *                                     Send IPIs
- *                                     Wait for all cpus to acknowledge IPI
- *                                     CPU A has not responded, spin waiting
- *                                     for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock                            Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-	void *info, int retry, int wait)
-{
-	struct call_data_struct data;
-	int cpu = smp_processor_id();
-	int cpus;
+	synchronise_count_slave(cpu);
 
 	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
+	 * irq will be enabled in ->smp_finish(), enabling it too early
+	 * is dangerous.
 	 */
-	BUG_ON(!cpu_online(cpu));
-
-	cpu_clear(cpu, mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock(&smp_call_lock);
-	call_data = &data;
-	smp_mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
-	/* Wait for response */
-	/* FIXME: lock-up detection, backtrace on lock-up */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	call_data = NULL;
-	spin_unlock(&smp_call_lock);
-
-	return 0;
-}
+	WARN_ON_ONCE(!irqs_disabled());
+	mp_ops->smp_finish();
 
-int smp_call_function(void (*func) (void *info), void *info, int retry,
-	int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
-void smp_call_function_interrupt(void)
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
+void __irq_entry smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function.
-	 */
-	smp_mb();
-	atomic_inc(&call_data->started);
-
-	/*
-	 * At this point the info structure may be out of scope unless wait==1.
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	irq_exit();
-
-	if (wait) {
-		smp_mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int retry, int wait)
-{
-	int ret, me;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	if (!cpu_online(cpu))
-		return 0;
-
-	me = get_cpu();
-	BUG_ON(!cpu_online(me));
-
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
-				     wait);
-
-	put_cpu();
-	return 0;
 }
 
 static void stop_this_cpu(void *dummy)
@@ -274,19 +152,20 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
-	local_irq_enable();	/* May need to service _machine_restart IPI */
-	for (;;);		/* Wait if available. */
+	set_cpu_online(smp_processor_id(), false);
+	for (;;) {
+		if (cpu_wait)
+			(*cpu_wait)();	/* Wait if available. */
+	}
 }
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	mp_ops->cpus_done();
 }
 
 /* called from main before smp_init() */
@@ -297,43 +176,22 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(cpu_possible_mask);
 #endif
+	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
 }
 
 /* preload SMP state for boot cpu */
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
 {
-	/*
-	 * This assumes that bootup is always handled by the processor
-	 * with the logic and physical number 0.
-	 */
-	__cpu_number_map[0] = 0;
-	__cpu_logical_map[0] = 0;
-	cpu_set(0, phys_cpu_present_map);
-	cpu_set(0, cpu_online_map);
+	set_cpu_possible(0, true);
+	set_cpu_online(0, true);
 	cpu_set(0, cpu_callin_map);
 }
 
-/*
- * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
- * physical, not logical.
- */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	struct task_struct *idle;
-
-	/*
-	 * Processor goes to start_secondary(), sets online flag
-	 * The following code is purely to make sure
-	 * Linux can schedule processes on this slave.
-	 */
-	idle = fork_idle(cpu);
-	if (IS_ERR(idle))
-		panic(KERN_ERR "Fork failed for CPU %d", cpu);
-
-	mp_ops->boot_secondary(cpu, idle);
+	mp_ops->boot_secondary(cpu, tidle);
 
 	/*
 	 * Trust is futile.  We should really have timeouts ...
@@ -341,8 +199,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
-	cpu_set(cpu, cpu_online_map);
-
+	synchronise_count_master(cpu);
 	return 0;
 }
 
@@ -359,7 +216,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -374,13 +231,10 @@ static void flush_tlb_mm_ipi(void *mm)
  *  o collapses to normal function call on UP kernels
  *  o collapses to normal function call on systems with a single shared
  *    primary cache.
- *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
  */
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
-#ifndef CONFIG_MIPS_MT_SMTC
-	smp_call_function(func, info, 1, 1);
-#endif
+	smp_call_function(func, info, 1);
 }
 
 static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
@@ -413,13 +267,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 
 	local_flush_tlb_mm(mm);
@@ -453,13 +306,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
@@ -479,7 +331,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		.addr2 = end,
 	};
 
-	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
@@ -500,13 +352,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, vma->vm_mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
 				cpu_context(cpu, vma->vm_mm) = 0;
+		}
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
@@ -526,3 +377,63 @@ void flush_tlb_one(unsigned long vaddr)
 
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
+
+#if defined(CONFIG_KEXEC)
+void (*dump_ipi_function_ptr)(void *) = NULL;
+void dump_send_ipi(void (*dump_ipi_callback)(void *))
+{
+	int i;
+	int cpu = smp_processor_id();
+
+	dump_ipi_function_ptr = dump_ipi_callback;
+	smp_mb();
+	for_each_online_cpu(i)
+		if (i != cpu)
+			mp_ops->send_ipi_single(i, SMP_DUMP);
+
+}
+EXPORT_SYMBOL(dump_send_ipi);
+#endif
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
+static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+
+void tick_broadcast(const struct cpumask *mask)
+{
+	atomic_t *count;
+	struct call_single_data *csd;
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		count = &per_cpu(tick_broadcast_count, cpu);
+		csd = &per_cpu(tick_broadcast_csd, cpu);
+
+		if (atomic_inc_return(count) == 1)
+			smp_call_function_single_async(cpu, csd);
+	}
+}
+
+static void tick_broadcast_callee(void *info)
+{
+	int cpu = smp_processor_id();
+	tick_receive_broadcast();
+	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
+}
+
+static int __init tick_broadcast_init(void)
+{
+	struct call_single_data *csd;
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		csd = &per_cpu(tick_broadcast_csd, cpu);
+		csd->func = tick_broadcast_callee;
+	}
+
+	return 0;
+}
+early_initcall(tick_broadcast_init);
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
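Two API shifts in this range matter for anyone porting platform code against it. First, the kernel-wide cross-call interface dropped its retry argument, so smp_call_function(func, info, retry, wait) and on_each_cpu(func, info, retry, wait) become three-argument calls. Second, the hand-rolled call_data/smp_call_lock machinery is deleted outright: the shared IPI handler now just dispatches to the generic generic_smp_call_function_interrupt(). A minimal caller sketch against the new signatures follows; the callback name and its body are illustrative, not part of this commit:

	#include <linux/smp.h>

	/*
	 * Illustrative IPI callback: it runs on each target CPU in
	 * interrupt context, so it must be fast, non-blocking, and
	 * must never sleep.
	 */
	static void poke_cpu_ipi(void *info)
	{
		/* per-CPU work goes here */
	}

	static void poke_everyone(void)
	{
		/*
		 * New-style call: no retry argument. wait == 1 spins
		 * until every other CPU has finished the callback, so
		 * interrupts must be enabled here (the deadlock diagram
		 * removed above still applies to callers).
		 */
		smp_call_function(poke_cpu_ipi, NULL, 1);

		/* Same, but also runs the callback on the calling CPU. */
		on_each_cpu(poke_cpu_ipi, NULL, 1);
	}

The new tick_broadcast() path takes a similar care with IPI reuse: the per-CPU atomic counter lets only the first of several overlapping broadcast requests queue the call_single_data, and the callee resets the counter after tick_receive_broadcast(), since handing an already-queued csd back to smp_call_function_single_async() is not allowed.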
