Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--   arch/arm/kernel/smp.c   301
1 file changed, 135 insertions(+), 166 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e..7c4fada440f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
+#include <linux/irq_work.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -41,10 +42,10 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
-#include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
+#include <asm/mpu.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -57,7 +58,7 @@ struct secondary_data secondary_data;
 
 /*
  * control for which core is the next to come out of the secondary
  * boot "holding pen"
  */
-volatile int __cpuinitdata pen_release = -1;
+volatile int pen_release = -1;
 
@@ -66,6 +67,8 @@ enum ipi_msg_type {
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
+       IPI_IRQ_WORK,
+       IPI_COMPLETION,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -78,7 +81,14 @@ void __init smp_set_ops(struct smp_operations *ops)
                smp_ops = *ops;
 };
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+static unsigned long get_arch_pgd(pgd_t *pgd)
+{
+       phys_addr_t pgdir = virt_to_idmap(pgd);
+       BUG_ON(pgdir & ARCH_PGD_MASK);
+       return pgdir >> ARCH_PGD_SHIFT;
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
 
@@ -87,10 +97,15 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-       secondary_data.pgdir = virt_to_phys(idmap_pgd);
-       secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
-       __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
-       outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
+#ifdef CONFIG_ARM_MPU
+       secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+#endif
+
+#ifdef CONFIG_MMU
+       secondary_data.pgdir = get_arch_pgd(idmap_pgd);
+       secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
+#endif
+       sync_cache_w(&secondary_data);
 
        /*
         * Now bring the CPU into our world.
@@ -112,9 +127,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }
 
-       secondary_data.stack = NULL;
-       secondary_data.pgdir = 0;
+       memset(&secondary_data, 0, sizeof(secondary_data));
        return ret;
 }
 
@@ -125,28 +139,24 @@ void __init smp_init_cpus(void)
                smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-       if (smp_ops.smp_prepare_cpus)
-               smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-       if (smp_ops.smp_secondary_init)
-               smp_ops.smp_secondary_init(cpu);
-}
-
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        if (smp_ops.smp_boot_secondary)
                return smp_ops.smp_boot_secondary(cpu, idle);
        return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
 #ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
+       if (smp_ops.cpu_kill)
+               return 1;
+#endif
+
+       return 0;
+}
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int platform_cpu_kill(unsigned int cpu)
 {
        if (smp_ops.cpu_kill)
@@ -154,12 +164,6 @@ static int platform_cpu_kill(unsigned int cpu)
        return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-       if (smp_ops.cpu_die)
-               smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
        if (smp_ops.cpu_disable)
@@ -175,7 +179,7 @@ static int platform_cpu_disable(unsigned int cpu)
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuinit __cpu_disable(void)
+int __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
        int ret;
@@ -196,11 +200,6 @@ int __cpu_disable(void)
        migrate_irqs();
 
        /*
-        * Stop the local timer for this CPU.
-        */
-       percpu_timer_stop();
-
-       /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         *
@@ -221,7 +220,7 @@ static DECLARE_COMPLETION(cpu_died);
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuinit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -229,6 +228,13 @@ void __cpu_die(unsigned int cpu)
        }
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
 
+       /*
+        * platform_cpu_kill() is generally expected to do the powering off
+        * and/or cutting of clocks to the dying CPU.  Optionally, this may
+        * be done by the CPU which is dying in preference to supporting
+        * this call, but that means there is _no_ synchronisation between
+        * the requesting CPU and the dying CPU actually losing power.
+        */
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
 }
@@ -248,16 +254,47 @@ void __ref cpu_die(void)
        idle_task_exit();
 
        local_irq_disable();
-       mb();
 
-       /* Tell __cpu_die() that this CPU is now safe to dispose of */
-       RCU_NONIDLE(complete(&cpu_died));
+       /*
+        * Flush the data out of the L1 cache for this CPU.  This must be
+        * before the completion to ensure that data is safely written out
+        * before platform_cpu_kill() gets called - which may disable
+        * *this* CPU and power down its cache.
+        */
+       flush_cache_louis();
+
+       /*
+        * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
+        * this returns, power and/or clocks can be removed at any point
+        * from this CPU and its cache by platform_cpu_kill().
+        */
+       complete(&cpu_died);
 
        /*
-        * actual CPU shutdown procedure is at least platform (if not
-        * CPU) specific.
+        * Ensure that the cache lines associated with that completion are
+        * written out.  This covers the case where _this_ CPU is doing the
+        * powering down, to ensure that the completion is visible to the
+        * CPU waiting for this one.
         */
-       platform_cpu_die(cpu);
+       flush_cache_louis();
+
+       /*
+        * The actual CPU shutdown procedure is at least platform (if not
+        * CPU) specific.  This may remove power, or it may simply spin.
+        *
+        * Platforms are generally expected *NOT* to return from this call,
+        * although there are some which do because they have no way to
+        * power down the CPU.  These platforms are the _only_ reason we
+        * have a return path which uses the fragment of assembly below.
+        *
+        * The return path should not be used for platforms which can
+        * power off the CPU.
+        */
+       if (smp_ops.cpu_die)
+               smp_ops.cpu_die(cpu);
+
+       pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
+               cpu);
 
        /*
         * Do not return to the idle loop - jump back to the secondary
@@ -276,7 +313,7 @@ void __ref cpu_die(void)
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
-static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+static void smp_store_cpu_info(unsigned int cpuid)
 {
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
@@ -286,13 +323,11 @@ static void smp_store_cpu_info(unsigned int cpuid)
 
        store_cpu_topology(cpuid);
 }
 
-static void percpu_timer_setup(void);
-
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
 {
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;
@@ -302,6 +337,7 @@ asmlinkage void secondary_start_kernel(void)
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
+       local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
 
@@ -324,7 +360,8 @@ asmlinkage void secondary_start_kernel(void)
        /*
         * Give the platform a chance to do its own initialisation.
         */
-       platform_secondary_init(cpu);
+       if (smp_ops.smp_secondary_init)
+               smp_ops.smp_secondary_init(cpu);
 
        notify_cpu_starting(cpu);
 
@@ -340,33 +377,19 @@ asmlinkage void secondary_start_kernel(void)
        set_cpu_online(cpu, true);
        complete(&cpu_running);
 
-       /*
-        * Setup the percpu timer for this CPU.
-        */
-       percpu_timer_setup();
-
        local_irq_enable();
        local_fiq_enable();
 
        /*
         * OK, it's off to the idle thread for us
         */
-       cpu_idle();
+       cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-       int cpu;
-       unsigned long bogosum = 0;
-
-       for_each_online_cpu(cpu)
-               bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
-
-       printk(KERN_INFO "SMP: Total of %d processors activated "
-              "(%lu.%02lu BogoMIPS).\n",
-              num_online_cpus(),
-              bogosum / (500000/HZ),
-              (bogosum / (5000/HZ)) % 100);
+       printk(KERN_INFO "SMP: Total of %d processors activated.\n",
+              num_online_cpus());
 
        hyp_mode_check();
 }
@@ -391,16 +414,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
-                * Enable the local timer or broadcast device for the
-                * boot CPU, but only if we have more than one CPU.
-                */
-               percpu_timer_setup();
-
-               /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time.  A platform should
-                * re-initialize the map in platform_smp_prepare_cpus() if
-                * present != possible (e.g. physical hotplug).
+                * re-initialize the map in the platforms smp_prepare_cpus()
+                * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);
 
@@ -408,7 +425,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                /*
                 * Initialise the SCU if there are more than one CPU
                 * and let them know where to start.
                 */
-               platform_smp_prepare_cpus(max_cpus);
+               if (smp_ops.smp_prepare_cpus)
+                       smp_ops.smp_prepare_cpus(max_cpus);
        }
 }
 
@@ -416,7 +434,8 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-       smp_cross_call = fn;
+       if (!smp_cross_call)
+               smp_cross_call = fn;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -434,6 +453,14 @@ void arch_send_call_function_single_ipi(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+       if (is_smp())
+               smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static const char *ipi_types[NR_IPI] = {
 #define S(x,s) [x] = s
        S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -442,6 +469,8 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
+       S(IPI_IRQ_WORK, "IRQ work interrupts"),
+       S(IPI_COMPLETION, "completion interrupts"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -470,86 +499,11 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
        return sum;
 }
 
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
-static void ipi_timer(void)
-{
-       struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-       evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
        smp_cross_call(mask, IPI_TIMER);
 }
-#else
-#define smp_timer_broadcast    NULL
-#endif
-
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
-       struct clock_event_device *evt)
-{
-}
-
-static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
-{
-       evt->name       = "dummy_timer";
-       evt->features   = CLOCK_EVT_FEAT_ONESHOT |
-                         CLOCK_EVT_FEAT_PERIODIC |
-                         CLOCK_EVT_FEAT_DUMMY;
-       evt->rating     = 400;
-       evt->mult       = 1;
-       evt->set_mode   = broadcast_timer_set_mode;
-
-       clockevents_register_device(evt);
-}
-
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
-       if (!is_smp() || !setup_max_cpus)
-               return -ENXIO;
-
-       if (lt_ops)
-               return -EBUSY;
-
-       lt_ops = ops;
-       return 0;
-}
-#endif
-
-static void __cpuinit percpu_timer_setup(void)
-{
-       unsigned int cpu = smp_processor_id();
-       struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-       evt->cpumask = cpumask_of(cpu);
-       evt->broadcast = smp_timer_broadcast;
-
-       if (!lt_ops || lt_ops->setup(evt))
-               broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
-       unsigned int cpu = smp_processor_id();
-       struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-       if (lt_ops)
-               lt_ops->stop(evt);
-}
 #endif
 
 static DEFINE_RAW_SPINLOCK(stop_lock);
@@ -576,6 +530,19 @@ static void ipi_cpu_stop(unsigned int cpu)
                cpu_relax();
 }
 
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+       per_cpu(cpu_completion, cpu) = completion;
+       return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+       complete(per_cpu(cpu_completion, cpu));
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -596,11 +563,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        case IPI_WAKEUP:
                break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
-               ipi_timer();
+               tick_receive_broadcast();
                irq_exit();
                break;
+#endif
 
        case IPI_RESCHEDULE:
                scheduler_ipi();
@@ -624,6 +593,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                irq_exit();
                break;
 
+#ifdef CONFIG_IRQ_WORK
+       case IPI_IRQ_WORK:
+               irq_enter();
+               irq_work_run();
+               irq_exit();
+               break;
+#endif
+
+       case IPI_COMPLETION:
+               irq_enter();
+               ipi_complete(cpu);
+               irq_exit();
+               break;
+
        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
@@ -637,17 +620,6 @@ void smp_send_reschedule(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
-       unsigned int cpu;
-       for_each_cpu(cpu, mask)
-               platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
 void smp_send_stop(void)
 {
        unsigned long timeout;
@@ -665,8 +637,6 @@ void smp_send_stop(void)
 
        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
-
-       smp_kill_cpus(&mask);
 }
 
 /*
@@ -704,8 +674,7 @@ static int cpufreq_callback(struct notifier_block *nb,
        }
 
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);
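A few notes on the less obvious changes, added here for the reader rather than taken from the patch itself. In __cpu_up(), sync_cache_w() replaces the open-coded D-cache flush plus outer-cache clean of secondary_data, and get_arch_pgd() exists because with LPAE the page tables can sit above the 32-bit physical boundary: the address is stored pre-shifted, trading the pgd's alignment bits for the high bits. A sketch of the identity being relied on (ARCH_PGD_SHIFT/ARCH_PGD_MASK come from this era's asm/memory.h; the two helpers are illustrative only):

#include <linux/bug.h>
#include <linux/types.h>
#include <asm/memory.h>		/* ARCH_PGD_SHIFT, ARCH_PGD_MASK */

static u32 pack_pgdir(phys_addr_t pgdir)
{
	/* alignment guarantees the low bits are zero, so none are lost */
	BUG_ON(pgdir & ARCH_PGD_MASK);
	return pgdir >> ARCH_PGD_SHIFT;	/* fits a 32-bit mailbox word */
}

static phys_addr_t unpack_pgdir(u32 packed)
{
	/* the secondary's bring-up assembly does this shift back
	 * before enabling the MMU */
	return (phys_addr_t)packed << ARCH_PGD_SHIFT;
}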
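The patch also finishes the move to struct smp_operations: the platform_smp_prepare_cpus(), platform_secondary_init() and platform_cpu_die() wrappers are gone, the generic code calls the hooks directly and skips NULL ones, and platform_can_cpu_hotplug() reports hotplug support purely from whether .cpu_kill is provided. A sketch of what a platform supplies under this scheme (the "foo" names and hook bodies are placeholders, not from this patch):

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/smp.h>
#include <asm/proc-fns.h>

static void __init foo_smp_prepare_cpus(unsigned int max_cpus)
{
	/* e.g. enable the SCU and write the secondary boot mailbox */
}

static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* release 'cpu' from the boot "holding pen"; 0 on success */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int foo_cpu_kill(unsigned int cpu)
{
	/* cut power/clocks to 'cpu'; non-zero means it was killed */
	return 1;
}

static void foo_cpu_die(unsigned int cpu)
{
	/* runs on the dying CPU; ideally powers off and never returns
	 * (see the cpu_die() comments above) */
	for (;;)
		cpu_do_idle();
}
#endif

struct smp_operations foo_smp_ops __initdata = {
	.smp_prepare_cpus	= foo_smp_prepare_cpus,
	.smp_boot_secondary	= foo_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= foo_cpu_kill,
	.cpu_die		= foo_cpu_die,
#endif
};

The ops reach the kernel either through the machine descriptor's .smp field or an early smp_set_ops(&foo_smp_ops); leaving .smp_secondary_init unset is now harmless, since secondary_start_kernel() tests the pointer before calling it.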
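The IPI_IRQ_WORK additions give the generic irq_work machinery a self-IPI path: irq_work_queue() ends up in arch_irq_work_raise(), which sends IPI_IRQ_WORK to the local CPU, and handle_IPI() then runs the queued callbacks in hard-IRQ context via irq_work_run(). A minimal sketch of a user (my_work and its callback are illustrative):

#include <linux/irq_work.h>
#include <linux/printk.h>

static void my_work_fn(struct irq_work *work)
{
	/* runs on the queueing CPU in hard-IRQ context, shortly after
	 * the IPI_IRQ_WORK interrupt fires */
	pr_info("deferred work ran via IPI_IRQ_WORK\n");
}

static struct irq_work my_work = { .func = my_work_fn };

/* callable from contexts too restricted even to raise a softirq;
 * the real execution is deferred to the IPI handler above */
static void poke(void)
{
	irq_work_queue(&my_work);
}

Note the is_smp() guard in arch_irq_work_raise(): when not running SMP no self-IPI is raised, and pending work is instead picked up by the generic fallback (in this era of the tree, from the timer tick).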
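Finally, the IPI_COMPLETION pair register_ipi_completion()/ipi_complete() inverts the usual direction of a completion: the waiting CPU registers a completion against itself, hands the returned IPI number to the other party, and is completed from its own interrupt handler when that IPI arrives - so the remote side only needs to be able to raise an interrupt, which matters when it is barely alive (for instance mid power-up). A hedged sketch of the waiter's side (tell_remote() is a hypothetical hand-off channel, not an API from this patch):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

/* Hypothetical: publish (waiter_cpu, ipinr) somewhere the remote CPU
 * will find it, so it can raise that IPI (e.g. a GIC SGI) at us. */
extern void tell_remote(int remote_cpu, int waiter_cpu, int ipinr);

static int wait_for_remote_ack(int remote_cpu)
{
	struct completion inbound_alive;
	int cpu = smp_processor_id();	/* caller assumed pinned here */
	int ipinr;

	init_completion(&inbound_alive);

	/* hang the completion on *this* CPU's cpu_completion slot; the
	 * return value is the IPI number (IPI_COMPLETION) the remote
	 * side must send back to us */
	ipinr = register_ipi_completion(&inbound_alive, cpu);

	tell_remote(remote_cpu, cpu, ipinr);

	/* handle_IPI() on this CPU runs ipi_complete(cpu) from IRQ
	 * context when the remote side fires, finishing this wait */
	if (!wait_for_completion_timeout(&inbound_alive,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}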
