Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--  arch/arm/kernel/smp.c | 159
1 file changed, 57 insertions(+), 102 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9..7c4fada440f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
+#include <linux/irq_work.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -41,7 +42,6 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
-#include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
@@ -67,6 +67,8 @@ enum ipi_msg_type {
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
+	IPI_COMPLETION,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -81,7 +83,7 @@ void __init smp_set_ops(struct smp_operations *ops)
 
 static unsigned long get_arch_pgd(pgd_t *pgd)
 {
-	phys_addr_t pgdir = virt_to_phys(pgd);
+	phys_addr_t pgdir = virt_to_idmap(pgd);
 
 	BUG_ON(pgdir & ARCH_PGD_MASK);
 	return pgdir >> ARCH_PGD_SHIFT;
@@ -103,8 +105,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
 	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
 #endif
-	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
-	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
+	sync_cache_w(&secondary_data);
 
 	/*
 	 * Now bring the CPU into our world.
@@ -145,9 +146,17 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
 #ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int platform_cpu_kill(unsigned int cpu)
 {
 	if (smp_ops.cpu_kill)
@@ -191,11 +200,6 @@ int __cpu_disable(void)
 	migrate_irqs();
 
 	/*
-	 * Stop the local timer for this CPU.
-	 */
-	percpu_timer_stop();
-
-	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
 	 *
@@ -289,6 +293,9 @@ void __ref cpu_die(void)
 	if (smp_ops.cpu_die)
 		smp_ops.cpu_die(cpu);
 
+	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
+		cpu);
+
 	/*
 	 * Do not return to the idle loop - jump back to the secondary
 	 * cpu initialisation. There's some initialisation which needs
@@ -316,8 +323,6 @@ static void smp_store_cpu_info(unsigned int cpuid)
 	store_cpu_topology(cpuid);
 }
 
-static void percpu_timer_setup(void);
-
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -372,11 +377,6 @@ asmlinkage void secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
-	/*
-	 * Setup the percpu timer for this CPU.
-	 */
-	percpu_timer_setup();
-
 	local_irq_enable();
 	local_fiq_enable();
 
@@ -388,17 +388,8 @@ asmlinkage void secondary_start_kernel(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	int cpu;
-	unsigned long bogosum = 0;
-
-	for_each_online_cpu(cpu)
-		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
-
-	printk(KERN_INFO "SMP: Total of %d processors activated "
-	       "(%lu.%02lu BogoMIPS).\n",
-	       num_online_cpus(),
-	       bogosum / (500000/HZ),
-	       (bogosum / (5000/HZ)) % 100);
+	printk(KERN_INFO "SMP: Total of %d processors activated.\n",
+	       num_online_cpus());
 
 	hyp_mode_check();
 }
@@ -423,12 +414,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = ncores;
 	if (ncores > 1 && max_cpus) {
 		/*
-		 * Enable the local timer or broadcast device for the
-		 * boot CPU, but only if we have more than one CPU.
-		 */
-		percpu_timer_setup();
-
-		/*
 		 * Initialise the present map, which describes the set of CPUs
 		 * actually populated at the present time. A platform should
 		 * re-initialize the map in the platforms smp_prepare_cpus()
@@ -468,6 +453,14 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	if (is_smp())
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static const char *ipi_types[NR_IPI] = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -476,6 +469,8 @@ static const char *ipi_types[NR_IPI] = {
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_IRQ_WORK, "IRQ work interrupts"),
+	S(IPI_COMPLETION, "completion interrupts"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -504,11 +499,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
@@ -516,67 +506,6 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *evt)
-{
-}
-
-static void broadcast_timer_setup(struct clock_event_device *evt)
-{
-	evt->name	= "dummy_timer";
-	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
-			  CLOCK_EVT_FEAT_PERIODIC |
-			  CLOCK_EVT_FEAT_DUMMY;
-	evt->rating	= 100;
-	evt->mult	= 1;
-	evt->set_mode	= broadcast_timer_set_mode;
-
-	clockevents_register_device(evt);
-}
-
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
-	if (!is_smp() || !setup_max_cpus)
-		return -ENXIO;
-
-	if (lt_ops)
-		return -EBUSY;
-
-	lt_ops = ops;
-	return 0;
-}
-#endif
-
-static void percpu_timer_setup(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	evt->cpumask = cpumask_of(cpu);
-
-	if (!lt_ops || lt_ops->setup(evt))
-		broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	if (lt_ops)
-		lt_ops->stop(evt);
-}
-#endif
-
 static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*
@@ -601,6 +530,19 @@ static void ipi_cpu_stop(unsigned int cpu)
 		cpu_relax();
 }
 
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+	per_cpu(cpu_completion, cpu) = completion;
+	return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+	complete(per_cpu(cpu_completion, cpu));
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -651,6 +593,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		irq_exit();
 		break;
 
+#ifdef CONFIG_IRQ_WORK
+	case IPI_IRQ_WORK:
+		irq_enter();
+		irq_work_run();
+		irq_exit();
+		break;
+#endif
+
+	case IPI_COMPLETION:
+		irq_enter();
+		ipi_complete(cpu);
+		irq_exit();
+		break;
+
 	default:
 		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
 		       cpu, ipinr);
@@ -718,8 +674,7 @@ static int cpufreq_callback(struct notifier_block *nb,
 	}
 
 	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
 		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
 						global_l_p_j_ref_freq,
 						freq->new);
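A few notes on the changes above. platform_can_cpu_hotplug() gives generic code a way to ask whether the platform can actually take a CPU down: it only returns 1 when smp_ops.cpu_kill is wired up under CONFIG_HOTPLUG_CPU. A minimal sketch of a caller follows; the surrounding function and its error policy are hypothetical, not part of this diff:

/* Hypothetical guard in a path that needs every other CPU offlined:
 * bail out early if this platform cannot really kill a CPU. */
static int my_prepare_single_cpu_op(void)
{
	if (num_online_cpus() > 1 && !platform_can_cpu_hotplug())
		return -EINVAL;
	return 0;
}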
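The new IPI_IRQ_WORK message plugs ARM into the generic irq_work machinery: irq_work_queue() ends up in arch_irq_work_raise(), which self-IPIs, and handle_IPI() then runs the queued callbacks through irq_work_run() in hard-irq context. A minimal sketch of a consumer, using only the standard <linux/irq_work.h> API; the my_* names are made up:

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static struct irq_work my_work;

/* Runs from handle_IPI() -> irq_work_run(), in hard-irq context. */
static void my_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran on CPU%u\n", smp_processor_id());
}

static void my_init(void)
{
	init_irq_work(&my_work, my_work_fn);
}

/* Callable from contexts that cannot take normal interrupt-driven
 * action directly: the real work is deferred via IPI_IRQ_WORK. */
static void my_poke(void)
{
	irq_work_queue(&my_work);
}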
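IPI_COMPLETION is a small rendezvous primitive: a CPU publishes a struct completion via register_ipi_completion(), arranges for some other agent to raise that IPI at it, and sleeps in wait_for_completion() until ipi_complete() fires on its behalf. A rough sketch of the calling pattern implied by the diff; how the returned IPI number reaches the other side (a mailbox, a GIC poke, ...) is platform-specific and left hypothetical here:

#include <linux/completion.h>
#include <linux/smp.h>

/* Sketch only: the caller is assumed to be pinned to the current CPU. */
static void my_wait_for_remote_agent(void)
{
	struct completion done;
	int ipi_nr;

	init_completion(&done);

	/* Publish 'done' for this CPU; the return value is IPI_COMPLETION. */
	ipi_nr = register_ipi_completion(&done, smp_processor_id());

	/* ... hand ipi_nr to the remote agent so it can raise that IPI ... */

	/* handle_IPI() -> ipi_complete() on this CPU wakes us up. */
	wait_for_completion(&done);
}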
