Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--	arch/powerpc/kernel/smp.c	549
1 file changed, 369 insertions(+), 180 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bbf2e4..1007fb802e6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -18,7 +18,7 @@
 #undef DEBUG
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
@@ -27,14 +27,16 @@
 #include <linux/spinlock.h>
 #include <linux/cache.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/topology.h>
 
 #include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/irq.h>
+#include <asm/hw_irq.h>
+#include <asm/kvm_ppc.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/prom.h>
@@ -43,12 +45,13 @@
 #include <asm/machdep.h>
 #include <asm/cputhreads.h>
 #include <asm/cputable.h>
-#include <asm/system.h>
 #include <asm/mpic.h>
 #include <asm/vdso_datapage.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #endif
+#include <asm/vdso.h>
+#include <asm/debug.h>
 
 #ifdef DEBUG
 #include <asm/udbg.h>
@@ -57,6 +60,11 @@
 #define DBG(fmt...)
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+/* State of each CPU during hotplug phases */
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -75,8 +83,30 @@ int smt_enabled_at_boot = 1;
 
 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
+/*
+ * Returns 1 if the specified cpu should be brought up during boot.
+ * Used to inhibit booting threads if they've been disabled or
+ * limited on the command line
+ */
+int smp_generic_cpu_bootable(unsigned int nr)
+{
+	/* Special case - we inhibit secondary thread startup
+	 * during boot if the user requests it.
+	 */
+	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
+		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+			return 0;
+		if (smt_enabled_at_boot
+		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+			return 0;
+	}
+
+	return 1;
+}
+
+
 #ifdef CONFIG_PPC64
-void __devinit smp_generic_kick_cpu(int nr)
+int smp_generic_kick_cpu(int nr)
 {
 	BUG_ON(nr < 0 || nr >= NR_CPUS);
 
@@ -85,39 +115,25 @@ void __devinit smp_generic_kick_cpu(int nr)
 	 * cpu_start field to become non-zero After we set cpu_start,
 	 * the processor will continue on to secondary_start
 	 */
-	paca[nr].cpu_start = 1;
-	smp_mb();
-}
-#endif
-
-void smp_message_recv(int msg)
-{
-	switch(msg) {
-	case PPC_MSG_CALL_FUNCTION:
-		generic_smp_call_function_interrupt();
-		break;
-	case PPC_MSG_RESCHEDULE:
-		/* we notice need_resched on exit */
-		break;
-	case PPC_MSG_CALL_FUNC_SINGLE:
-		generic_smp_call_function_single_interrupt();
-		break;
-	case PPC_MSG_DEBUGGER_BREAK:
-		if (crash_ipi_function_ptr) {
-			crash_ipi_function_ptr(get_irq_regs());
-			break;
-		}
-#ifdef CONFIG_DEBUGGER
-		debugger_ipi(get_irq_regs());
-		break;
-#endif /* CONFIG_DEBUGGER */
-		/* FALLTHROUGH */
-	default:
-		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
-		       smp_processor_id(), msg);
-		break;
+	if (!paca[nr].cpu_start) {
+		paca[nr].cpu_start = 1;
+		smp_mb();
+		return 0;
 	}
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/*
+	 * Ok it's not there, so it might be soft-unplugged, let's
+	 * try to bring it back
+	 */
+	generic_set_cpu_up(nr);
+	smp_wmb();
+	smp_send_reschedule(nr);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+	return 0;
 }
+#endif /* CONFIG_PPC64 */
 
 static irqreturn_t call_function_action(int irq, void *data)
 {
@@ -127,33 +143,41 @@ static irqreturn_t call_function_action(int irq, void *data)
 
 static irqreturn_t reschedule_action(int irq, void *data)
 {
-	/* we just need the return path side effect of checking need_resched */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t call_function_single_action(int irq, void *data)
+static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 {
-	generic_smp_call_function_single_interrupt();
+	tick_broadcast_ipi_handler();
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t debug_ipi_action(int irq, void *data)
 {
-	smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+	if (crash_ipi_function_ptr) {
+		crash_ipi_function_ptr(get_irq_regs());
+		return IRQ_HANDLED;
+	}
+
+#ifdef CONFIG_DEBUGGER
+	debugger_ipi(get_irq_regs());
+#endif /* CONFIG_DEBUGGER */
+
 	return IRQ_HANDLED;
 }
 
 static irq_handler_t smp_ipi_action[] = {
 	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
 	[PPC_MSG_RESCHEDULE] = reschedule_action,
-	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
+	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
 };
 
 const char *smp_ipi_name[] = {
 	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
-	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
+	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
 };
 
@@ -170,23 +194,95 @@ int smp_request_message_ipi(int virq, int msg)
 		return 1;
 	}
 #endif
-	err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
-			  smp_ipi_name[msg], 0);
+	err = request_irq(virq, smp_ipi_action[msg],
+			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
+			  smp_ipi_name[msg], NULL);
 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 		virq, smp_ipi_name[msg], err);
 
 	return err;
 }
 
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+struct cpu_messages {
+	int messages;			/* current messages */
+	unsigned long data;		/* data for cause ipi */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
+
+void smp_muxed_ipi_set_data(int cpu, unsigned long data)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+	info->data = data;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+	char *message = (char *)&info->messages;
+
+	/*
+	 * Order previous accesses before accesses in the IPI handler.
+	 */
+	smp_mb();
+	message[msg] = 1;
+	/*
+	 * cause_ipi functions are required to include a full barrier
+	 * before doing whatever causes the IPI.
+	 */
+	smp_ops->cause_ipi(cpu, info->data);
+}
+
+#ifdef __BIG_ENDIAN__
+#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
+#else
+#define IPI_MESSAGE(A) (1 << (8 * (A)))
+#endif
+
+irqreturn_t smp_ipi_demux(void)
+{
+	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+	unsigned int all;
+
+	mb();	/* order any irq clear */
+
+	do {
+		all = xchg(&info->messages, 0);
+		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
+			generic_smp_call_function_interrupt();
+		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
+			scheduler_ipi();
+		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
+			tick_broadcast_ipi_handler();
+		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
+			debug_ipi_action(0, NULL);
+	} while (info->messages);
+
+	return IRQ_HANDLED;
+}
+#endif /* CONFIG_PPC_SMP_MUXED_IPI */
+
+static inline void do_message_pass(int cpu, int msg)
+{
+	if (smp_ops->message_pass)
+		smp_ops->message_pass(cpu, msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+	else
+		smp_muxed_ipi_message_pass(cpu, msg);
+#endif
+}
+
 void smp_send_reschedule(int cpu)
 {
 	if (likely(smp_ops))
-		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -194,14 +290,31 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	unsigned int cpu;
 
 	for_each_cpu(cpu, mask)
-		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }
 
-#ifdef CONFIG_DEBUGGER
-void smp_send_debugger_break(int cpu)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
 {
-	if (likely(smp_ops))
-		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask)
+		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
+}
+#endif
+
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+void smp_send_debugger_break(void)
+{
+	int cpu;
+	int me = raw_smp_processor_id();
+
+	if (unlikely(!smp_ops))
+		return;
+
+	for_each_online_cpu(cpu)
+		if (cpu != me)
+			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
 }
 #endif
 
@@ -209,9 +322,9 @@
 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 {
 	crash_ipi_function_ptr = crash_ipi_callback;
-	if (crash_ipi_callback && smp_ops) {
+	if (crash_ipi_callback) {
 		mb();
-		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
+		smp_send_debugger_break();
 	}
 }
 #endif
@@ -233,26 +346,13 @@ void smp_send_stop(void)
 
 struct thread_info *current_set[NR_CPUS];
 
-static void __devinit smp_store_cpu_info(int id)
+static void smp_store_cpu_info(int id)
 {
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
-}
-
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	per_cpu(next_tlbcam_idx, id)
+		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 #endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -281,31 +381,21 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
 
-	if (smp_ops)
-		if (smp_ops->probe)
-			max_cpus = smp_ops->probe();
-		else
-			max_cpus = NR_CPUS;
-	else
-		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
+	if (smp_ops && smp_ops->probe)
+		smp_ops->probe();
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
+	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
 	current_set[boot_cpuid] = task_thread_info(current);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +407,8 @@ int generic_cpu_disable(void)
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
-#endif
-	return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
 #endif
+	migrate_irqs();
 
 	return 0;
 }
@@ -362,37 +430,75 @@
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
 }
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
+}
+
+/*
+ * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
+ * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
+ * which makes the delay in generic_cpu_die() not happen.
+ */
+void generic_set_cpu_up(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+}
+
+int generic_check_cpu_restart(unsigned int cpu)
+{
+	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
+}
+
+static bool secondaries_inhibited(void)
+{
+	return kvm_hv_mode_active();
+}
+
+#else /* HOTPLUG_CPU */
+
+#define secondaries_inhibited()		0
+
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
 {
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
+	struct thread_info *ti = task_thread_info(idle);
 
-	return -ENOSYS;
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	secondary_ti = current_set[cpu] = ti;
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	int c;
+	int rc, c;
 
-	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
+	/*
+	 * Don't allow secondary threads to come online if inhibited
	 */
+	if (threads_per_core > 1 && secondaries_inhibited() &&
+	    cpu_thread_in_subcore(cpu))
+		return -EBUSY;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	cpu_idle_thread_init(cpu, tidle);
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
@@ -406,7 +512,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	/* wake up cpus */
 	DBG("smp: kicking cpu %d\n", cpu);
-	smp_ops->kick_cpu(cpu);
+	rc = smp_ops->kick_cpu(cpu);
+	if (rc) {
+		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
+		return rc;
+	}
 
 	/*
 	 * wait to see if the cpu made a callin (is actually up).
@@ -449,7 +559,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 int cpu_to_core_id(int cpu)
 {
 	struct device_node *np;
-	const int *reg;
+	const __be32 *reg;
 	int id = -1;
 
 	np = of_get_cpu_node(cpu, NULL);
@@ -460,12 +570,52 @@ int cpu_to_core_id(int cpu)
 	if (!reg)
 		goto out;
 
-	id = *reg;
+	id = be32_to_cpup(reg);
 out:
 	of_node_put(np);
 	return id;
 }
 
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+	return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+	return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
+{
+	const struct cpumask *mask;
+	struct device_node *np;
+	int i, plen;
+	const __be32 *prop;
+
+	mask = add ? cpu_online_mask : cpu_present_mask;
+	for_each_cpu(i, mask) {
+		np = of_get_cpu_node(i, NULL);
+		if (!np)
+			continue;
+		prop = of_get_property(np, "ibm,chip-id", &plen);
+		if (prop && plen == sizeof(int) &&
+		    of_read_number(prop, 1) == chipid) {
+			if (add) {
+				cpumask_set_cpu(cpu, cpu_core_mask(i));
+				cpumask_set_cpu(i, cpu_core_mask(cpu));
+			} else {
+				cpumask_clear_cpu(cpu, cpu_core_mask(i));
+				cpumask_clear_cpu(i, cpu_core_mask(cpu));
+			}
+		}
+		of_node_put(np);
+	}
+}
+
 /* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
@@ -488,11 +638,51 @@ static struct device_node *cpu_to_l2cache(int cpu)
 	return cache;
 }
 
+static void traverse_core_siblings(int cpu, bool add)
+{
+	struct device_node *l2_cache, *np;
+	const struct cpumask *mask;
+	int i, chip, plen;
+	const __be32 *prop;
+
+	/* First see if we have ibm,chip-id properties in cpu nodes */
+	np = of_get_cpu_node(cpu, NULL);
+	if (np) {
+		chip = -1;
+		prop = of_get_property(np, "ibm,chip-id", &plen);
+		if (prop && plen == sizeof(int))
+			chip = of_read_number(prop, 1);
+		of_node_put(np);
+		if (chip >= 0) {
+			traverse_siblings_chip_id(cpu, add, chip);
+			return;
+		}
+	}
+
+	l2_cache = cpu_to_l2cache(cpu);
+	mask = add ? cpu_online_mask : cpu_present_mask;
+	for_each_cpu(i, mask) {
+		np = cpu_to_l2cache(i);
+		if (!np)
+			continue;
+		if (np == l2_cache) {
+			if (add) {
+				cpumask_set_cpu(cpu, cpu_core_mask(i));
+				cpumask_set_cpu(i, cpu_core_mask(cpu));
+			} else {
+				cpumask_clear_cpu(cpu, cpu_core_mask(i));
+				cpumask_clear_cpu(i, cpu_core_mask(cpu));
+			}
+		}
+		of_node_put(np);
+	}
+	of_node_put(l2_cache);
+}
+
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
-	struct device_node *l2_cache;
 	int i, base;
 
 	atomic_inc(&init_mm.mm_count);
@@ -510,13 +700,16 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
-	ipi_call_lock();
-	notify_cpu_starting(cpu);
-	set_cpu_online(cpu, true);
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+
+	vdso_getcpu_init();
+#endif
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
-		if (cpu_is_offline(base + i))
+		if (cpu_is_offline(base + i) && (cpu != base + i))
 			continue;
 		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
@@ -528,24 +721,23 @@ int __devinit start_secondary(void *unused)
 		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
 		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
 	}
-	l2_cache = cpu_to_l2cache(cpu);
-	for_each_online_cpu(i) {
-		struct device_node *np = cpu_to_l2cache(i);
-		if (!np)
-			continue;
-		if (np == l2_cache) {
-			cpumask_set_cpu(cpu, cpu_core_mask(i));
-			cpumask_set_cpu(i, cpu_core_mask(cpu));
-		}
-		of_node_put(np);
-	}
-	of_node_put(l2_cache);
-	ipi_call_unlock();
+	traverse_core_siblings(cpu, true);
+
+	/*
+	 * numa_node_id() works after this.
+	 */
+	set_numa_node(numa_cpu_lookup_table[cpu]);
+	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+
+	smp_wmb();
+	notify_cpu_starting(cpu);
+	set_cpu_online(cpu, true);
 
 	local_irq_enable();
 
-	cpu_idle();
-	return 0;
+	cpu_startup_entry(CPUHP_ONLINE);
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -553,6 +745,28 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
+/* cpumask of CPUs with asymetric SMT dependancy */
+static int powerpc_smt_flags(void)
+{
+	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+
+	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
+		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
+		flags |= SD_ASYM_PACKING;
+	}
+
+	return flags;
+}
+#endif
+
+static struct sched_domain_topology_level powerpc_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	cpumask_var_t old_mask;
@@ -562,7 +776,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	 * se we pin us down to CPU 0 for a short while
 	 */
 	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-	cpumask_copy(old_mask, &current->cpus_allowed);
+	cpumask_copy(old_mask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 
 	if (smp_ops && smp_ops->setup_cpu)
@@ -572,22 +786,18 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
-}
 
-int arch_sd_sibling_asym_packing(void)
-{
-	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
-		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
-		return SD_ASYM_PACKING;
-	}
-	return 0;
+	set_sched_topology(powerpc_topology);
+
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 int __cpu_disable(void)
 {
-	struct device_node *l2_cache;
 	int cpu = smp_processor_id();
 	int base, i;
 	int err;
@@ -600,27 +810,14 @@ int __cpu_disable(void)
 		return err;
 
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
 		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
 	}
-
-	l2_cache = cpu_to_l2cache(cpu);
-	for_each_present_cpu(i) {
-		struct device_node *np = cpu_to_l2cache(i);
-		if (!np)
-			continue;
-		if (np == l2_cache) {
-			cpumask_clear_cpu(cpu, cpu_core_mask(i));
-			cpumask_clear_cpu(i, cpu_core_mask(cpu));
-		}
-		of_node_put(np);
-	}
-	of_node_put(l2_cache);
-
+	traverse_core_siblings(cpu, false);
 	return 0;
 }
 
@@ -631,21 +828,13 @@ void __cpu_die(unsigned int cpu)
 		smp_ops->cpu_die(cpu);
 }
 
-static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
-	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
-	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
 void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif

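Similarly, the new smp_generic_cpu_bootable() inhibits secondary threads at boot when SMT is disabled or limited on the command line: with smt_enabled_at_boot == 0 only thread 0 of each core boots, otherwise only threads numbered below smt_enabled_at_boot do. A minimal user-space sketch of the same rule follows; the core geometry, the smt setting, and modeling cpu_thread_in_core() as cpu % threads_per_core are assumptions for illustration, not taken from the patch.

#include <stdio.h>

static const int threads_per_core = 8;		/* assumed core geometry */
static const int smt_enabled_at_boot = 2;	/* e.g. booted with smt-enabled=2 */

static int cpu_thread_in_core(int cpu)
{
	return cpu % threads_per_core;
}

static int cpu_bootable_at_boot(int cpu)
{
	/* Same two tests as smp_generic_cpu_bootable() in the diff */
	if (!smt_enabled_at_boot && cpu_thread_in_core(cpu) != 0)
		return 0;
	if (smt_enabled_at_boot &&
	    cpu_thread_in_core(cpu) >= smt_enabled_at_boot)
		return 0;
	return 1;
}

int main(void)
{
	/* Threads 0-1 of each core boot; threads 2-7 are held back */
	for (int cpu = 0; cpu < 16; cpu++)
		printf("cpu %2d: %s\n", cpu,
		       cpu_bootable_at_boot(cpu) ? "boot" : "inhibit");
	return 0;
}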
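Finally, the hotplug paths rely on a handshake through the per-cpu cpu_state variable: a soft-unplugged CPU parks in generic_mach_cpu_die(), spinning until cpu_state is set back to CPU_UP_PREPARE, and the hotplug branch of smp_generic_kick_cpu() releases it via generic_set_cpu_up() followed by a wake-up IPI. Below is a rough pthread model of that handshake; the state values and the thread scaffolding are illustrative, not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { CPU_UP_PREPARE = 1, CPU_DEAD = 7 };	/* values illustrative only */

static atomic_int cpu_state;	/* stands in for per_cpu(cpu_state, cpu) */

static void *parked_cpu(void *unused)
{
	/* generic_mach_cpu_die(): advertise death, then spin until restarted */
	atomic_store(&cpu_state, CPU_DEAD);
	while (atomic_load(&cpu_state) != CPU_UP_PREPARE)
		;	/* cpu_relax() in the kernel */
	printf("secondary: restarted\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, parked_cpu, NULL);
	while (atomic_load(&cpu_state) != CPU_DEAD)
		;	/* wait for the CPU to park itself */

	/* smp_generic_kick_cpu() hotplug branch: set the state, then wake */
	atomic_store(&cpu_state, CPU_UP_PREPARE);
	pthread_join(t, NULL);	/* wait for the restarted thread to finish */
	return 0;
}

Compile with -pthread. The real code orders the state write against the wake-up with smp_wmb(); the C11 atomics used here are sequentially consistent, which subsumes that barrier.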