Diffstat (limited to 'arch/ia64/kernel/smpboot.c')
 -rw-r--r--  arch/ia64/kernel/smpboot.c | 491
 1 file changed, 233 insertions(+), 258 deletions(-)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 7d72c0d872b..547a48d78bd 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -21,7 +21,6 @@
  * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
  *	Setup cpu_sibling_map and cpu_core_map
  */
-#include <linux/config.h>
 
 #include <linux/module.h>
 #include <linux/acpi.h>
@@ -36,30 +35,29 @@
 #include <linux/mm.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/spinlock.h>
 #include <linux/efi.h>
 #include <linux/percpu.h>
 #include <linux/bitops.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/cache.h>
 #include <asm/current.h>
 #include <asm/delay.h>
-#include <asm/ia32.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/machvec.h>
 #include <asm/mca.h>
 #include <asm/page.h>
+#include <asm/paravirt.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/sal.h>
-#include <asm/system.h>
 #include <asm/tlbflush.h>
 #include <asm/unistd.h>
+#include <asm/sn/arch.h>
 
 #define SMP_DEBUG 0
@@ -70,12 +68,11 @@
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-/*
- * Store all idle threads, this can be reused instead of creating
- * a new thread. Also avoids complicated thread destroy functionality
- * for idle threads.
- */
-struct task_struct *idle_thread_array[NR_CPUS];
+#ifdef CONFIG_PERMIT_BSP_REMOVE
+#define bsp_remove_ok	1
+#else
+#define bsp_remove_ok	0
+#endif
 
 /*
  * Global array allocated for NR_CPUS at boot time
@@ -90,13 +87,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
 
 #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
 
-#define get_idle_for_cpu(x)		(idle_thread_array[(x)])
-#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
-
 #else
-
-#define get_idle_for_cpu(x)		(NULL)
-#define set_idle_for_cpu(x,p)
 #define set_brendez_area(x)
 #endif
 
@@ -104,7 +95,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
 /*
  * ITC synchronization related stuff:
  */
-#define MASTER	0
+#define MASTER	(0)
 #define SLAVE	(SMP_CACHE_BYTES/8)
 
 #define NUM_ROUNDS	64	/* magic value */
@@ -115,27 +106,22 @@ static volatile unsigned long go[SLAVE + 1];
 
 #define DEBUG_ITC_SYNC	0
 
-extern void __devinit calibrate_delay (void);
 extern void start_ap (void);
 extern unsigned long ia64_iobase;
 
-task_t *task_for_booting_cpu;
+struct task_struct *task_for_booting_cpu;
 
 /*
  * State for each CPU
  */
 DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+
 int smp_num_siblings = 1;
-int smp_num_cpucores = 1;
 
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
@@ -151,6 +137,27 @@ char __initdata no_int_routing;
 
 unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
 
+#ifdef CONFIG_FORCE_CPEI_RETARGET
+#define CPEI_OVERRIDE_DEFAULT	(1)
+#else
+#define CPEI_OVERRIDE_DEFAULT	(0)
+#endif
+
+unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
+
+static int __init
+cmdl_force_cpei(char *str)
+{
+	int value=0;
+
+	get_option (&str, &value);
+	force_cpei_retarget = value;
+
+	return 1;
+}
+
+__setup("force_cpei=", cmdl_force_cpei);
+
 static int __init
 nointroute (char *str)
 {
@@ -161,6 +168,27 @@ nointroute (char *str)
 
 __setup("nointroute", nointroute);
 
+static void fix_b0_for_bsp(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpuid;
+	static int fix_bsp_b0 = 1;
+
+	cpuid = smp_processor_id();
+
+	/*
+	 * Cache the b0 value on the first AP that comes up
+	 */
+	if (!(fix_bsp_b0 && cpuid))
+		return;
+
+	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
+	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
+
+	fix_bsp_b0 = 0;
+#endif
+}
+
 void
 sync_master (void *arg)
 {
@@ -270,7 +298,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}
@@ -319,16 +347,17 @@ ia64_sync_itc (unsigned int master)
 /*
  * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
  */
-static inline void __devinit
-smp_setup_percpu_timer (void)
+static inline void smp_setup_percpu_timer(void)
 {
 }
 
-static void __devinit
+static void
 smp_callin (void)
 {
-	int cpuid, phys_id;
+	int cpuid, phys_id, itc_master;
+	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
 	extern void ia64_init_itm(void);
+	extern volatile int time_keeper_id;
 
 #ifdef CONFIG_PERFMON
 	extern void pfm_init_percpu(void);
@@ -336,6 +365,7 @@ smp_callin (void)
 
 	cpuid = smp_processor_id();
 	phys_id = hard_smp_processor_id();
+	itc_master = time_keeper_id;
 
 	if (cpu_online(cpuid)) {
 		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
@@ -343,10 +373,21 @@ smp_callin (void)
 		BUG();
 	}
 
-	lock_ipi_calllock();
-	cpu_set(cpuid, cpu_online_map);
-	unlock_ipi_calllock();
+	fix_b0_for_bsp();
+
+	/*
+	 * numa_node_id() works after this.
+	 */
+	set_numa_node(cpu_to_node_map[cpuid]);
+	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
+
+	spin_lock(&vector_lock);
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(cpuid);
+	notify_cpu_starting(cpuid);
+	set_cpu_online(cpuid, true);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
+	spin_unlock(&vector_lock);
 
 	smp_setup_percpu_timer();
@@ -365,20 +406,30 @@ smp_callin (void)
 		 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
 		 * local_bh_enable(), which bugs out if irqs are not enabled...
 		 */
-		Dprintk("Going to syncup ITC with BP.\n");
-		ia64_sync_itc(0);
+		Dprintk("Going to syncup ITC with ITC Master.\n");
+		ia64_sync_itc(itc_master);
 	}
 
 	/*
 	 * Get our bogomips.
 	 */
 	ia64_init_itm();
-	calibrate_delay();
-	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 
-#ifdef CONFIG_IA32_SUPPORT
-	ia32_gdt_init();
-#endif
+	/*
+	 * Delay calibration can be skipped if new processor is identical to the
+	 * previous processor.
+	 */
+	last_cpuinfo = cpu_data(cpuid - 1);
+	this_cpuinfo = local_cpu_data;
+	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
+	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
+	    last_cpuinfo->features != this_cpuinfo->features ||
+	    last_cpuinfo->revision != this_cpuinfo->revision ||
+	    last_cpuinfo->family != this_cpuinfo->family ||
+	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
+	    last_cpuinfo->model != this_cpuinfo->model)
+		calibrate_delay();
+	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 
 	/*
 	 * Allow the master to continue.
@@ -391,74 +442,29 @@ smp_callin (void)
 /*
  * Activate a secondary processor.  head.S calls this.
  */
-int __devinit
+int
 start_secondary (void *unused)
 {
 	/* Early console may use I/O ports */
 	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
+#ifndef CONFIG_PRINTK_TIME
 	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
+#endif
 	efi_map_pal_code();
 	cpu_init();
+	preempt_disable();
 	smp_callin();
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 	return 0;
 }
 
-struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
-{
-	return NULL;
-}
-
-struct create_idle {
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-void
-do_fork_idle(void *_c_idle)
-{
-	struct create_idle *c_idle = _c_idle;
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
-static int __devinit
-do_boot_cpu (int sapicid, int cpu)
+static int
+do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 {
 	int timeout;
-	struct create_idle c_idle = {
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER(c_idle.done),
-	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
-
-	c_idle.idle = get_idle_for_cpu(cpu);
-	if (c_idle.idle) {
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
-
-	/*
-	 * We can't use kernel_thread since we must avoid to reschedule the child.
-	 */
-	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
-	else {
-		schedule_work(&work);
-		wait_for_completion(&c_idle.done);
-	}
-
-	if (IS_ERR(c_idle.idle))
-		panic("failed fork for CPU %d", cpu);
-
-	set_idle_for_cpu(cpu, c_idle.idle);
-
-do_rest:
-	task_for_booting_cpu = c_idle.idle;
+	task_for_booting_cpu = idle;
 
 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
 
 	set_brendez_area(cpu);
@@ -478,7 +484,7 @@ do_rest:
 	if (!cpu_isset(cpu, cpu_callin_map)) {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
-		cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
+		set_cpu_online(cpu, false);  /* was set in smp_callin() */
 		return -EINVAL;
 	}
 	return 0;
@@ -505,21 +511,17 @@ smp_build_cpu_map (void)
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		ia64_cpu_to_sapicid[cpu] = -1;
-#ifdef CONFIG_HOTPLUG_CPU
-		cpu_set(cpu, cpu_possible_map);
-#endif
 	}
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
-	cpus_clear(cpu_present_map);
-	cpu_set(0, cpu_present_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_present(cpumask_of(0));
+	set_cpu_possible(0, true);
 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
 		sapicid = smp_boot_data.cpu_phys_id[i];
 		if (sapicid == boot_cpu_id)
 			continue;
-		cpu_set(cpu, cpu_present_map);
-		cpu_set(cpu, cpu_possible_map);
+		set_cpu_present(cpu, true);
+		set_cpu_possible(cpu, true);
 		ia64_cpu_to_sapicid[cpu] = sapicid;
 		cpu++;
 	}
@@ -539,10 +541,6 @@ smp_prepare_cpus (unsigned int max_cpus)
 
 	smp_setup_percpu_timer();
 
-	/*
-	 * We have the boot CPU online for sure.
-	 */
-	cpu_set(0, cpu_online_map);
 	cpu_set(0, cpu_callin_map);
 
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
@@ -557,59 +555,34 @@ smp_prepare_cpus (unsigned int max_cpus)
 	 */
 	if (!max_cpus) {
 		printk(KERN_INFO "SMP mode deactivated.\n");
-		cpus_clear(cpu_online_map);
-		cpus_clear(cpu_present_map);
-		cpus_clear(cpu_possible_map);
-		cpu_set(0, cpu_online_map);
-		cpu_set(0, cpu_present_map);
-		cpu_set(0, cpu_possible_map);
+		init_cpu_online(cpumask_of(0));
+		init_cpu_present(cpumask_of(0));
+		init_cpu_possible(cpumask_of(0));
 		return;
 	}
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
 {
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	cpu_set(smp_processor_id(), cpu_callin_map);
+	set_numa_node(cpu_to_node_map[smp_processor_id()]);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	paravirt_post_smp_prepare_boot_cpu();
 }
 
-/*
- * mt_info[] is a temporary store for all info returned by
- * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
- * specific cpu comes.
- */
-static struct {
-	__u32 socket_id;
-	__u16 core_id;
-	__u16 thread_id;
-	__u16 proc_fixed_addr;
-	__u8 valid;
-} mt_info[NR_CPUS] __devinitdata;
-
 #ifdef CONFIG_HOTPLUG_CPU
 static inline void
-remove_from_mtinfo(int cpu)
-{
-	int i;
-
-	for_each_cpu(i)
-		if (mt_info[i].valid && mt_info[i].socket_id ==
-		    cpu_data(cpu)->socket_id)
-			mt_info[i].valid = 0;
-}
-
-static inline void
 clear_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	for_each_cpu_mask(i, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[i]);
+	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
 	for_each_cpu_mask(i, cpu_core_map[cpu])
 		cpu_clear(cpu, cpu_core_map[i]);
 
-	cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
 
 static void
@@ -620,7 +593,7 @@ remove_siblinginfo(int cpu)
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
 		cpu_clear(cpu, cpu_core_map[cpu]);
-		cpu_clear(cpu, cpu_sibling_map[cpu]);
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
 		return;
 	}
@@ -628,15 +601,50 @@ remove_siblinginfo(int cpu)
 
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
+}
+
+extern void fixup_irqs(void);
 
-	/* if this cpu is the last in the core group, remove all its info
-	 * from mt_info structure
+int migrate_platform_irqs(unsigned int cpu)
+{
+	int new_cpei_cpu;
+	struct irq_data *data = NULL;
+	const struct cpumask *mask;
+	int retval = 0;
+
+	/*
+	 * dont permit CPEI target to removed.
 	 */
-	if (last)
-		remove_from_mtinfo(cpu);
+	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
+		printk ("CPU (%d) is CPEI Target\n", cpu);
+		if (can_cpei_retarget()) {
+			/*
+			 * Now re-target the CPEI to a different processor
+			 */
+			new_cpei_cpu = cpumask_any(cpu_online_mask);
+			mask = cpumask_of(new_cpei_cpu);
+			set_cpei_target_cpu(new_cpei_cpu);
+			data = irq_get_irq_data(ia64_cpe_irq);
+			/*
+			 * Switch for now, immediately, we need to do fake intr
+			 * as other interrupts, but need to study CPEI behaviour with
+			 * polling before making changes.
+			 */
+			if (data && data->chip) {
+				data->chip->irq_disable(data);
+				data->chip->irq_set_affinity(data, mask, false);
+				data->chip->irq_enable(data);
+				printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
+			}
+		}
+		if (!data) {
+			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
+			retval = -EBUSY;
+		}
+	}
+	return retval;
 }
 
-extern void fixup_irqs(void);
 /* must be called with cpucontrol mutex held */
 int __cpu_disable(void)
 {
@@ -645,11 +653,24 @@ int __cpu_disable(void)
 	/*
 	 * dont permit boot processor for now
 	 */
-	if (cpu == 0)
+	if (cpu == 0 && !bsp_remove_ok) {
+		printk ("Your platform does not support removal of BSP\n");
+		return (-EBUSY);
+	}
+
+	if (ia64_platform_is("sn2")) {
+		if (!sn_cpu_disable_allowed(cpu))
+			return -EBUSY;
+	}
+
+	set_cpu_online(cpu, false);
+
+	if (migrate_platform_irqs(cpu)) {
+		set_cpu_online(cpu, true);
 		return -EBUSY;
+	}
 
 	remove_siblinginfo(cpu);
-	cpu_clear(cpu, cpu_online_map);
 	fixup_irqs();
 	local_flush_tlb_all();
 	cpu_clear(cpu, cpu_callin_map);
@@ -671,17 +692,6 @@ void __cpu_die(unsigned int cpu)
 	}
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
-#else /* !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We said "no" in __cpu_disable */
-	BUG();
-}
 #endif /* CONFIG_HOTPLUG_CPU */
 
 void
@@ -694,16 +704,15 @@ smp_cpus_done (unsigned int dummy)
 	 * Allow the user to impress friends.
 	 */
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (cpu_online(cpu))
-			bogosum += cpu_data(cpu)->loops_per_jiffy;
+	for_each_online_cpu(cpu) {
+		bogosum += cpu_data(cpu)->loops_per_jiffy;
+	}
 
 	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
 }
 
-static inline void __devinit
-set_cpu_sibling_map(int cpu)
+static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
@@ -712,15 +721,15 @@ set_cpu_sibling_map(int cpu)
 			cpu_set(i, cpu_core_map[cpu]);
 			cpu_set(cpu, cpu_core_map[i]);
 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 			}
 		}
 	}
 }
 
-int __devinit
-__cpu_up (unsigned int cpu)
+int
+__cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int ret;
 	int sapicid;
@@ -738,13 +747,13 @@ __cpu_up (unsigned int cpu)
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
 	/* Processor goes to start_secondary(), sets online flag */
-	ret = do_boot_cpu(sapicid, cpu);
+	ret = do_boot_cpu(sapicid, cpu, tidle);
 	if (ret < 0)
 		return ret;
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 		cpu_set(cpu, cpu_core_map[cpu]);
 		return 0;
 	}
@@ -755,7 +764,7 @@ __cpu_up (unsigned int cpu)
 }
 
 /*
- * Assume that CPU's have been discovered by some platform-dependent interface.  For
+ * Assume that CPUs have been discovered by some platform-dependent interface.  For
  * SoftSDV/Lion, that would be ACPI.
  *
  * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
@@ -769,7 +778,7 @@ init_smp_config(void)
 	} *ap_startup;
 	long sal_ret;
 
-	/* Tell SAL where to drop the AP's.  */
+	/* Tell SAL where to drop the APs.  */
 	ap_startup = (struct fptr *) start_ap;
 	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
 				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
@@ -778,106 +787,72 @@ init_smp_config(void)
 	if (sal_ret < 0)
 		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
 		       ia64_sal_strerror(sal_ret));
 }
 
-static inline int __devinit
-check_for_mtinfo_index(void)
-{
-	int i;
-
-	for_each_cpu(i)
-		if (!mt_info[i].valid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Search the mt_info to find out if this socket's cid/tid information is
- * cached or not. If the socket exists, fill in the core_id and thread_id
- * in cpuinfo
- */
-static int __devinit
-check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
-{
-	int i;
-	__u32 sid = c->socket_id;
-
-	for_each_cpu(i) {
-		if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
-		    && mt_info[i].socket_id == sid) {
-			c->core_id = mt_info[i].core_id;
-			c->thread_id = mt_info[i].thread_id;
-			return 1; /* not a new socket */
-		}
-	}
-	return 0;
-}
-
 /*
  * identify_siblings(cpu) gets called from identify_cpu. This populates the
  * information related to logical execution units in per_cpu_data structure.
  */
-void __devinit
-identify_siblings(struct cpuinfo_ia64 *c)
+void identify_siblings(struct cpuinfo_ia64 *c)
 {
-	s64 status;
+	long status;
 	u16 pltid;
-	u64 proc_fixed_addr;
-	int count, i;
 	pal_logical_to_physical_t info;
 
-	if (smp_num_cpucores == 1 && smp_num_siblings == 1)
-		return;
+	status = ia64_pal_logical_to_phys(-1, &info);
+	if (status != PAL_STATUS_SUCCESS) {
+		if (status != PAL_STATUS_UNIMPLEMENTED) {
+			printk(KERN_ERR
+				"ia64_pal_logical_to_phys failed with %ld\n",
+				status);
+			return;
+		}
 
-	if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
-		       status);
-		return;
-	}
-	if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
-		return;
+		info.overview_ppid = 0;
+		info.overview_cpp = 1;
+		info.overview_tpc = 1;
 	}
-	if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
+
+	status = ia64_sal_physical_id_info(&pltid);
+	if (status != PAL_STATUS_SUCCESS) {
+		if (status != PAL_STATUS_UNIMPLEMENTED)
+			printk(KERN_ERR
+				"ia64_sal_pltid failed with %ld\n",
+				status);
 		return;
 	}
 
 	c->socket_id = (pltid << 8) | info.overview_ppid;
-	c->cores_per_socket = info.overview_cpp;
-	c->threads_per_core = info.overview_tpc;
-	count = c->num_log = info.overview_num_log;
 
-	/* If the thread and core id information is already cached, then
-	 * we will simply update cpu_info and return. Otherwise, we will
-	 * do the PAL calls and cache core and thread id's of all the siblings.
-	 */
-	if (check_for_new_socket(proc_fixed_addr, c))
+	if (info.overview_cpp == 1 && info.overview_tpc == 1)
 		return;
 
-	for (i = 0; i < count; i++) {
-		int index;
-
-		if (i && (status = ia64_pal_logical_to_phys(i, &info))
-			  != PAL_STATUS_SUCCESS) {
-			printk(KERN_ERR "ia64_pal_logical_to_phys failed"
-					" with %ld\n", status);
-			return;
-		}
-		if (info.log2_la == proc_fixed_addr) {
-			c->core_id = info.log1_cid;
-			c->thread_id = info.log1_tid;
-		}
+	c->cores_per_socket = info.overview_cpp;
+	c->threads_per_core = info.overview_tpc;
+	c->num_log = info.overview_num_log;
 
-		index = check_for_mtinfo_index();
-		/* We will not do the mt_info caching optimization in this case.
-		 */
-		if (index < 0)
-			continue;
+	c->core_id = info.log1_cid;
+	c->thread_id = info.log1_tid;
+}
 
-		mt_info[index].valid = 1;
-		mt_info[index].socket_id = c->socket_id;
-		mt_info[index].core_id = info.log1_cid;
-		mt_info[index].thread_id = info.log1_tid;
-		mt_info[index].proc_fixed_addr = info.log2_la;
+/*
+ * returns non zero, if multi-threading is enabled
+ * on at least one physical package. Due to hotplug cpu
+ * and (maxcpus=), all threads may not necessarily be enabled
+ * even though the processor supports multi-threading.
+ */
+int is_multithreading_enabled(void)
+{
+	int i, j;
+
+	for_each_present_cpu(i) {
+		for_each_present_cpu(j) {
+			if (j == i)
+				continue;
+			if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
+				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
+					return 1;
+			}
+		}
 	}
+	return 0;
 }
+EXPORT_SYMBOL_GPL(is_multithreading_enabled);
