Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 181
1 file changed, 99 insertions(+), 82 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a38aafaefc2..35b4f6a9c8e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -5,7 +5,6 @@
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/bootmem.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/msr.h>
@@ -62,9 +61,9 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 * c)
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
         /* Not much we can do here... */
         /* Check if at least it has cpuid */
@@ -81,11 +80,11 @@ static struct cpu_dev __cpuinitdata default_cpu = {
         .c_init = default_init,
         .c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
+static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
-        get_option (&str, &cachesize_override);
+        get_option(&str, &cachesize_override);
         return 1;
 }
 __setup("cachesize=", cachesize_setup);
@@ -107,12 +106,12 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
         /* Intel chips right-justify this string for some dumb reason;
            undo that brain damage */
         p = q = &c->x86_model_id[0];
-        while ( *p == ' ' )
+        while (*p == ' ')
              p++;
-        if ( p != q ) {
-           while ( *p )
+        if (p != q) {
+                while (*p)
              *q++ = *p++;
-           while ( q <= &c->x86_model_id[48] )
+                while (q <= &c->x86_model_id[48])
               *q++ = '\0';     /* Zero-pad the rest */
         }
 
@@ -130,7 +129,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
                 cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                         edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-                c->x86_cache_size=(ecx>>24)+(edx>>24);
+                c->x86_cache_size = (ecx>>24)+(edx>>24);
         }
 
         if (n < 0x80000006)     /* Some chips just has a large L1. */
@@ -138,16 +137,16 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
         ecx = cpuid_ecx(0x80000006);
         l2size = ecx >> 16;
-        
+
         /* do processor-specific cache resizing */
         if (this_cpu->c_size_cache)
-                l2size = this_cpu->c_size_cache(c,l2size);
+                l2size = this_cpu->c_size_cache(c, l2size);
 
         /* Allow user to override all this if necessary. */
         if (cachesize_override != -1)
                 l2size = cachesize_override;
 
-        if ( l2size == 0 )
+        if (l2size == 0)
                 return;         /* Again, no L2 cache is possible */
 
         c->x86_cache_size = l2size;
@@ -156,16 +155,19 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
                l2size, ecx & 0xFF);
 }
 
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table only is used unless init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
 
 /* Look up CPU names by table lookup. */
 static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
         struct cpu_model_info *info;
 
-        if ( c->x86_model >= 16 )
+        if (c->x86_model >= 16)
                 return NULL;    /* Range check */
 
         if (!this_cpu)
@@ -190,9 +192,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 
         for (i = 0; i < X86_VENDOR_NUM; i++) {
                 if (cpu_devs[i]) {
-                        if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-                            (cpu_devs[i]->c_ident[1] &&
-                             !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+                        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+                            (cpu_devs[i]->c_ident[1] &&
+                             !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                                 c->x86_vendor = i;
                                 if (!early)
                                         this_cpu = cpu_devs[i];
@@ -210,7 +212,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 }
 
 
-static int __init x86_fxsr_setup(char * s)
+static int __init x86_fxsr_setup(char *s)
 {
         setup_clear_cpu_cap(X86_FEATURE_FXSR);
         setup_clear_cpu_cap(X86_FEATURE_XMM);
@@ -219,7 +221,7 @@ static int __init x86_fxsr_setup(char * s)
 __setup("nofxsr", x86_fxsr_setup);
 
 
-static int __init x86_sep_setup(char * s)
+static int __init x86_sep_setup(char *s)
 {
         setup_clear_cpu_cap(X86_FEATURE_SEP);
         return 1;
@@ -306,14 +308,30 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 
         }
 
-}
+        clear_cpu_cap(c, X86_FEATURE_PAT);
+
+        switch (c->x86_vendor) {
+        case X86_VENDOR_AMD:
+                if (c->x86 >= 0xf && c->x86 <= 0x11)
+                        set_cpu_cap(c, X86_FEATURE_PAT);
+                break;
+        case X86_VENDOR_INTEL:
+                if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+                        set_cpu_cap(c, X86_FEATURE_PAT);
+                break;
+        }
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
+}
 
-   WARNING: this function is only called on the BP. Don't add code here
-   that is supposed to run on all CPUs. */
+/*
+ * Do minimum CPU detection early.
+ * Fields really needed: vendor, cpuid_level, family, model, mask,
+ * cache alignment.
+ * The others are not touched to avoid unwanted side effects.
+ *
+ * WARNING: this function is only called on the BP. Don't add code here
+ * that is supposed to run on all CPUs.
+ */
 static void __init early_cpu_detect(void)
 {
         struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -328,19 +346,14 @@ static void __init early_cpu_detect(void)
 
         get_cpu_vendor(c, 1);
 
-        switch (c->x86_vendor) {
-        case X86_VENDOR_AMD:
-                early_init_amd(c);
-                break;
-        case X86_VENDOR_INTEL:
-                early_init_intel(c);
-                break;
-        }
+        if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+            cpu_devs[c->x86_vendor]->c_early_init)
+                cpu_devs[c->x86_vendor]->c_early_init(c);
 
         early_get_cap(c);
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
         u32 tfms, xlvl;
         unsigned int ebx;
@@ -351,13 +364,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
                       (unsigned int *)&c->x86_vendor_id[0],
                       (unsigned int *)&c->x86_vendor_id[8],
                       (unsigned int *)&c->x86_vendor_id[4]);
-                
+
                 get_cpu_vendor(c, 0);
 
                 /* Initialize the standard set of capabilities */
                 /* Note that the vendor-specific code below might override */
-                /* Intel-defined flags: level 0x00000001 */
-                if ( c->cpuid_level >= 0x00000001 ) {
+                if (c->cpuid_level >= 0x00000001) {
                         u32 capability, excap;
                         cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                         c->x86_capability[0] = capability;
@@ -369,12 +381,14 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
                         if (c->x86 >= 0x6)
                                 c->x86_model += ((tfms >> 16) & 0xF) << 4;
                         c->x86_mask = tfms & 15;
+                        c->initial_apicid = (ebx >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-                        c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+                        c->apicid = phys_pkg_id(c->initial_apicid, 0);
+                        c->phys_proc_id = c->initial_apicid;
 #else
-                        c->apicid = (ebx >> 24) & 0xFF;
+                        c->apicid = c->initial_apicid;
 #endif
-                        if (c->x86_capability[0] & (1<<19))
+                        if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
                                 c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
                 } else {
                         /* Have CPUID level 0 only - unheard of */
@@ -383,33 +397,42 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 
                 /* AMD-defined flags: level 0x80000001 */
                 xlvl = cpuid_eax(0x80000000);
-                if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-                        if ( xlvl >= 0x80000001 ) {
+                if ((xlvl & 0xffff0000) == 0x80000000) {
+                        if (xlvl >= 0x80000001) {
                                 c->x86_capability[1] = cpuid_edx(0x80000001);
                                 c->x86_capability[6] = cpuid_ecx(0x80000001);
                         }
-                        if ( xlvl >= 0x80000004 )
+                        if (xlvl >= 0x80000004)
                                 get_model_name(c); /* Default name */
                 }
 
                 init_scattered_cpuid_features(c);
         }
 
-#ifdef CONFIG_X86_HT
-        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
+        clear_cpu_cap(c, X86_FEATURE_PAT);
+
+        switch (c->x86_vendor) {
+        case X86_VENDOR_AMD:
+                if (c->x86 >= 0xf && c->x86 <= 0x11)
+                        set_cpu_cap(c, X86_FEATURE_PAT);
+                break;
+        case X86_VENDOR_INTEL:
+                if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+                        set_cpu_cap(c, X86_FEATURE_PAT);
+                break;
+        }
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-                /* Disable processor serial number */
-                unsigned long lo,hi;
-                rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
+                /* Disable processor serial number */
+                unsigned long lo, hi;
+                rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                 lo |= 0x200000;
-                wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+                wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                 printk(KERN_NOTICE "CPU serial number disabled.\n");
-                clear_bit(X86_FEATURE_PN, c->x86_capability);
+                clear_cpu_cap(c, X86_FEATURE_PN);
 
                 /* Disabling the serial number may affect the cpuid level */
                 c->cpuid_level = cpuid_eax(0);
@@ -444,9 +467,11 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
         memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
         if (!have_cpuid_p()) {
-                /* First of all, decide if this is a 486 or higher */
-                /* It's a 486 if we can modify the AC flag */
-                if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+                /*
+                 * First of all, decide if this is a 486 or higher
+                 * It's a 486 if we can modify the AC flag
+                 */
+                if (flag_is_changeable_p(X86_EFLAGS_AC))
                         c->x86 = 4;
                 else
                         c->x86 = 3;
@@ -479,10 +504,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
          */
 
         /* If the model name is still unset, do table lookup. */
-        if ( !c->x86_model_id[0] ) {
+        if (!c->x86_model_id[0]) {
                 char *p;
                 p = table_lookup_model(c);
-                if ( p )
+                if (p)
                         strcpy(c->x86_model_id, p);
                 else
                         /* Last resort... */
@@ -496,9 +521,9 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
          * common between the CPUs.  The first time this routine gets
          * executed, c == &boot_cpu_data.
          */
-        if ( c != &boot_cpu_data ) {
+        if (c != &boot_cpu_data) {
                 /* AND the already accumulated flags with these */
-                for ( i = 0 ; i < NCAPINTS ; i++ )
+                for (i = 0 ; i < NCAPINTS ; i++)
                         boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
         }
 
@@ -542,7 +567,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
         if (smp_num_siblings == 1) {
                 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-        } else if (smp_num_siblings > 1 ) {
+        } else if (smp_num_siblings > 1) {
 
                 if (smp_num_siblings > NR_CPUS) {
                         printk(KERN_WARNING "CPU: Unsupported number of the "
@@ -552,7 +577,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
                 }
 
                 index_msb = get_count_order(smp_num_siblings);
-                c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+                c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
 
                 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                        c->phys_proc_id);
@@ -563,7 +588,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
                 core_bits = get_count_order(c->x86_max_cores);
 
-                c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+                c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
                                                ((1 << core_bits) - 1);
 
                 if (c->x86_max_cores > 1)
@@ -597,7 +622,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
         else
                 printk("%s", c->x86_model_id);
 
-        if (c->x86_mask || c->cpuid_level >= 0) 
+        if (c->x86_mask || c->cpuid_level >= 0)
                 printk(" stepping %02x\n", c->x86_mask);
         else
                 printk("\n");
@@ -616,23 +641,15 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
 void __init early_cpu_init(void)
 {
-        intel_cpu_init();
-        cyrix_init_cpu();
-        nsc_init_cpu();
-        amd_init_cpu();
-        centaur_init_cpu();
-        transmeta_init_cpu();
-        nexgen_init_cpu();
-        umc_init_cpu();
+        struct cpu_vendor_dev *cvdev;
+
+        for (cvdev = __x86cpuvendor_start ;
+             cvdev < __x86cpuvendor_end ;
+             cvdev++)
+                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
         early_cpu_detect();
 }
 
@@ -666,7 +683,7 @@ void __cpuinit cpu_init(void)
 {
         int cpu = smp_processor_id();
         struct task_struct *curr = current;
-        struct tss_struct * t = &per_cpu(init_tss, cpu);
+        struct tss_struct *t = &per_cpu(init_tss, cpu);
         struct thread_struct *thread = &curr->thread;
 
         if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -692,7 +709,7 @@ void __cpuinit cpu_init(void)
         enter_lazy_tlb(&init_mm, curr);
 
         load_sp0(t, thread);
-        set_tss_desc(cpu,t);
+        set_tss_desc(cpu, t);
         load_TR_desc();
         load_LDT(&init_mm.context);
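The early_cpu_init() change above is the heart of this patch: instead of common.c calling every vendor's init function by name, each vendor file now registers a struct cpu_vendor_dev entry in a dedicated ELF section, and early_cpu_init() walks the array the linker builds between __x86cpuvendor_start and __x86cpuvendor_end. Below is a minimal standalone C sketch of the same linker-section registration pattern. It is an illustration, not kernel code: the section name "cpuvendor", the cpu_vendor_register() macro, and the demo structs are assumptions, and it leans on GNU ld's automatic __start_<section>/__stop_<section> symbols where the kernel instead defines __x86cpuvendor_start/__x86cpuvendor_end in its linker script.

#include <stdio.h>

enum { VENDOR_INTEL, VENDOR_AMD, VENDOR_NUM };

struct cpu_dev {
        const char *c_vendor;
};

struct cpu_vendor_dev {
        int vendor;
        struct cpu_dev *cpu_dev;
};

/* Hypothetical registration macro: drop one entry into the "cpuvendor"
 * section; the linker concatenates entries from every object file into
 * one contiguous array. */
#define cpu_vendor_register(id, dev)                                    \
        static struct cpu_vendor_dev __vendor_entry_##id                \
        __attribute__((used, section("cpuvendor"))) = { id, dev }

static struct cpu_dev intel_cpu_dev = { .c_vendor = "Intel" };
cpu_vendor_register(VENDOR_INTEL, &intel_cpu_dev);

static struct cpu_dev amd_cpu_dev = { .c_vendor = "AMD" };
cpu_vendor_register(VENDOR_AMD, &amd_cpu_dev);

/* GNU ld emits __start_<name>/__stop_<name> for any section whose name
 * is a valid C identifier; the kernel gets equivalent start/end symbols
 * from vmlinux.lds instead. */
extern struct cpu_vendor_dev __start_cpuvendor[], __stop_cpuvendor[];

static struct cpu_dev *cpu_devs[VENDOR_NUM];

int main(void)
{
        struct cpu_vendor_dev *cvdev;
        int i;

        /* The same loop shape the new early_cpu_init() runs. */
        for (cvdev = __start_cpuvendor; cvdev < __stop_cpuvendor; cvdev++)
                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

        for (i = 0; i < VENDOR_NUM; i++)
                printf("vendor %d -> %s\n", i, cpu_devs[i]->c_vendor);
        return 0;
}

The design benefit visible in the diff is the same as in this sketch: common.c no longer needs to know the list of vendors at all, so compiling a vendor file in or out automatically adds or removes its table entry, and early_cpu_detect() can dispatch through cpu_devs[]->c_early_init rather than a hard-coded switch.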