Diffstat (limited to 'arch/x86/kernel/cpu/intel.c')
-rw-r--r--	arch/x86/kernel/cpu/intel.c	133
1 file changed, 79 insertions, 54 deletions
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index ec7299566f7..f9e4fdd3b87 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1,4 +1,3 @@
-#include <linux/init.h>
 #include <linux/kernel.h>
 
 #include <linux/string.h>
@@ -32,11 +31,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 
 	/* Unmask CPUID levels if masked: */
 	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
-		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
-			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
-			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
 			c->cpuid_level = cpuid_eax(0);
 			get_cpu_cap(c);
 		}
@@ -93,7 +89,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 		if (!check_tsc_unstable())
-			sched_clock_stable = 1;
+			set_sched_clock_stable();
 	}
 
 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
@@ -130,16 +126,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
 	 * (model 2) with the same problem.
 	 */
-	if (c->x86 == 15) {
-		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
-			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
-
-			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
-			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-		}
-	}
+	if (c->x86 == 15)
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
+			pr_info("kmemcheck: Disabling fast string operations\n");
 #endif
 
 	/*
@@ -196,10 +186,16 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
 	}
 }
 
-static void intel_workarounds(struct cpuinfo_x86 *c)
+static int forcepae;
+static int __init forcepae_setup(char *__unused)
 {
-	unsigned long lo, hi;
+	forcepae = 1;
+	return 1;
+}
+__setup("forcepae", forcepae_setup);
 
+static void intel_workarounds(struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_X86_F00F_BUG
 	/*
 	 * All current models of Pentium and Pentium with MMX technology CPUs
@@ -226,16 +222,26 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 		clear_cpu_cap(c, X86_FEATURE_SEP);
 
 	/*
+	 * PAE CPUID issue: many Pentium M report no PAE but may have a
+	 * functionally usable PAE implementation.
+	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
+	 */
+	if (forcepae) {
+		printk(KERN_WARNING "PAE forced!\n");
+		set_cpu_cap(c, X86_FEATURE_PAE);
+		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
+	}
+
+	/*
 	 * P4 Xeon errata 037 workaround.
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
-		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
-		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
-			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
-			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
-			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
+		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
+				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+		    > 0) {
+			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
+			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
 		}
 	}
 
@@ -268,10 +274,6 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	}
 #endif
 
-#ifdef CONFIG_X86_NUMAQ
-	numaq_tsc_disable();
-#endif
-
 	intel_smp_check(c);
 }
 #else
@@ -368,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -387,7 +400,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
-	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+	if (c->x86 == 6 && cpu_has_clflush &&
+	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
 		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
 
 #ifdef CONFIG_X86_64
@@ -435,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
-
 	/* Work around errata */
 	srat_detect_node(c);
 
@@ -505,6 +508,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 #define TLB_DATA0_2M_4M	0x23
 
 #define STLB_4K		0x41
+#define STLB_4K_2M	0x42
 
 static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
@@ -525,13 +529,20 @@ static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
 	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
 	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
+	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
+	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
+	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
 	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
 	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
 	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
 	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
 	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
+	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
+	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
 	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
 	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
+	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
+	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
 	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
 	{ 0x00, 0, 0 }
 };
@@ -557,6 +568,20 @@ static void intel_tlb_lookup(const unsigned char desc)
 		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
 			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
 		break;
+	case STLB_4K_2M:
+		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+		break;
 	case TLB_INST_ALL:
 		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
 			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
@@ -602,6 +627,10 @@ static void intel_tlb_lookup(const unsigned char desc)
 		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
 			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
 		break;
+	case TLB_DATA_1G:
+		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
+			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
+		break;
 	}
 }
 
@@ -614,21 +643,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
 	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
 		tlb_flushall_shift = -1;
 		break;
+	case 0x63a: /* Ivybridge */
+		tlb_flushall_shift = 2;
+		break;
 	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
 	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
 	case 0x625: /* 32 nm nehalem, "Clarkdale" */
 	case 0x62c: /* 32 nm nehalem, "Gulftown" */
 	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
 	case 0x62f: /* 32 nm Xeon E7 */
-		tlb_flushall_shift = 6;
-		break;
 	case 0x62a: /* SandyBridge */
 	case 0x62d: /* SandyBridge, "Romely-EP" */
-		tlb_flushall_shift = 5;
-		break;
-	case 0x63a: /* Ivybridge */
-		tlb_flushall_shift = 1;
-		break;
 	default:
 		tlb_flushall_shift = 6;
 	}
@@ -665,8 +690,8 @@ static const struct cpu_dev intel_cpu_dev = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
-	.c_models = {
-		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
+	.legacy_models = {
+		{ .family = 4, .model_names =
 		  {
 			  [0] = "486 DX-25/33",
 			  [1] = "486 DX-50",
@@ -679,7 +704,7 @@ static const struct cpu_dev intel_cpu_dev = {
 			  [9] = "486 DX/4-WB"
 		  }
 		},
-		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
+		{ .family = 5, .model_names =
 		  {
 			  [0] = "Pentium 60/66 A-step",
 			  [1] = "Pentium 60/66",
@@ -690,7 +715,7 @@ static const struct cpu_dev intel_cpu_dev = {
 			  [8] = "Mobile Pentium MMX"
 		  }
 		},
-		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
+		{ .family = 6, .model_names =
 		  {
 			  [0] = "Pentium Pro A-step",
 			  [1] = "Pentium Pro",
@@ -704,7 +729,7 @@ static const struct cpu_dev intel_cpu_dev = {
 			  [11] = "Pentium III (Tualatin)",
 		  }
 		},
-		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
+		{ .family = 15, .model_names =
 		  {
 			  [0] = "Pentium 4 (Unknown)",
 			  [1] = "Pentium 4 (Willamette)",
@@ -714,7 +739,7 @@ static const struct cpu_dev intel_cpu_dev = {
 		  }
 		},
 	},
-	.c_size_cache	= intel_size_cache,
+	.legacy_cache_size = intel_size_cache,
 #endif
 	.c_detect_tlb	= intel_detect_tlb,
 	.c_early_init   = early_init_intel,
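
Several hunks above convert open-coded rdmsr/wrmsr read-modify-write sequences to msr_clear_bit()/msr_set_bit() and branch on a result greater than zero, i.e. the callers only act when the bit actually changed state. The following is a minimal, hypothetical sketch of that return convention, assuming the safe accessors rdmsrl_safe()/wrmsrl_safe(); it is for illustration only and is not the kernel's implementation (the real helpers live in arch/x86/lib/msr.c).

#include <linux/types.h>
#include <asm/msr.h>

/*
 * Illustrative sketch only -- not the kernel's implementation.
 * Return convention assumed from the callers in the diff:
 *   < 0  MSR access failed
 *     0  bit was already clear, nothing written
 *   > 0  bit was set and has now been cleared
 */
static int example_msr_clear_bit(u32 msr, u8 bit)
{
	u64 val, mask = 1ULL << bit;
	int err;

	err = rdmsrl_safe(msr, &val);	/* read the current MSR value */
	if (err)
		return err;

	if (!(val & mask))		/* already in the requested state */
		return 0;

	val &= ~mask;
	err = wrmsrl_safe(msr, val);	/* write back with the bit cleared */
	if (err)
		return err;

	return 1;			/* caller sees "> 0": bit was flipped */
}

Under this convention the CPUID-limit hunk, for example, only re-reads the CPUID level when the limit bit was genuinely cleared, and does nothing extra on CPUs where it was never set.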
