Diffstat (limited to 'arch/x86/kernel/cpu/amd.c')
-rw-r--r--	arch/x86/kernel/cpu/amd.c	420
1 file changed, 299 insertions, 121 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8fe78..ce8b8ff0e0e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,21 +1,55 @@
-#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/bitops.h>
+#include <linux/elf.h>
 #include <linux/mm.h>
 
 #include <linux/io.h>
+#include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
 #include <asm/pci-direct.h>
 
 #ifdef CONFIG_X86_64
-# include <asm/numa_64.h>
 # include <asm/mmconfig.h>
 # include <asm/cacheflush.h>
 #endif
 
 #include "cpu.h"
 
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	u32 gprs[8] = { 0 };
+	int err;
+
+	WARN_ONCE((boot_cpu_data.x86 != 0xf),
+		  "%s should only be used on K8!\n", __func__);
+
+	gprs[1] = msr;
+	gprs[7] = 0x9c5a203a;
+
+	err = rdmsr_safe_regs(gprs);
+
+	*p = gprs[0] | ((u64)gprs[2] << 32);
+
+	return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+	u32 gprs[8] = { 0 };
+
+	WARN_ONCE((boot_cpu_data.x86 != 0xf),
+		  "%s should only be used on K8!\n", __func__);
+
+	gprs[0] = (u32)val;
+	gprs[1] = msr;
+	gprs[2] = val >> 32;
+	gprs[7] = 0x9c5a203a;
+
+	return wrmsr_safe_regs(gprs);
+}
+
 #ifdef CONFIG_X86_32
 /*
  *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -23,17 +57,18 @@
  *	contact AMD for precise details and a CPU swap.
  *
  *	See	http://www.multimania.com/poulot/k6bug.html
- *		http://www.amd.com/K6/k6docs/revgd.html
+ *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
+ *		(Publication # 21266  Issue Date: August 1998)
  *
  *	The following test is erm.. interesting. AMD neglected to up
  *	the chip setting when fixing the bug but they also tweaked some
  *	performance at the same time..
  */
 
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
+extern __visible void vide(void);
+__asm__(".globl vide\n\t.align 4\nvide: ret");
 
-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
+static void init_amd_k5(struct cpuinfo_x86 *c)
 {
 /*
  * General Systems BIOSen alias the cpu frequency registers
@@ -51,10 +86,10 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 }
 
 
-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+static void init_amd_k6(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
-	int mbytes = num_physpages >> (20-PAGE_SHIFT);
+	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 
 	if (c->x86_model < 6) {
 		/* Based on AMD doc 20734R - June 2000 */
@@ -91,7 +126,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 				"system stability may be impaired when more than 32 MB are used.\n");
 		else
 			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
-		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 	}
 
 	/* K6 with old style WHCR */
@@ -144,9 +178,8 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
 		return;
@@ -158,11 +191,11 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	/* Athlon 660/661 is valid. */
 	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
 	    (c->x86_mask == 1)))
-		goto valid_k7;
+		return;
 
 	/* Duron 670 is valid */
 	if ((c->x86_model == 7) && (c->x86_mask == 0))
-		goto valid_k7;
+		return;
 
 	/*
 	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
@@ -175,7 +208,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
 	     (c->x86_model > 7))
 		if (cpu_has_mp)
-			goto valid_k7;
+			return;
 
 	/* If we get here, not a certified SMP capable AMD system. */
 
@@ -185,15 +218,10 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	 */
 	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
-	if (!test_taint(TAINT_UNSAFE_SMP))
-		add_taint(TAINT_UNSAFE_SMP);
-
-valid_k7:
-	;
-#endif
+	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 }
 
-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 
@@ -205,9 +233,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 	if (c->x86_model >= 6 && c->x86_model <= 10) {
 		if (!cpu_has(c, X86_FEATURE_XMM)) {
 			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-			rdmsr(MSR_K7_HWCR, l, h);
-			l &= ~0x00008000;
-			wrmsr(MSR_K7_HWCR, l, h);
+			msr_clear_bit(MSR_K7_HWCR, 15);
 			set_cpu_cap(c, X86_FEATURE_XMM);
 		}
 	}
@@ -233,18 +259,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 }
 #endif
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-static int __cpuinit nearby_node(int apicid)
+#ifdef CONFIG_NUMA
+/*
+ * To workaround broken NUMA config.  Read the comment in
+ * srat_detect_node().
+ */
+static int nearby_node(int apicid)
 {
 	int i, node;
 
 	for (i = apicid - 1; i >= 0; i--) {
-		node = apicid_to_node[i];
+		node = __apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-		node = apicid_to_node[i];
+		node = __apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
@@ -259,14 +289,14 @@ static int __cpuinit nearby_node(int apicid)
  * (2) AMD processors supporting compute units
  */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
+static void amd_get_topology(struct cpuinfo_x86 *c)
 {
-	u32 nodes;
+	u32 nodes, cores_per_cu = 1;
 	u8 node_id;
 	int cpu = smp_processor_id();
 
 	/* get information required for multi-node processors */
-	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+	if (cpu_has_topoext) {
 		u32 eax, ebx, ecx, edx;
 
 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -276,6 +306,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 		/* get compute unit information */
 		smp_num_siblings = ((ebx >> 8) & 3) + 1;
 		c->compute_unit_id = ebx & 0xff;
+		cores_per_cu += ((ebx >> 8) & 3);
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 		u64 value;
 
@@ -288,24 +319,27 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 	/* fixup multi-node processor information */
 	if (nodes > 1) {
 		u32 cores_per_node;
+		u32 cus_per_node;
 
 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 		cores_per_node = c->x86_max_cores / nodes;
+		cus_per_node = cores_per_node / cores_per_cu;
 
 		/* store NodeID, use llc_shared_map to store sibling info */
 		per_cpu(cpu_llc_id, cpu) = node_id;
 
-		/* core id to be in range from 0 to (cores_per_node - 1) */
-		c->cpu_core_id = c->cpu_core_id % cores_per_node;
+		/* core id has to be in the [0 .. cores_per_node - 1] range */
+		c->cpu_core_id %= cores_per_node;
+		c->compute_unit_id %= cus_per_node;
 	}
 }
 #endif
 
 /*
- * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
+ * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
  * Assumes number of cores is a power of two.
  */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits;
@@ -322,9 +356,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
-int amd_get_nb_id(int cpu)
+u16 amd_get_nb_id(int cpu)
 {
-	int id = 0;
+	u16 id = 0;
 #ifdef CONFIG_SMP
 	id = per_cpu(cpu_llc_id, cpu);
 #endif
@@ -332,33 +366,50 @@ int amd_get_nb_id(int cpu)
 }
 EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
 	int node;
 	unsigned apicid = c->apicid;
 
-	node = per_cpu(cpu_llc_id, cpu);
+	node = numa_cpu_node(cpu);
+	if (node == NUMA_NO_NODE)
+		node = per_cpu(cpu_llc_id, cpu);
 
-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
-	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		   In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		   which the K8 northbridge parsing fills in.
-		   Assume they are all increased by a constant offset,
-		   but in the same order as the HT nodeids.
-		   If that doesn't result in a usable node fall back to the
-		   path for the previous case.  */
+	/*
+	 * On multi-fabric platform (e.g. Numascale NumaChip) a
+	 * platform-specific handler needs to be called to fixup some
+	 * IDs of the CPU.
+	 */
+	if (x86_cpuinit.fixup_cpu_id)
+		x86_cpuinit.fixup_cpu_id(c, node);
+
+	if (!node_online(node)) {
+		/*
+		 * Two possibilities here:
+		 *
+		 * - The CPU is missing memory and no node was created.  In
+		 *   that case try picking one from a nearby CPU.
+		 *
+		 * - The APIC IDs differ from the HyperTransport node IDs
+		 *   which the K8 northbridge parsing fills in.  Assume
+		 *   they are all increased by a constant offset, but in
+		 *   the same order as the HT nodeids.  If that doesn't
+		 *   result in a usable node fall back to the path for the
+		 *   previous case.
+		 *
+		 * This workaround operates directly on the mapping between
+		 * APIC ID and NUMA node, assuming certain relationship
+		 * between APIC ID, HT node ID and NUMA topology.  As going
+		 * through CPU mapping may alter the outcome, directly
+		 * access __apicid_to_node[].
+		 */
 		int ht_nodeid = c->initial_apicid;
 
 		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
+		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = __apicid_to_node[ht_nodeid];
 		/* Pick a nearby node */
 		if (!node_online(node))
 			node = nearby_node(apicid);
@@ -367,7 +418,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+static void early_init_amd_mc(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits, ecx;
@@ -393,7 +444,35 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void bsp_init_amd(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+
+		if (c->x86 > 0x10 ||
+		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
+			u64 val;
+
+			rdmsrl(MSR_K7_HWCR, val);
+			if (!(val & BIT(24)))
+				printk(KERN_WARNING FW_BUG "TSC doesn't count "
+					"with P0 frequency!\n");
+		}
+	}
+
+	if (c->x86 == 0x15) {
+		unsigned long upperbit;
+		u32 cpuid, assoc;
+
+		cpuid	 = cpuid_edx(0x80000005);
+		assoc	 = cpuid >> 16 & 0xff;
+		upperbit = ((cpuid >> 24) << 10) / assoc;
+
+		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
+		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
+	}
+}
+
+static void early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);
 
@@ -404,6 +483,8 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+		if (!check_tsc_unstable())
+			set_sched_clock_stable();
 	}
 
 #ifdef CONFIG_X86_64
@@ -425,29 +506,21 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 	}
 #endif
 
-	/* We need to do the following only once */
-	if (c != &boot_cpu_data)
-		return;
-
-	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
-
-		if (c->x86 > 0x10 ||
-		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
-			u64 val;
-
-			rdmsrl(MSR_K7_HWCR, val);
-			if (!(val & BIT(24)))
-				printk(KERN_WARNING FW_BUG "TSC doesn't count "
-					"with P0 frequency!\n");
-		}
-	}
+	/* F16h erratum 793, CVE-2013-6885 */
+	if (c->x86 == 0x16 && c->x86_model <= 0xf)
+		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 }
 
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+static const int amd_erratum_383[];
+static const int amd_erratum_400[];
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+
+static void init_amd(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
+	u32 dummy;
 	unsigned long long value;
 
+#ifdef CONFIG_SMP
 	/*
 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
@@ -455,11 +528,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * Errata 63 for SH-B3 steppings
 	 * Errata 122 for all steppings (F+ have it disabled by default)
 	 */
-	if (c->x86 == 0xf) {
-		rdmsrl(MSR_K7_HWCR, value);
-		value |= 1 << 6;
-		wrmsrl(MSR_K7_HWCR, value);
-	}
+	if (c->x86 == 0xf)
+		msr_set_bit(MSR_K7_HWCR, 6);
 #endif
 
 	early_init_amd(c);
@@ -485,12 +555,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		 * (AMD Erratum #110, docId: 25759).
 		 */
 		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
-			u64 val;
-
 			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
-			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
-				val &= ~(1ULL << 32);
-				wrmsrl_amd_safe(0xc001100d, val);
+			if (!rdmsrl_amd_safe(0xc001100d, &value)) {
+				value &= ~(1ULL << 32);
+				wrmsrl_amd_safe(0xc001100d, value);
 			}
 		}
 
@@ -539,6 +607,33 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
+	/* re-enable TopologyExtensions if switched off by BIOS */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+
+		if (msr_set_bit(0xc0011005, 54) > 0) {
+			rdmsrl(0xc0011005, value);
+			if (value & BIT_64(54)) {
+				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+			}
+		}
+	}
+
+	/*
+	 * The way access filter has a performance penalty on some workloads.
+	 * Disable it on the affected CPUs.
+	 */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+
+		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
+			value |= 0x1E;
+			wrmsrl_safe(0xc0011021, value);
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */
@@ -551,12 +646,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	detect_ht(c);
 #endif
 
-	if (c->extended_cpuid_level >= 0x80000006) {
-		if (cpuid_edx(0x80000006) & 0xf000)
-			num_cache_leaves = 4;
-		else
-			num_cache_leaves = 3;
-	}
+	init_amd_cacheinfo(c);
 
 	if (c->x86 >= 0xf)
 		set_cpu_cap(c, X86_FEATURE_K8);
@@ -584,21 +674,58 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		 * benefit in doing so.
 		 */
 		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+			unsigned long pfn = tseg >> PAGE_SHIFT;
+
 			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
-			if ((tseg>>PMD_SHIFT) <
-				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
-				((tseg>>PMD_SHIFT) <
-				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
-				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
+			if (pfn_range_is_mapped(pfn, pfn + 1))
 				set_memory_4k((unsigned long)__va(tseg), 1);
 		}
 	}
 #endif
+
+	/*
+	 * Family 0x12 and above processors have APIC timer
+	 * running in deep C states.
+	 */
+	if (c->x86 > 0x11)
+		set_cpu_cap(c, X86_FEATURE_ARAT);
+
+	if (c->x86 == 0x10) {
+		/*
+		 * Disable GART TLB Walk Errors on Fam10h. We do this here
+		 * because this is always needed when GART is enabled, even in a
+		 * kernel which has no MCE support built in.
+		 * BIOS should disable GartTlbWlk Errors already. If
+		 * it doesn't, do it here as suggested by the BKDG.
+		 *
+		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+		 */
+		msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
+
+		/*
+		 * On family 10h BIOS may not have properly enabled WC+ support,
+		 * causing it to be converted to CD memtype. This may result in
+		 * performance degradation for certain nested-paging guests.
+		 * Prevent this conversion by clearing bit 24 in
+		 * MSR_AMD64_BU_CFG2.
+		 *
+		 * NOTE: we want to use the _safe accessors so as not to #GP kvm
+		 * guests on older kvm hosts.
+		 */
+		msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
+
+		if (cpu_has_amd_erratum(c, amd_erratum_383))
+			set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+	}
+
+	if (cpu_has_amd_erratum(c, amd_erratum_400))
+		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
+
+	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
-							unsigned int size)
+static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
@@ -614,12 +741,68 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
 	return size;
 }
 #endif
 
-static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
+static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
+{
+	tlb_flushall_shift = 6;
+}
+
+static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
+{
+	u32 ebx, eax, ecx, edx;
+	u16 mask = 0xfff;
+
+	if (c->x86 < 0xf)
+		return;
+
+	if (c->extended_cpuid_level < 0x80000006)
+		return;
+
+	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
+
+	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
+	tlb_lli_4k[ENTRIES] = ebx & mask;
+
+	/*
+	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
+	 * characteristics from the CPUID function 0x80000005 instead.
+	 */
+	if (c->x86 == 0xf) {
+		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
+		mask = 0xff;
+	}
+
+	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
+	if (!((eax >> 16) & mask))
+		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
+	else
+		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
+
+	/* a 4M entry uses two 2M entries */
+	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
+
+	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
+	if (!(eax & mask)) {
+		/* Erratum 658 */
+		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
+			tlb_lli_2m[ENTRIES] = 1024;
+		} else {
+			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
+			tlb_lli_2m[ENTRIES] = eax & 0xff;
+		}
+	} else
+		tlb_lli_2m[ENTRIES] = eax & mask;
+
+	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
+
+	cpu_set_tlb_flushall_shift(c);
+}
+
+static const struct cpu_dev amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
-	.c_models = {
-		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
+	.legacy_models = {
+		{ .family = 4, .model_names =
 		  {
 			  [3] = "486 DX/2",
 			  [7] = "486 DX/2-WB",
@@ -630,9 +813,11 @@ static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 		  }
 		},
 	},
-	.c_size_cache	= amd_size_cache,
+	.legacy_cache_size = amd_size_cache,
 #endif
 	.c_early_init   = early_init_amd,
+	.c_detect_tlb	= cpu_detect_tlb_amd,
+	.c_bsp_init	= bsp_init_amd,
 	.c_init		= init_amd,
 	.c_x86_vendor	= X86_VENDOR_AMD,
 };
@@ -646,8 +831,7 @@ cpu_dev_register(amd_cpu_dev);
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
- * int[] in arch/x86/include/asm/processor.h.
+ * AMD_MODEL_RANGE().
 *
 * Example:
 *
@@ -657,32 +841,28 @@ cpu_dev_register(amd_cpu_dev);
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */
 
-const int amd_erratum_400[] =
+#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
+#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
+#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
+#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
+
+static const int amd_erratum_400[] =
 	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
 			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_400);
 
-const int amd_erratum_383[] =
+static const int amd_erratum_383[] =
 	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_383);
 
-bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
-	struct cpuinfo_x86 *cpu = &current_cpu_data;
 	int osvw_id = *erratum++;
 	u32 range;
 	u32 ms;
 
-	/*
-	 * If called early enough that current_cpu_data hasn't been initialized
-	 * yet, fall back to boot_cpu_data.
-	 */
-	if (cpu->x86 == 0)
-		cpu = &boot_cpu_data;
-
-	if (cpu->x86_vendor != X86_VENDOR_AMD)
-		return false;
-
 	if (osvw_id >= 0 && osvw_id < 65536 &&
 	    cpu_has(cpu, X86_FEATURE_OSVW)) {
 		u64 osvw_len;
@@ -707,5 +887,3 @@ bool cpu_has_amd_erratum(const int *erratum)
 
 	return false;
 }
-
-EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
