author    Torsten Kaiser <just.for.lkml@googlemail.com>  2013-07-23 19:40:49 +0200
committer Borislav Petkov <bp@suse.de>  2013-08-12 18:25:00 +0200
commit    8c6b79bb1211d91fb31bcbc2a1eea8d6963d3ad9 (patch)
tree      4c7798be97b8f220f242eacb2c861de658b8f88f /arch/x86
parent    d4e4ab86bcba5a72779c43dc1459f71fea3d89c8 (diff)
x86, microcode, AMD: Make cpu_has_amd_erratum() use the correct struct cpuinfo_x86
cpu_has_amd_erratum() is buggy, because it uses the per-cpu cpu_info before it is filled by smp_store_boot_cpu_info() / smp_store_cpu_info().

If early microcode loading is enabled, its collect_cpu_info_amd_early() will fill ->x86, so the fallback to boot_cpu_data is not used. But ->x86_vendor was not filled and is still X86_VENDOR_INTEL, resulting in no errata fixes getting applied, and my system hangs on boot.

Using cpu_info in cpu_has_amd_erratum() is wrong anyway: its only caller init_amd() already has a struct cpuinfo_x86 as parameter, and the set_cpu_bug() that is controlled by cpu_has_amd_erratum() also only uses that struct.

So pass the struct cpuinfo_x86 from init_amd() to cpu_has_amd_erratum(), and the broken fallback can be dropped.

[ Boris: Drop WARN_ON() since we're called only from init_amd(). ]

Signed-off-by: Torsten Kaiser <just.for.lkml@googlemail.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
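For illustration only, not part of the commit: a minimal, self-contained C sketch of the failure mode described above. It relies on the fact that X86_VENDOR_INTEL is 0 in the kernel headers, so a zero-initialized per-cpu cpu_info looks like an Intel CPU; once early microcode loading has filled ->x86, the fallback to boot_cpu_data is skipped and the vendor check then rejects the CPU. The struct, the values, and the helper name old_cpu_has_amd_erratum() are simplified stand-ins, not the kernel's definitions.

/*
 * Simplified stand-ins for the kernel structures; illustration only.
 * X86_VENDOR_INTEL really is 0, which is why an unwritten vendor field
 * makes a per-cpu cpu_info pass for an Intel CPU.
 */
#include <stdio.h>

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_AMD   2

struct cpuinfo_x86 {
        unsigned char x86;              /* CPU family */
        unsigned char x86_vendor;       /* CPU vendor */
};

/* Fully set up by the boot code: an AMD family 0x15 CPU. */
static struct cpuinfo_x86 boot_cpu_data = {
        .x86            = 0x15,
        .x86_vendor     = X86_VENDOR_AMD,
};

/*
 * Per-cpu cpu_info before smp_store_cpu_info(): still mostly zeroes,
 * except that early microcode loading (collect_cpu_info_amd_early())
 * has already filled ->x86.
 */
static struct cpuinfo_x86 cpu_info = {
        .x86            = 0x15,
        .x86_vendor     = X86_VENDOR_INTEL,     /* never written, still 0 */
};

/* The removed fallback logic, reduced to its two early-exit checks. */
static int old_cpu_has_amd_erratum(void)
{
        struct cpuinfo_x86 *cpu = &cpu_info;

        if (cpu->x86 == 0)                      /* never taken: ->x86 is already set */
                cpu = &boot_cpu_data;

        if (cpu->x86_vendor != X86_VENDOR_AMD)  /* 0 != 2, so we bail out here */
                return 0;

        return 1;                               /* real OSVW/range checks would follow */
}

int main(void)
{
        /* Prints 0: the erratum is never reported, so no workaround is applied. */
        printf("old_cpu_has_amd_erratum() = %d\n", old_cpu_has_amd_erratum());
        return 0;
}

With this patch, init_amd() hands its own, fully initialized struct cpuinfo_x86 to cpu_has_amd_erratum(), so neither the fallback nor the vendor check is needed.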
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/amd.c  |  20
1 file changed, 5 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ecefea5..08a089043cc 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
static const int amd_erratum_383[];
static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
static void init_amd(struct cpuinfo_x86 *c)
{
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
                value &= ~(1ULL << 24);
                wrmsrl_safe(MSR_AMD64_BU_CFG2, value);

-               if (cpu_has_amd_erratum(amd_erratum_383))
+               if (cpu_has_amd_erratum(c, amd_erratum_383))
                        set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
        }

-       if (cpu_has_amd_erratum(amd_erratum_400))
+       if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
-       struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

-       /*
-        * If called early enough that current_cpu_data hasn't been initialized
-        * yet, fall back to boot_cpu_data.
-        */
-       if (cpu->x86 == 0)
-               cpu = &boot_cpu_data;
-
-       if (cpu->x86_vendor != X86_VENDOR_AMD)
-               return false;
-
        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;