author		Roedel, Joerg <Joerg.Roedel@amd.com>	2011-05-19 11:13:39 +0200
committer	Paul Gortmaker <paul.gortmaker@windriver.com>	2012-05-17 11:20:29 -0400
commit		08437d771f90d0414fd5ee67ad02ae3eb89a4d81 (patch)
tree		f2c7cda198512e98daa4833538a67d5280c0e167 /arch/x86/kernel/cpu
parent		365671b9c6d56cac4ef73b83e47e2d8f37de0b89 (diff)
x86, amd: Use _safe() msr access for GartTlbWlk disable code
commit d47cc0db8fd6011de2248df505fc34990b7451bf upstream.

The workaround for the Bugzilla report

	https://bugzilla.kernel.org/show_bug.cgi?id=33012

introduced a read and a write to the MC4 mask MSR. Unfortunately, this MSR is
not emulated by the KVM hypervisor, so the kernel gets a #GP and crashes when
applying the workaround while running inside KVM.

This issue was reported as

	https://bugzilla.kernel.org/show_bug.cgi?id=35132

and is fixed with this patch. The change just lets the kernel ignore any #GP
it gets while accessing this MSR, by using the _safe MSR access methods.

Reported-by: Török Edwin <edwintorok@gmail.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Maciej Rutecki <maciej.rutecki@gmail.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
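For readers unfamiliar with the _safe accessors: rdmsrl_safe() and
checking_wrmsrl() report a failed MSR access through their return value
instead of letting the resulting #GP fault crash the kernel. The fragment
below is a minimal, standalone sketch of that pattern with the helpers of
this kernel generation, not the patch itself; the function name is
hypothetical, and later kernels provide the same safe write under the name
wrmsrl_safe().

	#include <linux/types.h>
	#include <asm/msr.h>		/* rdmsrl_safe(), checking_wrmsrl() */
	#include <asm/msr-index.h>	/* MSR_AMD64_MCx_MASK() */

	/* Hypothetical helper illustrating the _safe MSR access pattern. */
	static void example_set_mc4_mask_bit(void)
	{
		u64 mask;

		/*
		 * rdmsrl_safe() returns 0 on success and non-zero if the
		 * read raised #GP, e.g. because KVM does not emulate the MSR.
		 */
		if (rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask))
			return;		/* MSR not available, skip the workaround */

		mask |= (1 << 10);	/* disable GART TLB walk error reporting */

		/*
		 * The safe write likewise swallows a possible #GP; the return
		 * value can be ignored since there is nothing left to undo.
		 */
		checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
	}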
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/amd.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 770346d348c..02a5a5f6a6c 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -586,10 +586,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 		 */
 		u64 mask;
+		int err;
 
-		rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
-		mask |= (1 << 10);
-		wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
+		if (err == 0) {
+			mask |= (1 << 10);
+			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+		}
 	}
 }