From e6d41e8c697e07832efa4a85bf23438bc4c4e1b2 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 29 Oct 2012 18:40:08 +0100 Subject: x86, AMD: Change Boris' email address Move to private email and put in maintained status. Signed-off-by: Borislav Petkov Link: http://lkml.kernel.org/r/1351532410-4887-1-git-send-email-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_amd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 698b6ec12e0..1ac581f38df 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -6,7 +6,7 @@ * * Written by Jacob Shin - AMD, Inc. * - * Support: borislav.petkov@amd.com + * Maintained by: Borislav Petkov * * April 2006 * - added support for AMD Family 0x10 processors -- cgit v1.2.3-18-g5258 From 943482d07e926128eed0482b879736f912c429e4 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Mon, 29 Oct 2012 18:51:38 +0100 Subject: x86, microcode_amd: Change email addresses, MAINTAINERS entry Signed-off-by: Andreas Herrmann Cc: lm-sensors@lm-sensors.org Cc: oprofile-list@lists.sf.net Cc: Stephane Eranian Cc: Robert Richter Cc: Borislav Petkov Cc: Jorg Roedel Cc: Rafael J. Wysocki Cc: Jean Delvare Cc: Guenter Roeck Link: http://lkml.kernel.org/r/20121029175138.GC5024@tweety Signed-off-by: Ingo Molnar --- arch/x86/kernel/microcode_amd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7720ff5a9ee..b3e67ba55b7 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -8,8 +8,8 @@ * Tigran Aivazian * * Maintainers: - * Andreas Herrmann - * Borislav Petkov + * Andreas Herrmann + * Borislav Petkov * * This driver allows to upgrade microcode on F10h AMD * CPUs and later. -- cgit v1.2.3-18-g5258 From f49f4ab95c301dbccad0efe85296d908b8ae7ad4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 29 Oct 2012 14:40:18 +0100 Subject: x86/ce4100: Fix pm_poweroff The CE4100 platform is currently missing a proper pm_poweroff implementation leading to poweroff making the CPU spin forever and the CE4100 platform does not enter a low-power mode where the external Power Management Unit can properly power off the system. Power off on this platform is implemented pretty much like reboot, by writing to the SoC built-in 8051 microcontroller mapped at I/O port 0xcf9, the value 0x4. Signed-off-by: Florian Fainelli Acked-by: Sebastian Andrzej Siewior Cc: rui.zhang@intel.com Cc: alan@linux.intel.com Link: http://lkml.kernel.org/r/1351518020-25556-2-git-send-email-ffainelli@freebox.fr Signed-off-by: Ingo Molnar --- arch/x86/platform/ce4100/ce4100.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index 4c61b52191e..5de16e359fe 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -27,6 +27,18 @@ static int ce4100_i8042_detect(void) return 0; } +/* + * The CE4100 platform has an internal 8051 Microcontroller which is + * responsible for signaling to the external Power Management Unit the + * intention to reset, reboot or power off the system. This 8051 device has + * its command register mapped at I/O port 0xcf9 and the value 0x4 is used + * to power off the system. 
+ */ +static void ce4100_power_off(void) +{ + outb(0x4, 0xcf9); +} + #ifdef CONFIG_SERIAL_8250 static unsigned int mem_serial_in(struct uart_port *p, int offset) @@ -143,4 +155,6 @@ void __init x86_ce4100_early_setup(void) x86_init.pci.init_irq = sdv_pci_init; x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; #endif + + pm_power_off = ce4100_power_off; } -- cgit v1.2.3-18-g5258 From d7959916026aaae60e1878ae33c7503b2cc4471d Mon Sep 17 00:00:00 2001 From: Maxime Bizon Date: Mon, 29 Oct 2012 14:40:19 +0100 Subject: x86/ce4100: Fix reboot by forcing the reboot method to be KBD The default reboot is via ACPI for this platform, and the CEFDK bootloader actually supports this, but will issue a system power off instead of a real reboot. Setting the reboot method to be KBD instead of ACPI ensures proper system reboot. Acked-by: Sebastian Andrzej Siewior Signed-off-by: Maxime Bizon Signed-off-by: Florian Fainelli Cc: rui.zhang@intel.com Cc: alan@linux.intel.com Link: http://lkml.kernel.org/r/1351518020-25556-3-git-send-email-ffainelli@freebox.fr Signed-off-by: Ingo Molnar --- arch/x86/platform/ce4100/ce4100.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index 5de16e359fe..92525cb8e54 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -21,6 +21,7 @@ #include #include #include +#include static int ce4100_i8042_detect(void) { @@ -151,6 +152,15 @@ void __init x86_ce4100_early_setup(void) x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.pci.init = ce4100_pci_init; + /* + * By default, the reboot method is ACPI which is supported by the + * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue + * the bootloader will however issue a system power off instead of + * reboot. By using BOOT_KBD we ensure proper system reboot as + * expected. + */ + reboot_type = BOOT_KBD; + #ifdef CONFIG_X86_IO_APIC x86_init.pci.init_irq = sdv_pci_init; x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; -- cgit v1.2.3-18-g5258 From 37aeec36220c39f1b2e7118287d951fd9cfdd6b7 Mon Sep 17 00:00:00 2001 From: Maxime Bizon Date: Mon, 29 Oct 2012 14:40:20 +0100 Subject: x86/ce4100: Fix PCI configuration register access for devices without interrupts Some CE4100 devices such as the: - DFX module (01:0b.7) - entertainment encryption device (01:10.0) - multimedia controller (01:12.0) do not have a device interrupt at all. This patch fixes the PCI controller code to declare the missing PCI configuration register space, as well as a fixup method for forcing the interrupt pin to be 0 for these devices. This is required to ensure that pci drivers matching on these devices will be able to honor the various PCI subsystem calls touching the configuration space. 
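For illustration, here is a minimal, hypothetical driver-side sketch (not part of this patch series) showing how such a forced-to-zero interrupt pin is typically consumed; pci_read_config_byte() and PCI_INTERRUPT_PIN are the standard kernel interfaces, while the probe function and its messages are invented for the example:

/*
 * Hypothetical consumer of the fixup above: with reg_noirq_read() in place,
 * reads of config offset 0x3c on these devices return an interrupt pin of 0,
 * so a driver can skip IRQ setup cleanly instead of requesting a bogus vector.
 */
#include <linux/pci.h>

static int example_ce4100_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	u8 pin;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (!pin) {
		/* No interrupt wired up: fall back to polled operation. */
		dev_info(&pdev->dev, "no IRQ pin, using polling\n");
		return 0;
	}

	/* pin != 0: pdev->irq can be handed to request_irq() as usual. */
	return 0;
}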
Signed-off-by: Maxime Bizon Signed-off-by: Florian Fainelli Acked-by: Sebastian Andrzej Siewior Cc: rui.zhang@intel.com Cc: alan@linux.intel.com Link: http://lkml.kernel.org/r/1351518020-25556-4-git-send-email-ffainelli@freebox.fr Signed-off-by: Ingo Molnar --- arch/x86/pci/ce4100.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 41bd2a2d2c5..b914e20b5a0 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c @@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value) reg_read(reg, value); } +static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + /* force interrupt pin value to 0 */ + *value = reg->sim_reg.value & 0xfff00ff; + raw_spin_unlock_irqrestore(&pci_config_lock, flags); +} + static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write) DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write) @@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write) DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write) DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write) + DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write) DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write) DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write) @@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write) DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write) + DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write) + DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) }; static void __init init_sim_regs(void) -- cgit v1.2.3-18-g5258 From 85b97637bb40a9f486459dd254598759af9c3d50 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Mon, 29 Oct 2012 11:01:50 +0800 Subject: x86/mce: Do not change worker's running cpu in cmci_rediscover(). cmci_rediscover() used set_cpus_allowed_ptr() to change the current process's running cpu, and migrate itself to the dest cpu. But worker processes are not allowed to be migrated. If current is a worker, the worker will be migrated to another cpu, but the corresponding worker_pool is still on the original cpu. In this case, the following BUG_ON in try_to_wake_up_local() will be triggered: BUG_ON(rq != this_rq()); This will cause the kernel panic. The call trace is like the following: [ 6155.451107] ------------[ cut here ]------------ [ 6155.452019] kernel BUG at kernel/sched/core.c:1654! ...... [ 6155.452019] RIP: 0010:[] [] try_to_wake_up_local+0x115/0x130 ...... [ 6155.452019] Call Trace: [ 6155.452019] [] __schedule+0x764/0x880 [ 6155.452019] [] schedule+0x29/0x70 [ 6155.452019] [] schedule_timeout+0x235/0x2d0 [ 6155.452019] [] ? mark_held_locks+0x8d/0x140 [ 6155.452019] [] ? __lock_release+0x133/0x1a0 [ 6155.452019] [] ? _raw_spin_unlock_irq+0x30/0x50 [ 6155.452019] [] ? trace_hardirqs_on_caller+0x105/0x190 [ 6155.452019] [] wait_for_common+0x12b/0x180 [ 6155.452019] [] ? 
try_to_wake_up+0x2f0/0x2f0 [ 6155.452019] [] wait_for_completion+0x1d/0x20 [ 6155.452019] [] stop_one_cpu+0x8a/0xc0 [ 6155.452019] [] ? __migrate_task+0x1a0/0x1a0 [ 6155.452019] [] ? complete+0x28/0x60 [ 6155.452019] [] set_cpus_allowed_ptr+0x128/0x130 [ 6155.452019] [] cmci_rediscover+0xf5/0x140 [ 6155.452019] [] mce_cpu_callback+0x18d/0x19d [ 6155.452019] [] notifier_call_chain+0x67/0x150 [ 6155.452019] [] __raw_notifier_call_chain+0xe/0x10 [ 6155.452019] [] __cpu_notify+0x20/0x40 [ 6155.452019] [] cpu_notify_nofail+0x15/0x30 [ 6155.452019] [] _cpu_down+0x262/0x2e0 [ 6155.452019] [] cpu_down+0x36/0x50 [ 6155.452019] [] acpi_processor_remove+0x50/0x11e [ 6155.452019] [] acpi_device_remove+0x90/0xb2 [ 6155.452019] [] __device_release_driver+0x7c/0xf0 [ 6155.452019] [] device_release_driver+0x2f/0x50 [ 6155.452019] [] acpi_bus_remove+0x32/0x6d [ 6155.452019] [] acpi_bus_trim+0x87/0xee [ 6155.452019] [] acpi_bus_hot_remove_device+0x88/0x16b [ 6155.452019] [] acpi_os_execute_deferred+0x27/0x34 [ 6155.452019] [] process_one_work+0x219/0x680 [ 6155.452019] [] ? process_one_work+0x1b8/0x680 [ 6155.452019] [] ? acpi_os_wait_events_complete+0x23/0x23 [ 6155.452019] [] worker_thread+0x12e/0x320 [ 6155.452019] [] ? manage_workers+0x110/0x110 [ 6155.452019] [] kthread+0xc6/0xd0 [ 6155.452019] [] kernel_thread_helper+0x4/0x10 [ 6155.452019] [] ? retint_restore_args+0x13/0x13 [ 6155.452019] [] ? __init_kthread_worker+0x70/0x70 [ 6155.452019] [] ? gs_change+0x13/0x13 This patch removes the set_cpus_allowed_ptr() call, and puts the cmci rediscover jobs onto all the other cpus using system_wq. This could bring some delay for the jobs. Signed-off-by: Tang Chen Signed-off-by: Miao Xie Signed-off-by: Tony Luck --- arch/x86/kernel/cpu/mcheck/mce_intel.c | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 5f88abf07e9..4f9a3cbfc4a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -285,34 +285,39 @@ void cmci_clear(void) raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } +static long cmci_rediscover_work_func(void *arg) +{ + int banks; + + /* Recheck banks in case CPUs don't all have the same */ + if (cmci_supported(&banks)) + cmci_discover(banks); + + return 0; +} + /* * After a CPU went down cycle through all the others and rediscover * Must run in process context. */ void cmci_rediscover(int dying) { - int banks; - int cpu; - cpumask_var_t old; + int cpu, banks; if (!cmci_supported(&banks)) return; - if (!alloc_cpumask_var(&old, GFP_KERNEL)) - return; - cpumask_copy(old, &current->cpus_allowed); for_each_online_cpu(cpu) { if (cpu == dying) continue; - if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) + + if (cpu == smp_processor_id()) { + cmci_rediscover_work_func(NULL); continue; - /* Recheck banks in case CPUs don't all have the same */ - if (cmci_supported(&banks)) - cmci_discover(banks); - } + } - set_cpus_allowed_ptr(current, old); - free_cpumask_var(old); + work_on_cpu(cpu, cmci_rediscover_work_func, NULL); + } } /* -- cgit v1.2.3-18-g5258 From 2bbf0a1427c377350f001fbc6260995334739ad7 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 31 Oct 2012 17:20:50 +0100 Subject: x86, amd: Disable way access filter on Piledriver CPUs The Way Access Filter in recent AMD CPUs may hurt the performance of some workloads, caused by aliasing issues in the L1 cache.
This patch disables it on the affected CPUs. The issue is similar to the one from last year: http://lkml.indiana.edu/hypermail/linux/kernel/1107.3/00041.html This new patch does not replace the old one; we just need another quirk for newer CPUs. The performance penalty without the patch depends on the circumstances, but is a bit less than last year's 3%. The workloads affected would be those that access code from the same physical page under different virtual addresses, so different processes using the same libraries with ASLR or multiple instances of PIE-binaries. The code needs to be accessed simultaneously from both cores of the same compute unit. More details can be found here: http://developer.amd.com/Assets/SharedL1InstructionCacheonAMD15hCPU.pdf CPUs affected are anything with the core known as Piledriver. That includes the new parts of the AMD A-Series (aka Trinity) and the just-released new CPUs of the FX-Series (aka Vishera). The model numbering is a bit odd here: FX CPUs have model 2, A-Series has model 10h, with possible extensions to 1Fh. Hence the range of model ids. Signed-off-by: Andre Przywara Link: http://lkml.kernel.org/r/1351700450-9277-1-git-send-email-osp@andrep.de Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/amd.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f7e98a2c0d1..1b7d1656a04 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. + */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + wrmsrl_safe(0xc0011021, val); + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ -- cgit v1.2.3-18-g5258 From ddd32b4289a6feda9ad9e68b0d85641c389a72ba Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 13 Nov 2012 02:17:36 +0900 Subject: x86, mm: Correct vmflag test for checking VM_HUGETLB Commit 611ae8e3f5204f7480b3b405993b3352cfa16662 ('enable tlb flush range support for x86') changed flush_tlb_mm_range() considerably. After this change, we test whether vmflag is equal to VM_HUGETLB, and that test almost always fails because vmflag usually has other flags set simultaneously. Our intention is to check whether this vma is for hugetlb, so correct the test accordingly. Signed-off-by: Joonsoo Kim Acked-by: Alex Shi Link: http://lkml.kernel.org/r/1352740656-19417-1-git-send-email-js1304@gmail.com Signed-off-by: H. 
Peter Anvin --- arch/x86/mm/tlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 0777f042e40..60f926cd8b0 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, } if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 - || vmflag == VM_HUGETLB) { + || vmflag & VM_HUGETLB) { local_flush_tlb(); goto flush_all; } -- cgit v1.2.3-18-g5258 From caaa8c6339ccefee6e834c4a8b0bec7e721e7357 Mon Sep 17 00:00:00 2001 From: Cesar Eduardo Barros Date: Sat, 27 Oct 2012 20:34:15 -0200 Subject: x86: remove dummy long from EFI stub Commit 2e064b1 (x86, efi: Fix issue of overlapping .reloc section for EFI_STUB) removed a dummy reloc added by commit 291f363 (x86, efi: EFI boot stub support), but forgot to remove the dummy long used by that reloc. Reviewed-by: Jordan Justen Tested-by: Lee G Rosenbaum Cc: "H. Peter Anvin" Cc: Thomas Gleixner Cc: Ingo Molnar Signed-off-by: Cesar Eduardo Barros Signed-off-by: Matt Fleming --- arch/x86/boot/header.S | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 2a017441b8b..8c132a625b9 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -476,6 +476,3 @@ die: setup_corrupt: .byte 7 .string "No setup signature found...\n" - - .data -dummy: .long 0 -- cgit v1.2.3-18-g5258 From 0f905a43ce955b638139bd84486194770a6a2c08 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 20 Nov 2012 13:07:46 +0000 Subject: x86, efi: Fix processor-specific memcpy() build error Building for Athlon/Duron/K7 results in the following build error, arch/x86/boot/compressed/eboot.o: In function `__constant_memcpy3d': eboot.c:(.text+0x385): undefined reference to `_mmx_memcpy' arch/x86/boot/compressed/eboot.o: In function `efi_main': eboot.c:(.text+0x1a22): undefined reference to `_mmx_memcpy' because the boot stub code doesn't link with the kernel proper, and therefore doesn't have access to the 3DNow version of memcpy. So, follow the example of misc.c and #undef memcpy so that we use the version provided by misc.c. See https://bugzilla.kernel.org/show_bug.cgi?id=50391 Reported-by: Al Viro Reported-by: Ryan Underwood Cc: H. Peter Anvin Cc: stable@vger.kernel.org Signed-off-by: Matt Fleming --- arch/x86/boot/compressed/eboot.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c760e073963..e87b0cac14b 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -12,6 +12,8 @@ #include #include +#undef memcpy /* Use memcpy from misc.c */ + #include "eboot.h" static efi_system_table_t *sys_table; -- cgit v1.2.3-18-g5258 From 1022623842cb72ee4d0dbf02f6937f38c92c3f41 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 3 Sep 2012 20:54:48 +0200 Subject: x86-32: Fix invalid stack address while in softirq In 32 bit the stack address provided by kernel_stack_pointer() may point to an invalid range causing NULL pointer access or page faults while in NMI (see trace below). This happens if called in softirq context and if the stack is empty. The address at &regs->sp is then out of range. Fixing this by checking if regs and &regs->sp are in the same stack context. Otherwise return the previous stack pointer stored in struct thread_info. If that address is invalid too, return address of regs. 
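To make the "same stack context" check concrete, here is a small stand-alone C illustration (not kernel code; the actual fix is the new kernel_stack_pointer() in the ptrace.c hunk further down, and the THREAD_SIZE value and addresses used here are invented): two addresses are treated as being on the same kernel stack exactly when they share the same THREAD_SIZE-aligned base. The oops that motivated the fix follows the sketch.

/*
 * Illustration only: the kernel's real logic lives in kernel_stack_pointer()
 * below. A THREAD_SIZE of 8 KiB and the sample addresses are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE (8 * 1024)	/* illustrative; the real value is config-dependent */

static int same_stack_context(uintptr_t a, uintptr_t b)
{
	/* Strip the offset within the stack and compare the stack bases. */
	return (a & ~(uintptr_t)(THREAD_SIZE - 1)) ==
	       (b & ~(uintptr_t)(THREAD_SIZE - 1));
}

int main(void)
{
	uintptr_t regs = 0xf5409ea0;	  /* pretend pt_regs lives here      */
	uintptr_t sp_slot = regs + 0x40;  /* pretend &regs->sp, same stack   */
	uintptr_t elsewhere = 0xf4435f94; /* an address on some other stack  */

	printf("%d\n", same_stack_context(regs, sp_slot));   /* prints 1 */
	printf("%d\n", same_stack_context(regs, elsewhere)); /* prints 0 */
	return 0;
}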
BUG: unable to handle kernel NULL pointer dereference at 0000000a IP: [] print_context_stack+0x6e/0x8d *pde = 00000000 Oops: 0000 [#1] SMP Modules linked in: Pid: 4434, comm: perl Not tainted 3.6.0-rc3-oprofile-i386-standard-g4411a05 #4 Hewlett-Packard HP xw9400 Workstation/0A1Ch EIP: 0060:[] EFLAGS: 00010093 CPU: 0 EIP is at print_context_stack+0x6e/0x8d EAX: ffffe000 EBX: 0000000a ECX: f4435f94 EDX: 0000000a ESI: f4435f94 EDI: f4435f94 EBP: f5409ec0 ESP: f5409ea0 DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068 CR0: 8005003b CR2: 0000000a CR3: 34ac9000 CR4: 000007d0 DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000 DR6: ffff0ff0 DR7: 00000400 Process perl (pid: 4434, ti=f5408000 task=f5637850 task.ti=f4434000) Stack: 000003e8 ffffe000 00001ffc f4e39b00 00000000 0000000a f4435f94 c155198c f5409ef0 c1003723 c155198c f5409f04 00000000 f5409edc 00000000 00000000 f5409ee8 f4435f94 f5409fc4 00000001 f5409f1c c12dce1c 00000000 c155198c Call Trace: [] dump_trace+0x7b/0xa1 [] x86_backtrace+0x40/0x88 [] ? oprofile_add_sample+0x56/0x84 [] oprofile_add_sample+0x75/0x84 [] op_amd_check_ctrs+0x46/0x260 [] profile_exceptions_notify+0x23/0x4c [] nmi_handle+0x31/0x4a [] ? ftrace_define_fields_irq_handler_entry+0x45/0x45 [] do_nmi+0xa0/0x2ff [] ? ftrace_define_fields_irq_handler_entry+0x45/0x45 [] nmi_stack_correct+0x28/0x2d [] ? ftrace_define_fields_irq_handler_entry+0x45/0x45 [] ? do_softirq+0x4b/0x7f [] irq_exit+0x35/0x5b [] smp_apic_timer_interrupt+0x6c/0x7a [] apic_timer_interrupt+0x2a/0x30 Code: 89 fe eb 08 31 c9 8b 45 0c ff 55 ec 83 c3 04 83 7d 10 00 74 0c 3b 5d 10 73 26 3b 5d e4 73 0c eb 1f 3b 5d f0 76 1a 3b 5d e8 73 15 <8b> 13 89 d0 89 55 e0 e8 ad 42 03 00 85 c0 8b 55 e0 75 a6 eb cc EIP: [] print_context_stack+0x6e/0x8d SS:ESP 0068:f5409ea0 CR2: 000000000000000a ---[ end trace 62afee3481b00012 ]--- Kernel panic - not syncing: Fatal exception in interrupt V2: * add comments to kernel_stack_pointer() * always return a valid stack address by falling back to the address of regs Reported-by: Yang Wei Cc: Signed-off-by: Robert Richter Link: http://lkml.kernel.org/r/20120912135059.GZ8285@erda.amd.com Signed-off-by: H. Peter Anvin Cc: Jun Zhang --- arch/x86/include/asm/ptrace.h | 15 ++++----------- arch/x86/kernel/ptrace.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index dcfde52979c..19f16ebaf4f 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs) } #endif -/* - * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode - * when it traps. The previous stack will be directly underneath the saved - * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. - * - * This is valid only for kernel mode traps. 
- */ -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ #ifdef CONFIG_X86_32 - return (unsigned long)(&regs->sp); +extern unsigned long kernel_stack_pointer(struct pt_regs *regs); #else +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ return regs->sp; -#endif } +#endif #define GET_IP(regs) ((regs)->ip) #define GET_FP(regs) ((regs)->bp) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b00b33a1839..2484e331a64 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -166,6 +166,34 @@ static inline bool invalid_selector(u16 value) #define FLAG_MASK FLAG_MASK_32 +/* + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode + * when it traps. The previous stack will be directly underneath the saved + * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. + * + * Now, if the stack is empty, '&regs->sp' is out of range. In this + * case we try to take the previous stack. To always return a non-null + * stack pointer we fall back to regs as stack if no previous stack + * exists. + * + * This is valid only for kernel mode traps. + */ +unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)&regs->sp; + struct thread_info *tinfo; + + if (context == (sp & ~(THREAD_SIZE - 1))) + return sp; + + tinfo = (struct thread_info *)context; + if (tinfo->previous_esp) + return tinfo->previous_esp; + + return (unsigned long)regs; +} + static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); -- cgit v1.2.3-18-g5258 From cb57a2b4cff7edf2a4e32c0163200e9434807e0a Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 20 Nov 2012 22:21:02 -0800 Subject: x86-32: Export kernel_stack_pointer() for modules Modules, in particular oprofile (and possibly other similar tools) need kernel_stack_pointer(), so export it using EXPORT_SYMBOL_GPL(). Cc: Yang Wei Cc: Robert Richter Cc: Jun Zhang Cc: Link: http://lkml.kernel.org/r/20120912135059.GZ8285@erda.amd.com Signed-off-by: H. Peter Anvin --- arch/x86/kernel/ptrace.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 2484e331a64..5e0596b0632 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -193,6 +194,7 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs) return (unsigned long)regs; } +EXPORT_SYMBOL_GPL(kernel_stack_pointer); static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { -- cgit v1.2.3-18-g5258 From 36c46ca4f322a7bf89aad5462a3a1f61713edce7 Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Thu, 15 Nov 2012 13:41:50 -0500 Subject: x86, microcode, AMD: Add support for family 16h processors Add valid patch size for family 16h processors. [ hpa: promoting to urgent/stable since it is hw enabling and trivial ] Signed-off-by: Boris Ostrovsky Acked-by: Andreas Herrmann Link: http://lkml.kernel.org/r/1353004910-2204-1-git-send-email-boris.ostrovsky@amd.com Signed-off-by: H. 
Peter Anvin Cc: --- arch/x86/kernel/microcode_amd.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index b3e67ba55b7..efdec7cd8e0 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 +#define F16H_MPB_MAX_SIZE 3458 switch (c->x86) { case 0x14: @@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, case 0x15: max_size = F15H_MPB_MAX_SIZE; break; + case 0x16: + max_size = F16H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; -- cgit v1.2.3-18-g5258 From ee4eb87be2c3f69c2c4d9f1c1d98e363a7ad18ab Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 2 Nov 2012 11:18:39 +0000 Subject: x86-64: Fix ordering of CFI directives and recent ASM_CLAC additions While these got added in the right place everywhere else, entry_64.S is the odd one where they ended up before the initial CFI directive(s). In order to cover the full code ranges, the CFI directive must be first, though. Signed-off-by: Jan Beulich Link: http://lkml.kernel.org/r/5093BA1F02000078000A600E@nat28.tlf.novell.com Signed-off-by: H. Peter Anvin --- arch/x86/kernel/entry_64.S | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b51b2c7ee51..1328fe49a3f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -995,8 +995,8 @@ END(interrupt) */ .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: - ASM_CLAC XCPT_FRAME + ASM_CLAC addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ interrupt do_IRQ /* 0(%rsp): old_rsp-ARGOFFSET */ @@ -1135,8 +1135,8 @@ END(common_interrupt) */ .macro apicinterrupt num sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC pushq_cfi $~(\num) .Lcommon_\sym: interrupt \do_sym @@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \ */ .macro zeroentry sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1208,8 +1208,8 @@ END(\sym) .macro paranoidzeroentry sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1227,8 +1227,8 @@ END(\sym) #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) .macro paranoidzeroentry_ist sym do_sym ist ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1247,8 +1247,8 @@ END(\sym) .macro errorentry sym do_sym ENTRY(\sym) - ASM_CLAC XCPT_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 @@ -1266,8 +1266,8 @@ END(\sym) /* error code is on the stack already */ .macro paranoiderrorentry sym do_sym ENTRY(\sym) - ASM_CLAC XCPT_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 -- cgit v1.2.3-18-g5258