From 3556ddfa9284a86a59a9b78fe5894430f6ab4eef Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Mon, 2 Apr 2007 12:14:12 +0200
Subject: [PATCH] x86-64: Disable local APIC timer use on AMD systems with C1E

AMD dual core laptops with C1E do not run the APIC timer correctly when
they go idle. Previously the code assumed this only happened in C2 or
deeper sleep states, but not all of these systems report C2 support.
Use an AMD-supplied snippet to detect whether C1E is enabled and, if so,
disable use of the local APIC timer. This supersedes an earlier
workaround that used DMI detection of specific systems.

Thanks to Mark Langsdorf for the detection snippet.

Signed-off-by: Andi Kleen
---
 arch/i386/kernel/apic.c    | 32 +++-----------------------------
 arch/i386/kernel/cpu/amd.c | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+), 29 deletions(-)
(limited to 'arch/i386')

diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index e88415282a6..93aa911646a 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -271,32 +271,6 @@ static void __devinit setup_APIC_timer(void)
 	clockevents_register_device(levt);
 }
 
-/*
- * Detect systems with known broken BIOS implementations
- */
-static int __init lapic_check_broken_bios(struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE "%s detected: disabling lapic timer.\n",
-	       d->ident);
-	local_apic_timer_disabled = 1;
-	return 0;
-}
-
-static struct dmi_system_id __initdata broken_bios_dmi_table[] = {
-	{
-		/*
-		 * BIOS exports only C1 state, but uses deeper power
-		 * modes behind the kernels back.
-		 */
-		.callback = lapic_check_broken_bios,
-		.ident = "HP nx6325",
-		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
-		},
-	},
-	{}
-};
-
 /*
  * In this functions we calibrate APIC bus clocks to the external timer.
  *
@@ -372,12 +346,12 @@ void __init setup_boot_APIC_clock(void)
 	long delta, deltapm;
 	int pm_referenced = 0;
 
-	/* Detect know broken systems */
-	dmi_check_system(broken_bios_dmi_table);
+	if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
+		local_apic_timer_disabled = 1;
 
 	/*
 	 * The local apic timer can be disabled via the kernel
-	 * commandline or from the dmi quirk above. Register the lapic
+	 * commandline or from the test above. Register the lapic
 	 * timer as a dummy clock event source on SMP systems, so the
 	 * broadcast mechanism is used. On UP systems simply ignore it.
 	 */
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 41cfea57232..2d47db48297 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -22,6 +22,37 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
+#define ENABLE_C1E_MASK			0x18000000
+#define CPUID_PROCESSOR_SIGNATURE	1
+#define CPUID_XFAM			0x0ff00000
+#define CPUID_XFAM_K8			0x00000000
+#define CPUID_XFAM_10H			0x00100000
+#define CPUID_XFAM_11H			0x00200000
+#define CPUID_XMOD			0x000f0000
+#define CPUID_XMOD_REV_F		0x00040000
+
+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+	u32 lo, hi;
+	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+	switch (eax & CPUID_XFAM) {
+	case CPUID_XFAM_K8:
+		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+			break;
+	case CPUID_XFAM_10H:
+	case CPUID_XFAM_11H:
+		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+		if (lo & ENABLE_C1E_MASK)
+			return 1;
+		break;
+	default:
+		/* err on the side of caution */
+		return 1;
+	}
+	return 0;
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -241,6 +272,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	if (cpuid_eax(0x80000000) >= 0x80000006)
 		num_cache_leaves = 3;
+
+	if (amd_apic_timer_broken())
+		set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
 }
 
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
--
cgit v1.2.3-18-g5258
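For reference, the detection logic in the patch above can be modeled as a
stand-alone user-space C sketch. The constants are copied from the patch;
reading the real MSR from user space would need the msr driver
(/dev/cpu/*/msr), so here the CPUID signature and the low word of
MSR_K8_ENABLE_C1E are passed in as plain arguments, and the values in main()
are hypothetical. The fall-through from the K8 case into the 10h/11h cases is
deliberate, mirroring the kernel code.

#include <stdio.h>
#include <stdint.h>

#define ENABLE_C1E_MASK		0x18000000
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000

/* Same decision tree as amd_apic_timer_broken(): returns 1 if the
   local APIC timer should not be trusted. */
static int apic_timer_broken(uint32_t sig, uint32_t c1e_msr_lo)
{
	switch (sig & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		/* Pre-rev-F K8 parts have no C1E; nothing to check. */
		if ((sig & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
		/* fall through: rev F and later behave like 10h/11h */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		/* C1E enabled by the BIOS: the lAPIC timer stops in idle. */
		return (c1e_msr_lo & ENABLE_C1E_MASK) != 0;
	default:
		return 1;	/* unknown family: err on the side of caution */
	}
	return 0;
}

int main(void)
{
	/* Hypothetical values: a rev-F K8 signature (extended model 4)
	   with both C1E enable bits set. */
	uint32_t sig = 0x00040f32, msr_lo = 0x18000000;
	printf("timer broken: %d\n", apic_timer_broken(sig, msr_lo));
	return 0;
}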
From 89e07569e4e4e935b2cec18e9d94f131aecb2e40 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Mon, 2 Apr 2007 12:14:12 +0200
Subject: [PATCH] x86-64: Let oprofile reserve MSR on all CPUs

MSR reservations are per-CPU, and oprofile would only allocate them on
the CPU it was initialized on. Change this to handle all CPUs. This
also fixes a warning about unprotected use of smp_processor_id() in
preemptible kernels.

Signed-off-by: Andi Kleen
---
 arch/i386/kernel/nmi.c | 127 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 96 insertions(+), 31 deletions(-)
(limited to 'arch/i386')

diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 14702427b10..851f3b1c5b1 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
 }
 
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_perfctr_nmi(cpu, msr);
+	}
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void)
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define P6_EVNTSEL0_ENABLE	(1 << 22)
@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void)
 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
 		return;
 
 	wrmsr(wd->evntsel_msr, 0, 0);
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog (void *unused)
--
cgit v1.2.3-18-g5258
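The patch above turns reservation into an all-or-nothing operation: try to
take the bit on every possible CPU, and if any CPU already owns it, release
the ones taken so far and fail. A minimal single-threaded user-space model of
that rollback logic (plain arrays stand in for the kernel's per-CPU owner
bitmasks, and the atomic test_and_set_bit is reduced to an ordinary
read-modify-write, which is only safe because this sketch is
single-threaded):

#include <stdio.h>

#define NR_CPUS	4

static unsigned long perfctr_owner[NR_CPUS];	/* models per_cpu(perfctr_nmi_owner) */

static int reserve_on(int cpu, int bit)
{
	if (perfctr_owner[cpu] & (1UL << bit))
		return 0;			/* already owned on this cpu */
	perfctr_owner[cpu] |= 1UL << bit;
	return 1;
}

static void release_on(int cpu, int bit)
{
	perfctr_owner[cpu] &= ~(1UL << bit);
}

/* All-or-nothing: mirrors reserve_perfctr_nmi()'s rollback loop. */
static int reserve_all(int bit)
{
	int cpu, i;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!reserve_on(cpu, bit)) {
			for (i = 0; i < cpu; i++)
				release_on(i, bit);	/* undo partial reservation */
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	reserve_on(2, 0);	/* pretend cpu 2 already owns bit 0 */
	printf("reserve bit 0: %d\n", reserve_all(0));	/* fails, rolls back */
	printf("cpu 0 mask after rollback: %lx\n", perfctr_owner[0]);
	printf("reserve bit 1: %d\n", reserve_all(1));	/* succeeds everywhere */
	return 0;
}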
From 0fb2ebfcb5f0b1916ed5ff260ec953ef616fec7c Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Mon, 2 Apr 2007 12:14:12 +0200
Subject: [PATCH] x86-64: Increase NMI watchdog probing timeout

A 4-core Opteron needs longer than 10 ticks for this.

Signed-off-by: Andi Kleen
---
 arch/i386/kernel/nmi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/i386')

diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 851f3b1c5b1..a98ba88a8c0 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -328,7 +328,7 @@ static int __init check_nmi_watchdog(void)
 	for_each_possible_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
-	mdelay((10*1000)/nmi_hz); // wait 10 ticks
+	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_possible_cpu(cpu) {
 #ifdef CONFIG_SMP
--
cgit v1.2.3-18-g5258
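The changed line converts watchdog ticks to a wall-clock delay: one tick
lasts 1000/nmi_hz milliseconds, so N ticks need mdelay((N*1000)/nmi_hz). A
quick check of that arithmetic for a few illustrative nmi_hz values:

#include <stdio.h>

int main(void)
{
	int nmi_hz;
	/* 20 ticks of wall-clock time at various watchdog rates */
	for (nmi_hz = 100; nmi_hz <= 1000; nmi_hz *= 10)
		printf("nmi_hz=%4d: 20 ticks -> mdelay(%d) ms\n",
		       nmi_hz, (20 * 1000) / nmi_hz);
	return 0;
}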
From a369a7100da3b4f5c2269be25160653d2c7013fc Mon Sep 17 00:00:00 2001
From: Zwane Mwaikambo
Date: Mon, 2 Apr 2007 12:14:12 +0200
Subject: [PATCH] x86: Don't probe for DDC on VBE1.2

VBE1.2 doesn't support function 15h (DDC), resulting in a 'hang' whilst
uncompressing the kernel with some video cards. Make sure we check the
VBE version before fiddling around with DDC.

http://bugzilla.kernel.org/show_bug.cgi?id=1458
Opened: 2003-10-30 09:12
Last update: 2007-02-13 22:03

Many thanks to Tobias Hain for help in testing and investigating the bug.

Tested on:
i386, Chips & Technologies 65548 VESA VBE 1.2
CONFIG_VIDEO_SELECT=Y
CONFIG_FIRMWARE_EDID=Y

Untested on x86_64.

Signed-off-by: Zwane Mwaikambo
Signed-off-by: Andi Kleen
---
 arch/i386/boot/video.S | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
(limited to 'arch/i386')

diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 2c5b5cc55f7..8143c9516cb 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -571,6 +571,16 @@ setr1:	lodsw
 	jmp	_m_s
 
 check_vesa:
+#ifdef CONFIG_FIRMWARE_EDID
+	leaw	modelist+1024, %di
+	movw	$0x4f00, %ax
+	int	$0x10
+	cmpw	$0x004f, %ax
+	jnz	setbad
+
+	movw	4(%di), %ax
+	movw	%ax, vbe_version
+#endif
 	leaw	modelist+1024, %di
 	subb	$VIDEO_FIRST_VESA>>8, %bh
 	movw	%bx, %cx		# Get mode information structure
@@ -1945,6 +1955,9 @@ store_edid:
 	rep
 	stosl
 
+	cmpw	$0x0200, vbe_version	# only do EDID on >= VBE2.0
+	jl	no_edid
+
 	pushw	%es			# save ES
 	xorw	%di, %di		# Report Capability
 	pushw	%di
@@ -1987,6 +2000,7 @@ do_restore:	.byte	0	# Screen contents altered during mode change
 svga_prefix:	.byte	VIDEO_FIRST_BIOS>>8	# Default prefix for BIOS modes
 graphic_mode:	.byte	0	# Graphic mode with a linear frame buffer
 dac_size:	.byte	6	# DAC bit depth
+vbe_version:	.word	0	# VBE bios version
 
 # Status messages
 keymsg:		.ascii	"Press <RETURN> to see video modes available, "
--
cgit v1.2.3-18-g5258
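The gate the patch adds in boot-time assembly reduces to one comparison on
the BCD-coded version word returned by the VBE "get controller information"
call (0x0102 for VBE 1.2, 0x0200 for VBE 2.0). A user-space C sketch of the
same policy; get_vbe_info() is a hypothetical stub standing in for the
real-mode int 0x10, ax=0x4f00 call, and the version it reports is made up:

#include <stdio.h>
#include <stdint.h>

struct vbe_info {
	uint16_t version;	/* BCD: 0x0102 = VBE 1.2, 0x0300 = VBE 3.0 */
};

/* Hypothetical stub; the real data comes from int 0x10, ax=0x4f00. */
static int get_vbe_info(struct vbe_info *info)
{
	info->version = 0x0102;	/* pretend we found a VBE 1.2 BIOS */
	return 0;
}

static int may_probe_ddc(const struct vbe_info *info)
{
	/* The patch treats VBE 2.0 as the cutoff: DDC/EDID (function
	   15h) is skipped on older BIOSes, where calling it can hang
	   some cards. */
	return info->version >= 0x0200;
}

int main(void)
{
	struct vbe_info info;
	if (get_vbe_info(&info) == 0)
		printf("VBE %x.%x: %s\n", info.version >> 8,
		       info.version & 0xff,
		       may_probe_ddc(&info) ? "probing DDC" : "skipping DDC");
	return 0;
}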