Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/acpi/sleep.c       4
-rw-r--r--  arch/x86_64/kernel/apic.c            19
-rw-r--r--  arch/x86_64/kernel/cpufreq/Kconfig    1
-rw-r--r--  arch/x86_64/kernel/e820.c             2
-rw-r--r--  arch/x86_64/kernel/early-quirks.c    17
-rw-r--r--  arch/x86_64/kernel/i8259.c            6
-rw-r--r--  arch/x86_64/kernel/mpparse.c          4
-rw-r--r--  arch/x86_64/kernel/nmi.c            125
-rw-r--r--  arch/x86_64/kernel/pci-gart.c         2
-rw-r--r--  arch/x86_64/kernel/process.c         13
-rw-r--r--  arch/x86_64/kernel/smpboot.c          5
-rw-r--r--  arch/x86_64/kernel/vsyscall.c         2
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c      1
13 files changed, 142 insertions, 59 deletions
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 23178ce6c78..e1548fbe95a 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -66,8 +66,10 @@ static void init_low_mapping(void)
{
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
+ /* FIXME: We're playing with the current task's page tables here, which
+ * is potentially dangerous on SMP systems.
+ */
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- WARN_ON(num_online_cpus() != 1);
local_flush_tlb();
}
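
The FIXME replaces the old WARN_ON(num_online_cpus() != 1): the wakeup trampoline needs an identity mapping at virtual address 0, and borrowing PGD slot 0 of current->mm is racy when other CPUs share that mm. A minimal sketch of the borrow/restore pairing this function sets up (restore_low_mapping is a hypothetical name for illustration; the actual restore happens inline after wakeup):

    static void restore_low_mapping(void)
    {
            /* put back the PGD entry for VA 0 that init_low_mapping()
             * stashed in low_ptr, and drop the stale TLB entries */
            set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
            local_flush_tlb();
    }
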
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 723417d924c..bd3e45d47c3 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -47,6 +47,10 @@ int apic_calibrate_pmtmr __initdata;
int disable_apic_timer __initdata;
+/* Local APIC timer works in C2? */
+int local_apic_timer_c2_ok;
+EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
+
static struct resource *ioapic_resources;
static struct resource lapic_resource = {
.name = "Local APIC",
@@ -930,9 +934,17 @@ EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
void smp_send_timer_broadcast_ipi(void)
{
+ int cpu = smp_processor_id();
cpumask_t mask;
cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
+
+ if (cpu_isset(cpu, mask)) {
+ cpu_clear(cpu, mask);
+ add_pda(apic_timer_irqs, 1);
+ smp_local_timer_interrupt();
+ }
+
if (!cpus_empty(mask)) {
send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
}
@@ -1192,6 +1204,13 @@ static __init int setup_nolapic(char *str)
}
early_param("nolapic", setup_nolapic);
+static int __init parse_lapic_timer_c2_ok(char *arg)
+{
+ local_apic_timer_c2_ok = 1;
+ return 0;
+}
+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
+
static __init int setup_noapictimer(char *str)
{
if (str[0] != ' ' && str[0] != 0)
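
local_apic_timer_c2_ok records the user's promise, given via the new lapic_timer_c2_ok parameter, that the LAPIC timer keeps ticking in C2, sparing the broadcast-IPI fallback; it is exported GPL-only so the modular ACPI idle driver can test it. A hypothetical consumer sketch (only local_apic_timer_c2_ok is real; the rest is illustrative):

    static int lapic_timer_survives(int cstate)
    {
            if (cstate < 2)
                    return 1;       /* C0/C1 never stop the LAPIC timer */
            if (cstate == 2 && local_apic_timer_c2_ok)
                    return 1;       /* user vouched via lapic_timer_c2_ok */
            return 0;               /* C3, or untrusted C2: use broadcast */
    }
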
diff --git a/arch/x86_64/kernel/cpufreq/Kconfig b/arch/x86_64/kernel/cpufreq/Kconfig
index ced15d06f0f..40acb67fb88 100644
--- a/arch/x86_64/kernel/cpufreq/Kconfig
+++ b/arch/x86_64/kernel/cpufreq/Kconfig
@@ -75,6 +75,7 @@ config X86_ACPI_CPUFREQ_PROC_INTF
config X86_P4_CLOCKMOD
tristate "Intel Pentium 4 clock modulation"
depends on EMBEDDED
+ select CPU_FREQ_TABLE
help
This adds the clock modulation driver for Intel Pentium 4 / XEON
processors. When enabled it will lower CPU temperature by skipping
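
p4-clockmod registers its modulation steps as a cpufreq frequency table, so it depends on the table helpers; the new select line guarantees freq_table.c is built even when nothing else pulls it in. A sketch of the helper this makes available (cpufreq_frequency_table_cpuinfo() is the real cpufreq-core entry point; the table contents here are illustrative):

    static struct cpufreq_frequency_table p4_table[] = {
            { 0, 0 },                       /* filled with real kHz values at init */
            { 0, CPUFREQ_TABLE_END },
    };

    static int p4_cpu_init(struct cpufreq_policy *policy)
    {
            /* validates the table and derives policy->min/max from it */
            return cpufreq_frequency_table_cpuinfo(policy, p4_table);
    }
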
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 4651fd22b21..a490fabfcf4 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -662,7 +662,7 @@ static int __init parse_memmap_opt(char *p)
}
early_param("memmap", parse_memmap_opt);
-void finish_e820_parsing(void)
+void __init finish_e820_parsing(void)
{
if (userdef) {
printk(KERN_INFO "user-defined physical RAM map:\n");
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 8047ea8c2ab..fede55a5399 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -16,7 +16,7 @@
#include <asm/proto.h>
#include <asm/dma.h>
-static void via_bugs(void)
+static void __init via_bugs(void)
{
#ifdef CONFIG_IOMMU
if ((end_pfn > MAX_DMA32_PFN || force_iommu) &&
@@ -30,16 +30,13 @@ static void via_bugs(void)
#ifdef CONFIG_ACPI
-static int nvidia_hpet_detected __initdata;
-
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
- nvidia_hpet_detected = 1;
return 0;
}
#endif
-static void nvidia_bugs(void)
+static void __init nvidia_bugs(void)
{
#ifdef CONFIG_ACPI
/*
@@ -52,11 +49,7 @@ static void nvidia_bugs(void)
if (acpi_use_timer_override)
return;
- nvidia_hpet_detected = 0;
- if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check))
- return;
-
- if (nvidia_hpet_detected == 0) {
+ if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
@@ -69,7 +62,7 @@ static void nvidia_bugs(void)
}
-static void ati_bugs(void)
+static void __init ati_bugs(void)
{
if (timer_over_8254 == 1) {
timer_over_8254 = 0;
@@ -95,7 +88,7 @@ struct chipset {
void (*f)(void);
};
-static struct chipset early_qrk[] = {
+static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
{ PCI_VENDOR_ID_VIA, via_bugs },
{ PCI_VENDOR_ID_ATI, ati_bugs },
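
The nvidia_hpet_detected flag disappears because the return value of acpi_table_parse() now carries the same information. In code-comment form (the new contract is assumed from this hunk: nonzero means the table is absent, 0 means it was found and the handler ran):

    /* Old contract: the return value only flagged parse errors, so the
     * handler had to record "HPET exists" in nvidia_hpet_detected.
     * New contract: a nonzero return already means "no HPET table". */
    if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check))
            acpi_skip_timer_override = 1;   /* no HPET: apply the quirk */
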
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 21d95b74743..48942668277 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -45,7 +45,7 @@
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x20-0x2f)
+ * (these are usually mapped to vectors 0x30-0x3f)
*/
/*
@@ -299,7 +299,7 @@ void init_8259A(int auto_eoi)
* outb_p - this has to work on a wide range of PC hardware.
*/
outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
- outb_p(IRQ0_VECTOR, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
+ outb_p(IRQ0_VECTOR, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
if (auto_eoi)
outb_p(0x03, 0x21); /* master does Auto EOI */
@@ -307,7 +307,7 @@ void init_8259A(int auto_eoi)
outb_p(0x01, 0x21); /* master expects normal EOI */
outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
- outb_p(IRQ8_VECTOR, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
+ outb_p(IRQ8_VECTOR, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
is to be investigated) */
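
The comment updates track the x86_64 vector remap that moved ISA IRQs from 0x20-0x2f up to 0x30-0x3f; ICW2 simply programs the PICs with whatever IRQ0_VECTOR and IRQ8_VECTOR expand to. The layout assumed by the new comments is roughly:

    #define FIRST_EXTERNAL_VECTOR   0x20
    #define IRQ0_VECTOR             (FIRST_EXTERNAL_VECTOR + 0x10)  /* 0x30 */
    #define IRQ8_VECTOR             (IRQ0_VECTOR + 8)               /* 0x38 */
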
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 50dd8bef850..455aa0b932f 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -60,9 +60,9 @@ unsigned long mp_lapic_addr = 0;
/* Processor that is doing the boot up */
unsigned int boot_cpu_id = -1U;
/* Internal processor count */
-unsigned int num_processors __initdata = 0;
+unsigned int num_processors __cpuinitdata = 0;
-unsigned disabled_cpus __initdata;
+unsigned disabled_cpus __cpuinitdata;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
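
With CPU hotplug, processors can register after initmem has been freed, so these counters must outlive boot; __cpuinitdata keeps them discardable only when hotplug is compiled out. Roughly (simplified from linux/init.h):

    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinitdata                   /* kept: CPUs may appear later */
    #else
    #define __cpuinitdata   __initdata      /* discardable after boot */
    #endif
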
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 486f4c61a94..a90996c27dc 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -108,64 +108,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* checks whether a counter bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
+ int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
+ int cpu;
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1;
return 0;
}
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+ clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_perfctr_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_perfctr_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu)
+ __release_perfctr_nmi(cpu, msr);
+}
+
+static int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1;
return 0;
}
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+ clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_evntsel_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_evntsel_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu) {
+ __release_evntsel_nmi(cpu, msr);
+ }
}
static __cpuinit inline int nmi_known_cpu(void)
@@ -187,10 +251,7 @@ void nmi_watchdog_default(void)
{
if (nmi_watchdog != NMI_DEFAULT)
return;
- if (nmi_known_cpu())
- nmi_watchdog = NMI_LOCAL_APIC;
- else
- nmi_watchdog = NMI_IO_APIC;
+ nmi_watchdog = NMI_NONE;
}
static int endflag __initdata = 0;
@@ -256,7 +317,7 @@ int __init check_nmi_watchdog (void)
for (cpu = 0; cpu < NR_CPUS; cpu++)
counts[cpu] = cpu_pda(cpu)->__nmi_count;
local_irq_enable();
- mdelay((10*1000)/nmi_hz); // wait 10 ticks
+ mdelay((20*1000)/nmi_hz); // wait 20 ticks
for_each_online_cpu(cpu) {
if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
@@ -475,10 +536,10 @@ static int setup_k7_watchdog(void)
perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
/* Simulator may not support it */
@@ -504,9 +565,9 @@ static int setup_k7_watchdog(void)
wd->check_bit = 1ULL<<63;
return 1;
fail2:
- release_evntsel_nmi(evntsel_msr);
+ __release_evntsel_nmi(-1, evntsel_msr);
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -517,8 +578,8 @@ static void stop_k7_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
@@ -584,10 +645,10 @@ static int setup_p4_watchdog(void)
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
}
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -612,7 +673,7 @@ static int setup_p4_watchdog(void)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -624,8 +685,8 @@ static void stop_p4_watchdog(void)
wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -653,10 +714,10 @@ static int setup_intel_arch_watchdog(void)
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
@@ -683,7 +744,7 @@ static int setup_intel_arch_watchdog(void)
wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
@@ -707,8 +768,8 @@ static void stop_intel_arch_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
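
The reservation API is now system-wide: the public reserve_*/release_* helpers claim or drop the counter bit on every possible CPU, rolling back on partial failure, while the watchdog setup/stop paths switch to the __-prefixed single-CPU variants, passing -1 for "the CPU I am running on". A hypothetical caller's-eye sketch:

    /* e.g. a profiler claiming a counter: reserve_perfctr_nmi() either
     * owns the bit on all CPUs or on none, so the caller needs no
     * partial-ownership cleanup of its own. */
    if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
            return -EBUSY;          /* NMI watchdog (or oprofile) owns it */
    /* ... program and use the counter on each CPU ... */
    release_perfctr_nmi(MSR_K7_PERFCTR0);   /* drops it on every CPU */
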
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 030eb375335..2bac8c60ad6 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -675,7 +675,7 @@ void __init gart_iommu_init(void)
dma_ops = &gart_dma_ops;
}
-void gart_parse_options(char *p)
+void __init gart_parse_options(char *p)
{
int arg;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index cbbc6adc1a9..d8d5ccc245c 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -382,14 +382,17 @@ void exit_thread(void)
void flush_thread(void)
{
struct task_struct *tsk = current;
- struct thread_info *t = current_thread_info();
- if (t->flags & _TIF_ABI_PENDING) {
- t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
- if (t->flags & _TIF_IA32)
+ if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+ clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+ clear_tsk_thread_flag(tsk, TIF_IA32);
+ } else {
+ set_tsk_thread_flag(tsk, TIF_IA32);
current_thread_info()->status |= TS_COMPAT;
+ }
}
- t->flags &= ~_TIF_DEBUG;
+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
tsk->thread.debugreg0 = 0;
tsk->thread.debugreg1 = 0;
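
The open-coded flag arithmetic becomes explicit per-task bitops. For reference, the old one-liner

    t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

cleared ABI_PENDING and toggled IA32 in a single non-atomic XOR; the replacement performs the same transition with the atomic test/set/clear_tsk_thread_flag() accessors and, on the 64-to-32-bit path, additionally marks the thread TS_COMPAT so compat syscall handling kicks in.
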
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 35443729aad..cd4643a3702 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -923,8 +923,9 @@ void __init smp_prepare_boot_cpu(void)
*/
int __cpuinit __cpu_up(unsigned int cpu)
{
- int err;
int apicid = cpu_present_to_apicid(cpu);
+ unsigned long flags;
+ int err;
WARN_ON(irqs_disabled());
@@ -958,7 +959,9 @@ int __cpuinit __cpu_up(unsigned int cpu)
/*
* Make sure and check TSC sync:
*/
+ local_irq_save(flags);
check_tsc_sync_source(cpu);
+ local_irq_restore(flags);
while (!cpu_isset(cpu, cpu_online_map))
cpu_relax();
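
check_tsc_sync_source() runs a tight handshake with check_tsc_sync_target() on the incoming CPU; the new local_irq_save()/local_irq_restore() pair keeps an interrupt on the boot CPU from stalling that handshake mid-measurement (assumed rationale; the target side already runs with IRQs off during bring-up). The pattern in isolation:

    unsigned long flags;

    local_irq_save(flags);          /* no IRQs during the lockstep loop */
    check_tsc_sync_source(cpu);     /* pairs with check_tsc_sync_target() */
    local_irq_restore(flags);
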
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 180ff919eaf..b43c698cf7d 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -112,7 +112,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
vread = __vsyscall_gtod_data.clock.vread;
if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
- gettimeofday(tv,0);
+ gettimeofday(tv,NULL);
return;
}
now = vread();
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 0dffae69f4a..77c25b30763 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -59,3 +59,4 @@ EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(init_level4_pgt);
EXPORT_SYMBOL(load_gs_index);
+EXPORT_SYMBOL(_proxy_pda);
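
_proxy_pda matters only at link time: the pda accessors reference its fields so gcc treats per-CPU pda data as real memory operands and the field offsets stay symbolic, while the %gs segment prefix redirects the actual access to the per-CPU pda at run time. Modules compiled against pda.h therefore need the symbol to resolve at load time, hence the export. A simplified sketch of the scheme (assumed from the pda.h of this era; the real macros switch on the field size):

    extern struct x8664_pda _proxy_pda;     /* never accessed at run time */

    #define read_pda(field)                                         \
    ({      typeof(_proxy_pda.field) ret__;                         \
            asm("mov %%gs:%c1,%0" : "=r" (ret__)                    \
                : "i" (offsetof(struct x8664_pda, field)),          \
                  "m" (_proxy_pda.field));  /* dependency only */   \
            ret__;                                                  \
    })
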