author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-17 13:13:16 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-17 13:13:16 -0700
commit | d20ead9e86881bc7ae84e385f47b5196b7d93aac (patch)
tree | ed27dd5db5f8447e4b3f541f0ec38219085d2f32 /arch/x86
parent | c56ec7639288f3e5d6371b0c48d37da93642fc93 (diff)
parent | 88e4d250234fc9e64d6ce51df95efdcf8334fd95 (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (114 commits)
x86: delete vsyscall files during make clean
kbuild: fix typo SRCARCH in find_sources
x86: fix kernel rebuild due to vsyscall fallout
.gitignore update for x86 arch
x86: unify include/asm/debugreg_32/64.h
x86: unify include/asm/unwind_32/64.h
x86: unify include/asm/types_32/64.h
x86: unify include/asm/tlb_32/64.h
x86: unify include/asm/siginfo_32/64.h
x86: unify include/asm/bug_32/64.h
x86: unify include/asm/mman_32/64.h
x86: unify include/asm/agp_32/64.h
x86: unify include/asm/kdebug_32/64.h
x86: unify include/asm/ioctls_32/64.h
x86: unify include/asm/floppy_32/64.h
x86: apply missing DMA/OOM prevention to floppy_32.h
x86: unify include/asm/cache_32/64.h
x86: unify include/asm/cache_32/64.h
x86: unify include/asm/dmi_32/64.h
x86: unify include/asm/delay_32/64.h
...
Diffstat (limited to 'arch/x86')
81 files changed, 495 insertions, 341 deletions
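Many of the merged commits listed in the shortlog above replace the separate `*_32.h`/`*_64.h` header pairs under `include/asm-x86/` with a single unified header. A minimal sketch of the dispatch pattern such unification commits commonly use is shown below; the header and guard names are hypothetical and chosen only to illustrate the shape, not the literal contents of any file touched by this merge.

```c
/*
 * Sketch of the 32/64-bit header unification pattern (hypothetical
 * names, not the exact contents of any header in this merge): the
 * unified header simply dispatches to the word-size-specific variant
 * selected by the kernel configuration.
 */
#ifndef _ASM_X86_EXAMPLE_H
#define _ASM_X86_EXAMPLE_H

#ifdef CONFIG_X86_32
# include "example_32.h"	/* 32-bit definitions */
#else
# include "example_64.h"	/* 64-bit definitions */
#endif

#endif /* _ASM_X86_EXAMPLE_H */
```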
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile index cdae36435e2..e2edda255a8 100644 --- a/arch/x86/ia32/Makefile +++ b/arch/x86/ia32/Makefile @@ -18,18 +18,35 @@ $(obj)/syscall32_syscall.o: \ $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so) # Teach kbuild about targets -targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so) +targets := $(foreach F,$(addprefix vsyscall-,sysenter syscall),\ + $F.o $F.so $F.so.dbg) # The DSO images are built using a special linker script quiet_cmd_syscall = SYSCALL $@ - cmd_syscall = $(CC) -m32 -nostdlib -shared -s \ + cmd_syscall = $(CC) -m32 -nostdlib -shared \ $(call ld-option, -Wl$(comma)--hash-style=sysv) \ -Wl,-soname=linux-gate.so.1 -o $@ \ -Wl,-T,$(filter-out FORCE,$^) -$(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \ -$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +$(obj)/vsyscall-sysenter.so.dbg $(obj)/vsyscall-syscall.so.dbg: \ +$(obj)/vsyscall-%.so.dbg: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE $(call if_changed,syscall) AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 + +vdsos := vdso32-sysenter.so vdso32-syscall.so + +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(@:vdso32-%.so=$(obj)/vsyscall-%.so.dbg) \ + $(MODLIB)/vdso/$@ + +$(vdsos): + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: $(vdsos) diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 7cf1c29bf90..f82e1a94fcb 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -420,6 +420,8 @@ beyond_if: (regs)->eflags = 0x200; (regs)->cs = __USER32_CS; (regs)->ss = __USER32_DS; + regs->r8 = regs->r9 = regs->r10 = regs->r11 = + regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; set_fs(USER_DS); if (unlikely(current->ptrace & PT_PTRACED)) { if (current->ptrace & PT_TRACE_EXEC) diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c index d3c53e8b05c..118b9f9ff49 100644 --- a/arch/x86/ia32/ia32_binfmt.c +++ b/arch/x86/ia32/ia32_binfmt.c @@ -112,11 +112,8 @@ struct elf_prpsinfo char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; -#define __STR(x) #x -#define STR(x) __STR(x) - #define _GET_SEG(x) \ - ({ __u32 seg; asm("movl %%" STR(x) ",%0" : "=r"(seg)); seg; }) + ({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; }) /* Assumes current==process to be dumped */ #define ELF_CORE_COPY_REGS(pr_reg, regs) \ diff --git a/arch/x86/ia32/ptrace32.c b/arch/x86/ia32/ptrace32.c index 4a233ad6269..f52770ef0ee 100644 --- a/arch/x86/ia32/ptrace32.c +++ b/arch/x86/ia32/ptrace32.c @@ -228,6 +228,8 @@ static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data) return ret; } +#define COMPAT_GDT_ENTRY_TLS_MIN 6 + asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) { struct task_struct *child; @@ -246,8 +248,6 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) case PTRACE_SYSCALL: case PTRACE_OLDSETOPTIONS: case PTRACE_SETOPTIONS: - case PTRACE_SET_THREAD_AREA: - case PTRACE_GET_THREAD_AREA: return sys_ptrace(request, pid, addr, data); default: @@ -271,6 +271,12 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) case PTRACE_SETSIGINFO: case PTRACE_GETSIGINFO: return ptrace32_siginfo(request, pid, addr, data); + + case PTRACE_SET_THREAD_AREA: + case PTRACE_GET_THREAD_AREA: + return sys_ptrace(request, pid, + addr + 
GDT_ENTRY_TLS_MIN - COMPAT_GDT_ENTRY_TLS_MIN, + data); } child = ptrace_get_task_struct(pid); diff --git a/arch/x86/kernel/.gitignore b/arch/x86/kernel/.gitignore index 40836ad9079..4ea38a39aed 100644 --- a/arch/x86/kernel/.gitignore +++ b/arch/x86/kernel/.gitignore @@ -1 +1,2 @@ vsyscall.lds +vsyscall_32.lds diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 45855c97923..38573340b14 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -3,3 +3,7 @@ include ${srctree}/arch/x86/kernel/Makefile_32 else include ${srctree}/arch/x86/kernel/Makefile_64 endif + +# Workaround to delete .lds files with make clean +# The problem is that we do not enter Makefile_32 with make clean. +clean-files := vsyscall*.lds vsyscall*.so diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32 index 7ff02063b85..a3fa11f8f46 100644 --- a/arch/x86/kernel/Makefile_32 +++ b/arch/x86/kernel/Makefile_32 @@ -51,7 +51,7 @@ obj-$(CONFIG_SCx200) += scx200_32.o # We must build both images before we can assemble it. # Note: kbuild does not track this dependency due to usage of .incbin $(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so -targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so) +targets += $(foreach F,int80 sysenter,vsyscall-$F_32.o vsyscall-$F_32.so) targets += vsyscall-note_32.o vsyscall_32.lds # The DSO images are built using a special linker script. diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 42421437ded..3bd2688bd44 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -63,11 +63,11 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt); /* Use inline assembly to define this because the nops are defined as inline assembly strings in the include files and we cannot get them easily into strings. 
*/ -asm("\t.data\nintelnops: " +asm("\t.section .rodata, \"a\"\nintelnops: " GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 GENERIC_NOP7 GENERIC_NOP8); -extern unsigned char intelnops[]; -static unsigned char *intel_nops[ASM_NOP_MAX+1] = { +extern const unsigned char intelnops[]; +static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { NULL, intelnops, intelnops + 1, @@ -81,11 +81,11 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = { #endif #ifdef K8_NOP1 -asm("\t.data\nk8nops: " +asm("\t.section .rodata, \"a\"\nk8nops: " K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 K8_NOP7 K8_NOP8); -extern unsigned char k8nops[]; -static unsigned char *k8_nops[ASM_NOP_MAX+1] = { +extern const unsigned char k8nops[]; +static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { NULL, k8nops, k8nops + 1, @@ -99,11 +99,11 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = { #endif #ifdef K7_NOP1 -asm("\t.data\nk7nops: " +asm("\t.section .rodata, \"a\"\nk7nops: " K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 K7_NOP7 K7_NOP8); -extern unsigned char k7nops[]; -static unsigned char *k7_nops[ASM_NOP_MAX+1] = { +extern const unsigned char k7nops[]; +static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { NULL, k7nops, k7nops + 1, @@ -116,28 +116,49 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = { }; #endif +#ifdef P6_NOP1 +asm("\t.section .rodata, \"a\"\np6nops: " + P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 + P6_NOP7 P6_NOP8); +extern const unsigned char p6nops[]; +static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { + NULL, + p6nops, + p6nops + 1, + p6nops + 1 + 2, + p6nops + 1 + 2 + 3, + p6nops + 1 + 2 + 3 + 4, + p6nops + 1 + 2 + 3 + 4 + 5, + p6nops + 1 + 2 + 3 + 4 + 5 + 6, + p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, +}; +#endif + #ifdef CONFIG_X86_64 extern char __vsyscall_0; -static inline unsigned char** find_nop_table(void) +static inline const unsigned char*const * find_nop_table(void) { - return k8_nops; + return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || + boot_cpu_data.x86 < 6 ? k8_nops : p6_nops; } #else /* CONFIG_X86_64 */ -static struct nop { +static const struct nop { int cpuid; - unsigned char **noptable; + const unsigned char *const *noptable; } noptypes[] = { { X86_FEATURE_K8, k8_nops }, { X86_FEATURE_K7, k7_nops }, + { X86_FEATURE_P4, p6_nops }, + { X86_FEATURE_P3, p6_nops }, { -1, NULL } }; -static unsigned char** find_nop_table(void) +static const unsigned char*const * find_nop_table(void) { - unsigned char **noptable = intel_nops; + const unsigned char *const *noptable = intel_nops; int i; for (i = 0; noptypes[i].cpuid >= 0; i++) { @@ -154,7 +175,7 @@ static unsigned char** find_nop_table(void) /* Use this to add nops to a buffer, then text_poke the whole buffer. 
*/ static void add_nops(void *insns, unsigned int len) { - unsigned char **noptable = find_nop_table(); + const unsigned char *const *noptable = find_nop_table(); while (len > 0) { unsigned int noplen = len; @@ -415,9 +436,6 @@ void __init alternative_instructions(void) alternatives_smp_unlock(__smp_locks, __smp_locks_end, _text, _etext); } - free_init_pages("SMP alternatives", - (unsigned long)__smp_locks, - (unsigned long)__smp_locks_end); } else { alternatives_smp_module_add(NULL, "core kernel", __smp_locks, __smp_locks_end, @@ -428,6 +446,11 @@ void __init alternative_instructions(void) apply_paravirt(__parainstructions, __parainstructions_end); local_irq_restore(flags); + if (smp_alt_once) + free_init_pages("SMP alternatives", + (unsigned long)__smp_locks, + (unsigned long)__smp_locks_end); + restart_nmi(); #ifdef CONFIG_X86_MCE restart_mce(); diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 3d67ae18d76..793341fffc8 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1277,6 +1277,7 @@ void smp_spurious_interrupt(struct pt_regs *regs) /* see sw-dev-man vol 3, chapter 7.4.13.5 */ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " "should never happen.\n", smp_processor_id()); + __get_cpu_var(irq_stat).irq_spurious_count++; irq_exit(); } diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 09b82093bc7..f47bc493dba 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -974,15 +974,12 @@ void __init setup_boot_APIC_clock (void) */ void __cpuinit check_boot_apic_timer_broadcast(void) { - struct clock_event_device *levt = &per_cpu(lapic_events, boot_cpu_id); - if (!disable_apic_timer || (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY)) return; printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n"); lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; - levt->features |= CLOCK_EVT_FEAT_DUMMY; local_irq_enable(); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id); @@ -1143,6 +1140,7 @@ asmlinkage void smp_spurious_interrupt(void) if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) ack_APIC_irq(); + add_pda(irq_spurious_count, 1); irq_exit(); } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index dcf6bbb1c7c..5f8af875f45 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -4,6 +4,7 @@ #include <asm/io.h> #include <asm/processor.h> #include <asm/apic.h> +#include <asm/mach_apic.h> #include "cpu.h" @@ -45,13 +46,17 @@ static __cpuinit int amd_apic_timer_broken(void) case CPUID_XFAM_10H: case CPUID_XFAM_11H: rdmsr(MSR_K8_ENABLE_C1E, lo, hi); - if (lo & ENABLE_C1E_MASK) + if (lo & ENABLE_C1E_MASK) { + if (smp_processor_id() != boot_cpu_physical_apicid) + printk(KERN_INFO "AMD C1E detected late. 
" + " Force timer broadcast.\n"); return 1; - break; - default: - /* err on the side of caution */ + } + break; + default: + /* err on the side of caution */ return 1; - } + } return 0; } #endif diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 7decd6a50ff..f3686a5f230 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -565,7 +565,7 @@ static unsigned int powernow_get(unsigned int cpu) } -static int __init acer_cpufreq_pst(struct dmi_system_id *d) +static int __init acer_cpufreq_pst(const struct dmi_system_id *d) { printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident); printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc4e08147b1..cc8c501b9f3 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <asm/processor.h> +#include <asm/pgtable.h> #include <asm/msr.h> #include <asm/uaccess.h> @@ -19,8 +20,6 @@ #include <mach_apic.h> #endif -extern int trap_init_f00f_bug(void); - #ifdef CONFIG_X86_INTEL_USERCOPY /* * Alignment at which movsl is preferred for bulk memory copies. @@ -95,6 +94,20 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) return 1; } +#ifdef CONFIG_X86_F00F_BUG +static void __cpuinit trap_init_f00f_bug(void) +{ + __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); + + /* + * Update the IDT descriptor and reload the IDT so that + * it uses the read-only mapped virtual address. + */ + idt_descr.address = fix_to_virt(FIX_F00F_IDT); + load_idt(&idt_descr); +} +#endif + static void __cpuinit init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index db6c25aa577..1826395ebee 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -170,15 +170,15 @@ union l3_cache { unsigned val; }; -static const unsigned short assocs[] = { +static unsigned short assocs[] __cpuinitdata = { [1] = 1, [2] = 2, [4] = 4, [6] = 8, [8] = 16, [0xa] = 32, [0xb] = 48, [0xc] = 64, [0xf] = 0xffff // ?? 
}; -static const unsigned char levels[] = { 1, 1, 2, 3 }; -static const unsigned char types[] = { 1, 2, 3, 3 }; +static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; +static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, union _cpuid4_leaf_ebx *ebx, @@ -493,8 +493,8 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) } } #else -static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {} -static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {} +static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {} +static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {} #endif static void free_cache_attributes(unsigned int cpu) @@ -794,8 +794,9 @@ static int __cpuinit cache_sysfs_init(void) register_hotcpu_notifier(&cacheinfo_cpu_notifier); for_each_online_cpu(i) { - cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE, - (void *)(long)i); + struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i); + + cache_add_dev(sys_dev); } return 0; diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c index 1509edfb231..be4dabfee1f 100644 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ b/arch/x86/kernel/cpu/mcheck/p4.c @@ -61,6 +61,7 @@ fastcall void smp_thermal_interrupt(struct pt_regs *regs) { irq_enter(); vendor_thermal_interrupt(regs); + __get_cpu_var(irq_stat).irq_thermal_count++; irq_exit(); } diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 1203dc5ab87..494d320d909 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -152,7 +152,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block thermal_throttle_cpu_notifier = +static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = { .notifier_call = thermal_throttle_cpu_callback, }; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index c48b6fea5ab..5e4be30ff90 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -738,13 +738,7 @@ void mtrr_ap_init(void) */ void mtrr_save_state(void) { - int cpu = get_cpu(); - - if (cpu == 0) - mtrr_save_fixed_ranges(NULL); - else - smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); - put_cpu(); + smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); } static int __init mtrr_init_finialize(void) diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 93fecd4b03d..54cdbf1a40f 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -34,7 +34,7 @@ struct wd_ops { u64 checkbit; }; -static struct wd_ops *wd_ops; +static const struct wd_ops *wd_ops; /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's * offset from MSR_P4_BSU_ESCR0. 
It will be the max for all platforms (for now) @@ -317,7 +317,7 @@ static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); } -static struct wd_ops k7_wd_ops = { +static const struct wd_ops k7_wd_ops = { .reserve = single_msr_reserve, .unreserve = single_msr_unreserve, .setup = setup_k7_watchdog, @@ -380,7 +380,7 @@ static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); } -static struct wd_ops p6_wd_ops = { +static const struct wd_ops p6_wd_ops = { .reserve = single_msr_reserve, .unreserve = single_msr_unreserve, .setup = setup_p6_watchdog, @@ -532,7 +532,7 @@ static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); } -static struct wd_ops p4_wd_ops = { +static const struct wd_ops p4_wd_ops = { .reserve = p4_reserve, .unreserve = p4_unreserve, .setup = setup_p4_watchdog, @@ -550,6 +550,8 @@ static struct wd_ops p4_wd_ops = { #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK +static struct wd_ops intel_arch_wd_ops; + static int setup_intel_arch_watchdog(unsigned nmi_hz) { unsigned int ebx; @@ -591,11 +593,11 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) wd->perfctr_msr = perfctr_msr; wd->evntsel_msr = evntsel_msr; wd->cccr_msr = 0; //unused - wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1); + intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); return 1; } -static struct wd_ops intel_arch_wd_ops = { +static struct wd_ops intel_arch_wd_ops __read_mostly = { .reserve = single_msr_reserve, .unreserve = single_msr_unreserve, .setup = setup_intel_arch_watchdog, diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index f4548c93ccf..70dcf912d9f 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -43,8 +43,6 @@ static struct class *cpuid_class; -#ifdef CONFIG_SMP - struct cpuid_command { u32 reg; u32 *data; @@ -62,25 +60,11 @@ static inline void do_cpuid(int cpu, u32 reg, u32 * data) { struct cpuid_command cmd; - preempt_disable(); - if (cpu == smp_processor_id()) { - cpuid(reg, &data[0], &data[1], &data[2], &data[3]); - } else { - cmd.reg = reg; - cmd.data = data; + cmd.reg = reg; + cmd.data = data; - smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); - } - preempt_enable(); + smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); } -#else /* ! CONFIG_SMP */ - -static inline void do_cpuid(int cpu, u32 reg, u32 * data) -{ - cpuid(reg, &data[0], &data[1], &data[2], &data[3]); -} - -#endif /* ! 
CONFIG_SMP */ static loff_t cpuid_seek(struct file *file, loff_t offset, int orig) { @@ -150,7 +134,7 @@ static const struct file_operations cpuid_fops = { .open = cpuid_open, }; -static int cpuid_device_create(int i) +static int __cpuinit cpuid_device_create(int i) { int err = 0; struct device *dev; @@ -161,7 +145,9 @@ static int cpuid_device_create(int i) return err; } -static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, + unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned long)hcpu; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index f1cacd4897f..3a058bb1640 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -988,7 +988,7 @@ child_rip: movq %rsi, %rdi call *%rax # exit - xorl %edi, %edi + mov %eax, %edi call do_exit CFI_ENDPROC ENDPROC(child_rip) diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 47496a40e84..4ae03e3e829 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -29,8 +29,6 @@ u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; EXPORT_SYMBOL(x86_cpu_to_apicid); -u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; - struct genapic __read_mostly *genapic = &apic_flat; /* diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index ecb01eefdd2..91c7526768e 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -52,7 +52,6 @@ static void flat_init_apic_ldr(void) num = smp_processor_id(); id = 1UL << num; - x86_cpu_to_log_apicid[num] = id; apic_write(APIC_DFR, APIC_DFR_FLAT); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(id); diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 9150ca9b5f8..39677965e16 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -51,6 +51,15 @@ */ LOW_PAGES = 1<<(32-PAGE_SHIFT_asm) +/* + * To preserve the DMA pool in PAGEALLOC kernels, we'll allocate + * pagetables from above the 16MB DMA limit, so we'll have to set + * up pagetables 16MB more (worst-case): + */ +#ifdef CONFIG_DEBUG_PAGEALLOC +LOW_PAGES = LOW_PAGES + 0x1000000 +#endif + #if PTRS_PER_PMD > 1 PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD #else @@ -443,6 +452,7 @@ early_page_fault: early_fault: cld #ifdef CONFIG_PRINTK + pusha movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es @@ -534,8 +544,15 @@ int_msg: .asciz "Unknown interrupt or fault at EIP %p %p %p\n" fault_msg: - .ascii "Int %d: CR2 %p err %p EIP %p CS %p flags %p\n" - .asciz "Stack: %p %p %p %p %p %p %p %p\n" + .ascii \ +/* fault info: */ "BUG: Int %d: CR2 %p\n" \ +/* pusha regs: */ " EDI %p ESI %p EBP %p ESP %p\n" \ + " EBX %p EDX %p ECX %p EAX %p\n" \ +/* fault frame: */ " err %p EIP %p CS %p flg %p\n" \ + \ + "Stack: %p %p %p %p %p %p %p %p\n" \ + " %p %p %p %p %p %p %p %p\n" \ + " %p %p %p %p %p %p %p %p\n" #include "../../x86/xen/xen-head.S" diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index e3d4b73bfdb..edd39ccf139 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c @@ -1,4 +1,5 @@ #include <linux/module.h> +#include <asm/semaphore.h> #include <asm/checksum.h> #include <asm/desc.h> diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c index 679bb33acbf..d34a10cc13a 100644 --- a/arch/x86/kernel/i8259_32.c +++ 
b/arch/x86/kernel/i8259_32.c @@ -349,7 +349,11 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) * New motherboards sometimes make IRQ 13 be a PCI interrupt, * so allow interrupt sharing. */ -static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL }; +static struct irqaction fpu_irq = { + .handler = math_error_irq, + .mask = CPU_MASK_NONE, + .name = "fpu", +}; void __init init_ISA_irqs (void) { diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c index eb72976cc13..3f27ea0b981 100644 --- a/arch/x86/kernel/i8259_64.c +++ b/arch/x86/kernel/i8259_64.c @@ -395,7 +395,11 @@ device_initcall(i8259A_init_sysfs); * IRQ2 is cascade interrupt to second interrupt controller */ -static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; +static struct irqaction irq2 = { + .handler = no_action, + .mask = CPU_MASK_NONE, + .name = "cascade", +}; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { [0 ... IRQ0_VECTOR - 1] = -1, [IRQ0_VECTOR] = 0, diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 4ee1e5ee9b5..5f10c718953 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -1296,6 +1296,11 @@ static void __init setup_IO_APIC_irqs(void) continue; } + if (!first_notcon) { + apic_printk(APIC_VERBOSE, " not connected.\n"); + first_notcon = 1; + } + entry.trigger = irq_trigger(idx); entry.polarity = irq_polarity(idx); diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 966fa106249..1c2c7bf6a9d 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -875,6 +875,10 @@ static void __init setup_IO_APIC_irqs(void) apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); continue; } + if (!first_notcon) { + apic_printk(APIC_VERBOSE, " not connected.\n"); + first_notcon = 1; + } irq = pin_2_irq(idx, apic, pin); add_pin_to_irq(irq, apic, pin); @@ -885,7 +889,7 @@ static void __init setup_IO_APIC_irqs(void) } if (!first_notcon) - apic_printk(APIC_VERBOSE," not connected.\n"); + apic_printk(APIC_VERBOSE, " not connected.\n"); } /* @@ -1845,7 +1849,7 @@ static struct sysdev_class ioapic_sysdev_class = { static int __init ioapic_init_sysfs(void) { struct sys_device * dev; - int i, size, error = 0; + int i, size, error; error = sysdev_class_register(&ioapic_sysdev_class); if (error) @@ -1854,12 +1858,11 @@ static int __init ioapic_init_sysfs(void) for (i = 0; i < nr_ioapics; i++ ) { size = sizeof(struct sys_device) + nr_ioapic_registers[i] * sizeof(struct IO_APIC_route_entry); - mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); + mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); if (!mp_ioapic_data[i]) { printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); continue; } - memset(mp_ioapic_data[i], 0, size); dev = &mp_ioapic_data[i]->dev; dev->id = i; dev->cls = &ioapic_sysdev_class; diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index e173b763f14..d3fde94f734 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -255,9 +255,17 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { + unsigned any_count = 0; + spin_lock_irqsave(&irq_desc[i].lock, flags); +#ifndef CONFIG_SMP + any_count = kstat_irqs(i); +#else + for_each_online_cpu(j) + any_count |= kstat_cpu(j).irqs[i]; +#endif action = irq_desc[i].action; - if (!action) + if (!action && !any_count) goto skip; seq_printf(p, "%3d: ",i); #ifndef CONFIG_SMP @@ -268,10 +276,12 @@ int show_interrupts(struct seq_file *p, void *v) #endif 
seq_printf(p, " %8s", irq_desc[i].chip->name); seq_printf(p, "-%-8s", irq_desc[i].name); - seq_printf(p, " %s", action->name); - for (action=action->next; action; action = action->next) - seq_printf(p, ", %s", action->name); + if (action) { + seq_printf(p, " %s", action->name); + while ((action = action->next) != NULL) + seq_printf(p, ", %s", action->name); + } seq_putc(p, '\n'); skip: @@ -280,14 +290,41 @@ skip: seq_printf(p, "NMI: "); for_each_online_cpu(j) seq_printf(p, "%10u ", nmi_count(j)); - seq_putc(p, '\n'); + seq_printf(p, " Non-maskable interrupts\n"); #ifdef CONFIG_X86_LOCAL_APIC seq_printf(p, "LOC: "); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs); - seq_putc(p, '\n'); + seq_printf(p, " Local timer interrupts\n"); #endif +#ifdef CONFIG_SMP + seq_printf(p, "RES: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + per_cpu(irq_stat,j).irq_resched_count); + seq_printf(p, " Rescheduling interrupts\n"); + seq_printf(p, "CAL: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + per_cpu(irq_stat,j).irq_call_count); + seq_printf(p, " function call interrupts\n"); + seq_printf(p, "TLB: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + per_cpu(irq_stat,j).irq_tlb_count); + seq_printf(p, " TLB shootdowns\n"); +#endif + seq_printf(p, "TRM: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + per_cpu(irq_stat,j).irq_thermal_count); + seq_printf(p, " Thermal event interrupts\n"); + seq_printf(p, "SPU: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + per_cpu(irq_stat,j).irq_spurious_count); + seq_printf(p, " Spurious interrupts\n"); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 865669efc54..6b5c730d67b 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -62,9 +62,17 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { + unsigned any_count = 0; + spin_lock_irqsave(&irq_desc[i].lock, flags); +#ifndef CONFIG_SMP + any_count = kstat_irqs(i); +#else + for_each_online_cpu(j) + any_count |= kstat_cpu(j).irqs[i]; +#endif action = irq_desc[i].action; - if (!action) + if (!action && !any_count) goto skip; seq_printf(p, "%3d: ",i); #ifndef CONFIG_SMP @@ -76,9 +84,11 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, " %8s", irq_desc[i].chip->name); seq_printf(p, "-%-8s", irq_desc[i].name); - seq_printf(p, " %s", action->name); - for (action=action->next; action; action = action->next) - seq_printf(p, ", %s", action->name); + if (action) { + seq_printf(p, " %s", action->name); + while ((action = action->next) != NULL) + seq_printf(p, ", %s", action->name); + } seq_putc(p, '\n'); skip: spin_unlock_irqrestore(&irq_desc[i].lock, flags); @@ -86,11 +96,37 @@ skip: seq_printf(p, "NMI: "); for_each_online_cpu(j) seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); - seq_putc(p, '\n'); + seq_printf(p, " Non-maskable interrupts\n"); seq_printf(p, "LOC: "); for_each_online_cpu(j) seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); - seq_putc(p, '\n'); + seq_printf(p, " Local timer interrupts\n"); +#ifdef CONFIG_SMP + seq_printf(p, "RES: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count); + seq_printf(p, " Rescheduling interrupts\n"); + seq_printf(p, "CAL: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); + seq_printf(p, " function call interrupts\n"); + seq_printf(p, 
"TLB: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); + seq_printf(p, " TLB shootdowns\n"); +#endif + seq_printf(p, "TRM: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count); + seq_printf(p, " Thermal event interrupts\n"); + seq_printf(p, "THR: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count); + seq_printf(p, " Threshold APIC interrupts\n"); + seq_printf(p, "SPU: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count); + seq_printf(p, " Spurious interrupts\n"); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); } return 0; diff --git a/arch/x86/kernel/ldt_32.c b/arch/x86/kernel/ldt_32.c index a8b18421863..9ff90a27c45 100644 --- a/arch/x86/kernel/ldt_32.c +++ b/arch/x86/kernel/ldt_32.c @@ -92,13 +92,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) struct mm_struct * old_mm; int retval = 0; - init_MUTEX(&mm->context.sem); + mutex_init(&mm->context.lock); mm->context.size = 0; old_mm = current->mm; if (old_mm && old_mm->context.size > 0) { - down(&old_mm->context.sem); + mutex_lock(&old_mm->context.lock); retval = copy_ldt(&mm->context, &old_mm->context); - up(&old_mm->context.sem); + mutex_unlock(&old_mm->context.lock); } return retval; } @@ -130,7 +130,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; - down(&mm->context.sem); + mutex_lock(&mm->context.lock); size = mm->context.size*LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; @@ -138,7 +138,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) err = 0; if (copy_to_user(ptr, mm->context.ldt, size)) err = -EFAULT; - up(&mm->context.sem); + mutex_unlock(&mm->context.lock); if (err < 0) goto error_return; if (size != bytecount) { @@ -194,7 +194,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode) goto out; } - down(&mm->context.sem); + mutex_lock(&mm->context.lock); if (ldt_info.entry_number >= mm->context.size) { error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); if (error < 0) @@ -221,7 +221,7 @@ install: error = 0; out_unlock: - up(&mm->context.sem); + mutex_unlock(&mm->context.lock); out: return error; } diff --git a/arch/x86/kernel/ldt_64.c b/arch/x86/kernel/ldt_64.c index 3796523d616..60e57abb8e9 100644 --- a/arch/x86/kernel/ldt_64.c +++ b/arch/x86/kernel/ldt_64.c @@ -96,13 +96,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) struct mm_struct * old_mm; int retval = 0; - init_MUTEX(&mm->context.sem); + mutex_init(&mm->context.lock); mm->context.size = 0; old_mm = current->mm; if (old_mm && old_mm->context.size > 0) { - down(&old_mm->context.sem); + mutex_lock(&old_mm->context.lock); retval = copy_ldt(&mm->context, &old_mm->context); - up(&old_mm->context.sem); + mutex_unlock(&old_mm->context.lock); } return retval; } @@ -133,7 +133,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; - down(&mm->context.sem); + mutex_lock(&mm->context.lock); size = mm->context.size*LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; @@ -141,7 +141,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount) err = 0; if (copy_to_user(ptr, mm->context.ldt, size)) err = -EFAULT; - up(&mm->context.sem); + mutex_unlock(&mm->context.lock); if (err < 0) goto error_return; if (size != bytecount) 
{ @@ -193,7 +193,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode) goto out; } - down(&mm->context.sem); + mutex_lock(&mm->context.lock); if (ldt_info.entry_number >= (unsigned)mm->context.size) { error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); if (error < 0) @@ -223,7 +223,7 @@ install: error = 0; out_unlock: - up(&mm->context.sem); + mutex_unlock(&mm->context.lock); out: return error; } diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c index 97d2b757d6b..8ca8f864896 100644 --- a/arch/x86/kernel/mce_64.c +++ b/arch/x86/kernel/mce_64.c @@ -695,8 +695,6 @@ static int __init mcheck_disable(char *str) mce=nobootlog Don't log MCEs from before booting. */ static int __init mcheck_enable(char *str) { - if (*str == '=') - str++; if (!strcmp(str, "off")) mce_dont_init = 1; else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog")) @@ -709,7 +707,7 @@ static int __init mcheck_enable(char *str) } __setup("nomce", mcheck_disable); -__setup("mce", mcheck_enable); +__setup("mce=", mcheck_enable); /* * Sysfs support diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c index 805b62b1e0d..0d2afd96aca 100644 --- a/arch/x86/kernel/mce_amd_64.c +++ b/arch/x86/kernel/mce_amd_64.c @@ -237,6 +237,7 @@ asmlinkage void mce_threshold_interrupt(void) } } out: + add_pda(irq_threshold_count, 1); irq_exit(); } diff --git a/arch/x86/kernel/mce_intel_64.c b/arch/x86/kernel/mce_intel_64.c index 6551505d8a2..c17eaf5dd6d 100644 --- a/arch/x86/kernel/mce_intel_64.c +++ b/arch/x86/kernel/mce_intel_64.c @@ -26,6 +26,7 @@ asmlinkage void smp_thermal_interrupt(void) if (therm_throt_process(msr_val & 1)) mce_log_therm_throt_event(smp_processor_id(), msr_val); + add_pda(irq_thermal_count, 1); irq_exit(); } diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index c044de310b6..df85c9c1360 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -133,7 +133,7 @@ static const struct file_operations msr_fops = { .open = msr_open, }; -static int msr_device_create(int i) +static int __cpuinit msr_device_create(int i) { int err = 0; struct device *dev; @@ -144,7 +144,7 @@ static int msr_device_create(int i) return err; } -static int msr_class_cpu_callback(struct notifier_block *nfb, +static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index a50b787b3bf..5098f58063a 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -222,10 +222,10 @@ static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen) return npages; } -static inline int translate_phb(struct pci_dev* dev) +static inline int translation_enabled(struct iommu_table *tbl) { - int disabled = bus_info[dev->bus->number].translation_disabled; - return !disabled; + /* only PHBs with translation enabled have an IOMMU table */ + return (tbl != NULL); } static void iommu_range_reserve(struct iommu_table *tbl, @@ -388,7 +388,7 @@ static void calgary_unmap_sg(struct device *dev, struct scatterlist *s; int i; - if (!translate_phb(to_pci_dev(dev))) + if (!translation_enabled(tbl)) return; for_each_sg(sglist, s, nelems, i) { @@ -428,7 +428,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, unsigned long entry; int i; - if (!translate_phb(to_pci_dev(dev))) + if (!translation_enabled(tbl)) return calgary_nontranslate_map_sg(dev, sg, nelems, 
direction); for_each_sg(sg, s, nelems, i) { @@ -474,7 +474,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr, uaddr = (unsigned long)vaddr; npages = num_dma_pages(uaddr, size); - if (translate_phb(to_pci_dev(dev))) + if (translation_enabled(tbl)) dma_handle = iommu_alloc(tbl, vaddr, npages, direction); else dma_handle = virt_to_bus(vaddr); @@ -488,7 +488,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, struct iommu_table *tbl = find_iommu_table(dev); unsigned int npages; - if (!translate_phb(to_pci_dev(dev))) + if (!translation_enabled(tbl)) return; npages = num_dma_pages(dma_handle, size); @@ -513,7 +513,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, goto error; memset(ret, 0, size); - if (translate_phb(to_pci_dev(dev))) { + if (translation_enabled(tbl)) { /* set up tces to cover the allocated range */ mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL); if (mapping == bad_dma_address) @@ -1194,7 +1194,7 @@ static int __init calgary_init(void) { int ret; struct pci_dev *dev = NULL; - void *tce_space; + struct calgary_bus_info *info; ret = calgary_locate_bbars(); if (ret) @@ -1206,12 +1206,14 @@ static int __init calgary_init(void) break; if (!is_cal_pci_dev(dev->device)) continue; - if (!translate_phb(dev)) { + + info = &bus_info[dev->bus->number]; + if (info->translation_disabled) { calgary_init_one_nontraslated(dev); continue; } - tce_space = bus_info[dev->bus->number].tce_space; - if (!tce_space && !translate_empty_slots) + + if (!info->tce_space && !translate_empty_slots) continue; ret = calgary_init_one(dev); @@ -1229,11 +1231,13 @@ error: break; if (!is_cal_pci_dev(dev->device)) continue; - if (!translate_phb(dev)) { + + info = &bus_info[dev->bus->number]; + if (info->translation_disabled) { pci_dev_put(dev); continue; } - if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots) + if (!info->tce_space && !translate_empty_slots) continue; calgary_disable_translation(dev); @@ -1546,7 +1550,7 @@ static void __init calgary_fixup_one_tce_space(struct pci_dev *dev) static int __init calgary_fixup_tce_spaces(void) { struct pci_dev *dev = NULL; - void *tce_space; + struct calgary_bus_info *info; if (no_iommu || swiotlb || !calgary_detected) return -ENODEV; @@ -1559,11 +1563,12 @@ static int __init calgary_fixup_tce_spaces(void) break; if (!is_cal_pci_dev(dev->device)) continue; - if (!translate_phb(dev)) + + info = &bus_info[dev->bus->number]; + if (info->translation_disabled) continue; - tce_space = bus_info[dev->bus->number].tce_space; - if (!tce_space) + if (!info->tce_space) continue; calgary_fixup_one_tce_space(dev); diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c index 0aae2f3847a..51330321a5d 100644 --- a/arch/x86/kernel/pci-dma_32.c +++ b/arch/x86/kernel/pci-dma_32.c @@ -12,7 +12,6 @@ #include <linux/string.h> #include <linux/pci.h> #include <linux/module.h> -#include <linux/pci.h> #include <asm/io.h> struct dma_coherent_mem { diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c index 9576a2eb375..b2b42bdb0a1 100644 --- a/arch/x86/kernel/pci-dma_64.c +++ b/arch/x86/kernel/pci-dma_64.c @@ -51,11 +51,9 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order) { struct page *page; int node; -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - node = pcibus_to_node(to_pci_dev(dev)->bus); - else -#endif + + node = dev_to_node(dev); + if (node == -1) node = numa_node_id(); if (node < first_node(node_online_map)) diff --git 
a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index cfcc84e6c35..5cdfab65e93 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -8,6 +8,7 @@ * See Documentation/DMA-mapping.txt for the interface specification. * * Copyright 2002 Andi Kleen, SuSE Labs. + * Subject to the GNU General Public License v2 only. */ #include <linux/types.h> @@ -375,7 +376,8 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems, * DMA map all entries in a scatterlist. * Merge chunks that have page aligned sizes into a continuous mapping. */ -int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) +static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, + int dir) { int i; int out; diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c index 8622b9cd3e3..99102ec5fad 100644 --- a/arch/x86/kernel/ptrace_32.c +++ b/arch/x86/kernel/ptrace_32.c @@ -165,7 +165,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_ seg &= ~7UL; - down(&child->mm->context.sem); + mutex_lock(&child->mm->context.lock); if (unlikely((seg >> 3) >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { @@ -179,7 +179,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_ addr &= 0xffff; addr += base; } - up(&child->mm->context.sem); + mutex_unlock(&child->mm->context.lock); } return addr; } diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c index 86321ee6da9..607085f3f08 100644 --- a/arch/x86/kernel/ptrace_64.c +++ b/arch/x86/kernel/ptrace_64.c @@ -103,7 +103,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r seg &= ~7UL; - down(&child->mm->context.sem); + mutex_lock(&child->mm->context.lock); if (unlikely((seg >> 3) >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { @@ -117,7 +117,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r addr &= 0xffff; addr += base; } - up(&child->mm->context.sem); + mutex_unlock(&child->mm->context.lock); } return addr; diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 8159bf0be17..5a19f0cc5b6 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -604,7 +604,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) level = cpuid_eax(1); if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); - if (c->x86 == 0x10) + if (c->x86 == 0x10 || c->x86 == 0x11) set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); /* Enable workaround for FXSAVE leak */ @@ -968,7 +968,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) * applications want to get the raw CPUID data, they should access * /dev/cpu/<cpu_nr>/cpuid instead. 
*/ - static char *x86_cap_flags[] = { + static const char *const x86_cap_flags[] = { /* Intel-defined */ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", @@ -1022,7 +1022,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; - static char *x86_power_flags[] = { + static const char *const x86_power_flags[] = { "ts", /* temperature sensor */ "fid", /* frequency id control */ "vid", /* voltage id control */ diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index d01d51fcce2..0d79df3c563 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -385,7 +385,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, regs->edx = (unsigned long) 0; regs->ecx = (unsigned long) 0; - set_fs(USER_DS); regs->xds = __USER_DS; regs->xes = __USER_DS; regs->xss = __USER_DS; @@ -479,7 +478,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->edx = (unsigned long) &frame->info; regs->ecx = (unsigned long) &frame->uc; - set_fs(USER_DS); regs->xds = __USER_DS; regs->xes = __USER_DS; regs->xss = __USER_DS; diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index 2d35d850202..791d9f8036a 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -342,6 +342,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs) smp_mb__after_clear_bit(); out: put_cpu_no_resched(); + __get_cpu_var(irq_stat).irq_tlb_count++; } void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, @@ -640,6 +641,7 @@ static void native_smp_send_stop(void) fastcall void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); + __get_cpu_var(irq_stat).irq_resched_count++; } fastcall void smp_call_function_interrupt(struct pt_regs *regs) @@ -660,6 +662,7 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs) */ irq_enter(); (*func)(info); + __get_cpu_var(irq_stat).irq_call_count++; irq_exit(); if (wait) { @@ -705,3 +708,10 @@ struct smp_ops smp_ops = { .smp_send_reschedule = native_smp_send_reschedule, .smp_call_function_mask = native_smp_call_function_mask, }; + +int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), + void *info, int wait) +{ + return smp_ops.smp_call_function_mask(mask, func, info, wait); +} +EXPORT_SYMBOL(smp_call_function_mask); diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index df4a82812ad..5c2964727d1 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -163,6 +163,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) out: ack_APIC_irq(); cpu_clear(cpu, f->flush_cpumask); + add_pda(irq_tlb_count, 1); } static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, @@ -493,6 +494,7 @@ void smp_send_stop(void) asmlinkage void smp_reschedule_interrupt(void) { ack_APIC_irq(); + add_pda(irq_resched_count, 1); } asmlinkage void smp_call_function_interrupt(void) @@ -514,6 +516,7 @@ asmlinkage void smp_call_function_interrupt(void) exit_idle(); irq_enter(); (*func)(info); + add_pda(irq_call_count, 1); irq_exit(); if (wait) { mb(); diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 31fc08bd15e..be3faac0471 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -102,8 +102,8 @@ u8 apicid_2_node[MAX_APICID]; * Trampoline 80x86 program as an array. 
*/ -extern unsigned char trampoline_data []; -extern unsigned char trampoline_end []; +extern const unsigned char trampoline_data []; +extern const unsigned char trampoline_end []; static unsigned char *trampoline_base; static int trampoline_exec; @@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 }; * has made sure it's suitably aligned. */ -static unsigned long __devinit setup_trampoline(void) +static unsigned long __cpuinit setup_trampoline(void) { memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); return virt_to_phys(trampoline_base); @@ -1021,6 +1021,12 @@ static void __init smp_boot_cpus(unsigned int max_cpus) if (!max_cpus) { smp_found_config = 0; printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); + + if (nmi_watchdog == NMI_LOCAL_APIC) { + printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n"); + connect_bsp_APIC(); + setup_local_APIC(); + } smpboot_clear_io_apic_irqs(); phys_cpu_present_map = physid_mask_of_physid(0); cpu_set(0, per_cpu(cpu_sibling_map, 0)); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 0faa0a0af27..e351ac4ab5b 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -102,8 +102,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); * Trampoline 80x86 program as an array. */ -extern unsigned char trampoline_data[]; -extern unsigned char trampoline_end[]; +extern const unsigned char trampoline_data[]; +extern const unsigned char trampoline_end[]; /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; @@ -695,7 +695,6 @@ do_rest: cpu_clear(cpu, cpu_present_map); cpu_clear(cpu, cpu_possible_map); x86_cpu_to_apicid[cpu] = BAD_APICID; - x86_cpu_to_log_apicid[cpu] = BAD_APICID; return -EIO; } diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 413e527cdeb..6fa6cf036c7 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -33,7 +33,7 @@ static void save_stack_address(void *data, unsigned long addr) trace->entries[trace->nr_entries++] = addr; } -static struct stacktrace_ops save_stack_ops = { +static const struct stacktrace_ops save_stack_ops = { .warning = save_stack_warning, .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c index e3f2569b2c4..9e540fee700 100644 --- a/arch/x86/kernel/tce_64.c +++ b/arch/x86/kernel/tce_64.c @@ -40,9 +40,9 @@ static inline void flush_tce(void* tceaddr) { /* a single tce can't cross a cache line */ if (cpu_has_clflush) - asm volatile("clflush (%0)" :: "r" (tceaddr)); + clflush(tceaddr); else - asm volatile("wbinvd":::"memory"); + wbinvd(); } void tce_build(struct iommu_table *tbl, unsigned long index, diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index c25f23eb397..8caa0b77746 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -44,15 +44,15 @@ int arch_register_cpu(int num) * Also certain PCI quirks require not to enable hotplug control * for all CPU's. 
*/ - if (num && enable_cpu_hotplug) +#ifdef CONFIG_HOTPLUG_CPU + if (num) cpu_devices[num].cpu.hotpluggable = 1; +#endif return register_cpu(&cpu_devices[num].cpu, num); } #ifdef CONFIG_HOTPLUG_CPU -int enable_cpu_hotplug = 1; - void arch_unregister_cpu(int num) { return unregister_cpu(&cpu_devices[num].cpu); } diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index f62815f8d06..9bcc1c6aca3 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S @@ -36,11 +36,11 @@ #include <asm/segment.h> #include <asm/page.h> -.data - /* We can free up trampoline after bootup if cpu hotplug is not supported. */ #ifndef CONFIG_HOTPLUG_CPU .section ".init.data","aw",@progbits +#else +.section .rodata,"a",@progbits #endif .code16 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 607983b0d27..e30b67c6a9f 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S @@ -33,7 +33,12 @@ #include <asm/msr.h> #include <asm/segment.h> -.data +/* We can free up trampoline after bootup if cpu hotplug is not supported. */ +#ifndef CONFIG_HOTPLUG_CPU +.section .init.data, "aw", @progbits +#else +.section .rodata, "a", @progbits +#endif .code16 diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 05c27ecaf2a..b132d3957df 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -112,7 +112,7 @@ struct stack_frame { static inline unsigned long print_context_stack(struct thread_info *tinfo, unsigned long *stack, unsigned long ebp, - struct stacktrace_ops *ops, void *data) + const struct stacktrace_ops *ops, void *data) { #ifdef CONFIG_FRAME_POINTER struct stack_frame *frame = (struct stack_frame *)ebp; @@ -149,7 +149,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, - struct stacktrace_ops *ops, void *data) + const struct stacktrace_ops *ops, void *data) { unsigned long ebp = 0; @@ -221,7 +221,7 @@ static void print_trace_address(void *data, unsigned long addr) touch_nmi_watchdog(); } -static struct stacktrace_ops print_trace_ops = { +static const struct stacktrace_ops print_trace_ops = { .warning = print_trace_warning, .warning_symbol = print_trace_warning_symbol, .stack = print_trace_stack, @@ -398,31 +398,24 @@ void die(const char * str, struct pt_regs * regs, long err) local_save_flags(flags); if (++die.lock_owner_depth < 3) { - int nl = 0; unsigned long esp; unsigned short ss; report_bug(regs->eip, regs); - printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); + printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, + ++die_counter); #ifdef CONFIG_PREEMPT - printk(KERN_EMERG "PREEMPT "); - nl = 1; + printk("PREEMPT "); #endif #ifdef CONFIG_SMP - if (!nl) - printk(KERN_EMERG); printk("SMP "); - nl = 1; #endif #ifdef CONFIG_DEBUG_PAGEALLOC - if (!nl) - printk(KERN_EMERG); printk("DEBUG_PAGEALLOC"); - nl = 1; #endif - if (nl) - printk("\n"); + printk("\n"); + if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) { @@ -1112,20 +1105,6 @@ asmlinkage void math_emulate(long arg) #endif /* CONFIG_MATH_EMULATION */ -#ifdef CONFIG_X86_F00F_BUG -void __init trap_init_f00f_bug(void) -{ - __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); - - /* - * Update the IDT descriptor and reload the IDT so that - * it uses the read-only mapped virtual address. 
- */ - idt_descr.address = fix_to_virt(FIX_F00F_IDT); - load_idt(&idt_descr); -} -#endif - /* * This needs to use 'idt_table' rather than 'idt', and * thus use the _nonmapped_ version of the IDT, as the diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index bc7116acf8f..b4a9b3db199 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c @@ -215,7 +215,7 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, - struct stacktrace_ops *ops, void *data) + const struct stacktrace_ops *ops, void *data) { const unsigned cpu = get_cpu(); unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; @@ -336,7 +336,7 @@ static void print_trace_address(void *data, unsigned long addr) printk_address(addr); } -static struct stacktrace_ops print_trace_ops = { +static const struct stacktrace_ops print_trace_ops = { .warning = print_trace_warning, .warning_symbol = print_trace_warning_symbol, .stack = print_trace_stack, diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index b85ad754f70..e87a3939ed4 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c @@ -349,10 +349,10 @@ __cpuinit int unsynchronized_tsc(void) static void __init check_geode_tsc_reliable(void) { - unsigned long val; + unsigned long res_low, res_high; - rdmsrl(MSR_GEODE_BUSCONT_CONF0, val); - if ((val & RTSC_SUSP)) + rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); + if (res_low & RTSC_SUSP) clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; } #else diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 93847d84815..8a67e282cb5 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -78,7 +78,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; vsyscall_gtod_data.sys_tz = sys_tz; - vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } @@ -289,7 +288,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu) unsigned long *d; unsigned long node = 0; #ifdef CONFIG_NUMA - node = cpu_to_node[cpu]; + node = cpu_to_node(cpu); #endif if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) write_rdtscp_aux((node << 12) | cpu); diff --git a/arch/x86/lib/bitstr_64.c b/arch/x86/lib/bitstr_64.c index 24676609a6a..7445caf1b5d 100644 --- a/arch/x86/lib/bitstr_64.c +++ b/arch/x86/lib/bitstr_64.c @@ -14,7 +14,7 @@ find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len) /* could test bitsliced, but it's hardly worth it */ end = n+len; - if (end >= nbits) + if (end > nbits) return -1; for (i = n+1; i < end; i++) { if (test_bit(i, bitmap)) { diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 7767962f25d..57d043fa893 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c @@ -26,27 +26,18 @@ static void __rdmsr_safe_on_cpu(void *info) static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) { int err = 0; - preempt_disable(); - if (smp_processor_id() == cpu) - if (safe) - err = rdmsr_safe(msr_no, l, h); - else - rdmsr(msr_no, *l, *h); - else { - struct msr_info rv; - - rv.msr_no = msr_no; - if (safe) { - smp_call_function_single(cpu, __rdmsr_safe_on_cpu, - &rv, 0, 1); - err = rv.err; - } else { - 
-		}
-		*l = rv.l;
-		*h = rv.h;
+	struct msr_info rv;
+
+	rv.msr_no = msr_no;
+	if (safe) {
+		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
+		err = rv.err;
+	} else {
+		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
 	}
-	preempt_enable();
+	*l = rv.l;
+	*h = rv.h;
+
 	return err;
 }
@@ -67,27 +58,18 @@ static void __wrmsr_safe_on_cpu(void *info)
 static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
 {
 	int err = 0;
-	preempt_disable();
-	if (smp_processor_id() == cpu)
-		if (safe)
-			err = wrmsr_safe(msr_no, l, h);
-		else
-			wrmsr(msr_no, l, h);
-	else {
-		struct msr_info rv;
-
-		rv.msr_no = msr_no;
-		rv.l = l;
-		rv.h = h;
-		if (safe) {
-			smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
-						 &rv, 0, 1);
-			err = rv.err;
-		} else {
-			smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
-		}
+	struct msr_info rv;
+
+	rv.msr_no = msr_no;
+	rv.l = l;
+	rv.h = h;
+	if (safe) {
+		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
+		err = rv.err;
+	} else {
+		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
 	}
-	preempt_enable();
+
 	return err;
 }
diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
index 0cde1f80731..05ea55f7140 100644
--- a/arch/x86/lib/rwlock_64.S
+++ b/arch/x86/lib/rwlock_64.S
@@ -2,7 +2,7 @@
 #include <linux/linkage.h>
 #include <asm/rwlock.h>
-#include <asm/alternative-asm.i>
+#include <asm/alternative-asm.h>
 #include <asm/dwarf2.h>
 /* rdi: pointer to rwlock_t */
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
index c01eb39c0b4..444fba40098 100644
--- a/arch/x86/lib/semaphore_32.S
+++ b/arch/x86/lib/semaphore_32.S
@@ -15,8 +15,8 @@
 #include <linux/linkage.h>
 #include <asm/rwlock.h>
-#include <asm/alternative-asm.i>
-#include <asm/frame.i>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
 #include <asm/dwarf2.h>
 /*
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 2c773fefa3d..c2c0504a307 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -160,26 +160,6 @@ char *strchr(const char * s, int c)
 EXPORT_SYMBOL(strchr);
 #endif
-#ifdef __HAVE_ARCH_STRRCHR
-char *strrchr(const char * s, int c)
-{
-	int d0, d1;
-	char * res;
-	asm volatile( "movb %%al,%%ah\n"
-		"1:\tlodsb\n\t"
-		"cmpb %%ah,%%al\n\t"
-		"jne 2f\n\t"
-		"leal -1(%%esi),%0\n"
-		"2:\ttestb %%al,%%al\n\t"
-		"jne 1b"
-		:"=g" (res), "=&S" (d0), "=&a" (d1)
-		:"0" (0),"1" (s),"2" (c)
-		:"memory");
-	return res;
-}
-EXPORT_SYMBOL(strrchr);
-#endif
-
 #ifdef __HAVE_ARCH_STRLEN
 size_t strlen(const char * s)
 {
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index 1bd82983986..3f08010f351 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -35,7 +35,11 @@ void __init pre_intr_init_hook(void)
 /*
  * IRQ2 is cascade interrupt to second interrupt controller
  */
-static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
 /**
  * intr_init_hook - post gate setup interrupt initialisation
diff --git a/arch/x86/mach-es7000/es7000plat.c b/arch/x86/mach-es7000/es7000plat.c
index ab99072d3f9..f5d6f7d8b86 100644
--- a/arch/x86/mach-es7000/es7000plat.c
+++ b/arch/x86/mach-es7000/es7000plat.c
@@ -46,11 +46,11 @@
  * ES7000 Globals
  */
-volatile unsigned long *psai = NULL;
-struct mip_reg *mip_reg;
-struct mip_reg *host_reg;
-int mip_port;
-unsigned long mip_addr, host_addr;
+static volatile unsigned long *psai = NULL;
+static struct mip_reg *mip_reg;
+static struct mip_reg *host_reg;
+static int mip_port;
+static unsigned long mip_addr, host_addr;
 /*
  * GSI override for ES7000 platforms.
@@ -288,28 +288,8 @@ es7000_start_cpu(int cpu, unsigned long eip)
 }
-int
-es7000_stop_cpu(int cpu)
-{
-	int startup;
-
-	if (psai == NULL)
-		return -1;
-
-	startup= (0x1000000 | cpu);
-
-	while ((*psai & 0xff00ffff) != startup)
-		;
-
-	startup = (*psai & 0xff0000) >> 16;
-	*psai &= 0xffffff;
-
-	return 0;
-
-}
-
 void __init
-es7000_sw_apic()
+es7000_sw_apic(void)
 {
 	if (es7000_plat) {
 		int mip_status;
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index 74f3da63442..4121d155180 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -22,7 +22,7 @@ extern struct genapic apic_default;
 struct genapic *genapic = &apic_default;
-struct genapic *apic_probe[] __initdata = {
+static struct genapic *apic_probe[] __initdata = {
 	&apic_summit,
 	&apic_bigsmp,
 	&apic_es7000,
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a0ab4002abc..3bef977cb29 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -18,7 +18,11 @@ void __init pre_intr_init_hook(void)
 /*
  * IRQ2 is cascade interrupt to second interrupt controller
  */
-static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
 void __init intr_init_hook(void)
 {
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index b87f8548e75..e4928aa6bdf 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -442,8 +442,8 @@ static __u32 __init
 setup_trampoline(void)
 {
 	/* these two are global symbols in trampoline.S */
-	extern __u8 trampoline_end[];
-	extern __u8 trampoline_data[];
+	extern const __u8 trampoline_end[];
+	extern const __u8 trampoline_data[];
 	memcpy((__u8 *)trampoline_base, trampoline_data,
 	       trampoline_end - trampoline_data);
@@ -1037,6 +1037,7 @@ smp_call_function_interrupt(void)
 	 */
 	irq_enter();
 	(*func)(info);
+	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 	if (wait) {
 		mb();
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index b1e45457d4e..13893772cc4 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -103,14 +103,14 @@ extern unsigned long highend_pfn, highstart_pfn;
 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
-unsigned long node_remap_start_pfn[MAX_NUMNODES];
+static unsigned long node_remap_start_pfn[MAX_NUMNODES];
 unsigned long node_remap_size[MAX_NUMNODES];
-unsigned long node_remap_offset[MAX_NUMNODES];
-void *node_remap_start_vaddr[MAX_NUMNODES];
+static unsigned long node_remap_offset[MAX_NUMNODES];
+static void *node_remap_start_vaddr[MAX_NUMNODES];
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
-void *node_remap_end_vaddr[MAX_NUMNODES];
-void *node_remap_alloc_vaddr[MAX_NUMNODES];
+static void *node_remap_end_vaddr[MAX_NUMNODES];
+static void *node_remap_alloc_vaddr[MAX_NUMNODES];
 static unsigned long kva_start_pfn;
 static unsigned long kva_pages;
 /*
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index c686ae20fd6..6555c3d1437 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -105,7 +105,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 	   LDT and other horrors are only used in user space. */
 	if (seg & (1<<2)) {
 		/* Must lock the LDT while reading it. */
-		down(&current->mm->context.sem);
+		mutex_lock(&current->mm->context.lock);
 		desc = current->mm->context.ldt;
 		desc = (void *)desc + (seg & ~7);
 	} else {
@@ -118,7 +118,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 	base = get_desc_base((unsigned long *)desc);
 	if (seg & (1<<2)) {
-		up(&current->mm->context.sem);
+		mutex_unlock(&current->mm->context.lock);
 	} else
 		put_cpu();
@@ -539,23 +539,22 @@ no_context:
 		printk(KERN_ALERT "BUG: unable to handle kernel paging"
 			" request");
 		printk(" at virtual address %08lx\n",address);
-		printk(KERN_ALERT " printing eip:\n");
-		printk("%08lx\n", regs->eip);
+		printk(KERN_ALERT "printing eip: %08lx ", regs->eip);
 		page = read_cr3();
 		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
 #ifdef CONFIG_X86_PAE
-		printk(KERN_ALERT "*pdpt = %016Lx\n", page);
+		printk("*pdpt = %016Lx ", page);
 		if ((page >> PAGE_SHIFT) < max_low_pfn && page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
 			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)];
-			printk(KERN_ALERT "*pde = %016Lx\n", page);
+			printk(KERN_ALERT "*pde = %016Lx ", page);
 			page &= ~_PAGE_NX;
 		}
 #else
-		printk(KERN_ALERT "*pde = %08lx\n", page);
+		printk("*pde = %08lx ", page);
 #endif
 		/*
@@ -569,8 +568,10 @@ no_context:
 			page &= PAGE_MASK;
 			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
-			printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page);
+			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
 		}
+
+		printk("\n");
 	}
 	tsk->thread.cr2 = address;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 33d367a3432..c7d19471261 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -85,13 +85,20 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
-		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+		pte_t *page_table = NULL;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
+#endif
+		if (!page_table)
+			page_table =
+				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
 	}
-
+
 	return pte_offset_kernel(pmd, 0);
 }
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 6da23552226..5eec5e56d07 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -166,7 +166,7 @@ early_node_mem(int nodeid, unsigned long start, unsigned long end,
 		return __va(mem);
 	ptr = __alloc_bootmem_nopanic(size, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
-	if (ptr == 0) {
+	if (ptr == NULL) {
 		printk(KERN_ERR "Cannot find %lu bytes in node %d\n", size, nodeid);
 		return NULL;
@@ -261,7 +261,7 @@ void __init numa_init_array(void)
 	   We round robin the existing nodes. */
 	rr = first_node(node_online_map);
 	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_to_node[i] != NUMA_NO_NODE)
+		if (cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
 		rr = next_node(rr, node_online_map);
@@ -543,7 +543,7 @@ __cpuinit void numa_add_cpu(int cpu)
 void __cpuinit numa_set_node(int cpu, int node)
 {
 	cpu_pda(cpu)->nodenumber = node;
-	cpu_to_node[cpu] = node;
+	cpu_to_node(cpu) = node;
 }
 unsigned long __init numa_free_all_bootmem(void)
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 4241a74d16c..260073c0760 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 static void cache_flush_page(struct page *p)
 {
-	unsigned long adr = (unsigned long)page_address(p);
+	void *adr = page_address(p);
 	int i;
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		asm volatile("clflush (%0)" :: "r" (adr + i));
+		clflush(adr+i);
 }
 static void flush_kernel_map(void *arg)
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 10b9809ce82..8a4f65bf956 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -65,7 +65,7 @@ static void cache_flush_page(void *adr)
 {
 	int i;
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		asm volatile("clflush (%0)" :: "r" (adr + i));
+		clflush(adr+i);
 }
 static void flush_kernel_map(void *arg)
@@ -148,6 +148,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		split = split_large_page(address, prot, ref_prot2);
 		if (!split)
 			return -ENOMEM;
+		pgprot_val(ref_prot2) &= ~_PAGE_NX;
 		set_pte(kpte, mk_pte(split, ref_prot2));
 		kpte_page = split;
 	}
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index ef1f6cd3ea6..be61a1d845a 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
+#include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/highmem.h>
@@ -39,6 +40,8 @@ void show_mem(void)
 	for_each_online_pgdat(pgdat) {
 		pgdat_resize_lock(pgdat, &flags);
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+				touch_nmi_watchdog();
 			page = pgdat_page_nr(pgdat, i);
 			total++;
 			if (PageHighMem(page))
@@ -97,8 +100,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 	}
 	pte = pte_offset_kernel(pmd, vaddr);
 	if (pgprot_val(flags))
-		/* <pfn,flags> stored as-is, to permit clearing entries */
-		set_pte(pte, pfn_pte(pfn, flags));
+		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
 	else
 		pte_clear(&init_mm, vaddr, pte);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index acdf03e1914..56089ccc394 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -431,9 +431,9 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_to_node[i] == NUMA_NO_NODE)
+		if (cpu_to_node(i) == NUMA_NO_NODE)
 			continue;
-		if (!node_isset(cpu_to_node[i], node_possible_map))
+		if (!node_isset(cpu_to_node(i), node_possible_map))
 			numa_set_node(i, NUMA_NO_NODE);
 	}
 	numa_init_array();
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 11b7a51566a..2d0eeac7251 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -269,7 +269,6 @@ static void nmi_cpu_shutdown(void * dummy)
 	apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
 	apic_write(APIC_LVTERR, v);
 	nmi_restore_registers(msrs);
-	model->shutdown(msrs);
 }
@@ -278,6 +277,7 @@ static void nmi_shutdown(void)
 	nmi_enabled = 0;
 	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
+	model->shutdown(cpu_msrs);
 	free_msrs();
 }
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2d71bbc411d..f4386990b15 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -289,6 +289,22 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
 		},
 	},
+	{
+		.callback = set_bf_sort,
+		.ident = "HP ProLiant DL385 G2",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
+		},
+	},
+	{
+		.callback = set_bf_sort,
+		.ident = "HP ProLiant DL585 G2",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+		},
+	},
 #ifdef __i386__
 	{
 		.callback = assign_all_busses,
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index dcd6bb9e0bb..7a2ba458393 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -13,7 +13,7 @@ vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
 $(obj)/vdso.o: $(obj)/vdso.so
-targets += vdso.so vdso.lds $(vobjs-y) vdso-syms.o
+targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) vdso-syms.o
 # The DSO images are built using a special linker script.
 quiet_cmd_syscall = SYSCALL $@
@@ -26,12 +26,19 @@ vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \
 	$(call ld-option, -Wl$(comma)--hash-style=sysv) \
 	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 SYSCFLAGS_vdso.so = $(vdso-flags)
+SYSCFLAGS_vdso.so.dbg = $(vdso-flags)
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
 $(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE
+
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
 	$(call if_changed,syscall)
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+	$(call if_changed,objcopy)
+
 CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
 $(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL)
@@ -47,3 +54,11 @@ $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
 SYSCFLAGS_vdso-syms.o = -r -d
 $(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE
 	$(call if_changed,syscall)
+
+quiet_cmd_vdso_install = INSTALL $@
+      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+vdso.so:
+	@mkdir -p $(MODLIB)/vdso
+	$(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
index b9a60e665d0..667d3245d97 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/vdso/vdso.lds.S
@@ -26,13 +26,16 @@ SECTIONS
      is insufficient, ld -shared will barf. Just increase it here. */
   . = VDSO_PRELINK + VDSO_TEXT_OFFSET;
-  .text : { *(.text) } :text
-  .text.ptr : { *(.text.ptr) } :text
-  . = VDSO_PRELINK + 0x900;
-  .data : { *(.data) } :text
-  .bss : { *(.bss) } :text
+  .text : { *(.text*) } :text
+  .rodata : { *(.rodata*) } :text
+  .data : {
+	*(.data*)
+	*(.sdata*)
+	*(.bss*)
+	*(.dynbss*)
+  } :text
-  .altinstructions : { *(.altinstructions) } :text
+  .altinstructions : { *(.altinstructions) } :text
   .altinstr_replacement : { *(.altinstr_replacement) } :text
   .note : { *(.note.*) } :text :note
@@ -42,7 +45,6 @@ SECTIONS
   .useless : {
 	*(.got.plt) *(.got)
 	*(.gnu.linkonce.d.*)
-	*(.dynbss)
 	*(.gnu.linkonce.b.*)
   } :text
 }
diff --git a/arch/x86/vdso/vvar.c b/arch/x86/vdso/vvar.c
index 6fc22219a47..1b7e703684f 100644
--- a/arch/x86/vdso/vvar.c
+++ b/arch/x86/vdso/vvar.c
@@ -8,5 +8,5 @@
 #include <asm/timex.h>
 #include <asm/vgtod.h>
-#define VEXTERN(x) typeof (__ ## x) *vdso_ ## x = (void *)VMAGIC;
+#define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC;
 #include "vextern.h"
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d53bf9d8a72..c1b131bcdcb 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -356,6 +356,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 	 */
 	irq_enter();
 	(*func)(info);
+	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 	if (wait) {