Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--   include/asm-x86_64/Kbuild         |  11
-rw-r--r--   include/asm-x86_64/alternative.h  |  21
-rw-r--r--   include/asm-x86_64/calgary.h      |  11
-rw-r--r--   include/asm-x86_64/elf.h          |  20
-rw-r--r--   include/asm-x86_64/irqflags.h     | 141
-rw-r--r--   include/asm-x86_64/kdebug.h       |   2
-rw-r--r--   include/asm-x86_64/kprobes.h      |   1
-rw-r--r--   include/asm-x86_64/page.h         |   2
-rw-r--r--   include/asm-x86_64/percpu.h       |   2
-rw-r--r--   include/asm-x86_64/processor.h    |   6
-rw-r--r--   include/asm-x86_64/signal.h       |   2
-rw-r--r--   include/asm-x86_64/spinlock.h     |  11
-rw-r--r--   include/asm-x86_64/swiotlb.h      |   2
-rw-r--r--   include/asm-x86_64/system.h       |  39
-rw-r--r--   include/asm-x86_64/tce.h          |   8
-rw-r--r--   include/asm-x86_64/unistd.h       |  11
-rw-r--r--   include/asm-x86_64/unwind.h       |   1
-rw-r--r--   include/asm-x86_64/vsyscall.h     |   3
18 files changed, 200 insertions(+), 94 deletions(-)
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
new file mode 100644
index 00000000000..dc4d101e8a1
--- /dev/null
+++ b/include/asm-x86_64/Kbuild
@@ -0,0 +1,11 @@
+include include/asm-generic/Kbuild.asm
+
+ALTARCH := i386
+ARCHDEF := defined __x86_64__
+ALTARCHDEF := defined __i386__
+
+header-y += boot.h bootsetup.h cpufeature.h debugreg.h ldt.h \
+	msr.h prctl.h setup.h sigcontext32.h ucontext.h \
+	vsyscall32.h
+
+unifdef-y += mce.h mtrr.h vsyscall.h
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index aa67bfd1b3c..a584826cc57 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__

 #include <linux/types.h>
+#include <asm/cpufeature.h>

 struct alt_instr {
 	u8 *instr;		/* original instruction */
@@ -102,9 +103,6 @@ static inline void alternatives_smp_switch(int smp) {}
 /*
  * Alternative inline assembly for SMP.
  *
- * alternative_smp() takes two versions (SMP first, UP second) and is
- * for more complex stuff such as spinlocks.
- *
  * The LOCK_PREFIX macro defined here replaces the LOCK and
  * LOCK_PREFIX macros used everywhere in the source tree.
  *
@@ -124,21 +122,6 @@ static inline void alternatives_smp_switch(int smp) {}
  */

 #ifdef CONFIG_SMP
-#define alternative_smp(smpinstr, upinstr, args...) \
-	asm volatile ("661:\n\t" smpinstr "\n662:\n" \
-		".section .smp_altinstructions,\"a\"\n" \
-		"  .align 8\n" \
-		"  .quad 661b\n" /* label */ \
-		"  .quad 663f\n" /* new instruction */ \
-		"  .byte 0x66\n" /* X86_FEATURE_UP */ \
-		"  .byte 662b-661b\n" /* sourcelen */ \
-		"  .byte 664f-663f\n" /* replacementlen */ \
-		".previous\n" \
-		".section .smp_altinstr_replacement,\"awx\"\n" \
-		"663:\n\t" upinstr "\n" /* replacement */ \
-		"664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
-		".previous" : args)
-
 #define LOCK_PREFIX \
 		".section .smp_locks,\"a\"\n" \
 		"  .align 8\n" \
@@ -147,8 +130,6 @@ static inline void alternatives_smp_switch(int smp) {}
 		"661:\n\tlock; "

 #else /* ! CONFIG_SMP */
-#define alternative_smp(smpinstr, upinstr, args...) \
-	asm volatile (upinstr : args)
 #define LOCK_PREFIX ""

 #endif
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index 6e1654f3098..4e391952424 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -1,8 +1,10 @@
 /*
  * Derived from include/asm-powerpc/iommu.h
  *
- * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
- * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,9 +60,4 @@ static inline int calgary_iommu_init(void) { return 1; }
 static inline void detect_calgary(void) { return; }
 #endif

-static inline unsigned int bus_to_phb(unsigned char busno)
-{
-	return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
-}
-
 #endif /* _ASM_X86_64_CALGARY_H */
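
The LOCK_PREFIX that survives in alternative.h above is the interesting part of this change: every lock-prefixed instruction records its own address in the .smp_locks section, so the kernel can patch each prefix to a NOP when it finds itself booting on a uniprocessor. Below is a minimal userspace sketch of the same record-the-address pattern, assuming GCC on x86-64; the section and function names are illustrative, not the kernel's.

/* Each use of MY_LOCK_PREFIX stores the address of its "661:" label in a
 * dedicated, allocated section. A patching pass could walk .my_smp_locks
 * and overwrite each recorded byte with 0x90 (NOP) on UP machines.
 * GAS numeric labels such as 661 are local and may be reused per block. */
#include <stdio.h>

#define MY_LOCK_PREFIX				\
	".section .my_smp_locks,\"a\"\n"	\
	"  .align 8\n"				\
	"  .quad 661f\n" /* address */		\
	".previous\n"				\
	"661:\n\tlock; "

static void atomic_inc(volatile int *v)
{
	asm volatile(MY_LOCK_PREFIX "incl %0" : "+m" (*v) : : "memory");
}

int main(void)
{
	int counter = 0;

	atomic_inc(&counter);
	printf("counter = %d\n", counter);	/* prints: counter = 1 */
	return 0;
}
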
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index b4f8f4a41a6..a406fcb1e92 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -7,8 +7,6 @@
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <asm/processor.h>
-#include <asm/compat.h>

 /* x86-64 relocation types */
 #define R_X86_64_NONE		0	/* No reloc */
@@ -39,18 +37,23 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_i387_struct elf_fpregset_t;

 /*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
-	((x)->e_machine == EM_X86_64)
-
-/*
  * These are used to set parameters in the core dumps.
  */
 #define ELF_CLASS	ELFCLASS64
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_X86_64

+#ifdef __KERNEL__
+#include <asm/processor.h>
+#include <asm/compat.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+	((x)->e_machine == EM_X86_64)
+
 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
    contains a pointer to a function which might be registered using `atexit'.
    This provides a mean for the dynamic linker to call DT_FINI functions for
@@ -141,7 +144,6 @@ typedef struct user_i387_struct elf_fpregset_t;
 /* I'm not sure if we can use '-' here */
 #define ELF_PLATFORM  ("x86_64")

-#ifdef __KERNEL__
 extern void set_personality_64bit(void);
 #define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
 /*
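
With the move under #ifdef __KERNEL__, elf_check_arch() and the includes it depends on are no longer exported to userspace headers, but the check itself is a one-line comparison on e_machine. A small userspace illustration of the same test, reading the ELF header of the running binary (the file path and output format are just for demonstration):

/* Verify that our own executable is an x86-64 ELF object -- the same
 * e_machine comparison the kernel's binary loader performs. */
#include <elf.h>
#include <stdio.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	FILE *f = fopen("/proc/self/exe", "rb");

	if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
		perror("read ELF header");
		return 1;
	}
	fclose(f);

	printf("e_machine = %u, x86-64: %s\n", ehdr.e_machine,
	       ehdr.e_machine == EM_X86_64 ? "yes" : "no");
	return 0;
}
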
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
new file mode 100644
index 00000000000..cce6937e87c
--- /dev/null
+++ b/include/asm-x86_64/irqflags.h
@@ -0,0 +1,141 @@
+/*
+ * include/asm-x86_64/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() functions from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Interrupt control:
+ */
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__(
+		"# __raw_save_flags\n\t"
+		"pushfq ; popq %q0"
+		: "=g" (flags)
+		: /* no input */
+		: "memory"
+	);
+
+	return flags;
+}
+
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	__asm__ __volatile__(
+		"pushq %0 ; popfq"
+		: /* no output */
+		:"g" (flags)
+		:"memory", "cc"
+	);
+}
+
+#ifdef CONFIG_X86_VSMP
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1<<9)) || (flags & (1 << 18));
+}
+
+#else /* CONFIG_X86_VSMP */
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__("cli" : : : "memory");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__("sti" : : : "memory");
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1 << 9));
+}
+
+#endif
+
+/*
+ * For spinlocks, etc.:
+ */
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_disable();
+
+	return flags;
+}
+
+#define raw_local_irq_save(flags) \
+		do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static inline void raw_safe_halt(void)
+{
+	__asm__ __volatile__("sti; hlt" : : : "memory");
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static inline void halt(void)
+{
+	__asm__ __volatile__("hlt": : :"memory");
+}
+
+#else /* __ASSEMBLY__: */
+# ifdef CONFIG_TRACE_IRQFLAGS
+#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
+#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
+# else
+#  define TRACE_IRQS_ON
+#  define TRACE_IRQS_OFF
+# endif
+#endif
+
+#endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index cd52c7f33bc..2b0c088e295 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -49,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str,
 	return atomic_notifier_call_chain(&die_chain, val, &args);
 }

-extern int printk_address(unsigned long address);
+extern void printk_address(unsigned long address);
 extern void die(const char *,struct pt_regs *,long);
 extern void __die(const char *,struct pt_regs *,long);
 extern void show_registers(struct pt_regs *regs);
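
The new irqflags.h turns the old system.h macros into typed inline functions; the flags word is just RFLAGS, with the interrupt flag IF in bit 9. A userspace sketch of the save/test pair, assuming GCC on x86-64 -- pushfq/popfq is unprivileged, so this runs as a normal process (where IF always reads as set), while cli/sti/hlt would fault outside the kernel:

/* Read RFLAGS the way __raw_local_save_flags() does and test IF (bit 9)
 * the way raw_irqs_disabled_flags() does. Helper names are illustrative. */
#include <stdio.h>

static inline unsigned long save_flags(void)
{
	unsigned long flags;

	asm volatile("# save_flags\n\t"
		     "pushfq ; popq %0"
		     : "=g" (flags) : /* no input */ : "memory");
	return flags;
}

static inline int irqs_disabled_flags(unsigned long flags)
{
	return !(flags & (1UL << 9));	/* IF clear => interrupts off */
}

int main(void)
{
	unsigned long flags = save_flags();

	printf("RFLAGS = %#lx, irqs disabled: %d\n",
	       flags, irqs_disabled_flags(flags));	/* expect 0 here */
	return 0;
}
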
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index d36febd9bb1..cf5317898fb 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -47,6 +47,7 @@ typedef u8 kprobe_opcode_t;

 void kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
+#define flush_insn_slot(p)	do { } while (0)

 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f7bf875aae4..10f346165ca 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -19,7 +19,7 @@
 #define EXCEPTION_STACK_ORDER 0
 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

-#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
 #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

 #define IRQSTACK_ORDER 2
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 549eb929b2c..08dd9f9dda8 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -14,6 +14,8 @@
 #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)

+#define per_cpu_offset(x) (__per_cpu_offset(x))
+
 /* Separate out the type, so (int[3], foo) works. */
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3b3c1217fe6..de9c3147ee4 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -232,8 +232,14 @@ struct tss_struct {
 	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 } __attribute__((packed)) ____cacheline_aligned;

+
 extern struct cpuinfo_x86 boot_cpu_data;
 DECLARE_PER_CPU(struct tss_struct,init_tss);
+/* Save the original ist values for checking stack pointers during debugging */
+struct orig_ist {
+	unsigned long ist[7];
+};
+DECLARE_PER_CPU(struct orig_ist, orig_ist);

 #ifdef CONFIG_X86_VSMP
 #define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
index cef7a7d51b7..3ede2a61973 100644
--- a/include/asm-x86_64/signal.h
+++ b/include/asm-x86_64/signal.h
@@ -3,13 +3,13 @@

 #ifndef __ASSEMBLY__
 #include <linux/types.h>
-#include <linux/linkage.h>
 #include <linux/time.h>

 /* Avoid too many header ordering problems.  */
 struct siginfo;

 #ifdef __KERNEL__
+#include <linux/linkage.h>
 /* Most things should be clean enough to redefine this at will, if care
    is taken to make libc match.  */
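
The page.h change gives the debug stack its own, larger order: with 4 KiB pages, DEBUG_STKSZ doubles from 4 KiB to 8 KiB. A trivial check of the shift arithmetic, with the constants copied from the hunk and PAGE_SIZE assumed to be 4096:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define EXCEPTION_STACK_ORDER	0
#define EXCEPTION_STKSZ		(PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER	(EXCEPTION_STACK_ORDER + 1)	/* was EXCEPTION_STACK_ORDER */
#define DEBUG_STKSZ		(PAGE_SIZE << DEBUG_STACK_ORDER)

int main(void)
{
	printf("EXCEPTION_STKSZ = %lu\n", EXCEPTION_STKSZ);	/* 4096 */
	printf("DEBUG_STKSZ     = %lu\n", DEBUG_STKSZ);		/* 8192 */
	return 0;
}
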
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 8d3421996f9..248a79f0eaf 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -21,7 +21,7 @@

 #define __raw_spin_lock_string \
 	"\n1:\t" \
-	"lock ; decl %0\n\t" \
+	LOCK_PREFIX " ; decl %0\n\t" \
 	"js 2f\n" \
 	LOCK_SECTION_START("") \
 	"2:\t" \
@@ -40,10 +40,7 @@

 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	alternative_smp(
-		__raw_spin_lock_string,
-		__raw_spin_lock_string_up,
-		"=m" (lock->slock) : : "memory");
+	asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
 }

 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -125,12 +122,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)

 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
 }

 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
 				: "=m" (rw->lock) : : "memory");
 }
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 5f9a0180582..ba94ab3d267 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -42,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
 extern void swiotlb_init(void);

+extern int swiotlb_force;
+
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
 #else
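
__raw_spin_lock_string encodes the lock as a signed counter: 1 means free, a lock;decl that reaches 0 means acquired, and a negative result sends the CPU into a rep;nop polling loop. A self-contained userspace sketch of that protocol, assuming GCC on x86-64 (single-threaded here, so the fast path always wins; the type and function names are illustrative, not the kernel's):

#include <stdio.h>

typedef struct { volatile int slock; } my_spinlock_t;

static void my_spin_lock(my_spinlock_t *lock)
{
	asm volatile(
		"\n1:\t"
		"lock ; decl %0\n\t"	/* 1 -> 0: acquired */
		"jns 3f\n"		/* non-negative result: got it */
		"2:\t"
		"rep;nop\n\t"		/* PAUSE while spinning */
		"cmpl $0,%0\n\t"
		"jle 2b\n\t"		/* still held: keep polling */
		"jmp 1b\n"		/* looks free: retry the decl */
		"3:\n\t"
		: "+m" (lock->slock) : : "memory");
}

static void my_spin_unlock(my_spinlock_t *lock)
{
	asm volatile("movl $1,%0" : "=m" (lock->slock) : : "memory");
}

int main(void)
{
	my_spinlock_t lock = { .slock = 1 };

	my_spin_lock(&lock);
	printf("locked:   slock = %d\n", lock.slock);	/* 0 */
	my_spin_unlock(&lock);
	printf("unlocked: slock = %d\n", lock.slock);	/* 1 */
	return 0;
}
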
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 68e559f3631..6bf170bceae 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -240,47 +240,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #endif

 #define read_barrier_depends()	do {} while(0)
 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)

 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

-/* interrupt control.. */
-#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-
-#ifdef CONFIG_X86_VSMP
-/* Interrupt control for VSMP architecture */
-#define local_irq_disable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
-#define local_irq_enable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
-
-#define irqs_disabled()					\
-({							\
-	unsigned long flags;				\
-	local_save_flags(flags);			\
-	(flags & (1<<18)) || !(flags & (1<<9));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
-#else /* CONFIG_X86_VSMP */
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	!(flags & (1<<9));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
-#endif
-
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()			__asm__ __volatile__("hlt": : :"memory")
+#include <linux/irqflags.h>

 void cpu_idle_wait(void);
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
index ee51d31528d..53e9a68b333 100644
--- a/include/asm-x86_64/tce.h
+++ b/include/asm-x86_64/tce.h
@@ -1,9 +1,11 @@
 /*
- * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
- * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
- *
  * This file is derived from asm-powerpc/tce.h.
  *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ *
  * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
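
system.h now pulls all of this in from <linux/irqflags.h>. The VSMP variant it drops (and irqflags.h re-adds) never executes cli/sti; it rewrites two RFLAGS bits instead: IF (bit 9) and AC (bit 18), with "disabled" meaning IF clear or AC set. A sketch of just that bit logic on sample values -- plain arithmetic, no privileged instructions, and the helper names are illustrative:

#include <stdio.h>

#define IF_BIT	(1UL << 9)	/* interrupt flag */
#define AC_BIT	(1UL << 18)	/* alignment check, reused as a marker */

static unsigned long vsmp_disable(unsigned long flags)
{
	return (flags & ~IF_BIT) | AC_BIT;	/* clear IF, set AC */
}

static unsigned long vsmp_enable(unsigned long flags)
{
	return (flags | IF_BIT) & ~AC_BIT;	/* set IF, clear AC */
}

static int vsmp_irqs_disabled(unsigned long flags)
{
	return !(flags & IF_BIT) || (flags & AC_BIT) != 0;
}

int main(void)
{
	unsigned long flags = IF_BIT;		/* start "enabled" */

	printf("enabled:  disabled()=%d\n", vsmp_irqs_disabled(flags));
	flags = vsmp_disable(flags);
	printf("disabled: flags=%#lx disabled()=%d\n", flags,
	       vsmp_irqs_disabled(flags));
	flags = vsmp_enable(flags);
	printf("enabled:  flags=%#lx disabled()=%d\n", flags,
	       vsmp_irqs_disabled(flags));
	return 0;
}
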
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 94387c915e5..80fd48e84bb 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -821,8 +821,6 @@ asmlinkage long sys_fork(struct pt_regs regs);
 asmlinkage long sys_vfork(struct pt_regs regs);
 asmlinkage long sys_pipe(int *fildes);

-#endif /* __KERNEL_SYSCALLS__ */
-
 #ifndef __ASSEMBLY__

 #include <linux/linkage.h>
@@ -838,9 +836,9 @@ asmlinkage long sys_rt_sigaction(int sig,
 				struct sigaction __user *oact,
 				size_t sigsetsize);

-#endif /* __ASSEMBLY__ */
+#endif	/* __ASSEMBLY__ */

-#endif /* __NO_STUBS */
+#endif	/* __KERNEL_SYSCALLS__ */

 /*
  * "Conditional" syscalls
@@ -850,5 +848,8 @@ asmlinkage long sys_rt_sigaction(int sig,
  */
 #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")

+#endif	/* __NO_STUBS */
+
 #endif /* __KERNEL__ */
-#endif
+
+#endif /* _ASM_X86_64_UNISTD_H_ */
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index f3e7124effe..1f6e9bfb569 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -95,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 #else

 #define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)

 static inline int arch_unw_user_mode(const void *info)
 {
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index a85e16f56d7..146b24402a5 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -1,8 +1,6 @@
 #ifndef _ASM_X86_64_VSYSCALL_H_
 #define _ASM_X86_64_VSYSCALL_H_

-#include <linux/seqlock.h>
-
 enum vsyscall_num {
 	__NR_vgettimeofday,
 	__NR_vtime,
@@ -14,6 +12,7 @@ enum vsyscall_num {
 #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))

 #ifdef __KERNEL__
+#include <linux/seqlock.h>

 #define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
 #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
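
The relocated cond_syscall() in unistd.h is a weak-alias trick: it declares the syscall symbol weak and binds it to sys_ni_syscall, so references resolve to the ENOSYS stub unless some object file really defines the syscall. The same trick works in a plain userspace program, assuming GCC/GAS on Linux (sys_foo is a made-up name):

#include <stdio.h>

long sys_ni_syscall(void)
{
	return -38;	/* -ENOSYS */
}

/* Weakly bind sys_foo to sys_ni_syscall -- the same directives that
 * cond_syscall() emits. A strong definition of sys_foo in another
 * object file would override this fallback at link time. */
asm(".weak\tsys_foo\n\t.set\tsys_foo,sys_ni_syscall");
long sys_foo(void);

int main(void)
{
	printf("sys_foo() = %ld\n", sys_foo());	/* -38: fell back to the stub */
	return 0;
}
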