Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/Kbuild             |   1
-rw-r--r--   include/asm-i386/dma-mapping.h      |   9
-rw-r--r--   include/asm-i386/fixmap.h           |   7
-rw-r--r--   include/asm-i386/mmzone.h           |   6
-rw-r--r--   include/asm-i386/pgtable-2level.h   |   3
-rw-r--r--   include/asm-i386/pgtable-3level.h   |   4
-rw-r--r--   include/asm-i386/pgtable.h          |  45
-rw-r--r--   include/asm-i386/processor.h        |  60
-rw-r--r--   include/asm-i386/ptrace-abi.h       |  39
-rw-r--r--   include/asm-i386/ptrace.h           |  35
-rw-r--r--   include/asm-i386/sync_bitops.h      | 156
-rw-r--r--   include/asm-i386/system.h           |  36
12 files changed, 302 insertions, 99 deletions
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index b75a348d0c1..147e4ac1ebf 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -3,6 +3,7 @@ include include/asm-generic/Kbuild.asm
 header-y += boot.h
 header-y += debugreg.h
 header-y += ldt.h
+header-y += ptrace-abi.h
 header-y += ucontext.h
 
 unifdef-y += mtrr.h
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 9cf20cacf76..576ae01d71c 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -21,8 +21,7 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 	WARN_ON(size == 0);
 	flush_write_buffers();
 	return virt_to_phys(ptr);
@@ -32,8 +31,7 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
 static inline int
@@ -42,8 +40,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 	WARN_ON(nents == 0 || sg[0].length == 0);
 
 	for (i = 0; i < nents; i++ ) {
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index a48cc3f7ccc..02428cb3662 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -19,7 +19,11 @@
  * Leave one empty page between vmalloc'ed areas and
  * the start of the fixmap.
  */
-#define __FIXADDR_TOP	0xfffff000
+#ifndef CONFIG_COMPAT_VDSO
+extern unsigned long __FIXADDR_TOP;
+#else
+#define __FIXADDR_TOP	0xfffff000
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
@@ -93,6 +97,7 @@ enum fixed_addresses {
 
 extern void __set_fixmap (enum fixed_addresses idx,
 					unsigned long phys, pgprot_t flags);
+extern void reserve_top_address(unsigned long reserve);
 
 #define set_fixmap(idx, phys) \
 		__set_fixmap(idx, phys, PAGE_KERNEL)
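The fixmap change above replaces the constant __FIXADDR_TOP with a variable (except under CONFIG_COMPAT_VDSO, where the vdso must stay at a fixed address), so that a hypervisor can claim the top of the kernel's virtual address space, and reserve_top_address() is the hook through which that claim would be made. A minimal sketch of a caller, assuming a paravirt backend that runs early in boot; the function name and reservation size here are hypothetical, not part of this patch:

    /* Hypothetical early-boot caller: cede the top 16 MB of virtual
     * address space to the hypervisor.  This must run before the first
     * fixmap entry is established, since it moves __FIXADDR_TOP down. */
    static void __init example_reserve_hypervisor_hole(void)
    {
        reserve_top_address(16 * 1024 * 1024);
    }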
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 22cb07cc8f3..61b07332200 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -38,10 +38,16 @@ static inline void get_memcfg_numa(void)
 }
 
 extern int early_pfn_to_nid(unsigned long pfn);
+extern void numa_kva_reserve(void);
 
 #else /* !CONFIG_NUMA */
+
 #define get_memcfg_numa get_memcfg_numa_flat
 #define get_zholes_size(n) (0)
+
+static inline void numa_kva_reserve(void)
+{
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DISCONTIGMEM
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 2756d4b04c2..201c86a6711 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -21,8 +21,9 @@
 #define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte_low, 0))
-#define pte_same(a, b)		((a).pte_low == (b).pte_low)
+
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define pte_none(x)		(!(x).pte_low)
 #define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index dccb1b3337a..0d899173232 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -77,7 +77,7 @@ static inline void pud_clear (pud_t * pud) { }
 #define pud_page(pud) \
 ((struct page *) __va(pud_val(pud) & PAGE_MASK))
 
-#define pud_page_kernel(pud) \
+#define pud_page_vaddr(pud) \
 ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
 
 
@@ -105,6 +105,7 @@ static inline void pmd_clear(pmd_t *pmd)
 	*(tmp + 1) = 0;
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t res;
@@ -117,6 +118,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return res;
 }
 
+#define __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t a, pte_t b)
 {
 	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 09697fec3d2..0dc051a8078 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -246,6 +246,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 # include <asm/pgtable-2level.h>
 #endif
 
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty)	\
+do {								\
+	if (dirty) {						\
+		(ptep)->pte_low = (entry).pte_low;		\
+		flush_tlb_page(vma, address);			\
+	}							\
+} while (0)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_dirty(*ptep))
@@ -253,6 +270,7 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
@@ -260,6 +278,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
 	pte_t pte;
@@ -272,6 +291,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 	return pte;
 }
 
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
@@ -364,11 +384,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_index(address) \
 		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 
 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 
-#define pmd_page_kernel(pmd) \
+#define pmd_page_vaddr(pmd) \
 		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
 /*
@@ -411,23 +431,8 @@ extern void noexec_setup(const char *str);
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
- *
- * Also, we only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
 */
 #define update_mmu_cache(vma,address,pte) do { } while (0)
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do {								  \
-		if (__dirty) {						  \
-			(__ptep)->pte_low = (__entry).pte_low;		  \
-			flush_tlb_page(__vma, __address);		  \
-		}							  \
-	} while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
@@ -441,12 +446,6 @@ extern void noexec_setup(const char *str);
 #define GET_IOSPACE(pfn)		0
 #define GET_PFN(pfn)			(pfn)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
 
 #endif /* _I386_PGTABLE_H */
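The __HAVE_ARCH_* moves in the pgtable hunks above follow the asm-generic override convention: include/asm-generic/pgtable.h only compiles its generic helper when the architecture has not claimed it, so the marker must already be defined when that header is included. Keeping each marker next to its definition (all of which precede the final #include) makes it far harder for the two to drift apart. The pattern, with the arch side taken from the pgtable-3level.h hunk above and the generic side paraphrased from asm-generic/pgtable.h of this era:

    /* In the arch header, before asm-generic/pgtable.h is pulled in: */
    #define __HAVE_ARCH_PTE_SAME
    static inline int pte_same(pte_t a, pte_t b)
    {
        return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
    }

    /* In asm-generic/pgtable.h: provide the fallback only when the
     * architecture did not supply its own implementation. */
    #ifndef __HAVE_ARCH_PTE_SAME
    #define pte_same(A,B)	(pte_val(A) == pte_val(B))
    #endif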
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index b32346d62e1..2277127696d 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -143,6 +143,18 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+			   unsigned int *ecx, unsigned int *edx)
+{
+	/* ecx is often an input as well as an output. */
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (*eax), "2" (*ecx));
+}
+
 /*
  * Generic CPUID function
  * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
@@ -150,24 +162,18 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
  */
 static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
 {
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op), "c"(0));
+	*eax = op;
+	*ecx = 0;
+	__cpuid(eax, ebx, ecx, edx);
 }
 
 /* Some CPUID calls want 'count' to be placed in ecx */
 static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-		int *edx)
+			       int *edx)
 {
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op), "c" (count));
+	*eax = op;
+	*ecx = count;
+	__cpuid(eax, ebx, ecx, edx);
 }
 
 /*
@@ -175,42 +181,30 @@ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
  */
 static inline unsigned int cpuid_eax(unsigned int op)
 {
-	unsigned int eax;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax)
-		: "0" (op)
-		: "bx", "cx", "dx");
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return eax;
 }
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
-	unsigned int eax, ebx;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax), "=b" (ebx)
-		: "0" (op)
-		: "cx", "dx" );
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return ebx;
 }
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
-	unsigned int eax, ecx;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax), "=c" (ecx)
-		: "0" (op)
-		: "bx", "dx" );
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return ecx;
 }
 static inline unsigned int cpuid_edx(unsigned int op)
 {
-	unsigned int eax, edx;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax), "=d" (edx)
-		: "0" (op)
-		: "bx", "cx");
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return edx;
 }
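After this refactor, every cpuid variant funnels through __cpuid(), which treats %eax and %ecx as both inputs and outputs: the "0" and "2" constraints tie the inputs to the first and third output operands, so callers load the leaf and subleaf into the same variables that receive the results. A brief usage sketch, assuming ordinary kernel context; the function itself is illustrative and not part of the patch:

    /* Leaf 0: %eax returns the highest supported basic leaf, and
     * ebx/edx/ecx hold the 12-byte vendor string, in that order. */
    static void example_print_vendor(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        printk(KERN_INFO "vendor %s, max basic leaf %u\n", vendor, eax);
    }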
diff --git a/include/asm-i386/ptrace-abi.h b/include/asm-i386/ptrace-abi.h
new file mode 100644
index 00000000000..a44901817a2
--- /dev/null
+++ b/include/asm-i386/ptrace-abi.h
@@ -0,0 +1,39 @@
+#ifndef I386_PTRACE_ABI_H
+#define I386_PTRACE_ABI_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS  13
+#define EFL 14
+#define UESP 15
+#define SS   16
+#define FRAME_SIZE 17
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS            12
+#define PTRACE_SETREGS            13
+#define PTRACE_GETFPREGS          14
+#define PTRACE_SETFPREGS          15
+#define PTRACE_GETFPXREGS         18
+#define PTRACE_SETFPXREGS         19
+
+#define PTRACE_OLDSETOPTIONS      21
+
+#define PTRACE_GET_THREAD_AREA    25
+#define PTRACE_SET_THREAD_AREA    26
+
+#define PTRACE_SYSEMU             31
+#define PTRACE_SYSEMU_SINGLESTEP  32
+
+#endif
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index f324c53b6f9..1910880fcd4 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -1,24 +1,7 @@
 #ifndef _I386_PTRACE_H
 #define _I386_PTRACE_H
 
-#define EBX 0
-#define ECX 1
-#define EDX 2
-#define ESI 3
-#define EDI 4
-#define EBP 5
-#define EAX 6
-#define DS 7
-#define ES 8
-#define FS 9
-#define GS 10
-#define ORIG_EAX 11
-#define EIP 12
-#define CS  13
-#define EFL 14
-#define UESP 15
-#define SS   16
-#define FRAME_SIZE 17
+#include <asm/ptrace-abi.h>
 
 /* this struct defines the way the registers are stored on the
    stack during a system call. */
@@ -41,22 +24,6 @@ struct pt_regs {
 	int  xss;
 };
 
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS            12
-#define PTRACE_SETREGS            13
-#define PTRACE_GETFPREGS          14
-#define PTRACE_SETFPREGS          15
-#define PTRACE_GETFPXREGS         18
-#define PTRACE_SETFPXREGS         19
-
-#define PTRACE_OLDSETOPTIONS      21
-
-#define PTRACE_GET_THREAD_AREA    25
-#define PTRACE_SET_THREAD_AREA    26
-
-#define PTRACE_SYSEMU             31
-#define PTRACE_SYSEMU_SINGLESTEP  32
-
 #ifdef __KERNEL__
 
 #include <asm/vm86.h>
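The two hunks above split the user-visible constants out of ptrace.h into ptrace-abi.h, which the Kbuild change at the top of this patch exports via header-y; userspace tools (debuggers, strace-like tracers) can then pick up the register slot numbers and ptrace request codes without the __KERNEL__-only parts of ptrace.h. A userspace sketch of why the slot numbers matter, assuming a tracee already stopped under ptrace; illustrative only, not from the patch:

    /* Read the tracee's EIP: slot 12 of the saved register frame
     * (the EIP constant from ptrace-abi.h).  PTRACE_PEEKUSER takes
     * a byte offset into the USER area, hence the multiply. */
    #include <sys/ptrace.h>
    #include <sys/types.h>

    static long example_read_eip(pid_t pid)
    {
        return ptrace(PTRACE_PEEKUSER, pid,
                      (void *)(12 * sizeof(long)), NULL);
    }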
diff --git a/include/asm-i386/sync_bitops.h b/include/asm-i386/sync_bitops.h
new file mode 100644
index 00000000000..c94d51c993e
--- /dev/null
+++ b/include/asm-i386/sync_bitops.h
@@ -0,0 +1,156 @@
+#ifndef _I386_SYNC_BITOPS_H
+#define _I386_SYNC_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#define ADDR (*(volatile long *) addr)
+
+/**
+ * sync_set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btsl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * sync_clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btrl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * sync_change_bit() is atomic and may not be reordered. It may be
+ * reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btcl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * sync_test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * sync_test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr & 31)) &
+		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit)
+			     :"m" (ADDR),"Ir" (nr));
+	return oldbit;
+}
+
+#define sync_test_bit(nr,addr)			\
+	(__builtin_constant_p(nr) ?		\
+	 sync_constant_test_bit((nr),(addr)) :	\
+	 sync_var_test_bit((nr),(addr)))
+
+#undef ADDR
+
+#endif /* _I386_SYNC_BITOPS_H */
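sync_bitops.h duplicates the standard i386 bitops but hard-codes the lock prefix: the ordinary set_bit()/test_and_set_bit() use LOCK_PREFIX, which is compiled (or patched) away on uniprocessor kernels, and that is unsafe when the party racing on the word is a hypervisor or another guest rather than a second local CPU. A sketch of the intended kind of use; the shared bitmap and notification hook below are hypothetical, not part of the patch:

    /* Post an event in a bitmap shared with the hypervisor.  Even on
     * a UP guest the hypervisor can touch this word concurrently, so
     * the read-modify-write really must be locked. */
    static inline void example_post_event(volatile unsigned long *pending,
                                          int port)
    {
        if (!sync_test_and_set_bit(port, pending))
            example_notify_hypervisor(port);   /* hypothetical hook */
    }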
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 098bcee94e3..a6dabbcd6e6 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -267,6 +267,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 #define cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -296,6 +299,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+					   unsigned long old,
+					   unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable of running on an 80386. It may be necessary to
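sync_cmpxchg follows the same rule as the sync bitops: cmpxchg() relies on LOCK_PREFIX and can lose its lock prefix on UP builds, so memory shared with a hypervisor gets an always-locked variant. Note that __sync_cmpxchg() only handles sizes 1, 2 and 4; for any other size it falls through and returns old unchanged. A usage sketch, assuming a 4-byte counter in a shared page; the function is hypothetical, not from the patch:

    /* Lock-free increment of a counter the hypervisor also updates;
     * sync_cmpxchg keeps the lock prefix even on UP kernels. */
    static inline unsigned int example_shared_inc(volatile unsigned int *ctr)
    {
        unsigned int old;

        do {
            old = *ctr;
        } while (sync_cmpxchg(ctr, old, old + 1) != old);

        return old + 1;
    }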