author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 16:38:03 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 16:38:03 -0700
commit     a7aed1c2dc4939d1d61285c738ad32700d791692 (patch)
tree       a64cda4c4dd29137a09f06a8c1d5db7cd20e7da5 /include
parent     1212663fba7c5e003e05d24f043d5ed57eb18b24 (diff)
parent     1b82ba6e47c13ee369a4808f72d003499f8c7920 (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (35 commits)
  x86: Add HPET force support for MCP55 (nForce 5) chipsets
  x86: Force enable HPET for CK804 (nForce 4) chipsets
  x86: clean up setup.h and the boot code
  x86: Save registers in saved_context during suspend and hibernation
  x86: merge setup_32/64.h
  x86: merge signal_32/64.h
  x86: merge required-features.h
  x86: merge sigcontext_32/64.h
  x86: merge msr_32/64.h
  x86: merge mttr_32/64.h
  x86: merge statfs_32/64.h
  x86: merge stat_32/64.h
  x86: merge shmbuf_32/64.h
  x86: merge ptrace_32/64.h
  x86: merge msgbuf_32/64.h
  x86: merge elf_32/64.h
  x86: merge byteorder_32/64.h
  x86: whitespace cleanup of mce_64.c
  x86: consolidate the cpu/ related code usage
  x86: prepare consolidation of cpu/ related code usage
  ...
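The whole-series pattern behind the "merge *_32/64.h" commits above is visible in the hunks below (a.out.h, byteorder.h, div64.h, elf.h): the old dispatcher headers that merely included a _32 or _64 variant are removed, and a single header carries both variants behind the same guards. A minimal sketch of that shape, using a hypothetical foo.h rather than any file actually touched in this merge:

/* Before: asm-x86/foo.h only picked one of two real headers (sketch). */
#ifdef CONFIG_X86_32
# include "foo_32.h"
#else
# include "foo_64.h"
#endif

/* After: one asm-x86/foo.h holds both variants behind the same guard (sketch). */
#ifndef _ASM_X86_FOO_H
#define _ASM_X86_FOO_H

#ifdef CONFIG_X86_32
/* 32-bit definitions live here */
#else
/* 64-bit definitions live here */
#endif

#endif /* _ASM_X86_FOO_H */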
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/Kbuild | 27
-rw-r--r--  include/asm-x86/a.out.h | 33
-rw-r--r--  include/asm-x86/a.out_32.h | 27
-rw-r--r--  include/asm-x86/a.out_64.h | 28
-rw-r--r--  include/asm-x86/apic_64.h | 1
-rw-r--r--  include/asm-x86/bitops_32.h | 43
-rw-r--r--  include/asm-x86/bitops_64.h | 42
-rw-r--r--  include/asm-x86/byteorder.h | 81
-rw-r--r--  include/asm-x86/byteorder_32.h | 58
-rw-r--r--  include/asm-x86/byteorder_64.h | 33
-rw-r--r--  include/asm-x86/div64.h | 60
-rw-r--r--  include/asm-x86/div64_32.h | 52
-rw-r--r--  include/asm-x86/div64_64.h | 1
-rw-r--r--  include/asm-x86/elf.h | 299
-rw-r--r--  include/asm-x86/elf_32.h | 165
-rw-r--r--  include/asm-x86/elf_64.h | 180
-rw-r--r--  include/asm-x86/mmu.h | 26
-rw-r--r--  include/asm-x86/mmu_32.h | 18
-rw-r--r--  include/asm-x86/mmu_64.h | 21
-rw-r--r--  include/asm-x86/msgbuf.h | 50
-rw-r--r--  include/asm-x86/msgbuf_32.h | 31
-rw-r--r--  include/asm-x86/msgbuf_64.h | 27
-rw-r--r--  include/asm-x86/msr.h | 357
-rw-r--r--  include/asm-x86/msr_32.h | 161
-rw-r--r--  include/asm-x86/msr_64.h | 187
-rw-r--r--  include/asm-x86/mtrr.h | 175
-rw-r--r--  include/asm-x86/mtrr_32.h | 115
-rw-r--r--  include/asm-x86/mtrr_64.h | 152
-rw-r--r--  include/asm-x86/ptrace.h | 151
-rw-r--r--  include/asm-x86/ptrace_32.h | 65
-rw-r--r--  include/asm-x86/ptrace_64.h | 80
-rw-r--r--  include/asm-x86/required-features.h | 73
-rw-r--r--  include/asm-x86/required-features_32.h | 55
-rw-r--r--  include/asm-x86/required-features_64.h | 46
-rw-r--r--  include/asm-x86/setup.h | 72
-rw-r--r--  include/asm-x86/setup_32.h | 63
-rw-r--r--  include/asm-x86/setup_64.h | 19
-rw-r--r--  include/asm-x86/shmbuf.h | 62
-rw-r--r--  include/asm-x86/shmbuf_32.h | 42
-rw-r--r--  include/asm-x86/shmbuf_64.h | 38
-rw-r--r--  include/asm-x86/sigcontext.h | 149
-rw-r--r--  include/asm-x86/sigcontext_32.h | 85
-rw-r--r--  include/asm-x86/sigcontext_64.h | 55
-rw-r--r--  include/asm-x86/signal.h | 273
-rw-r--r--  include/asm-x86/signal_32.h | 232
-rw-r--r--  include/asm-x86/signal_64.h | 181
-rw-r--r--  include/asm-x86/smp_64.h | 2
-rw-r--r--  include/asm-x86/stat.h | 123
-rw-r--r--  include/asm-x86/stat_32.h | 77
-rw-r--r--  include/asm-x86/stat_64.h | 44
-rw-r--r--  include/asm-x86/statfs.h | 72
-rw-r--r--  include/asm-x86/statfs_32.h | 6
-rw-r--r--  include/asm-x86/statfs_64.h | 58
-rw-r--r--  include/asm-x86/suspend_64.h | 23
54 files changed, 1998 insertions(+), 2598 deletions(-)
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 5e3539c129b..12db5a1cdd7 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -11,41 +11,16 @@ header-y += sigcontext32.h
header-y += ucontext.h
header-y += vsyscall32.h
-unifdef-y += a.out_32.h
-unifdef-y += a.out_64.h
-unifdef-y += byteorder_32.h
-unifdef-y += byteorder_64.h
unifdef-y += e820.h
-unifdef-y += elf_32.h
-unifdef-y += elf_64.h
unifdef-y += ist.h
unifdef-y += mce.h
-unifdef-y += msgbuf_32.h
-unifdef-y += msgbuf_64.h
-unifdef-y += msr_32.h
-unifdef-y += msr_64.h
unifdef-y += msr.h
-unifdef-y += mtrr_32.h
-unifdef-y += mtrr_64.h
unifdef-y += mtrr.h
unifdef-y += page_32.h
unifdef-y += page_64.h
unifdef-y += posix_types_32.h
unifdef-y += posix_types_64.h
-unifdef-y += ptrace_32.h
-unifdef-y += ptrace_64.h
-unifdef-y += setup_32.h
-unifdef-y += setup_64.h
-unifdef-y += shmbuf_32.h
-unifdef-y += shmbuf_64.h
-unifdef-y += sigcontext_32.h
-unifdef-y += sigcontext_64.h
-unifdef-y += signal_32.h
-unifdef-y += signal_64.h
-unifdef-y += stat_32.h
-unifdef-y += stat_64.h
-unifdef-y += statfs_32.h
-unifdef-y += statfs_64.h
+unifdef-y += ptrace.h
unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
unifdef-y += user_32.h
diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h
index 5bc9b1d3b22..a62443e38eb 100644
--- a/include/asm-x86/a.out.h
+++ b/include/asm-x86/a.out.h
@@ -1,13 +1,30 @@
+#ifndef _ASM_X86_A_OUT_H
+#define _ASM_X86_A_OUT_H
+
+struct exec
+{
+ unsigned int a_info; /* Use macros N_MAGIC, etc for access */
+ unsigned a_text; /* length of text, in bytes */
+ unsigned a_data; /* length of data, in bytes */
+ unsigned a_bss; /* length of uninitialized data area for file, in bytes */
+ unsigned a_syms; /* length of symbol table data in file, in bytes */
+ unsigned a_entry; /* start address */
+ unsigned a_trsize; /* length of relocation info for text, in bytes */
+ unsigned a_drsize; /* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a) ((a).a_trsize)
+#define N_DRSIZE(a) ((a).a_drsize)
+#define N_SYMSIZE(a) ((a).a_syms)
+
#ifdef __KERNEL__
+# include <linux/thread_info.h>
+# define STACK_TOP TASK_SIZE
# ifdef CONFIG_X86_32
-# include "a.out_32.h"
+# define STACK_TOP_MAX STACK_TOP
# else
-# include "a.out_64.h"
-# endif
-#else
-# ifdef __i386__
-# include "a.out_32.h"
-# else
-# include "a.out_64.h"
+# define STACK_TOP_MAX TASK_SIZE64
# endif
#endif
+
+#endif /* _ASM_X86_A_OUT_H */
diff --git a/include/asm-x86/a.out_32.h b/include/asm-x86/a.out_32.h
deleted file mode 100644
index 851a60f8258..00000000000
--- a/include/asm-x86/a.out_32.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __I386_A_OUT_H__
-#define __I386_A_OUT_H__
-
-struct exec
-{
- unsigned long a_info; /* Use macros N_MAGIC, etc for access */
- unsigned a_text; /* length of text, in bytes */
- unsigned a_data; /* length of data, in bytes */
- unsigned a_bss; /* length of uninitialized data area for file, in bytes */
- unsigned a_syms; /* length of symbol table data in file, in bytes */
- unsigned a_entry; /* start address */
- unsigned a_trsize; /* length of relocation info for text, in bytes */
- unsigned a_drsize; /* length of relocation info for data, in bytes */
-};
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#ifdef __KERNEL__
-
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
-
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-x86/a.out_64.h b/include/asm-x86/a.out_64.h
deleted file mode 100644
index e789300e41a..00000000000
--- a/include/asm-x86/a.out_64.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __X8664_A_OUT_H__
-#define __X8664_A_OUT_H__
-
-/* 32bit a.out */
-
-struct exec
-{
- unsigned int a_info; /* Use macros N_MAGIC, etc for access */
- unsigned a_text; /* length of text, in bytes */
- unsigned a_data; /* length of data, in bytes */
- unsigned a_bss; /* length of uninitialized data area for file, in bytes */
- unsigned a_syms; /* length of symbol table data in file, in bytes */
- unsigned a_entry; /* start address */
- unsigned a_trsize; /* length of relocation info for text, in bytes */
- unsigned a_drsize; /* length of relocation info for data, in bytes */
-};
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#ifdef __KERNEL__
-#include <linux/thread_info.h>
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX TASK_SIZE64
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-x86/apic_64.h b/include/asm-x86/apic_64.h
index 3c8f21eef0b..2747a11a2b1 100644
--- a/include/asm-x86/apic_64.h
+++ b/include/asm-x86/apic_64.h
@@ -69,6 +69,7 @@ extern void clear_local_APIC (void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (int virt_wire_setup);
extern void disable_local_APIC (void);
+extern void lapic_shutdown (void);
extern int verify_local_APIC (void);
extern void cache_APIC_registers (void);
extern void sync_Arb_IDs (void);
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 3268a341cf4..36ebb5b02b4 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -80,6 +80,20 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
:"Ir" (nr));
}
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ barrier();
+ clear_bit(nr, addr);
+}
+
static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__(
@@ -87,6 +101,25 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
:"+m" (ADDR)
:"Ir" (nr));
}
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ barrier();
+ __clear_bit(nr, addr);
+}
+
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
@@ -146,6 +179,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
}
/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
@@ -406,7 +448,6 @@ static inline int fls(int x)
}
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index dacaa5f1feb..b4d47940b95 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
:"dIr" (nr));
}
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ barrier();
+ clear_bit(nr, addr);
+}
+
static __inline__ void __clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
@@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
:"dIr" (nr));
}
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ barrier();
+ __clear_bit(nr, addr);
+}
+
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
@@ -137,6 +169,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
}
/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
@@ -412,7 +453,6 @@ static __inline__ int fls(int x)
#define ARCH_HAS_FAST_MULTIPLIER 1
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
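Both bitops headers gain the same lock-flavoured helpers: test_and_set_bit_lock() to take a bit-lock and clear_bit_unlock()/__clear_bit_unlock() to release it with release semantics, in place of the generic asm-generic/bitops/lock.h include. A small usage sketch (not part of this patch; the flag word and bit number are illustrative):

/* Acquire: spin until we are the caller that flipped bit 0 from 0 to 1. */
static unsigned long example_flags;

static void example_lock(void)
{
	while (test_and_set_bit_lock(0, &example_flags))
		cpu_relax();
}

/* Release: clear_bit_unlock() orders the stores made inside the critical
 * section before the bit is cleared, so no extra barrier is needed here. */
static void example_unlock(void)
{
	clear_bit_unlock(0, &example_flags);
}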
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index eb14b1870ed..1f2d6d5bf20 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -1,13 +1,72 @@
-#ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-# include "byteorder_32.h"
-# else
-# include "byteorder_64.h"
-# endif
+#ifndef _ASM_X86_BYTEORDER_H
+#define _ASM_X86_BYTEORDER_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#ifdef __GNUC__
+
+#ifdef __i386__
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+#ifdef CONFIG_X86_BSWAP
+ __asm__("bswap %0" : "=r" (x) : "0" (x));
#else
-# ifdef __i386__
-# include "byteorder_32.h"
-# else
-# include "byteorder_64.h"
-# endif
+ __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
+ "rorl $16,%0\n\t" /* swap words */
+ "xchgb %b0,%h0" /* swap higher bytes */
+ :"=q" (x)
+ : "0" (x));
#endif
+ return x;
+}
+
+static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
+{
+ union {
+ struct { __u32 a,b; } s;
+ __u64 u;
+ } v;
+ v.u = val;
+#ifdef CONFIG_X86_BSWAP
+ asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+ : "=r" (v.s.a), "=r" (v.s.b)
+ : "0" (v.s.a), "1" (v.s.b));
+#else
+ v.s.a = ___arch__swab32(v.s.a);
+ v.s.b = ___arch__swab32(v.s.b);
+ asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+#endif
+ return v.u;
+}
+
+#else /* __i386__ */
+
+static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
+{
+ __asm__("bswapq %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+ __asm__("bswapl %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+#endif
+
+/* Do not define swab16. Gcc is smart enough to recognize "C" version and
+ convert it into rotation or exhange. */
+
+#define __arch__swab64(x) ___arch__swab64(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_X86_BYTEORDER_H */
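The unified byteorder.h keeps the two arch swab helpers and then pulls in the generic little-endian conversion macros. In value terms they reverse byte order, so a conversion round-trip looks like this sketch (not patch code; the constant and function name are only an example):

#include <asm/byteorder.h>

static void byteorder_example(void)
{
	__u32 host = 0x12345678;
	__u32 wire = __cpu_to_be32(host);	/* bytes reversed: 0x78563412 on x86 */
	__u32 back = __be32_to_cpu(wire);	/* 0x12345678 again */

	(void)back;
}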
diff --git a/include/asm-x86/byteorder_32.h b/include/asm-x86/byteorder_32.h
deleted file mode 100644
index a45470a8b74..00000000000
--- a/include/asm-x86/byteorder_32.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _I386_BYTEORDER_H
-#define _I386_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-/* For avoiding bswap on i386 */
-#ifdef __KERNEL__
-#endif
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-#ifdef CONFIG_X86_BSWAP
- __asm__("bswap %0" : "=r" (x) : "0" (x));
-#else
- __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
- "rorl $16,%0\n\t" /* swap words */
- "xchgb %b0,%h0" /* swap higher bytes */
- :"=q" (x)
- : "0" (x));
-#endif
- return x;
-}
-
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
-{
- union {
- struct { __u32 a,b; } s;
- __u64 u;
- } v;
- v.u = val;
-#ifdef CONFIG_X86_BSWAP
- asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
- : "=r" (v.s.a), "=r" (v.s.b)
- : "0" (v.s.a), "1" (v.s.b));
-#else
- v.s.a = ___arch__swab32(v.s.a);
- v.s.b = ___arch__swab32(v.s.b);
- asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
-#endif
- return v.u;
-}
-
-/* Do not define swab16. Gcc is smart enough to recognize "C" version and
- convert it into rotation or exhange. */
-
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#define __BYTEORDER_HAS_U64__
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/little_endian.h>
-
-#endif /* _I386_BYTEORDER_H */
diff --git a/include/asm-x86/byteorder_64.h b/include/asm-x86/byteorder_64.h
deleted file mode 100644
index 5e86c868c75..00000000000
--- a/include/asm-x86/byteorder_64.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _X86_64_BYTEORDER_H
-#define _X86_64_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
- __asm__("bswapq %0" : "=r" (x) : "0" (x));
- return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
- __asm__("bswapl %0" : "=r" (x) : "0" (x));
- return x;
-}
-
-/* Do not define swab16. Gcc is smart enough to recognize "C" version and
- convert it into rotation or exhange. */
-
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab64(x) ___arch__swab64(x)
-
-#endif /* __GNUC__ */
-
-#define __BYTEORDER_HAS_U64__
-
-#include <linux/byteorder/little_endian.h>
-
-#endif /* _X86_64_BYTEORDER_H */
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index 8ac7da6ca28..e98d16e7a37 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -1,5 +1,59 @@
+#ifndef _ASM_X86_DIV64_H
+#define _ASM_X86_DIV64_H
+
#ifdef CONFIG_X86_32
-# include "div64_32.h"
+
+#include <linux/types.h>
+
+/*
+ * do_div() is NOT a C function. It wants to return
+ * two values (the quotient and the remainder), but
+ * since that doesn't work very well in C, what it
+ * does is:
+ *
+ * - modifies the 64-bit dividend _in_place_
+ * - returns the 32-bit remainder
+ *
+ * This ends up being the most efficient "calling
+ * convention" on x86.
+ */
+#define do_div(n,base) ({ \
+ unsigned long __upper, __low, __high, __mod, __base; \
+ __base = (base); \
+ asm("":"=a" (__low), "=d" (__high):"A" (n)); \
+ __upper = __high; \
+ if (__high) { \
+ __upper = __high % (__base); \
+ __high = __high / (__base); \
+ } \
+ asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
+ asm("":"=A" (n):"a" (__low),"d" (__high)); \
+ __mod; \
+})
+
+/*
+ * (long)X = ((long long)divs) / (long)div
+ * (long)rem = ((long long)divs) % (long)div
+ *
+ * Warning, this will do an exception if X overflows.
+ */
+#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
+
+static inline long
+div_ll_X_l_rem(long long divs, long div, long *rem)
+{
+ long dum2;
+ __asm__("divl %2":"=a"(dum2), "=d"(*rem)
+ : "rm"(div), "A"(divs));
+
+ return dum2;
+
+}
+
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
+
#else
-# include "div64_64.h"
-#endif
+# include <asm-generic/div64.h>
+#endif /* CONFIG_X86_32 */
+
+#endif /* _ASM_X86_DIV64_H */
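The long comment above do_div() is the whole contract: the macro divides the 64-bit variable in place and evaluates to the 32-bit remainder, on both the inline 32-bit version and the asm-generic 64-bit path. A typical call looks like this sketch (not part of the patch; the helper name and unit choice are illustrative):

#include <asm/div64.h>

static unsigned long ns_to_ms(unsigned long long ns)
{
	unsigned long rem;

	rem = do_div(ns, 1000000);	/* ns now holds the quotient */
	(void)rem;			/* remainder, in nanoseconds */
	return (unsigned long)ns;
}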
diff --git a/include/asm-x86/div64_32.h b/include/asm-x86/div64_32.h
deleted file mode 100644
index 438e980068b..00000000000
--- a/include/asm-x86/div64_32.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __I386_DIV64
-#define __I386_DIV64
-
-#include <linux/types.h>
-
-/*
- * do_div() is NOT a C function. It wants to return
- * two values (the quotient and the remainder), but
- * since that doesn't work very well in C, what it
- * does is:
- *
- * - modifies the 64-bit dividend _in_place_
- * - returns the 32-bit remainder
- *
- * This ends up being the most efficient "calling
- * convention" on x86.
- */
-#define do_div(n,base) ({ \
- unsigned long __upper, __low, __high, __mod, __base; \
- __base = (base); \
- asm("":"=a" (__low), "=d" (__high):"A" (n)); \
- __upper = __high; \
- if (__high) { \
- __upper = __high % (__base); \
- __high = __high / (__base); \
- } \
- asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
- asm("":"=A" (n):"a" (__low),"d" (__high)); \
- __mod; \
-})
-
-/*
- * (long)X = ((long long)divs) / (long)div
- * (long)rem = ((long long)divs) % (long)div
- *
- * Warning, this will do an exception if X overflows.
- */
-#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
-
-static inline long
-div_ll_X_l_rem(long long divs, long div, long *rem)
-{
- long dum2;
- __asm__("divl %2":"=a"(dum2), "=d"(*rem)
- : "rm"(div), "A"(divs));
-
- return dum2;
-
-}
-
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-#endif
diff --git a/include/asm-x86/div64_64.h b/include/asm-x86/div64_64.h
deleted file mode 100644
index 6cd978cefb2..00000000000
--- a/include/asm-x86/div64_64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index ed6bb6e546b..ec42a4d2e83 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -1,13 +1,290 @@
-#ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-# include "elf_32.h"
-# else
-# include "elf_64.h"
-# endif
+#ifndef _ASM_X86_ELF_H
+#define _ASM_X86_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/auxvec.h>
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_i387_struct elf_fpregset_t;
+
+#ifdef __i386__
+
+typedef struct user_fxsr_struct elf_fpxregset_t;
+
+#define R_386_NONE 0
+#define R_386_32 1
+#define R_386_PC32 2
+#define R_386_GOT32 3
+#define R_386_PLT32 4
+#define R_386_COPY 5
+#define R_386_GLOB_DAT 6
+#define R_386_JMP_SLOT 7
+#define R_386_RELATIVE 8
+#define R_386_GOTOFF 9
+#define R_386_GOTPC 10
+#define R_386_NUM 11
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_386
+
#else
-# ifdef __i386__
-# include "elf_32.h"
-# else