Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/dma.h                 |   4
-rw-r--r--  arch/arm/include/asm/elf.h                 |   1
-rw-r--r--  arch/arm/include/asm/futex.h               | 137
-rw-r--r--  arch/arm/include/asm/hardware/timer-sp.h   |   4
-rw-r--r--  arch/arm/include/asm/memory.h              |  10
-rw-r--r--  arch/arm/include/asm/ptrace.h              |   6
-rw-r--r--  arch/arm/include/asm/sizes.h               |  42
-rw-r--r--  arch/arm/include/asm/smp.h                 |   6
-rw-r--r--  arch/arm/include/asm/spinlock.h            |   2
9 files changed, 111 insertions, 101 deletions
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index ca51143f97f..42005542932 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -6,8 +6,10 @@
/*
* This is the maximum virtual address which can be DMA'd from.
*/
-#ifndef MAX_DMA_ADDRESS
+#ifndef ARM_DMA_ZONE_SIZE
#define MAX_DMA_ADDRESS 0xffffffff
+#else
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
#endif
#ifdef CONFIG_ISA_DMA_API
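Note on the dma.h hunk above: MAX_DMA_ADDRESS is now derived from the platform's ARM_DMA_ZONE_SIZE instead of being overridden per machine. A worked example with hypothetical values (PAGE_OFFSET and the zone size below are illustrative, not taken from this patch):

    /* illustrative values only */
    #define PAGE_OFFSET        0xc0000000UL   /* start of kernel virtual space */
    #define ARM_DMA_ZONE_SIZE  0x10000000UL   /* 256 MiB DMA zone */

    /* With ARM_DMA_ZONE_SIZE defined, the new rule gives
     *   MAX_DMA_ADDRESS = PAGE_OFFSET + ARM_DMA_ZONE_SIZE = 0xd0000000;
     * without it, MAX_DMA_ADDRESS stays at 0xffffffff (no DMA limit).
     */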
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c3cd8755e64..0e9ce8d9686 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -108,6 +108,7 @@ struct task_struct;
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 199a6b6de7f..8c73900da9e 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,16 +3,74 @@
#ifdef __KERNEL__
+#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
+/* ARM doesn't provide unprivileged exclusive memory accessors */
+#include <asm-generic/futex.h>
+#else
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_ex_table(err_reg) \
+ "3:\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4f, 2b, 4f\n" \
+ " .popsection\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ "4: mov %0, " err_reg "\n" \
+ " b 3b\n" \
+ " .popsection"
+
#ifdef CONFIG_SMP
-#include <asm-generic/futex.h>
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldrex %1, [%2]\n" \
+ " " insn "\n" \
+ "2: strex %1, %0, [%2]\n" \
+ " teq %1, #0\n" \
+ " bne 1b\n" \
+ " mov %0, #0\n" \
+ __futex_atomic_ex_table("%4") \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret;
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ smp_mb();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldrex %1, [%4]\n"
+ " teq %1, %2\n"
+ " ite eq @ explicit IT needed for the 2b label\n"
+ "2: strexeq %0, %3, [%4]\n"
+ " movne %0, #0\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ __futex_atomic_ex_table("%5")
+ : "=&r" (ret), "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+ smp_mb();
+
+ *uval = val;
+ return ret;
+}
#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
-#include <linux/futex.h>
#include <linux/preempt.h>
-#include <linux/uaccess.h>
-#include <asm/errno.h>
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
@@ -21,20 +79,38 @@
" " insn "\n" \
"2: " T(str) " %0, [%2]\n" \
" mov %0, #0\n" \
- "3:\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4f, 2b, 4f\n" \
- " .popsection\n" \
- " .pushsection .fixup,\"ax\"\n" \
- "4: mov %0, %4\n" \
- " b 3b\n" \
- " .popsection" \
+ __futex_atomic_ex_table("%4") \
: "=&r" (ret), "=&r" (oldval) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " T(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+ " it eq @ explicit IT needed for the 2b label\n"
+ "2: " T(streq) " %3, [%4]\n"
+ __futex_atomic_ex_table("%5")
+ : "+r" (ret), "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* !SMP */
+
+static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
@@ -87,39 +163,6 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- int ret = 0;
- u32 val;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
- __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
- "1: " T(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
- " it eq @ explicit IT needed for the 2b label\n"
- "2: " T(streq) " %3, [%4]\n"
- "3:\n"
- " .pushsection __ex_table,\"a\"\n"
- " .align 3\n"
- " .long 1b, 4f, 2b, 4f\n"
- " .popsection\n"
- " .pushsection .fixup,\"ax\"\n"
- "4: mov %0, %5\n"
- " b 3b\n"
- " .popsection"
- : "+r" (ret), "=&r" (val)
- : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
- : "cc", "memory");
-
- *uval = val;
- return ret;
-}
-
-#endif /* !SMP */
-
+#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
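Note on the futex.h hunks above: the SMP case now has a real ldrex/strex implementation instead of falling back to asm-generic/futex.h, and the exception-table boilerplate shared by all variants is factored into __futex_atomic_ex_table(). Stripped of atomicity and fault handling, the contract that both versions of futex_atomic_cmpxchg_inatomic() implement is just the following sketch (plain C, not kernel code):

    #include <stdint.h>

    /* The kernel performs this atomically -- an ldrex/strex retry loop on
     * SMP, or preemption disabled plus T()-decorated user accesses on UP --
     * and returns -EFAULT via the __ex_table fixup if the access faults. */
    static int cmpxchg_contract(uint32_t *uval, uint32_t *uaddr,
                                uint32_t oldval, uint32_t newval)
    {
            uint32_t val = *uaddr;          /* "1:" load of the current value */
            if (val == oldval)              /* teq: store only on a match     */
                    *uaddr = newval;        /* "2:" conditional store         */
            *uval = val;                    /* report the value that was seen */
            return 0;
    }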
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
index 21e75e30d49..4384d81eee7 100644
--- a/arch/arm/include/asm/hardware/timer-sp.h
+++ b/arch/arm/include/asm/hardware/timer-sp.h
@@ -1,2 +1,2 @@
-void sp804_clocksource_init(void __iomem *);
-void sp804_clockevents_init(void __iomem *, unsigned int);
+void sp804_clocksource_init(void __iomem *, const char *);
+void sp804_clockevents_init(void __iomem *, unsigned int, const char *);
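Note on the timer-sp.h hunk above: both SP804 init routines gain a const char * name that is used when registering the clocksource/clockevent. A hypothetical board file would now call them roughly like this (the base addresses and IRQ number are placeholders standing in for whatever the board actually defines):

    static void __init board_timer_init(void)
    {
            sp804_clocksource_init(TIMER0_VA_BASE, "timer0");
            sp804_clockevents_init(TIMER1_VA_BASE, IRQ_TIMER1, "timer1");
    }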
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 431077c5a86..af44a8fb348 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -209,14 +209,10 @@ static inline unsigned long __phys_to_virt(unsigned long x)
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
-#ifndef ISA_DMA_THRESHOLD
+#ifndef ARM_DMA_ZONE_SIZE
#define ISA_DMA_THRESHOLD (0xffffffffULL)
-#endif
-
-#ifndef arch_adjust_zones
-#define arch_adjust_zones(size,holes) do { } while (0)
-#elif !defined(CONFIG_ZONE_DMA)
-#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
+#else
+#define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
#endif
/*
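Note on the memory.h hunk above: ISA_DMA_THRESHOLD now follows from the same ARM_DMA_ZONE_SIZE symbol used in dma.h, and the per-machine arch_adjust_zones() hook is gone. Continuing the illustrative values from the dma.h note:

    #define PHYS_OFFSET        0x80000000UL   /* illustrative */
    #define ARM_DMA_ZONE_SIZE  0x10000000UL   /* 256 MiB */

    /* ISA_DMA_THRESHOLD = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1 = 0x8fffffff,
     * i.e. the highest physical address a GFP_DMA allocation may return. */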
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index a8ff22b2a39..312d10877bd 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -128,6 +128,12 @@ struct pt_regs {
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
+/*
+ * The size of the user-visible VFP state as seen by PTRACE_GET/SETVFPREGS
+ * and core dumps.
+ */
+#define ARM_VFPREGS_SIZE ( 32 * 8 /*fpregs*/ + 4 /*fpscr*/ )
+
#ifdef __KERNEL__
#define user_mode(regs) \
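Note on the ptrace.h hunk above: ARM_VFPREGS_SIZE is 32 eight-byte d-registers plus the 4-byte FPSCR, i.e. 260 bytes, and sizes the VFP regset seen by ptrace and core dumps. A sketch of a tracer-side read of an already-attached, stopped tracee (PTRACE_GETVFPREGS is the ARM-specific request; the fallback define below is only for libc headers that do not expose it):

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_GETVFPREGS
    #define PTRACE_GETVFPREGS 27             /* ARM-specific ptrace request */
    #endif

    #define ARM_VFPREGS_SIZE (32 * 8 + 4)    /* d0..d31 + fpscr = 260 bytes */

    static int read_vfp_state(pid_t pid, unsigned char *buf /* 260 bytes */)
    {
            if (ptrace(PTRACE_GETVFPREGS, pid, NULL, buf) == -1) {
                    perror("PTRACE_GETVFPREGS");
                    return -1;
            }
            return 0;
    }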
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index 316bb2b2be3..154b89b81d3 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -16,44 +16,6 @@
/* Size definitions
* Copyright (C) ARM Limited 1998. All rights reserved.
*/
+#include <asm-generic/sizes.h>
-#ifndef __sizes_h
-#define __sizes_h 1
-
-/* handy sizes */
-#define SZ_16 0x00000010
-#define SZ_32 0x00000020
-#define SZ_64 0x00000040
-#define SZ_128 0x00000080
-#define SZ_256 0x00000100
-#define SZ_512 0x00000200
-
-#define SZ_1K 0x00000400
-#define SZ_2K 0x00000800
-#define SZ_4K 0x00001000
-#define SZ_8K 0x00002000
-#define SZ_16K 0x00004000
-#define SZ_32K 0x00008000
-#define SZ_64K 0x00010000
-#define SZ_128K 0x00020000
-#define SZ_256K 0x00040000
-#define SZ_512K 0x00080000
-
-#define SZ_1M 0x00100000
-#define SZ_2M 0x00200000
-#define SZ_4M 0x00400000
-#define SZ_8M 0x00800000
-#define SZ_16M 0x01000000
-#define SZ_32M 0x02000000
-#define SZ_48M 0x03000000
-#define SZ_64M 0x04000000
-#define SZ_128M 0x08000000
-#define SZ_256M 0x10000000
-#define SZ_512M 0x20000000
-
-#define SZ_1G 0x40000000
-#define SZ_2G 0x80000000
-
-#endif
-
-/* END */
+#define SZ_48M (SZ_32M + SZ_16M)
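Note on the sizes.h hunk above: the generic SZ_* constants now come from asm-generic/sizes.h; only SZ_48M, which the generic header does not provide, remains ARM-specific. A trivial standalone check of the retained definition:

    #include <assert.h>

    #define SZ_16M 0x01000000                /* as provided by the generic header */
    #define SZ_32M 0x02000000
    #define SZ_48M (SZ_32M + SZ_16M)

    int main(void)
    {
            assert(SZ_48M == 0x03000000);    /* matches the removed table */
            return 0;
    }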
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 96ed521f240..a87664f54f9 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -14,8 +14,6 @@
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#include <mach/smp.h>
-
#ifndef CONFIG_SMP
# error "<asm/smp.h> included in non-SMP build"
#endif
@@ -47,9 +45,9 @@ extern void smp_init_cpus(void);
/*
- * Raise an IPI cross call on CPUs in callmap.
+ * Provide a function to raise an IPI cross call on CPUs in callmap.
*/
-extern void smp_cross_call(const struct cpumask *mask, int ipi);
+extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
/*
* Boot a secondary CPU, and assign it the specified idle task.
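Note on the smp.h hunks above: platforms no longer supply smp_cross_call() through <mach/smp.h>; they register an IPI-raising hook at runtime via set_smp_cross_call() instead. On a GIC-based platform the registration would typically look like the sketch below (platform_smp_init_cpus stands in for the machine's smp_init_cpus implementation):

    #include <asm/smp.h>
    #include <asm/hardware/gic.h>

    void __init platform_smp_init_cpus(void)
    {
            /* ... mark the possible CPUs ... */

            /* gic_raise_softirq(const struct cpumask *, unsigned int)
             * matches the function-pointer type set_smp_cross_call() takes. */
            set_smp_cross_call(gic_raise_softirq);
    }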
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index fdd3820edff..65fa3c88095 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,6 +5,8 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
+#include <asm/processor.h>
+
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.