author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2009-06-08 19:27:13 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-06-08 19:27:13 +0100
commit | 7698fdedcfa3cab3dd40c9b685590b23be02e267 (patch)
tree | 207ddae0f963174356b1a9de7fff8e48a9c1e9c1 /arch/arm
parent | 2d8d24935d372175786ebefa8a2de8680831b67f (diff)
parent | ae5c8c83735f5fcb09b380944e4854a383006998 (diff)
Merge branch 'for-rmk' of git://git.marvell.com/orion into devel
Diffstat (limited to 'arch/arm')
61 files changed, 1279 insertions, 318 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 28248b5d6a7..646a5d5eb8c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -394,6 +394,7 @@ config ARCH_KIRKWOOD
 	select CPU_FEROCEON
 	select PCI
 	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
 	select PLAT_ORION
@@ -415,6 +416,7 @@ config ARCH_MV78XX0
 	select CPU_FEROCEON
 	select PCI
 	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
 	select PLAT_ORION
@@ -428,6 +430,7 @@ config ARCH_ORION5X
 	select CPU_FEROCEON
 	select PCI
 	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
 	select PLAT_ORION
diff --git a/arch/arm/configs/kirkwood_defconfig b/arch/arm/configs/kirkwood_defconfig
index dcf8153a947..0a1abb978d7 100644
--- a/arch/arm/configs/kirkwood_defconfig
+++ b/arch/arm/configs/kirkwood_defconfig
@@ -182,6 +182,7 @@ CONFIG_ARCH_KIRKWOOD=y
 CONFIG_MACH_DB88F6281_BP=y
 CONFIG_MACH_RD88F6192_NAS=y
 CONFIG_MACH_RD88F6281=y
+CONFIG_MACH_MV88F6281GTW_GE=y
 CONFIG_MACH_SHEEVAPLUG=y
 CONFIG_MACH_TS219=y
 CONFIG_PLAT_ORION=y
@@ -270,7 +271,9 @@ CONFIG_CMDLINE=""
 #
 # CPU Power Management
 #
-# CONFIG_CPU_IDLE is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
 
 #
 # Floating point emulation
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 5b98f764511..9e2385293ec 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -903,7 +903,8 @@ CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=16
 # CONFIG_IPMI_HANDLER is not set
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_RAW_DRIVER is not set
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6116e4893c0..15f8a092b70 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
 	.align	3;			\
 	.long	9999b,9001f;		\
 	.previous
+
+/*
+ * SMP data memory barrier
+ */
+	.macro	smp_dmb
+#ifdef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7
+	dmb
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+#endif
+	.endm
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index ee99723b3a6..16b52f39798 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
 	: "cc");
 }
 
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_add\n"
+"1:	ldrex	%0, [%2]\n"
+"	add	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;
 
+	smp_mb();
+
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%2]\n"
 "	add	%0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
+	smp_mb();
+
 	return result;
 }
 
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub\n"
+"1:	ldrex	%0, [%2]\n"
+"	sub	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;
 
+	smp_mb();
+
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, %3\n"
@@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
+	smp_mb();
+
 	return result;
 }
 
@@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
 	unsigned long oldval, res;
 
+	smp_mb();
+
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
 		"ldrex	%1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 		: "cc");
 	} while (res);
 
+	smp_mb();
+
 	return oldval;
 }
 
@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 	return val;
 }
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 	return val;
 }
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic_add(i, v)	(void) atomic_add_return(i, v)
-#define atomic_inc(v)		(void) atomic_add_return(1, v)
-#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
-#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+#define atomic_inc(v)		atomic_add(1, v)
+#define atomic_dec(v)		atomic_sub(1, v)
 
 #define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic.h>
 #endif
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index 1d77e51907f..59426a4595c 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -5,9 +5,6 @@
 #ifndef __ARM_FLAT_H__
 #define __ARM_FLAT_H__
 
-/* An odd number of words will be pushed after this alignment, so
-   deliberately misalign the value. */
-#define	flat_stack_align(sp)	sp = (void *)(((unsigned long)(sp) - 4) | 4)
 #define	flat_argvp_envp_on_stack()	1
 #define	flat_old_ram_flag(flags)	(flags)
 #define	flat_reloc_valid(reloc, size)	((reloc) <= (size))
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index ada93a8fc2e..4fc1565e4f9 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -29,6 +29,7 @@
 #define SZ_512		0x00000200
 
 #define SZ_1K		0x00000400
+#define SZ_2K		0x00000800
 #define SZ_4K		0x00001000
 #define SZ_8K		0x00002000
 #define SZ_16K		0x00004000
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index bd4dc8ed53d..d65b2f5bf41 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	unsigned int tmp;
 #endif
 
+	smp_mb();
+
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
 	case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
+	smp_mb();
 
 	return ret;
 }
@@ -316,6 +319,12 @@ extern void enable_hlt(void);
 
 #include <asm-generic/cmpxchg-local.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
+#else	/* __LINUX_ARM_ARCH__ >= 6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only support 32-bits operands on ARMv6.
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long oldval, res;
+
+	switch (size) {
+#ifdef CONFIG_CPU_32v6K
+	case 1:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexb	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexbeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	case 2:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexh	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexheq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+#endif /* CONFIG_CPU_32v6K */
+	case 4:
+		do {
+			asm volatile("@ __cmpxchg4\n"
+			"	ldrex	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	default:
+		__bad_cmpxchg(ptr, size);
+		oldval = 0;
+	}
+
+	return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+					 unsigned long new, int size)
+{
+	unsigned long ret;
+
+	smp_mb();
+	ret = __cmpxchg(ptr, old, new, size);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg(ptr,o,n)					\
+	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),		\
+					  (unsigned long)(o),	\
+					  (unsigned long)(n),	\
+					  sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long ret;
+
+	switch (size) {
+#ifndef CONFIG_CPU_32v6K
+	case 1:
+	case 2:
+		ret = __cmpxchg_local_generic(ptr, old, new, size);
+		break;
+#endif	/* !CONFIG_CPU_32v6K */
+	default:
+		ret = __cmpxchg(ptr, old, new, size);
+	}
+
+	return ret;
+}
+
+#define cmpxchg_local(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
+
+#ifdef CONFIG_CPU_32v6K
+
+/*
+ * Note : ARMv7-M (currently unsupported by Linux) does not support
+ * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
+ * not be allowed to use __cmpxchg64.
+ */
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	register unsigned long long oldval asm("r0");
+	register unsigned long long __old asm("r2") = old;
+	register unsigned long long __new asm("r4") = new;
+	unsigned long res;
+
+	do {
+		asm volatile(
+		"	@ __cmpxchg8\n"
+		"	ldrexd	%1, %H1, [%2]\n"
+		"	mov	%0, #0\n"
+		"	teq	%1, %3\n"
+		"	teqeq	%H1, %H3\n"
+		"	strexdeq %0, %4, %H4, [%2]\n"
+			: "=&r" (res), "=&r" (oldval)
+			: "r" (ptr), "Ir" (__old), "r" (__new)
+			: "memory", "cc");
+	} while (res);
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg64(ptr,o,n)					\
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),		\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr,o,n)				\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),			\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#else /* !CONFIG_CPU_32v6K */
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* CONFIG_CPU_32v6K */
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #endif /* __ASSEMBLY__ */
 
 #define arch_align_stack(x) (x)
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f..950391f194c 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -78,6 +78,15 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
 		return 1;
 	if (cpu_architecture() < CPU_ARCH_ARMv6)
 		return 1;
+#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
+	/*
+	 * If we have support for OABI programs, we can never allow NX
+	 * support - our signal syscall restart mechanism relies upon
+	 * being able to execute code placed on the user stack.
+	 */
+	return 1;
+#else
 	return 0;
+#endif
 }
 EXPORT_SYMBOL(arm_elf_read_implies_exec);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d662a2f1fd8..83b1da6b7ba 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -815,10 +815,7 @@ __kuser_helper_start:
  */
 
 __kuser_memory_barrier:				@ 0xffff0fa0
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
-#endif
+	smp_dmb
 	usr_ret	lr
 
 	.align	5
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 2e787d40d59..c7f2627385e 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -18,12 +18,14 @@
 	mov	r2, #1
 	add	r1, r1, r0, lsr #3	@ Get byte offset
 	mov	r3, r2, lsl r3		@ create mask
+	smp_dmb
 1:	ldrexb	r2, [r1]
 	ands	r0, r2, r3		@ save old value of bit
 	\instr	r2, r2, r3		@ toggle bit
 	strexb	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
+	smp_dmb
 	cmp	r0, #0
 	movne	r0, #1
 2:	mov	pc, lr
diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h
index de6752674c0..213a4fcfeb1 100644
--- a/arch/arm/mach-gemini/include/mach/hardware.h
+++ b/arch/arm/mach-gemini/include/mach/hardware.h
@@ -15,10 +15,9 @@
 /*
  * Memory Map definitions
  */
-/* FIXME: Does it really swap SRAM like this? */
 #ifdef CONFIG_GEMINI_MEM_SWAP
 # define GEMINI_DRAM_BASE	0x00000000
-# define GEMINI_SRAM_BASE	0x20000000
+# define GEMINI_SRAM_BASE	0x70000000
 #else
 # define GEMINI_SRAM_BASE	0x00000000
 # define GEMINI_DRAM_BASE	0x10000000
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index b5421cccd7e..25100f7acf4 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -20,6 +20,12 @@ config MACH_RD88F6281
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell RD-88F6281 Reference Board.
 
+config MACH_MV88F6281GTW_GE
+	bool "Marvell 88F6281 GTW GE Board"
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  Marvell 88F6281 GTW GE Board.
+
 config MACH_SHEEVAPLUG
 	bool "Marvell SheevaPlug Reference Board"
 	help
diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
index 8f03c9b9bdd..9dd680e964d 100644
--- a/arch/arm/mach-kirkwood/Makefile
+++ b/arch/arm/mach-kirkwood/Makefile
@@ -3,5 +3,8 @@ obj-y	+= common.o addr-map.o irq.o pcie.o mpp.o
 obj-$(CONFIG_MACH_DB88F6281_BP)		+= db88f6281-bp-setup.o
 obj-$(CONFIG_MACH_RD88F6192_NAS)	+= rd88f6192-nas-setup.o
 obj-$(CONFIG_MACH_RD88F6281)		+= rd88f6281-setup.o
+obj-$(CONFIG_MACH_MV88F6281GTW_GE)	+= mv88f6281gtw_ge-setup.o
 obj-$(CONFIG_MACH_SHEEVAPLUG)		+= sheevaplug-setup.o
 obj-$(CONFIG_MACH_TS219)		+= ts219-setup.o
+
+obj-$(CONFIG_CPU_IDLE)	+= cpuidle.o
diff --git a/arch/arm/mach-kirkwood/addr-map.c b/arch/arm/mach-kirkwood/addr-map.c
index 5db4f0bbe5e..1da5d1c18ec 100644
--- a/arch/arm/mach-kirkwood/addr-map.c
+++ b/arch/arm/mach-kirkwood/addr-map.c
@@ -20,6 +20,7 @@
  */
 #define TARGET_DDR		0
 #define TARGET_DEV_BUS		1
+#define TARGET_SRAM		3
 #define TARGET_PCIE		4
 #define ATTR_DEV_SPI_ROM	0x1e
 #define ATTR_DEV_BOOT		0x1d
@@ -30,6 +31,7 @@
 #define ATTR_DEV_CS0		0x3e
 #define ATTR_PCIE_IO		0xe0
 #define ATTR_PCIE_MEM		0xe8
+#define ATTR_SRAM		0x01
 
 /*
  * Helpers to get DDR bank info
@@ -48,7 +50,6 @@
 
 struct mbus_dram_target_info kirkwood_mbus_dram_info;
 
-static int __initdata win_alloc_count;
 
 static int __init cpu_win_can_remap(int win)
 {
@@ -112,7 +113,11 @@ void __init kirkwood_setup_cpu_mbus(void)
 	setup_cpu_win(2, KIRKWOOD_NAND_MEM_PHYS_BASE, KIRKWOOD_NAND_MEM_SIZE,
 		      TARGET_DEV_BUS, ATTR_DEV_NAND, -1);
 
-	win_alloc_count = 3;
+	/*
+	 * Setup window for SRAM.
+	 */
+	setup_cpu_win(3, KIRKWOOD_SRAM_PHYS_BASE, KIRKWOOD_SRAM_SIZE,
+		      TARGET_SRAM, ATTR_SRAM, -1);
 
 	/*
 	 * Setup MBUS dram target info.
@@ -140,8 +145,3 @@ void __init kirkwood_setup_cpu_mbus(void)
 	}
 	kirkwood_mbus_dram_info.num_cs = cs;
 }
-
-void __init kirkwood_setup_sram_win(u32 base, u32 size)
-{
-	setup_cpu_win(win_alloc_count++, base, size, 0x03, 0x00, -1);
-}
diff --git a
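
The atomic.h and system.h hunks above give the value-returning ARM atomics (atomic_add_return(), atomic_cmpxchg(), xchg(), cmpxchg()) full memory-barrier semantics, while the new non-returning atomic_add()/atomic_sub() are deliberately barrier-free; that is also why smp_mb__before_atomic_dec() and friends switch from barrier() to smp_mb(). Below is a minimal sketch of the usage pattern those macros exist to support. It is illustrative only and not part of this commit; the struct, field, and function names are hypothetical.

	#include <asm/atomic.h>		/* atomic_t, atomic_dec() */
	#include <asm/system.h>		/* smp_mb(), smp_mb__before_atomic_dec() */

	/* Hypothetical object: one CPU publishes a result, others poll 'pending'. */
	struct example_work {
		int data;
		atomic_t pending;
	};

	static void example_complete(struct example_work *w, int result)
	{
		w->data = result;
		/*
		 * On ARMv6+ SMP, atomic_dec() is a bare ldrex/strex loop with no
		 * dmb, so the store to w->data must be ordered explicitly before
		 * the decrement becomes visible to other CPUs.
		 */
		smp_mb__before_atomic_dec();
		atomic_dec(&w->pending);
	}

A reader that observes the decrement would need a matching barrier (e.g. smp_rmb()) before inspecting w->data.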