114 files changed, 5748 insertions, 3362 deletions
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 3e1e498fea9..fe5cef8976c 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -9,6 +9,7 @@ Required properties:
 - compatible : should be one of
 	"arm,armv8-pmuv3"
 	"arm,cortex-a15-pmu"
+	"arm,cortex-a12-pmu"
 	"arm,cortex-a9-pmu"
 	"arm,cortex-a8-pmu"
 	"arm,cortex-a7-pmu"
@@ -16,7 +17,14 @@ Required properties:
 	"arm,arm11mpcore-pmu"
 	"arm,arm1176-pmu"
 	"arm,arm1136-pmu"
-- interrupts : 1 combined interrupt or 1 per core.
+	"qcom,krait-pmu"
+- interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu
+               interrupt (PPI) then 1 interrupt should be specified.
+
+Optional properties:
+
+- qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
+                     events.
 
 Example:
 
diff --git a/arch/Kconfig b/arch/Kconfig
index 80bbb8ccd0d..97ff872c7ac 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -86,9 +86,7 @@ config KPROBES_ON_FTRACE
 	  optimize on top of function tracing.
 
 config UPROBES
-	bool "Transparent user-space probes (EXPERIMENTAL)"
-	depends on UPROBE_EVENT && PERF_EVENTS
-	default n
+	def_bool n
 	select PERCPU_RWSEM
 	help
 	  Uprobes is the user-space counterpart to kprobes: they
@@ -101,8 +99,6 @@ config UPROBES
 	  managed by the kernel and kept transparent to the probed
 	  application. )
 
-	  If in doubt, say "N".
-
 config HAVE_64BIT_ALIGNED_ACCESS
 	def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
 	help
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 503da0a2a8e..6b4ac5de6de 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -207,6 +207,9 @@ config ZONE_DMA
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config ARCH_SUPPORTS_UPROBES
+	def_bool y
+
 config ARCH_HAS_DMA_SET_COHERENT_MASK
 	bool
 
@@ -2271,7 +2274,7 @@ source "kernel/power/Kconfig"
 config ARCH_SUSPEND_POSSIBLE
 	depends on !ARCH_S5PC100
 	depends on CPU_ARM920T || CPU_ARM926T || CPU_FEROCEON || CPU_SA1100 || \
-		CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
+		CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
 	def_bool y
 
 config ARM_CPU_SUSPEND
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 4bdc41622c3..70b1eff477b 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
 obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+CFLAGS_REMOVE_mcpm_entry.o	= -pg
 AFLAGS_mcpm_head.o		:= -march=armv7-a
 AFLAGS_vlock.o			:= -march=armv7-a
 obj-$(CONFIG_TI_PRIV_EDMA)	+= edma.o
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index a5c3dc38aa1..6ef146edd0c 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -232,8 +232,6 @@ static int scoop_probe(struct platform_device *pdev)
 
 	return 0;
 
-	if (devptr->gpio.base != -1)
-		temp = gpiochip_remove(&devptr->gpio);
 err_gpio:
 	platform_set_drvdata(pdev, NULL);
 err_ioremap:
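The scoop.c hunk above deletes two statements that sit after an unconditional
"return 0;" in scoop_probe() and therefore can never execute. A minimal,
hypothetical C reproduction of the same dead-code shape (the names are
illustrative, not taken from the driver):

	#include <stdio.h>

	static void undo_setup(void) { puts("cleanup"); }

	static int probe_ok(void)
	{
		return 0;

		undo_setup();	/* unreachable: nothing after the return runs */
	}

	int main(void)
	{
		return probe_ok();	/* prints nothing */
	}

Compilers can flag this pattern (clang warns with -Wunreachable-code), so
removing the leftover lines is a pure cleanup with no behavioural change.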
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c228516057..380ac4f2000 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull		lsr
-#define push		lsl
+#define lspull		lsr
+#define lspush		lsl
 #define get_byte_0	lsl #0
 #define get_byte_1	lsr #8
 #define get_byte_2	lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2	lsl #16
 #define put_byte_3	lsl #24
 #else
-#define pull		lsl
-#define push		lsr
+#define lspull		lsl
+#define lspush		lsr
 #define get_byte_0	lsr #24
 #define get_byte_1	lsr #16
 #define get_byte_2	lsr #8
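The assembler.h hunks rename the endian-independent shift macros from
pull/push to lspull/lspush, presumably so they no longer shadow the ARM
"push" instruction mnemonic; the underlying lsr/lsl mappings are unchanged.
These macros let a single copy loop splice a misaligned word out of two
aligned loads on either endianness. A rough C model of the little-endian
case (the function and parameter names are made up for this sketch):

	#include <stdint.h>

	/* shift is the misalignment in bits: 8, 16 or 24 */
	static inline uint32_t splice_le(uint32_t lo, uint32_t hi,
					 unsigned int shift)
	{
		/* lspull (lsr) on the low word, lspush (lsl) on the high */
		return (lo >> shift) | (hi << (32 - shift));
	}

On big-endian the two shift directions swap, which is exactly what the
#else branch of the macro block encodes.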
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b06..9a92fd7864a 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int oldval, newval;
+	unsigned long tmp;
+
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
 
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
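The atomic.h changes do two things: every ldrex/strex (and ldrexd/strexd)
loop gains a prefetchw() on the counter, so the cache line is pulled in for
writing before the exclusive access starts, and __atomic_add_unless() gets
a hand-written LL/SC implementation on ARMv6+ in place of the generic
cmpxchg-based fallback. A hedged userspace model of that add-unless
contract, with GCC's atomic builtins standing in for ldrex/strex and
__builtin_prefetch() standing in for prefetchw() (the function name here is
invented for the sketch):

	#include <stdio.h>

	typedef struct { int counter; } atomic_t;

	/* Add @a to @v unless @v's current value is @u; return the value
	 * observed before any add, mirroring __atomic_add_unless(). */
	static int atomic_add_unless_model(atomic_t *v, int a, int u)
	{
		int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);

		/* analogue of prefetchw(&v->counter): warm the line for write */
		__builtin_prefetch(&v->counter, 1);

		while (old != u &&
		       !__atomic_compare_exchange_n(&v->counter, &old, old + a,
						    0, __ATOMIC_SEQ_CST,
						    __ATOMIC_RELAXED))
			;	/* CAS failed: 'old' now holds the fresh value */

		return old;
	}

	int main(void)
	{
		atomic_t v = { .counter = 3 };

		printf("%d\n", atomic_add_unless_model(&v, 1, 5));	/* 3; counter -> 4 */
		printf("%d\n", atomic_add_unless_model(&v, 1, 4));	/* 4; counter stays 4 */
		return 0;
	}

The kernel version also skips the trailing barrier when the value was left
untouched (the "if (oldval != u) smp_mb();" tail), an optimization this
SEQ_CST model does not attempt to reproduce.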