-rw-r--r--  arch/arm/Kconfig                           6
-rw-r--r--  arch/arm/Kconfig.debug                    41
-rw-r--r--  arch/arm/common/mcpm_entry.c              15
-rw-r--r--  arch/arm/common/mcpm_platsmp.c            27
-rw-r--r--  arch/arm/common/timer-sp.c                 2
-rw-r--r--  arch/arm/include/asm/atomic.h             76
-rw-r--r--  arch/arm/include/asm/cacheflush.h         46
-rw-r--r--  arch/arm/include/asm/cmpxchg.h            58
-rw-r--r--  arch/arm/include/asm/cputype.h             1
-rw-r--r--  arch/arm/include/asm/hardirq.h             2
-rw-r--r--  arch/arm/include/asm/mcpm.h               31
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h      7
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h      3
-rw-r--r--  arch/arm/include/asm/setup.h               2
-rw-r--r--  arch/arm/include/asm/spinlock.h            8
-rw-r--r--  arch/arm/include/asm/tlbflush.h           48
-rw-r--r--  arch/arm/include/debug/efm32.S            45
-rw-r--r--  arch/arm/include/debug/msm.S               5
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c           14
-rw-r--r--  arch/arm/kernel/kprobes.c                  8
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c           2
-rw-r--r--  arch/arm/kernel/setup.c                   24
-rw-r--r--  arch/arm/kernel/smp.c                     19
-rw-r--r--  arch/arm/kernel/smp_tlb.c                 36
-rw-r--r--  arch/arm/kvm/arm.c                         6
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c        41
-rw-r--r--  arch/arm/mach-footbridge/netwinder-hw.c    8
-rw-r--r--  arch/arm/mach-vexpress/dcscb.c            56
-rw-r--r--  arch/arm/mach-vexpress/tc2_pm.c           48
-rw-r--r--  arch/arm/mm/mmap.c                         6
-rw-r--r--  arch/arm64/include/asm/atomic.h           14
-rw-r--r--  arch/arm64/kernel/debug-monitors.c        13
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c         22
-rw-r--r--  arch/arm64/kernel/perf_event.c             4
-rw-r--r--  include/linux/amba/bus.h                   2
35 files changed, 467 insertions, 279 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1ad6fb6c094..df0c609272e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,7 @@ config ARM
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT if MMU
select CLONE_BACKWARDS
@@ -1091,11 +1092,6 @@ config IWMMXT
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
-config XSCALE_PMU
- bool
- depends on CPU_XSCALE
- default y
-
config MULTI_IRQ_HANDLER
bool
help
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 9762c84b419..a8f305b07f2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -318,6 +318,7 @@ choice
config DEBUG_MSM_UART1
bool "Kernel low-level debugging messages via MSM UART1"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the first serial port on MSM devices.
@@ -325,6 +326,7 @@ choice
config DEBUG_MSM_UART2
bool "Kernel low-level debugging messages via MSM UART2"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the second serial port on MSM devices.
@@ -332,6 +334,7 @@ choice
config DEBUG_MSM_UART3
bool "Kernel low-level debugging messages via MSM UART3"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the third serial port on MSM devices.
@@ -340,6 +343,7 @@ choice
bool "Kernel low-level debugging messages via MSM 8660 UART"
depends on ARCH_MSM8X60
select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the serial port on MSM 8660 devices.
@@ -348,10 +352,20 @@ choice
bool "Kernel low-level debugging messages via MSM 8960 UART"
depends on ARCH_MSM8960
select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the serial port on MSM 8960 devices.
+ config DEBUG_MSM8974_UART
+ bool "Kernel low-level debugging messages via MSM 8974 UART"
+ depends on ARCH_MSM8974
+ select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on MSM 8974 devices.
+
config DEBUG_MVEBU_UART
bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
depends on ARCH_MVEBU
@@ -834,6 +848,20 @@ choice
options; the platform specific options are deprecated
and will be soon removed.
+ config DEBUG_LL_UART_EFM32
+ bool "Kernel low-level debugging via efm32 UART"
+ depends on ARCH_EFM32
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to a UART or USART port on efm32 based
+ machines. Use the following addresses for DEBUG_UART_PHYS:
+
+ 0x4000c000 | USART0
+ 0x4000c400 | USART1
+ 0x4000c800 | USART2
+ 0x4000e000 | UART0
+ 0x4000e400 | UART1
+
config DEBUG_LL_UART_PL01X
bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
help
@@ -880,11 +908,16 @@ config DEBUG_STI_UART
bool
depends on ARCH_STI
+config DEBUG_MSM_UART
+ bool
+ depends on ARCH_MSM
+
config DEBUG_LL_INCLUDE
string
default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
default "debug/exynos.S" if DEBUG_EXYNOS_UART
+ default "debug/efm32.S" if DEBUG_LL_UART_EFM32
default "debug/icedcc.S" if DEBUG_ICEDCC
default "debug/imx.S" if DEBUG_IMX1_UART || \
DEBUG_IMX25_UART || \
@@ -895,11 +928,7 @@ config DEBUG_LL_INCLUDE
DEBUG_IMX53_UART ||\
DEBUG_IMX6Q_UART || \
DEBUG_IMX6SL_UART
- default "debug/msm.S" if DEBUG_MSM_UART1 || \
- DEBUG_MSM_UART2 || \
- DEBUG_MSM_UART3 || \
- DEBUG_MSM8660_UART || \
- DEBUG_MSM8960_UART
+ default "debug/msm.S" if DEBUG_MSM_UART
default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
default "debug/sti.S" if DEBUG_STI_UART
@@ -951,6 +980,7 @@ config DEBUG_UART_PHYS
default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
default 0x20201000 if DEBUG_BCM2835
+ default 0x4000e400 if DEBUG_LL_UART_EFM32
default 0x40090000 if ARCH_LPC32XX
default 0x40100000 if DEBUG_PXA_UART1
default 0x42000000 if ARCH_GEMINI
@@ -981,6 +1011,7 @@ config DEBUG_UART_PHYS
default 0xfff36000 if DEBUG_HIGHBANK_UART
default 0xfffff700 if ARCH_IOP33X
depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+ DEBUG_LL_UART_EFM32 || \
DEBUG_UART_8250 || DEBUG_UART_PL01X
config DEBUG_UART_VIRT
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 990250965f2..6c03d0152e7 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -90,6 +90,21 @@ void mcpm_cpu_power_down(void)
BUG();
}
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+ int ret;
+
+ if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+ return -EUNATCH;
+
+ ret = platform_ops->power_down_finish(cpu, cluster);
+ if (ret)
+ pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
+ __func__, cpu, cluster, ret);
+
+ return ret;
+}
+
void mcpm_cpu_suspend(u64 expected_residency)
{
phys_reset_t phys_reset;
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 1bc34c7567f..177251a4dd9 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,14 +19,23 @@
#include <asm/smp.h>
#include <asm/smp_plat.h>
+static void cpu_to_pcpu(unsigned int cpu,
+ unsigned int *pcpu, unsigned int *pcluster)
+{
+ unsigned int mpidr;
+
+ mpidr = cpu_logical_map(cpu);
+ *pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ *pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned int mpidr, pcpu, pcluster, ret;
+ unsigned int pcpu, pcluster, ret;
extern void secondary_startup(void);
- mpidr = cpu_logical_map(cpu);
- pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
__func__, cpu, pcpu, pcluster);
@@ -47,6 +56,15 @@ static void mcpm_secondary_init(unsigned int cpu)
#ifdef CONFIG_HOTPLUG_CPU
+static int mcpm_cpu_kill(unsigned int cpu)
+{
+ unsigned int pcpu, pcluster;
+
+ cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
+ return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+}
+
static int mcpm_cpu_disable(unsigned int cpu)
{
/*
@@ -73,6 +91,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
.smp_boot_secondary = mcpm_boot_secondary,
.smp_secondary_init = mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_kill = mcpm_cpu_kill,
.cpu_disable = mcpm_cpu_disable,
.cpu_die = mcpm_cpu_die,
#endif
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index e901d0f3e0b..ce922d0ea7a 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = {
static struct irqaction sp804_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sp804_timer_interrupt,
.dev_id = &sp804_clockevent,
};
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d3932..f8a4336ed8f 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
- unsigned long oldval, res;
+ int oldval;
+ unsigned long res;
smp_mb();
@@ -134,21 +135,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long tmp, tmp2;
-
- __asm__ __volatile__("@ atomic_clear_mask\n"
-"1: ldrex %0, [%3]\n"
-" bic %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
- : "r" (addr), "Ir" (mask)
- : "cc");
-}
-
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
@@ -197,15 +183,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- *addr &= ~mask;
- raw_local_irq_restore(flags);
-}
-
#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -238,15 +215,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
- u64 __aligned(8) counter;
+ long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
- u64 result;
+ long long result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
@@ -257,7 +234,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -266,9 +243,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
);
}
#else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
- u64 result;
+ long long result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
@@ -279,9 +256,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
{
- u64 tmp;
+ long long tmp;
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
@@ -294,9 +271,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
}
#endif
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
__asm__ __volatile__("@ atomic64_add\n"
@@ -311,9 +288,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
: "cc");
}
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
@@ -334,9 +311,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
return result;
}
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
__asm__ __volatile__("@ atomic64_sub\n"
@@ -351,9 +328,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
: "cc");
}
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
@@ -374,9 +351,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
return result;
}
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+ long long new)
{
- u64 oldval;
+ long long oldval;
unsigned long res;
smp_mb();
@@ -398,9 +376,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
return oldval;
}
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
@@ -419,9 +397,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
return result;
}
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
@@ -445,9 +423,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- u64 val;
+ long long val;
unsigned long tmp;
int ret = 1;
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf887..ee753f1749c 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so with no intervening memory access between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ * fp is preserved to the stack explicitly prior to disabling the cache
+ * since adding it to the clobber list is incompatible with having
+ * CONFIG_FRAME_POINTER=y. ip is saved as well in case r12-clobbering
+ * trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "stmfd sp!, {fp, ip} \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "clrex \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb \n\t" \
+ "ldmfd sp!, {fp, ip}" \
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r9","r10","lr","memory" )
+
#endif
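
Aside, not part of the patch itself: the macro above is meant to be invoked from platform power-down paths in place of open-coded disable/flush/exit-coherency assembly. A minimal sketch of a caller follows; the function name example_cpu_powerdown() is invented, and the two argument spellings shown are the only valid ones, since the macro pastes its argument into a call to v7_flush_dcache_<level>.

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Illustrative caller: take this CPU out of coherency before power-down. */
static void example_cpu_powerdown(bool last_man_in_cluster)
{
	if (last_man_in_cluster)
		v7_exit_coherency_flush(all);	/* clean/invalidate all levels */
	else
		v7_exit_coherency_flush(louis);	/* flush to the LoUIS only */

	/* From this point on: no ldrex/strex and no coherent memory accesses. */
}
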
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540..df2fbba7efc 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long oldval;
+ unsigned long res;
+
+ __asm__ __volatile__(
+"1: ldrexd %1, %H1, [%3]\n"
+" teq %1, %4\n"
+" teqeq %H1, %H4\n"
+" bne 2f\n"
+" strexd %0, %5, %H5, [%3]\n"
+" teq %0, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+ : "r" (ptr), "r" (old), "r" (new)
+ : "cc");
+
+ return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long ret;
+
+ smp_mb();
+ ret = __cmpxchg64(ptr, old, new);
+ smp_mb();
+
+ return ret;
+}
+
#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
- atomic64_t, \
- counter), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
- local64_t, \
- a), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
+ ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
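
Aside, not part of the patch itself: with cmpxchg64() and cmpxchg64_relaxed() now built directly on ldrexd/strexd instead of being routed through atomic64_cmpxchg(), any naturally aligned 64-bit variable can be updated with a compare-and-swap loop. A hedged sketch follows; the counter and function names are invented.

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>

static u64 example_seq;		/* illustrative 64-bit counter */

/* Lock-free 64-bit increment using the rewritten cmpxchg64(). */
static u64 example_seq_inc(void)
{
	u64 old, new;

	do {
		old = ACCESS_ONCE(example_seq);
		new = old + 1;
	} while (cmpxchg64(&example_seq, old, new) != old);

	return new;
}

Per the hunk above, cmpxchg64() wraps the exchange in smp_mb() barriers, while cmpxchg64_relaxed() skips them.
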
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 9672e978d50..acdde76b39b 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,7 @@
#define CPUID_TLBTYPE 3
#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
+#define CPUID_REVIDR 6
#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0 0x40
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df6..3d7351c844a 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index fc82a88f5b6..1cf26010a6f 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -81,10 +81,40 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
*
* This will return if mcpm_platform_register() has not been called
* previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_cpu_power_down_finish() subsequently returns zero for the
+ * specified cpu. Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
*/
void mcpm_cpu_power_down(void);
/**
+ * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ * make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ * - zero if the CPU is in a safely parked state
+ * - nonzero otherwise (e.g., timeout)
+ */
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+
+/**
* mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
* @expected_residency: duration in microseconds the CPU is expected
@@ -126,6 +156,7 @@ int mcpm_cpu_powered_up(void);
struct mcpm_platform_ops {
int (*power_up)(unsigned int cpu, unsigned int cluster);
void (*power_down)(void);
+ int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
void (*suspend)(u64);
void (*powered_up)(void);
};
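
Aside, not part of the patch itself: a platform back-end provides the actual wait through the new .power_down_finish member of mcpm_platform_ops; the dcscb and tc2_pm changes listed in the diffstat presumably wire this up for vexpress. The sketch below only illustrates the expected shape, and example_pmu_cpu_is_off() is a made-up stand-in for whatever power-controller or reset-status poll a real platform performs.

#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/mcpm.h>

/*
 * Illustrative .power_down_finish back-end: poll until the target CPU is
 * observed powered off (for example, held in WFI or reset), or time out.
 */
static int example_power_down_finish(unsigned int cpu, unsigned int cluster)
{
	unsigned int ms;

	for (ms = 0; ms < 1000; ms++) {
		if (example_pmu_cpu_is_off(cpu, cluster))	/* hypothetical helper */
			return 0;	/* CPU is safely parked */
		msleep(1);
	}

	return -ETIMEDOUT;	/* nonzero: mcpm_cpu_kill() will report failure */
}
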
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386e..86a659a1952 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+/*
+ * We don't have huge page support for short descriptors, for the moment
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd) (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18c85f..39c54cfa03e 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f0560950..8d6a089dfb7 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9..ed6c22919e4 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
dsb_sev();
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
- return tickets.owner != tickets.next;
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
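
Aside, not part of the patch itself: arch_spin_value_unlocked() takes a lock value rather than a pointer because the generic lockref code, which the ARCH_USE_CMPXCHG_LOCKREF select in arch/arm/Kconfig above opts in to, tests a snapshotted lock+count word before attempting cmpxchg64_relaxed() on the whole thing. A simplified sketch of that pattern follows; the structure and function names are invented and the locked slow path is elided.

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <asm/cmpxchg.h>

/* Simplified stand-in for struct lockref: a spinlock and a count in 64 bits. */
struct example_lockref {
	union {
		u64 lock_count;
		struct {
			arch_spinlock_t lock;
			int count;
		};
	};
};

/* Lockless reference bump, retried while the embedded lock looks free. */
static void example_lockref_get(struct example_lockref *ref)
{
	struct example_lockref old, new;

	old.lock_count = ACCESS_ONCE(ref->lock_count);
	while (arch_spin_value_unlocked(old.lock)) {
		new = old;
		new.count++;
		if (cmpxchg64_relaxed(&ref->lock_count, old.lock_count,
				      new.lock_count) == old.lock_count)
			return;		/* fast path succeeded */
		old.lock_count = ACCESS_ONCE(ref->lock_count);
	}

	/* Lock is held: a real implementation falls back to taking it. */
}
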
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 38960264040..def9e570199 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}
-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
- unsigned int midr = read_cpuid_id();
-
- /* Cortex-A15 r0p0..r3p2 affected */
- if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
- return 0;
- return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
- /*
- * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
- */
- asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
- dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
- return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
/*
* flush_pmd_entry
*
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
#endif
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+ if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+ erratum_a15_798181_handler))
+ return erratum_a15_798181_handler();
+ return false;
+}
+#endif
+
#endif
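
Aside, not part of the patch itself: the handler pointer lets arch/arm/kernel/smp_tlb.c decide once at boot whether, and how, the Cortex-A15 erratum 798181 workaround applies, instead of every caller re-testing MIDR. The sketch below reuses the MIDR check and the dummy TLBIMVAIS broadcast from the inlines removed above; the real init in this series presumably also consults REVIDR (note the new CPUID_REVIDR accessor added to cputype.h) to detect parts carrying the ECO fix, which is not shown here.

#include <linux/types.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>

bool (*erratum_a15_798181_handler)(void);

static bool erratum_a15_798181_broadcast(void)
{
	/* Dummy TLBIMVAIS using the unmapped address 0 and ASID 0. */
	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
	dsb(ish);
	return true;	/* callers still need to broadcast the flush */
}

void erratum_a15_798181_init(void)
{
	unsigned int midr = read_cpuid_id();

	/* Cortex-A15 r0p0..r3p2 affected, as in the old inline check. */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return;

	erratum_a15_798181_handler = erratum_a15_798181_broadcast;
}
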
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 00000000000..2265a199280
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD 0x000c
+#define UARTn_CMD_TXEN 0x0004