Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild | 6
-rw-r--r--  arch/arm/include/asm/a.out-core.h | 45
-rw-r--r--  arch/arm/include/asm/arch_timer.h | 121
-rw-r--r--  arch/arm/include/asm/assembler.h | 90
-rw-r--r--  arch/arm/include/asm/atomic.h | 165
-rw-r--r--  arch/arm/include/asm/bL_switcher.h | 77
-rw-r--r--  arch/arm/include/asm/barrier.h | 50
-rw-r--r--  arch/arm/include/asm/bitops.h | 58
-rw-r--r--  arch/arm/include/asm/bug.h | 10
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 138
-rw-r--r--  arch/arm/include/asm/checksum.h | 34
-rw-r--r--  arch/arm/include/asm/clkdev.h | 2
-rw-r--r--  arch/arm/include/asm/cmpxchg.h | 64
-rw-r--r--  arch/arm/include/asm/cp15.h | 50
-rw-r--r--  arch/arm/include/asm/cputype.h | 138
-rw-r--r--  arch/arm/include/asm/cti.h | 10
-rw-r--r--  arch/arm/include/asm/dcc.h | 41
-rw-r--r--  arch/arm/include/asm/delay.h | 1
-rw-r--r--  arch/arm/include/asm/device.h | 6
-rw-r--r--  arch/arm/include/asm/div64.h | 4
-rw-r--r--  arch/arm/include/asm/dma-contiguous.h | 3
-rw-r--r--  arch/arm/include/asm/dma-iommu.h | 13
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 79
-rw-r--r--  arch/arm/include/asm/dma.h | 6
-rw-r--r--  arch/arm/include/asm/elf.h | 8
-rw-r--r--  arch/arm/include/asm/firmware.h | 70
-rw-r--r--  arch/arm/include/asm/fixmap.h | 21
-rw-r--r--  arch/arm/include/asm/floppy.h | 2
-rw-r--r--  arch/arm/include/asm/ftrace.h | 10
-rw-r--r--  arch/arm/include/asm/futex.h | 9
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 35
-rw-r--r--  arch/arm/include/asm/glue-df.h | 28
-rw-r--r--  arch/arm/include/asm/glue-proc.h | 18
-rw-r--r--  arch/arm/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm/include/asm/hardware/cache-feroceon-l2.h | 13
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h | 105
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h | 14
-rw-r--r--  arch/arm/include/asm/hardware/debug-8250.S | 29
-rw-r--r--  arch/arm/include/asm/hardware/debug-pl01x.S | 29
-rw-r--r--  arch/arm/include/asm/hardware/gic.h | 57
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h | 30
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-gpio.h | 75
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h | 17
-rw-r--r--  arch/arm/include/asm/hardware/iop_adma.h | 4
-rw-r--r--  arch/arm/include/asm/hardware/pci_v3.h | 186
-rw-r--r--  arch/arm/include/asm/hardware/pl080.h | 146
-rw-r--r--  arch/arm/include/asm/hardware/sp810.h | 64
-rw-r--r--  arch/arm/include/asm/hardware/timer-sp.h | 16
-rw-r--r--  arch/arm/include/asm/hardware/vic.h | 57
-rw-r--r--  arch/arm/include/asm/highmem.h | 8
-rw-r--r--  arch/arm/include/asm/hugetlb-3level.h | 71
-rw-r--r--  arch/arm/include/asm/hugetlb.h | 84
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 4
-rw-r--r--  arch/arm/include/asm/hwcap.h | 3
-rw-r--r--  arch/arm/include/asm/io.h | 31
-rw-r--r--  arch/arm/include/asm/irq.h | 5
-rw-r--r--  arch/arm/include/asm/irqflags.h | 22
-rw-r--r--  arch/arm/include/asm/jump_label.h | 3
-rw-r--r--  arch/arm/include/asm/kgdb.h | 3
-rw-r--r--  arch/arm/include/asm/kprobes.h | 17
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 220
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 85
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h | 47
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 211
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 234
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 56
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 171
-rw-r--r--  arch/arm/include/asm/kvm_psci.h | 27
-rw-r--r--  arch/arm/include/asm/localtimer.h | 34
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 28
-rw-r--r--  arch/arm/include/asm/mach/irq.h | 35
-rw-r--r--  arch/arm/include/asm/mach/map.h | 27
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 37
-rw-r--r--  arch/arm/include/asm/mach/time.h | 30
-rw-r--r--  arch/arm/include/asm/mcpm.h | 259
-rw-r--r--  arch/arm/include/asm/memblock.h | 4
-rw-r--r--  arch/arm/include/asm/memory.h | 193
-rw-r--r--  arch/arm/include/asm/mmu.h | 11
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 45
-rw-r--r--  arch/arm/include/asm/module.h | 2
-rw-r--r--  arch/arm/include/asm/mpu.h | 76
-rw-r--r--  arch/arm/include/asm/neon.h | 36
-rw-r--r--  arch/arm/include/asm/opcodes-sec.h | 24
-rw-r--r--  arch/arm/include/asm/opcodes.h | 1
-rw-r--r--  arch/arm/include/asm/outercache.h | 69
-rw-r--r--  arch/arm/include/asm/page.h | 4
-rw-r--r--  arch/arm/include/asm/pci.h | 10
-rw-r--r--  arch/arm/include/asm/percpu.h | 11
-rw-r--r--  arch/arm/include/asm/pgalloc.h | 12
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h | 9
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h | 29
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 123
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h | 2
-rw-r--r--  arch/arm/include/asm/pgtable.h | 40
-rw-r--r--  arch/arm/include/asm/pmu.h | 2
-rw-r--r--  arch/arm/include/asm/probes.h | 43
-rw-r--r--  arch/arm/include/asm/proc-fns.h | 30
-rw-r--r--  arch/arm/include/asm/processor.h | 37
-rw-r--r--  arch/arm/include/asm/prom.h | 8
-rw-r--r--  arch/arm/include/asm/psci.h | 48
-rw-r--r--  arch/arm/include/asm/ptrace.h | 18
-rw-r--r--  arch/arm/include/asm/sched_clock.h | 14
-rw-r--r--  arch/arm/include/asm/setup.h | 30
-rw-r--r--  arch/arm/include/asm/signal.h | 18
-rw-r--r--  arch/arm/include/asm/smp.h | 17
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 27
-rw-r--r--  arch/arm/include/asm/smp_scu.h | 36
-rw-r--r--  arch/arm/include/asm/smp_twd.h | 8
-rw-r--r--  arch/arm/include/asm/spinlock.h | 149
-rw-r--r--  arch/arm/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/arm/include/asm/suspend.h | 5
-rw-r--r--  arch/arm/include/asm/switch_to.h | 10
-rw-r--r--  arch/arm/include/asm/sync_bitops.h | 1
-rw-r--r--  arch/arm/include/asm/syscall.h | 11
-rw-r--r--  arch/arm/include/asm/system.h | 7
-rw-r--r--  arch/arm/include/asm/system_info.h | 1
-rw-r--r--  arch/arm/include/asm/system_misc.h | 6
-rw-r--r--  arch/arm/include/asm/thread_info.h | 32
-rw-r--r--  arch/arm/include/asm/timex.h | 6
-rw-r--r--  arch/arm/include/asm/tlb.h | 52
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 262
-rw-r--r--  arch/arm/include/asm/tls.h | 40
-rw-r--r--  arch/arm/include/asm/topology.h | 3
-rw-r--r--  arch/arm/include/asm/trusted_foundations.h | 74
-rw-r--r--  arch/arm/include/asm/types.h | 40
-rw-r--r--  arch/arm/include/asm/uaccess.h | 10
-rw-r--r--  arch/arm/include/asm/unified.h | 4
-rw-r--r--  arch/arm/include/asm/unistd.h | 13
-rw-r--r--  arch/arm/include/asm/uprobes.h | 45
-rw-r--r--  arch/arm/include/asm/v7m.h | 56
-rw-r--r--  arch/arm/include/asm/virt.h | 16
-rw-r--r--  arch/arm/include/asm/word-at-a-time.h | 18
-rw-r--r--  arch/arm/include/asm/xen/events.h | 5
-rw-r--r--  arch/arm/include/asm/xen/hypercall.h | 18
-rw-r--r--  arch/arm/include/asm/xen/hypervisor.h | 2
-rw-r--r--  arch/arm/include/asm/xen/interface.h | 2
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h | 50
-rw-r--r--  arch/arm/include/asm/xen/page.h | 64
-rw-r--r--  arch/arm/include/asm/xor.h | 73
139 files changed, 4478 insertions, 1644 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9..f5a35760198 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,23 +7,28 @@ generic-y += current.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
+generic-y += preempt.h
generic-y += resource.h
+generic-y += rwsem.h
generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
@@ -31,5 +36,4 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += unaligned.h
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70..00000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
- struct task_struct *tsk = current;
-
- dump->magic = CMAGIC;
- dump->start_code = tsk->mm->start_code;
- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
- dump->u_ssize = 0;
-
- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
- if (dump->start_stack < 0x04000000)
- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
- dump->regs = *regs;
- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index d40229d9a1c..0704e0cf557 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,28 +1,129 @@
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H
+#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <clocksource/arm_arch_timer.h>
#ifdef CONFIG_ARM_ARCH_TIMER
-int arch_timer_of_register(void);
-int arch_timer_sched_clock_init(void);
-struct timecounter *arch_timer_get_timecounter(void);
-#else
-static inline int arch_timer_of_register(void)
+int arch_timer_arch_init(void);
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
+{
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+ break;
+ }
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+ break;
+ }
+ }
+
+ isb();
+}
+
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
- return -ENXIO;
+ u32 val = 0;
+
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+ break;
+ }
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
+ break;
+ }
+ }
+
+ return val;
+}
+
+static inline u32 arch_timer_get_cntfrq(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+ return val;
}
-static inline int arch_timer_sched_clock_init(void)
+static inline u64 arch_counter_get_cntvct(void)
{
- return -ENXIO;
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
}
-static inline struct timecounter *arch_timer_get_timecounter(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
- return NULL;
+ u32 cntkctl;
+ asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+ return cntkctl;
}
+
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
+ asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
+}
+
+static inline void arch_counter_set_user_access(void)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+
+ /* Disable user access to both physical/virtual counters/timers */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_VCT_ACCESS_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+}
+
#endif
#endif
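
Since both arguments are compile-time constants at every call site, each switch above collapses to a single mrc/mcr instruction. A minimal usage sketch, not part of the patch (kernel context and the ARCH_TIMER_* constants from <clocksource/arm_arch_timer.h> assumed), programming a one-shot event on the virtual timer:

static void example_set_next_event(u32 ticks)
{
	u32 ctrl = arch_timer_reg_read_cp15(ARCH_TIMER_VIRT_ACCESS,
					    ARCH_TIMER_REG_CTRL);

	ctrl |= ARCH_TIMER_CTRL_ENABLE;		/* enable the timer */
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;	/* unmask its interrupt */

	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
				  ARCH_TIMER_REG_TVAL, ticks);
	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
				  ARCH_TIMER_REG_CTRL, ctrl);
}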
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200aa4b..57f0584e8d9 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -23,6 +23,7 @@
#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>
#define IOMEM(x) (x)
@@ -30,8 +31,8 @@
* Endian independent macros for shifting bytes within registers.
*/
#ifndef __ARMEB__
-#define pull lsr
-#define push lsl
+#define lspull lsr
+#define lspush lsl
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
@@ -41,8 +42,8 @@
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
-#define pull lsl
-#define push lsr
+#define lspull lsl
+#define lspush lsr
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
@@ -53,6 +54,13 @@
#define put_byte_3 lsl #0
#endif
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
/*
* Data preload for architectures that support it
*/
@@ -136,7 +144,11 @@
* assumes FIQs are enabled, and that the processor is in SVC mode.
*/
.macro save_and_disable_irqs, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ mrs \oldcpsr, primask
+#else
mrs \oldcpsr, cpsr
+#endif
disable_irq
.endm
@@ -150,7 +162,11 @@
* guarantee that this will preserve the flags.
*/
.macro restore_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ msr primask, \oldcpsr
+#else
msr cpsr_c, \oldcpsr
+#endif
.endm
.macro restore_irqs, oldcpsr
@@ -159,6 +175,47 @@
restore_irqs_notrace \oldcpsr
.endm
+/*
+ * Get current thread_info.
+ */
+ .macro get_thread_info, rd
+ ARM( mov \rd, sp, lsr #13 )
+ THUMB( mov \rd, sp )
+ THUMB( lsr \rd, \rd, #13 )
+ mov \rd, \rd, lsl #13
+ .endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+ .macro inc_preempt_count, ti, tmp
+ ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+ add \tmp, \tmp, #1 @ increment it
+ str \tmp, [\ti, #TI_PREEMPT]
+ .endm
+
+ .macro dec_preempt_count, ti, tmp
+ ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+ sub \tmp, \tmp, #1 @ decrement it
+ str \tmp, [\ti, #TI_PREEMPT]
+ .endm
+
+ .macro dec_preempt_count_ti, ti, tmp
+ get_thread_info \ti
+ dec_preempt_count \ti, \tmp
+ .endm
+#else
+ .macro inc_preempt_count, ti, tmp
+ .endm
+
+ .macro dec_preempt_count, ti, tmp
+ .endm
+
+ .macro dec_preempt_count_ti, ti, tmp
+ .endm
+#endif
+
#define USER(x...) \
9999: x; \
.pushsection __ex_table,"a"; \
@@ -212,9 +269,9 @@
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
.ifeqs "\mode","arm"
- ALT_SMP(dmb)
+ ALT_SMP(dmb ish)
.else
- ALT_SMP(W(dmb))
+ ALT_SMP(W(dmb) ish)
.endif
#elif __LINUX_ARM_ARCH__ == 6
ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
@@ -229,7 +286,14 @@
#endif
.endm
-#ifdef CONFIG_THUMB2_KERNEL
+#if defined(CONFIG_CPU_V7M)
+ /*
+ * setmode is used to assert to be in svc mode during boot. For v7-M
+ * this is done in __v7m_setup, so setmode can be empty here.
+ */
+ .macro setmode, mode, reg
+ .endm
+#elif defined(CONFIG_THUMB2_KERNEL)
.macro setmode, mode, reg
mov \reg, #\mode
msr cpsr_c, \reg
@@ -246,18 +310,14 @@
*
* This macro is intended for forcing the CPU into SVC mode at boot time.
* you cannot return to the original mode.
- *
- * Beware, it also clobers LR.
*/
.macro safe_svcmode_maskall reg:req
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
mrs \reg , cpsr
- mov lr , \reg
- and lr , lr , #MODE_MASK
- cmp lr , #HYP_MODE
- orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT
+ eor \reg, \reg, #HYP_MODE
+ tst \reg, #MODE_MASK
bic \reg , \reg , #MODE_MASK
- orr \reg , \reg , #SVC_MODE
+ orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB( orr \reg , \reg , #PSR_T_BIT )
bne 1f
orr \reg, \reg, #PSR_A_BIT
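
The get_thread_info macro added above relies on ARM's 8 KiB (2^13-byte) kernel stacks: clearing the low 13 bits of sp lands on the thread_info sitting at the base of the current stack. A C-level sketch of the same computation (illustration only; the real accessor is current_thread_info() in <asm/thread_info.h>, and struct thread_info comes from there):

#define EXAMPLE_THREAD_SIZE	8192	/* 1 << 13, assumed stack size */

static inline struct thread_info *example_thread_info(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(EXAMPLE_THREAD_SIZE - 1));
}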
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61faa3a..3040359094d 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,6 +12,7 @@
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
+#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
unsigned long tmp;
int result;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
@@ -58,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%3]\n"
@@ -79,6 +82,7 @@ static inline void atomic_sub(int i, atomic_t *v)
unsigned long tmp;
int result;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
@@ -96,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%3]\n"
@@ -114,9 +119,11 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
- unsigned long oldval, res;
+ int oldval;
+ unsigned long res;
smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -134,19 +141,31 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
- unsigned long tmp, tmp2;
+ int oldval, newval;
+ unsigned long tmp;
- __asm__ __volatile__("@ atomic_clear_mask\n"
-"1: ldrex %0, [%3]\n"
-" bic %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
- : "r" (addr), "Ir" (mask)
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1: ldrex %0, [%4]\n"
+" teq %0, %5\n"
+" beq 2f\n"
+" add %1, %0, %6\n"
+" strex %2, %1, [%4]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
: "cc");
+
+ if (oldval != u)
+ smp_mb();
+
+ return oldval;
}
#else /* ARM_ARCH_6 */
@@ -197,19 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- *addr &= ~mask;
- raw_local_irq_restore(flags);
-}
-
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
@@ -220,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
@@ -231,21 +241,39 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
- u64 __aligned(8) counter;
+ long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
-static inline u64 atomic64_read(const atomic64_t *v)
+#ifdef CONFIG_ARM_LPAE
+static inline long long atomic64_read(const atomic64_t *v)
{
- u64 result;
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
+#else
+static inline long long atomic64_read(const atomic64_t *v)
+{
+ long long result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
@@ -256,10 +284,11 @@ static inline u64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
{
- u64 tmp;
+ long long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
@@ -269,16 +298,18 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
: "r" (&v->counter), "r" (i)
: "cc");
}
+#endif
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -287,17 +318,18 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
: "cc");
}
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -310,15 +342,16 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
return result;
}
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -327,17 +360,18 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
: "cc");
}
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -350,12 +384,14 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
return result;
}
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+ long long new)
{
- u64 oldval;
+ long long oldval;
unsigned long res;
smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -374,12 +410,13 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
return oldval;
}
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
+ prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -395,18 +432,19 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
return result;
}
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
-" teq %H0, #0\n"
+" subs %Q0, %Q0, #1\n"
+" sbc %R0, %R0, #0\n"
+" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
@@ -421,13 +459,14 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- u64 val;
+ long long val;
unsigned long tmp;
int ret = 1;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
@@ -435,8 +474,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
-" adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adds %Q0, %Q0, %Q6\n"
+" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 00000000000..1714800fa11
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+ return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
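
A hedged usage sketch (not from this patch) of the get/put pair documented above: code that must not race with a switcher state change takes the reference, checks whether the switcher is live, and always drops the reference afterwards.

static int example_do_work_unless_switching(void)
{
	int ret = -EBUSY;

	if (!bL_switcher_get_enabled()) {
		/* switcher is disabled and stays so until the put below */
		ret = 0;	/* ... do the cluster-sensitive work here ... */
	}
	bL_switcher_put_enabled();	/* must always pair with the get */
	return ret;
}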
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 8dcd9c702d9..c6a3e73a6e2 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -14,27 +14,27 @@
#endif
#if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
: : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
#ifdef CONFIG_ARCH_HAS_BARRIERS
@@ -42,7 +42,7 @@
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dsb()
-#define wmb() mb()
+#define wmb() do { dsb(st); outer_sync(); } while (0)
#else
#define mb() barrier()
#define rmb() barrier()
@@ -54,15 +54,33 @@
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
+#define smp_mb() dmb(ish)
+#define smp_rmb() smp_mb()
+#define smp_wmb() dmb(ishst)
#endif
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */
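
An illustrative producer/consumer pairing for the new smp_store_release()/smp_load_acquire() macros (sketch only, not from this patch): the release orders the payload write before the flag write, the acquire orders the flag read before the payload read.

static int example_payload;
static int example_ready;

static void example_producer(int value)
{
	example_payload = value;
	smp_store_release(&example_ready, 1);	/* publish after payload */
}

static int example_consumer(void)
{
	if (smp_load_acquire(&example_ready))	/* flag read ordered first */
		return example_payload;
	return -1;				/* not ready yet */
}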
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e691ec91e4d..56380995f4c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -25,9 +25,7 @@
#include <linux/compiler.h>
#include <linux/irqflags.h>
-
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+#include <asm/barrier.h>
/*
* These functions are the basis of our bit ops.
@@ -254,25 +252,59 @@ static inline int constant_fls(int x)
}
/*
- * On ARMv5 and above those functions can be implemented around
- * the clz instruction for much better code efficiency.
+ * On ARMv5 and above those functions can be implemented around the
+ * clz instruction for much better code efficiency. __clz returns
+ * the number of leading zeros, zero input will return 32, and
+ * 0x80000000 will return 0.
*/
+static inline unsigned int __clz(unsigned int x)
+{
+ unsigned int ret;
+
+ asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
+
+ return ret;
+}
+/*
+ * fls() returns zero if the input is zero, otherwise returns the bit
+ * position of the last set bit, where the LSB is 1 and MSB is 32.
+ */
static inline int fls(int x)
{
- int ret;
-
if (__builtin_constant_p(x))
return constant_fls(x);
- asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
- ret = 32 - ret;
- return ret;
+ return 32 - __clz(x);
+}
+
+/*
+ * __fls() returns the bit position of the last bit set, where the
+ * LSB is 0 and MSB is 31. Zero input is undefined.
+ */
+static inline unsigned long __fls(unsigned long x)
+{
+ return fls(x) - 1;
+}
+
+/*
+ * ffs() returns zero if the input was zero, otherwise returns the bit
+ * position of the first set bit, where the LSB is 1 and MSB is 32.
+ */
+static inline int ffs(int x)
+{
+ return fls(x & -x);
+}
+
+/*
+ * __ffs() returns the bit position of the first bit set, where the
+ * LSB is 0 and MSB is 31. Zero input is undefined.
+ */
+static inline unsigned long __ffs(unsigned long x)
+{
+ return ffs(x) - 1;
}
-#define __fls(x) (fls(x) - 1)
-#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
-#define __ffs(x) (ffs(x) - 1)
#define ffz(x) __ffs( ~(x) )
#endif
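
Worked values for the rewritten helpers, as a quick sanity check of the conventions spelled out in the comments above (illustration, not part of the patch):

/*
 *	__clz(0)           == 32	__clz(0x80000000)  == 0
 *	fls(0)             == 0	fls(0x80000000)    == 32
 *	__fls(0x80000000)  == 31	(__fls(0) is undefined)
 *	ffs(0)             == 0	ffs(0x80000000)    == 32
 *	__ffs(8)           == 3	(__ffs(0) is undefined)
 */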
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c3653..b274bde2490 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
#define _ASMARM_BUG_H
#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_BUG
@@ -12,10 +14,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
#endif
@@ -33,7 +35,7 @@
#define __BUG(__file, __line, __value) \
do { \
- asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
@@ -48,7 +50,7 @@ do { \
#define __BUG(__file, __line, __value) \
do { \
- asm volatile(BUG_INSTR_TYPE #__value); \
+ asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd1..fd43f7f55b7 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb(ishst);
}
/*
@@ -268,8 +269,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(start,end) \
- __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
@@ -354,7 +352,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
* set_pte_at() called from vmap_pte_range() does not
* have a DSB after cleaning the cache line.
*/
- dsb();
+ dsb(ishst);
}
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
@@ -363,4 +361,132 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
flush_cache_all();
}
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path. It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located to
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+ __cpuc_clean_dcache_area(_p, size);
+ outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU. We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+ if (outer_cache.flush_range) {
+ /*
+ * Ensure dirty data migrated from other CPUs into our cache
+ * are cleaned out safely before the outer cache is cleaned:
+ */
+ __cpuc_clean_dcache_area(_p, size);
+
+ /* Clean and invalidate stale data for *p from outer ... */
+ outer_flush_range(__pa(_p), __pa(_p + size));
+ }
+#endif
+
+ /* ... and inner cache: */
+ __cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ * fp is preserved to the stack explicitly prior disabling the cache
+ * since adding it to the clobber list is incompatible with having
+ * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
+ * trampoline are inserted by the linker and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "stmfd sp!, {fp, ip} \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "clrex \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb \n\t" \
+ "ldmfd sp!, {fp, ip}" \
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r9","r10","lr","memory" )
+
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+ void *kaddr, unsigned long len);
#endif
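
A hedged sketch (not from this patch) of the protocol the comments above describe: give the shared state word its own writeback granule, publish with sync_cache_w(), and re-read with sync_cache_r() so a non-cached observer and a cached observer stay in agreement.

struct example_sync_flag {
	int value;
} __attribute__((aligned(__CACHE_WRITEBACK_GRANULE)));

static struct example_sync_flag example_flag;

static void example_publish(int v)
{
	example_flag.value = v;
	sync_cache_w(&example_flag.value);	/* clean out to main memory */
}

static int example_read_from_other_side(void)
{
	sync_cache_r(&example_flag.value);	/* flush any stale copy first */
	return example_flag.value;
}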
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc1643086..52331511547 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -87,19 +87,33 @@ static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
- __asm__(
- "adds %0, %1, %2 @ csum_tcpudp_nofold \n\
- adcs %0, %0, %3 \n"
+ u32 lenprot = len | proto << 16;
+ if (__builtin_constant_p(sum) && sum == 0) {
+ __asm__(
+ "adds %0, %1, %2 @ csum_tcpudp_nofold0 \n\t"
#ifdef __ARMEB__
- "adcs %0, %0, %4 \n"
+ "adcs %0, %0, %3 \n\t"
#else
- "adcs %0, %0, %4, lsl #8 \n"
+ "adcs %0, %0, %3, ror #8 \n\t"
#endif
- "adcs %0, %0, %5 \n\
- adc %0, %0, #0"
- : "=&r"(sum)
- : "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
- : "cc");
+ "adc %0, %0, #0"
+ : "=&r" (sum)
+ : "r" (daddr), "r" (saddr), "r" (lenprot)
+ : "cc");
+ } else {
+ __asm__(
+ "adds %0, %1, %2 @ csum_tcpudp_nofold \n\t"
+ "adcs %0, %0, %3 \n\t"
+#ifdef __ARMEB__
+ "adcs %0, %0, %4 \n\t"
+#else
+ "adcs %0, %0, %4, ror #8 \n\t"
+#endif
+ "adc %0, %0, #0"
+ : "=&r"(sum)
+ : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
+ : "cc");
+ }
return sum;
}
/*
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 80751c15c30..4e8a4b27d7c 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -14,12 +14,14 @@
#include <linux/slab.h>
+#ifndef CONFIG_COMMON_CLK
#ifdef CONFIG_HAVE_MACH_CLKDEV
#include <mach/clkdev.h>
#else
#define __clk_get(clk) ({ 1; })
#define __clk_put(clk) do { } while (0)
#endif
+#endif
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 7eb18c1d8d6..abb2c3769b0 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
#define __ASM_ARM_CMPXCHG_H
#include <linux/irqflags.h>
+#include <linux/prefetch.h>
#include <asm/barrier.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#endif
smp_mb();
+ prefetchw((const void *)ptr);
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
{
unsigned long oldval, res;
+ prefetchw((const void *)ptr);
+
switch (size) {
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
@@ -223,6 +227,44 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long oldval;
+ unsigned long res;
+
+ prefetchw(ptr);
+
+ __asm__ __volatile__(
+"1: ldrexd %1, %H1, [%3]\n"
+" teq %1, %4\n"
+" teqeq %H1, %H4\n"
+" bne 2f\n"
+" strexd %0, %5, %H5, [%3]\n"
+" teq %0, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+ : "r" (ptr), "r" (old), "r" (new)
+ : "cc");
+
+ return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long ret;
+
+ smp_mb();
+ ret = __cmpxchg64(ptr, old, new);
+ smp_mb();
+
+ return ret;
+}
+
#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
@@ -230,18 +272,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
- atomic64_t, \
- counter), \
- (unsigned long)(o), \
- (unsigned long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
- local64_t, \
- a), \
- (unsigned long)(o), \
- (unsigned long)(n)))
+ ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
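
A usage sketch for the new cmpxchg64() (illustration only, not from this patch): the usual read/modify/compare-exchange retry loop, here applied to a 64-bit counter. A torn plain read simply causes the compare to fail and the loop to retry.

static unsigned long long example_double(unsigned long long *ctr)
{
	unsigned long long old, new;

	do {
		old = *ctr;
		new = old * 2;
	} while (cmpxchg64(ctr, old, new) != old);

	return new;
}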
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a6..c3f11524f10 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -23,6 +23,11 @@
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
+#ifdef CONFIG_MMU
+#define CR_HA (1 << 17) /* Hardware management of Access Flag */
+#else
+#define CR_BR (1 << 17) /* MPU Background region enable (PMSA) */
+#endif
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */
@@ -37,31 +42,42 @@
#ifndef __ASSEMBLY__
#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high() (cr_alignment & CR_V)
+#define vectors_high() (get_cr() & CR_V)
#else
#define vectors_high() (0)
#endif
-extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
+#ifdef CONFIG_CPU_CP15
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
-static inline unsigned int get_cr(void)
+static inline unsigned long get_cr(void)
{
- unsigned int val;
+ unsigned long val;
asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
return val;
}
-static inline void set_cr(unsigned int val)
+static inline void set_cr(unsigned long val)
{
asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
: : "r" (val) : "cc");
isb();
}
-#ifndef CONFIG_SMP
-extern void adjust_cr(unsigned long mask, unsigned long set);
-#endif
+static inline unsigned int get_auxcr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val));
+ return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
+ : : "r" (val));
+ isb();
+}
#define CPACC_FULL(n) (3 << (n * 2))
#define CPACC_SVC(n) (1 << (n * 2))
@@ -82,6 +98,22 @@ static inline void set_copro_access(unsigned int val)
isb();
}
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment is tightly coupled to cp15 (at least in the minds of the
+ * developers). Yielding 0 for machines without a cp15 (and making it
+ * read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_alignment UL(0)
+
+static inline unsigned long get_cr(void)
+{
+ return 0;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
#endif
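
A small usage sketch (not from this patch): with get_cr() now returning 0 on !CONFIG_CPU_CP15 builds, SCTLR feature tests like the one below compile cleanly on those machines as well. CR_C is the data-cache enable bit defined near the top of this header.

static inline int example_dcache_enabled(void)
{
	return (get_cr() & CR_C) != 0;
}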
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index a59dcb5ab5f..8c2b7321a47 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,8 +8,26 @@
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
+#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
-
+#define CPUID_REVIDR 6
+
+#ifdef CONFIG_CPU_V7M
+#define CPUID_EXT_PFR0 0x40
+#define CPUID_EXT_PFR1 0x44
+#define CPUID_EXT_DFR0 0x48
+#define CPUID_EXT_AFR0 0x4c
+#define CPUID_EXT_MMFR0 0x50
+#define CPUID_EXT_MMFR1 0x54
+#define CPUID_EXT_MMFR2 0x58
+#define CPUID_EXT_MMFR3 0x5c
+#define CPUID_EXT_ISAR0 0x60
+#define CPUID_EXT_ISAR1 0x64
+#define CPUID_EXT_ISAR2 0x68
+#define CPUID_EXT_ISAR3 0x6c
+#define CPUID_EXT_ISAR4 0x70
+#define CPUID_EXT_ISAR5 0x74
+#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
#define CPUID_EXT_DFR0 "c1, 2"
@@ -24,6 +42,7 @@
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
+#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)
@@ -32,12 +51,34 @@
#define MPIDR_HWID_BITMASK 0xFFFFFF
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_INTEL 0x69
+
+#define ARM_CPU_PART_ARM1136 0xB360
+#define ARM_CPU_PART_ARM1156 0xB560
+#define ARM_CPU_PART_ARM1176 0xB760
+#define ARM_CPU_PART_ARM11MPCORE 0xB020
+#define ARM_CPU_PART_CORTEX_A8 0xC080
+#define ARM_CPU_PART_CORTEX_A9 0xC090
+#define ARM_CPU_PART_CORTEX_A5 0xC050
+#define ARM_CPU_PART_CORTEX_A15 0xC0F0
+#define ARM_CPU_PART_CORTEX_A7 0xC070
+#define ARM_CPU_PART_CORTEX_A12 0xC0D0
+#define ARM_CPU_PART_CORTEX_A17 0xC0E0
+
+#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
+#define ARM_CPU_XSCALE_ARCH_V1 0x2000
+#define ARM_CPU_XSCALE_ARCH_V2 0x4000
+#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
@@ -50,21 +91,56 @@ extern unsigned int processor_id;
: "cc"); \
__val; \
})
+
+/*
+ * The memory clobber prevents gcc 4.5 from reordering the mrc before
+ * any is_smp() tests, which can cause undefined instruction aborts on
+ * ARM1136 r0 due to the missing extended CP15 registers.
+ */
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, " ext_reg \
: "=r" (__val) \
: \
- : "cc"); \
+ : "memory"); \
__val; \
})
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
+
+#elif defined(CONFIG_CPU_V7M)
+
+#include <asm/io.h>
+#include <asm/v7m.h>
+
+#define read_cpuid(reg) \
+ ({ \
+ WARN_ON_ONCE(1); \
+ 0; \
+ })
+
+static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
+{
+ return readl(BASEADDR_V7M_SCB + offset);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */
/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15 so warn on other usages.
+ */
+#define read_cpuid(reg) \
+ ({ \
+ WARN_ON_ONCE(1); \
+ 0; \
+ })
+
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#ifdef CONFIG_CPU_CP15
+/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
* rather than directly reading processor_id or read_cpuid() directly.
@@ -74,6 +150,37 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
+#elif defined(CONFIG_CPU_V7M)
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+ return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+ return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+ return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CPUID_CACHETYPE);
@@ -115,4 +222,23 @@ static inline int cpu_is_xsc3(void)
#define cpu_is_xscale() 1
#endif
+/*
+ * Marvell's PJ4 and PJ4B cores are based on V7 version,
+ * but require a specical sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
+ */
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
+static inline int cpu_is_pj4(void)
+{
+ unsigned int id;
+
+ id = read_cpuid_id();
+ if ((id & 0xff0fff00) == 0x560f5800)
+ return 1;
+
+ return 0;
+}
+#else
+#define cpu_is_pj4() 0
+#endif
#endif
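
A usage sketch (not from this patch) combining the new decoding helpers with the part-number constants added above:

static inline int example_is_cortex_a15(void)
{
	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
	       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
}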
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index f2e5cad3f30..2381199acb7 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -2,6 +2,7 @@
#define __ASMARM_CTI_H
#include <asm/io.h>
+#include <asm/hardware/coresight.h>
/* The registers' definition is from section 3.2 of
* Embedded Cross Trigger Revision: r0p0
@@ -35,11 +36,6 @@
#define LOCKACCESS 0xFB0
#define LOCKSTATUS 0xFB4
-/* write this value to LOCKACCESS will unlock the module, and
- * other value will lock the module
- */
-#define LOCKCODE 0xC5ACCE55
-
/**
* struct cti - cross trigger interface struct
* @base: mapped virtual address for the cti base
@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
*/
static inline void cti_unlock(struct cti *cti)
{
- __raw_writel(LOCKCODE, cti->base + LOCKACCESS);
+ __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
}
/**
@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
*/
static inline void cti_lock(struct cti *cti)
{
- __raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
+ __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
}
#endif
diff --git a/arch/arm/include/asm/dcc.h b/arch/arm/include/asm/dcc.h
new file mode 100644
index 00000000000..b74899de077
--- /dev/null
+++ b/arch/arm/include/asm/dcc.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/barrier.h>
+
+static inline u32 __dcc_getstatus(void)
+{
+ u32 __ret;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg"
+ : "=r" (__ret) : : "cc");
+
+ return __ret;
+}
+
+static inline char __dcc_getchar(void)
+{
+ char __c;
+
+ asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
+ : "=r" (__c));
+ isb();
+
+ return __c;
+}
+
+static inline void __dcc_putchar(char c)
+{
+ asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
+ : /* no output register */
+ : "r" (c));
+ isb();
+}
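
A hedged usage sketch (not from this patch) of polled character output over the DCC using the helpers above. The status-bit position below is an assumption for illustration (on ARMv6/v7 debug the "TX full" flag is commonly bit 29 of the comms control register); check the core's TRM before relying on it.

#define EXAMPLE_DCC_STATUS_TX	(1 << 29)	/* assumed: write buffer full */

static inline void example_dcc_putc(char c)
{
	while (__dcc_getstatus() & EXAMPLE_DCC_STATUS_TX)
		cpu_relax();		/* wait for the debugger to drain it */
	__dcc_putchar(c);
}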
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index ab98fdd083b..dff714d886d 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -24,6 +24,7 @@ extern struct arm_delay_ops {
void (*delay)(unsigned long);
void (*const_udelay)(unsigned long);
void (*udelay)(unsigned long);
+ unsigned long ticks_per_jiffy;
} arm_delay_ops;
#define __delay(n) arm_delay_ops.delay(n)
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index b69c0d3285f..dc662fca923 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -27,4 +27,10 @@ struct pdev_archdata {
#endif
};
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index fe92ccf1d0b..662c7bd0610 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -46,7 +46,7 @@
__rem; \
})
-#if __GNUC__ < 4
+#if __GNUC__ < 4 || !defined(CONFIG_AEABI)
/*
* gcc versions earlier than 4.0 are simply too problematic for the
@@ -156,7 +156,7 @@
/* Select the best insn combination to perform the */ \
/* actual __m * __n / (__p << 64) operation. */ \
if (!__c) { \
- asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
+ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
"mov %Q0, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n) \
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index 3ed37b4d93d..4f8e9e5514b 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -2,10 +2,9 @@
#define ASMARM_DMA_CONTIGUOUS_H
#ifdef __KERNEL__
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
#include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 799b09409fa..8e3fcb924db 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,14 +7,17 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
+#include <linux/kref.h>
struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
- void *bitmap;
- size_t bits;
- unsigned int order;
+ unsigned long **bitmaps; /* array of bitmaps */
+ unsigned int nr_bitmaps; /* nr of elements in array */
+ unsigned int extensions;
+ size_t bitmap_size; /* size of a single bitmap */
+ size_t bits; /* per bitmap */
dma_addr_t base;
spinlock_t lock;
@@ -22,13 +25,13 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
- int order);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
#endif /* __KERNEL__ */
#endif
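With the allocation order gone from the interface, a platform that wants per-device IOMMU translation only names the bus, the IOVA window base and its size, and the new arm_iommu_detach_device() gives it a clean teardown path. A hedged usage sketch, assuming the usual ERR_PTR return convention, with dev being the struct device to attach and the window placement purely illustrative:

	struct dma_iommu_mapping *mapping;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000, SZ_128M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	if (arm_iommu_attach_device(dev, mapping)) {
		arm_iommu_release_mapping(mapping);
		return -ENODEV;
	}

	/* ... device uses the IOMMU-backed DMA API ... */

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);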
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b95150..c45b61a4b4a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (xen_initial_domain())
+ return xen_dma_ops;
+ else
+ return __generic_dma_ops(dev);
+}
+
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
BUG_ON(!dev);
@@ -47,23 +58,40 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
+ if (dev)
+ pfn -= dev->dma_pfn_offset;
return (dma_addr_t)__pfn_to_bus(pfn);
}
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
- return __bus_to_pfn(addr);
+ unsigned long pfn = __bus_to_pfn(addr);
+
+ if (dev)
+ pfn += dev->dma_pfn_offset;
+
+ return pfn;
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
+ if (dev) {
+ unsigned long pfn = dma_to_pfn(dev, addr);
+
+ return phys_to_virt(__pfn_to_phys(pfn));
+ }
+
return (void *)__bus_to_virt((unsigned long)addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
+ if (dev)
+ return pfn_to_dma(dev, virt_to_pfn(addr));
+
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
+
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
@@ -86,6 +114,53 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
}
#endif
+/* The ARM override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
+static inline int set_arch_dma_coherent_ops(struct device *dev)
+{
+ set_dma_ops(dev, &arm_coherent_dma_ops);
+ return 0;
+}
+#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ unsigned int offset = paddr & ~PAGE_MASK;
+ return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ unsigned int offset = dev_addr & ~PAGE_MASK;
+ return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ u64 limit, mask;
+
+ if (!dev->dma_mask)
+ return 0;
+
+ mask = *dev->dma_mask;
+
+ limit = (mask + 1) & ~mask;
+ if (limit && size > limit)
+ return 0;
+
+ if ((addr | (addr + size - 1)) & ~mask)
+ return 0;
+
+ return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
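The pfn_to_dma()/dma_to_pfn() changes make both translations honour a per-device dma_pfn_offset, so buses whose view of memory is shifted relative to the CPU get consistent addresses in both directions. As a worked example with invented numbers, and assuming the common case where the bus address equals the physical address: with dev->dma_pfn_offset = 0x80000 (a 2 GiB shift), a buffer at physical 0x80100000 translates to bus address 0x00100000 and back.

	/* Illustration only: the offset and addresses are made-up values. */
	dma_addr_t bus   = pfn_to_dma(dev, __phys_to_pfn(0x80100000)); /* -> 0x00100000 */
	phys_addr_t phys = __pfn_to_phys(dma_to_pfn(dev, bus));        /* -> 0x80100000 */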
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 5694a0d6576..99084431d6a 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -8,8 +8,8 @@
#define MAX_DMA_ADDRESS 0xffffffffUL
#else
#define MAX_DMA_ADDRESS ({ \
- extern unsigned long arm_dma_zone_size; \
- arm_dma_zone_size ? \
+ extern phys_addr_t arm_dma_zone_size; \
+ arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
#endif
@@ -105,7 +105,7 @@ extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
*/
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr) \
- __set_dma_addr(chan, bus_to_virt(addr))
+ __set_dma_addr(chan, (void *)__bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c480..f4b46d39b9c 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -19,8 +19,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
-#define EM_ARM 40
-
#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
@@ -130,4 +128,10 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
+
#endif
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
new file mode 100644
index 00000000000..2c9f10df756
--- /dev/null
+++ b/arch/arm/include/asm/firmware.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics.
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_FIRMWARE_H
+#define __ASM_ARM_FIRMWARE_H
+
+#include <linux/bug.h>
+
+/*
+ * struct firmware_ops
+ *
+ * A structure to specify available firmware operations.
+ *
+ * A filled up structure can be registered with register_firmware_ops().
+ */
+struct firmware_ops {
+ /*
+ * Inform the firmware we intend to enter CPU idle mode
+ */
+ int (*prepare_idle)(void);
+ /*
+ * Enters CPU idle mode
+ */
+ int (*do_idle)(void);
+ /*
+ * Sets boot address of specified physical CPU
+ */
+ int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
+ /*
+ * Boots specified physical CPU
+ */
+ int (*cpu_boot)(int cpu);
+ /*
+ * Initializes L2 cache
+ */
+ int (*l2x0_init)(void);
+};
+
+/* Global pointer for current firmware_ops structure, can't be NULL. */
+extern const struct firmware_ops *firmware_ops;
+
+/*
+ * call_firmware_op(op, ...)
+ *
+ * Checks if firmware operation is present and calls it,
+ * otherwise returns -ENOSYS
+ */
+#define call_firmware_op(op, ...) \
+ ((firmware_ops->op) ? firmware_ops->op(__VA_ARGS__) : (-ENOSYS))
+
+/*
+ * register_firmware_ops(ops)
+ *
+ * A function to register platform firmware_ops struct.
+ */
+static inline void register_firmware_ops(const struct firmware_ops *ops)
+{
+ BUG_ON(!ops);
+
+ firmware_ops = ops;
+}
+
+#endif
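The firmware_ops indirection gives generic code a single call site for operations that some platforms must route through secure firmware; a hook the platform does not provide simply reports -ENOSYS. A minimal sketch of the platform side, with the secondary-core release left as a placeholder rather than a real interface:

static int my_cpu_boot(int cpu)
{
	/* placeholder: release the secondary core via an SMC call or mailbox */
	return 0;
}

static const struct firmware_ops my_firmware_ops = {
	.cpu_boot = my_cpu_boot,
};

static void __init my_platform_init_early(void)
{
	register_firmware_ops(&my_firmware_ops);
}

/* later, in SMP bringup code: */
	if (call_firmware_op(cpu_boot, cpu) == -ENOSYS)
		pr_info("no firmware cpu_boot hook, using legacy boot path\n");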
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index bbae919bceb..74124b0d0d7 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -1,24 +1,11 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
-/*
- * Nothing too fancy for now.
- *
- * On ARM we already have well known fixed virtual addresses imposed by
- * the architecture such as the vector page which is located at 0xffff0000,
- * therefore a second level page table is already allocated covering
- * 0xfff00000 upwards.
- *
- * The cache flushing code in proc-xscale.S uses the virtual area between
- * 0xfffe0000 and 0xfffeffff.
- */
-
-#define FIXADDR_START 0xfff00000UL
-#define FIXADDR_TOP 0xfffe0000UL
+#define FIXADDR_START 0xffc00000UL
+#define FIXADDR_TOP 0xffe00000UL
#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)
-#define FIX_KMAP_BEGIN 0
-#define FIX_KMAP_END (FIXADDR_SIZE >> PAGE_SHIFT)
+#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)
#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
@@ -27,7 +14,7 @@ extern void __this_fixmap_does_not_exist(void);
static inline unsigned long fix_to_virt(const unsigned int idx)
{
- if (idx >= FIX_KMAP_END)
+ if (idx >= FIX_KMAP_NR_PTES)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d..f4882553fbb 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
#define fd_inb(port) inb((port))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
- IRQF_DISABLED,"floppy",NULL)
+ 0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index f89515adac6..39eb16b0066 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -52,15 +52,7 @@ extern inline void *return_address(unsigned int level)
#endif
-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e..53e69dae796 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@
#ifdef __KERNEL__
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -28,6 +23,7 @@
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
+ prefetchw(uaddr); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
smp_mb();
+ /* Prefetching cannot fault */
+ prefetchw(uaddr);
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cca9f15704e..a3c24cd5b7c 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
#undef _CACHE
#undef MULTI_CACHE
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v3
-# endif
-#endif
-
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
@@ -125,10 +117,37 @@
# endif
#endif
+#if defined(CONFIG_CPU_V7M)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE nop
+# endif
+#endif
+
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
+#ifndef __ASSEMBLER__
+static inline void nop_flush_icache_all(void) { }
+static inline void nop_flush_kern_cache_all(void) { }
+static inline void nop_flush_kern_cache_louis(void) { }
+static inline void nop_flush_user_cache_all(void) { }
+static inline void nop_flush_user_cache_range(unsigned long a,
+ unsigned long b, unsigned int c) { }
+
+static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
+static inline int nop_coherent_user_range(unsigned long a,
+ unsigned long b) { return 0; }
+static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
+
+static inline void nop_dma_flush_range(const void *a, const void *b) { }
+
+static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
+static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
+#endif
+
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76d..04e18b65665 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
* ================
*
* We have the following to choose from:
- * arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
- * v5tej_early - ARMv5 with Thumb and Java early abort handler
+ * v5t_early - ARMv5 with Thumb early abort handler
+ * v5tj_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
@@ -31,11 +31,11 @@
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
-#if defined(CONFIG_CPU_ARM710)
+#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER cpu_arm7_data_abort
+# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
@@ -47,19 +47,19 @@
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v4_early_abort
+# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV4T
+#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v4t_early_abort
+# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
@@ -71,27 +71,27 @@
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v5t_early_abort
+# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV6
+#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v6_early_abort
+# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV7
+#ifdef CONFIG_CPU_ABRT_NOMMU
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v7_early_abort
+# define CPU_DABORT_HANDLER nommu_early_abort
# endif
#endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54724b..74a8b84f3cb 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,24 @@
# endif
#endif
+#ifdef CONFIG_CPU_V7M
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v7m
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_pj4b
+# endif
+#endif
+
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df6..fe3ea776dc3 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 8
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/cache-feroceon-l2.h b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
new file mode 100644
index 00000000000..12e1588dc4f
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
@@ -0,0 +1,13 @@
+/*
+ * arch/arm/include/asm/hardware/cache-feroceon-l2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+extern void __init feroceon_l2_init(int l2_wt_override);
+extern int __init feroceon_of_init(void);
+
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 3b2c40b5bfa..3a5ec1c2565 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -26,8 +26,8 @@
#define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104
-#define L2X0_TAG_LATENCY_CTRL 0x108
-#define L2X0_DATA_LATENCY_CTRL 0x10C
+#define L310_TAG_LATENCY_CTRL 0x108
+#define L310_DATA_LATENCY_CTRL 0x10C
#define L2X0_EVENT_CNT_CTRL 0x200
#define L2X0_EVENT_CNT1_CFG 0x204
#define L2X0_EVENT_CNT0_CFG 0x208
@@ -54,53 +54,93 @@
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
-#define L2X0_ADDR_FILTER_START 0xC00
-#define L2X0_ADDR_FILTER_END 0xC04
+#define L310_ADDR_FILTER_START 0xC00
+#define L310_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
-#define L2X0_PREFETCH_CTRL 0xF60
-#define L2X0_POWER_CTRL 0xF80
-#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
-#define L2X0_STNDBY_MODE_EN (1 << 0)
+#define L310_PREFETCH_CTRL 0xF60
+#define L310_POWER_CTRL 0xF80
+#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
+#define L310_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L220 (2 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_ID_RTL_MASK 0x3f
-#define L2X0_CACHE_ID_RTL_R0P0 0x0
-#define L2X0_CACHE_ID_RTL_R1P0 0x2
-#define L2X0_CACHE_ID_RTL_R2P0 0x4
-#define L2X0_CACHE_ID_RTL_R3P0 0x5
-#define L2X0_CACHE_ID_RTL_R3P1 0x6
-#define L2X0_CACHE_ID_RTL_R3P2 0x8
+#define L210_CACHE_ID_RTL_R0P2_02 0x00
+#define L210_CACHE_ID_RTL_R0P1 0x01
+#define L210_CACHE_ID_RTL_R0P2_01 0x02
+#define L210_CACHE_ID_RTL_R0P3 0x03
+#define L210_CACHE_ID_RTL_R0P4 0x0b
+#define L210_CACHE_ID_RTL_R0P5 0x0f
+#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
+#define L310_CACHE_ID_RTL_R0P0 0x00
+#define L310_CACHE_ID_RTL_R1P0 0x02
+#define L310_CACHE_ID_RTL_R2P0 0x04
+#define L310_CACHE_ID_RTL_R3P0 0x05
+#define L310_CACHE_ID_RTL_R3P1 0x06
+#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
+#define L310_CACHE_ID_RTL_R3P2 0x08
+#define L310_CACHE_ID_RTL_R3P3 0x09
-#define L2X0_AUX_CTRL_MASK 0xc0000fff
+/* L2C auxiliary control register - bits common to L2C-210/220/310 */
+#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
+#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
+#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
+#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
+#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
+#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
+/* L2C-210/220 common bits */
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
-#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
-#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
-#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
-#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
-#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
-#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
-#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
-#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
-#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
-#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
-#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
-#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
+#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
+#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
+/* L2C-210 specific bits */
+#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
+#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
+#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
+/* L2C-220 specific bits */
+#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L220_AUX_CTRL_FWA_SHIFT 23
+#define L220_AUX_CTRL_FWA_MASK (3 << 23)
+#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
+/* L2C-310 specific bits */
+#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
+#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
+#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
+#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
+#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
+#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
+#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
+#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
-#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
-#define L2X0_LATENCY_CTRL_RD_SHIFT 4
-#define L2X0_LATENCY_CTRL_WR_SHIFT 8
+#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
+#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
+#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
-#define L2X0_ADDR_FILTER_EN 1
+#define L310_ADDR_FILTER_EN 1
+
+#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
+#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
+#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
+#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
#define L2X0_CTRL_EN 1
@@ -131,6 +171,7 @@ struct l2x0_regs {
unsigned long prefetch_ctrl;
unsigned long pwr_ctrl;
unsigned long ctrl;
+ unsigned long aux2_ctrl;
};
extern struct l2x0_regs l2x0_saved_regs;
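The renamed L310_* constants and the BIT()-based helpers let a platform's outer-cache setup build register values directly instead of open-coding shifts. A hedged sketch of PL310 latency and prefetch programming, where l2x0_base is assumed to be the ioremapped controller base and the one-cycle latencies are illustrative rather than recommended:

	/* Illustrative values only; real latencies depend on the SoC's cache RAMs. */
	u32 tag_lat  = L310_LATENCY_CTRL_RD(1) | L310_LATENCY_CTRL_WR(1) |
		       L310_LATENCY_CTRL_SETUP(1);
	u32 prefetch = L310_PREFETCH_CTRL_DATA_PREFETCH |
		       L310_PREFETCH_CTRL_INSTR_PREFETCH;

	writel_relaxed(tag_lat,  l2x0_base + L310_TAG_LATENCY_CTRL);
	writel_relaxed(tag_lat,  l2x0_base + L310_DATA_LATENCY_CTRL);
	writel_relaxed(prefetch, l2x0_base + L310_PREFETCH_CTRL);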
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5..ad774f37c47 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -24,8 +24,8 @@
#define TRACER_TIMEOUT 10000
#define etm_writel(t, v, x) \
- (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+ (writel_relaxed((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
@@ -36,7 +36,7 @@
/* CoreSight Component Registers */
#define CSCR_CLASS 0xff4
-#define UNLOCK_MAGIC 0xc5acce55
+#define CS_LAR_KEY 0xc5acce55
/* ETM control register, "ETM Architecture", 3.3.1 */
#define ETMR_CTRL 0
@@ -142,16 +142,16 @@
#define ETBFF_TRIGFL BIT(10)
#define etb_writel(t, v, x) \
- (__raw_writel((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+ (writel_relaxed((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
- do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+ do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etb_unlock(t) \
- do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+ do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#endif /* __ASM_HARDWARE_CORESIGHT_H */
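With the lock-access value now named CS_LAR_KEY and the accessors switched to the relaxed MMIO helpers, a typical programming sequence still brackets register writes with the unlock/lock pair. A brief sketch, assuming a tracer context t whose etm_regs points at the mapped ETM and with v standing in for whatever control value is needed:

	etm_unlock(t);			/* writes CS_LAR_KEY to CSMR_LOCKACCESS */
	etm_writel(t, v, ETMR_CTRL);
	etm_lock(t);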
diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S
deleted file mode 100644
index 22c689255e6..00000000000
--- a/arch/arm/include/asm/hardware/debug-8250.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/debug-8250.S
- *
- * Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/serial_reg.h>
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT]
- tst \rd, #UART_MSR_CTS
- beq 1001b
-#endif
- .endm
diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/asm/hardware/debug-pl01x.S
deleted file mode 100644
index f9fd083eff6..00000000000
--- a/arch/arm/include/asm/hardware/debug-pl01x.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/* arch/arm/include/asm/hardware/debug-pl01x.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-#include <linux/amba/serial.h>
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #UART01x_DR]
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #UART01x_FR]
- tst \rd, #UART01x_FR_TXFF
- bne 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #UART01x_FR]
- tst \rd, #UART01x_FR_BUSY
- bne 1001b
- .endm
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
deleted file mode 100644
index 4b1ce6cd477..00000000000
--- a/arch/arm/include/asm/hardware/gic.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/gic.h
- *
- * Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_HARDWARE_GIC_H
-#define __ASM_ARM_HARDWARE_GIC_H
-
-#include <linux/compiler.h>
-
-#define GIC_CPU_CTRL 0x00
-#define GIC_CPU_PRIMASK 0x04
-#define GIC_CPU_BINPOINT 0x08
-#define GIC_CPU_INTACK 0x0c
-#define GIC_CPU_EOI 0x10
-#define GIC_CPU_RUNNINGPRI 0x14
-#define GIC_CPU_HIGHPRI 0x18
-
-#define GIC_DIST_CTRL 0x000
-#define GIC_DIST_CTR 0x004
-#define GIC_DIST_ENABLE_SET 0x100
-#define GIC_DIST_ENABLE_CLEAR 0x180
-#define GIC_DIST_PENDING_SET 0x200
-#define GIC_DIST_PENDING_CLEAR 0x280
-#define GIC_DIST_ACTIVE_BIT 0x300
-#define GIC_DIST_PRI 0x400
-#define GIC_DIST_TARGET 0x800
-#define GIC_DIST_CONFIG 0xc00
-#define GIC_DIST_SOFTINT 0xf00
-
-#ifndef __ASSEMBLY__
-#include <linux/irqdomain.h>
-struct device_node;
-
-extern struct irq_chip gic_arch_extn;
-
-void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
- u32 offset, struct device_node *);
-int gic_of_init(struct device_node *node, struct device_node *parent);
-void gic_secondary_init(unsigned int);
-void gic_handle_irq(struct pt_regs *regs);
-void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
-
-static inline void gic_init(unsigned int nr, int start,
- void __iomem *dist , void __iomem *cpu)
-{
- gic_init_bases(nr, start, dist, cpu, 0, NULL);
-}
-
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 9b28f1243bd..240b29ef17d 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
return slot_cnt;
}
-static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
-{
- return 0;
-}
-
-static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->dest_addr;
- case AAU_ID:
- return hw_desc.aau->dest_addr;
- default:
- BUG();
- }
- return 0;
-}
-
-
-static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- BUG();
- return 0;
-}
-
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h
deleted file mode 100644
index 9eda7dc92ad..00000000000
--- a/arch/arm/include/asm/hardware/iop3xx-gpio.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/iop3xx-gpio.h
- *
- * IOP3xx GPIO wrappers
- *
- * Copyright (c) 2008 Arnaud Patard <arnaud.patard@rtp-net.org>
- * Based on IXP4XX gpio.h file
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
-#define __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
-
-#include <mach/hardware.h>
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-#define IOP3XX_N_GPIOS 8
-
-static inline int gpio_get_value(unsigned gpio)
-{
- if (gpio > IOP3XX_N_GPIOS)
- return __gpio_get_value(gpio);
-
- return gpio_line_get(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- if (gpio > IOP3XX_N_GPIOS) {
- __gpio_set_value(gpio, value);
- return;
- }
- gpio_line_set(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned gpio)
-{
- if (gpio < IOP3XX_N_GPIOS)
- return 0;
- else
- return __gpio_cansleep(gpio);
-}
-
-/*
- * The GPIOs are not generating any interrupt
- * Note : manuals are not clear about this
- */
-static inline int gpio_to_irq(int gpio)
-{
- return -EINVAL;
-}
-
-static inline int irq_to_gpio(int gpio)
-{
- return -EINVAL;
-}
-
-#endif
-
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe247..2594a95ff19 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -18,16 +18,9 @@
/*
* IOP3XX GPIO handling
*/
-#define GPIO_IN 0
-#define GPIO_OUT 1
-#define GPIO_LOW 0
-#define GPIO_HIGH 1
#define IOP3XX_GPIO_LINE(x) (x)
#ifndef __ASSEMBLY__
-extern void gpio_line_config(int line, int direction);
-extern int gpio_line_get(int line);
-extern void gpio_line_set(int line, int value);
extern int init_atu;
extern int iop3xx_get_init_atu(void);
#endif
@@ -37,7 +30,7 @@ extern int iop3xx_get_init_atu(void);
* IOP3XX processor registers
*/
#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
#define IOP3XX_PERIPHERAL_SIZE 0x00002000
#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
IOP3XX_PERIPHERAL_SIZE - 1)
@@ -168,11 +161,6 @@ extern int iop3xx_get_init_atu(void);
/* PERCR0 DOESN'T EXIST - index from 1! */
#define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
-/* General Purpose I/O */
-#define IOP3XX_GPOE (volatile u32 *)IOP3XX_GPIO_REG(0x0000)
-#define IOP3XX_GPID (volatile u32 *)IOP3XX_GPIO_REG(0x0004)
-#define IOP3XX_GPOD (volatile u32 *)IOP3XX_GPIO_REG(0x0008)
-
/* Timers */
#define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000)
#define IOP3XX_TU_TMR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0004)
@@ -223,11 +211,12 @@ extern int iop3xx_get_init_atu(void);
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <linux/reboot.h>
void iop3xx_map_io(void);
void iop_init_cp6_handler(void);
void iop_init_time(unsigned long tickrate);
-void iop3xx_restart(char, const char *);
+void iop3xx_restart(enum reboot_mode, const char *);
static inline u32 read_tmr0(void)
{
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 122f86d8c99..250760e0810 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -82,8 +82,6 @@ struct iop_adma_chan {
* @slot_cnt: total slots used in an transaction (group of operations)
* @slots_per_op: number of slots per operation
* @idx: pool index
- * @unmap_src_cnt: number of xor sources
- * @unmap_len: transaction bytecount
* @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
@@ -99,8 +97,6 @@ struct iop_adma_desc_slot {
u16 slot_cnt;
u16 slots_per_op;
u16 idx;
- u16 unmap_src_cnt;
- size_t unmap_len;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {
diff --git a/arch/arm/include/asm/hardware/pci_v3.h b/arch/arm/include/asm/hardware/pci_v3.h
deleted file mode 100644
index 2811c7e2cfd..00000000000
--- a/arch/arm/include/asm/hardware/pci_v3.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/pci_v3.h
- *
- * Internal header file PCI V3 chip
- *
- * Copyright (C) ARM Limited
- * Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef ASM_ARM_HARDWARE_PCI_V3_H
-#define ASM_ARM_HARDWARE_PCI_V3_H
-
-/* -------------------------------------------------------------------------------
- * V3 Local Bus to PCI Bridge definitions
- * -------------------------------------------------------------------------------
- * Registers (these are taken from page 129 of the EPC User's Manual Rev 1.04
- * All V3 register names are prefaced by V3_ to avoid clashing with any other
- * PCI definitions. Their names match the user's manual.
- *
- * I'm assuming that I20 is disabled.
- *
- */
-#define V3_PCI_VENDOR 0x00000000
-#define V3_PCI_DEVICE 0x00000002
-#define V3_PCI_CMD 0x00000004
-#define V3_PCI_STAT 0x00000006
-#define V3_PCI_CC_REV 0x00000008
-#define V3_PCI_HDR_CFG 0x0000000C
-#define V3_PCI_IO_BASE 0x00000010
-#define V3_PCI_BASE0 0x00000014
-#define V3_PCI_BASE1 0x00000018
-#define V3_PCI_SUB_VENDOR 0x0000002C
-#define V3_PCI_SUB_ID 0x0000002E
-#define V3_PCI_ROM 0x00000030
-#define V3_PCI_BPARAM 0x0000003C
-#define V3_PCI_MAP0 0x00000040
-#define V3_PCI_MAP1 0x00000044
-#define V3_PCI_INT_STAT 0x00000048
-#define V3_PCI_INT_CFG 0x0000004C
-#define V3_LB_BASE0 0x00000054
-#define V3_LB_BASE1 0x00000058
-#define V3_LB_MAP0 0x0000005E
-#define V3_LB_MAP1 0x00000062
-#define V3_LB_BASE2 0x00000064
-#define V3_LB_MAP2 0x00000066
-#define V3_LB_SIZE 0x00000068
-#define V3_LB_IO_BASE 0x0000006E
-#define V3_FIFO_CFG 0x00000070
-#define V3_FIFO_PRIORITY 0x00000072
-#define V3_FIFO_STAT 0x00000074
-#define V3_LB_ISTAT 0x00000076
-#define V3_LB_IMASK 0x00000077
-#define V3_SYSTEM 0x00000078
-#define V3_LB_CFG 0x0000007A
-#define V3_PCI_CFG 0x0000007C
-#define V3_DMA_PCI_ADR0 0x00000080
-#define V3_DMA_PCI_ADR1 0x00000090
-#define V3_DMA_LOCAL_ADR0 0x00000084
-#define V3_DMA_LOCAL_ADR1 0x00000094
-#define V3_DMA_LENGTH0 0x00000088
-#define V3_DMA_LENGTH1 0x00000098
-#define V3_DMA_CSR0 0x0000008B
-#define V3_DMA_CSR1 0x0000009B
-#define V3_DMA_CTLB_ADR0 0x0000008C
-#define V3_DMA_CTLB_ADR1 0x0000009C
-#define V3_DMA_DELAY 0x000000E0
-#define V3_MAIL_DATA 0x000000C0
-#define V3_PCI_MAIL_IEWR 0x000000D0
-#define V3_PCI_MAIL_IERD 0x000000D2
-#define V3_LB_MAIL_IEWR 0x000000D4
-#define V3_LB_MAIL_IERD 0x000000D6
-#define V3_MAIL_WR_STAT 0x000000D8
-#define V3_MAIL_RD_STAT 0x000000DA
-#define V3_QBA_MAP 0x000000DC
-
-/* PCI COMMAND REGISTER bits
- */
-#define V3_COMMAND_M_FBB_EN (1 << 9)
-#define V3_COMMAND_M_SERR_EN (1 << 8)
-#define V3_COMMAND_M_PAR_EN (1 << 6)
-#define V3_COMMAND_M_MASTER_EN (1 << 2)
-#define V3_COMMAND_M_MEM_EN (1 << 1)
-#define V3_COMMAND_M_IO_EN (1 << 0)
-
-/* SYSTEM REGISTER bits
- */
-#define V3_SYSTEM_M_RST_OUT (1 << 15)
-#define V3_SYSTEM_M_LOCK (1 << 14)
-
-/* PCI_CFG bits
- */
-#define V3_PCI_CFG_M_I2O_EN (1 << 15)
-#define V3_PCI_CFG_M_IO_REG_DIS (1 << 14)
-#define V3_PCI_CFG_M_IO_DIS (1 << 13)
-#define V3_PCI_CFG_M_EN3V (1 << 12)
-#define V3_PCI_CFG_M_RETRY_EN (1 << 10)
-#define V3_PCI_CFG_M_AD_LOW1 (1 << 9)
-#define V3_PCI_CFG_M_AD_LOW0 (1 << 8)
-
-/* PCI_BASE register bits (PCI -> Local Bus)
- */
-#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000
-#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00
-#define V3_PCI_BASE_M_PREFETCH (1 << 3)
-#define V3_PCI_BASE_M_TYPE (3 << 1)
-#define V3_PCI_BASE_M_IO (1 << 0)
-
-/* PCI MAP register bits (PCI -> Local bus)
- */
-#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000
-#define V3_PCI_MAP_M_RD_POST_INH (1 << 15)
-#define V3_PCI_MAP_M_ROM_SIZE (3 << 10)
-#define V3_PCI_MAP_M_SWAP (3 << 8)
-#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0
-#define V3_PCI_MAP_M_REG_EN (1 << 1)
-#define V3_PCI_MAP_M_ENABLE (1 << 0)
-
-/*
- * LB_BASE0,1 register bits (Local bus -> PCI)
- */
-#define V3_LB_BASE_ADR_BASE 0xfff00000
-#define V3_LB_BASE_SWAP (3 << 8)
-#define V3_LB_BASE_ADR_SIZE (15 << 4)
-#define V3_LB_BASE_PREFETCH (1 << 3)
-#define V3_LB_BASE_ENABLE (1 << 0)
-
-#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4)
-#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4)
-#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4)
-#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4)
-#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4)
-#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4)
-#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4)
-#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4)
-#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4)
-#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4)
-#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4)
-#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4)
-
-#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE)
-
-/*
- * LB_MAP0,1 register bits (Local bus -> PCI)
- */
-#define V3_LB_MAP_MAP_ADR 0xfff0
-#define V3_LB_MAP_TYPE (7 << 1)
-#define V3_LB_MAP_AD_LOW_EN (1 << 0)
-
-#define V3_LB_MAP_TYPE_IACK (0 << 1)
-#define V3_LB_MAP_TYPE_IO (1 << 1)
-#define V3_LB_MAP_TYPE_MEM (3 << 1)
-#define V3_LB_MAP_TYPE_CONFIG (5 << 1)
-#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1)
-
-#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR)
-
-/*
- * LB_BASE2 register bits (Local bus -> PCI IO)
- */
-#define V3_LB_BASE2_ADR_BASE 0xff00
-#define V3_LB_BASE2_SWAP (3 << 6)
-#define V3_LB_BASE2_ENABLE (1 << 0)
-
-#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE)
-
-/*
- * LB_MAP2 register bits (Local bus -> PCI IO)
- */
-#define V3_LB_MAP2_MAP_ADR 0xff00
-
-#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR)
-
-#endif
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
deleted file mode 100644
index 4eea2107214..00000000000
--- a/arch/arm/include/asm/hardware/pl080.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* arch/arm/include/asm/hardware/pl080.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * ARM PrimeCell PL080 DMA controller
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Note, there are some Samsung updates to this controller block which
- * make it not entierly compatible with the PL080 specification from
- * ARM. When in doubt, check the Samsung documentation first.
- *
- * The Samsung defines are PL080S, and add an extra control register,
- * the ability to move more than 2^11 counts of data and some extra
- * OneNAND features.
-*/
-
-#ifndef ASM_PL080_H
-#define ASM_PL080_H
-
-#define PL080_INT_STATUS (0x00)
-#define PL080_TC_STATUS (0x04)
-#define PL080_TC_CLEAR (0x08)
-#define PL080_ERR_STATUS (0x0C)
-#define PL080_ERR_CLEAR (0x10)
-#define PL080_RAW_TC_STATUS (0x14)
-#define PL080_RAW_ERR_STATUS (0x18)
-#define PL080_EN_CHAN (0x1c)
-#define PL080_SOFT_BREQ (0x20)
-#define PL080_SOFT_SREQ (0x24)
-#define PL080_SOFT_LBREQ (0x28)
-#define PL080_SOFT_LSREQ (0x2C)
-
-#define PL080_CONFIG (0x30)
-#define PL080_CONFIG_M2_BE (1 << 2)
-#define PL080_CONFIG_M1_BE (1 << 1)
-#define PL080_CONFIG_ENABLE (1 << 0)
-
-#define PL080_SYNC (0x34)
-
-/* Per channel configuration registers */
-
-#define PL080_Cx_STRIDE (0x20)
-#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20)))
-#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20)))
-#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20)))
-#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20)))
-
-#define PL080_CH_SRC_ADDR (0x00)
-#define PL080_CH_DST_ADDR (0x04)
-#define PL080_CH_LLI (0x08)
-#define PL080_CH_CONTROL (0x0C)
-#define PL080_CH_CONFIG (0x10)
-#define PL080S_CH_CONTROL2 (0x10)
-#define PL080S_CH_CONFIG (0x14)
-
-#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
-#define PL080_LLI_ADDR_SHIFT (2)
-#define PL080_LLI_LM_AHB2 (1 << 0)
-
-#define PL080_CONTROL_TC_IRQ_EN (1 << 31)
-#define PL080_CONTROL_PROT_MASK (0x7 << 28)
-#define PL080_CONTROL_PROT_SHIFT (28)
-#define PL080_CONTROL_PROT_CACHE (1 << 30)
-#define PL080_CONTROL_PROT_BUFF (1 << 29)
-#define PL080_CONTROL_PROT_SYS (1 << 28)
-#define PL080_CONTROL_DST_INCR (1 << 27)
-#define PL080_CONTROL_SRC_INCR (1 << 26)
-#define PL080_CONTROL_DST_AHB2 (1 << 25)
-#define PL080_CONTROL_SRC_AHB2 (1 << 24)
-#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
-#define PL080_CONTROL_DWIDTH_SHIFT (21)
-#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
-#define PL080_CONTROL_SWIDTH_SHIFT (18)
-#define PL080_CONTROL_DB_SIZE_MASK (0x7 << 15)
-#define PL080_CONTROL_DB_SIZE_SHIFT (15)
-#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12)
-#define PL080_CONTROL_SB_SIZE_SHIFT (12)
-#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0)
-#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0)
-
-#define PL080_BSIZE_1 (0x0)
-#define PL080_BSIZE_4 (0x1)
-#define PL080_BSIZE_8 (0x2)
-#define PL080_BSIZE_16 (0x3)
-#define PL080_BSIZE_32 (0x4)
-#define PL080_BSIZE_64 (0x5)
-#define PL080_BSIZE_128 (0x6)
-#define PL080_BSIZE_256 (0x7)
-
-#define PL080_WIDTH_8BIT (0x0)
-#define PL080_WIDTH_16BIT (0x1)
-#define PL080_WIDTH_32BIT (0x2)
-
-#define PL080N_CONFIG_ITPROT (1 << 20)
-#define PL080N_CONFIG_SECPROT (1 << 19)
-#define PL080_CONFIG_HALT (1 << 18)
-#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
-#define PL080_CONFIG_LOCK (1 << 16)
-#define PL080_CONFIG_TC_IRQ_MASK (1 << 15)
-#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14)
-#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
-#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
-#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
-#define PL080_CONFIG_DST_SEL_SHIFT (6)
-#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
-#define PL080_CONFIG_SRC_SEL_SHIFT (1)
-#define PL080_CONFIG_ENABLE (1 << 0)
-
-#define PL080_FLOW_MEM2MEM (0x0)
-#define PL080_FLOW_MEM2PER (0x1)
-#define PL080_FLOW_PER2MEM (0x2)
-#define PL080_FLOW_SRC2DST (0x3)
-#define PL080_FLOW_SRC2DST_DST (0x4)
-#define PL080_FLOW_MEM2PER_PER (0x5)
-#define PL080_FLOW_PER2MEM_PER (0x6)
-#define PL080_FLOW_SRC2DST_SRC (0x7)
-
-/* DMA linked list chain structure */
-
-struct pl080_lli {
- u32 src_addr;
- u32 dst_addr;
- u32 next_lli;
- u32 control0;
-};
-
-struct pl080s_lli {
- u32 src_addr;
- u32 dst_addr;
- u32 next_lli;
- u32 control0;
- u32 control1;
-};
-
-#endif /* ASM_PL080_H */
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
deleted file mode 100644
index 6636430dd0e..00000000000
--- a/arch/arm/include/asm/hardware/sp810.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/sp810.h
- *
- * ARM PrimeXsys System Controller SP810 header file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARM_SP810_H
-#define __ASM_ARM_SP810_H
-
-#include <linux/io.h>
-
-/* sysctl registers offset */
-#define SCCTRL 0x000
-#define SCSYSSTAT 0x004
-#define SCIMCTRL 0x008
-#define SCIMSTAT 0x00C
-#define SCXTALCTRL 0x010
-#define SCPLLCTRL 0x014
-#define SCPLLFCTRL 0x018
-#define SCPERCTRL0 0x01C
-#define SCPERCTRL1 0x020
-#define SCPEREN 0x024
-#define SCPERDIS 0x028
-#define SCPERCLKEN 0x02C
-#define SCPERSTAT 0x030
-#define SCSYSID0 0xEE0
-#define SCSYSID1 0xEE4
-#define SCSYSID2 0xEE8
-#define SCSYSID3 0xEEC
-#define SCITCR 0xF00
-#define SCITIR0 0xF04
-#define SCITIR1 0xF08
-#define SCITOR 0xF0C
-#define SCCNTCTRL 0xF10
-#define SCCNTDATA 0xF14
-#define SCCNTSTEP 0xF18
-#define SCPERIPHID0 0xFE0
-#define SCPERIPHID1 0xFE4
-#define SCPERIPHID2 0xFE8
-#define SCPERIPHID3 0xFEC
-#define SCPCELLID0 0xFF0
-#define SCPCELLID1 0xFF4
-#define SCPCELLID2 0xFF8
-#define SCPCELLID3 0xFFC
-
-#define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2))
-
-static inline void sysctl_soft_reset(void __iomem *base)
-{
- /* switch to slow mode */
- writel(0x2, base + SCCTRL);
-
- /* writing any value to SCSYSSTAT reg will reset system */
- writel(0, base + SCSYSSTAT);
-}
-
-#endif /* __ASM_ARM_SP810_H */
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
index 2dd9d3f83f2..bb28af7c32d 100644
--- a/arch/arm/include/asm/hardware/timer-sp.h
+++ b/arch/arm/include/asm/hardware/timer-sp.h
@@ -1,15 +1,23 @@
+struct clk;
+
void __sp804_clocksource_and_sched_clock_init(void __iomem *,
- const char *, int);
+ const char *, struct clk *, int);
+void __sp804_clockevents_init(void __iomem *, unsigned int,
+ struct clk *, const char *);
static inline void sp804_clocksource_init(void __iomem *base, const char *name)
{
- __sp804_clocksource_and_sched_clock_init(base, name, 0);
+ __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
}
static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name)
{
- __sp804_clocksource_and_sched_clock_init(base, name, 1);
+ __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
}
-void sp804_clockevents_init(void __iomem *, unsigned int, const char *);
+static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
+{
+ __sp804_clockevents_init(base, irq, NULL, name);
+
+}
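Threading a struct clk through the SP804 initialisers lets callers that already own the timer clock pass it explicitly, while the old wrappers keep working by passing NULL. A hedged sketch of the explicit form; the clock lookup name and the 0x20 offset of the second timer are assumptions about a typical SP804 layout, not mandated by this header:

	struct clk *clk = clk_get_sys("sp804", NULL);	/* illustrative lookup */

	__sp804_clocksource_and_sched_clock_init(base, "timer0", clk, 1);
	__sp804_clockevents_init(base + 0x20, irq, clk, "timer1");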
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
deleted file mode 100644
index 2bebad36fc8..00000000000
--- a/arch/arm/include/asm/hardware/vic.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/vic.h
- *
- * Copyright (c) ARM Limited 2003. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_HARDWARE_VIC_H
-#define __ASM_ARM_HARDWARE_VIC_H
-
-#define VIC_IRQ_STATUS 0x00
-#define VIC_FIQ_STATUS 0x04
-#define VIC_RAW_STATUS 0x08
-#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
-#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
-#define VIC_INT_ENABLE_CLEAR 0x14
-#define VIC_INT_SOFT 0x18
-#define VIC_INT_SOFT_CLEAR 0x1c
-#define VIC_PROTECT 0x20
-#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */
-#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */
-
-#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */
-#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */
-#define VIC_ITCR 0x300 /* VIC test control register */
-
-#define VIC_VECT_CNTL_ENABLE (1 << 5)
-
-#define VIC_PL192_VECT_ADDR 0xF00
-
-#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
-#include <linux/types.h>
-
-struct device_node;
-struct pt_regs;
-
-void __vic_init(void __iomem *base, int irq_start, u32 vic_sources,
- u32 resume_sources, struct device_node *node);
-void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-int vic_of_init(struct device_node *node, struct device_node *parent);
-void vic_handle_irq(struct pt_regs *regs);
-
-#endif /* __ASSEMBLY__ */
-#endif
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 8c5e828f484..535579511ed 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -18,6 +18,7 @@
} while (0)
extern pte_t *pkmap_page_table;
+extern pte_t *fixmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
@@ -41,6 +42,13 @@ extern void kunmap_high(struct page *page);
#endif
#endif
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644
index 00000000000..d4014fbe5ea
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+
+/*
+ * If our huge pte is non-zero then mark the valid bit.
+ * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * ptes.
+ * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
+ */
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ pte_t retval = *ptep;
+ if (pte_val(retval))
+ pte_val(retval) |= L_PTE_VALID;
+ return retval;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644
index 00000000000..1f1b1cd112f
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb.h
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+#include <asm/hugetlb-3level.h>
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 01169dd723f..8e427c7b442 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
+#define ARM_DEBUG_ARCH_V8 6
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
@@ -85,6 +86,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_HDBGEN (1 << 14)
#define ARM_DSCR_MDBGEN (1 << 15)
+/* OSLSR os lock model bits */
+#define ARM_OSLSR_OSLM0 (1 << 0)
+
/* opcode2 numbers for the co-processor instructions. */
#define ARM_OP2_BVR 4
#define ARM_OP2_BCR 5
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1..6e183fd269f 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -9,6 +9,7 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2 (elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
#endif
#endif
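ELF_HWCAP2 publishes a second capability word to user space through the auxiliary vector, alongside the existing ELF_HWCAP. A small user-space sketch of reading it, assuming a C library new enough to provide getauxval() and AT_HWCAP2:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("HWCAP2: %#lx\n", hwcap2);
	return 0;
}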
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 652b56086de..3d23418cbdd 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -36,6 +38,12 @@
#define isa_bus_to_virt phys_to_virt
/*
+ * Atomic MMIO-wide IO modify
+ */
+extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
+extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
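+
+/*
+ * Usage sketch (reg is assumed to be an already ioremap()ed register):
+ * atomic_io_modify(reg, 0x1, 0x1) updates only bit 0, setting it to 1;
+ * the read-modify-write is atomic with respect to other callers of
+ * these helpers.
+ */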
+
+/*
* Generic IO read/write. These perform native-endian accesses. Note
* that some architectures will want to re-define __raw_{read,write}w.
*/
@@ -130,16 +138,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
*/
extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
size_t, unsigned int, void *);
-extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
+extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
extern void __arm_iounmap(volatile void __iomem *addr);
-extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
extern void (*arch_iounmap)(volatile void __iomem *);
@@ -171,6 +179,12 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
/* PCI fixed i/o mapping */
#define PCI_IO_VIRT_BASE 0xfee00000
+#if defined(CONFIG_PCI)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+
extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
/*
@@ -327,7 +341,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
*/
#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap __arm_iounmap
@@ -372,6 +386,13 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 35c21c375d8..53c15dec7af 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -30,6 +30,11 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#endif
+
#endif
#endif
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 1e6cca55c75..3b763d6652a 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -8,6 +8,16 @@
/*
* CPU interrupt mask handling.
*/
+#ifdef CONFIG_CPU_V7M
+#define IRQMASK_REG_NAME_R "primask"
+#define IRQMASK_REG_NAME_W "primask"
+#define IRQMASK_I_BIT 1
+#else
+#define IRQMASK_REG_NAME_R "cpsr"
+#define IRQMASK_REG_NAME_W "cpsr_c"
+#define IRQMASK_I_BIT PSR_I_BIT
+#endif
+
#if __LINUX_ARM_ARCH__ >= 6
static inline unsigned long arch_local_irq_save(void)
@@ -15,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void)
unsigned long flags;
asm volatile(
- " mrs %0, cpsr @ arch_local_irq_save\n"
+ " mrs %0, " IRQMASK_REG_NAME_R " @ arch_local_irq_save\n"
" cpsid i"
: "=r" (flags) : : "memory", "cc");
return flags;
@@ -129,7 +139,7 @@ static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile(
- " mrs %0, cpsr @ local_save_flags"
+ " mrs %0, " IRQMASK_REG_NAME_R " @ local_save_flags"
: "=r" (flags) : : "memory", "cc");
return flags;
}
@@ -140,7 +150,7 @@ static inline unsigned long arch_local_save_flags(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile(
- " msr cpsr_c, %0 @ local_irq_restore"
+ " msr " IRQMASK_REG_NAME_W ", %0 @ local_irq_restore"
:
: "r" (flags)
: "memory", "cc");
@@ -148,8 +158,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return flags & PSR_I_BIT;
+ return flags & IRQMASK_I_BIT;
}
-#endif
-#endif
+#endif /* ifdef __KERNEL__ */
+#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c7591..70f9b9bfb1f 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm/system.h>
#define JUMP_LABEL_NOP_SIZE 4
@@ -16,7 +15,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce9ea3..0a9d5dd9329 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
+#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm(".word 0xe7ffdeff");
+ asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index f82ec22eeb1..49fa0dfaad3 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -18,7 +18,7 @@
#include <linux/types.h>
#include <linux/ptrace.h>
-#include <linux/percpu.h>
+#include <linux/notifier.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
@@ -28,21 +28,10 @@
#define kretprobe_blacklist_size 0
typedef u32 kprobe_opcode_t;
-
struct kprobe;
-typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
-typedef unsigned long (kprobe_check_cc)(unsigned long);
-typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
-typedef void (kprobe_insn_fn_t)(void);
+#include <asm/probes.h>
-/* Architecture specific copy of original instruction. */
-struct arch_specific_insn {
- kprobe_opcode_t *insn;
- kprobe_insn_handler_t *insn_handler;
- kprobe_check_cc *insn_check_cc;
- kprobe_insn_singlestep_t *insn_singlestep;
- kprobe_insn_fn_t *insn_fn;
-};
+#define arch_specific_insn arch_probes_insn
struct prev_kprobe {
struct kprobe *kp;
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 00000000000..816db0bf2dd
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ARM_H__
+#define __ARM_KVM_ARM_H__
+
+#include <linux/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE (1 << 27)
+#define HCR_TVM (1 << 26)
+#define HCR_TTLB (1 << 25)
+#define HCR_TPU (1 << 24)
+#define HCR_TPC (1 << 23)
+#define HCR_TSW (1 << 22)
+#define HCR_TAC (1 << 21)
+#define HCR_TIDCP (1 << 20)
+#define HCR_TSC (1 << 19)
+#define HCR_TID3 (1 << 18)
+#define HCR_TID2 (1 << 17)
+#define HCR_TID1 (1 << 16)
+#define HCR_TID0 (1 << 15)
+#define HCR_TWE (1 << 14)
+#define HCR_TWI (1 << 13)
+#define HCR_DC (1 << 12)
+#define HCR_BSU (3 << 10)
+#define HCR_BSU_IS (1 << 10)
+#define HCR_FB (1 << 9)
+#define HCR_VA (1 << 8)
+#define HCR_VI (1 << 7)
+#define HCR_VF (1 << 6)
+#define HCR_AMO (1 << 5)
+#define HCR_IMO (1 << 4)
+#define HCR_FMO (1 << 3)
+#define HCR_PTW (1 << 2)
+#define HCR_SWIO (1 << 1)
+#define HCR_VM 1
+
+/*
+ * The bits we set in HCR:
+ * TAC: Trap ACTLR
+ * TSC: Trap SMC
+ * TVM: Trap VM ops (until MMU and caches are on)
+ * TSW: Trap cache operations by set/way
+ * TWI: Trap WFI
+ * TWE: Trap WFE
+ * TIDCP: Trap L2CTLR/L2ECTLR
+ * BSU_IS: Upgrade barriers to the inner shareable domain
+ * FB: Force broadcast of all maintenance operations
+ * AMO: Override CPSR.A and enable signaling with VA
+ * IMO: Override CPSR.I and enable signaling with VI
+ * FMO: Override CPSR.F and enable signaling with VF
+ * SWIO: Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+ HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
+
+/* System Control Register (SCTLR) bits */
+#define SCTLR_TE (1 << 30)
+#define SCTLR_EE (1 << 25)
+#define SCTLR_V (1 << 13)
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE (1 << 30)
+#define HSCTLR_EE (1 << 25)
+#define HSCTLR_FI (1 << 21)
+#define HSCTLR_WXN (1 << 19)
+#define HSCTLR_I (1 << 12)
+#define HSCTLR_C (1 << 2)
+#define HSCTLR_A (1 << 1)
+#define HSCTLR_M 1
+#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
+ HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1 (3 << 28)
+#define TTBCR_ORGN1 (3 << 26)
+#define TTBCR_IRGN1 (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ (7 << 16)
+#define TTBCR_SH0 (3 << 12)
+#define TTBCR_ORGN0 (3 << 10)
+#define TTBCR_IRGN0 (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ (7 << 0)
+#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+/* Hyp System Trap Register */
+#define HSTR_T(x) (1 << x)
+#define HSTR_TTEE (1 << 16)
+#define HSTR_TJDBX (1 << 17)
+
+/* Hyp Coprocessor Trap Register */
+#define HCPTR_TCP(x) (1 << x)
+#define HCPTR_TCP_MASK (0x3fff)
+#define HCPTR_TASE (1 << 15)
+#define HCPTR_TTA (1 << 20)
+#define HCPTR_TCPAC (1 << 31)
+
+/* Hyp Debug Configuration Register bits */
+#define HDCR_TDRA (1 << 11)
+#define HDCR_TDOSA (1 << 10)
+#define HDCR_TDA (1 << 9)
+#define HDCR_TDE (1 << 8)
+#define HDCR_HPME (1 << 7)
+#define HDCR_TPM (1 << 6)
+#define HDCR_TPMCR (1 << 5)
+#define HDCR_HPMN_MASK (0x1F)
+
+/*
+ * The architecture supports 40-bit IPA as input to the 2nd stage translations
+ * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
+ * space.
+ */
+#define KVM_PHYS_SHIFT (40)
+#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
+#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
+#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+/* Virtualization Translation Control Register (VTCR) bits */
+#define VTCR_SH0 (3 << 12)
+#define VTCR_ORGN0 (3 << 10)
+#define VTCR_IRGN0 (3 << 8)
+#define VTCR_SL0 (3 << 6)
+#define VTCR_S (1 << 4)
+#define VTCR_T0SZ (0xf)
+#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
+ VTCR_S | VTCR_T0SZ)
+#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
+#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
+#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
+#define KVM_VTCR_SL0 VTCR_SL_L1
+/* stage-2 input address range defined as 2^(32-T0SZ) */
+#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
+#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
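+/* VTCR.S must mirror the sign bit of T0SZ: shift T0SZ bit 3 up into bit 4 */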
+#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
+
+/* Virtualization Translation Table Base Register (VTTBR) bits */
+#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
+#define VTTBR_X (14 - KVM_T0SZ)
+#else
+#define VTTBR_X (5 - KVM_T0SZ)
+#endif
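+/* e.g. KVM_PHYS_SHIFT == 40 gives KVM_T0SZ == -8 and, with SL0 == L1, VTTBR_X == 13 */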
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT (48LLU)
+#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+
+/* Hyp Syndrome Register (HSR) bits */
+#define HSR_EC_SHIFT (26)
+#define HSR_EC (0x3fU << HSR_EC_SHIFT)
+#define HSR_IL (1U << 25)
+#define HSR_ISS (HSR_IL - 1)
+#define HSR_ISV_SHIFT (24)
+#define HSR_ISV (1U << HSR_ISV_SHIFT)
+#define HSR_SRT_SHIFT (16)
+#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
+#define HSR_FSC (0x3f)
+#define HSR_FSC_TYPE (0x3c)
+#define HSR_SSE (1 << 21)
+#define HSR_WNR (1 << 6)
+#define HSR_CV_SHIFT (24)
+#define HSR_CV (1U << HSR_CV_SHIFT)
+#define HSR_COND_SHIFT (20)
+#define HSR_COND (0xfU << HSR_COND_SHIFT)
+
+#define FSC_FAULT (0x04)
+#define FSC_PERM (0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK (~0xf)
+
+#define HSR_EC_UNKNOWN (0x00)
+#define HSR_EC_WFI (0x01)
+#define HSR_EC_CP15_32 (0x03)
+#define HSR_EC_CP15_64 (0x04)
+#define HSR_EC_CP14_MR (0x05)
+#define HSR_EC_CP14_LS (0x06)
+#define HSR_EC_CP_0_13 (0x07)
+#define HSR_EC_CP10_ID (0x08)
+#define HSR_EC_JAZELLE (0x09)
+#define HSR_EC_BXJ (0x0A)
+#define HSR_EC_CP14_64 (0x0C)
+#define HSR_EC_SVC_HYP (0x11)
+#define HSR_EC_HVC (0x12)
+#define HSR_EC_SMC (0x13)
+#define HSR_EC_IABT (0x20)
+#define HSR_EC_IABT_HYP (0x21)
+#define HSR_EC_DABT (0x24)
+#define HSR_EC_DABT_HYP (0x25)
+
+#define HSR_WFI_IS_WFE (1U << 0)
+
+#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
+
+#define HSR_DABT_S1PTW (1U << 7)
+#define HSR_DABT_CM (1U << 8)
+#define HSR_DABT_EA (1U << 9)
+
+#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 00000000000..53b3c4a50d5
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+/* 0 is reserved as an invalid value. */
+#define c0_MPIDR 1 /* MultiProcessor ID Register */
+#define c0_CSSELR 2 /* Cache Size Selection Register */
+#define c1_SCTLR 3 /* System Control Register */
+#define c1_ACTLR 4 /* Auxiliary Control Register */
+#define c1_CPACR 5 /* Coprocessor Access Control */
+#define c2_TTBR0 6 /* Translation Table Base Register 0 */
+#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
+#define c2_TTBR1 8 /* Translation Table Base Register 1 */
+#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
+#define c2_TTBCR 10 /* Translation Table Base Control R. */
+#define c3_DACR 11 /* Domain Access Control Register */
+#define c5_DFSR 12 /* Data Fault Status Register */
+#define c5_IFSR 13 /* Instruction Fault Status Register */
+#define c5_ADFSR 14 /* Auxiliary Data Fault Status R */
+#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
+#define c6_DFAR 16 /* Data Fault Address Register */
+#define c6_IFAR 17 /* Instruction Fault Address Register */
+#define c7_PAR 18 /* Physical Address Register */
+#define c7_PAR_high 19 /* PAR top 32 bits */
+#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */
+#define c10_PRRR 21 /* Primary Region Remap Register */
+#define c10_NMRR 22 /* Normal Memory Remap Register */
+#define c12_VBAR 23 /* Vector Base Address Register */
+#define c13_CID 24 /* Context ID Register */
+#define c13_TID_URW 25 /* Thread ID, User R/W */
+#define c13_TID_URO 26 /* Thread ID, User R/O */
+#define c13_TID_PRIV 27 /* Thread ID, Privileged */
+#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
+#define c10_AMAIR0 29 /* Auxiliary Memory Attribute Indirection Reg0 */
+#define c10_AMAIR1 30 /* Auxiliary Memory Attribute Indirection Reg1 */
+#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */
+
+#define ARM_EXCEPTION_RESET 0
+#define ARM_EXCEPTION_UNDEFINED 1
+#define ARM_EXCEPTION_SOFTWARE 2
+#define ARM_EXCEPTION_PREF_ABORT 3
+#define ARM_EXCEPTION_DATA_ABORT 4
+#define ARM_EXCEPTION_IRQ 5
+#define ARM_EXCEPTION_FIQ 6
+#define ARM_EXCEPTION_HVC 7
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_exit[];
+extern char __kvm_hyp_exit_end[];
+
+extern char __kvm_hyp_vector[];
+
+extern char __kvm_hyp_code_start[];
+extern char __kvm_hyp_code_end[];
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
new file mode 100644
index 00000000000..4917c2f7e45
--- /dev/null
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_COPROC_H__
+#define __ARM_KVM_COPROC_H__
+#include <linux/kvm_host.h>
+
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_coproc_target_table {
+ unsigned target;
+ const struct coproc_reg *table;
+ size_t num;
+};
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+void kvm_coproc_table_init(void);
+
+struct kvm_one_reg;
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
new file mode 100644
index 00000000000..0fa90c962ac
--- /dev/null
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_EMULATE_H__
+#define __ARM_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
+
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
+
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
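+/* A 32-bit (AArch32) host can only run 32-bit guests, so this is always true. */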
+static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
+{
+ return &vcpu->arch.regs.usr_regs.ARM_pc;
+}
+
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+{
+ return &vcpu->arch.regs.usr_regs.ARM_cpsr;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+}
+
+static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
+}
+
+static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ return cpsr_mode > USR_MODE;
+}
+
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+ return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+ return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+ switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+ case 0:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ default:
+ kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+ return -EFAULT;
+ }
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cp15[c0_MPIDR];
+}
+
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+ return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return be16_to_cpu(data & 0xffff);
+ default:
+ return be32_to_cpu(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_be16(data & 0xffff);
+ default:
+ return cpu_to_be32(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
+#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
new file mode 100644
index 00000000000..193ceaf01bf
--- /dev/null
+++ b/arch/arm/include/asm/kvm_host.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_HOST_H__
+#define __ARM_KVM_HOST_H__
+
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/fpstate.h>
+#include <kvm/arm_arch_timer.h>
+
+#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
+#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#else
+#define KVM_MAX_VCPUS 0
+#endif
+
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HAVE_ONE_REG
+
+#define KVM_VCPU_MAX_FEATURES 2
+
+#include <kvm/arm_vgic.h>
+
+struct kvm_vcpu;
+u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+int kvm_target_cpu(void);
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_arch {
+ /* VTTBR value associated with below pgd and vmid */
+ u64 vttbr;
+
+ /* Timer */
+ struct arch_timer_kvm timer;
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+
+ /* The VMID generation used for the virt. memory system */
+ u64 vmid_gen;
+ u32 vmid;
+
+ /* Stage-2 page table */
+ pgd_t *pgd;
+
+ /* Interrupt controller */
+ struct vgic_dist vgic;
+};
+
+#define KVM_NR_MEM_OBJS 40
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ void *objects[KVM_NR_MEM_OBJS];
+};
+
+struct kvm_vcpu_fault_info {
+ u32 hsr; /* Hyp Syndrome Register */
+ u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
+ u32 hpfar; /* Hyp IPA Fault Address Register */
+ u32 hyp_pc; /* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_cpu_context_t;
+
+struct kvm_vcpu_arch {
+ struct kvm_regs regs;
+
+ int target; /* Processor target */
+ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
+
+ /* System control coprocessor (cp15) */
+ u32 cp15[NR_CP15_REGS];
+
+ /* The CPU type we expose to the VM */
+ u32 midr;
+
+ /* HYP trapping configuration */
+ u32 hcr;
+
+ /* Interrupt related fields */
+ u32 irq_lines; /* IRQ and FIQ levels */
+
+ /* Exception Information */
+ struct kvm_vcpu_fault_info fault;
+
+ /* Floating point registers (VFP and Advanced SIMD/NEON) */
+ struct vfp_hard_struct vfp_guest;
+
+ /* Host FP context */
+ kvm_cpu_context_t *host_cpu_context;
+
+ /* VGIC state */
+ struct vgic_cpu vgic_cpu;
+ struct arch_timer_cpu timer_cpu;
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+ /* dcache set/way operation pending */
+ int last_pcpu;
+ cpumask_t require_dcache_flush;
+
+ /* Don't run the guest on this vcpu */
+ bool pause;
+
+ /* IO related fields */
+ struct kvm_decode mmio_decode;
+
+ /* Cache some mmu pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* Detect first run of a vcpu */
+ bool has_run_once;
+};
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 halt_wakeup;
+};
+
+struct kvm_vcpu_init;
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+struct kvm_one_reg;
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+struct kvm;
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+
+/* We do not have shadow page tables, hence the empty hooks */
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+
+static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+struct kvm_one_reg;
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index);
+
+static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+{
+ /*
+ * Call initialization code, and switch to the full blown HYP
+ * code. The init code doesn't need to preserve these
+ * registers as r0-r3 are caller-saved according to
+ * the AAPCS.
+ * Note that we slightly misuse the prototype by casting the
+ * stack pointer to a void *.
+ *
+ * We don't have enough registers to perform the full init in
+ * one go. Install the boot PGD first, and then install the
+ * runtime PGD, stack pointer and vectors. The PGDs are always
+ * passed as the third argument, in order to be passed into
+ * r2-r3 to the init code (yes, this is compliant with the
+ * PCS!).
+ */
+
+ kvm_call_hyp(NULL, 0, boot_pgd_ptr);
+
+ kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
+}
+
+static inline int kvm_arch_dev_ioctl_check_extension(long ext)
+{
+ return 0;
+}
+
+int kvm_perf_init(void);
+int kvm_perf_teardown(void);
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
+int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+
+#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
new file mode 100644
index 00000000000..adcc0d7d317
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMIO_H__
+#define __ARM_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+struct kvm_decode {
+ unsigned long rt;
+ bool sign_extend;
+};
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+ phys_addr_t phys_addr;
+ u8 data[8];
+ u32 len;
+ bool is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ run->mmio.phys_addr = mmio->phys_addr;
+ run->mmio.len = mmio->len;
+ run->mmio.is_write = mmio->is_write;
+ memcpy(run->mmio.data, mmio->data, mmio->len);
+ run->exit_reason = KVM_EXIT_MMIO;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ phys_addr_t fault_ipa);
+
+#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
new file mode 100644
index 00000000000..5c7aa3c1519
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMU_H__
+#define __ARM_KVM_MMU_H__
+
+#include <asm/memory.h>
+#include <asm/page.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK UL(~0)
+#define HYP_PAGE_OFFSET PAGE_OFFSET
+#define KERN_TO_HYP(kva) (kva)
+
+/*
+ * Our virtual mapping for the boot-time MMU-enable code. Must be
+ * shared across all the page-tables. Conveniently, we use the vectors
+ * page, where no kernel data will ever be shared with HYP.
+ */
+#define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+
+int create_hyp_mappings(void *from, void *to);
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+void free_boot_hyp_pgd(void);
+void free_hyp_pgds(void);
+
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+ phys_addr_t pa, unsigned long size);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+
+phys_addr_t kvm_mmu_get_httbr(void);
+phys_addr_t kvm_mmu_get_boot_httbr(void);
+phys_addr_t kvm_get_idmap_vector(void);
+int kvm_mmu_init(void);
+void kvm_clear_hyp_idmap(void);
+
+static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
+{
+ *pmd = new_pmd;
+ flush_pmd_entry(pmd);
+}
+
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+ *pte = new_pte;
+ /*
+ * flush_pmd_entry just takes a void pointer and cleans the necessary
+ * cache entries, so we can reuse the function for ptes.
+ */
+ flush_pmd_entry(pte);
+}
+
+static inline bool kvm_is_write_fault(unsigned long hsr)
+{
+ unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
+ if (hsr_ec == HSR_EC_IABT)
+ return false;
+ else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
+ return false;
+ else
+ return true;
+}
+
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+ clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+ clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+ clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+ pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+ pmd_val(*pmd) |= L_PMD_S2_RDWR;
+}
+
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end) \
+({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#define kvm_pud_addr_end(addr,end) (end)
+
+#define kvm_pmd_addr_end(addr, end) \
+({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+struct kvm;
+
+#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
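+ /* SCTLR.M (bit 0) and SCTLR.C (bit 2): MMU and data cache both enabled */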
+ return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+ unsigned long size)
+{
+ if (!vcpu_has_cache_enabled(vcpu))
+ kvm_flush_dcache_to_poc((void *)hva, size);
+
+ /*
+ * If we are going to insert an instruction page and the icache is
+ * either VIPT or PIPT, there is a potential problem where the host
+ * (or another VM) may have used the same page as this guest, and we
+ * read incorrect data from the icache. If we're using a PIPT cache,
+ * we can invalidate just that page, but if we are using a VIPT cache
+ * we need to invalidate the entire icache - damn shame - as written
+ * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+ *
+ * VIVT caches are tagged using both the ASID and the VMID and don't
+ * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ */
+ if (icache_is_pipt()) {
+ __cpuc_coherent_user_range(hva, hva + size);
+ } else if (!icache_is_vivt_asid_tagged()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ }
+}
+
+#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
+
+void stage2_flush_vm(struct kvm *kvm);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
new file mode 100644
index 00000000000..6bda945d31f
--- /dev/null
+++ b/arch/arm/include/asm/kvm_psci.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_PSCI_H__
+#define __ARM_KVM_PSCI_H__
+
+#define KVM_ARM_PSCI_0_1 1
+#define KVM_ARM_PSCI_0_2 2
+
+int kvm_psci_version(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
deleted file mode 100644
index f77ffc1eb0c..00000000000
--- a/arch/arm/include/asm/localtimer.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * arch/arm/include/asm/localtimer.h
- *
- * Copyright (C) 2004-2005 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_LOCALTIMER_H
-#define __ASM_ARM_LOCALTIMER_H
-
-#include <linux/errno.h>
-
-struct clock_event_device;
-
-struct local_timer_ops {
- int (*setup)(struct clock_event_device *);
- void (*stop)(struct clock_event_device *);
-};
-
-#ifdef CONFIG_LOCAL_TIMERS
-/*
- * Register a local timer driver
- */
-int local_timer_register(struct local_timer_ops *);
-#else
-static inline int local_timer_register(struct local_timer_ops *ops)
-{
- return -ENXIO;
-}
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 917d4fcfd9b..0406cb3f1af 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -8,17 +8,20 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
#ifndef __ASSEMBLY__
+#include <linux/reboot.h>
struct tag;
-struct meminfo;
-struct sys_timer;
struct pt_regs;
struct smp_operations;
#ifdef CONFIG_SMP
#define smp_ops(ops) (&(ops))
+#define smp_init_ops(ops) (&(ops))
#else
#define smp_ops(ops) (struct smp_operations *)NULL
+#define smp_init_ops(ops) (bool (*)(void))NULL
#endif
struct machine_desc {
@@ -31,7 +34,7 @@ struct machine_desc {
unsigned int nr_irqs; /* number of IRQs */
#ifdef CONFIG_ZONE_DMA
- unsigned long dma_zone_size; /* size of DMA-able area */
+ phys_addr_t dma_zone_size; /* size of DMA-able area */
#endif
unsigned int video_start; /* start of video RAM */
@@ -40,32 +43,37 @@ struct machine_desc {
unsigned char reserve_lp0 :1; /* never has lp0 */
unsigned char reserve_lp1 :1; /* never has lp1 */
unsigned char reserve_lp2 :1; /* never has lp2 */
- char restart_mode; /* default restart mode */
+ enum reboot_mode reboot_mode; /* default restart mode */
+ unsigned l2c_aux_val; /* L2 cache aux value */
+ unsigned l2c_aux_mask; /* L2 cache aux mask */
+ void (*l2c_write_sec)(unsigned long, unsigned);
struct smp_operations *smp; /* SMP operations */
- void (*fixup)(struct tag *, char **,
- struct meminfo *);
+ bool (*smp_init)(void);
+ void (*fixup)(struct tag *, char **);
+ void (*dt_fixup)(void);
+ void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
void (*init_irq)(void);
- struct sys_timer *timer; /* system tick timer */
+ void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
- void (*restart)(char, const char *);
+ void (*restart)(enum reboot_mode, const char *);
};
/*
* Current machine - only accessible during boot.
*/
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
*/
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
#define for_each_machine_desc(p) \
for (p = __arch_info_begin; p < __arch_info_end; p++)
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 15cb035309f..2092ee1e130 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -20,10 +20,6 @@ struct seq_file;
extern void init_FIQ(int);
extern int show_fiq_list(struct seq_file *, int);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-#endif
-
/*
* This is for easy migration, but should be changed in the source
*/
@@ -34,35 +30,4 @@ do { \
raw_spin_unlock(&desc->lock); \
} while(0)
-#ifndef __ASSEMBLY__
-/*
- * Entry/exit functions for chained handlers where the primary IRQ chip
- * may implement either fasteoi or level-trigger flow control.
- */
-static inline void chained_irq_enter(struct irq_chip *chip,
- struct irq_desc *desc)
-{
- /* FastEOI controllers require no action on entry. */
- if (chip->irq_eoi)
- return;
-
- if (chip->irq_mask_ack) {
- chip->irq_mask_ack(&desc->irq_data);
- } else {
- chip->irq_mask(&desc->irq_data);
- if (chip->irq_ack)
- chip->irq_ack(&desc->irq_data);
- }
-}
-
-static inline void chained_irq_exit(struct irq_chip *chip,
- struct irq_desc *desc)
-{
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
- else
- chip->irq_unmask(&desc->irq_data);
-}
-#endif
-
#endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141fcc8d..f98c7f32c9c 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -22,18 +22,21 @@ struct map_desc {
};
/* types 0-3 are defined in asm/io.h */
-#define MT_UNCACHED 4
-#define MT_CACHECLEAN 5
-#define MT_MINICLEAN 6
-#define MT_LOW_VECTORS 7
-#define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
-#define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
-#define MT_MEMORY_DTCM 12
-#define MT_MEMORY_ITCM 13
-#define MT_MEMORY_SO 14
-#define MT_MEMORY_DMA_READY 15
+enum {
+ MT_UNCACHED = 4,
+ MT_CACHECLEAN,
+ MT_MINICLEAN,
+ MT_LOW_VECTORS,
+ MT_HIGH_VECTORS,
+ MT_MEMORY_RWX,
+ MT_MEMORY_RW,
+ MT_ROM,
+ MT_MEMORY_RWX_NONCACHED,
+ MT_MEMORY_RW_DTCM,
+ MT_MEMORY_RWX_ITCM,
+ MT_MEMORY_RW_SO,
+ MT_MEMORY_DMA_READY,
+};
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb57f2..7fc42784bec 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -16,6 +16,7 @@
struct pci_sys_data;
struct pci_ops;
struct pci_bus;
+struct device;
struct hw_pci {
#ifdef CONFIG_PCI_DOMAINS
@@ -23,12 +24,20 @@ struct hw_pci {
#endif
struct pci_ops *ops;
int nr_controllers;
+ void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
void (*preinit)(void);
void (*postinit)(void);
u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+ resource_size_t (*align_resource)(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
+ void (*add_bus)(struct pci_bus *bus);
+ void (*remove_bus)(struct pci_bus *bus);
};
/*
@@ -50,13 +59,30 @@ struct pci_sys_data {
u8 (*swizzle)(struct pci_dev *, u8 *);
/* IRQ mapping */
int (*map_irq)(const struct pci_dev *, u8, u8);
+ /* Resource alignment requirements */
+ resource_size_t (*align_resource)(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
+ void (*add_bus)(struct pci_bus *bus);
+ void (*remove_bus)(struct pci_bus *bus);
void *private_data; /* platform controller private data */
};
/*
* Call this with your hw_pci struct to initialise the PCI system.
*/
-void pci_common_init(struct hw_pci *);
+void pci_common_init_dev(struct device *, struct hw_pci *);
+
+/*
+ * Compatibility wrapper for older platforms that do not care about
+ * passing the parent device.
+ */
+static inline void pci_common_init(struct hw_pci *hw)
+{
+ pci_common_init_dev(NULL, hw);
+}
/*
* Setup early fixed I/O mapping.
@@ -80,13 +106,4 @@ extern int dc21285_setup(int nr, struct pci_sys_data *);
extern void dc21285_preinit(void);
extern void dc21285_postinit(void);
-extern struct pci_ops via82c505_ops;
-extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern void via82c505_init(void *sysdata);
-
-extern struct pci_ops pci_v3_ops;
-extern int pci_v3_setup(int nr, struct pci_sys_data *);
-extern void pci_v3_preinit(void);
-extern void pci_v3_postinit(void);
-
#endif /* __ASM_MACH_PCI_H */
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 6ca945f534a..90c12e1e695 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -10,36 +10,6 @@
#ifndef __ASM_ARM_MACH_TIME_H
#define __ASM_ARM_MACH_TIME_H
-/*
- * This is our kernel timer structure.
- *
- * - init
- * Initialise the kernels jiffy timer source, claim interrupt
- * using setup_irq. This is called early on during initialisation
- * while interrupts are still disabled on the local CPU.
- * - suspend
- * Suspend the kernel jiffy timer source, if necessary. This
- * is called with interrupts disabled, after all normal devices
- * have been suspended. If no action is required, set this to
- * NULL.
- * - resume
- * Resume the kernel jiffy timer source, if necessary. This
- * is called with interrupts disabled before any normal devices
- * are resumed. If no action is required, set this to NULL.
- * - offset
- * Return the timer offset in microseconds since the last timer
- * interrupt. Note: this must take account of any unprocessed
- * timer interrupt which may be pending.
- */
-struct sys_timer {
- void (*init)(void);
- void (*suspend)(void);
- void (*resume)(void);
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
- unsigned long (*offset)(void);
-#endif
-};
-
extern void timer_tick(void);
struct timespec;
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 00000000000..94060adba17
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,259 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler. When this starts to grow we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER 4
+#define MAX_NR_CLUSTERS 2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate where the given CPU from given cluster should
+ * branch once it is ready to re-enter the kernel using ptr, or NULL if it
+ * should be gated. A gated CPU is held in a WFE loop until its vector
+ * becomes non NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
+/**
+ * mcpm_cpu_power_up - make the given CPU in the given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset. If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context. However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
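+
+/*
+ * A minimal bring-up sketch (secondary_entry stands in for whatever entry
+ * point the platform's SMP code actually uses):
+ *
+ *	mcpm_set_entry_vector(cpu, cluster, secondary_entry);
+ *	ret = mcpm_cpu_power_up(cpu, cluster);
+ */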
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * On success this does not return. Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously, in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
+ * specified cpu. Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
+ * make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ * - zero if the CPU is in a safely parked state
+ * - nonzero otherwise (e.g., timeout)
+ */
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU in a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ * to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended. The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * On success this does not return. Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously, in which case the caller should take appropriate action.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work. This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+ int (*power_up)(unsigned int cpu, unsigned int cluster);
+ void (*power_down)(void);
+ int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
+ void (*suspend)(u64);
+ void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
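
A minimal registration sketch, assuming a hypothetical power-controller backend (the my_pm_* names are illustrative; a real backend would also fill in the remaining callbacks):

#include <linux/init.h>
#include <asm/mcpm.h>

static int my_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	/* Ask the power controller to release the CPU/cluster from reset. */
	return 0;
}

static void my_pm_power_down(void)
{
	/* Clean caches, leave coherency, then enter WFI. */
}

static const struct mcpm_platform_ops my_pm_ops = {
	.power_up	= my_pm_power_up,
	.power_down	= my_pm_power_down,
};

static int __init my_pm_init(void)
{
	/* Returns an error if another backend has already registered. */
	return mcpm_platform_register(&my_pm_ops);
}
early_initcall(my_pm_init);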
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+ /* individual CPU states */
+ struct {
+ s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+ } cpus[MAX_CPUS_PER_CLUSTER];
+
+ /* cluster state */
+ s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+ /* inbound-side state */
+ s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+ struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files. Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN 0x11
+#define CPU_COMING_UP 0x12
+#define CPU_UP 0x13
+#define CPU_GOING_DOWN 0x14
+
+#define CLUSTER_DOWN 0x21
+#define CLUSTER_UP 0x22
+#define CLUSTER_GOING_DOWN 0x23
+
+#define INBOUND_NOT_COMING_UP 0x31
+#define INBOUND_COMING_UP 0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS 0
+#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+ (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+ (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+ (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
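
Since the MCPM_SYNC_* offsets above are maintained by hand, they can be sanity-checked against the C layout of struct mcpm_sync_struct; the sketch below is illustrative only and not part of the patch:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/mcpm.h>

static void check_mcpm_sync_layout(void)
{
	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, cpus) !=
		     MCPM_SYNC_CLUSTER_CPUS);
	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, cluster) !=
		     MCPM_SYNC_CLUSTER_CLUSTER);
	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, inbound) !=
		     MCPM_SYNC_CLUSTER_INBOUND);
	BUILD_BUG_ON(sizeof(struct mcpm_sync_struct) != MCPM_SYNC_CLUSTER_SIZE);
}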
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index 00ca5f92648..bf47a6c110a 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,11 +1,9 @@
#ifndef _ASM_ARM_MEMBLOCK_H
#define _ASM_ARM_MEMBLOCK_H
-struct meminfo;
struct machine_desc;
-extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
-
+void arm_memblock_init(const struct machine_desc *);
phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 73cf03aa981..2b751464d6f 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,6 +18,8 @@
#include <linux/types.h>
#include <linux/sizes.h>
+#include <asm/cache.h>
+
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
@@ -28,31 +30,32 @@
*/
#define UL(x) _AC(x, UL)
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
#ifdef CONFIG_MMU
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
* The maximum size of a 26-bit user space task.
*/
-#define TASK_SIZE_26 UL(0x04000000)
+#define TASK_SIZE_26 (UL(1) << 26)
/*
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
#ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
#endif
#if TASK_SIZE > MODULES_VADDR
@@ -80,8 +83,6 @@
*/
#define IOREMAP_MAX_ORDER 24
-#define CONSISTENT_END (0xffe00000UL)
-
#else /* CONFIG_MMU */
/*
@@ -98,23 +99,15 @@
#define TASK_UNMAPPED_BASE UL(0x00000000)
#endif
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
-#endif
-
#ifndef END_MEM
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET (PHYS_OFFSET)
-#endif
-
/*
* The module can be at any place in ram in nommu mode.
*/
#define MODULES_END (END_MEM)
-#define MODULES_VADDR (PHYS_OFFSET)
+#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
@@ -141,24 +134,66 @@
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+/*
+ * Minimum guaranteed alignment in pgd_alloc().  The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension.  This
+ * does not fully accommodate the 40-bit addressing capability of ARM LPAE, but
+ * gives us about 38 bits or so.
+ */
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT 0
+#endif
+#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#endif
+
#ifndef __ASSEMBLY__
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
*/
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_7_0 0x81
-extern unsigned long __pv_phys_offset;
-#define PHYS_OFFSET __pv_phys_offset
+extern unsigned long __pv_phys_pfn_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
+
+#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
@@ -169,45 +204,73 @@ extern unsigned long __pv_phys_offset;
: "=r" (to) \
: "r" (from), "I" (type))
-static inline unsigned long __virt_to_phys(unsigned long x)
+#define __pv_stub_mov_hi(t) \
+ __asm__ volatile("@ __pv_stub_mov\n" \
+ "1: mov %R0, %1\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b\n" \
+ " .popsection\n" \
+ : "=r" (t) \
+ : "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__ volatile("@ __pv_add_carry_stub\n" \
+ "1: adds %Q0, %1, %2\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b\n" \
+ " .popsection\n" \
+ : "+r" (y) \
+ : "r" (x), "I" (__PV_BITS_31_24) \
+ : "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
{
- unsigned long t;
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+ phys_addr_t t;
+
+ if (sizeof(phys_addr_t) == 4) {
+ __pv_stub(x, t, "add", __PV_BITS_31_24);
+ } else {
+ __pv_stub_mov_hi(t);
+ __pv_add_carry_stub(x, t);
+ }
return t;
}
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
{
unsigned long t;
- __pv_stub(x, t, "sub", __PV_BITS_31_24);
+
+ /*
+	 * The 'unsigned long' cast discards the upper word when
+	 * phys_addr_t is 64-bit, and makes sure that the inline
+	 * assembler expression receives a 32-bit argument in the
+	 * place where a 32-bit 'r' operand is expected.
+ */
+ __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
return t;
}
+
#else
-#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-#endif
-#endif /* __ASSEMBLY__ */
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
-#ifndef __ASSEMBLY__
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+ return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
+
+#endif
/*
* These are *only* valid on the kernel direct mapped RAM memory.
@@ -222,16 +285,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
static inline void *phys_to_virt(phys_addr_t x)
{
- return (void *)(__phys_to_virt((unsigned long)(x)));
+ return (void *)__phys_to_virt(x);
}
/*
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
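
For reference, these conversion helpers round-trip only for lowmem addresses in the kernel direct map; a quick self-check sketch (illustrative only, not part of the patch):

#include <linux/bug.h>
#include <linux/pfn.h>
#include <asm/memory.h>

static void phys_virt_roundtrip_check(void *lowmem_ptr)
{
	phys_addr_t pa = __pa(lowmem_ptr);

	/* Only valid for kernel direct-mapped RAM, not vmalloc or highmem. */
	WARN_ON(__va(pa) != lowmem_ptr);
	WARN_ON(virt_to_pfn(lowmem_ptr) != PFN_DOWN(pa));
}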
+extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
+
+/*
+ * These are for systems that have a hardware interconnect supported alias of
+ * physical memory for idmap purposes. Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+ if (arch_virt_to_idmap)
+ return arch_virt_to_idmap(x);
+ else
+ return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
+
/*
* Virtual <-> DMA view memory address translations
* Again, these are *only* valid on the kernel direct mapped RAM
@@ -245,6 +325,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
+#ifdef CONFIG_VIRT_TO_BUS
static inline __deprecated unsigned long virt_to_bus(void *x)
{
return __virt_to_bus((unsigned long)x);
@@ -254,16 +335,11 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
{
return (void *)__bus_to_virt(x);
}
+#endif
/*
* Conversion between a struct page and a physical address.
*
- * Note: when converting an unknown physical address to a
- * struct page, the resulting pointer must be validated
- * using VALID_PAGE(). It must return an invalid struct page
- * for any physical address not corresponding to a system
- * RAM address.
- *
* page_to_pfn(page) convert a struct page * to a PFN number
* pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
*
@@ -272,8 +348,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
*/
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+ && pfn_valid(virt_to_pfn(kaddr)))
#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3..64fd15159b7 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,18 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
- u64 id;
+ atomic64_t id;
+#else
+ int switch_pending;
#endif
- unsigned int vmalloc_seq;
+ unsigned int vmalloc_seq;
+ unsigned long sigpage;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
+#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm) (0)
#endif
@@ -26,7 +29,7 @@ typedef struct {
* modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
*/
typedef struct {
- unsigned long end_brk;
+ unsigned long end_brk;
} mm_context_t;
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc..9b32f76bb0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
+#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>
void __check_vmalloc_seq(struct mm_struct *mm);
@@ -25,7 +26,17 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ mm->context.id = 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
#else /* !CONFIG_CPU_HAS_ASID */
@@ -45,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
* on non-ASID CPUs, the old mm will remain valid until the
* finish_arch_post_lock_switch() call.
*/
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ mm->context.switch_pending = 1;
else
cpu_switch_mm(mm->pgd, mm);
}
@@ -54,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- cpu_switch_mm(mm->pgd, mm);
+ struct mm_struct *mm = current->mm;
+
+ if (mm && mm->context.switch_pending) {
+ /*
+ * Preemption must be disabled during cpu_switch_mm() as we
+ * have some stateful cache flush implementations. Check
+ * switch_pending again in case we were preempted and the
+ * switch to this mm was already done.
+ */
+ preempt_disable();
+ if (mm->context.switch_pending) {
+ mm->context.switch_pending = 0;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+ preempt_enable_no_resched();
}
}
@@ -96,12 +119,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- /* check for possible thread migration */
- if (!cpumask_empty(mm_cpumask(next)) &&
+ /*
+ * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
+ * so check for possible thread migration and invalidate the I-cache
+ * if we're new to this CPU.
+ */
+ if (cache_ops_need_broadcast() &&
+ !cpumask_empty(mm_cpumask(next)) &&
!cpumask_test_cpu(cpu, mm_cpumask(next)))
__flush_icache_all();
-#endif
+
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_and_switch_context(next, tsk);
if (cache_is_vivt())
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 0d3a28dbc8e..ed690c49ef9 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -12,6 +12,8 @@ enum {
ARM_SEC_CORE,
ARM_SEC_EXIT,
ARM_SEC_DEVEXIT,
+ ARM_SEC_HOT,
+ ARM_SEC_UNLIKELY,
ARM_SEC_MAX,
};
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
new file mode 100644
index 00000000000..c3247cc2fe0
--- /dev/null
+++ b/arch/arm/include/asm/mpu.h
@@ -0,0 +1,76 @@
+#ifndef __ARM_MPU_H
+#define __ARM_MPU_H
+
+#ifdef CONFIG_ARM_MPU
+
+/* MPUIR layout */
+#define MPUIR_nU 1
+#define MPUIR_DREGION 8
+#define MPUIR_IREGION 16
+#define MPUIR_DREGION_SZMASK (0xFF << MPUIR_DREGION)
+#define MPUIR_IREGION_SZMASK (0xFF << MPUIR_IREGION)
+
+/* ID_MMFR0 data relevant to MPU */
+#define MMFR0_PMSA (0xF << 4)
+#define MMFR0_PMSAv7 (3 << 4)
+
+/* MPU D/I Size Register fields */
+#define MPU_RSR_SZ 1
+#define MPU_RSR_EN 0
+
+/* The D/I RSR value for an enabled region spanning the whole of memory */
+#define MPU_RSR_ALL_MEM 63
+
+/* Individual bits in the DR/IR ACR */
+#define MPU_ACR_XN (1 << 12)
+#define MPU_ACR_SHARED (1 << 2)
+
+/* C, B and TEX[2:0] bits only have semantic meanings when grouped */
+#define MPU_RGN_CACHEABLE 0xB
+#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define MPU_RGN_STRONGLY_ORDERED 0
+
+/* Main region should only be shared for SMP */
+#ifdef CONFIG_SMP
+#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#else
+#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE
+#endif
+
+/* Access permission bits of ACR (only define those that we use) */
+#define MPU_AP_PL1RW_PL0RW (0x3 << 8)
+#define MPU_AP_PL1RW_PL0R0 (0x2 << 8)
+#define MPU_AP_PL1RW_PL0NA (0x1 << 8)
+
+/* For minimal static MPU region configurations */
+#define MPU_PROBE_REGION 0
+#define MPU_BG_REGION 1
+#define MPU_RAM_REGION 2
+#define MPU_VECTORS_REGION 3
+
+/* Maximum number of regions Linux is interested in */
+#define MPU_MAX_REGIONS 16
+
+#define MPU_DATA_SIDE 0
+#define MPU_INSTR_SIDE 1
+
+#ifndef __ASSEMBLY__
+
+struct mpu_rgn {
+ /* Assume same attributes for d/i-side */
+ u32 drbar;
+ u32 drsr;
+ u32 dracr;
+};
+
+struct mpu_rgn_info {
+ u32 mpuir;
+ struct mpu_rgn rgns[MPU_MAX_REGIONS];
+};
+extern struct mpu_rgn_info mpu_rgn_info;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_ARM_MPU */
+
+#endif
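
An illustrative sketch of how one region descriptor might be filled in using the encodings above (the helper name and the pre-encoded size field are assumptions, not part of the patch):

#include <linux/types.h>
#include <asm/mpu.h>

static void mpu_describe_ram_region(struct mpu_rgn_info *info,
				    u32 base, u32 size_field)
{
	struct mpu_rgn *rgn = &info->rgns[MPU_RAM_REGION];

	rgn->drbar = base;					/* region base */
	rgn->dracr = MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL;	/* normal RAM */
	rgn->drsr  = (size_field << MPU_RSR_SZ) | (1 << MPU_RSR_EN);
}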
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644
index 00000000000..8f730fe7009
--- /dev/null
+++ b/arch/arm/include/asm/neon.h
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon() (!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at the -O3 level
+ * if -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+ BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
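
Typical usage follows the rule in the comment above: the NEON routine lives in its own compilation unit built with NEON enabled, and the caller brackets it with kernel_neon_begin()/kernel_neon_end(). A hedged sketch, where my_neon_memcpy() is hypothetical:

#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/neon.h>

/* Implemented in a separate object file compiled with NEON enabled. */
void my_neon_memcpy(void *dst, const void *src, int len);

static void copy_with_neon(void *dst, const void *src, int len)
{
	if (cpu_has_neon() && !in_interrupt()) {
		kernel_neon_begin();
		my_neon_memcpy(dst, src, len);
		kernel_neon_end();
	} else {
		memcpy(dst, src, len);	/* scalar fallback */
	}
}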
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644
index 00000000000..bc3a9174417
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_OPCODES_SEC_H
+#define __ASM_ARM_OPCODES_SEC_H
+
+#include <asm/opcodes.h>
+
+#define __SMC(imm4) __inst_arm_thumb32( \
+ 0xE1600070 | (((imm4) & 0xF) << 0), \
+ 0xF7F08000 | (((imm4) & 0xF) << 16) \
+)
+
+#endif /* __ASM_ARM_OPCODES_SEC_H */
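
In C, __SMC() expands (via __inst_arm_thumb32()) to an inline-asm string emitting the raw SMC encoding for both ARM and Thumb-2 kernels. The sketch below shows one possible call wrapper; the use of r0 for the function ID is purely an assumption, since the calling convention is defined by the secure firmware:

#include <linux/types.h>
#include <asm/opcodes-sec.h>

static u32 firmware_smc(u32 function_id)
{
	register u32 r0 asm("r0") = function_id;

	asm volatile(__SMC(0)
		     : "+r" (r0)
		     :
		     : "memory");

	return r0;	/* firmware-defined return value (assumed in r0) */
}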
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 74e211a6fb2..e796c598513 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -10,6 +10,7 @@
#define __ASM_ARM_OPCODES_H
#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352..891a56b35bc 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -28,53 +28,84 @@ struct outer_cache_fns {
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
void (*flush_all)(void);
- void (*inv_all)(void);
void (*disable)(void);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
- void (*set_debug)(unsigned long);
void (*resume)(void);
-};
-#ifdef CONFIG_OUTER_CACHE
+	/* This hook is specific to the ARM L2C outer cache controllers */
+ void (*write_sec)(unsigned long, unsigned);
+};
extern struct outer_cache_fns outer_cache;
+#ifdef CONFIG_OUTER_CACHE
+/**
+ * outer_inv_range - invalidate range of outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
+
+/**
+ * outer_clean_range - clean dirty outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
+
+/**
+ * outer_flush_range - clean and invalidate outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
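
These range helpers take physical addresses and are normally paired with inner-cache maintenance by virtual address. A hedged sketch follows; real drivers should normally go through the DMA mapping API rather than calling these primitives directly:

#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/memory.h>
#include <asm/outercache.h>

static void clean_buffer_for_device(void *buf, size_t len)
{
	phys_addr_t start = virt_to_phys(buf);

	__cpuc_flush_dcache_area(buf, len);	/* inner caches, by VA */
	outer_clean_range(start, start + len);	/* outer cache, by PA */
}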
+/**
+ * outer_flush_all - clean and invalidate all cache lines in the outer cache
+ *
+ * Note: depending on implementation, this may not be atomic - it must
+ * only be called with interrupts disabled and no other active outer
+ * cache masters.
+ *
+ * It is intended that this function is only used by implementations
+ * needing to override the outer_cache.disable() method due to security.
+ * (Some implementations perform this as a clean followed by an invalidate.)
+ */
static inline void outer_flush_all(void)
{
if (outer_cache.flush_all)
outer_cache.flush_all();
}
-static inline void outer_inv_all(void)
-{
- if (outer_cache.inv_all)
- outer_cache.inv_all();
-}
-
-static inline void outer_disable(void)
-{
- if (outer_cache.disable)
- outer_cache.disable();
-}
+/**
+ * outer_disable - clean, invalidate and disable the outer cache
+ *
+ * Disable the outer cache, ensuring that any data contained in the outer
+ * cache is pushed out to lower levels of system memory.  The note and
+ * conditions above concerning outer_flush_all() apply here.
+ */
+extern void outer_disable(void);
+/**
+ * outer_resume - restore the cache configuration and re-enable outer cache
+ *
+ * Restore any configuration that the cache had when previously enabled,
+ * and re-enable the outer cache.
+ */
static inline void outer_resume(void)
{
if (outer_cache.resume)
@@ -90,12 +121,18 @@ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{ }
static inline void outer_flush_all(void) { }
-static inline void outer_inv_all(void) { }
static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
#endif
#ifdef CONFIG_OUTER_CACHE_SYNC
+/**
+ * outer_sync - perform a sync point for outer cache
+ *
+ * Ensure that all outer cache operations are complete and any store
+ * buffers are drained.
+ */
static inline void outer_sync(void)
{
if (outer_cache.sync)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 812a4944e78..4355f0ec44d 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -13,7 +13,7 @@
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifndef __ASSEMBLY__
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_KUSER_HELPERS
#define __HAVE_ARCH_GATE_AREA 1
+#endif
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index a98a2e112fa..7e95d8535e2 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -31,11 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
#endif /* CONFIG_PCI_DOMAINS */
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
/*
* The PCI address space does equal the physical memory address space.
* The networking and block device layers use this boolean for bounce
@@ -57,12 +52,9 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-/*
- * Dummy implementation; always return 0.
- */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
- return 0;
+ return channel ? 15 : 14;
}
#endif /* __KERNEL__ */
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a..209e6504922 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- /* Read TPIDRPRW */
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+ register unsigned long *sp asm ("sp");
+
+ /*
+ * Read TPIDRPRW.
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
return off;
}
#define __my_cpu_offset __my_cpu_offset()
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f53f5..78a77936168 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -102,12 +102,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
#else
pte = alloc_pages(PGALLOC_GFP, 0);
#endif
- if (pte) {
- if (!PageHighMem(pte))
- clean_pte_table(page_address(pte));
- pgtable_page_ctor(pte);
+ if (!pte)
+ return NULL;
+ if (!PageHighMem(pte))
+ clean_pte_table(page_address(pte));
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
}
-
return pte;
}
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386e..219ac88a954 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
@@ -160,6 +161,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
return (pmd_t *)pud;
}
+#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define copy_pmd(pmdpd,pmdps) \
@@ -181,6 +183,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+/*
+ * We don't have huge page support for short descriptors; for the moment,
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd) (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index d7952824c5c..626989fec4d 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -30,20 +30,28 @@
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_BIT4 (_AT(pmdval_t, 0))
#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT (61)
+#define PMD_APTABLE (_AT(pgdval_t, 3) << PGD_APTABLE_SHIFT)
+#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59)
/*
* - section
*/
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
+#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
/*
@@ -61,6 +69,7 @@
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
@@ -74,4 +83,24 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+/*
+ * TTBR0/TTBR1 split (PAGE_OFFSET):
+ * 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+ * 0x80000000: T0SZ = 0, T1SZ = 1
+ * 0xc0000000: T0SZ = 0, T1SZ = 2
+ *
+ * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+ * booting secondary CPUs would end up using TTBR1 for the identity
+ * mapping set up in TTBR0.
+ */
+#if defined CONFIG_VMSPLIT_2G
+#define TTBR1_OFFSET 16 /* skip two L1 entries */
+#elif defined CONFIG_VMSPLIT_3G
+#define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */
+#else
+#define TTBR1_OFFSET 0
+#endif
+
+#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+
#endif
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a3f37929940..85c60adc8b6 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -33,7 +33,7 @@
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 4
-#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
+#define PTE_HWTABLE_PTRS (0)
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
@@ -48,20 +48,28 @@
#define PMD_SHIFT 21
#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
+#define PMD_MASK (~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1))
/*
* section address mask and size definitions.
*/
#define SECTION_SHIFT 21
#define SECTION_SIZE (1UL << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
+#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1))
#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
* "Linux" PTE definitions for LPAE.
*
* These bits overlap with the hardware bits but the naming is preserved for
@@ -79,6 +87,11 @@
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
+#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
+#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+
/*
* To be used in assembly code with the upper page attributes.
*/
@@ -104,11 +117,35 @@
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
+/*
+ * 2nd stage PTE definitions for LPAE.
+ */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
+#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
+
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+
+/*
+ * Hyp-mode PL2 PTE definitions for LPAE.
+ */
+#define L_PTE_HYP L_PTE_USER
+
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & 2))
#define pud_present(pud) (pud_val(pud))
+#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
+#define pmd_large(pmd) pmd_sect(pmd)
#define pud_clear(pudp) \
do { \
@@ -148,8 +185,86 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
clean_pmd_entry(pmdp); \
} while (0)
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison test fails erroneously, ultimately leading to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \
+ : pte_val(pte_a)) \
+ == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \
+ : pte_val(pte_b)))
+
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+
+#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+/* Represent a not-present pmd by zero; this is used by pmdp_invalidate. */
+#define pmd_mknotpresent(pmd) (__pmd(0))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+ PMD_SECT_VALID | PMD_SECT_NONE;
+ pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+ return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ BUG_ON(addr >= TASK_SIZE);
+
+ /* create a faulting entry if PROT_NONE protected */
+ if (pmd_val(pmd) & PMD_SECT_NONE)
+ pmd_val(pmd) &= ~PMD_SECT_VALID;
+
+ *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+ flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_3LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 7ec60d6075b..0642228ff78 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -79,8 +79,6 @@ extern unsigned int kobjsize(const void *objp);
* No page table caches to initialise.
*/
#define pgtable_cache_init() do { } while (0)
-#define io_remap_pfn_range remap_pfn_range
-
/*
* All 32bit addresses are effectively valid for vmalloc...
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9c82f988c0e..5478e5d6ad8 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,9 @@
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
+
+#include <asm/tlbflush.h>
+
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
@@ -58,7 +61,16 @@ extern void __pgd_error(const char *file, int line, pgd_t);
* mapping to be mapped at. This is particularly important for
* non-high vector CPUs.
*/
-#define FIRST_USER_ADDRESS PAGE_SIZE
+#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
+
+/*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING TASK_SIZE
+#endif
/*
* The pgprot_* and protection_map entries will be fixed up in runtime
@@ -70,6 +82,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
+extern pgprot_t pgprot_hyp_device;
+extern pgprot_t pgprot_s2;
+extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
@@ -82,6 +97,10 @@ extern pgprot_t pgprot_kernel;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
+#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
+#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
@@ -197,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte) (0)
-#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte) \
+ (pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
@@ -218,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
unsigned long ext = 0;
- if (addr < TASK_SIZE && pte_present_user(pteval)) {
+ if (addr < TASK_SIZE && pte_valid_user(pteval)) {
__sync_icache_dcache(pteval);
ext |= PTE_EXT_NG;
}
@@ -235,12 +257,15 @@ PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
+PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN);
+PTE_BIT_FUNC(mknexec, |= L_PTE_XN);
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+ L_PTE_NONE | L_PTE_VALID;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
@@ -301,13 +326,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
- remap_pfn_range(vma, from, pfn, size, prot)
-
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index f24edad26c7..ae1919be8f9 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -71,6 +71,8 @@ struct arm_pmu {
void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
struct perf_event *event);
+ void (*clear_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
u32 (*read_counter)(struct perf_event *event);
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
new file mode 100644
index 00000000000..806cfe622a9
--- /dev/null
+++ b/arch/arm/include/asm/probes.h
@@ -0,0 +1,43 @@
+/*
+ * arch/arm/include/asm/probes.h
+ *
+ * Original contents copied from arch/arm/include/asm/kprobes.h
+ * which contains the following notice...
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ASM_PROBES_H
+#define _ASM_PROBES_H
+
+typedef u32 probes_opcode_t;
+
+struct arch_probes_insn;
+typedef void (probes_insn_handler_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef unsigned long (probes_check_cc)(unsigned long);
+typedef void (probes_insn_singlestep_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef void (probes_insn_fn_t)(void);
+
+/* Architecture specific copy of original instruction. */
+struct arch_probes_insn {
+ probes_opcode_t *insn;
+ probes_insn_handler_t *insn_handler;
+ probes_check_cc *insn_check_cc;
+ probes_insn_singlestep_t *insn_singlestep;
+ probes_insn_fn_t *insn_fn;
+};
+
+#endif
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f3628fb3d2b..5324c1112f3 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -60,7 +60,7 @@ extern struct processor {
/*
* Set the page table
*/
- void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+ void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
@@ -116,13 +116,25 @@ extern void cpu_resume(void);
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
#ifdef CONFIG_ARM_LPAE
+
+#define cpu_get_ttbr(nr) \
+ ({ \
+ u64 ttbr; \
+ __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \
+ : "=r" (ttbr)); \
+ ttbr; \
+ })
+
+#define cpu_set_ttbr(nr, val) \
+ do { \
+ u64 ttbr = val; \
+ __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \
+ : : "r" (ttbr)); \
+ } while (0)
+
#define cpu_get_pgd() \
({ \
- unsigned long pg, pg2; \
- __asm__("mrrc p15, 0, %0, %1, c2" \
- : "=r" (pg), "=r" (pg2) \
- : \
- : "cc"); \
+ u64 pg = cpu_get_ttbr(0); \
pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
(pgd_t *)phys_to_virt(pg); \
})
@@ -137,6 +149,10 @@ extern void cpu_resume(void);
})
#endif
+#else	/* !CONFIG_MMU */
+
+#define cpu_switch_mm(pgd,mm) { }
+
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaa..c3d5fc124a0 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,6 +22,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
+#include <asm/unified.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -54,7 +55,6 @@ struct thread_struct {
#define start_thread(regs,pc,sp) \
({ \
- unsigned long *stack = (unsigned long *)sp; \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +65,6 @@ struct thread_struct {
regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
nommu_start_thread(regs); \
})
@@ -91,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 9998b\n" \
+ " " up "\n" \
+ " .popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up) up
+#endif
+
/*
* Prefetching support - only ARMv5.
*/
@@ -101,17 +109,22 @@ static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
"pld\t%a0"
- :
- : "p" (ptr)
- : "cc");
+ :: "p" (ptr));
}
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
-#define prefetchw(ptr) prefetch(ptr)
-
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) do { } while (0)
-
+static inline void prefetchw(const void *ptr)
+{
+ __asm__ __volatile__(
+ ".arch_extension mp\n"
+ __ALT_SMP_ASM(
+ WASM(pldw) "\t%a0",
+ WASM(pld) "\t%a0"
+ )
+ :: "p" (ptr));
+}
+#endif
#endif
#define HAVE_ARCH_PICK_MMAP_LAYOUT
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index a219227c3e4..cd94ef2ef28 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -11,22 +11,18 @@
#ifndef __ASMARM_PROM_H
#define __ASMARM_PROM_H
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
#ifdef CONFIG_OF
-extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
-extern void arm_dt_memblock_reserve(void);
+extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
-static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
{
return NULL;
}
-static inline void arm_dt_memblock_reserve(void) { }
static inline void arm_dt_init_cpu_maps(void) { }
#endif /* CONFIG_OF */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
new file mode 100644
index 00000000000..c25ef3ec6d1
--- /dev/null
+++ b/arch/arm/include/asm/psci.h
@@ -0,0 +1,48 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_PSCI_H
+#define __ASM_ARM_PSCI_H
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+extern struct smp_operations psci_smp_ops;
+
+#ifdef CONFIG_ARM_PSCI
+int psci_init(void);
+bool psci_smp_available(void);
+#else
+static inline int psci_init(void) { return 0; }
+static inline bool psci_smp_available(void) { return false; }
+#endif
+
+#endif /* __ASM_ARM_PSCI_H */
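
As a sketch of how the cpu_on hook might be used by an SMP boot path (assuming the firmware implements CPU_ON; the function name is illustrative):

#include <linux/errno.h>
#include <asm/memory.h>
#include <asm/psci.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>

static int psci_boot_secondary_sketch(unsigned int cpu)
{
	if (!psci_ops.cpu_on)
		return -ENODEV;

	/* Hand the firmware the physical address of the kernel entry point. */
	return psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_startup));
}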
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 3d52ee1bfb3..c877654fe3b 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -27,9 +27,13 @@ struct pt_regs {
#define thumb_mode(regs) (0)
#endif
+#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
- ((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
- (((regs)->ARM_cpsr & PSR_T_BIT) >> 5))
+ ((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
+ (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+#else
+#define isa_mode(regs) 1 /* Thumb */
+#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
@@ -45,6 +49,7 @@ struct pt_regs {
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
+#ifndef CONFIG_CPU_V7M
unsigned long mode = regs->ARM_cpsr & MODE_MASK;
/*
@@ -67,6 +72,9 @@ static inline int valid_user_regs(struct pt_regs *regs)
regs->ARM_cpsr |= USR_MODE;
return 0;
+#else /* ifndef CONFIG_CPU_V7M */
+ return 1;
+#endif
}
static inline long regs_return_value(struct pt_regs *regs)
@@ -76,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) (regs)->ARM_pc
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ instruction_pointer(regs) = val;
+}
+
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
deleted file mode 100644
index e3f75726343..00000000000
--- a/arch/arm/include/asm/sched_clock.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * sched_clock.h: support for extending counters to full 64-bit ns counter
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef ASM_SCHED_CLOCK
-#define ASM_SCHED_CLOCK
-
-extern void sched_clock_postinit(void);
-extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
-
-#endif
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f0560950..e0adb9f1bf9 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -21,35 +21,7 @@
#define __tagtable(tag, fn) \
static const struct tagtable __tagtable_##fn __tag = { tag, fn }
-/*
- * Memory map description
- */
-#define NR_BANKS CONFIG_ARM_NR_BANKS
-
-struct membank {
- phys_addr_t start;
- phys_addr_t size;
- unsigned int highmem;
-};
-
-struct meminfo {
- int nr_banks;
- struct membank bank[NR_BANKS];
-};
-
-extern struct meminfo meminfo;
-
-#define for_each_bank(iter,mi) \
- for (iter = 0; iter < (mi)->nr_banks; iter++)
-
-#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
-#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
-#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
-#define bank_phys_start(bank) (bank)->start
-#define bank_phys_end(bank) ((bank)->start + (bank)->size)
-#define bank_phys_size(bank) (bank)->size
-
-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index 9a0ea6ab988..c0eb412aff0 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -16,23 +16,7 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
+#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d3a22bebe6c..2ec765c39ab 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -65,12 +65,16 @@ asmlinkage void secondary_start_kernel(void);
* Initial data for bringing up a secondary CPU.
*/
struct secondary_data {
- unsigned long pgdir;
+ union {
+ unsigned long mpu_rgn_szr;
+ unsigned long pgdir;
+ };
unsigned long swapper_pg_dir;
void *stack;
};
extern struct secondary_data secondary_data;
extern volatile int pen_release;
+extern void secondary_startup(void);
extern int __cpu_disable(void);
@@ -81,6 +85,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
struct smp_operations {
#ifdef CONFIG_SMP
/*
@@ -109,6 +115,15 @@ struct smp_operations {
#endif
};
+struct of_cpu_method {
+ const char *method;
+ struct smp_operations *ops;
+};
+
+#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpu_method __cpu_method_of_table_##name \
+ __used __section(__cpu_method_of_table) \
+ = { .method = _method, .ops = _ops }
/*
* set platform specific SMP operations
*/
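
CPU_METHOD_OF_DECLARE() lets an SMP backend be selected by the device tree "enable-method" string. An illustrative sketch, with hypothetical vendor names and a stubbed boot callback:

#include <linux/init.h>
#include <linux/sched.h>
#include <asm/smp.h>

static int my_vendor_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* Kick the secondary CPU out of its holding pen (platform specific). */
	return 0;
}

static struct smp_operations my_vendor_smp_ops __initdata = {
	.smp_boot_secondary	= my_vendor_boot_secondary,
};

CPU_METHOD_OF_DECLARE(my_vendor_smp, "myvendor,smp", &my_vendor_smp_ops);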
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index aaa61b6f50f..a252c0bfacf 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -26,6 +26,9 @@ static inline bool is_smp(void)
}
/* all SMP configurations have the extended CPUID registers */
+#ifndef CONFIG_MMU
+#define tlb_ops_need_broadcast() 0
+#else
static inline int tlb_ops_need_broadcast(void)
{
if (!is_smp())
@@ -33,6 +36,7 @@ static inline int tlb_ops_need_broadcast(void)
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
+#endif
#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
#define cache_ops_need_broadcast() 0
@@ -49,7 +53,7 @@ static inline int cache_ops_need_broadcast(void)
/*
* Logical CPU mapping.
*/
-extern int __cpu_logical_map[];
+extern u32 __cpu_logical_map[];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/*
* Retrieve logical cpu index corresponding to a given MPIDR[23:0]
@@ -66,4 +70,25 @@ static inline int get_logical_index(u32 mpidr)
return -EINVAL;
}
+/*
+ * NOTE! Assembly code relies on the following structure memory layout in
+ * order to carry out a load-multiple from its base address.  For more
+ * information, see arch/arm/kernel/sleep.S.
+ */
+struct mpidr_hash {
+ u32 mask; /* used by sleep.S */
+ u32 shift_aff[3]; /* used by sleep.S */
+ u32 bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+ return 1 << mpidr_hash.bits;
+}
+
+extern int platform_can_cpu_hotplug(void);
+
#endif
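A hedged sketch of what mpidr_hash_size() is for: it bounds the index space
produced by hashing an MPIDR, so per-CPU data looked up from assembly (such as
resume pointers) can live in a compact table. All names below are illustrative:

	#include <linux/slab.h>

	static u32 *hypothetical_resume_ptrs;

	static int __init hypothetical_alloc_resume_table(void)
	{
		/* one slot per possible MPIDR hash value */
		hypothetical_resume_ptrs = kcalloc(mpidr_hash_size(),
						   sizeof(*hypothetical_resume_ptrs),
						   GFP_KERNEL);
		return hypothetical_resume_ptrs ? 0 : -ENOMEM;
	}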
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 4eb6d005ffa..0393fbab8dd 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -6,9 +6,43 @@
#define SCU_PM_POWEROFF 3
#ifndef __ASSEMBLER__
+
+#include <asm/cputype.h>
+
+static inline bool scu_a9_has_base(void)
+{
+ return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+}
+
+static inline unsigned long scu_a9_get_base(void)
+{
+ unsigned long pa;
+
+ asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (pa));
+
+ return pa;
+}
+
+#ifdef CONFIG_HAVE_ARM_SCU
unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
+#else
+static inline unsigned int scu_get_core_count(void __iomem *scu_base)
+{
+ return 0;
+}
+static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+ return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU)
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
#endif
#endif
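A hedged sketch of how platform SMP bring-up code might combine the new helpers:
probe the SCU base from CP15 on a Cortex-A9, map it, then enable it. The mapping
size and messages are illustrative:

	void __iomem *scu_base = NULL;

	if (scu_a9_has_base())
		scu_base = ioremap(scu_a9_get_base(), SZ_256);

	if (scu_base) {
		pr_info("SCU: %u cores\n", scu_get_core_count(scu_base));
		scu_enable(scu_base);	/* stubbed out unless SMP && HAVE_ARM_SCU */
	}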
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 0f01f4677bd..7b2899c2f7f 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -34,12 +34,4 @@ struct twd_local_timer name __initdata = { \
int twd_local_timer_register(struct twd_local_timer *);
-#ifdef CONFIG_HAVE_ARM_TWD
-void twd_local_timer_of_register(void);
-#else
-static inline void twd_local_timer_of_register(void)
-{
-}
-#endif
-
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a6..ac4bfae2670 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
-#include <asm/processor.h>
+#include <linux/prefetch.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
-#define ALT_SMP(smp, up) \
- "9998: " smp "\n" \
- " .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
- " " up "\n" \
- " .popsection\n"
-
#ifdef CONFIG_THUMB2_KERNEL
-#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
@@ -31,31 +23,23 @@
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
-#define WFE(cond) ALT_SMP( \
+#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
-#define SEV ALT_SMP("sev", "nop")
-#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
+#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
static inline void dsb_sev(void)
{
-#if __LINUX_ARM_ARCH__ >= 7
- __asm__ __volatile__ (
- "dsb\n"
- SEV
- );
-#else
- __asm__ __volatile__ (
- "mcr p15, 0, %0, c7, c10, 4\n"
- SEV
- : : "r" (0)
- );
-#endif
+
+ dsb(ishst);
+ __asm__(SEV);
}
/*
@@ -77,6 +61,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
u32 newval;
arch_spinlock_t lockval;
+ prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
@@ -97,19 +82,23 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned long tmp;
+ unsigned long contended, res;
u32 slock;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" subs %1, %0, %0, ror #16\n"
-" addeq %0, %0, %3\n"
-" strexeq %1, %0, [%2]"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
-
- if (tmp == 0) {
+ prefetchw(&lock->slock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%3]\n"
+ " mov %2, #0\n"
+ " subs %1, %0, %0, ror #16\n"
+ " addeq %0, %0, %4\n"
+ " strexeq %2, %0, [%3]"
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -119,29 +108,19 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- unsigned long tmp;
- u32 slock;
-
smp_mb();
-
- __asm__ __volatile__(
-" mov %1, #1\n"
-"1: ldrex %0, [%2]\n"
-" uadd16 %0, %0, %1\n"
-" strex %1, %0, [%2]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock)
- : "cc");
-
+ lock->tickets.owner++;
dsb_sev();
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
- return tickets.owner != tickets.next;
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -163,6 +142,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -179,17 +159,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
-
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
-
- if (tmp == 0) {
+ unsigned long contended, res;
+
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -211,7 +195,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
/*
* Read locks are a bit more hairy:
@@ -229,6 +213,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
@@ -249,6 +234,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
smp_mb();
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -265,22 +251,31 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
-
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
-
- smp_mb();
- return tmp2 == 0;
+ unsigned long contended, res;
+
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
+
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
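The ticket-lock rework above reduces arch_spin_unlock() to a plain increment of
the owner field, and "unlocked" now simply means owner == next. A hedged,
simplified C model of the ticket discipline (not the LDREX/STREX code above):

	struct ticket_model {			/* stand-in for arch_spinlock_t */
		unsigned short owner;		/* ticket currently being served */
		unsigned short next;		/* next ticket to hand out */
	};

	static void model_lock(struct ticket_model *l)
	{
		unsigned short ticket = l->next++;	/* done atomically above */

		while (l->owner != ticket)
			;			/* real code waits in WFE, woken by SEV */
	}

	static void model_unlock(struct ticket_model *l)
	{
		l->owner++;		/* serve the next ticket; dsb_sev() wakes waiters */
	}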
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index b262d2f8b47..47663fcb10a 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -25,7 +25,7 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
typedef struct {
- volatile unsigned int lock;
+ u32 lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index 1c0a551ae37..cd20029bcd9 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -1,6 +1,11 @@
#ifndef __ASM_ARM_SUSPEND_H
#define __ASM_ARM_SUSPEND_H
+struct sleep_save_sp {
+ u32 *save_ptr_stash;
+ u32 save_ptr_stash_phys;
+};
+
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index fa09e6b49bf..c99e259469f 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -4,6 +4,16 @@
#include <linux/thread_info.h>
/*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#define finish_arch_switch(prev) dsb(ish)
+#endif
+
+/*
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'. schedule() itself
* contains the memory barrier to tell GCC not to cache `current'.
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf7..9732b8e11e6 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -2,7 +2,6 @@
#define __ASM_SYNC_BITOPS_H__
#include <asm/bitops.h>
-#include <asm/system.h>
/* sync_bitops functions are equivalent to the SMP implementation of the
* original functions, independently from CONFIG_SMP being defined.
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index f1d96d4e809..4651f6999b7 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -7,7 +7,7 @@
#ifndef _ASM_ARM_SYSCALL_H
#define _ASM_ARM_SYSCALL_H
-#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */
#include <linux/elf.h> /* for ELF_EM */
#include <linux/err.h>
#include <linux/sched.h>
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
@@ -97,8 +103,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
}
-static inline int syscall_get_arch(struct task_struct *task,
- struct pt_regs *regs)
+static inline int syscall_get_arch(void)
{
/* ARM tasks don't change audit architectures on the fly. */
return AUDIT_ARCH_ARM;
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1..00000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index dfd386d0c02..720ea0320a6 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -11,6 +11,7 @@
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
#define CPU_ARCH_ARMv7 9
+#define CPU_ARCH_ARMv7M 10
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 5a85f148b60..a3d61ad984a 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -6,11 +6,12 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <linux/reboot.h>
extern void cpu_init(void);
void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(char str, const char *cmd);
+extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
#define UDBG_UNDEFINED (1 << 0)
@@ -21,9 +22,6 @@ extern void (*arm_pm_idle)(void);
extern unsigned int user_debug;
-extern void disable_hlt(void);
-extern void enable_hlt(void);
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index cddda1f41f0..e4e4208a913 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -43,6 +43,16 @@ struct cpu_context_save {
__u32 extra[2]; /* Xscale 'acc' register, etc */
};
+struct arm_restart_block {
+ union {
+ /* For user cache flushing */
+ struct {
+ unsigned long start;
+ unsigned long end;
+ } cache;
+ };
+};
+
/*
* low level task data that entry.S needs immediate access to.
* __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -58,7 +68,7 @@ struct thread_info {
struct cpu_context_save cpu_context; /* cpu context */
__u32 syscall; /* syscall number */
__u8 used_cp[16]; /* thread used copro */
- unsigned long tp_value;
+ unsigned long tp_value[2]; /* TLS registers */
#ifdef CONFIG_CRUNCH
struct crunch_state crunchstate;
#endif
@@ -68,6 +78,7 @@ struct thread_info {
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
struct restart_block restart_block;
+ struct arm_restart_block arm_restart_block;
};
#define INIT_THREAD_INFO(tsk) \
@@ -103,8 +114,14 @@ static inline struct thread_info *current_thread_info(void)
((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+
+#ifndef CONFIG_THUMB2_KERNEL
#define thread_saved_fp(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+#else
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
+#endif
extern void crunch_task_disable(struct thread_info *);
extern void crunch_task_copy(struct thread_info *, void *);
@@ -130,12 +147,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#endif
/*
- * We use bit 30 of the preempt_count to indicate that kernel
- * preemption is occurring. See <asm/hardirq.h>.
- */
-#define PREEMPT_ACTIVE 0x40000000
-
-/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
 * TIF_SYSCALL_AUDIT - syscall auditing active
@@ -148,18 +159,20 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_UPROBE 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
+#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
-#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@@ -173,7 +186,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 83f2aa83899..f6fcc67ef06 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -12,12 +12,6 @@
#ifndef _ASMARM_TIMEX_H
#define _ASMARM_TIMEX_H
-#ifdef CONFIG_ARCH_MULTIPLATFORM
-#define CLOCK_TICK_RATE 1000000
-#else
-#include <mach/timex.h>
-#endif
-
typedef unsigned long cycles_t;
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee2..f1a0dace3ef 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb) 0
-#else
-#define tlb_fast_mode(tlb) 1
-#endif
-
#define MMU_GATHER_BUNDLE 8
/*
@@ -55,6 +43,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -109,22 +98,32 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
}
}
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
tlb_flush(tlb);
- if (!tlb_fast_mode(tlb)) {
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- if (tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
- }
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
+ tlb->nr = 0;
+ if (tlb->pages == tlb->local)
+ __tlb_alloc_page(tlb);
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb_flush_mmu_free(tlb);
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
@@ -178,11 +177,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (tlb_fast_mode(tlb)) {
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
- }
-
tlb->pages[tlb->nr++] = page;
VM_BUG_ON(tlb->nr > tlb->max);
return tlb->max - tlb->nr;
@@ -223,6 +217,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
#endif
}
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+ tlb_add_flush(tlb, addr);
+}
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
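In the reworked tlb_gather_mmu() above, fullmm is derived from the range:
!(start | (end + 1)) is non-zero only when start == 0 and end == ~0UL, i.e. the
gather spans the entire address space. A hedged worked example of the test:

	unsigned long start = 0, end = ~0UL;	/* whole-mm teardown, e.g. exit_mmap() */
	int fullmm = !(start | (end + 1));	/* 0 | 0  -> fullmm == 1 */

	start = 0x8000; end = 0xffff;		/* partial unmap */
	fullmm = !(start | (end + 1));		/* non-zero -> fullmm == 0 */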
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77e..def9e570199 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
#include <asm/glue.h>
-#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
@@ -22,7 +21,6 @@
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
-#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
@@ -34,10 +32,13 @@
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
+#define TLB_V6_BP (1 << 19)
+
/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE (1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE (1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP (1 << 23)
#define TLB_BARRIER (1 << 28)
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
@@ -49,7 +50,6 @@
* =============
*
* We have the following to choose from:
- * v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -150,7 +150,8 @@
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
- TLB_V6_I_ASID | TLB_V6_D_ASID)
+ TLB_V6_I_ASID | TLB_V6_D_ASID | \
+ TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags v6wbi_tlb_flags
@@ -165,10 +166,12 @@
# define v6wbi_always_flags (-1UL)
#endif
-#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \
+ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+ TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+ TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+ TLB_V6_U_ASID | TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V7
@@ -316,72 +319,112 @@ extern struct cpu_tlb_fns cpu_tlb;
#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
-static inline void local_flush_tlb_all(void)
+static inline void __local_flush_tlb_all(void)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
- if (tlb_flag(TLB_WB))
- dsb();
-
- tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
- tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
if (tlb_flag(TLB_BARRIER)) {
- dsb();
+ dsb(nsh);
isb();
}
}
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void __flush_tlb_all(void)
{
const int zero = 0;
- const int asid = ASID(mm);
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_WB))
- dsb();
+ dsb(ishst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
- if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
- if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
- tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(ish);
+ isb();
+ }
+}
+
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int zero = 0;
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
}
- put_cpu();
}
tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_mm(mm);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_mm(mm);
#ifdef CONFIG_ARM_ERRATA_720789
- tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
#else
- tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
#endif
if (tlb_flag(TLB_BARRIER))
- dsb();
+ dsb(ish);
}
static inline void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
- if (tlb_flag(TLB_WB))
- dsb();
-
- if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+ if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -392,6 +435,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_page(vma, uaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_page(vma, uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
@@ -399,20 +472,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
#endif
if (tlb_flag(TLB_BARRIER))
- dsb();
+ dsb(ish);
}
-static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
- kaddr &= PAGE_MASK;
-
- if (tlb_flag(TLB_WB))
- dsb();
-
- tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -422,15 +489,78 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_kernel_page(kaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(nsh);
+ isb();
+ }
+}
+
+static inline void __flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_kernel_page(kaddr);
tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
if (tlb_flag(TLB_BARRIER)) {
- dsb();
+ dsb(ish);
isb();
}
}
/*
+ * Branch predictor maintenance is paired with full TLB invalidation, so
+ * there is no need for any barriers here.
+ */
+static inline void __local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_V6_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
+static inline void local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ __local_flush_bp_all();
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
+static inline void __flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ __local_flush_bp_all();
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+}
+
+/*
* flush_pmd_entry
*
* Flush a PMD entry (word aligned, or double-word aligned) to
@@ -451,7 +581,7 @@ static inline void flush_pmd_entry(void *pmd)
tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
if (tlb_flag(TLB_WB))
- dsb();
+ dsb(ishst);
}
static inline void clean_pmd_entry(void *pmd)
@@ -480,6 +610,7 @@ static inline void clean_pmd_entry(void *pmd)
#define flush_tlb_kernel_page local_flush_tlb_kernel_page
#define flush_tlb_range local_flush_tlb_range
#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+#define flush_bp_all local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +618,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
#endif
/*
@@ -505,8 +637,50 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
}
#endif
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif
-#endif /* CONFIG_MMU */
+#elif defined(CONFIG_SMP) /* !CONFIG_MMU */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mm_types.h>
+
+static inline void local_flush_tlb_all(void) { }
+static inline void local_flush_tlb_mm(struct mm_struct *mm) { }
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { }
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { }
+static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { }
+static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { }
+static inline void local_flush_bp_all(void) { }
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
+#endif /* __ASSEMBLY__ */
+
+#endif
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+ if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+ erratum_a15_798181_handler))
+ return erratum_a15_798181_handler();
+ return false;
+}
+#endif
#endif
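A hedged sketch of how broadcast TLB maintenance might consult the erratum hook
declared above before trusting hardware broadcasting; the function names are
illustrative, not the kernel's actual SMP TLB code:

	static void hypothetical_ipi_flush_mm(void *arg)
	{
		local_flush_tlb_mm(arg);
	}

	static void hypothetical_flush_tlb_mm(struct mm_struct *mm)
	{
		if (erratum_a15_798181()) {
			/* broadcast unreliable on affected A15 parts: use IPIs */
			on_each_cpu(hypothetical_ipi_flush_mm, mm, 1);
			return;
		}

		__flush_tlb_mm(mm);	/* inner-shareable broadcast path */
	}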
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 73409e6c025..83259b87333 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -2,27 +2,30 @@
#define __ASMARM_TLS_H
#ifdef __ASSEMBLY__
- .macro set_tls_none, tp, tmp1, tmp2
+#include <asm/asm-offsets.h>
+ .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
.endm
- .macro set_tls_v6k, tp, tmp1, tmp2
+ .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
+ mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
- mov \tmp1, #0
- mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
+ mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register
+ str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
- .macro set_tls_v6, tp, tmp1, tmp2
+ .macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
ldr \tmp1, =elf_hwcap
ldr \tmp1, [\tmp1, #0]
mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
- movne \tmp1, #0
- mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
+ mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
+ mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+ mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
+ strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
- .macro set_tls_software, tp, tmp1, tmp2
+ .macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
mov \tmp1, #0xffff0fff
str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
.endm
@@ -31,19 +34,30 @@
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu 1
#define has_tls_reg 1
-#define set_tls set_tls_none
+#define switch_tls switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
-#define set_tls set_tls_v6
+#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
-#define set_tls set_tls_v6k
+#define switch_tls switch_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
-#define set_tls set_tls_software
+#define switch_tls switch_tls_software
#endif
+#ifndef __ASSEMBLY__
+static inline unsigned long get_tpuser(void)
+{
+ unsigned long reg = 0;
+
+ if (has_tls_reg && !tls_emu)
+ __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));
+
+ return reg;
+}
+#endif
#endif /* __ASMARM_TLS_H */
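A hedged sketch of where the new get_tpuser() helper fits: when a new thread is
set up, both TLS values are stashed in tp_value[] so the switch_tls macros above
can restore them on a context switch. The surrounding code is illustrative:

	struct thread_info *thread = task_thread_info(p);

	thread->tp_value[0] = hypothetical_new_tls;	/* read-only TLS (TPIDRURO) */
	thread->tp_value[1] = get_tpuser();		/* user r/w TLS (TPIDRURW) */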
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd..2fe85fff5cc 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -20,9 +20,6 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-#define mc_capable() (cpu_topology[0].socket_id != -1)
-#define smt_capable() (cpu_topology[0].thread_id != -1)
-
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
new file mode 100644
index 00000000000..624e1d436c6
--- /dev/null
+++ b/arch/arm/include/asm/trusted_foundations.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Support for the Trusted Foundations secure monitor.
+ *
+ * Trusted Foundations is active on some ARM consumer devices (this includes
+ * most Tegra-based devices sold on the market). Such devices can only perform
+ * some basic operations, like setting the CPU reset vector, through SMC calls
+ * to the secure monitor. The calls are completely specific to Trusted
+ * Foundations, and do *not* follow the SMC calling convention or the PSCI
+ * standard.
+ */
+
+#ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
+#define __ASM_ARM_TRUSTED_FOUNDATIONS_H
+
+#include <linux/kconfig.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+
+struct trusted_foundations_platform_data {
+ unsigned int version_major;
+ unsigned int version_minor;
+};
+
+#if IS_ENABLED(CONFIG_TRUSTED_FOUNDATIONS)
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd);
+void of_register_trusted_foundations(void);
+
+#else /* CONFIG_TRUSTED_FOUNDATIONS */
+
+static inline void register_trusted_foundations(
+ struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * If the system requires TF and we cannot provide it, continue booting
+ * but disable features that cannot be provided.
+ */
+ pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
+ pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
+ setup_max_cpus = 0;
+#endif
+ cpu_idle_poll_ctrl(true);
+}
+
+static inline void of_register_trusted_foundations(void)
+{
+ /*
+	 * If the device tree indicates the target needs TF but the kernel
+	 * cannot provide it, boot in degraded mode via the stub above.
+ */
+ if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations"))
+ register_trusted_foundations(NULL);
+}
+#endif /* CONFIG_TRUSTED_FOUNDATIONS */
+
+#endif
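A hedged sketch of how board code might pass the expected monitor version to
register_trusted_foundations(); the version numbers and the init hook are
illustrative:

	static struct trusted_foundations_platform_data hypothetical_tf_pdata = {
		.version_major	= 2,	/* illustrative values */
		.version_minor	= 0,
	};

	static void __init hypothetical_board_init_early(void)
	{
		register_trusted_foundations(&hypothetical_tf_pdata);
	}

On DT systems, of_register_trusted_foundations() can be called instead; it looks
up the "tlm,trusted-foundations" node itself.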
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
new file mode 100644
index 00000000000..a53cdb8f068
--- /dev/null
+++ b/arch/arm/include/asm/types.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+/*
+ * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
+ * unambiguous on ARM as you would expect. For the types below, there is a
+ * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
+ * and the kernel itself, which results in build errors if you try to build with
+ * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
+ * in order to use NEON intrinsics)
+ *
+ * As the typedefs for these types in 'stdint.h' are based on builtin defines
+ * supplied by GCC, we can tweak these to align with the kernel's idea of those
+ * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
+ * source file (provided that -ffreestanding is used).
+ *
+ * int32_t uint32_t uintptr_t
+ * bare metal GCC long unsigned long unsigned int
+ * glibc GCC int unsigned int unsigned int
+ * kernel int unsigned int unsigned long
+ */
+
+#ifdef __INT32_TYPE__
+#undef __INT32_TYPE__
+#define __INT32_TYPE__ int
+#endif
+
+#ifdef __UINT32_TYPE__
+#undef __UINT32_TYPE__
+#define __UINT32_TYPE__ unsigned int
+#endif
+
+#ifdef __UINTPTR_TYPE__
+#undef __UINTPTR_TYPE__
+#define __UINTPTR_TYPE__ unsigned long
+#endif
+
+#endif /* _ASM_TYPES_H */
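A hedged sketch of the situation the overrides above address: a kernel source
file built with -ffreestanding that mixes kernel types with GCC's NEON
intrinsics header. The file and function names are illustrative:

	/* hypothetical_neon.c, built with -ffreestanding -mfpu=neon */
	#include <linux/types.h>	/* kernel u32, uintptr_t, ... */
	#include <arm_neon.h>		/* pulls in stdint.h-style uint32_t */

	void hypothetical_copy4(uint32_t *dst, const uint32_t *src)
	{
		/* with the overrides, uint32_t here matches the kernel's unsigned int */
		vst1q_u32(dst, vld1q_u32(src));
	}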
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f6..75d95799b6e 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,6 +19,13 @@
#include <asm/unified.h>
#include <asm/compiler.h>
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -164,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
#define __put_user_check(x,p) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ const typeof(*(p)) __user *__tmp_p = (p); \
register const typeof(*(p)) __r2 asm("r2") = (x); \
- register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index f5989f46b4d..b88beaba6b4 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -38,6 +38,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr.w
#define BSYM(sym) sym + 1
+#else
+#define WASM(instr) #instr ".w"
#endif
#else /* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr
#define BSYM(sym) sym
+#else
+#define WASM(instr) #instr
#endif
#endif /* CONFIG_THUMB2_KERNEL */
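The WASM() helper added above lets C-level inline assembly emit the wide
encoding on Thumb-2 kernels: WASM(sev) stringifies to "sev" ".w" (i.e. "sev.w")
there and to plain "sev" on ARM builds, which is what the SEV definition in
spinlock.h relies on. A hedged one-liner as illustration:

	asm volatile(WASM(nop));	/* "nop.w" on Thumb-2, "nop" on ARM */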
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 21a2700d295..43876245fc5 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls (380)
+#define __NR_syscalls (384)
#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
#define __ARCH_WANT_STAT64
@@ -26,8 +26,6 @@
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_OLD_MMAP
#define __ARCH_WANT_SYS_OLD_SELECT
@@ -46,18 +44,9 @@
#define __ARCH_WANT_SYS_CLONE
/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-
-/*
* Unimplemented (or alternatively implemented) syscalls
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
-#define __IGNORE_kcmp
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
new file mode 100644
index 00000000000..9472c20b7d4
--- /dev/null
+++ b/arch/arm/include/asm/uprobes.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/opcodes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 4
+#define UPROBE_XOL_SLOT_BYTES 64
+
+#define UPROBE_SWBP_ARM_INSN 0xe7f001f9
+#define UPROBE_SS_ARM_INSN 0xe7f001fa
+#define UPROBE_SWBP_INSN __opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
+#define UPROBE_SWBP_INSN_SIZE 4
+
+struct arch_uprobe_task {
+ u32 backup;
+ unsigned long saved_trap_no;
+};
+
+struct arch_uprobe {
+ u8 insn[MAX_UINSN_BYTES];
+ unsigned long ixol[2];
+ uprobe_opcode_t bpinsn;
+ bool simulate;
+ u32 pcreg;
+ void (*prehandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ void (*posthandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ struct arch_probes_insn asi;
+};
+
+#endif
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
new file mode 100644
index 00000000000..615781c6162
--- /dev/null
+++ b/arch/arm/include/asm/v7m.h
@@ -0,0 +1,56 @@
+/*
+ * Common defines for v7m cpus
+ */
+#define V7M_SCS_ICTR IOMEM(0xe000e004)
+#define V7M_SCS_ICTR_INTLINESNUM_MASK 0x0000000f
+
+#define BASEADDR_V7M_SCB IOMEM(0xe000ed00)
+
+#define V7M_SCB_CPUID 0x00
+
+#define V7M_SCB_ICSR 0x04
+#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
+#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
+#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
+
+#define V7M_SCB_VTOR 0x08
+
+#define V7M_SCB_AIRCR 0x0c
+#define V7M_SCB_AIRCR_VECTKEY (0x05fa << 16)
+#define V7M_SCB_AIRCR_SYSRESETREQ (1 << 2)
+
+#define V7M_SCB_SCR 0x10
+#define V7M_SCB_SCR_SLEEPDEEP (1 << 2)
+
+#define V7M_SCB_CCR 0x14
+#define V7M_SCB_CCR_STKALIGN (1 << 9)
+
+#define V7M_SCB_SHPR2 0x1c
+#define V7M_SCB_SHPR3 0x20
+
+#define V7M_SCB_SHCSR 0x24
+#define V7M_SCB_SHCSR_USGFAULTENA (1 << 18)
+#define V7M_SCB_SHCSR_BUSFAULTENA (1 << 17)
+#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)
+
+#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
+#define V7M_xPSR_EXCEPTIONNO 0x000001ff
+
+/*
+ * When branching to an address that has bits [31:28] == 0xf an exception return
+ * occurs. Bits [27:5] are reserved (SBOP). If the processor implements the FP
+ * extension Bit [4] defines if the exception frame has space allocated for FP
+ * state information, SBOP otherwise. Bit [3] defines the mode that is returned
+ * to (0 -> handler mode; 1 -> thread mode). Bit [2] defines which sp is used
+ * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
+ */
+#define EXC_RET_STACK_MASK 0x00000004
+#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
+
+#ifndef __ASSEMBLY__
+
+enum reboot_mode;
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd);
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 86164df86cb..4371f45c578 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -24,11 +24,12 @@
/*
* Flag indicating that the kernel was not entered in the same mode on every
* CPU. The zImage loader stashes this value in an SPSR, so we need an
- * architecturally defined flag bit here (the N flag, as it happens)
+ * architecturally defined flag bit here.
*/
-#define BOOT_CPU_MODE_MISMATCH (1<<31)
+#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
#ifdef CONFIG_ARM_VIRT_EXT
/*
@@ -41,10 +42,21 @@
*/
extern int __boot_cpu_mode;
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ sync_cache_r(&__boot_cpu_mode);
+}
+
void __hyp_set_vectors(unsigned long phys_vector_base);
unsigned long __hyp_get_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
+#define sync_boot_mode()
#endif
#ifndef ZIMAGE
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index 4d52f92967a..a6d0a29861e 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -48,10 +48,14 @@ static inline unsigned long find_zero(unsigned long mask)
return ret;
}
-#ifdef CONFIG_DCACHE_WORD_ACCESS
-
#define zero_bytemask(mask) (mask)
+#else /* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
/*
* Load an unaligned word from kernel space.
*
@@ -73,7 +77,11 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
" bic %2, %2, #0x3\n"
" ldr %0, [%2]\n"
" lsl %1, %1, #0x3\n"
+#ifndef __ARMEB__
" lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
" b 2b\n"
" .popsection\n"
" .pushsection __ex_table,\"a\"\n"
@@ -86,11 +94,5 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
return ret;
}
-
#endif /* DCACHE_WORD_ACCESS */
-
-#else /* __ARMEB__ */
-#include <asm-generic/word-at-a-time.h>
-#endif
-
#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 94b4e9020b0..8b1f37bfeee 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -2,6 +2,7 @@
#define _ASM_ARM_XEN_EVENTS_H
#include <asm/ptrace.h>
+#include <asm/atomic.h>
enum ipi_vector {
XEN_PLACEHOLDER_VECTOR,
@@ -15,4 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+ atomic64_t, \
+ counter), (val))
+
#endif /* _ASM_ARM_XEN_EVENTS_H */
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 8a823253d77..712b50e0a6d 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -34,6 +34,7 @@
#define _ASM_ARM_XEN_HYPERCALL_H
#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
long privcmd_call(unsigned call, unsigned long a1,
unsigned long a2, unsigned long a3,
@@ -46,6 +47,18 @@ int HYPERVISOR_event_channel_op(int cmd, void *arg);
unsigned long HYPERVISOR_hvm_op(int op, void *arg);
int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
int HYPERVISOR_physdev_op(int cmd, void *arg);
+int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
+int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
+
+static inline int
+HYPERVISOR_suspend(unsigned long start_info_mfn)
+{
+ struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+ /* start_info_mfn is unused on ARM */
+ return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+}
static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
@@ -61,9 +74,4 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
BUG();
}
-static inline int
-HYPERVISOR_multicall(void *call_list, int nr_calls)
-{
- BUG();
-}
#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9e..1317ee40f4d 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE;
}
+extern struct dma_map_ops *xen_dma_ops;
+
#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 1151188bcd8..50066006e6b 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -40,6 +40,8 @@ typedef uint64_t xen_pfn_t;
#define PRI_xen_pfn "llx"
typedef uint64_t xen_ulong_t;
#define PRI_xen_ulong "llx"
+typedef int64_t xen_long_t;
+#define PRI_xen_long "llx"
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 00000000000..1109017499e
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (__generic_dma_ops(hwdev)->unmap_page)
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (__generic_dma_ops(hwdev)->sync_single_for_device)
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index c6b9096cef9..ded062f9b35 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
#include <linux/pfn.h>
#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <xen/xen.h>
#include <xen/interface/grant_table.h>
-#define pfn_to_mfn(pfn) (pfn)
#define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn) (mfn)
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
#define pte_mfn pte_pfn
@@ -32,6 +32,38 @@ typedef struct xpaddr {
#define INVALID_P2M_ENTRY (~0UL)
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ mfn = __pfn_to_mfn(pfn);
+ if (mfn != INVALID_P2M_ENTRY)
+ return mfn;
+ }
+
+ return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ pfn = __mfn_to_pfn(mfn);
+ if (pfn != INVALID_P2M_ENTRY)
+ return pfn;
+ }
+
+ return mfn;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -45,7 +77,6 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
}
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v) (PFN_DOWN(__pa(v)))
#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
@@ -65,25 +96,24 @@ static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
return NULL;
}
-static inline int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op)
-{
- return 0;
-}
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
-static inline int m2p_remove_override(struct page *page, bool clear_pte)
-{
- return 0;
-}
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+ unsigned long nr_pages);
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
return __set_phys_to_machine(pfn, mfn);
}
+
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))
+
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index 7604673dc42..4ffb26d4cad 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -7,7 +7,10 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/hardirq.h>
#include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
#define __XOR(a1, a2) a1 ^= a2
@@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = {
xor_speed(&xor_block_arm4regs); \
xor_speed(&xor_block_8regs); \
xor_speed(&xor_block_32regs); \
+ NEON_TEMPLATES; \
} while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_2(bytes, p1, p2);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_2(bytes, p1, p2);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_3(bytes, p1, p2, p3);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_4(bytes, p1, p2, p3, p4);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+ kernel_neon_end();
+ }
+}
+
+static struct xor_block_template xor_block_neon = {
+ .name = "neon",
+ .do_2 = xor_neon_2,
+ .do_3 = xor_neon_3,
+ .do_4 = xor_neon_4,
+ .do_5 = xor_neon_5
+};
+
+#define NEON_TEMPLATES \
+ do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif