Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/compressed/atags_to_fdt.c   2
-rw-r--r--  arch/arm/boot/compressed/head.S           2
-rw-r--r--  arch/arm/common/vic.c                     9
-rw-r--r--  arch/arm/include/asm/jump_label.h         2
-rw-r--r--  arch/arm/kernel/setup.c                  16
-rw-r--r--  arch/arm/kernel/smp_twd.c                 6
-rw-r--r--  arch/arm/mm/Kconfig                       2
-rw-r--r--  arch/arm/mm/fault.c                       2
-rw-r--r--  arch/arm/mm/nommu.c                       2
-rw-r--r--  arch/arm/mm/proc-v7.S                    12
10 files changed, 44 insertions, 11 deletions
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 6ce11c48117..797f04bedb4 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -77,6 +77,8 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
} else if (atag->hdr.tag == ATAG_MEM) {
if (memcount >= sizeof(mem_reg_property)/4)
continue;
+ if (!atag->u.mem.size)
+ continue;
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
} else if (atag->hdr.tag == ATAG_INITRD2) {
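The check added above drops ATAG_MEM entries whose size is zero, so no empty ranges end up in the reg cells of the generated /memory node. A standalone sketch of the same filtering idea, using an illustrative mem_region type and pack_mem_regs() name rather than the kernel's code:

    #include <stdint.h>
    #include <stddef.h>

    struct mem_region {                 /* illustrative type, not the kernel's */
            uint32_t start;
            uint32_t size;
    };

    /* Pack only non-empty regions into a (start, size) cell array, mirroring
     * how the patch builds the FDT memory property from ATAG_MEM entries. */
    static size_t pack_mem_regs(const struct mem_region *in, size_t n,
                                uint32_t *out, size_t out_cells)
    {
            size_t cells = 0;

            for (size_t i = 0; i < n; i++) {
                    if (cells + 2 > out_cells)
                            break;          /* no room for another pair */
                    if (in[i].size == 0)
                            continue;       /* skip zero-sized banks, as above */
                    out[cells++] = in[i].start;
                    out[cells++] = in[i].size;
            }
            return cells;
    }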
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 5f6045f1766..dc7e8ce8e6b 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -273,7 +273,7 @@ restart: adr r0, LC0
add r0, r0, #0x100
mov r1, r6
sub r2, sp, r6
- blne atags_to_fdt
+ bleq atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr}
sub sp, sp, #0x10000
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7a66311f306..7e288f96ced 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -427,19 +427,18 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
/*
* Handle each interrupt in a single VIC. Returns non-zero if we've
- * handled at least one interrupt. This does a single read of the
- * status register and handles all interrupts in order from LSB first.
+ * handled at least one interrupt. This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
*/
static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
{
u32 stat, irq;
int handled = 0;
- stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
- while (stat) {
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
irq = ffs(stat) - 1;
handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
- stat &= ~(1 << irq);
handled = 1;
}
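The rewritten loop above re-reads VIC_IRQ_STATUS on every iteration instead of clearing bits in a cached copy, so interrupts that become pending while handle_IRQ() runs (it may briefly re-enable interrupts for softirq handling) are serviced in the same pass. A minimal standalone sketch of that pattern, with hypothetical read_pending() and dispatch() callbacks standing in for readl_relaxed() and handle_IRQ():

    #include <stdint.h>

    /* Drain all pending sources, re-reading the pending mask each time so that
     * anything raised while a handler ran is picked up before returning. */
    static int drain_pending(uint32_t (*read_pending)(void),
                             void (*dispatch)(unsigned int irq))
    {
            uint32_t stat;
            int handled = 0;

            while ((stat = read_pending()) != 0) {
                    unsigned int irq = __builtin_ffs(stat) - 1;  /* lowest bit first */
                    dispatch(irq);
                    handled = 1;
            }
            return handled;
    }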
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5c5ca2ea62b..bfc198c7591 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -14,7 +14,7 @@
#define JUMP_LABEL_NOP "nop"
#endif
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b91411371ae..ebfac782593 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -523,7 +523,21 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size)
*/
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);
- bank->size = size & PAGE_MASK;
+
+#ifndef CONFIG_ARM_LPAE
+ if (bank->start + size < bank->start) {
+ printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
+ "32-bit physical address space\n", (long long)start);
+ /*
+ * To ensure bank->start + bank->size is representable in
+ * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
+ * This means we lose a page after masking.
+ */
+ size = ULONG_MAX - bank->start;
+ }
+#endif
+
+ bank->size = size & PAGE_MASK;
/*
* Check whether this memory region has non-zero size or
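The new block guards against a bank whose start plus size wraps past the 32-bit physical address space when LPAE is not enabled: the size is clamped to ULONG_MAX - bank->start so that bank->start + bank->size stays representable, losing one page once the result is masked to a page boundary. A simplified standalone sketch of that clamp, with illustrative names and a 64-bit size parameter so the comparison stays obvious:

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp a bank so start + size cannot wrap a 32-bit physical address.
     * The caller is assumed to page-align the result afterwards. */
    static uint32_t clamp_bank_size(uint32_t start, uint64_t size)
    {
            if ((uint64_t)start + size > UINT32_MAX) {
                    printf("Truncating memory at 0x%08x to fit in 32-bit space\n",
                           start);
                    size = UINT32_MAX - start;  /* analogue of ULONG_MAX - bank->start */
            }
            return (uint32_t)size;
    }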
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index fef42b21cec..5b150afb995 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -118,10 +118,14 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
* The twd clock events must be reprogrammed to account for the new
* frequency. The timer is local to a cpu, so cross-call to the
* changing cpu.
+ *
+ * Only wait for it to finish if the cpu is active, to avoid a
+ * deadlock while cpu1 is still booting and spinning on
+ * while(!cpu_active(cpu1)).
*/
if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
smp_call_function_single(freqs->cpu, twd_update_frequency,
- NULL, 1);
+ NULL, cpu_active(freqs->cpu));
return NOTIFY_OK;
}
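The functional change above is the final argument to smp_call_function_single(): the caller now blocks waiting for the cross-call only when the target cpu is already active, since a cpu that is still booting may itself be spinning until it is marked active. A minimal kernel-context sketch of that idea, with a placeholder update_timer() in place of twd_update_frequency() and the cpufreq notifier plumbing omitted:

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    /* Placeholder for the per-cpu work done in the patch. */
    static void update_timer(void *info)
    {
    }

    static void retune_cpu_timer(int cpu)
    {
            /*
             * A cpu that is still coming up may spin until it is marked active,
             * so blocking on it here could deadlock; only wait for the IPI to
             * complete when the target cpu is already active.
             */
            smp_call_function_single(cpu, update_timer, NULL, cpu_active(cpu));
    }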
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7edef912163..7c8a7d8467b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -723,7 +723,7 @@ config CPU_HIGH_VECTOR
bool "Select the High exception vector"
help
Say Y here to select high exception vector(0xFFFF0000~).
- The exception vector can be vary depending on the platform
+ The exception vector can vary depending on the platform
design in nommu mode. If your platform needs to select
high exception vector, say Y.
Otherwise or if you are unsure, say N, and the low exception
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9055b5a84ec..f0746753336 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -320,7 +320,7 @@ retry:
*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
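The new condition skips both the fault accounting and the retry path when the handler already returned an error, since a failed fault should be neither counted as major/minor nor retried. A simplified standalone sketch of that guard, using illustrative flag values rather than the kernel's definitions:

    #include <stdbool.h>

    #define VM_FAULT_ERROR          0x01    /* illustrative values only */
    #define VM_FAULT_MAJOR          0x02
    #define VM_FAULT_RETRY          0x04
    #define FAULT_FLAG_ALLOW_RETRY  0x08

    /* Returns true when the caller should retry the fault. */
    static bool account_and_maybe_retry(unsigned int fault, unsigned int *flags,
                                        unsigned long *maj_flt, unsigned long *min_flt)
    {
            if ((fault & VM_FAULT_ERROR) || !(*flags & FAULT_FLAG_ALLOW_RETRY))
                    return false;           /* failed faults: no counting, no retry */

            if (fault & VM_FAULT_MAJOR)
                    (*maj_flt)++;
            else
                    (*min_flt)++;

            if (fault & VM_FAULT_RETRY) {
                    *flags &= ~FAULT_FLAG_ALLOW_RETRY;  /* retry at most once */
                    return true;
            }
            return false;
    }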
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 6486d2f253c..d51225f90ae 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -13,6 +13,7 @@
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
+#include <asm/traps.h>
#include <asm/mach/arch.h>
#include "mm.h"
@@ -39,6 +40,7 @@ void __init sanity_check_meminfo(void)
*/
void __init paging_init(struct machine_desc *mdesc)
{
+ early_trap_init((void *)CONFIG_VECTORS_BASE);
bootmem_init();
}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index f1c8486f750..c2e2b66f72b 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -255,6 +255,18 @@ __v7_setup:
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
+#ifndef CONFIG_ARM_THUMBEE
+ mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
+ and r0, r0, #(0xf << 12) @ ThumbEE enabled field
+ teq r0, #(1 << 12) @ check if ThumbEE is present
+ bne 1f
+ mov r5, #0
+ mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0
+ mrc p14, 6, r0, c0, c0, 0 @ load TEECR
+ orr r0, r0, #1 @ set the 1st bit in order to
+ mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
+1:
+#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
#ifdef CONFIG_CPU_ENDIAN_BE8