 arch/arm64/boot/dts/foundation-v8.dts |  2
 arch/arm64/include/asm/irqflags.h     |  3
 arch/arm64/include/asm/pgtable.h      | 33
 arch/arm64/kernel/debug-monitors.c    | 20
 arch/arm64/kernel/entry.S             | 29
 arch/arm64/kernel/ptrace.c            | 40
 arch/arm64/kernel/setup.c             |  5
 arch/arm64/kernel/smp.c               |  1
 8 files changed, 67 insertions(+), 66 deletions(-)
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc501828..519c4b2c068 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
/dts-v1/;
+/memreserve/ 0x80000000 0x00010000;
+
/ {
model = "Foundation-v8A";
compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa11943b850..b2fcfbc51ec 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -56,6 +56,9 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
+#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
+#define local_async_disable() asm("msr daifset, #4" : : : "memory")
+
/*
* Save the current interrupt enable state.
*/
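
The two new macros follow the existing local_fiq_*() pattern: the MSR DAIFSet/DAIFClr immediate selects PSTATE bits with bit 0 = F (FIQ), bit 1 = I (IRQ), bit 2 = A (asynchronous abort/SError) and bit 3 = D (debug), so #4 masks or unmasks asynchronous aborts. A minimal sketch, not part of the patch, of how the effect could be checked; async_aborts_unmasked() is a hypothetical helper and PSR_A_BIT (bit 8 of the saved PSTATE) is assumed to come from asm/ptrace.h:

	#include <linux/types.h>
	#include <asm/ptrace.h>

	/* Sketch: read PSTATE.DAIF back and test the A (SError mask) bit. */
	static inline bool async_aborts_unmasked(void)
	{
		unsigned long daif;

		asm volatile("mrs %0, daif" : "=r" (daif));
		return !(daif & PSR_A_BIT);	/* bit clear => SErrors can be taken */
	}
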
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 17bd3af0a11..7f2b60affbb 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,10 +25,11 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */
-#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
+#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+ /* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
@@ -254,7 +255,7 @@ static inline int has_transparent_hugepage(void)
#define pgprot_noncached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
@@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
- * bits 0, 2: present (must both be zero)
- * bit 3: PTE_FILE
- * bits 4-8: swap type
- * bits 9-63: swap offset
+ * bits 0-1: present (must be zero)
+ * bit 2: PTE_FILE
+ * bits 3-8: swap type
+ * bits 9-57: swap offset
*/
-#define __SWP_TYPE_SHIFT 4
+#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
+#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a file entry:
- * bits 0, 2: present (must both be zero)
- * bit 3: PTE_FILE
- * bits 4-63: file offset / PAGE_SIZE
+ * bits 0-1: present (must be zero)
+ * bit 2: PTE_FILE
+ * bits 3-57: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 4)
-#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 3)
+#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 60
+#define PTE_FILE_MAX_BITS 55
extern int kern_addr_valid(unsigned long addr);
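
With PTE_PROT_NONE moved up to bit 58, a swap entry keeps bits 0-1 (present) and bit 2 (PTE_FILE) clear, packs the 6-bit type at bits 3-8 and the 49-bit offset at bits 9-57. A standalone userspace sketch, not part of the patch, that mirrors the macros above and checks the round trip:

	/* Sketch mirroring the new __swp_* packing above. */
	#include <assert.h>
	#include <stdint.h>

	#define SWP_TYPE_SHIFT		3
	#define SWP_TYPE_BITS		6
	#define SWP_OFFSET_BITS		49
	#define SWP_TYPE_MASK		((1ULL << SWP_TYPE_BITS) - 1)
	#define SWP_OFFSET_SHIFT	(SWP_TYPE_BITS + SWP_TYPE_SHIFT)	/* 9 */
	#define SWP_OFFSET_MASK		((1ULL << SWP_OFFSET_BITS) - 1)

	int main(void)
	{
		uint64_t type = 5, offset = 0x123456789abULL;
		uint64_t val = (type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);

		assert(((val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);
		assert(((val >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == offset);
		assert((val & 0x7) == 0);	/* !valid, present bit 1 clear, !PTE_FILE */
		assert(!(val & (1ULL << 58)));	/* PTE_PROT_NONE stays clear */
		return 0;
	}
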
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 6a0a9b132d7..4ae68579031 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -248,7 +248,8 @@ static int brk_handler(unsigned long addr, unsigned int esr,
int aarch32_break_handler(struct pt_regs *regs)
{
siginfo_t info;
- unsigned int instr;
+ u32 arm_instr;
+ u16 thumb_instr;
bool bp = false;
void __user *pc = (void __user *)instruction_pointer(regs);
@@ -257,18 +258,21 @@ int aarch32_break_handler(struct pt_regs *regs)
if (compat_thumb_mode(regs)) {
/* get 16-bit Thumb instruction */
- get_user(instr, (u16 __user *)pc);
- if (instr == AARCH32_BREAK_THUMB2_LO) {
+ get_user(thumb_instr, (u16 __user *)pc);
+ thumb_instr = le16_to_cpu(thumb_instr);
+ if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
/* get second half of 32-bit Thumb-2 instruction */
- get_user(instr, (u16 __user *)(pc + 2));
- bp = instr == AARCH32_BREAK_THUMB2_HI;
+ get_user(thumb_instr, (u16 __user *)(pc + 2));
+ thumb_instr = le16_to_cpu(thumb_instr);
+ bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
} else {
- bp = instr == AARCH32_BREAK_THUMB;
+ bp = thumb_instr == AARCH32_BREAK_THUMB;
}
} else {
/* 32-bit ARM instruction */
- get_user(instr, (u32 __user *)pc);
- bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+ get_user(arm_instr, (u32 __user *)pc);
+ arm_instr = le32_to_cpu(arm_instr);
+ bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
}
if (!bp)
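
AArch32 instructions are stored little-endian in memory, while get_user() returns the bytes in the kernel's native order, so a big-endian kernel has to byte-swap before comparing against the AARCH32_BREAK_* encodings; using properly sized u16/u32 variables also keeps the conversion well-defined. A sketch, not part of the patch (read_user_thumb_instr() is a hypothetical helper), of the same pattern for one 16-bit Thumb instruction:

	#include <linux/errno.h>
	#include <linux/uaccess.h>
	#include <asm/byteorder.h>

	/* Sketch: fetch a 16-bit Thumb instruction and normalise its byte order. */
	static int read_user_thumb_instr(void __user *pc, u16 *instr)
	{
		__le16 raw;

		if (get_user(raw, (__le16 __user *)pc))
			return -EFAULT;

		*instr = le16_to_cpu(raw);
		return 0;
	}
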
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e1166145ca2..4d2c6f3f0c4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -309,15 +309,12 @@ el1_irq:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
-#ifdef CONFIG_PREEMPT
- get_thread_info tsk
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
- add w0, w24, #1 // increment it
- str w0, [tsk, #TI_PREEMPT]
-#endif
+
irq_handler
+
#ifdef CONFIG_PREEMPT
- str w24, [tsk, #TI_PREEMPT] // restore preempt count
+ get_thread_info tsk
+ ldr w24, [tsk, #TI_PREEMPT] // restore preempt count
cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
@@ -507,22 +504,10 @@ el0_irq_naked:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- get_thread_info tsk
-#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
- add w23, w24, #1 // increment it
- str w23, [tsk, #TI_PREEMPT]
-#endif
+
irq_handler
-#ifdef CONFIG_PREEMPT
- ldr w0, [tsk, #TI_PREEMPT]
- str w24, [tsk, #TI_PREEMPT]
- cmp w0, w23
- b.eq 1f
- mov x1, #0
- str x1, [x1] // BUG
-1:
-#endif
+ get_thread_info tsk
+
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
#endif
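
The open-coded TI_PREEMPT increment/decrement around irq_handler is dropped: the generic interrupt code already raises and lowers the hard-IRQ part of the preempt count, and the EL1 path only re-reads TI_PREEMPT afterwards to decide whether to preempt. A simplified sketch, not part of the patch, of that core accounting (handle_one_irq() is a hypothetical wrapper; irq_enter()/irq_exit() are the generic helpers reached from the arm64 IRQ handler):

	#include <linux/hardirq.h>
	#include <linux/irq.h>

	/* Sketch of the per-interrupt accounting done by the core code. */
	static void handle_one_irq(unsigned int irq)
	{
		irq_enter();			/* preempt_count += HARDIRQ_OFFSET */
		generic_handle_irq(irq);	/* run the flow handler */
		irq_exit();			/* preempt_count -= HARDIRQ_OFFSET, may run softirqs */
	}
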
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fecdbf7de82..6777a2192b8 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -636,28 +636,27 @@ static int compat_gpr_get(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
switch (idx) {
case 15:
- reg = (void *)&task_pt_regs(target)->pc;
+ reg = task_pt_regs(target)->pc;
break;
case 16:
- reg = (void *)&task_pt_regs(target)->pstate;
+ reg = task_pt_regs(target)->pstate;
break;
case 17:
- reg = (void *)&task_pt_regs(target)->orig_x0;
+ reg = task_pt_regs(target)->orig_x0;
break;
default:
- reg = (void *)&task_pt_regs(target)->regs[idx];
+ reg = task_pt_regs(target)->regs[idx];
}
- ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
-
+ ret = copy_to_user(ubuf, &reg, sizeof(reg));
if (ret)
break;
- else
- ubuf += sizeof(compat_ulong_t);
+
+ ubuf += sizeof(reg);
}
return ret;
@@ -685,28 +684,28 @@ static int compat_gpr_set(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
+
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret)
+ return ret;
+
+ ubuf += sizeof(reg);
switch (idx) {
case 15:
- reg = (void *)&newregs.pc;
+ newregs.pc = reg;
break;
case 16:
- reg = (void *)&newregs.pstate;
+ newregs.pstate = reg;
break;
case 17:
- reg = (void *)&newregs.orig_x0;
+ newregs.orig_x0 = reg;
break;
default:
- reg = (void *)&newregs.regs[idx];
+ newregs.regs[idx] = reg;
}
- ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
- if (ret)
- goto out;
- else
- ubuf += sizeof(compat_ulong_t);
}
if (valid_user_regs(&newregs.user_regs))
@@ -714,7 +713,6 @@ static int compat_gpr_set(struct task_struct *target,
else
ret = -EINVAL;
-out:
return ret;
}
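
compat_gpr_get()/compat_gpr_set() now move each register through a local compat_ulong_t instead of handing copy_{to,from}_user() the address of a 64-bit pt_regs field. Copying sizeof(compat_ulong_t) bytes straight out of a 64-bit field only picks up the right half on a little-endian kernel, and doing the user copy up front lets the error path return directly, which removes the out: label. A sketch, not part of the patch (put_compat_reg() is a hypothetical helper), of the difference:

	#include <linux/compat.h>
	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Sketch: export one 64-bit register value to a 32-bit compat buffer. */
	static int put_compat_reg(u64 reg64, compat_ulong_t __user *ubuf)
	{
		compat_ulong_t reg = reg64;	/* explicit truncation to the low 32 bits */

		/*
		 * copy_to_user(ubuf, &reg64, sizeof(reg)) would instead copy
		 * whichever half sits first in memory: the low word on a
		 * little-endian kernel, the high word on a big-endian one.
		 */
		return copy_to_user(ubuf, &reg, sizeof(reg)) ? -EFAULT : 0;
	}
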
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0bc5e4cbc01..bd9bbd0e44e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -205,6 +205,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
+ /*
+ * Unmask asynchronous aborts early to catch possible system errors.
+ */
+ local_async_enable();
+
setup_processor();
setup_machine_fdt(__fdt_pointer);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a5aeefab03c..a0c2ca602cf 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -160,6 +160,7 @@ asmlinkage void secondary_start_kernel(void)
local_irq_enable();
local_fiq_enable();
+ local_async_enable();
/*
* OK, it's off to the idle thread for us