Diffstat (limited to 'arch/arm/kernel/traps.c')
-rw-r--r--  arch/arm/kernel/traps.c  174
1 file changed, 116 insertions(+), 58 deletions(-)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index b0179b89a04..abd2fc06773 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,10 +34,15 @@
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
-
-#include "signal.h"
-
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+#include <asm/opcodes.h>
+
+static const char *handler[]= {
+ "prefetch abort",
+ "data abort",
+ "address exception",
+ "interrupt",
+ "undefined instruction",
+};
void *vectors_page;
@@ -57,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
@@ -204,13 +209,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
}
#endif
-void dump_stack(void)
-{
- dump_backtrace(NULL, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
dump_backtrace(NULL, tsk);
@@ -296,7 +294,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
bust_spinlocks(0);
die_owner = -1;
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
@@ -350,15 +348,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
- unsigned short bkpt;
+ u16 bkpt;
+ u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
- unsigned long bkpt;
+ u32 bkpt;
+ u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
if (probe_kernel_address((unsigned *)pc, bkpt))
return 0;
- return bkpt == BUG_INSTR_VALUE;
+ return bkpt == insn;
}
#endif
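
A minimal sketch (not part of the patch) of why is_valid_bugaddr() now converts BUG_INSTR_VALUE with __opcode_to_mem_*() before comparing: on a BE8 kernel, data accesses are big-endian while instructions stay little-endian in memory, so an instruction word read as data comes back byte-swapped. The helper below is hypothetical; the real conversion lives in asm/opcodes.h.

/*
 * Sketch: compare an instruction word read from memory against a
 * canonical opcode by first converting the canonical value into
 * "memory order" (identity on LE kernels, byte swap on BE8).
 */
#include <linux/swab.h>
#include <linux/types.h>

static inline u32 opcode_to_mem_arm_sketch(u32 opcode, bool be8_kernel)
{
	return be8_kernel ? swab32(opcode) : opcode;
}

static bool insn_matches(u32 word_from_memory, u32 canonical_opcode,
			 bool be8_kernel)
{
	return word_from_memory ==
		opcode_to_mem_arm_sketch(canonical_opcode, be8_kernel);
}
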
@@ -411,26 +411,30 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
if (thumb_mode(regs)) {
- instr = ((u16 *)pc)[0];
+ instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
if (is_wide_instruction(instr)) {
- instr <<= 16;
- instr |= ((u16 *)pc)[1];
+ u16 inst2;
+ inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+ instr = __opcode_thumb32_compose(instr, inst2);
}
} else
#endif
- instr = *(u32 *) pc;
+ instr = __mem_to_opcode_arm(*(u32 *) pc);
} else if (thumb_mode(regs)) {
if (get_user(instr, (u16 __user *)pc))
goto die_sig;
+ instr = __mem_to_opcode_thumb16(instr);
if (is_wide_instruction(instr)) {
unsigned int instr2;
if (get_user(instr2, (u16 __user *)pc+1))
goto die_sig;
- instr <<= 16;
- instr |= instr2;
+ instr2 = __mem_to_opcode_thumb16(instr2);
+ instr = __opcode_thumb32_compose(instr, instr2);
}
- } else if (get_user(instr, (u32 __user *)pc)) {
- goto die_sig;
+ } else {
+ if (get_user(instr, (u32 __user *)pc))
+ goto die_sig;
+ instr = __mem_to_opcode_arm(instr);
}
if (call_undef_hook(regs, instr) == 0)
@@ -441,6 +445,7 @@ die_sig:
if (user_debug & UDBG_UNDEFINED) {
printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
+ __show_regs(regs);
dump_instr(KERN_INFO, regs);
}
#endif
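
For context, call_undef_hook() dispatches to handlers registered with register_undef_hook(). A hedged sketch of such a registration follows; the struct undef_hook fields are as declared in asm/traps.h, while the instruction mask/value pair is purely illustrative.

/*
 * Sketch (not part of the patch): registering an undefined-instruction
 * hook that call_undef_hook() will try before do_undefinstr() raises
 * SIGILL.
 */
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

static int example_undef_handler(struct pt_regs *regs, unsigned int instr)
{
	/* emulate or fix up the instruction, then step past it */
	regs->ARM_pc += 4;
	return 0;			/* 0: handled, non-zero: not ours */
}

static struct undef_hook example_hook = {
	.instr_mask	= 0x0fffffff,	/* illustrative */
	.instr_val	= 0x07f000f0,	/* illustrative */
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= USR_MODE,
	.fn		= example_undef_handler,
};

static int __init example_hook_init(void)
{
	register_undef_hook(&example_hook);
	return 0;
}
late_initcall(example_hook_init);
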
@@ -506,28 +511,65 @@ static int bad_syscall(int n, struct pt_regs *regs)
return regs->ARM_r0;
}
+static long do_cache_op_restart(struct restart_block *);
+
static inline int
-do_cache_op(unsigned long start, unsigned long end, int flags)
+__do_cache_op(unsigned long start, unsigned long end)
+{
+ int ret;
+
+ do {
+ unsigned long chunk = min(PAGE_SIZE, end - start);
+
+ if (signal_pending(current)) {
+ struct thread_info *ti = current_thread_info();
+
+ ti->restart_block = (struct restart_block) {
+ .fn = do_cache_op_restart,
+ };
+
+ ti->arm_restart_block = (struct arm_restart_block) {
+ {
+ .cache = {
+ .start = start,
+ .end = end,
+ },
+ },
+ };
+
+ return -ERESTART_RESTARTBLOCK;
+ }
+
+ ret = flush_cache_user_range(start, start + chunk);
+ if (ret)
+ return ret;
+
+ cond_resched();
+ start += chunk;
+ } while (start < end);
+
+ return 0;
+}
+
+static long do_cache_op_restart(struct restart_block *unused)
{
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *vma;
+ struct arm_restart_block *restart_block;
+
+ restart_block = &current_thread_info()->arm_restart_block;
+ return __do_cache_op(restart_block->cache.start,
+ restart_block->cache.end);
+}
+static inline int
+do_cache_op(unsigned long start, unsigned long end, int flags)
+{
if (end < start || flags)
return -EINVAL;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, start);
- if (vma && vma->vm_start < end) {
- if (start < vma->vm_start)
- start = vma->vm_start;
- if (end > vma->vm_end)
- end = vma->vm_end;
+ if (!access_ok(VERIFY_READ, start, end - start))
+ return -EFAULT;
- up_read(&mm->mmap_sem);
- return flush_cache_user_range(start, end);
- }
- up_read(&mm->mmap_sem);
- return -EINVAL;
+ return __do_cache_op(start, end);
}
/*
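
The do_cache_op()/__do_cache_op() pair above backs the ARM-private cacheflush syscall that JITs issue after writing code. A hedged user-space sketch of that entry point (EABI syscall number assumed from asm/unistd.h):

/*
 * Sketch: user-space view of the code path above.  The flags argument
 * must be 0, matching the check in do_cache_op().
 */
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __ARM_NR_cacheflush
#define __ARM_NR_cacheflush 0x0f0002	/* __ARM_NR_BASE + 2 on EABI (assumption) */
#endif

static int sync_icache(void *start, void *end)
{
	return syscall(__ARM_NR_cacheflush, start, end, 0);
}
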
@@ -588,7 +630,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
return regs->ARM_r0;
case NR(set_tls):
- thread->tp_value = regs->ARM_r0;
+ thread->tp_value[0] = regs->ARM_r0;
if (tls_emu)
return 0;
if (has_tls_reg) {
@@ -706,7 +748,7 @@ static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
int reg = (instr >> 12) & 15;
if (reg == 15)
return 1;
- regs->uregs[reg] = current_thread_info()->tp_value;
+ regs->uregs[reg] = current_thread_info()->tp_value[0];
regs->ARM_pc += 4;
return 0;
}
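
get_tp_trap() emulates the user-visible TLS read on kernels built with CONFIG_TLS_REG_EMUL; a sketch of the instruction it stands in for (reading TPIDRURO via cp15) is shown below.

/*
 * Sketch (not part of the patch): on CPUs with a hardware TLS register
 * this reads TPIDRURO directly; where the instruction is undefined,
 * the trap handler above supplies tp_value[0] instead.
 */
static inline void *read_tls_pointer(void)
{
	void *tp;

	__asm__("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return tp;
}
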
@@ -807,47 +849,63 @@ void __init trap_init(void)
return;
}
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
+}
+#else
+static inline void __init kuser_init(void *vectors)
+{
}
+#endif
void __init early_trap_init(void *vectors_base)
{
+#ifndef CONFIG_CPU_V7M
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
- extern char __kuser_helper_start[], __kuser_helper_end[];
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
vectors_page = vectors_base;
/*
+ * Poison the vectors page with an undefined instruction. This
+ * instruction is chosen to be undefined for both ARM and Thumb
+ * ISAs. The Thumb version is an undefined instruction with a
+ * branch back to the undefined instruction.
+ */
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+ /*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
- /*
- * Do processor specific fixups for the kuser helpers
- */
- kuser_get_tls_init(vectors);
+ kuser_init(vectors_base);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+ modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+#else /* ifndef CONFIG_CPU_V7M */
/*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
+ * on V7-M there is no need to copy the vector table to a dedicated
+ * memory area. The address is configurable and so a table in the kernel
+ * image can be used.
*/
- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
- sigreturn_codes, sizeof(sigreturn_codes));
-
- flush_icache_range(vectors, vectors + PAGE_SIZE);
- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+#endif
}
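
With CONFIG_KUSER_HELPERS, the vector page installed here also exports __kuser_get_tls at the fixed address 0xffff0fe0 that kuser_init() patches above. A hedged user-space sketch of calling it, per Documentation/arm/kernel_user_helpers.txt; the wrapper name is ours, the ABI is the kernel's.

/* Sketch: call the kuser get_tls helper at its architected address. */
typedef void *(*kuser_get_tls_fn)(void);
#define kuser_get_tls ((kuser_get_tls_fn)0xffff0fe0)

static void *get_tls_via_kuser(void)
{
	return kuser_get_tls();
}
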