Diffstat (limited to 'arch/arm/mm/alignment.c')
-rw-r--r--  arch/arm/mm/alignment.c  368
1 file changed, 283 insertions(+), 85 deletions(-)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index e162cca5917..b8cb1a2688a 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -11,17 +11,24 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
-#include <asm/uaccess.h>
+#include <asm/cp15.h>
+#include <asm/system_info.h>
#include <asm/unaligned.h>
+#include <asm/opcodes.h>
#include "fault.h"
+#include "mm.h"
/*
* 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
@@ -61,6 +68,12 @@
#define SHIFT_ASR 0x40
#define SHIFT_RORRRX 0x60
+#define BAD_INSTR 0xdeadc0de
+
+/* Thumb-2 32-bit format per ARMv7 DDI0406A A6.3; first halfword is e800h, f000h or f800h */
+#define IS_T32(hi16) \
+ (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
+
static unsigned long ai_user;
static unsigned long ai_sys;
static unsigned long ai_skipped;
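
The IS_T32() predicate merits a quick sanity check: per DDI0406A A6.3, a halfword begins a 32-bit Thumb-2 encoding only when its top three bits are 0b111 and the next two are not 0b00, i.e. the prefixes e800h, f000h and f800h. A standalone sketch (not part of the patch) exercising the macro:

#include <assert.h>

#define IS_T32(hi16) \
	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))

int main(void)
{
	assert(IS_T32(0xe800));		/* load/store multiple, LDRD/STRD */
	assert(IS_T32(0xf000));		/* data processing, branches */
	assert(IS_T32(0xf800));		/* load/store single */
	assert(!IS_T32(0xe000));	/* 16-bit unconditional branch B */
	assert(!IS_T32(0x6800));	/* 16-bit LDR (immediate) */
	return 0;
}
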
@@ -69,6 +82,40 @@ static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
+static unsigned long cr_no_alignment;
+
+core_param(alignment, ai_usermode, int, 0600);
+
+#define UM_WARN (1 << 0)
+#define UM_FIXUP (1 << 1)
+#define UM_SIGNAL (1 << 2)
+
+/* Return true if and only if the ARMv6 unaligned access model is in use. */
+static bool cpu_is_v6_unaligned(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
+}
+
+static int safe_usermode(int new_usermode, bool warn)
+{
+ /*
+ * ARMv6 and later CPUs can perform unaligned accesses for
+ * most single load and store instructions up to word size.
+ * LDM, STM, LDRD and STRD still need to be handled.
+ *
+ * Ignoring the alignment fault is not an option on these
+ * CPUs since we spin re-faulting the instruction without
+ * making any progress.
+ */
+ if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
+ new_usermode |= UM_FIXUP;
+
+ if (warn)
+ printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+ }
+
+ return new_usermode;
+}
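
For reference, the six values accepted through /proc decompose onto these bits as follows; a sketch assuming the UM_* definitions above (the enum names are ours, not the patch's):

/* Sketch: how the 0..5 settings map onto the UM_* bits above. */
enum usermode_setting {
	MODE_IGNORE      = 0,                   /* no bits: silently ignore */
	MODE_WARN        = UM_WARN,             /* 1: warn only */
	MODE_FIXUP       = UM_FIXUP,            /* 2: fix up silently */
	MODE_FIXUP_WARN  = UM_WARN | UM_FIXUP,  /* 3: fix up and warn */
	MODE_SIGNAL      = UM_SIGNAL,           /* 4: deliver SIGBUS */
	MODE_SIGNAL_WARN = UM_WARN | UM_SIGNAL, /* 5: SIGBUS and warn */
};
/*
 * On ARMv6+ with the U bit set, safe_usermode() forces UM_FIXUP into
 * modes 0 and 1, because "ignore" would re-execute the faulting
 * LDM/STM/LDRD/STRD forever without making progress.
 */
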
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
@@ -80,36 +127,29 @@ static const char *usermode_action[] = {
"signal+warn"
};
-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += sprintf(p, "User:\t\t%lu\n", ai_user);
- p += sprintf(p, "System:\t\t%lu\n", ai_sys);
- p += sprintf(p, "Skipped:\t%lu\n", ai_skipped);
- p += sprintf(p, "Half:\t\t%lu\n", ai_half);
- p += sprintf(p, "Word:\t\t%lu\n", ai_word);
+ seq_printf(m, "User:\t\t%lu\n", ai_user);
+ seq_printf(m, "System:\t\t%lu\n", ai_sys);
+ seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
+ seq_printf(m, "Half:\t\t%lu\n", ai_half);
+ seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
- p += sprintf(p, "DWord:\t\t%lu\n", ai_dword);
- p += sprintf(p, "Multi:\t\t%lu\n", ai_multi);
- p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode,
+ seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
+ return 0;
+}
- return len;
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
}
-static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char mode;
@@ -117,11 +157,18 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
if (get_user(mode, buffer))
return -EFAULT;
if (mode >= '0' && mode <= '5')
- ai_usermode = mode - '0';
+ ai_usermode = safe_usermode(mode - '0', true);
}
return count;
}
+static const struct file_operations alignment_proc_fops = {
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
#endif /* CONFIG_PROC_FS */
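
With the switch to seq_file the /proc interface behaves as before from user space. A user-space sketch (not part of the patch) that selects fixup+warn and dumps the counters:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/cpu/alignment", "w");
	char line[128];

	if (!f)
		return 1;
	fputc('3', f);			/* UM_WARN | UM_FIXUP */
	fclose(f);

	f = fopen("/proc/cpu/alignment", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* counters plus "User faults: 3 (fixup+warn)" */
	fclose(f);
	return 0;
}
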
union offset_union {
@@ -148,17 +195,19 @@ union offset_union {
#define __get8_unaligned_check(ins,val,addr,err) \
__asm__( \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
@@ -204,20 +253,22 @@ union offset_union {
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_16 \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, #1\n" \
" b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
@@ -234,26 +285,32 @@ union offset_union {
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_32 \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
- "2: "ins" %1, [%2], #1\n" \
+ ARM( "2: "ins" %1, [%2], #1\n" ) \
+ THUMB( "2: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
- "3: "ins" %1, [%2], #1\n" \
+ ARM( "3: "ins" %1, [%2], #1\n" ) \
+ THUMB( "3: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"6: mov %0, #1\n" \
" b 5b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
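
All three access-size macros follow the same pattern: the word is moved one byte at a time so every individual access is naturally aligned, and the .pushsection/.popsection change keeps the fixup and __ex_table entries from clobbering a caller's section state. In plain C the little-endian load path amounts to the following sketch (the real macros also handle big-endian via FIRST_BYTE_32/NEXT_BYTE):

/*
 * Plain-C sketch of what the asm above does for a little-endian
 * 32-bit load: four naturally aligned byte accesses, composed
 * LSB-first, so no single access can itself fault on alignment.
 */
static inline unsigned int get32_unaligned_sketch(const unsigned char *p)
{
	return (unsigned int)p[0] |
	       ((unsigned int)p[1] << 8) |
	       ((unsigned int)p[2] << 16) |
	       ((unsigned int)p[3] << 24);
}
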
@@ -327,38 +384,48 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
-
- if (((rd & 1) == 1) || (rd == 14))
+ unsigned int rd2;
+ int load;
+
+ if ((instr & 0xfe000000) == 0xe8000000) {
+ /* ARMv7 Thumb-2 32-bit LDRD/STRD */
+ rd2 = (instr >> 8) & 0xf;
+ load = !!(LDST_L_BIT(instr));
+ } else if (((rd & 1) == 1) || (rd == 14))
goto bad;
+ else {
+ load = ((instr & 0xf0) == 0xd0);
+ rd2 = rd + 1;
+ }
ai_dword += 1;
if (user_mode(regs))
goto user;
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32_unaligned_check(regs->uregs[rd], addr);
- put32_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
user:
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32t_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32t_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32t_unaligned_check(regs->uregs[rd], addr);
- put32t_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32t_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
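
The new rd2/load locals paper over the encoding difference: ARM LDRD/STRD always pairs Rd with Rd+1 (Rd must be even and not r14), while the Thumb-2 encoding carries an independent Rt2 in bits [11:8]. A decode sketch (the helper and struct names are ours):

struct ldrdstrd_dec {
	unsigned int rd, rd2;
	int load;
};

static int decode_ldrdstrd(unsigned long instr, struct ldrdstrd_dec *d)
{
	d->rd = (instr >> 12) & 0xf;			/* RD_BITS() */

	if ((instr & 0xfe000000) == 0xe8000000) {
		/* Thumb-2: Rt2 lives in bits [11:8], L bit selects LDRD */
		d->rd2 = (instr >> 8) & 0xf;
		d->load = !!(instr & (1 << 20));	/* LDST_L_BIT() */
	} else {
		/* ARM: pair is Rd, Rd+1; odd Rd or r14 is UNPREDICTABLE */
		if ((d->rd & 1) || d->rd == 14)
			return -1;
		d->load = (instr & 0xf0) == 0xd0;	/* LDRD vs STRD */
		d->rd2 = d->rd + 1;
	}
	return 0;
}
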
@@ -611,32 +678,112 @@ thumb2arm(u16 tinstr)
/* Else fall through for illegal instruction case */
default:
- return 0xdeadc0de;
+ return BAD_INSTR;
}
}
+/*
+ * Convert a Thumb-2 32-bit LDM, STM, LDRD or STRD into an equivalent
+ * instruction that the ARM alignment handler can process, and find the
+ * corresponding handler, so that the ARM userland alignment fault
+ * fixups can be reused for Thumb.
+ *
+ * @pinstr: original Thumb-2 instruction; returns the rewritten instruction
+ * @regs: register context.
+ * @poffset: returns the offset from the faulted address for later writeback
+ *
+ * NOTES:
+ * 1. Comments below refer to ARMv7 DDI0406A Thumb instruction sections.
+ * 2. Register name Rt from ARMv7 is the same as Rd from ARMv6 (Rd is Rt).
+ */
+static void *
+do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+ union offset_union *poffset)
+{
+ unsigned long instr = *pinstr;
+ u16 tinst1 = (instr >> 16) & 0xffff;
+ u16 tinst2 = instr & 0xffff;
+
+ switch (tinst1 & 0xffe0) {
+ /* A6.3.5 Load/Store multiple */
+ case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
+ case 0xe8a0: /* ...above writeback version */
+ case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */
+ case 0xe920: /* ...above writeback version */
+		/* no offset decision needed; the handler calculates it */
+ return do_alignment_ldmstm;
+
+ case 0xf840: /* POP/PUSH T3 (single register) */
+ if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
+ u32 L = !!(LDST_L_BIT(instr));
+ const u32 subset[2] = {
+ 0xe92d0000, /* STMDB sp!,{registers} */
+ 0xe8bd0000, /* LDMIA sp!,{registers} */
+ };
+ *pinstr = subset[L] | (1<<RD_BITS(instr));
+ return do_alignment_ldmstm;
+ }
+ /* Else fall through for illegal instruction case */
+ break;
+
+ /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
+ case 0xe860:
+ case 0xe960:
+ case 0xe8e0:
+ case 0xe9e0:
+ poffset->un = (tinst2 & 0xff) << 2;
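+		/* fall through - both groups return the LDRD/STRD handler */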
+ case 0xe940:
+ case 0xe9c0:
+ return do_alignment_ldrdstrd;
+
+ /*
+ * No need to handle load/store instructions up to word size
+ * since ARMv6 and later CPUs can perform unaligned accesses.
+ */
+ default:
+ break;
+ }
+ return NULL;
+}
+
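
The 0xf840 case is the neat trick here: a single-register PUSH/POP T3 is rewritten into the classic ARM STMDB/LDMIA sp! encoding with a one-bit register list, so do_alignment_ldmstm can emulate it unchanged. A worked example for the POP side (the helper name is ours):

/*
 * Worked example (encodings per DDI0406A): Thumb-2 "POP {r7}" is
 * f85d 7b04 -> rewritten to ARM "LDMIA sp!, {r7}"
 * = 0xe8bd0000 | (1 << 7) = 0xe8bd0080.
 */
static unsigned long rewrite_pop_t3(unsigned long instr)
{
	unsigned int rt = (instr >> 12) & 0xf;	/* RD_BITS() */

	return 0xe8bd0000 | (1u << rt);		/* LDMIA sp!, {rt} */
}
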
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- union offset_union offset;
+ union offset_union uninitialized_var(offset);
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
- mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
+ int isize = 4;
+ int thumb2_32b = 0;
+
+ if (interrupts_enabled(regs))
+ local_irq_enable();
instrptr = instruction_pointer(regs);
- fs = get_fs();
- set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
- fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
- if (!(fault))
- instr = thumb2arm(tinstr);
- } else
- fault = __get_user(instr, (u32 *)instrptr);
- set_fs(fs);
+ u16 *ptr = (u16 *)(instrptr & ~1);
+ fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+ fault = probe_kernel_address(ptr + 1, tinst2);
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
+ thumb2_32b = 1;
+ } else {
+ isize = 2;
+ instr = thumb2arm(tinstr);
+ }
+ }
+ } else {
+ fault = probe_kernel_address(instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
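
Fetching now goes through probe_kernel_address() plus the __mem_to_opcode_*() helpers, so BE8 (byte-swapped) kernels decode correctly, and a leading IS_T32 halfword pulls in its second half. The composition itself is trivial; a sketch of what __opcode_thumb32_compose() is assumed to do:

/*
 * Sketch: the first Thumb-2 halfword sits at the lower address, so the
 * canonical 32-bit form is (first << 16) | second.
 */
static unsigned long thumb32_compose_sketch(unsigned short first,
					    unsigned short second)
{
	return ((unsigned long)first << 16) | second;
}
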
if (fault) {
type = TYPE_FAULT;
@@ -650,7 +797,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
fixup:
- regs->ARM_pc += thumb_mode(regs) ? 2 : 4;
+ regs->ARM_pc += isize;
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
@@ -709,18 +856,28 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
handler = do_alignment_ldrstr;
break;
- case 0x08000000: /* ldm or stm */
- handler = do_alignment_ldmstm;
+ case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
+ if (thumb2_32b) {
+ offset.un = 0;
+ handler = do_alignment_t32_to_handler(&instr, regs, &offset);
+ } else {
+ offset.un = 0;
+ handler = do_alignment_ldmstm;
+ }
break;
default:
goto bad;
}
+ if (!handler)
+ goto bad;
type = handler(addr, instr, regs);
- if (type == TYPE_ERROR || type == TYPE_FAULT)
+ if (type == TYPE_ERROR || type == TYPE_FAULT) {
+ regs->ARM_pc -= isize;
goto bad_or_fault;
+ }
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
@@ -730,7 +887,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
bad_or_fault:
if (type == TYPE_ERROR)
goto bad;
- regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
/*
* We got a fault - fix it up, or die.
*/
@@ -746,33 +902,62 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
*/
printk(KERN_ERR "Alignment trap: not handling instruction "
"%0*lx at [<%08lx>]\n",
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr, instrptr);
+ isize << 1,
+ isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
return 1;
user:
ai_user += 1;
- if (ai_usermode & 1)
+ if (ai_usermode & UM_WARN)
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr,
+ isize << 1,
+ isize == 2 ? tinstr : instr,
addr, fsr);
- if (ai_usermode & 2)
+ if (ai_usermode & UM_FIXUP)
goto fixup;
- if (ai_usermode & 4)
- force_sig(SIGBUS, current);
- else
- set_cr(cr_no_alignment);
+ if (ai_usermode & UM_SIGNAL) {
+ siginfo_t si;
+
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+ si.si_addr = (void __user *)addr;
+
+ force_sig_info(si.si_signo, &si, current);
+ } else {
+ /*
+ * We're about to disable the alignment trap and return to
+ * user space. But if an interrupt occurs before actually
+ * reaching user space, then the IRQ vector entry code will
+ * notice that we were still in kernel space and therefore
+ * the alignment trap won't be re-enabled in that case as it
+ * is presumed to be always on from kernel space.
+ * Let's prevent that race by disabling interrupts here (they
+ * are disabled on the way back to user space anyway in
+ * entry-common.S) and disable the alignment trap only if
+ * there is no work pending for this thread.
+ */
+ raw_local_irq_disable();
+ if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+ set_cr(cr_no_alignment);
+ }
return 0;
}
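
In UM_SIGNAL mode user space now receives a SIGBUS with si_code BUS_ADRALN and the faulting address in si_addr, rather than a bare signal. A user-space sketch (not part of the patch) that observes this:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void bus_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	if (si->si_code == BUS_ADRALN)
		fprintf(stderr, "SIGBUS: misaligned access at %p\n",
			si->si_addr);
	exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);
	/* ... perform a misaligned LDRD/LDM-class access here ... */
	return 0;
}
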
+static int __init noalign_setup(char *__unused)
+{
+ set_cr(__clear_cr(CR_A));
+ return 1;
+}
+__setup("noalign", noalign_setup);
+
/*
* This needs to be done after sysctl_init, otherwise sys/ will be
* overwritten. Actually, this shouldn't be in sys/ at all since
@@ -784,20 +969,33 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
- res = proc_mkdir("cpu", NULL);
+ res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
+ &alignment_proc_fops);
if (!res)
return -ENOMEM;
+#endif
- res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
- if (!res)
- return -ENOMEM;
+ if (cpu_is_v6_unaligned()) {
+ set_cr(__clear_cr(CR_A));
+ ai_usermode = safe_usermode(ai_usermode, false);
+ }
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_write;
-#endif
+ cr_no_alignment = get_cr() & ~CR_A;
- hook_fault_code(1, do_alignment, SIGILL, "alignment exception");
- hook_fault_code(3, do_alignment, SIGILL, "alignment exception");
+ hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
+ "alignment exception");
+
+ /*
+ * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
+ * fault, not as alignment error.
+ *
+ * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
+ * needed.
+ */
+ if (cpu_architecture() <= CPU_ARCH_ARMv6) {
+ hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
+ "alignment exception");
+ }
return 0;
}
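
The hook_fault_code() calls above assume the companion change to its prototype in arch/arm/mm/fault.c, which adds the signal number and si_code to report instead of the old hardwired SIGILL; presumably:

/* Assumed updated prototype (arch/arm/mm/fault.c). */
void __init hook_fault_code(int nr,
			    int (*fn)(unsigned long, unsigned int,
				      struct pt_regs *),
			    int sig, int code, const char *name);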