Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig           3
-rw-r--r--  arch/arm/kernel/module.c   4
-rw-r--r--  arch/arm/mm/alignment.c   56
-rw-r--r--  arch/arm/mm/init.c         2
-rw-r--r--  arch/arm/mm/proc-arm946.S  3
5 files changed, 49 insertions, 19 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2c71a8f3535..5ebc5d922ea 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -195,8 +195,7 @@ config VECTORS_BASE
 	  The base address of exception vectors.
 
 config ARM_PATCH_PHYS_VIRT
-	bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	bool "Patch physical to virtual translations at runtime"
 	depends on !XIP_KERNEL && MMU
 	depends on !ARCH_REALVIEW || !SPARSEMEM
 	help
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 05b377616fd..cc2020c2c70 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -323,7 +323,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 #endif
 	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
 	if (s && !is_smp())
+#ifdef CONFIG_SMP_ON_UP
 		fixup_smp((void *)s->sh_addr, s->sh_size);
+#else
+		return -EINVAL;
+#endif
 	return 0;
 }
 
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index be7c638b648..cfbcf8b9559 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/system.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
@@ -95,6 +96,33 @@ static const char *usermode_action[] = {
 	"signal+warn"
 };
 
+/* Return true if and only if the ARMv6 unaligned access model is in use. */
+static bool cpu_is_v6_unaligned(void)
+{
+	return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
+}
+
+static int safe_usermode(int new_usermode, bool warn)
+{
+	/*
+	 * ARMv6 and later CPUs can perform unaligned accesses for
+	 * most single load and store instructions up to word size.
+	 * LDM, STM, LDRD and STRD still need to be handled.
+	 *
+	 * Ignoring the alignment fault is not an option on these
+	 * CPUs since we spin re-faulting the instruction without
+	 * making any progress.
+	 */
+	if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
+		new_usermode |= UM_FIXUP;
+
+		if (warn)
+			printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+	}
+
+	return new_usermode;
+}
+
 static int alignment_proc_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "User:\t\t%lu\n", ai_user);
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
 		if (get_user(mode, buffer))
 			return -EFAULT;
 		if (mode >= '0' && mode <= '5')
-			ai_usermode = mode - '0';
+			ai_usermode = safe_usermode(mode - '0', true);
 	}
 	return count;
 }
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (ai_usermode & UM_FIXUP)
 		goto fixup;
 
-	if (ai_usermode & UM_SIGNAL)
-		force_sig(SIGBUS, current);
-	else {
+	if (ai_usermode & UM_SIGNAL) {
+		siginfo_t si;
+
+		si.si_signo = SIGBUS;
+		si.si_errno = 0;
+		si.si_code = BUS_ADRALN;
+		si.si_addr = (void __user *)addr;
+
+		force_sig_info(si.si_signo, &si, current);
+	} else {
 		/*
 		 * We're about to disable the alignment trap and return to
 		 * user space. But if an interrupt occurs before actually
@@ -926,20 +961,11 @@ static int __init alignment_init(void)
 		return -ENOMEM;
 #endif
 
-	/*
-	 * ARMv6 and later CPUs can perform unaligned accesses for
-	 * most single load and store instructions up to word size.
-	 * LDM, STM, LDRD and STRD still need to be handled.
-	 *
-	 * Ignoring the alignment fault is not an option on these
-	 * CPUs since we spin re-faulting the instruction without
-	 * making any progress.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+	if (cpu_is_v6_unaligned()) {
 		cr_alignment &= ~CR_A;
 		cr_no_alignment &= ~CR_A;
 		set_cr(cr_alignment);
-		ai_usermode = UM_FIXUP;
+		ai_usermode = safe_usermode(ai_usermode, false);
 	}
 
 	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2fee782077c..91bca355cd3 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 static inline void poison_init_mem(void *s, size_t count)
 {
 	u32 *p = (u32 *)s;
-	while ((count = count - 4))
+	for (; count != 0; count -= 4)
 		*p++ = 0xe7fddef0;
 }
 
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f8f7ea34bfc..683af3a182b 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -410,6 +410,7 @@ __arm946_proc_info:
 	.long	0x41009460
 	.long	0xff00fff0
 	.long	0
+	.long	0
 	b	__arm946_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
@@ -418,6 +419,6 @@ __arm946_proc_info:
 	.long	arm946_processor_functions
 	.long	0
 	.long	0
-	.long	arm940_cache_fns
+	.long	arm946_cache_fns
 	.size	__arm946_proc_info, . - __arm946_proc_info
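Note on the arch/arm/mm/init.c hunk: the old while ((count = count - 4)) loop decrements before testing, so it never poisons the final word of the region (and does nothing at all for a 4-byte region); the replacement for-loop covers the whole range and also terminates immediately when count is 0. Below is a minimal user-space sketch of the two loop shapes, counting words instead of writing the 0xe7fddef0 poison value; the helper names are illustrative, not from the kernel, and a word-multiple count is assumed as in the kernel callers.

#include <stdio.h>

/* Old loop shape: the decrement happens before the test, so the last
 * 4-byte word in the region is never counted. */
static unsigned int words_poisoned_old(size_t count)
{
	unsigned int words = 0;
	while ((count = count - 4))
		words++;
	return words;
}

/* New loop shape: test first, then decrement, so every word is covered
 * and a zero-length region does nothing. */
static unsigned int words_poisoned_new(size_t count)
{
	unsigned int words = 0;
	for (; count != 0; count -= 4)
		words++;
	return words;
}

int main(void)
{
	/* For a 16-byte region: the old loop covers 3 words, the new one 4. */
	printf("old: %u, new: %u\n", words_poisoned_old(16), words_poisoned_new(16));
	return 0;
}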