Diffstat (limited to 'arch/arm/kernel/process.c')

-rw-r--r--	arch/arm/kernel/process.c	441
1 file changed, 224 insertions(+), 217 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e76fcaadce0..81ef686a91c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -10,7 +10,7 @@
  */
 #include <stdarg.h>
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -30,14 +30,17 @@
 #include <linux/uaccess.h>
 #include <linux/random.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/leds.h>
+#include <linux/reboot.h>
 
 #include <asm/cacheflush.h>
-#include <asm/leds.h>
+#include <asm/idmap.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
+#include <asm/system_misc.h>
 #include <asm/mach/time.h>
+#include <asm/tls.h>
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #include <linux/stackprotector.h>
@@ -45,86 +48,74 @@ unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
-static const char *processor_modes[] = {
+static const char *processor_modes[] __maybe_unused = {
   "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
   "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
   "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
   "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
 };
 
-static const char *isa_modes[] = {
+static const char *isa_modes[] __maybe_unused = {
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-extern void setup_mm_for_reboot(char mode);
-
-static volatile int hlt_counter;
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+typedef void (*phys_reset_t)(unsigned long);
 
-#include <mach/system.h>
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
 
-void disable_hlt(void)
+static void __soft_restart(void *addr)
 {
-	hlt_counter++;
-}
+	phys_reset_t phys_reset;
 
-EXPORT_SYMBOL(disable_hlt);
+	/* Take out a flat memory mapping. */
+	setup_mm_for_reboot();
 
-void enable_hlt(void)
-{
-	hlt_counter--;
-}
+	/* Clean and invalidate caches */
+	flush_cache_all();
 
-EXPORT_SYMBOL(enable_hlt);
+	/* Turn off caching */
+	cpu_proc_fin();
 
-static int __init nohlt_setup(char *__unused)
-{
-	hlt_counter = 1;
-	return 1;
-}
+	/* Push out any further dirty data, and ensure cache is empty */
+	flush_cache_all();
 
-static int __init hlt_setup(char *__unused)
-{
-	hlt_counter = 0;
-	return 1;
-}
+	/* Switch to the identity mapping. */
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset((unsigned long)addr);
 
-__setup("nohlt", nohlt_setup);
-__setup("hlt", hlt_setup);
+	/* Should never get here. */
+	BUG();
+}
 
-void arm_machine_restart(char mode, const char *cmd)
+void soft_restart(unsigned long addr)
 {
+	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
 	/* Disable interrupts first */
-	local_irq_disable();
+	raw_local_irq_disable();
 	local_fiq_disable();
 
-	/*
-	 * Tell the mm system that we are going to reboot -
-	 * we may need it to insert some 1:1 mappings so that
-	 * soft boot works.
-	 */
-	setup_mm_for_reboot(mode);
+	/* Disable the L2 if we're the last man standing. */
+	if (num_online_cpus() == 1)
+		outer_disable();
 
-	/* Clean and invalidate caches */
-	flush_cache_all();
+	/* Change to the new stack and continue with the reset. */
+	call_with_stack(__soft_restart, (void *)addr, (void *)stack);
 
-	/* Turn off caching */
-	cpu_proc_fin();
-
-	/* Push out any further dirty data, and ensure cache is empty */
-	flush_cache_all();
-
-	/*
-	 * Now call the architecture specific reboot code.
-	 */
-	arch_reset(mode, cmd);
+	/* Should never get here. */
+	BUG();
+}
 
-	/*
-	 * Whoops - the architecture was unable to reboot.
-	 * Tell the user!
-	 */
-	mdelay(1000);
-	printk("Reboot failed -- System halted\n");
-	while (1);
+static void null_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
 }
 
 /*
@@ -133,122 +124,121 @@ void arm_machine_restart(char mode, const char *cmd)
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart;
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
-static void do_nothing(void *unused)
-{
-}
-
 /*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
+ * This is our default idle handler.
  */
-void cpu_idle_wait(void)
-{
-	smp_mb();
-	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+void (*arm_pm_idle)(void);
 
 /*
- * This is our default idle handler.  We need to disable
- * interrupts here to ensure we don't miss a wakeup call.
+ * Called from the core idle loop.
  */
-static void default_idle(void)
+
+void arch_cpu_idle(void)
 {
-	if (!need_resched())
-		arch_idle();
+	if (arm_pm_idle)
+		arm_pm_idle();
+	else
+		cpu_do_idle();
 	local_irq_enable();
 }
 
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL(pm_idle);
-
-/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way.  The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	local_fiq_enable();
+}
 
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_stop_sched_tick(1);
-		leds_event(led_idle_start);
-		while (!need_resched()) {
-#ifdef CONFIG_HOTPLUG_CPU
-			if (cpu_is_offline(smp_processor_id()))
-				cpu_die();
+void arch_cpu_idle_enter(void)
+{
+	ledtrig_cpu(CPU_LED_IDLE_START);
+#ifdef CONFIG_PL310_ERRATA_769419
+	wmb();
 #endif
-
-			local_irq_disable();
-			if (hlt_counter) {
-				local_irq_enable();
-				cpu_relax();
-			} else {
-				stop_critical_timings();
-				pm_idle();
-				start_critical_timings();
-				/*
-				 * This will eventually be removed - pm_idle
-				 * functions should always return with IRQs
-				 * enabled.
-				 */
-				WARN_ON(irqs_disabled());
-				local_irq_enable();
-			}
-		}
-		leds_event(led_idle_end);
-		tick_nohz_restart_sched_tick();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
 }
 
-static char reboot_mode = 'h';
-
-int __init reboot_setup(char *str)
+void arch_cpu_idle_exit(void)
 {
-	reboot_mode = str[0];
-	return 1;
+	ledtrig_cpu(CPU_LED_IDLE_END);
 }
 
-__setup("reboot=", reboot_setup);
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
+#endif
 
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() to achieve this.
+ */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_SMP
-	smp_send_stop();
-#endif
+	disable_nonboot_cpus();
 }
 
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
 void machine_halt(void)
 {
-	machine_shutdown();
+	local_irq_disable();
+	smp_send_stop();
+
+	local_irq_disable();
 	while (1);
 }
 
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
 void machine_power_off(void)
 {
-	machine_shutdown();
+	local_irq_disable();
+	smp_send_stop();
+
 	if (pm_power_off)
 		pm_power_off();
 }
 
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
 void machine_restart(char *cmd)
 {
-	machine_shutdown();
+	local_irq_disable();
+	smp_send_stop();
+
 	arm_pm_restart(reboot_mode, cmd);
+
+	/* Give a grace period for failure to restart of 1s */
+	mdelay(1000);
+
+	/* Whoops - the platform was unable to reboot. Tell the user! */
+	printk("Reboot failed -- System halted\n");
+	local_irq_disable();
+	while (1);
 }
 
 void __show_regs(struct pt_regs *regs)
@@ -256,11 +246,8 @@ void __show_regs(struct pt_regs *regs)
 	unsigned long flags;
 	char buf[64];
 
-	printk("CPU: %d    %s  (%s %.*s)\n",
-		raw_smp_processor_id(), print_tainted(),
-		init_utsname()->release,
-		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+	show_regs_print_info(KERN_DEFAULT);
+
 	print_symbol("PC is at %s\n", instruction_pointer(regs));
 	print_symbol("LR is at %s\n", regs->ARM_lr);
 	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
@@ -284,12 +271,17 @@ void __show_regs(struct pt_regs *regs)
 	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
 	buf[4] = '\0';
 
+#ifndef CONFIG_CPU_V7M
 	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
 		buf, interrupts_enabled(regs) ? "n" : "ff",
 		fast_interrupts_enabled(regs) ? "n" : "ff",
 		processor_modes[processor_mode(regs)],
 		isa_modes[isa_mode(regs)],
 		get_fs() == get_ds() ? "kernel" : "user");
+#else
+	printk("xPSR: %08lx\n", regs->ARM_cpsr);
+#endif
+
 #ifdef CONFIG_CPU_CP15
 	{
 		unsigned int ctrl;
@@ -315,9 +307,8 @@ void __show_regs(struct pt_regs *regs)
 void show_regs(struct pt_regs * regs)
 {
 	printk("\n");
-	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
 	__show_regs(regs);
-	__backtrace();
+	dump_stack();
 }
 
 ATOMIC_NOTIFIER_HEAD(thread_notify_head);
@@ -354,23 +345,34 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 int
 copy_thread(unsigned long clone_flags, unsigned long stack_start,
-	    unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
+	    unsigned long stk_sz, struct task_struct *p)
 {
 	struct thread_info *thread = task_thread_info(p);
 	struct pt_regs *childregs = task_pt_regs(p);
 
-	*childregs = *regs;
-	childregs->ARM_r0 = 0;
-	childregs->ARM_sp = stack_start;
-
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
-	thread->cpu_context.sp = (unsigned long)childregs;
+
+	if (likely(!(p->flags & PF_KTHREAD))) {
+		*childregs = *current_pt_regs();
+		childregs->ARM_r0 = 0;
+		if (stack_start)
+			childregs->ARM_sp = stack_start;
+	} else {
+		memset(childregs, 0, sizeof(struct pt_regs));
+		thread->cpu_context.r4 = stk_sz;
+		thread->cpu_context.r5 = stack_start;
+		childregs->ARM_cpsr = SVC_MODE;
+	}
 	thread->cpu_context.pc = (unsigned long)ret_from_fork;
+	thread->cpu_context.sp = (unsigned long)childregs;
 
 	clear_ptrace_hw_breakpoint(p);
 
 	if (clone_flags & CLONE_SETTLS)
-		thread->tp_value = regs->ARM_r3;
+		thread->tp_value[0] = childregs->ARM_r3;
+	thread->tp_value[1] = get_tpuser();
+
+	thread_notify(THREAD_NOTIFY_COPY, thread);
 
 	return 0;
 }
@@ -399,66 +401,10 @@ int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
 }
 EXPORT_SYMBOL(dump_fpu);
 
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function.  r4 is the thread argument, r5 is the pointer to
- * the thread function, and r6 points to the exit function.
- */
-extern void kernel_thread_helper(void);
-asm(	".pushsection .text\n"
-"	.align\n"
-"	.type	kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-#ifdef CONFIG_TRACE_IRQFLAGS
-"	bl	trace_hardirqs_on\n"
-#endif
-"	msr	cpsr_c, r7\n"
-"	mov	r0, r4\n"
-"	mov	lr, r6\n"
-"	mov	pc, r5\n"
-"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
-"	.popsection");
-
-#ifdef CONFIG_ARM_UNWIND
-extern void kernel_thread_exit(long code);
-asm(	".pushsection .text\n"
-"	.align\n"
-"	.type	kernel_thread_exit, #function\n"
-"kernel_thread_exit:\n"
-"	.fnstart\n"
-"	.cantunwind\n"
-"	bl	do_exit\n"
-"	nop\n"
-"	.fnend\n"
-"	.size	kernel_thread_exit, . - kernel_thread_exit\n"
-"	.popsection");
-#else
-#define kernel_thread_exit	do_exit
-#endif
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-	struct pt_regs regs;
-
-	memset(&regs, 0, sizeof(regs));
-
-	regs.ARM_r4 = (unsigned long)arg;
-	regs.ARM_r5 = (unsigned long)fn;
-	regs.ARM_r6 = (unsigned long)kernel_thread_exit;
-	regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
-	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
-
-	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
+	unsigned long stack_page;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
@@ -467,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
 	frame.sp = thread_saved_sp(p);
 	frame.lr = 0;			/* recovered from the stack */
 	frame.pc = thread_saved_pc(p);
+	stack_page = (unsigned long)task_stack_page(p);
 	do {
-		int ret = unwind_frame(&frame);
-		if (ret < 0)
+		if (frame.sp < stack_page ||
+		    frame.sp >= stack_page + THREAD_SIZE ||
+		    unwind_frame(&frame) < 0)
 			return 0;
 		if (!in_sched_functions(frame.pc))
 			return frame.pc;
@@ -483,23 +431,82 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
 
+#ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
 /*
  * The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Let's declare a mapping
- * for it so it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
  */
+static struct vm_area_struct gate_vma = {
+	.vm_start	= 0xffff0000,
+	.vm_end		= 0xffff0000 + PAGE_SIZE,
+	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
+};
 
-int vectors_user_mapping(void)
+static int __init gate_vma_init(void)
 {
-	struct mm_struct *mm = current->mm;
-	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
-				       VM_READ | VM_EXEC |
-				       VM_MAYREAD | VM_MAYEXEC |
-				       VM_ALWAYSDUMP | VM_RESERVED,
-				       NULL);
+	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+	return 0;
+}
+arch_initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+	return &gate_vma;
 }
 
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+	return in_gate_area(NULL, addr);
+}
+#define is_gate_vma(vma)	((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma)	0
+#endif
+
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+	return is_gate_vma(vma) ? "[vectors]" :
+		(vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+		 "[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret;
+
+	if (!signal_page)
+		signal_page = get_signal_page();
+	if (!signal_page)
+		return -ENOMEM;
+
+	down_write(&mm->mmap_sem);
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	ret = install_special_mapping(mm, addr, PAGE_SIZE,
+		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+		&signal_page);
+
+	if (ret == 0)
+		mm->context.sigpage = addr;
+
+ up_fail:
+	up_write(&mm->mmap_sem);
+	return ret;
 }
+#endif
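The bulk of this diff turns the old hard-wired reboot and idle paths into hook points: arm_pm_restart() now defaults to the empty null_restart() and gets filled in from the machine descriptor at boot, arch_cpu_idle() consults the optional arm_pm_idle() callback before falling back to cpu_do_idle(), and soft_restart() switches onto a private stack via call_with_stack() before jumping to the identity-mapped reset code. A minimal board-support sketch of how a platform of this kernel vintage might plug into these hooks; the myboard names and the "vendor,myboard" compatible string are hypothetical, not part of the diff:

    #include <linux/reboot.h>
    #include <asm/mach/arch.h>
    #include <asm/proc-fns.h>
    #include <asm/system_misc.h>

    /* Hypothetical restart hook; the core setup code copies the machine
     * descriptor's .restart into arm_pm_restart, which machine_restart()
     * above then calls. */
    static void myboard_restart(enum reboot_mode mode, const char *cmd)
    {
    	/* A single-core board can fall back to the generic soft reset,
    	 * which ends up in __soft_restart() on the private stack. */
    	soft_restart(0);
    }

    /* Hypothetical idle hook, consulted by arch_cpu_idle() when set. */
    static void myboard_idle(void)
    {
    	cpu_do_idle();	/* or enter a platform-specific low-power state */
    }

    static void __init myboard_init_machine(void)
    {
    	arm_pm_idle = myboard_idle;
    }

    static const char *const myboard_dt_compat[] = { "vendor,myboard", NULL };

    DT_MACHINE_START(MYBOARD, "My Board")
    	.init_machine	= myboard_init_machine,
    	.restart	= myboard_restart,
    	.dt_compat	= myboard_dt_compat,
    MACHINE_END

Multi-core systems should not route .restart through soft_restart(): as the machine_restart() comment in the diff explains, they need a hardware reset that takes all CPUs down at once.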

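The gate_vma and sigpage hunks at the end are user-visible through arch_vma_name(): the kuser-helper vectors page keeps its "[vectors]" label in /proc/<pid>/maps, while the signal-return trampoline, now installed at a randomized address by arch_setup_additional_pages(), shows up as "[sigpage]". An illustrative user-space check, again not part of the diff, that prints both mappings for the current process:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char line[256];
    	FILE *f = fopen("/proc/self/maps", "r");

    	if (!f)
    		return 1;
    	/* e.g. "ffff0000-ffff1000 r-xp 00000000 00:00 0  [vectors]" */
    	while (fgets(line, sizeof(line), f))
    		if (strstr(line, "[vectors]") || strstr(line, "[sigpage]"))
    			fputs(line, stdout);
    	fclose(f);
    	return 0;
    }

On a kernel built without CONFIG_KUSER_HELPERS the "[vectors]" label disappears, since is_gate_vma() then evaluates to 0 and the gate_vma is never registered.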