Diffstat (limited to 'arch/arm/kernel/entry-common.S')
-rw-r--r--  arch/arm/kernel/entry-common.S | 469
1 file changed, 345 insertions, 124 deletions
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 6c90c50a9ee..7139d4a7dea 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -9,7 +9,15 @@
  */
 
 #include <asm/unistd.h>
-#include <asm/arch/entry-macro.S>
+#include <asm/ftrace.h>
+#include <asm/unwind.h>
+
+#ifdef CONFIG_NEED_RET_TO_USER
+#include <mach/entry-macro.S>
+#else
+	.macro	arch_ret_to_user, tmp1, tmp2
+	.endm
+#endif
 
 #include "entry-header.S"
 
@@ -21,22 +29,20 @@
  * stack.
  */
 ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
 	disable_irq				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	fast_work_pending
+	asm_trace_hardirqs_on
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
+	ct_user_enter
 
-	@ fast_restore_user_regs
-	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
+	restore_user_regs fast = 1, offset = S_OFF
+ UNWIND(.fnend		)
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
@@ -44,91 +50,325 @@ ret_fast_syscall:
 fast_work_pending:
 	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
 work_pending:
-	tst	r1, #_TIF_NEED_RESCHED
-	bne	work_resched
-	tst	r1, #_TIF_SIGPENDING
-	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
-	bl	do_notify_resume
-	b	ret_slow_syscall		@ Check work again
+	bl	do_work_pending
+	cmp	r0, #0
+	beq	no_work_pending
+	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
+	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
+	b	local_restart			@ ... and off we go
 
-work_resched:
-	bl	schedule
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
 	disable_irq				@ disable interrupts
+ENTRY(ret_to_user_from_irq)
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	work_pending
 no_work_pending:
+	asm_trace_hardirqs_on
+
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
+	ct_user_enter save = 0
 
-	@ slow_restore_user_regs
-	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
-	ldr	lr, [sp, #S_PC]!		@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
+	restore_user_regs fast = 0, offset = 0
+ENDPROC(ret_to_user_from_irq)
+ENDPROC(ret_to_user)
 
 /*
  * This is how we return from a fork.
  */
 ENTRY(ret_from_fork)
	bl	schedule_tail
-	get_thread_info tsk
-	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
-	mov	why, #1
-	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
-	beq	ret_slow_syscall
-	mov	r1, sp
-	mov	r0, #1				@ trace exit [IP = 1]
-	bl	syscall_trace
+	cmp	r5, #0
+	movne	r0, r4
+	adrne	lr, BSYM(1f)
+	movne	pc, r5
+1:	get_thread_info tsk
 	b	ret_slow_syscall
-
+ENDPROC(ret_from_fork)
 
 	.equ NR_syscalls,0
 #define CALL(x) .equ NR_syscalls,NR_syscalls+1
 #include "calls.S"
+
+/*
+ * Ensure that the system call table is equal to __NR_syscalls,
+ * which is the value the rest of the system sees
+ */
+.ifne NR_syscalls - __NR_syscalls
+.error "__NR_syscalls is not equal to the size of the syscall table"
+.endif
+
 #undef CALL
 #define CALL(x) .long x
 
-/*=============================================================================
- * SWI handler
- *-----------------------------------------------------------------------------
+#ifdef CONFIG_FUNCTION_TRACER
+/*
+ * When compiling with -pg, gcc inserts a call to the mcount routine at the
+ * start of every function.  In mcount, apart from the function's address (in
+ * lr), we need to get hold of the function's caller's address.
+ *
+ * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
+ *
+ *	bl	mcount
+ *
+ * These versions have the limitation that in order for the mcount routine to
+ * be able to determine the function's caller's address, an APCS-style frame
+ * pointer (which is set up with something like the code below) is required.
+ *
+ *	mov	ip, sp
+ *	push	{fp, ip, lr, pc}
+ *	sub	fp, ip, #4
+ *
+ * With EABI, these frame pointers are not available unless -mapcs-frame is
+ * specified, and if building as Thumb-2, not even then.
+ *
+ * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
+ * with call sites like:
+ *
+ *	push	{lr}
+ *	bl	__gnu_mcount_nc
+ *
+ * With these compilers, frame pointers are not necessary.
+ *
+ * mcount can be thought of as a function called in the middle of a subroutine
+ * call.  As such, it needs to be transparent for both the caller and the
+ * callee: the original lr needs to be restored when leaving mcount, and no
+ * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
+ * clobber the ip register.  This is OK because the ARM calling convention
+ * allows it to be clobbered in subroutines and doesn't use it to hold
+ * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
  */
-	/* If we're optimising for StrongARM the resulting code won't
-	   run on an ARM7 and we can save a couple of instructions.
-								--pb */
-#ifdef CONFIG_CPU_ARM710
-#define A710(code...) code
-.Larm710bug:
-	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
 
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
+.macro mcount_adjust_addr rd, rn
+	bic	\rd, \rn, #1		@ clear the Thumb bit if present
+	sub	\rd, \rd, #MCOUNT_INSN_SIZE
+.endm
+
+.macro __mcount suffix
+	mcount_enter
+	ldr	r0, =ftrace_trace_function
+	ldr	r2, [r0]
+	adr	r0, .Lftrace_stub
+	cmp	r0, r2
+	bne	1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ldr	r1, =ftrace_graph_return
+	ldr	r2, [r1]
+	cmp	r0, r2
+	bne	ftrace_graph_caller\suffix
+
+	ldr	r1, =ftrace_graph_entry
+	ldr	r2, [r1]
+	ldr	r0, =ftrace_graph_entry_stub
+	cmp	r0, r2
+	bne	ftrace_graph_caller\suffix
+#endif
+
+	mcount_exit
+
+1:	mcount_get_lr	r1		@ lr of instrumented func
+	mcount_adjust_addr	r0, lr	@ instrumented function
+	adr	lr, BSYM(2f)
+	mov	pc, r2
+2:	mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+	mcount_enter
+
+	mcount_get_lr	r1		@ lr of instrumented func
+	mcount_adjust_addr	r0, lr	@ instrumented function
+
+	.globl ftrace_call\suffix
+ftrace_call\suffix:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
 	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE
-	subs	pc, lr, #4
+#endif
+
+	mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	@ called from __ftrace_caller, saved in mcount_enter
+	ldr	r1, [sp, #16]		@ instrumented routine (func)
+	mcount_adjust_addr	r1, r1
+#else
+	@ called from __mcount, untouched in lr
+	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
+#endif
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+	mcount_exit
+.endm
+
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+	stmdb	sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+	ldr	\reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {r0-r3, pc}
+.endm
+
+ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	stmdb	sp!, {lr}
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {pc}
+#else
+	__mcount _old
+#endif
+ENDPROC(mcount)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller_old)
+	__ftrace_caller _old
+ENDPROC(ftrace_caller_old)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+	__ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad	#4)
+	stmdb	sp!, {r0-r3, lr}
+ UNWIND(.save	{r0-r3, lr})
+.endm
+
+.macro mcount_get_lr reg
+	ldr	\reg, [sp, #20]
+.endm
+
+.macro mcount_exit
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
+.endm
+
+ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	mov	ip, lr
+	ldmia	sp!, {lr}
+	mov	pc, ip
 #else
-#define A710(code...)
+	__mcount
+#endif
+UNWIND(.fnend)
+ENDPROC(__gnu_mcount_nc)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+UNWIND(.fnstart)
+	__ftrace_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_caller)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
+	__ftrace_graph_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_caller)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl return_to_handler
+return_to_handler:
+	stmdb	sp!, {r0-r3}
+	mov	r0, fp			@ frame pointer
+	bl	ftrace_return_to_handler
+	mov	lr, r0			@ r0 has real ret addr
+	ldmia	sp!, {r0-r3}
+	mov	pc, lr
 #endif
+
+ENTRY(ftrace_stub)
+.Lftrace_stub:
+	mov	pc, lr
+ENDPROC(ftrace_stub)
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+/*=============================================================================
+ * SWI handler
+ *-----------------------------------------------------------------------------
+ */
+
 	.align	5
 ENTRY(vector_swi)
+#ifdef CONFIG_CPU_V7M
+	v7m_exception_entry
+#else
 	sub	sp, sp, #S_FRAME_SIZE
 	stmia	sp, {r0 - r12}			@ Calling r0 - r12
-	add	r8, sp, #S_PC
-	stmdb	r8, {sp, lr}^			@ Calling sp, lr
+ ARM(	add	r8, sp, #S_PC		)
+ ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
+ THUMB(	mov	r8, sp			)
+ THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
 	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
 	str	lr, [sp, #S_PC]			@ Save calling PC
 	str	r8, [sp, #S_PSR]		@ Save CPSR
 	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
+#endif
 	zero_fp
+	alignment_trap ip, __cr_alignment
+	enable_irq
+	ct_user_exit
+	get_thread_info tsk
 
 	/*
 	 * Get the system call number.
@@ -143,51 +383,29 @@ ENTRY(vector_swi)
 #ifdef CONFIG_ARM_THUMB
 	tst	r8, #PSR_T_BIT
 	movne	r10, #0				@ no thumb OABI emulation
-	ldreq	r10, [lr, #-4]			@ get SWI instruction
+ USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
 #else
-	ldr	r10, [lr, #-4]			@ get SWI instruction
-	A710(	and	ip, r10, #0x0f000000	@ check for SWI		)
-	A710(	teq	ip, #0x0f000000						)
-	A710(	bne	.Larm710bug						)
+ USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
 #endif
+ ARM_BE8(rev	r10, r10)			@ little endian instruction
 
 #elif defined(CONFIG_AEABI)
 
 	/*
 	 * Pure EABI user space always put syscall number into scno (r7).
 	 */
-	A710(	ldr	ip, [lr, #-4]		@ get SWI instruction	)
-	A710(	and	ip, ip, #0x0f000000	@ check for SWI		)
-	A710(	teq	ip, #0x0f000000						)
-	A710(	bne	.Larm710bug						)
-
 #elif defined(CONFIG_ARM_THUMB)
-
 	/* Legacy ABI only, possibly thumb mode. */
 	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
 	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
-	ldreq	scno, [lr, #-4]
+ USER(	ldreq	scno, [lr, #-4]	)
 
 #else
-
 	/* Legacy ABI only. */
-	ldr	scno, [lr, #-4]			@ get SWI instruction
-	A710(	and	ip, scno, #0x0f000000	@ check for SWI		)
-	A710(	teq	ip, #0x0f000000						)
-	A710(	bne	.Larm710bug						)
-
+ USER(	ldr	scno, [lr, #-4]	)	@ get SWI instruction
 #endif
 
-#ifdef CONFIG_ALIGNMENT_TRAP
-	ldr	ip, __cr_alignment
-	ldr	ip, [ip]
-	mcr	p15, 0, ip, c1, c0		@ update control register
-#endif
-	enable_irq
-
-	get_thread_info tsk
 	adr	tbl, sys_call_table		@ load syscall table pointer
-	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
 
 #if defined(CONFIG_OABI_COMPAT)
 
@@ -204,45 +422,65 @@ ENTRY(vector_swi)
 	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
 
+local_restart:
+	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
-	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
+
+	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
 	bne	__sys_trace
 
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	adr	lr, ret_fast_syscall		@ return address
+	adr	lr, BSYM(ret_fast_syscall)	@ return address
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 
 	add	r1, sp, #S_OFF
-2:	mov	why, #0				@ no longer a real syscall
-	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
 	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
-	bcs	arm_syscall
+	bcs	arm_syscall
+	mov	why, #0				@ no longer a real syscall
 	b	sys_ni_syscall			@ not private func
+
+#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
+	/*
+	 * We failed to handle a fault trying to access the page
+	 * containing the swi instruction, but we're not really in a
+	 * position to return -EFAULT. Instead, return back to the
+	 * instruction and re-enter the user fault handling path trying
+	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
+	 */
+9001:
+	sub	lr, lr, #4
+	str	lr, [sp, #S_PC]
+	b	ret_fast_syscall
+#endif
+ENDPROC(vector_swi)
 
 	/*
 	 * This is the really slow path.  We're going to be doing
 	 * context switches, and waiting for our parent to respond.
 	 */
 __sys_trace:
-	mov	r2, scno
-	add	r1, sp, #S_OFF
-	mov	r0, #0				@ trace entry [IP = 0]
-	bl	syscall_trace
+	mov	r1, scno
+	add	r0, sp, #S_OFF
+	bl	syscall_trace_enter
 
-	adr	lr, __sys_trace_return		@ return address
+	adr	lr, BSYM(__sys_trace_return)	@ return address
 	mov	scno, r0			@ syscall number (possibly new)
 	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
+	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
+	stmccia	sp, {r4, r5}			@ and update the stack args
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
-	b	2b
+	cmp	scno, #-1			@ skip the syscall?
+	bne	2b
+	add	sp, sp, #S_OFF			@ restore stack
+	b	ret_slow_syscall
 
 __sys_trace_return:
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
-	mov	r2, scno
-	mov	r1, sp
-	mov	r0, #1				@ trace exit [IP = 1]
-	bl	syscall_trace
+	mov	r0, sp
+	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
 	.align	5
@@ -275,7 +513,6 @@ ENTRY(sys_call_table)
  */
 @ r0 = syscall number
 @ r8 = syscall table
-		.type	sys_syscall, #function
 sys_syscall:
 		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
 		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
@@ -287,53 +524,31 @@ sys_syscall:
 		movlo	r3, r4
 		ldrlo	pc, [tbl, scno, lsl #2]
 		b	sys_ni_syscall
-
-sys_fork_wrapper:
-		add	r0, sp, #S_OFF
-		b	sys_fork
-
-sys_vfork_wrapper:
-		add	r0, sp, #S_OFF
-		b	sys_vfork
-
-sys_execve_wrapper:
-		add	r3, sp, #S_OFF
-		b	sys_execve
-
-sys_clone_wrapper:
-		add	ip, sp, #S_OFF
-		str	ip, [sp, #4]
-		b	sys_clone
-
-sys_sigsuspend_wrapper:
-		add	r3, sp, #S_OFF
-		b	sys_sigsuspend
-
-sys_rt_sigsuspend_wrapper:
-		add	r2, sp, #S_OFF
-		b	sys_rt_sigsuspend
+ENDPROC(sys_syscall)
 
 sys_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_sigreturn
+ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_rt_sigreturn
-
-sys_sigaltstack_wrapper:
-		ldr	r2, [sp, #S_OFF + S_SP]
-		b	do_sigaltstack
+ENDPROC(sys_rt_sigreturn_wrapper)
 
 sys_statfs64_wrapper:
 		teq	r1, #88
 		moveq	r1, #84
 		b	sys_statfs64
+ENDPROC(sys_statfs64_wrapper)
 
 sys_fstatfs64_wrapper:
 		teq	r1, #88
 		moveq	r1, #84
 		b	sys_fstatfs64
+ENDPROC(sys_fstatfs64_wrapper)
 
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
@@ -344,13 +559,14 @@ sys_mmap2:
 		tst	r5, #PGOFF_MASK
 		moveq	r5, r5, lsr #PAGE_SHIFT - 12
 		streq	r5, [sp, #4]
-		beq	do_mmap2
+		beq	sys_mmap_pgoff
 		mov	r0, #-EINVAL
 		mov	pc, lr
 #else
 		str	r5, [sp, #4]
-		b	do_mmap2
+		b	sys_mmap_pgoff
 #endif
+ENDPROC(sys_mmap2)
 
 #ifdef CONFIG_OABI_COMPAT
 
@@ -361,26 +577,31 @@ sys_mmap2:
 sys_oabi_pread64:
 		stmia	sp, {r3, r4}
 		b	sys_pread64
+ENDPROC(sys_oabi_pread64)
 
 sys_oabi_pwrite64:
 		stmia	sp, {r3, r4}
 		b	sys_pwrite64
+ENDPROC(sys_oabi_pwrite64)
 
 sys_oabi_truncate64:
 		mov	r3, r2
 		mov	r2, r1
 		b	sys_truncate64
+ENDPROC(sys_oabi_truncate64)
 
 sys_oabi_ftruncate64:
 		mov	r3, r2
 		mov	r2, r1
 		b	sys_ftruncate64
+ENDPROC(sys_oabi_ftruncate64)
 
 sys_oabi_readahead:
 		str	r3, [sp]
 		mov	r3, r2
 		mov	r2, r1
 		b	sys_readahead
+ENDPROC(sys_oabi_readahead)
 
 /*
  * Let's declare a second syscall table for old ABI binaries
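
Editor's notes on the changes above (illustrative sketches, not part of the patch).

The NR_syscalls bookkeeping added before the ftrace block works by including calls.S twice under two different definitions of CALL(): once to count entries, once to emit them, with the new .ifne guard catching any drift against __NR_syscalls. A minimal standalone sketch of the trick, with two hypothetical entries standing in for calls.S:

	@ Pass 1: CALL(x) only bumps an assembler symbol, yielding the count.
		.equ	NR_syscalls_sketch, 0
	#define CALL(x) .equ NR_syscalls_sketch, NR_syscalls_sketch + 1
	CALL(sys_restart_syscall)
	CALL(sys_exit)
	#undef CALL

	@ Pass 2: CALL(x) emits a pointer, building the actual table. Because
	@ the same list is expanded both times, count and table size agree.
	#define CALL(x) .long x
	sys_call_table_sketch:
	CALL(sys_restart_syscall)
	CALL(sys_exit)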
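The mcount comment block in the patch describes two call-site shapes. As a sketch (the instrumented function below is hypothetical), this is roughly what gcc emits in each case and where the two addresses the handlers need end up:

	@ Pre-4.4 gcc with -pg and an APCS frame: mcount finds the instrumented
	@ function in lr and its caller at [fp, #-4] (the saved lr slot), which
	@ is why this variant requires CONFIG_FRAME_POINTER.
	my_traced_func:
		mov	ip, sp
		push	{fp, ip, lr, pc}
		sub	fp, ip, #4		@ [fp, #-4] = caller's lr
		bl	mcount

	@ gcc 4.4+ with -pg: the call site pushes lr itself, so __gnu_mcount_nc
	@ finds the caller on the stack and needs no frame pointer. Dynamic
	@ ftrace patches the bl to a "pop {lr}" to disable tracing.
	my_traced_func_eabi:
		push	{lr}
		bl	__gnu_mcount_nc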
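The scno extraction in vector_swi differs by ABI because user space encodes the syscall number differently. A sketch of the same getpid() invocation under each convention (numbers shown for illustration; see asm/unistd.h):

	@ EABI: the number goes in r7 and the svc immediate is always zero,
	@ so the kernel reads r7 and never touches the instruction word.
		mov	r7, #20			@ __NR_getpid
		svc	#0

	@ OABI: the number is encoded in the swi immediate itself, so
	@ vector_swi must load the instruction at [lr, #-4] and mask it; the
	@ USER() annotation lets that load fault safely (9001 fixup) if the
	@ text page has been evicted.
		swi	#0x900014		@ __NR_SYSCALL_BASE (0x900000) + 20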
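The sys_oabi_* thunks exist because EABI aligns 64-bit arguments to an even/odd register pair while OABI packs them into the next free registers. A sketch of the shuffle for truncate64(path, length), mirroring the wrapper in the patch (word order assumes little-endian, low word in the lower register):

	@ OABI caller:  r0 = path, r1 = length[31:0], r2 = length[63:32]
	@ EABI callee:  r0 = path, r1 = padding, r2/r3 = length (aligned pair)
		mov	r3, r2		@ move the high word first so it isn't clobbered
		mov	r2, r1		@ low word into the even register
		b	sys_truncate64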
