Diffstat (limited to 'arch/ia64/kernel/fsys.S')
-rw-r--r-- | arch/ia64/kernel/fsys.S | 147
1 files changed, 84 insertions, 63 deletions
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 962b6c4e32b..7d7684a369d 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -531,93 +531,114 @@ GLOBAL_ENTRY(fsys_bubble_down)
 	.altrp b6
 	.body
 	/*
-	 * We get here for syscalls that don't have a lightweight handler. For those, we
-	 * need to bubble down into the kernel and that requires setting up a minimal
-	 * pt_regs structure, and initializing the CPU state more or less as if an
-	 * interruption had occurred. To make syscall-restarts work, we setup pt_regs
-	 * such that cr_iip points to the second instruction in syscall_via_break.
-	 * Decrementing the IP hence will restart the syscall via break and not
-	 * decrementing IP will return us to the caller, as usual. Note that we preserve
-	 * the value of psr.pp rather than initializing it from dcr.pp. This makes it
-	 * possible to distinguish fsyscall execution from other privileged execution.
+	 * We get here for syscalls that don't have a lightweight
+	 * handler. For those, we need to bubble down into the kernel
+	 * and that requires setting up a minimal pt_regs structure,
+	 * and initializing the CPU state more or less as if an
+	 * interruption had occurred. To make syscall-restarts work,
+	 * we setup pt_regs such that cr_iip points to the second
+	 * instruction in syscall_via_break. Decrementing the IP
+	 * hence will restart the syscall via break and not
+	 * decrementing IP will return us to the caller, as usual.
+	 * Note that we preserve the value of psr.pp rather than
+	 * initializing it from dcr.pp. This makes it possible to
+	 * distinguish fsyscall execution from other privileged
+	 * execution.
 	 *
 	 * On entry:
-	 *	- normal fsyscall handler register usage, except that we also have:
+	 *	- normal fsyscall handler register usage, except
+	 *	  that we also have:
 	 *	- r18: address of syscall entry point
 	 *	- r21: ar.fpsr
 	 *	- r26: ar.pfs
 	 *	- r27: ar.rsc
 	 *	- r29: psr
+	 *
+	 * We used to clear some PSR bits here but that requires slow
+	 * serialization.  Fortuntely, that isn't really necessary.
+	 * The rationale is as follows: we used to clear bits
+	 * ~PSR_PRESERVED_BITS in PSR.L.  Since
+	 * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we
+	 * ended up clearing PSR.{BE,AC,I,DFL,DFH,DI,DB,SI,TB}.
+	 * However,
+	 *
+	 *  PSR.BE : already is turned off in __kernel_syscall_via_epc()
+	 *  PSR.AC : don't care (kernel normally turns PSR.AC on)
+	 *  PSR.I  : already turned off by the time fsys_bubble_down gets
+	 *	     invoked
+	 *  PSR.DFL: always 0 (kernel never turns it on)
+	 *  PSR.DFH: don't care --- kernel never touches f32-f127 on its own
+	 *	     initiative
+	 *  PSR.DI : always 0 (kernel never turns it on)
+	 *  PSR.SI : always 0 (kernel never turns it on)
+	 *  PSR.DB : don't care --- kernel never enables kernel-level
+	 *	     breakpoints
+	 *  PSR.TB : must be 0 already; if it wasn't zero on entry to
+	 *	     __kernel_syscall_via_epc, the branch to fsys_bubble_down
+	 *	     will trigger a taken branch; the taken-trap-handler then
+	 *	     converts the syscall into a break-based system-call.
 	 */
-#	define PSR_PRESERVED_BITS	(IA64_PSR_UP | IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_PK \
-					 | IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_RT \
-					 | IA64_PSR_IC)
 	/*
-	 * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc. The rest we have
-	 * to synthesize.
+	 * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc.
+	 * The rest we have to synthesize.
 	 */
-#	define PSR_ONE_BITS		((3 << IA64_PSR_CPL0_BIT) | (0x1 << IA64_PSR_RI_BIT) \
+#	define PSR_ONE_BITS		((3 << IA64_PSR_CPL0_BIT)	\
+					 | (0x1 << IA64_PSR_RI_BIT)	\
 					 | IA64_PSR_BN | IA64_PSR_I)
-	invala
-	movl r8=PSR_ONE_BITS
+	invala					// M0|1
+	movl r14=ia64_ret_from_syscall		// X
 
-	mov r25=ar.unat			// save ar.unat (5 cyc)
-	movl r9=PSR_PRESERVED_BITS
+	nop.m 0
+	movl r28=__kernel_syscall_via_break	// X	create cr.iip
+	;;
 
-	mov ar.rsc=0			// set enforced lazy mode, pl 0, little-endian, loadrs=0
-	movl r28=__kernel_syscall_via_break
+	mov r2=r16				// A    get task addr to addl-addressable register
+	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A
+	mov r31=pr				// I0   save pr (2 cyc)
 	;;
-	mov r23=ar.bspstore		// save ar.bspstore (12 cyc)
-	mov r31=pr			// save pr (2 cyc)
-	mov r20=r1			// save caller's gp in r20
+	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
+	addl r22=IA64_RBS_OFFSET,r2		// A    compute base of RBS
+	add r3=TI_FLAGS+IA64_TASK_SIZE,r2	// A
 	;;
-	mov r2=r16			// copy current task addr to addl-addressable register
-	and r9=r9,r29
-	mov r19=b6			// save b6 (2 cyc)
+	ld4 r3=[r3]				// M0|1 r3 = current_thread_info()->flags
+	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch register backing-store
+	nop.i 0
 	;;
-	mov psr.l=r9			// slam the door (17 cyc to srlz.i)
-	or r29=r8,r29			// construct cr.ipsr value to save
-	addl r22=IA64_RBS_OFFSET,r2	// compute base of RBS
+	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0
+	nop.m 0
+	nop.i 0
 	;;
-	// GAS reports a spurious RAW hazard on the read of ar.rnat because it thinks
-	// we may be reading ar.itc after writing to psr.l. Avoid that message with
-	// this directive:
-	dv_serialize_data
-	mov.m r24=ar.rnat		// read ar.rnat (5 cyc lat)
-	lfetch.fault.excl.nt1 [r22]
-	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r2
-
-	// ensure previous insn group is issued before we stall for srlz.i:
+	mov r23=ar.bspstore			// M2 (12 cyc) save ar.bspstore
+	mov.m r24=ar.rnat			// M2 (5 cyc) read ar.rnat (dual-issues!)
+	nop.i 0
 	;;
-	srlz.i				// ensure new psr.l has been established
-	/////////////////////////////////////////////////////////////////////////////
-	////////// from this point on, execution is not interruptible anymore
-	/////////////////////////////////////////////////////////////////////////////
-	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2	// compute base of memory stack
-	cmp.ne pKStk,pUStk=r0,r0	// set pKStk <- 0, pUStk <- 1
+	mov ar.bspstore=r22			// M2 (6 cyc) switch to kernel RBS
+	movl r8=PSR_ONE_BITS			// X
 	;;
-	st1 [r16]=r0			// clear current->thread.on_ustack flag
-	mov ar.bspstore=r22		// switch to kernel RBS
-	mov b6=r18			// copy syscall entry-point to b6 (7 cyc)
-	add r3=TI_FLAGS+IA64_TASK_SIZE,r2
+	mov r25=ar.unat				// M2 (5 cyc) save ar.unat
+	mov r19=b6				// I0   save b6 (2 cyc)
+	mov r20=r1				// A    save caller's gp in r20
 	;;
-	ld4 r3=[r3]				// r2 = current_thread_info()->flags
-	mov r18=ar.bsp			// save (kernel) ar.bsp (12 cyc)
-	mov ar.rsc=0x3			// set eager mode, pl 0, little-endian, loadrs=0
-	br.call.sptk.many b7=ia64_syscall_setup
-	;;
-	ssm psr.i
-	movl r2=ia64_ret_from_syscall
+	or r29=r8,r29				// A    construct cr.ipsr value to save
+	mov b6=r18				// I0   copy syscall entry-point to b6 (7 cyc)
+	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2	// A    compute base of memory stack
+
+	mov r18=ar.bsp				// M2   save (kernel) ar.bsp (12 cyc)
+	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1
+	br.call.sptk.many b7=ia64_syscall_setup	// B
 	;;
-	mov rp=r2				// set the real return addr
-	and r3=_TIF_SYSCALL_TRACEAUDIT,r3
+	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
+	mov rp=r14				// I0   set the real return addr
+	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
 	;;
-	cmp.eq p8,p0=r3,r0
+	ssm psr.i				// M2   we're on kernel stacks now, reenable irqs
+	cmp.eq p8,p0=r3,r0			// A
+(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT
 
-(p10)	br.cond.spnt.many ia64_ret_from_syscall	// p10==true means out registers are more than 8
-(p8)	br.call.sptk.many b6=b6			// ignore this return addr
-	br.cond.sptk ia64_trace_syscall
+	nop.m 0
+(p8)	br.call.sptk.many b6=b6			// B    (ignore return address)
+	br.cond.spnt ia64_trace_syscall		// B
 END(fsys_bubble_down)
 
 	.rodata
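
For reference, the PSR_ONE_BITS constant introduced by the patch is a fixed set of bits OR-ed into the saved cr.ipsr, and the syscall-restart trick described in the comment relies on cr_iip/psr.ri naming the second slot of the bundle at __kernel_syscall_via_break. The sketch below is an illustrative user-space C program, not kernel code: it assumes the conventional IA-64 PSR bit positions (i at bit 14, cpl at bits 32-33, ri at bits 41-42, bn at bit 44) as stand-ins for the IA64_PSR_* constants, and uses a made-up bundle address, purely to show what the constant evaluates to and how backing the IP up by one slot lands on the break instruction.

/* Illustrative sketch only -- not part of the kernel source.  PSR_* names
 * below are stand-ins for the IA64_PSR_* constants used in the patch,
 * assuming the conventional IA-64 PSR layout. */
#include <stdint.h>
#include <stdio.h>

#define PSR_I_BIT	14	/* interrupt enable bit "i"                  */
#define PSR_CPL0_BIT	32	/* current privilege level, 2 bits           */
#define PSR_RI_BIT	41	/* restart instruction (bundle slot), 2 bits */
#define PSR_BN_BIT	44	/* register bank                             */

int main(void)
{
	/* Bits forced to 1 in the saved cr.ipsr when bubbling down:
	 * cpl=3 (user), ri=1 (slot 1), bn=1, i=1. */
	uint64_t psr_one_bits = (3ULL << PSR_CPL0_BIT)
			      | (1ULL << PSR_RI_BIT)
			      | (1ULL << PSR_BN_BIT)
			      | (1ULL << PSR_I_BIT);

	printf("PSR_ONE_BITS = 0x%016llx\n",
	       (unsigned long long)psr_one_bits);

	/* Syscall restart: cr_iip names the bundle holding the break
	 * instruction of __kernel_syscall_via_break, and ri=1 selects its
	 * second slot.  Backing the IP up by one slot therefore re-executes
	 * the break in slot 0 (restarting the syscall); leaving it alone
	 * returns to the caller.  The address below is made up. */
	uint64_t iip = 0xa000000000010620ULL;	/* hypothetical bundle address */
	unsigned ri = 1;			/* slot selected by cr.ipsr.ri */

	if (ri > 0)
		ri--;			/* step back within the same bundle */
	else {
		ri = 2;			/* last slot of the previous bundle */
		iip -= 16;		/* IA-64 bundles are 16 bytes       */
	}
	printf("restart at iip=0x%016llx, slot %u\n",
	       (unsigned long long)iip, ri);
	return 0;
}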