Diffstat (limited to 'arch/parisc/kernel/syscall.S')
-rw-r--r--	arch/parisc/kernel/syscall.S	| 180
1 file changed, 121 insertions(+), 59 deletions(-)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 69b6eebc466..83878601103 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -1,12 +1,35 @@
 /*
  * Linux/PA-RISC Project (http://www.parisc-linux.org/)
  *
- * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * System call entry code / Linux gateway page
+ * Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
  * Licensed under the GNU GPL.
  * thanks to Philipp Rumpf, Mike Shaver and various others
  * sorry about the wall, puffin..
  */
 
+/*
+How does the Linux gateway page on PA-RISC work?
+------------------------------------------------
+The Linux gateway page on PA-RISC is "special".
+It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
+terminology it's Execute, promote to PL0) in the page map.  So anything
+executing on this page executes with kernel level privilege (there's more to it
+than that: to have this happen, you also have to use a branch with a ,gate
+completer to activate the privilege promotion).  The upshot is that everything
+that runs on the gateway page runs at kernel privilege but with the current
+user process address space (although you have access to kernel space via %sr2).
+For the 0x100 syscall entry, we redo the space registers to point to the kernel
+address space (preserving the user address space in %sr3), move to wide mode if
+required, save the user registers and branch into the kernel syscall entry
+point.  For all the other functions, we execute at kernel privilege but don't
+flip address spaces.  The basic upshot of this is that these code snippets are
+executed atomically (because the kernel can't be pre-empted) and they may
+perform architecturally forbidden (to PL3) operations (like setting control
+registers).
+*/
+
+
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
@@ -15,6 +38,7 @@
 #include <asm/thread_info.h>
 #include <asm/assembly.h>
 #include <asm/processor.h>
+#include <asm/cache.h>
 
 #include <linux/linkage.h>
 
@@ -47,18 +71,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
 #define __NR_lws_entries (2)
 
 lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
 
@@ -157,7 +180,7 @@ linux_gateway_entry:
 	STREG	%r26, TASK_PT_GR26(%r1)		/* 1st argument */
 	STREG	%r27, TASK_PT_GR27(%r1)		/* user dp */
 	STREG	%r28, TASK_PT_GR28(%r1)		/* return value 0 */
-	STREG	%r28, TASK_PT_ORIG_R28(%r1)	/* return value 0 (saved for signals) */
+	STREG	%r0, TASK_PT_ORIG_R28(%r1)	/* don't prohibit restarts */
 	STREG	%r29, TASK_PT_GR29(%r1)		/* return value 1 */
 	STREG	%r31, TASK_PT_GR31(%r1)		/* preserve syscall return ptr */
 
@@ -181,9 +204,10 @@ linux_gateway_entry:
 
 	/* Are we being ptraced? */
 	mfctl	%cr30, %r1
-	LDREG	TI_TASK(%r1),%r1
-	ldw	TASK_PTRACE(%r1), %r1
-	bb,<,n	%r1,31,.Ltracesys
+	LDREG	TI_FLAGS(%r1),%r1
+	ldi	_TIF_SYSCALL_TRACE_MASK, %r19
+	and,COND(=)	%r1, %r19, %r0
+	b,n	.Ltracesys
 
 	/* Note!  We cannot use the syscall table that is mapped
 	   nearby since the gateway page is mapped execute-only. */
@@ -288,26 +312,34 @@ tracesys:
 	STREG	%r18,PT_GR18(%r2)
 	/* Finished saving things for the debugger */
 
-	ldil	L%syscall_trace,%r1
+	copy	%r2,%r26
+	ldil	L%do_syscall_trace_enter,%r1
 	ldil	L%tracesys_next,%r2
-	be	R%syscall_trace(%sr7,%r1)
+	be	R%do_syscall_trace_enter(%sr7,%r1)
 	ldo	R%tracesys_next(%r2),%r2
-tracesys_next:	
+tracesys_next:
+	/* do_syscall_trace_enter either returned the syscallno, or -1L,
+	 * so we skip restoring the PT_GR20 below, since we pulled it from
+	 * task->thread.regs.gr[20] above.
+	 */
+	copy	%ret0,%r20
 	ldil	L%sys_call_table,%r1
 	ldo	R%sys_call_table(%r1), %r19
 
 	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
 	LDREG	TI_TASK(%r1), %r1
-	LDREG	TASK_PT_GR20(%r1), %r20
 	LDREG	TASK_PT_GR26(%r1), %r26		/* Restore the users args */
 	LDREG	TASK_PT_GR25(%r1), %r25
 	LDREG	TASK_PT_GR24(%r1), %r24
 	LDREG	TASK_PT_GR23(%r1), %r23
-#ifdef CONFIG_64BIT
 	LDREG	TASK_PT_GR22(%r1), %r22
 	LDREG	TASK_PT_GR21(%r1), %r21
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
+#else
+	stw	%r22, -52(%r30)			/* 5th argument */
+	stw	%r21, -56(%r30)			/* 6th argument */
 #endif
 
 	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
@@ -336,7 +368,8 @@ tracesys_exit:
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
-	bl	syscall_trace, %r2
+	ldo	TASK_REGS(%r1),%r26
+	bl	do_syscall_trace_exit,%r2
 	STREG	%r28,TASK_PT_GR28(%r1)		/* save return value now */
 	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
 	LDREG	TI_TASK(%r1), %r1
@@ -353,29 +386,63 @@ tracesys_exit:
 
 tracesys_sigexit:
 	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
-	LDREG	0(%r1), %r1
+	LDREG	TI_TASK(%r1), %r1
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
-	bl	syscall_trace, %r2
-	nop
+	bl	do_syscall_trace_exit,%r2
+	ldo	TASK_REGS(%r1),%r26
 
 	ldil	L%syscall_exit_rfi,%r1
 	be,n	R%syscall_exit_rfi(%sr7,%r1)
 
 
 	/*********************************************************
-		Light-weight-syscall code
+		32/64-bit Light-Weight-Syscall ABI
+
+		* - Indicates a hint for userspace inline asm
+		implementations.
+
+		Syscall number (caller-saves)
+		- %r20
+		* In asm clobber.
+
+		Argument registers (caller-saves)
+		- %r26, %r25, %r24, %r23, %r22
+		* In asm input.
+
+		Return registers (caller-saves)
+		- %r28 (return), %r21 (errno)
+		* In asm output.
 
-		r20 - lws number
-		r26,r25,r24,r23,r22 - Input registers
-		r28 - Function return register
-		r21 - Error code.
+		Caller-saves registers
+		- %r1, %r27, %r29
+		- %r2 (return pointer)
+		- %r31 (ble link register)
+		* In asm clobber.
 
-		Scracth: Any of the above that aren't being
-		currently used, including r1.
+		Callee-saves registers
+		- %r3-%r18
+		- %r30 (stack pointer)
+		* Not in asm clobber.
 
-		Return pointer: r31 (Not usable)
+		If userspace is 32-bit:
+		Callee-saves registers
+		- %r19 (32-bit PIC register)
+
+		Differences from 32-bit calling convention:
+		- Syscall number in %r20
+		- Additional argument register %r22 (arg4)
+		- Callee-saves %r19.
+
+		If userspace is 64-bit:
+		Callee-saves registers
+		- %r27 (64-bit PIC register)
+
+		Differences from 64-bit calling convention:
+		- Syscall number in %r20
+		- Additional argument register %r22 (arg4)
+		- Callee-saves %r27.
 
 	Error codes returned by entry path:
 
@@ -383,9 +450,6 @@ tracesys_sigexit:
 
 	*********************************************************/
 lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
 #ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -402,7 +466,7 @@ lws_start:
 #endif
 
 	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -433,7 +497,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
 #endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
 
 
@@ -473,7 +537,8 @@ lws_compare_and_swap64:
 	b,n	lws_compare_and_swap
 #else
 	/* If we are not a 64-bit kernel, then we don't
-	 * implement having 64-bit input registers
+	 * have 64-bit input registers, and calling
+	 * the 64-bit LWS CAS returns ENOSYS.
 	 */
 	b,n	lws_exit_nosys
 #endif
@@ -488,7 +553,6 @@ lws_compare_and_swap32:
 #endif
 
 lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -525,14 +589,15 @@ cas_nocontend:
 # endif
 /* ENABLE_LWS_DEBUG */
 
+	rsm	PSW_SM_I, %r0				/* Disable interrupts */
+	/* COW breaks can cause contention on UP systems */
 	LDCW	0(%sr2,%r20), %r28			/* Try to acquire the lock */
 	cmpb,<>,n	%r0, %r28, cas_action		/* Did we get it? */
 cas_wouldblock:
 	ldo	2(%r0), %r28				/* 2nd case */
+	ssm	PSW_SM_I, %r0
 	b	lws_exit				/* Contended... */
 	ldo	-EAGAIN(%r0), %r21			/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 /*
 	prev = *addr;
@@ -557,30 +622,29 @@ cas_action:
 	stw	%r1, 4(%sr2,%r20)
 #endif
 	/* The load and store could fail */
-1:	ldw	0(%sr3,%r26), %r28
+1:	ldw,ma	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
-2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
+2:	stw,ma	%r24, 0(%sr3,%r26)
 	/* Free lock */
-	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+	stw,ma	%r20, 0(%sr2,%r20)
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
+	/* Enable interrupts */
+	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
 	b	lws_exit
 	copy	%r0, %r21
 
 3:
-	/* Error occured on load or store */
-#ifdef CONFIG_SMP
+	/* Error occurred on load or store */
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
+	ssm	PSW_SM_I, %r0
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
 	nop
@@ -591,10 +655,8 @@ cas_action:
 	/* Two exception table entries, one for the load,
 	   the other for the store. Either return -EFAULT.
 	   Each of the entries must be relocated. */
-	.section __ex_table,"aw"
-	ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
-	ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
-	.previous
+	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
 
 
 	/* Make sure nothing else is placed on this page */
@@ -609,7 +671,7 @@ ENTRY(end_linux_gateway_page)
 
 	.section .rodata,"a"
 
-	.align PAGE_SIZE
+	.align 8
 	/* Light-weight-syscall table */
 	/* Start of lws table. */
 ENTRY(lws_table)
@@ -618,29 +680,31 @@ ENTRY(lws_table)
 END(lws_table)
 	/* End of lws table */
 
-	.align PAGE_SIZE
+	.align 8
 ENTRY(sys_call_table)
 #include "syscall_table.S"
 END(sys_call_table)
 
 #ifdef CONFIG_64BIT
-	.align PAGE_SIZE
+	.align 8
 ENTRY(sys_call_table64)
 #define SYSCALL_TABLE_64BIT
 #include "syscall_table.S"
 END(sys_call_table64)
 #endif
 
-#ifdef CONFIG_SMP
 	/* All light-weight-syscall atomic operations
 	 * will use this set of locks
+	 *
+	 * NOTE: The lws_lock_start symbol must be
+	 * at least 16-byte aligned for safe use
+	 * with ldcw.
 	 */
 	.section .data
-	.align	PAGE_SIZE
+	.align	L1_CACHE_BYTES
ENTRY(lws_lock_start)
 	/* lws locks */
-	.align 16
 	.rept 16
 	/* Keep locks aligned at 16-bytes */
 	.word 1
@@ -650,8 +714,6 @@ ENTRY(lws_lock_start)
 	.endr
 END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
 .end
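
To make the gateway-page convention described in the new header comment concrete: the sketch below shows one way 32-bit userspace can enter the kernel through the 0x100 entry. It is a minimal illustration, not glibc's actual wrapper; the register roles (%r20 = syscall number, %r26 = first argument, %r28 = result) come from the patch, while the helper name and the exact clobber list are assumptions. Real wrappers additionally save and restore the PIC register %r19 around the branch and typically load %r20 in the ble delay slot instead of a nop.

	#include <asm/unistd.h>

	/* gateway_syscall1() is a hypothetical helper.  ble branches into
	 * the gateway page through %sr2; the code at 0x100 performs the
	 * actual privilege promotion, and control returns after the delay
	 * slot with the result in %r28. */
	static long gateway_syscall1(unsigned long nr, unsigned long arg0)
	{
		register unsigned long r20 __asm__("r20") = nr;   /* syscall # */
		register unsigned long r26 __asm__("r26") = arg0; /* 1st arg   */
		register long r28 __asm__("r28");                 /* result    */

		__asm__ volatile(
			"ble	0x100(%%sr2, %%r0)\n"
			"\tnop\n"			/* ble delay slot */
			: "=r" (r28)
			: "r" (r20), "r" (r26)
			: "r1", "r2", "r21", "r22", "r23", "r24", "r25",
			  "r29", "r31", "memory");
		return r28;
	}

Called as, say, gateway_syscall1(__NR_close, fd); as with any raw Linux syscall, results in the -4095..-1 range encode a negated errno.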
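The rewritten ABI comment is effectively documentation for userspace inline asm, so a matching sketch of the 0xb0 path may help. This is loosely modeled on how glibc's hppa port drives the LWS entry, not a verbatim copy: LWS number 0 selects the 32-bit CAS, and the clobber list simply follows the "In asm clobber" hints from the comment.

	#include <errno.h>

	/* lws_cas32() is a hypothetical wrapper.  Per the ABI text:
	 * %r20 = LWS number, %r26/%r25/%r24 = address, expected value and
	 * new value, %r28 = value read back, %r21 = 0 or a negated errno. */
	static unsigned int lws_cas32(volatile unsigned int *addr,
				      unsigned int oldval, unsigned int newval)
	{
		register unsigned long r26 __asm__("r26") = (unsigned long)addr;
		register unsigned long r25 __asm__("r25") = oldval;
		register unsigned long r24 __asm__("r24") = newval;
		register unsigned int r28 __asm__("r28");
		register long r21 __asm__("r21");

		do {
			__asm__ volatile(
				"ble	0xb0(%%sr2, %%r0)\n"
				"\tldi	0, %%r20\n"	/* delay slot: LWS number */
				: "=r" (r28), "=r" (r21)
				: "r" (r26), "r" (r25), "r" (r24)
				: "r1", "r2", "r20", "r22", "r23",
				  "r29", "r31", "memory");
		} while (r21 == -EAGAIN);	/* cas_wouldblock: lock contended */

		return r28;	/* equals oldval iff the swap happened */
	}

The swap succeeded exactly when the returned value equals oldval; a production wrapper would also check %r21 for -EFAULT from a faulting ldw or stw.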
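Finally, the rsm/ssm additions and the CONFIG_SMP removal are easier to follow in C. The model below restates what cas_action now does. The bits-4-to-7 address hash comes from unchanged context that this diff does not show, so the indexing, the helper name, and the use of GCC atomic builtins to stand in for ldcw are all illustrative assumptions.

	#include <errno.h>
	#include <stdint.h>

	/* One lock word per address "color", initialized to 1 (.word 1
	 * above): ldcw atomically reads a word and zeroes it, so a
	 * nonzero result means the lock was acquired. */
	static volatile unsigned int lws_lock_model[16] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
	};

	static int model_lws_cas(unsigned int *addr, unsigned int oldval,
				 unsigned int newval, unsigned int *prev)
	{
		volatile unsigned int *lock =
			&lws_lock_model[((uintptr_t)addr >> 4) & 0xf];

		/* rsm PSW_SM_I: the real code turns interrupts off so the
		 * holder cannot be preempted; COW breaks on the user access
		 * can still contend the lock, even on UP. */
		if (__atomic_exchange_n(lock, 0, __ATOMIC_ACQUIRE) == 0)
			return -EAGAIN;		/* cas_wouldblock: user retries */

		*prev = *addr;			/* 1: ldw  (may fault -> -EFAULT) */
		if (*prev == oldval)
			*addr = newval;		/* 2: stw  (may fault -> -EFAULT) */

		__atomic_store_n(lock, 1, __ATOMIC_RELEASE);	/* free lock */
		/* ssm PSW_SM_I: interrupts back on. */
		return 0;
	}

This also shows why the SMP guards could go away: even on UP the lock word is still needed, because a fault taken between the ldw and the stw can let another task run and enter the same LWS path.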
