Diffstat (limited to 'arch/parisc/kernel/syscall.S')
 arch/parisc/kernel/syscall.S | 278 ++++++++++++++++++++++++-----------------
 1 file changed, 148 insertions(+), 130 deletions(-)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index b29b76b42bb..83878601103 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -1,21 +1,46 @@
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
- * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * System call entry code / Linux gateway page
+ * Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
* Licensed under the GNU GPL.
* thanks to Philipp Rumpf, Mike Shaver and various others
* sorry about the wall, puffin..
*/
-#include <linux/config.h> /* for CONFIG_SMP */
+
+/*
+How does the Linux gateway page on PA-RISC work?
+------------------------------------------------
+The Linux gateway page on PA-RISC is "special".
+It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
+terminology it's Execute, promote to PL0) in the page map. So anything
+executing on this page executes with kernel level privilege (there's more to it
+than that: to have this happen, you also have to use a branch with a ,gate
+completer to activate the privilege promotion). The upshot is that everything
+that runs on the gateway page runs at kernel privilege but with the current
+user process address space (although you have access to kernel space via %sr2).
+For the 0x100 syscall entry, we redo the space registers to point to the kernel
+address space (preserving the user address space in %sr3), move to wide mode if
+required, save the user registers and branch into the kernel syscall entry
+point. For all the other functions, we execute at kernel privilege but don't
+flip address spaces. The basic upshot of this is that these code snippets are
+executed atomically (because the kernel can't be pre-empted) and they may
+perform architecturally forbidden (to PL3) operations (like setting control
+registers).
+*/
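
To make the 0x100 entry concrete: a userspace caller reaches it with a plain ble through %sr2, with the syscall number in %r20 and the result returned in %r28. A minimal sketch in C inline asm, modeled on the usual glibc idiom (the wrapper name and the abbreviated clobber list are assumptions, not kernel or glibc source):

	/* Hypothetical sketch: a zero-argument Linux/PA-RISC syscall via
	 * the gateway page. ble branches to offset 0x100 in the space
	 * %sr2 points at (the gateway page); the delay-slot ldi loads
	 * the syscall number into %r20 before the branch takes effect. */
	#include <asm/unistd.h>		/* for __NR_getpid */

	static long gateway_syscall0(void)
	{
		register unsigned long ret __asm__("%r28");

		__asm__ __volatile__(
			"ble	0x100(%%sr2, %%r0)\n\t"
			"ldi	%1, %%r20"	/* delay slot */
			: "=r" (ret)
			: "i" (__NR_getpid)
			: "%r1", "%r2", "%r20", "%r21", "%r22", "%r23",
			  "%r24", "%r25", "%r26", "%r29", "%r31", "memory");

		/* Values near -1 encode -errno, as on other Linux ports. */
		return ret;
	}
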
+
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
+#include <asm/page.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
-
#include <asm/assembly.h>
#include <asm/processor.h>
+#include <asm/cache.h>
+
+#include <linux/linkage.h>
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
@@ -23,29 +48,12 @@
*/
#define KILL_INSN break 0,0
-#ifdef CONFIG_64BIT
- .level 2.0w
-#else
- .level 1.1
-#endif
-
-#ifndef CONFIG_64BIT
- .macro fixup_branch,lbl
- b \lbl
- .endm
-#else
- .macro fixup_branch,lbl
- ldil L%\lbl, %r1
- ldo R%\lbl(%r1), %r1
- bv,n %r0(%r1)
- .endm
-#endif
+ .level LEVEL
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
- .export linux_gateway_page
/* Linux gateway page is aliased to virtual page 0 in the kernel
* address space. Since it is a gateway page it cannot be
@@ -55,26 +63,25 @@
* pointers.
*/
- .align 4096
-linux_gateway_page:
+ .align PAGE_SIZE
+ENTRY(linux_gateway_page)
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
.rept 44
KILL_INSN
.endr
- /* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+ /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)
lws_entry:
- /* Unconditional branch to lws_start, located on the
- same gateway page */
- b,n lws_start
+ gate lws_start, %r0 /* increase privilege */
+ depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
- /* Fill from 0xb4 to 0xe0 */
- .rept 11
+ /* Fill from 0xb8 to 0xe0 */
+ .rept 10
KILL_INSN
.endr
@@ -164,7 +171,7 @@ linux_gateway_entry:
#endif
STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
- STREG %r20, TASK_PT_GR20(%r1)
+ STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
STREG %r21, TASK_PT_GR21(%r1)
STREG %r22, TASK_PT_GR22(%r1)
STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
@@ -173,7 +180,7 @@ linux_gateway_entry:
STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
STREG %r27, TASK_PT_GR27(%r1) /* user dp */
STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
- STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */
+ STREG %r0, TASK_PT_ORIG_R28(%r1) /* don't prohibit restarts */
STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
@@ -197,9 +204,10 @@ linux_gateway_entry:
/* Are we being ptraced? */
mfctl %cr30, %r1
- LDREG TI_TASK(%r1),%r1
- LDREG TASK_PTRACE(%r1), %r1
- bb,<,n %r1,31,.Ltracesys
+ LDREG TI_FLAGS(%r1),%r1
+ ldi _TIF_SYSCALL_TRACE_MASK, %r19
+ and,COND(=) %r1, %r19, %r0
+ b,n .Ltracesys
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
@@ -215,7 +223,7 @@ linux_gateway_entry:
ldil L%sys_call_table, %r1
ldo R%sys_call_table(%r1), %r19
#endif
- comiclr,>>= __NR_Linux_syscalls, %r20, %r0
+ comiclr,>> __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys
LDREGX %r20(%r19), %r19
@@ -304,26 +312,34 @@ tracesys:
STREG %r18,PT_GR18(%r2)
/* Finished saving things for the debugger */
- ldil L%syscall_trace,%r1
+ copy %r2,%r26
+ ldil L%do_syscall_trace_enter,%r1
ldil L%tracesys_next,%r2
- be R%syscall_trace(%sr7,%r1)
+ be R%do_syscall_trace_enter(%sr7,%r1)
ldo R%tracesys_next(%r2),%r2
-tracesys_next:
+tracesys_next:
+ /* do_syscall_trace_enter either returned the syscallno, or -1L,
+ * so we skip restoring the PT_GR20 below, since we pulled it from
+ * task->thread.regs.gr[20] above.
+ */
+ copy %ret0,%r20
ldil L%sys_call_table,%r1
ldo R%sys_call_table(%r1), %r19
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
- LDREG TASK_PT_GR20(%r1), %r20
LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
LDREG TASK_PT_GR23(%r1), %r23
-#ifdef CONFIG_64BIT
LDREG TASK_PT_GR22(%r1), %r22
LDREG TASK_PT_GR21(%r1), %r21
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ stw %r22, -52(%r30) /* 5th argument */
+ stw %r21, -56(%r30) /* 6th argument */
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
@@ -352,7 +368,8 @@ tracesys_exit:
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl syscall_trace, %r2
+ ldo TASK_REGS(%r1),%r26
+ bl do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
@@ -369,29 +386,63 @@ tracesys_exit:
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
- LDREG 0(%r1), %r1
+ LDREG TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl syscall_trace, %r2
- nop
+ bl do_syscall_trace_exit,%r2
+ ldo TASK_REGS(%r1),%r26
ldil L%syscall_exit_rfi,%r1
be,n R%syscall_exit_rfi(%sr7,%r1)
/*********************************************************
- Light-weight-syscall code
+ 32/64-bit Light-Weight-Syscall ABI
+
+ * - Indicates a hint for userspace inline asm
+ implementations.
+
+ Syscall number (caller-saves)
+ - %r20
+ * In asm clobber.
+
+ Argument registers (caller-saves)
+ - %r26, %r25, %r24, %r23, %r22
+ * In asm input.
+
+ Return registers (caller-saves)
+ - %r28 (return), %r21 (errno)
+ * In asm output.
- r20 - lws number
- r26,r25,r24,r23,r22 - Input registers
- r28 - Function return register
- r21 - Error code.
+ Caller-saves registers
+ - %r1, %r27, %r29
+ - %r2 (return pointer)
+ - %r31 (ble link register)
+ * In asm clobber.
- Scracth: Any of the above that aren't being
- currently used, including r1.
+ Callee-saves registers
+ - %r3-%r18
+ - %r30 (stack pointer)
+ * Not in asm clobber.
- Return pointer: r31 (Not usable)
+ If userspace is 32-bit:
+ Callee-saves registers
+ - %r19 (32-bit PIC register)
+
+ Differences from 32-bit calling convention:
+ - Syscall number in %r20
+ - Additional argument register %r22 (arg4)
+ - Callee-saves %r19.
+
+ If userspace is 64-bit:
+ Callee-saves registers
+ - %r27 (64-bit PIC register)
+
+ Differences from 64-bit calling convention:
+ - Syscall number in %r20
+ - Additional argument register %r22 (arg4)
+ - Callee-saves %r27.
Error codes returned by entry path:
@@ -399,9 +450,6 @@ tracesys_sigexit:
*********************************************************/
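
Following that register contract, a 32-bit userspace compare-and-swap call could look like the sketch below (hedged: lws_cas32 is a made-up name, and the clobber list simply mirrors the caller-saves set above; glibc's real implementation differs in detail):

	/* Enter the gateway page at the 0xb0 lws entry; the delay-slot
	 * ldi selects entry 0 of lws_table (compare_and_swap32). On
	 * return %r28 holds the previous value of *mem and %r21 the
	 * status: 0 on success, -EAGAIN if the hashed lock was busy,
	 * -EFAULT on a bad address, -ENOSYS for a bad lws number. */
	static long lws_cas32(unsigned int *mem, unsigned int oldval,
			      unsigned int newval, unsigned int *prev)
	{
		register unsigned long r26 __asm__("%r26") = (unsigned long)mem;
		register unsigned long r25 __asm__("%r25") = oldval;
		register unsigned long r24 __asm__("%r24") = newval;
		register unsigned long ret __asm__("%r28");
		register long err __asm__("%r21");

		__asm__ __volatile__(
			"ble	0xb0(%%sr2, %%r0)\n\t"
			"ldi	0, %%r20"	/* 0 = compare_and_swap32 */
			: "=r" (ret), "=r" (err)
			: "r" (r26), "r" (r25), "r" (r24)
			: "%r1", "%r2", "%r20", "%r22", "%r23",
			  "%r29", "%r31", "memory");

		*prev = (unsigned int)ret;
		return err;
	}

A caller is expected to retry while this returns -EAGAIN; that is what "Spin in userspace" means at cas_wouldblock below.
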
lws_start:
- /* Gate and ensure we return to userspace */
- gate .+8, %r0
- depi 3, 31, 2, %r31 /* Ensure we return to userspace */
#ifdef CONFIG_64BIT
/* FIXME: If we are a 64-bit kernel just
@@ -418,7 +466,7 @@ lws_start:
#endif
/* Is the lws entry number valid? */
- comiclr,>>= __NR_lws_entries, %r20, %r0
+ comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
/* WARNING: Trashing sr2 and sr3 */
@@ -449,7 +497,7 @@ lws_exit:
/* now reset the lowest bit of sp if it was set */
xor %r30,%r1,%r30
#endif
- be,n 0(%sr3, %r31)
+ be,n 0(%sr7, %r31)
@@ -489,7 +537,8 @@ lws_compare_and_swap64:
b,n lws_compare_and_swap
#else
/* If we are not a 64-bit kernel, then we don't
- * implement having 64-bit input registers
+ * have 64-bit input registers, and calling
+ * the 64-bit LWS CAS returns ENOSYS.
*/
b,n lws_exit_nosys
#endif
@@ -504,7 +553,6 @@ lws_compare_and_swap32:
#endif
lws_compare_and_swap:
-#ifdef CONFIG_SMP
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
@@ -518,7 +566,7 @@ lws_compare_and_swap:
shlw %r20, 4, %r20
add %r20, %r28, %r20
-# ifdef ENABLE_LWS_DEBUG
+# if ENABLE_LWS_DEBUG
/*
DEBUG, check for deadlock!
If the thread register values are the same
@@ -527,6 +575,7 @@ lws_compare_and_swap:
We *must* giveup this call and fail.
*/
ldw 4(%sr2,%r20), %r28 /* Load thread register */
+ /* WARNING: If cr27 cycles to the same value we have problems */
mfctl %cr27, %r21 /* Get current thread register */
cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
b lws_exit /* Return error! */
@@ -540,14 +589,15 @@ cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */
- ldcw 0(%sr2,%r20), %r28 /* Try to acquire the lock */
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+ /* COW breaks can cause contention on UP systems */
+ LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
+ ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
-#endif
-/* CONFIG_SMP */
/*
prev = *addr;
@@ -566,36 +616,35 @@ cas_wouldblock:
perspective
*/
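
The numbered labels in cas_action exist because both the user-space load and the store can fault; the exception table entries at the end of the page send either fault to label 3, which frees the lock and returns -EFAULT. Ignoring locking and faults, the operation itself is the familiar CAS, sketched here in C for reference (not kernel code):

	/* What cas_action computes: the previous value always travels
	 * back to userspace in %r28, with %r21 cleared on success. */
	static unsigned int cas_action_semantics(unsigned int *addr,
						 unsigned int old_val,
						 unsigned int new_val)
	{
		unsigned int prev = *addr;	/* label 1: may fault */

		if (prev == old_val)
			*addr = new_val;	/* label 2: may fault */

		return prev;
	}
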
cas_action:
-#if defined CONFIG_SMP && defined ENABLE_LWS_DEBUG
+#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
/* DEBUG */
mfctl %cr27, %r1
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw 0(%sr3,%r26), %r28
+1: ldw,ma 0(%sr3,%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw %r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
+2: stw,ma %r24, 0(%sr3,%r26)
/* Free lock */
- stw %r20, 0(%sr2,%r20)
-# ifdef ENABLE_LWS_DEBUG
+ stw,ma %r20, 0(%sr2,%r20)
+#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
-# endif
#endif
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
3:
- /* Error occured on load or store */
-#ifdef CONFIG_SMP
+ /* Error occurred on load or store */
/* Free lock */
stw %r20, 0(%sr2,%r20)
-# ifdef ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
-# endif
#endif
+ ssm PSW_SM_I, %r0
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
@@ -606,86 +655,56 @@ cas_action:
/* Two exception table entries, one for the load,
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
- .section __ex_table,"aw"
-#ifdef CONFIG_64BIT
- /* Pad the address calculation */
- .word 0,(2b - linux_gateway_page)
- .word 0,(3b - linux_gateway_page)
-#else
- .word (2b - linux_gateway_page)
- .word (3b - linux_gateway_page)
-#endif
- .previous
-
- .section __ex_table,"aw"
-#ifdef CONFIG_64BIT
- /* Pad the address calculation */
- .word 0,(1b - linux_gateway_page)
- .word 0,(3b - linux_gateway_page)
-#else
- .word (1b - linux_gateway_page)
- .word (3b - linux_gateway_page)
-#endif
- .previous
+ ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
-end_compare_and_swap:
/* Make sure nothing else is placed on this page */
- .align 4096
- .export end_linux_gateway_page
-end_linux_gateway_page:
+ .align PAGE_SIZE
+END(linux_gateway_page)
+ENTRY(end_linux_gateway_page)
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
-#ifdef CONFIG_64BIT
- /* FIXME: The code will always be on the gateay page
- and thus it will be on the first 4k, the
- assembler seems to think that the final
- subtraction result is only a word in
- length, so we pad the value.
- */
-#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
-#else
-#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
-#endif
- .align 4096
+#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
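
Because the page is aliased at 0x0, the offset LWS_ENTRY stores is exactly the address the dispatch code must branch to. A small illustrative helper (hypothetical, purely to show the arithmetic):

	#include <stdint.h>

	/* lws_table entries are offsets from linux_gateway_page; with
	 * the page mapped at virtual address 0x0, offset == target. */
	static uintptr_t lws_branch_target(const uintptr_t *table,
					   unsigned int lws_nr)
	{
		const uintptr_t gateway_base = 0x0;	/* aliased mapping */

		return gateway_base + table[lws_nr];
	}
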
+
+ .section .rodata,"a"
+
+ .align 8
/* Light-weight-syscall table */
/* Start of lws table. */
- .export lws_table
-.Llws_table:
-lws_table:
+ENTRY(lws_table)
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
+END(lws_table)
/* End of lws table */
- .align 4096
- .export sys_call_table
-.Lsys_call_table:
-sys_call_table:
+ .align 8
+ENTRY(sys_call_table)
#include "syscall_table.S"
+END(sys_call_table)
#ifdef CONFIG_64BIT
- .align 4096
- .export sys_call_table64
-.Lsys_call_table64:
-sys_call_table64:
+ .align 8
+ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
+END(sys_call_table64)
#endif
-#ifdef CONFIG_SMP
/*
All light-weight-syscall atomic operations
will use this set of locks
+
+ NOTE: The lws_lock_start symbol must be
+ at least 16-byte aligned for safe use
+ with ldcw.
*/
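
A C-level picture of the layout the .rept below produces (the struct and array names are illustrative; the kernel defines this purely in assembly). ldcw atomically loads a word and clears it, so a lock is free while its word is nonzero and held once some CPU has zeroed it; the 16-byte stride satisfies ldcw's alignment requirement:

	struct lws_lock {
		volatile unsigned int lock;	/* 1 = free, 0 = held */
		unsigned int pad[3];		/* keep a 16-byte stride */
	} __attribute__((aligned(16)));

	/* All 16 hash buckets start free (GNU range initializer). */
	static struct lws_lock lws_locks[16] = {
		[0 ... 15] = { .lock = 1 }
	};
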
.section .data
- .align 4096
- .export lws_lock_start
-.Llws_lock_start:
-lws_lock_start:
+ .align L1_CACHE_BYTES
+ENTRY(lws_lock_start)
/* lws locks */
- .align 16
.rept 16
/* Keep locks aligned at 16-bytes */
.word 1
@@ -693,9 +712,8 @@ lws_lock_start:
.word 0
.word 0
.endr
+END(lws_lock_start)
.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
.end