/* arch/arm26/kernel/entry.S
*
* Assembled from chunks of code in arch/arm
*
* Copyright (C) 2003 Ian Molton
* Based on the work of RMK.
*
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm_offsets.h>
#include <asm/errno.h>
#include <asm/hardware.h>
#include <asm/sysirq.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@ zero_fp: clear the frame pointer so stack backtraces terminate
@ cleanly at this frame.  Compiled out when frame pointers are
@ disabled in the kernel config.
.macro zero_fp
#ifndef CONFIG_NO_FRAME_POINTER
mov fp, #0
#endif
.endm
.text
@ Bad Abort numbers
@ -----------------
@ Indexes passed to the bad-exception handlers to identify which
@ trap was taken.
@
#define BAD_PREFETCH 0
#define BAD_DATA 1
#define BAD_ADDREXCPTN 2
#define BAD_IRQ 3
#define BAD_UNDEFINSTR 4
@ OS version number used in SWIs
@ RISC OS is 0
@ RISC iX is 8
@
#define OS_NUMBER 9
#define ARMSWI_OFFSET 0x000f0000
@
@ Stack format (ensured by USER_* and SVC_*)
@ PSR and PC are combined on arm26: in 26-bit mode the status/mode
@ bits live in the top and bottom bits of r15, so a single frame
@ slot (S_PC) holds both.
@
@ Byte offsets of each saved register within the exception frame.
@ S_OFF is the 8 extra bytes (user PC + saved r0) that sit above
@ the 15-word r0-lr block pushed by save_user_regs.
@
#define S_OFF 8
#define S_OLD_R0 64
#define S_PC 60
#define S_LR 56
#define S_SP 52
#define S_IP 48
#define S_FP 44
#define S_R10 40
#define S_R9 36
#define S_R8 32
#define S_R7 28
#define S_R6 24
#define S_R5 20
#define S_R4 16
#define S_R3 12
#define S_R2 8
#define S_R1 4
#define S_R0 0
@ save_user_regs: build a full user-mode register frame on the SVC
@ stack.  On entry r0 is the (to-be-clobbered) SVC r0 and lr holds
@ the user PC with the PSR bits combined (arm26).
@ Resulting layout matches the S_* offsets above.
.macro save_user_regs
str r0, [sp, #-4]! @ Store SVC r0 (S_OLD_R0 slot)
str lr, [sp, #-4]! @ Store user mode PC (+PSR, S_PC slot)
sub sp, sp, #15*4 @ room for r0-lr (15 words)
stmia sp, {r0 - lr}^ @ Store the other user-mode regs (^ = user bank)
mov r0, r0 @ NOP: no banked-register access directly after STM^
.endm
@ slow_restore_user_regs: unwind a complete user frame and return to
@ user space.  Restores every user register including r0 (used when
@ signal handling / work-pending paths may have changed the frame).
.macro slow_restore_user_regs
ldmia sp, {r0 - lr}^ @ restore the user regs not including PC (^ = user bank)
mov r0, r0 @ NOP: gap required after user-bank LDM
ldr lr, [sp, #15*4] @ get user PC (+PSR, from the S_PC slot)
add sp, sp, #15*4+8 @ free stack (15 regs + PC + old r0)
movs pc, lr @ return; movs restores PSR from the combined word
.endm
@ fast_restore_user_regs: fast syscall exit.  r0 already holds the
@ syscall return value in its frame slot and is NOT reloaded.
@ Assumes sp sits S_OFF bytes below the frame base on entry, as set
@ up by the syscall entry path — TODO confirm against entry code.
.macro fast_restore_user_regs
add sp, sp, #S_OFF @ step up to the S_R0 slot
ldmib sp, {r1 - lr}^ @ restore r1-lr from S_R1 up (r0 skipped; ^ = user bank)
mov r0, r0 @ NOP: gap required after user-bank LDM
ldr lr, [sp, #15*4] @ user PC (+PSR) from the S_PC slot
add sp, sp, #15*4+8 @ free the remainder of the frame
movs pc, lr @ return; movs restores PSR from the combined word
.endm
@ save_svc_regs: build an exception frame for a trap taken while in
@ SVC mode.  lr holds the return/faulting PC (+PSR on arm26).
@ Frame matches the S_* offsets: r0-r12 at 0..48, then sp/lr/pc/old_r0.
.macro save_svc_regs
str sp, [sp, #-16]! @ reserve sp/lr/pc/old_r0 slots; saved sp lands at S_SP
str lr, [sp, #8] @ S_PC: return address of the trap
str lr, [sp, #4] @ S_LR
stmfd sp!, {r0 - r12} @ S_R0..S_IP
mov r0, #-1
str r0, [sp, #S_OLD_R0] @ old_r0 = -1: not a syscall (no restart)
zero_fp
.endm
@ save_svc_regs_irq: as save_svc_regs, but for IRQ entry — the
@ interrupted PC is fetched from the word .LCirq points at
@ (presumably where the IRQ vector stub saved it; .LCirq is defined
@ elsewhere in this file — verify against the IRQ entry code).
.macro save_svc_regs_irq
str sp, [sp, #-16]! @ reserve sp/lr/pc/old_r0 slots; saved sp lands at S_SP
str lr, [sp, #4] @ S_LR
ldr lr, .LCirq @ lr = &saved interrupted PC
ldr lr, [lr] @ lr = interrupted PC (+PSR)
str lr, [sp, #8] @ S_PC
stmfd sp!, {r0 - r12} @ S_R0..S_IP
mov r0, #-1
str r0, [sp, #S_OLD_R0] @ old_r0 = -1: not a syscall (no restart)
zero_fp
.endm
@ restore_svc_regs: pop a complete SVC-mode frame and resume.  No
@ writeback on the LDM: sp itself is reloaded from the frame.  The ^
@ with pc in the register list restores the PSR bits as well (arm26).
.macro restore_svc_regs
ldmfd sp, {r0 - pc}^
.endm
@ mask_pc: \rd = \rm with the PSR/mode bits cleared, i.e. extract a
@ plain 26-bit address from a combined PC+PSR word.
.macro mask_pc, rd, rm
bic \rd, \rm, #PCMASK
.endm
@ disable_irqs: set the I (IRQ-disable) bit in the PSR.  On arm26 the
@ PSR lives in r15, so read pc, OR in PSR_I_BIT, and write the result
@ back to the PSR with teqp.  Clobbers \temp.
.macro disable_irqs, temp
mov \temp, pc
orr \temp, \temp, #PSR_I_BIT
teqp \temp, #0
.endm
@ enable_irqs: clear the I (IRQ-disable) bit in the PSR.  On arm26 the
@ PSR lives in r15, so read pc, clear PSR_I_BIT, and write the result
@ back to the PSR with teqp.  Clobbers \temp.
.macro enable_irqs, temp
mov \temp, pc
bic \temp, \temp, #PSR_I_BIT @ was "and #~PSR_I_BIT": the complemented
                             @ constant is not an encodable ARM
                             @ immediate and only assembled via gas's
                             @ silent and->bic rewrite; say bic
                             @ explicitly (matches mask_pc above)
teqp \temp, #0
.endm
@ initialise_traps_extra: hook for additional per-arch trap setup;
@ intentionally empty on arm26.
.macro initialise_traps_extra
.endm
@ get_thread_info: \rd = current thread_info pointer.  The kernel
@ stack is 8K (2^13) aligned with thread_info at its base, so
@ rounding sp down to an 8K boundary (shift right then left by 13)
@ yields the thread_info address.
.macro get_thread_info, rd
mov \rd, sp, lsr #13
mov \rd, \rd, lsl #13
.endm
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
*
* Note that tbl == why is intentional.
*
* We must set at least "tsk" and "why" when calling ret_with_reschedule.
*/
scno .req r7 @ syscall number
tbl .req r8 @ syscall table pointer
why .req r8 @ Linux syscall (!= 0)
tsk .req r9 @ current thread_info
/*
 * Get the system call number.  On entry lr is the user PC (+PSR)
 * just past the SWI; the number is encoded in the SWI instruction
 * itself, one word back.
 */
.macro get_scno
mask_pc lr, lr @ strip PSR bits so lr is a plain address
ldr scno, [lr, #-4] @ get SWI instruction (comment field = number)
.endm
/*
* -----------------------------------------------------------------------
*/
/*
 * We rely on the fact that R0 is at the bottom of the stack (due to
 * slow/fast restore user regs): the fast return path stores the
 * syscall return value straight through sp.  Fail the build if the
 * frame layout ever changes.
 */
#if S_R0 != 0
#error "Please fix"
#endif
/*
 * This is the fast syscall return path. We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.  Entered with tsk (r9) = current thread_info, set up by
 * the syscall entry path.
 */
ret_fast_syscall:
disable_irqs r1 @ disable interrupts so the flags test is atomic
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK @ pending work (signal/resched/trace)?
bne fast_work_pending @ yes: take the slow path
fast_restore_user_regs @ no: straight back to user space
/*
* Ok, we need to do extra processing, enter the slow path.
*/
fast_work_pending:
str