Diffstat (limited to 'arch/frv/kernel/entry.S')
 -rw-r--r--  arch/frv/kernel/entry.S | 249
 1 file changed, 170 insertions(+), 79 deletions(-)
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 5f6548388b7..dfcd263c051 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -27,8 +27,6 @@
  *
  */
-#include <linux/sys.h>
-#include <linux/config.h>
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/setup.h>
@@ -40,7 +38,7 @@
 
 #define nr_syscalls ((syscall_table_size)/4)
 
-	.text
+	.section	.text..entry
 	.balign		4
 
 	.macro LEDS val
@@ -141,7 +139,10 @@ __entry_uspace_external_interrupt_reentry:
 	movsg		gner0,gr4
 	movsg		gner1,gr5
-	stdi		gr4,@(gr28,#REG_GNER0)
+	stdi.p		gr4,@(gr28,#REG_GNER0)
+
+	# interrupts start off fully disabled in the interrupt handler
+	subcc		gr0,gr0,gr0,icc2		/* set Z and clear C */
 
 	# set up kernel global registers
 	sethi.p		%hi(__kernel_current_task),gr5
@@ -193,9 +194,8 @@ __entry_uspace_external_interrupt_reentry:
 	.type		__entry_kernel_external_interrupt,@function
 __entry_kernel_external_interrupt:
 	LEDS		0x6210
-
-	sub		sp,gr15,gr31
-	LEDS32
+//	sub		sp,gr15,gr31
+//	LEDS32
 
 	# set up the stack pointer
 	or.p		sp,gr0,gr30
@@ -231,7 +231,10 @@ __entry_kernel_external_interrupt_reentry:
 	stdi		gr24,@(gr28,#REG_GR(24))
 	stdi		gr26,@(gr28,#REG_GR(26))
 	sti		gr29,@(gr28,#REG_GR(29))
-	stdi		gr30,@(gr28,#REG_GR(30))
+	stdi.p		gr30,@(gr28,#REG_GR(30))
+
+	# note virtual interrupts will be fully enabled upon return
+	subicc		gr0,#1,gr0,icc2			/* clear Z, set C */
 
 	movsg		tbr ,gr20
 	movsg		psr ,gr22
@@ -250,7 +253,7 @@ __entry_kernel_external_interrupt_reentry:
 	andi.p		gr5,#~PSR_ET,gr5
 
 	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
-	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
+	# - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
 	andi		gr25,#~0xc0,gr25
 
 	sti		gr20,@(gr28,#REG_TBR)
@@ -267,7 +270,10 @@ __entry_kernel_external_interrupt_reentry:
 	movsg		gner0,gr4
 	movsg		gner1,gr5
-	stdi		gr4,@(gr28,#REG_GNER0)
+	stdi.p		gr4,@(gr28,#REG_GNER0)
+
+	# interrupts start off fully disabled in the interrupt handler
+	subcc		gr0,gr0,gr0,icc2		/* set Z and clear C */
 
 	# set the return address
 	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
@@ -291,6 +297,45 @@ __entry_kernel_external_interrupt_reentry:
 
 	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
 
+###############################################################################
+#
+# deal with interrupts that were actually virtually disabled
+# - we need to really disable them, flag the fact and return immediately
+# - if you change this, you must alter break.S also
+#
+###############################################################################
+	.balign		L1_CACHE_BYTES
+	.globl		__entry_kernel_external_interrupt_virtually_disabled
+	.type		__entry_kernel_external_interrupt_virtually_disabled,@function
+__entry_kernel_external_interrupt_virtually_disabled:
+	movsg		psr,gr30
+	andi		gr30,#~PSR_PIL,gr30
+	ori		gr30,#PSR_PIL_14,gr30		; debugging interrupts only
+	movgs		gr30,psr
+	subcc		gr0,gr0,gr0,icc2		; leave Z set, clear C
+	rett		#0
+
+	.size		__entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
+
+###############################################################################
+#
+# deal with re-enablement of interrupts that were pending when virtually re-enabled
+# - set ICC2.C, re-enable the real interrupts and return
+# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
+# - if you change this, you must alter break.S also
+#
+###############################################################################
+	.balign		L1_CACHE_BYTES
+	.globl		__entry_kernel_external_interrupt_virtual_reenable
+	.type		__entry_kernel_external_interrupt_virtual_reenable,@function
+__entry_kernel_external_interrupt_virtual_reenable:
+	movsg		psr,gr30
+	andi		gr30,#~PSR_PIL,gr30		; re-enable interrupts
+	movgs		gr30,psr
+	subicc		gr0,#1,gr0,icc2			; clear Z, set C
+	rett		#0
+
+	.size		__entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
+
 ###############################################################################
 #
@@ -335,6 +380,7 @@ __entry_uspace_softprog_interrupt_reentry:
 	sethi.p		%hi(__entry_return_from_user_exception),gr23
 	setlo		%lo(__entry_return_from_user_exception),gr23
+
 	bra		__entry_common
 
 	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
@@ -399,7 +445,7 @@ __entry_kernel_softprog_interrupt_reentry:
 	sti		gr22,@(sp,#REG_SP)
 
 	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
-	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
+	# - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
 	movsg		cccr,gr20
 	andi		gr20,#~0xc0,gr20
 	movgs		gr20,cccr
@@ -495,7 +541,10 @@ __entry_common:
 	movsg		gner0,gr4
 	movsg		gner1,gr5
-	stdi		gr4,@(gr28,#REG_GNER0)
+	stdi.p		gr4,@(gr28,#REG_GNER0)
+
+	# set up virtual interrupt disablement
+	subicc		gr0,#1,gr0,icc2			/* clear Z flag, set C flag */
 
 	# set up kernel global registers
 	sethi.p		%hi(__kernel_current_task),gr5
@@ -607,6 +656,26 @@ __entry_debug_exception:
 
 ###############################################################################
 #
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+	.globl		__entry_atomic_op
+__entry_atomic_op:
+	LEDS		0x6012
+	sethi.p		%hi(atomic_operation),gr5
+	setlo		%lo(atomic_operation),gr5
+	movsg		esfr1,gr8
+	movsg		epcr0,gr9
+	movsg		esr0,gr10
+
+	# now that we've accessed the exception regs, we can enable exceptions
+	movsg		psr,gr4
+	ori		gr4,#PSR_ET,gr4
+	movgs		gr4,psr
+	jmpl		@(gr5,gr0)	; call atomic_operation(esfr1,epcr0,esr0)
+
+###############################################################################
+#
 # handle media exception
 #
 ###############################################################################
@@ -794,6 +863,14 @@ ret_from_fork:
 	setlos.p	#0,gr8
 	bra		__syscall_exit
 
+	.globl		ret_from_kernel_thread
+ret_from_kernel_thread:
+	lddi.p		@(gr28,#REG_GR(8)),gr20
+	call		schedule_tail
+	calll.p		@(gr21,gr0)
+	or		gr20,gr20,gr8
+	bra		__syscall_exit
+
 ###################################################################################################
 #
 # Return to user mode is not as complex as all this looks,
@@ -817,7 +894,6 @@ system_call:
 	bnc		icc0,#0,__syscall_badsys
 
 	ldi		@(gr15,#TI_FLAGS),gr4
-	ori		gr4,#_TIF_SYSCALL_TRACE,gr4
 	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
 	bne		icc0,#0,__syscall_trace_entry
@@ -837,18 +913,19 @@ __syscall_call:
 __syscall_exit:
 	LEDS		0x6300
 
-	sti		gr8,@(gr28,#REG_GR(8))	; save return value
+	# keep current PSR in GR23
+	movsg		psr,gr23
 
-	# rebuild saved psr - execve will change it for init/main.c
 	ldi		@(gr28,#REG_PSR),gr22
+
+	sti.p		gr8,@(gr28,#REG_GR(8))	; save return value
+
+	# rebuild saved psr - execve will change it for init/main.c
 	srli		gr22,#1,gr5
 	andi.p		gr22,#~PSR_PS,gr22
 	andi		gr5,#PSR_PS,gr5
 	or		gr5,gr22,gr22
-	ori		gr22,#PSR_S,gr22
-
-	# keep current PSR in GR23
-	movsg		psr,gr23
+	ori.p		gr22,#PSR_S,gr22
 
 	# make sure we don't miss an interrupt setting need_resched or sigpending between
 	# sampling and the RETT
@@ -856,9 +933,7 @@ __syscall_exit:
 	movgs		gr23,psr
 
 	ldi		@(gr15,#TI_FLAGS),gr4
-	sethi.p		%hi(_TIF_ALLWORK_MASK),gr5
-	setlo		%lo(_TIF_ALLWORK_MASK),gr5
-	andcc		gr4,gr5,gr0,icc0
+	andicc		gr4,#_TIF_ALLWORK_MASK,gr0,icc0
 	bne		icc0,#0,__syscall_exit_work
 
 	# restore all registers and return
@@ -1000,27 +1075,10 @@ __entry_return_from_kernel_interrupt:
 	subicc		gr5,#0,gr0,icc0
 	beq		icc0,#0,__entry_return_direct
 
-__entry_preempt_need_resched:
-	ldi		@(gr15,#TI_FLAGS),gr4
-	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
-	beq		icc0,#1,__entry_return_direct
-
-	setlos		#PREEMPT_ACTIVE,gr5
-	sti		gr5,@(gr15,#TI_FLAGS)
-
-	andi		gr23,#~PSR_PIL,gr23
-	movgs		gr23,psr
-
-	call		schedule
-	sti		gr0,@(gr15,#TI_PRE_COUNT)
-
-	movsg		psr,gr23
-	ori		gr23,#PSR_PIL_14,gr23
-	movgs		gr23,psr
-	bra		__entry_preempt_need_resched
-#else
-	bra		__entry_return_direct
+	subcc		gr0,gr0,gr0,icc2		/* set Z and clear C */
+	call		preempt_schedule_irq
 #endif
+	bra		__entry_return_direct
 
 ###############################################################################
 #
@@ -1043,9 +1101,7 @@ __entry_resume_userspace:
 __entry_return_from_user_interrupt:
 	LEDS		0x6402
 	ldi		@(gr15,#TI_FLAGS),gr4
-	sethi.p		%hi(_TIF_WORK_MASK),gr5
-	setlo		%lo(_TIF_WORK_MASK),gr5
-	andcc		gr4,gr5,gr0,icc0
+	andicc		gr4,#_TIF_WORK_MASK,gr0,icc0
 	beq		icc0,#1,__entry_return_direct
 
 __entry_work_pending:
@@ -1065,9 +1121,7 @@ __entry_work_resched:
 	LEDS		0x6401
 
 	ldi		@(gr15,#TI_FLAGS),gr4
-	sethi.p		%hi(_TIF_WORK_MASK),gr5
-	setlo		%lo(_TIF_WORK_MASK),gr5
-	andcc		gr4,gr5,gr0,icc0
+	andicc		gr4,#_TIF_WORK_MASK,gr0,icc0
 	beq		icc0,#1,__entry_return_direct
 	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
 	bne		icc0,#1,__entry_work_resched
@@ -1081,11 +1135,10 @@ __entry_work_notifysig:
 
 	# perform syscall entry tracing
 __syscall_trace_entry:
 	LEDS		0x6320
-	setlos.p	#0,gr8
-	call		do_syscall_trace
+	call		syscall_trace_entry
 
-	ldi		@(gr28,#REG_SYSCALLNO),gr7
-	lddi		@(gr28,#REG_GR(8)) ,gr8
+	lddi.p		@(gr28,#REG_GR(8)) ,gr8
+	ori		gr8,#0,gr7		; syscall_trace_entry() returned new syscallno
 	lddi		@(gr28,#REG_GR(10)),gr10
 	lddi.p		@(gr28,#REG_GR(12)),gr12
@@ -1096,15 +1149,16 @@
 
 	# perform syscall exit tracing
 __syscall_exit_work:
 	LEDS		0x6340
-	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+	andicc		gr22,#PSR_PS,gr0,icc1		; don't handle on return to kernel mode
+	andicc.p	gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+	bne		icc1,#0,__entry_return_direct
 	beq		icc0,#1,__entry_work_pending
 
 	movsg		psr,gr23
-	andi		gr23,#~PSR_PIL,gr23	; could let do_syscall_trace() call schedule()
+	andi		gr23,#~PSR_PIL,gr23	; could let syscall_trace_exit() call schedule()
 	movgs		gr23,psr
 
-	setlos.p	#1,gr8
-	call		do_syscall_trace
+	call		syscall_trace_exit
 	bra		__entry_resume_userspace
 
 __syscall_badsys:
@@ -1119,12 +1173,6 @@
 # syscall vector table
 #
 ###############################################################################
-#ifdef CONFIG_MMU
-#define __MMU(X) X
-#else
-#define __MMU(X) sys_ni_syscall
-#endif
-
 	.section .rodata
 	ALIGN
 	.globl		sys_call_table
@@ -1254,7 +1302,7 @@ sys_call_table:
 	.long sys_newuname
 	.long sys_ni_syscall	/* old "cacheflush" */
 	.long sys_adjtimex
-	.long __MMU(sys_mprotect)	/* 125 */
+	.long sys_mprotect	/* 125 */
 	.long sys_sigprocmask
 	.long sys_ni_syscall	/* old "create_module" */
 	.long sys_init_module
@@ -1273,16 +1321,16 @@ sys_call_table:
 	.long sys_getdents
 	.long sys_select
 	.long sys_flock
-	.long __MMU(sys_msync)
+	.long sys_msync
 	.long sys_readv		/* 145 */
 	.long sys_writev
 	.long sys_getsid
 	.long sys_fdatasync
 	.long sys_sysctl
-	.long __MMU(sys_mlock)		/* 150 */
-	.long __MMU(sys_munlock)
-	.long __MMU(sys_mlockall)
-	.long __MMU(sys_munlockall)
+	.long sys_mlock		/* 150 */
+	.long sys_munlock
+	.long sys_mlockall
+	.long sys_munlockall
 	.long sys_sched_setparam
 	.long sys_sched_getparam	/* 155 */
 	.long sys_sched_setscheduler
@@ -1292,13 +1340,13 @@ sys_call_table:
 	.long sys_sched_get_priority_min	/* 160 */
 	.long sys_sched_rr_get_interval
 	.long sys_nanosleep
-	.long __MMU(sys_mremap)
+	.long sys_mremap
 	.long sys_setresuid16
 	.long sys_getresuid16	/* 165 */
 	.long sys_ni_syscall	/* for vm86 */
 	.long sys_ni_syscall	/* Old sys_query_module */
 	.long sys_poll
-	.long sys_nfsservctl
+	.long sys_ni_syscall	/* Old nfsservctl */
 	.long sys_setresgid16	/* 170 */
 	.long sys_getresgid16
 	.long sys_prctl
@@ -1347,8 +1395,8 @@ sys_call_table:
 	.long sys_setfsuid		/* 215 */
 	.long sys_setfsgid
 	.long sys_pivot_root
-	.long __MMU(sys_mincore)
-	.long __MMU(sys_madvise)
+	.long sys_mincore
+	.long sys_madvise
 	.long sys_getdents64		/* 220 */
 	.long sys_fcntl64
 	.long sys_ni_syscall	/* reserved for TUX */
@@ -1386,7 +1434,7 @@ sys_call_table:
 	.long sys_epoll_create
 	.long sys_epoll_ctl		/* 255 */
 	.long sys_epoll_wait
-	.long __MMU(sys_remap_file_pages)
+	.long sys_remap_file_pages
 	.long sys_set_tid_address
 	.long sys_timer_create
 	.long sys_timer_settime		/* 260 */
@@ -1418,11 +1466,54 @@ sys_call_table:
 	.long sys_add_key
 	.long sys_request_key
 	.long sys_keyctl
-	.long sys_ni_syscall // sys_vperfctr_open
-	.long sys_ni_syscall // sys_vperfctr_control /* 290 */
-	.long sys_ni_syscall // sys_vperfctr_unlink
-	.long sys_ni_syscall // sys_vperfctr_iresume
-	.long sys_ni_syscall // sys_vperfctr_read
-
+	.long sys_ioprio_set
+	.long sys_ioprio_get		/* 290 */
+	.long sys_inotify_init
+	.long sys_inotify_add_watch
+	.long sys_inotify_rm_watch
+	.long sys_migrate_pages
+	.long sys_openat		/* 295 */
+	.long sys_mkdirat
+	.long sys_mknodat
+	.long sys_fchownat
+	.long sys_futimesat
+	.long sys_fstatat64		/* 300 */
+	.long sys_unlinkat
+	.long sys_renameat
+	.long sys_linkat
+	.long sys_symlinkat
+	.long sys_readlinkat		/* 305 */
+	.long sys_fchmodat
+	.long sys_faccessat
+	.long sys_pselect6
+	.long sys_ppoll
+	.long sys_unshare		/* 310 */
+	.long sys_set_robust_list
+	.long sys_get_robust_list
+	.long sys_splice
+	.long sys_sync_file_range
+	.long sys_tee			/* 315 */
+	.long sys_vmsplice
+	.long sys_move_pages
+	.long sys_getcpu
+	.long sys_epoll_pwait
+	.long sys_utimensat		/* 320 */
+	.long sys_signalfd
+	.long sys_timerfd_create
+	.long sys_eventfd
+	.long sys_fallocate
+	.long sys_timerfd_settime	/* 325 */
+	.long sys_timerfd_gettime
+	.long sys_signalfd4
+	.long sys_eventfd2
+	.long sys_epoll_create1
+	.long sys_dup3			/* 330 */
+	.long sys_pipe2
+	.long sys_inotify_init1
+	.long sys_preadv
+	.long sys_pwritev
+	.long sys_rt_tgsigqueueinfo	/* 335 */
+	.long sys_perf_event_open
+	.long sys_setns
 
 syscall_table_size = (. - sys_call_table)
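
The core of this diff is the virtual interrupt disablement scheme: per the comments added above, ICC2.Z is set while interrupts are virtually disabled, and ICC2.C is cleared once an interrupt has forced them to be really disabled (PSR.PIL raised to 14 by __entry_kernel_external_interrupt_virtually_disabled); __entry_kernel_external_interrupt_virtual_reenable undoes that when interrupts are virtually re-enabled. The C fragment below is only a conceptual sketch of that flag protocol under those assumptions, not the kernel's actual asm/irqflags.h inline assembly; the helper names (really_mask_irqs, really_unmask_irqs, handle_interrupt) are hypothetical stand-ins.

/* Conceptual sketch of the ICC2-based virtual interrupt disablement.
 * NOT the real arch/frv code: the booleans model the ICC2.Z and ICC2.C
 * flags, and the helpers are hypothetical stand-ins for PSR.PIL
 * manipulation and the normal interrupt entry path. */
#include <stdbool.h>
#include <stdio.h>

static bool virtually_disabled;        /* models ICC2.Z (set => virtually disabled) */
static bool really_enabled = true;     /* models ICC2.C (clear => really disabled)  */

static void really_mask_irqs(void)   { puts("PSR.PIL -> 14"); }  /* hypothetical */
static void really_unmask_irqs(void) { puts("PSR.PIL -> 0");  }  /* hypothetical */
static void handle_interrupt(void)   { puts("run handler");   }  /* hypothetical */

/* local_irq_disable(): cheap - only sets ICC2.Z, no PSR access */
static void virt_irq_disable(void)
{
	virtually_disabled = true;
}

/* interrupt delivery while ICC2.Z is set takes the
 * __entry_kernel_external_interrupt_virtually_disabled path above */
static void external_interrupt(void)
{
	if (virtually_disabled) {
		really_mask_irqs();        /* really disable */
		really_enabled = false;    /* clear ICC2.C: one is pending */
		return;                    /* rett #0 */
	}
	handle_interrupt();
}

/* local_irq_enable(): clear ICC2.Z; if ICC2.C is clear, take the
 * __entry_kernel_external_interrupt_virtual_reenable path to undo the real masking */
static void virt_irq_enable(void)
{
	virtually_disabled = false;
	if (!really_enabled) {
		really_unmask_irqs();      /* the pending IRQ is then redelivered */
		really_enabled = true;     /* set ICC2.C */
	}
}

int main(void)
{
	virt_irq_disable();
	external_interrupt();   /* deferred: only flags and PSR.PIL change */
	virt_irq_enable();      /* deferred re-enable path runs here */
	return 0;
}

The point of the scheme is that the common local_irq_disable()/local_irq_enable() pair touches only condition flags; the expensive PSR write is paid only when an interrupt actually arrives inside a disabled section.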
