Diffstat (limited to 'arch/sh/kernel/cpu/sh5/entry.S')
-rw-r--r--   arch/sh/kernel/cpu/sh5/entry.S | 254
1 files changed, 78 insertions, 176 deletions
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index ba8750176d9..0c8d0377d40 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -2,7 +2,7 @@
  * arch/sh/kernel/cpu/sh5/entry.S
  *
  * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2004 - 2007  Paul Mundt
+ * Copyright (C) 2004 - 2008  Paul Mundt
  * Copyright (C) 2003, 2004  Richard Curnow
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -10,8 +10,9 @@
  * for more details.
  */
 #include <linux/errno.h>
+#include <linux/init.h>
 #include <linux/sys.h>
-#include <asm/cpu/registers.h>
+#include <cpu/registers.h>
 #include <asm/processor.h>
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
@@ -143,12 +144,22 @@ resvec_save_area:
 trap_jtable:
 	.long	do_exception_error		/* 0x000 */
 	.long	do_exception_error		/* 0x020 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x040 */
 	.long	tlb_miss_store			/* 0x060 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	! ARTIFICIAL pseudo-EXPEVT setting
 	.long	do_debug_interrupt		/* 0x080 */
+#ifdef CONFIG_MMU
 	.long	tlb_miss_load			/* 0x0A0 */
 	.long	tlb_miss_store			/* 0x0C0 */
+#else
+	.long	do_exception_error
+	.long	do_exception_error
+#endif
 	.long	do_address_error_load	/* 0x0E0 */
 	.long	do_address_error_store	/* 0x100 */
 #ifdef CONFIG_SH_FPU
@@ -176,7 +187,7 @@ trap_jtable:
 	.rept 6
 		.long do_exception_error	/* 0x880 - 0x920 */
 	.endr
-	.long do_software_break_point	/* 0x940 */
+	.long breakpoint_trap_handler	/* 0x940 */
 	.long do_exception_error	/* 0x960 */
 	.long do_single_step		/* 0x980 */
 
@@ -185,10 +196,18 @@ trap_jtable:
 	.endr
 	.long	do_IRQ			/* 0xA00 */
 	.long	do_IRQ			/* 0xA20 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xA40 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_IRQ			/* 0xA60 */
 	.long	do_IRQ			/* 0xA80 */
+#ifdef CONFIG_MMU
 	.long	itlb_miss_or_IRQ	/* 0xAA0 */
+#else
+	.long	do_IRQ
+#endif
 	.long	do_exception_error	/* 0xAC0 */
 	.long	do_address_error_exec	/* 0xAE0 */
 	.rept 8
@@ -274,6 +293,7 @@ not_a_tlb_miss:
  * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
  * block making sure the final alignment is correct.
  */
+#ifdef CONFIG_MMU
 tlb_miss:
 	synco	/* TAKum03020 (but probably a good idea anyway.) */
 	putcon	SP, KCR1
@@ -315,7 +335,7 @@ tlb_miss:
 	/* If the fast path handler fixed the fault, just drop through quickly
 	   to the restore code right away to return to the excepting context.
 	 */
-	beqi/u	r2, 0, tr1
+	bnei/u	r2, 0, tr1
 
 fast_tlb_miss_restore:
 	ld.q	SP, SAVED_TR0, r2
@@ -377,6 +397,9 @@ fixup_to_invoke_general_handler:
 	getcon	KCR1, SP
 	pta	handle_exception, tr0
 	blink	tr0, ZERO
+#else /* CONFIG_MMU */
+	.balign 256
+#endif
 
 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
    DOES END UP AT VBR+0x600 */
@@ -789,27 +812,6 @@ no_underflow:
 	! exceptions
 	add	SP, ZERO, r14
 
-#ifdef CONFIG_POOR_MANS_STRACE
-	/* We've pushed all the registers now, so only r2-r4 hold anything
-	 * useful. Move them into callee save registers */
-	or	r2, ZERO, r28
-	or	r3, ZERO, r29
-	or	r4, ZERO, r30
-
-	/* Preserve r2 as the event code */
-	movi	evt_debug, r3
-	ori	r3, 1, r3
-	ptabs	r3, tr0
-
-	or	SP, ZERO, r6
-	getcon	TRA, r5
-	blink	tr0, LINK
-
-	or	r28, ZERO, r2
-	or	r29, ZERO, r3
-	or	r30, ZERO, r4
-#endif
-
 	/* For syscall and debug race condition, get TRA now */
 	getcon	TRA, r5
@@ -864,11 +866,6 @@ no_underflow:
 	 */
 	.global ret_from_irq
 ret_from_irq:
-#ifdef CONFIG_POOR_MANS_STRACE
-	pta	evt_debug_ret_from_irq, tr0
-	ori	SP, 0, r2
-	blink	tr0, LINK
-#endif
 	ld.q	SP, FRAME_S(FSSR), r6
 	shlri	r6, 30, r6
 	andi	r6, 1, r6
@@ -882,12 +879,6 @@ ret_from_irq:
 
 ret_from_exception:
 	preempt_stop()
-#ifdef CONFIG_POOR_MANS_STRACE
-	pta	evt_debug_ret_from_exc, tr0
-	ori	SP, 0, r2
-	blink	tr0, LINK
-#endif
-
 	ld.q	SP, FRAME_S(FSSR), r6
 	shlri	r6, 30, r6
 	andi	r6, 1, r6
@@ -901,6 +892,8 @@ ret_from_exception:
 	blink	tr0, ZERO
 
 resume_kernel:
+	CLI()
+
 	pta	restore_all, tr0
 
 	getcon	KCR0, r6
@@ -917,19 +910,11 @@ need_resched:
 	andi	r7, 0xf0, r7
 	bne	r7, ZERO, tr0
 
-	movi	((PREEMPT_ACTIVE >> 16) & 65535), r8
-	shori	(PREEMPT_ACTIVE & 65535), r8
-	st.l	r6, TI_PRE_COUNT, r8
-
-	STI()
-	movi	schedule, r7
+	movi	preempt_schedule_irq, r7
 	ori	r7, 1, r7
 	ptabs	r7, tr1
 	blink	tr1, LINK
 
-	st.l	r6, TI_PRE_COUNT, ZERO
-	CLI()
-
 	pta	need_resched, tr1
 	blink	tr1, ZERO
 #endif
@@ -948,7 +933,7 @@ ret_with_reschedule:
 
 	pta	restore_all, tr1
 
-	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+	movi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
 	and	r8, r7, r8
 	pta	work_notifysig, tr0
 	bne	r8, ZERO, tr0
@@ -965,11 +950,11 @@ work_resched:
 
 work_notifysig:
 	gettr	tr1, LINK
-	movi	do_signal, r6
+	movi	do_notify_resume, r6
 	ptabs	r6, tr0
 	or	SP, ZERO, r2
-	or	ZERO, ZERO, r3
-	blink	tr0, LINK	    /* Call do_signal(regs, 0), return here */
+	or	r7, ZERO, r3
+	blink	tr0, LINK	    /* Call do_notify_resume(regs, current_thread_info->flags), return here */
 
 restore_all:
 	/* Do prefetches */
@@ -1094,50 +1079,50 @@ restore_all:
  *
  * Kernel TLB fault handlers will get a slightly different interface.
  * (r2)   struct pt_regs *, original register's frame pointer
- * (r3)   writeaccess, whether it's a store fault as opposed to load fault
- * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault
- * (r5)   Effective Address of fault
+ * (r3)   page fault error code (see asm/thread_info.h)
+ * (r4)   Effective Address of fault
  * (LINK) return address
  * (SP)   = r2
  *
  * fpu_error_or_IRQ? is a helper to deflect to the right cause.
  *
  */
+#ifdef CONFIG_MMU
 tlb_miss_load:
 	or	SP, ZERO, r2
 	or	ZERO, ZERO, r3		/* Read */
-	or	ZERO, ZERO, r4		/* Data */
-	getcon	TEA, r5
+	getcon	TEA, r4
 	pta	call_do_page_fault, tr0
 	beq	ZERO, ZERO, tr0
 
 tlb_miss_store:
 	or	SP, ZERO, r2
-	movi	1, r3			/* Write */
-	or	ZERO, ZERO, r4		/* Data */
-	getcon	TEA, r5
+	movi	FAULT_CODE_WRITE, r3	/* Write */
+	getcon	TEA, r4
 	pta	call_do_page_fault, tr0
 	beq	ZERO, ZERO, tr0
 
 itlb_miss_or_IRQ:
 	pta	its_IRQ, tr0
 	beqi/u	r4, EVENT_INTERRUPT, tr0
+
+	/* ITLB miss */
 	or	SP, ZERO, r2
-	or	ZERO, ZERO, r3		/* Read */
-	movi	1, r4			/* Text */
-	getcon	TEA, r5
+	movi	FAULT_CODE_ITLB, r3
+	getcon	TEA, r4
 	/* Fall through */
 
 call_do_page_fault:
 	movi	do_page_fault, r6
 	ptabs	r6, tr0
 	blink	tr0, ZERO
+#endif /* CONFIG_MMU */
 
 fpu_error_or_IRQA:
 	pta	its_IRQ, tr0
 	beqi/l	r4, EVENT_INTERRUPT, tr0
 #ifdef CONFIG_SH_FPU
-	movi	do_fpu_state_restore, r6
+	movi	fpu_state_restore_trap_handler, r6
 #else
 	movi	do_exception_error, r6
 #endif
@@ -1148,7 +1133,7 @@ fpu_error_or_IRQB:
 	pta	its_IRQ, tr0
 	beqi/l	r4, EVENT_INTERRUPT, tr0
 #ifdef CONFIG_SH_FPU
-	movi	do_fpu_state_restore, r6
+	movi	fpu_state_restore_trap_handler, r6
 #else
 	movi	do_exception_error, r6
 #endif
@@ -1217,18 +1202,6 @@ syscall_bad:
 	.global syscall_ret
 syscall_ret:
 	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
-
-#ifdef CONFIG_POOR_MANS_STRACE
-	/* nothing useful in registers at this point */
-
-	movi	evt_debug2, r5
-	ori	r5, 1, r5
-	ptabs	r5, tr0
-	ld.q	SP, FRAME_R(9), r2
-	or	SP, ZERO, r3
-	blink	tr0, LINK
-#endif
-
 	ld.q	SP, FRAME_S(FSPC), r2
 	addi	r2, 4, r2		/* Move PC, being pre-execution event */
 	st.q	SP, FRAME_S(FSPC), r2
@@ -1249,16 +1222,24 @@ ret_from_fork:
 	ptabs	r5, tr0
 	blink	tr0, LINK
 
-#ifdef CONFIG_POOR_MANS_STRACE
-	/* nothing useful in registers at this point */
+	ld.q	SP, FRAME_S(FSPC), r2
+	addi	r2, 4, r2		/* Move PC, being pre-execution event */
+	st.q	SP, FRAME_S(FSPC), r2
+	pta	ret_from_syscall, tr0
+	blink	tr0, ZERO
+
+.global ret_from_kernel_thread
+ret_from_kernel_thread:
 
-	movi	evt_debug2, r5
+	movi	schedule_tail,r5
 	ori	r5, 1, r5
 	ptabs	r5, tr0
-	ld.q	SP, FRAME_R(9), r2
-	or	SP, ZERO, r3
 	blink	tr0, LINK
-#endif
+
+	ld.q	SP, FRAME_R(2), r2
+	ld.q	SP, FRAME_R(3), r3
+	ptabs	r3, tr0
+	blink	tr0, LINK
 
 	ld.q	SP, FRAME_S(FSPC), r2
 	addi	r2, 4, r2		/* Move PC, being pre-execution event */
@@ -1266,8 +1247,6 @@ ret_from_fork:
 	pta	ret_from_syscall, tr0
 	blink	tr0, ZERO
 
-
-
 syscall_allowed:
 	/* Use LINK to deflect the exit point, default is syscall_ret */
 	pta	syscall_ret, tr0
@@ -1276,18 +1255,20 @@ syscall_allowed:
 
 	getcon	KCR0, r2
 	ld.l	r2, TI_FLAGS, r4
-	movi	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
+	movi	_TIF_WORK_SYSCALL_MASK, r6
 	and	r6, r4, r6
 	beq/l	r6, ZERO, tr0
 
 	/* Trace it by calling syscall_trace before and after */
-	movi	syscall_trace, r4
+	movi	do_syscall_trace_enter, r4
 	or	SP, ZERO, r2
-	or	ZERO, ZERO, r3
 	ptabs	r4, tr0
 	blink	tr0, LINK
 
-	/* Reload syscall number as r5 is trashed by syscall_trace */
+	/* Save the retval */
+	st.q	SP, FRAME_R(2), r2
+
+	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
 	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
 	andi	r5, 0x1ff, r5
@@ -1319,9 +1300,8 @@ syscall_ret_trace:
 	/* We get back here only if under trace */
 	st.q	SP, FRAME_R(9), r2	/* Save return value */
 
-	movi	syscall_trace, LINK
+	movi	do_syscall_trace_leave, LINK
 	or	SP, ZERO, r2
-	movi	1, r3
 	ptabs	LINK, tr0
 	blink	tr0, LINK
@@ -1390,8 +1370,8 @@ peek_real_address_q:
 	   r2(out) : result quadword
 
 	   This is provided as a cheapskate way of manipulating device
-	   registers for debugging (to avoid the need to onchip_remap the debug
-	   module, and to avoid the need to onchip_remap the watchpoint
+	   registers for debugging (to avoid the need to ioremap the debug
+	   module, and to avoid the need to ioremap the watchpoint
 	   controller in a way that identity maps sufficient bits to avoid the
 	   SH5-101 cut2 silicon defect).
@@ -1439,8 +1419,8 @@ poke_real_address_q:
 	   r3 : quadword value to write.
 
 	   This is provided as a cheapskate way of manipulating device
-	   registers for debugging (to avoid the need to onchip_remap the debug
-	   module, and to avoid the need to onchip_remap the watchpoint
+	   registers for debugging (to avoid the need to ioremap the debug
+	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
 	   SH5-101 cut2 silicon defect).
@@ -1481,6 +1461,7 @@ poke_real_address_q:
 	ptabs	LINK, tr0
 	blink	tr0, r63
 
+#ifdef CONFIG_MMU
 /*
  * --- User Access Handling Section
  */
@@ -1604,86 +1585,7 @@ ___clear_user_exit:
 	ptabs	LINK, tr0
 	blink	tr0, ZERO
 
-
-/*
- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
- *			   int __count)
- *
- * Inputs:
- * (r2)  target address
- * (r3)  source address
- * (r4)  maximum size in bytes
- *
- * Ouputs:
- * (*r2) copied data
- * (r2)  -EFAULT (in case of faulting)
- *       copied data (otherwise)
- */
-	.global	__strncpy_from_user
-__strncpy_from_user:
-	pta	___strncpy_from_user1, tr0
-	pta	___strncpy_from_user_done, tr1
-	or	r4, ZERO, r5		/* r5 = original count */
-	beq/u	r4, r63, tr1		/* early exit if r4==0 */
-	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
-	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
-
-___strncpy_from_user1:
-	ld.b	r3, 0, r7		/* Fault address: only in reading */
-	st.b	r2, 0, r7
-	addi	r2, 1, r2
-	addi	r3, 1, r3
-	beq/u	ZERO, r7, tr1
-	addi	r4, -1, r4		/* return real number of copied bytes */
-	bne/l	ZERO, r4, tr0
-
-___strncpy_from_user_done:
-	sub	r5, r4, r6		/* If done, return copied */
-
-___strncpy_from_user_exit:
-	or	r6, ZERO, r2
-	ptabs	LINK, tr0
-	blink	tr0, ZERO
-
-/*
- * extern long __strnlen_user(const char *__s, long __n)
- *
- * Inputs:
- * (r2)  source address
- * (r3)  source size in bytes
- *
- * Ouputs:
- * (r2)  -EFAULT (in case of faulting)
- *       string length (otherwise)
- */
-	.global	__strnlen_user
-__strnlen_user:
-	pta	___strnlen_user_set_reply, tr0
-	pta	___strnlen_user1, tr1
-	or	ZERO, ZERO, r5		/* r5 = counter */
-	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
-	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
-	beq	r3, ZERO, tr0
-
-___strnlen_user1:
-	ldx.b	r2, r5, r7		/* Fault address: only in reading */
-	addi	r3, -1, r3		/* No real fixup */
-	addi	r5, 1, r5
-	beq	r3, ZERO, tr0
-	bne	r7, ZERO, tr1
-! The line below used to be active.  This meant led to a junk byte lying between each pair
-! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
-! via the argv and envp arguments to main, it meant the 'flat' representation visible through
-! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
-!	addi	r5, 1, r5		/* Include '\0' */
-
-___strnlen_user_set_reply:
-	or	r5, ZERO, r6		/* If done, return counter */
-
-___strnlen_user_exit:
-	or	r6, ZERO, r2
-	ptabs	LINK, tr0
-	blink	tr0, ZERO
+#endif /* CONFIG_MMU */
 
 /*
  * extern long __get_user_asm_?(void *val, long addr)
@@ -2014,11 +1916,11 @@ sa_default_restorer:
 
 	.global asm_uaccess_start	/* Just a marker */
 asm_uaccess_start:
+#ifdef CONFIG_MMU
 	.long	___copy_user1, ___copy_user_exit
 	.long	___copy_user2, ___copy_user_exit
 	.long	___clear_user1, ___clear_user_exit
-	.long	___strncpy_from_user1, ___strncpy_from_user_exit
-	.long	___strnlen_user1, ___strnlen_user_exit
+#endif
 	.long	___get_user_asm_b1, ___get_user_asm_b_exit
 	.long	___get_user_asm_w1, ___get_user_asm_w_exit
 	.long	___get_user_asm_l1, ___get_user_asm_l_exit
@@ -2035,10 +1937,10 @@ asm_uaccess_end:
 
 
 /*
- * --- .text.init Section
+ * --- .init.text Section
  */
 
-	.section	.text.init, "ax"
+	__INIT
 
 /*
  * void trap_init (void)
