Diffstat (limited to 'arch/sh/kernel/cpu/sh3/entry.S')
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S  674
1 file changed, 247 insertions, 427 deletions
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 8c0dc2700c6..262db6ec067 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -1,8 +1,8 @@
/*
- * arch/sh/kernel/entry.S
+ * arch/sh/kernel/cpu/sh3/entry.S
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2003 - 2006 Paul Mundt
+ * Copyright (C) 2003 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -14,9 +14,10 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
-#include <asm/cpu/mmu_context.h>
-#include <asm/pgtable.h>
+#include <cpu/mmu_context.h>
#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -53,10 +54,6 @@
* syscall #
*
*/
-#if defined(CONFIG_KGDB_NMI)
-NMI_VEC = 0x1c0 ! Must catch early for debounce
-#endif
-
/* Offsets to the stack */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
@@ -71,7 +68,6 @@ OFF_PC = (16*4)
OFF_SR = (16*4+8)
OFF_TRA = (16*4+6*4)
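
The OFF_* constants mirror the layout of the exception frame (struct pt_regs on sh), so the assembly can index saved registers directly. A minimal C sketch of the assumed correspondence, with the field order inferred from the offsets above:

#include <stddef.h>

struct pt_regs_sketch {                 /* hypothetical mirror of sh pt_regs */
        unsigned long regs[16];         /* r0..r15: OFF_R0 = 0, OFF_R1 = 4, ... */
        unsigned long pc;               /* OFF_PC  = 16*4     = 64 */
        unsigned long pr;
        unsigned long sr;               /* OFF_SR  = 16*4+8   = 72 */
        unsigned long gbr;
        unsigned long mach;
        unsigned long macl;
        unsigned long tra;              /* OFF_TRA = 16*4+6*4 = 88 */
};

/* The asm offsets must stay in sync with the struct layout. */
_Static_assert(offsetof(struct pt_regs_sketch, pc)  == 16 * 4,         "OFF_PC");
_Static_assert(offsetof(struct pt_regs_sketch, sr)  == 16 * 4 + 8,     "OFF_SR");
_Static_assert(offsetof(struct pt_regs_sketch, tra) == 16 * 4 + 6 * 4, "OFF_TRA");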
-
#define k0 r0
#define k1 r1
#define k2 r2
@@ -113,40 +109,60 @@ OFF_TRA = (16*4+6*4)
#if defined(CONFIG_MMU)
.align 2
ENTRY(tlb_miss_load)
- bra call_dpf
+ bra call_handle_tlbmiss
mov #0, r5
.align 2
ENTRY(tlb_miss_store)
- bra call_dpf
- mov #1, r5
+ bra call_handle_tlbmiss
+ mov #FAULT_CODE_WRITE, r5
.align 2
ENTRY(initial_page_write)
- bra call_dpf
- mov #1, r5
+ bra call_handle_tlbmiss
+ mov #FAULT_CODE_INITIAL, r5
.align 2
ENTRY(tlb_protection_violation_load)
- bra call_dpf
- mov #0, r5
+ bra call_do_page_fault
+ mov #FAULT_CODE_PROT, r5
.align 2
ENTRY(tlb_protection_violation_store)
- bra call_dpf
- mov #1, r5
+ bra call_do_page_fault
+ mov #(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5
-call_dpf:
+call_handle_tlbmiss:
mov.l 1f, r0
- mov.l @r0, r6 ! address
- mov.l 3f, r0
+ mov r5, r8
+ mov.l @r0, r6
+ mov.l 2f, r0
+ sts pr, r10
+ jsr @r0
+ mov r15, r4
+ !
+ tst r0, r0
+ bf/s 0f
+ lds r10, pr
+ rts
+ nop
+0:
+ mov r8, r5
+call_do_page_fault:
+ mov.l 1f, r0
+ mov.l @r0, r6
+ mov.l 3f, r0
+ mov.l 4f, r1
+ mov r15, r4
jmp @r0
- mov r15, r4 ! regs
+ lds r1, pr
.align 2
1: .long MMU_TEA
+2: .long handle_tlbmiss
3: .long do_page_fault
+4: .long ret_from_exception
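
In C terms, the reworked fast path above behaves roughly as follows. This is a sketch: handle_tlbmiss() and do_page_fault() live on the C side (arch/sh/mm) and their exact signatures may differ, while read_mmu_tea() is a hypothetical accessor standing in for the inline MMU_TEA read.

struct pt_regs;
extern int  handle_tlbmiss(struct pt_regs *, unsigned long, unsigned long);
extern void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern unsigned long read_mmu_tea(void);        /* hypothetical: mov.l @MMU_TEA */

static void tlb_miss_dispatch(struct pt_regs *regs, unsigned long fault_code)
{
        unsigned long address = read_mmu_tea();

        /* Fast path: try to refill the TLB straight from the page tables. */
        if (handle_tlbmiss(regs, fault_code, address) == 0)
                return;                 /* rts, then rte to retry the access */

        /* Slow path: full fault handling; returns via ret_from_exception. */
        do_page_fault(regs, fault_code, address);
}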
.align 2
ENTRY(address_error_load)
@@ -173,45 +189,36 @@ call_dae:
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
-debug_kernel_fw:
- mov.l @r15+, r0
- mov.l @r15+, r1
- mov.l @r15+, r2
- mov.l @r15+, r3
- mov.l @r15+, r4
- mov.l @r15+, r5
- mov.l @r15+, r6
- mov.l @r15+, r7
- stc sr, r8
- mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F
- or r9, r8
- ldc r8, sr ! here, change the register bank
- mov.l @r15+, r8
- mov.l @r15+, r9
- mov.l @r15+, r10
- mov.l @r15+, r11
- mov.l @r15+, r12
- mov.l @r15+, r13
- mov.l @r15+, r14
- mov.l @r15+, k0
- ldc.l @r15+, spc
- lds.l @r15+, pr
- mov.l @r15+, k1
- ldc.l @r15+, gbr
- lds.l @r15+, mach
- lds.l @r15+, macl
- mov k0, r15
+ENTRY(sh_bios_handler)
+ mov.l 1f, r8
+ bsr restore_regs
+ nop
+
+ lds k2, pr ! restore pr
+ mov k4, r15
!
mov.l 2f, k0
mov.l @k0, k0
jmp @k0
- ldc k1, ssr
+ ldc k3, ssr
.align 2
1: .long 0x300000f0
2: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
-restore_all:
+! restore_regs()
+! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
+! - switch bank
+! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
+! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
+! k2 returns original pr
+! k3 returns original sr
+! k4 returns original stack pointer
+! r8 passes SR bitmask, overwritten with restored data on return
+! r9 trashed
+! BL=0 on entry, on exit BL=1 (depending on r8).
+
+ENTRY(restore_regs)
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
@@ -221,10 +228,9 @@ restore_all:
mov.l @r15+, r6
mov.l @r15+, r7
!
- stc sr, r8
- mov.l 7f, r9
- or r9, r8 ! BL =1, RB=1
- ldc r8, sr ! here, change the register bank
+ stc sr, r9
+ or r8, r9
+ ldc r9, sr
!
mov.l @r15+, r8
mov.l @r15+, r9
@@ -235,53 +241,27 @@ restore_all:
mov.l @r15+, r14
mov.l @r15+, k4 ! original stack pointer
ldc.l @r15+, spc
- lds.l @r15+, pr
+ mov.l @r15+, k2 ! original PR
mov.l @r15+, k3 ! original SR
ldc.l @r15+, gbr
lds.l @r15+, mach
lds.l @r15+, macl
- add #4, r15 ! Skip syscall number
- !
-#ifdef CONFIG_SH_DSP
- mov.l @r15+, k0 ! DSP mode marker
- mov.l 5f, k1
- cmp/eq k0, k1 ! Do we have a DSP stack frame?
- bf skip_restore
-
- stc sr, k0 ! Enable CPU DSP mode
- or k1, k0 ! (within kernel it may be disabled)
- ldc k0, sr
- mov r2, k0 ! Backup r2
-
- ! Restore DSP registers from stack
- mov r15, r2
- movs.l @r2+, a1
- movs.l @r2+, a0g
- movs.l @r2+, a1g
- movs.l @r2+, m0
- movs.l @r2+, m1
- mov r2, r15
-
- lds.l @r15+, a0
- lds.l @r15+, x0
- lds.l @r15+, x1
- lds.l @r15+, y0
- lds.l @r15+, y1
- lds.l @r15+, dsr
- ldc.l @r15+, rs
- ldc.l @r15+, re
- ldc.l @r15+, mod
-
- mov k0, r2 ! Restore r2
-skip_restore:
-#endif
+ rts
+ add #4, r15 ! Skip syscall number
+
+restore_all:
+ mov.l 7f, r8
+ bsr restore_regs
+ nop
+
+ lds k2, pr ! restore pr
!
! Calculate new SR value
mov k3, k2 ! original SR value
- mov #0xf0, k1
+ mov #0xfffffff0, k1
extu.b k1, k1
not k1, k1
- and k1, k2 ! Mask orignal SR value
+ and k1, k2 ! Mask original SR value
!
mov k3, k0 ! Calculate IMASK-bits
shlr2 k0
@@ -294,13 +274,6 @@ skip_restore:
6: or k0, k2 ! Set the IMASK-bits
ldc k2, ssr
!
-#if defined(CONFIG_KGDB_NMI)
- ! Clear in_nmi
- mov.l 6f, k0
- mov #0, k1
- mov.b k1, @k0
-#endif
- mov.l @r15+, k2 ! restore EXPEVT
mov k4, r15
rte
nop
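
Read as C, the SSR recalculation in restore_all does approximately the following. The middle of the sequence is elided by the hunk, so the g_imask fallback below is an assumption about the hidden lines:

static unsigned long recalc_ssr(unsigned long orig_sr, unsigned long g_imask)
{
        unsigned long ssr   = orig_sr & ~0xf0UL;        /* mask out the IMASK field */
        unsigned long imask = orig_sr & 0xf0UL;         /* original IMASK bits      */

        /* Assumption: the elided lines keep a fully-masked IMASK (0xf)
         * as-is and substitute the software mask g_imask otherwise. */
        if (imask != 0xf0UL)
                imask = g_imask;

        return ssr | imask;                             /* 6: or k0, k2; ldc k2, ssr */
}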
@@ -320,227 +293,41 @@ skip_restore:
ENTRY(vbr_base)
.long 0
!
+! 0x100: General exception vector
+!
.balign 256,0,256
general_exception:
- mov.l 1f, k2
- mov.l 2f, k3
bra handle_exception
- mov.l @k2, k2
- .align 2
-1: .long EXPEVT
-2: .long ret_from_exception
-!
-!
-
-/* This code makes some assumptions to improve performance.
- * Make sure they are stil true. */
-#if PTRS_PER_PGD != PTRS_PER_PTE
-#error PGD and PTE sizes don't match
-#endif
-
-/* gas doesn't flag impossible values for mov #immediate as an error */
-#if (_PAGE_PRESENT >> 2) > 0x7f
-#error cannot load PAGE_PRESENT as an immediate
-#endif
-#if _PAGE_DIRTY > 0x7f
-#error cannot load PAGE_DIRTY as an immediate
+ sts pr, k3 ! save original pr value in k3
+
+! prepare_stack()
+! - roll back gRB
+! - switch to kernel stack
+! k0 returns original sp (after roll back)
+! k1 trashed
+! k2 trashed
+
+prepare_stack:
+#ifdef CONFIG_GUSA
+ ! Check for roll back gRB (User and Kernel)
+ mov r15, k0
+ shll k0
+ bf/s 1f
+ shll k0
+ bf/s 1f
+ stc spc, k1
+ stc r0_bank, k0
+ cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0)
+ bt/s 2f
+ stc r1_bank, k1
+
+ add #-2, k0
+ add r15, k0
+ ldc k0, spc ! PC = saved r0 + r15 - 2
+2: mov k1, r15 ! SP = r1
+1:
#endif
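
During a gUSA atomic sequence, user space holds -(sequence size) in r15 (so bits 31 and 30 are both set), the address just past the sequence in r0, and the real stack pointer in r1. A sketch of the rollback test above, in C for readability (the arguments map to the banked register copies the assembly reads):

static void gusa_rollback(unsigned long *spc, unsigned long *r15,
                          unsigned long r0, unsigned long r1)
{
        /* Top two bits of r15 set <=> small negative value <=> inside gUSA. */
        if ((*r15 & 0xc0000000UL) != 0xc0000000UL)
                return;

        if (*spc < r0)                  /* interrupted before the sequence end? */
                *spc = r0 + *r15 - 2;   /* restart at the head of the sequence  */

        *r15 = r1;                      /* recover the real stack pointer */
}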
-#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
-#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
-#endif
-
-#if defined(CONFIG_CPU_SH4)
-#define ldmmupteh(r) mov.l 8f, r
-#else
-#define ldmmupteh(r) mov #MMU_PTEH, r
-#endif
-
- .balign 1024,0,1024
-tlb_miss:
-#ifdef COUNT_EXCEPTIONS
- ! Increment the counts
- mov.l 9f, k1
- mov.l @k1, k2
- add #1, k2
- mov.l k2, @k1
-#endif
-
- ! k0 scratch
- ! k1 pgd and pte pointers
- ! k2 faulting address
- ! k3 pgd and pte index masks
- ! k4 shift
-
- ! Load up the pgd entry (k1)
-
- ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH
-
- mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2
- mov #-(PGDIR_SHIFT-2), k4 ! 6 EX
-
- mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2)
-
- mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2)
-
- mov k2, k0 ! 5 MT (latency=0)
- shld k4, k0 ! 99 EX
-
- and k3, k0 ! 78 EX
-
- mov.l @(k0, k1), k1 ! 21 LS (latency=2)
- mov #-(PAGE_SHIFT-2), k4 ! 6 EX
-
- ! Load up the pte entry (k2)
-
- mov k2, k0 ! 5 MT (latency=0)
- shld k4, k0 ! 99 EX
-
- tst k1, k1 ! 86 MT
-
- bt 20f ! 110 BR
-
- and k3, k0 ! 78 EX
- mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT
-
- mov.l @(k0, k1), k2 ! 21 LS (latency=2)
- add k0, k1 ! 49 EX
-
-#ifdef CONFIG_CPU_HAS_PTEA
- ! Test the entry for present and _PAGE_ACCESSED
-
- mov #-28, k3 ! 6 EX
- mov k2, k0 ! 5 MT (latency=0)
-
- tst k4, k2 ! 68 MT
- shld k3, k0 ! 99 EX
-
- bt 20f ! 110 BR
-
- ! Set PTEA register
- ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
- !
- ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
-
- and #0xe, k0 ! 79 EX
-
- mov k0, k3 ! 5 MT (latency=0)
- mov k2, k0 ! 5 MT (latency=0)
-
- and #1, k0 ! 79 EX
-
- or k0, k3 ! 82 EX
-
- ldmmupteh(k0) ! 9 LS (latency=2)
- shll2 k4 ! 101 EX _PAGE_ACCESSED
-
- tst k4, k2 ! 68 MT
-
- mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS
-
- mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
-
- ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
-#else
-
- ! Test the entry for present and _PAGE_ACCESSED
-
- mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
- tst k4, k2 ! 68 MT
-
- shll2 k4 ! 101 EX _PAGE_ACCESSED
- ldmmupteh(k0) ! 9 LS (latency=2)
-
- bt 20f ! 110 BR
- tst k4, k2 ! 68 MT
-
- ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
-
-#endif
-
- ! Set up the entry
-
- and k2, k3 ! 78 EX
- bt/s 10f ! 108 BR
-
- mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS
-
- ldtlb ! 128 CO
-
- ! At least one instruction between ldtlb and rte
- nop ! 119 NOP
-
- rte ! 126 CO
-
- nop ! 119 NOP
-
-
-10: or k4, k2 ! 82 EX
-
- ldtlb ! 128 CO
-
- ! At least one instruction between ldtlb and rte
- mov.l k2, @k1 ! 27 LS
-
- rte ! 126 CO
-
- ! Note we cannot execute mov here, because it is executed after
- ! restoring SSR, so would be executed in user space.
- nop ! 119 NOP
-
-
- .align 5
- ! Once cache line if possible...
-1: .long swapper_pg_dir
-4: .short (PTRS_PER_PGD-1) << 2
-5: .short _PAGE_PRESENT
-7: .long _PAGE_FLAGS_HARDWARE_MASK
-8: .long MMU_PTEH
-#ifdef COUNT_EXCEPTIONS
-9: .long exception_count_miss
-#endif
-
- ! Either pgd or pte not present
-20: mov.l 1f, k2
- mov.l 4f, k3
- bra handle_exception
- mov.l @k2, k2
-!
- .balign 512,0,512
-interrupt:
- mov.l 2f, k2
- mov.l 3f, k3
-#if defined(CONFIG_KGDB_NMI)
- ! Debounce (filter nested NMI)
- mov.l @k2, k0
- mov.l 5f, k1
- cmp/eq k1, k0
- bf 0f
- mov.l 6f, k1
- tas.b @k1
- bt 0f
- rte
- nop
- .align 2
-5: .long NMI_VEC
-6: .long in_nmi
-0:
-#endif /* defined(CONFIG_KGDB_NMI) */
- bra handle_exception
- mov #-1, k2 ! interrupt exception marker
-
- .align 2
-1: .long EXPEVT
-2: .long INTEVT
-3: .long ret_from_irq
-4: .long ret_from_exception
-
-!
-!
- .align 2
-ENTRY(handle_exception)
- ! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
- ! save all registers onto stack.
- !
+ ! Switch to kernel stack if needed
stc ssr, k0 ! Is it from kernel space?
shll k0 ! Check MD bit (bit30) by shifting it into...
shll k0 ! ...the T bit
@@ -553,65 +340,67 @@ ENTRY(handle_exception)
add current, k1
mov k1, r15 ! change to kernel stack
!
-1: mov.l 2f, k1
- !
-#ifdef CONFIG_SH_DSP
- mov.l r2, @-r15 ! Save r2, we need another reg
- stc sr, k4
- mov.l 1f, r2
- tst r2, k4 ! Check if in DSP mode
- mov.l @r15+, r2 ! Restore r2 now
- bt/s skip_save
- mov #0, k4 ! Set marker for no stack frame
-
- mov r2, k4 ! Backup r2 (in k4) for later
-
- ! Save DSP registers on stack
- stc.l mod, @-r15
- stc.l re, @-r15
- stc.l rs, @-r15
- sts.l dsr, @-r15
- sts.l y1, @-r15
- sts.l y0, @-r15
- sts.l x1, @-r15
- sts.l x0, @-r15
- sts.l a0, @-r15
-
- ! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
-
- ! FIXME: Make sure that this is still the case with newer toolchains,
- ! as we're not at all interested in supporting ancient toolchains at
- ! this point. -- PFM.
-
- mov r15, r2
- .word 0xf653 ! movs.l a1, @-r2
- .word 0xf6f3 ! movs.l a0g, @-r2
- .word 0xf6d3 ! movs.l a1g, @-r2
- .word 0xf6c3 ! movs.l m0, @-r2
- .word 0xf6e3 ! movs.l m1, @-r2
- mov r2, r15
-
- mov k4, r2 ! Restore r2
- mov.l 1f, k4 ! Force DSP stack frame
-skip_save:
- mov.l k4, @-r15 ! Push DSP mode marker onto stack
-#endif
- ! Save the user registers on the stack.
- mov.l k2, @-r15 ! EXPEVT
+1:
+ rts
+ nop
- mov #-1, k4
- mov.l k4, @-r15 ! set TRA (default: -1)
- !
+!
+! 0x400: Instruction and Data TLB miss exception vector
+!
+ .balign 1024,0,1024
+tlb_miss:
+ sts pr, k3 ! save original pr value in k3
+
+handle_exception:
+ mova exception_data, k0
+
+ ! Setup stack and save DSP context (k0 contains original r15 on return)
+ bsr prepare_stack
+ PREF(k0)
+
+ ! Save registers / Switch to bank 0
+ mov.l 5f, k2 ! vector register address
+ mov.l 1f, k4 ! SR bits to clear in k4
+ bsr save_regs ! needs original pr value in k3
+ mov.l @k2, k2 ! read out vector and keep in k2
+
+handle_exception_special:
+ setup_frame_reg
+
+ ! Setup return address and jump to exception handler
+ mov.l 7f, r9 ! fetch return address
+ stc r2_bank, r0 ! k2 (vector)
+ mov.l 6f, r10
+ shlr2 r0
+ shlr r0
+ mov.l @(r0, r10), r10
+ jmp @r10
+ lds r9, pr ! put return address in pr
+
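
The lookup above turns the hardware vector into a table index: vectors are spaced 0x20 apart, so shlr2 plus shlr (a shift right by 3) yields the byte offset into a table of 4-byte handler pointers, i.e. index = vector >> 5. A sketch, assuming a table of plain function pointers:

typedef void (*exc_handler_t)(void);
extern exc_handler_t exception_handling_table[];

static exc_handler_t lookup_handler(unsigned long vector)
{
        /* (vector >> 3) bytes == (vector >> 5) entries of 4 bytes each */
        return exception_handling_table[vector >> 5];
}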
+ .align L1_CACHE_SHIFT
+
+! save_regs()
+! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
+! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
+! - switch bank
+! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
+! k0 contains original stack pointer*
+! k1 trashed
+! k3 passes original pr*
+! k4 passes SR bitmask
+! BL=1 on entry, on exit BL=0.
+
+ENTRY(save_regs)
+ mov #-1, r1
+ mov.l k1, @-r15 ! set TRA (default: -1)
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
stc.l ssr, @-r15
- sts.l pr, @-r15
+ mov.l k3, @-r15 ! original pr in k3
stc.l spc, @-r15
- !
- lds k3, pr ! Set the return address to pr
- !
- mov.l k0, @-r15 ! save orignal stack
+
+ mov.l k0, @-r15 ! original stack pointer in k0
mov.l r14, @-r15
mov.l r13, @-r15
mov.l r12, @-r15
@@ -619,13 +408,23 @@ skip_save:
mov.l r10, @-r15
mov.l r9, @-r15
mov.l r8, @-r15
- !
- stc sr, r8 ! Back to normal register bank, and
- or k1, r8 ! Block all interrupts
- mov.l 3f, k1
- and k1, r8 ! ...
- ldc r8, sr ! ...changed here.
- !
+
+ mov.l 0f, k3 ! SR bits to set in k3
+
+ ! fall-through
+
+! save_low_regs()
+! - modify SR for bank switch
+! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
+! k3 passes bits to set in SR
+! k4 passes bits to clear in SR
+
+ENTRY(save_low_regs)
+ stc sr, r8
+ or k3, r8
+ and k4, r8
+ ldc r8, sr
+
mov.l r7, @-r15
mov.l r6, @-r15
mov.l r5, @-r15
@@ -633,61 +432,82 @@ skip_save:
mov.l r3, @-r15
mov.l r2, @-r15
mov.l r1, @-r15
- mov.l r0, @-r15
-
- /*
- * This gets a bit tricky.. in the INTEVT case we don't want to use
- * the VBR offset as a destination in the jump call table, since all
- * of the destinations are the same. In this case, (interrupt) sets
- * a marker in r2 (now r2_bank since SR.RB changed), which we check
- * to determine the exception type. For all other exceptions, we
- * forcibly read EXPEVT from memory and fix up the jump address, in
- * the interrupt exception case we jump to do_IRQ() and defer the
- * INTEVT read until there. As a bonus, we can also clean up the SR.RB
- * checks that do_IRQ() was doing..
- */
- stc r2_bank, r8
- cmp/pz r8
- bf interrupt_exception
- shlr2 r8
- shlr r8
-
-#ifdef COUNT_EXCEPTIONS
- mov.l 5f, r9
- add r8, r9
- mov.l @r9, r10
- add #1, r10
- mov.l r10, @r9
-#endif
-
- mov.l 4f, r9
- add r8, r9
- mov.l @r9, r9
- jmp @r9
- nop
rts
- nop
+ mov.l r0, @-r15
- .align 2
-1: .long 0x00001000 ! DSP=1
-2: .long 0x000080f0 ! FD=1, IMASK=15
-3: .long 0xcfffffff ! RB=0, BL=0
-4: .long exception_handling_table
-#ifdef COUNT_EXCEPTIONS
-5: .long exception_count_table
-#endif
+!
+! 0x600: Interrupt / NMI vector
+!
+ .balign 512,0,512
+ENTRY(handle_interrupt)
+ sts pr, k3 ! save original pr value in k3
+ mova exception_data, k0
+
+ ! Setup stack and save DSP context (k0 contains original r15 on return)
+ bsr prepare_stack
+ PREF(k0)
+
+ ! Save registers / Switch to bank 0
+ mov.l 1f, k4 ! SR bits to clear in k4
+ bsr save_regs ! needs original pr value in k3
+ mov #-1, k2 ! default vector kept in k2
+
+ setup_frame_reg
+
+ stc sr, r0 ! get status register
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bf 9f
+ TRACE_IRQS_OFF
+9:
+
+ ! Setup return address and jump to do_IRQ
+ mov.l 4f, r9 ! fetch return address
+ lds r9, pr ! put return address in pr
+ mov.l 2f, r4
+ mov.l 3f, r9
+ mov.l @r4, r4 ! pass INTEVT vector as arg0
+
+ shlr2 r4
+ shlr r4
+ mov r4, r0 ! save vector->jmp table offset for later
+
+ shlr2 r4 ! vector to IRQ# conversion
+ add #-0x10, r4
+
+ cmp/pz r4 ! is it a valid IRQ?
+ bt 10f
-interrupt_exception:
- mov.l 1f, r9
+ /*
+ * We got here as a result of taking the INTEVT path for something
+ * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
+ * path and special case the event dispatch instead. This is the
+ * expected path for the NMI (and any other brilliantly implemented
+ * exception), which effectively wants regular exception dispatch
+ * but is unfortunately reported through INTEVT rather than
+ * EXPEVT. Grr.
+ */
+ mov.l 6f, r9
+ mov.l @(r0, r9), r9
jmp @r9
- nop
- rts
- nop
+ mov r15, r8 ! trap handlers take saved regs in r8
- .align 2
-1: .long do_IRQ
+10:
+ jmp @r9 ! Off to do_IRQ() we go.
+ mov r15, r5 ! pass saved registers as arg1
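
The shifts above implement the usual SH evt2irq() conversion: INTEVT codes start at 0x200 and advance by 0x20 per IRQ, so irq = (INTEVT >> 5) - 16, and anything that comes out negative (the NMI's 0x1c0, for instance) is re-dispatched through the exception table rather than do_IRQ(). A sketch, with the do_IRQ() signature assumed:

struct pt_regs;
extern void (*exception_handling_table[])(void);
extern void do_IRQ(unsigned int irq, struct pt_regs *regs);     /* assumed */

static void dispatch_intevt(unsigned long intevt, struct pt_regs *regs)
{
        long irq = (long)(intevt >> 5) - 0x10;  /* evt2irq(): 0x200 -> IRQ 0 */

        if (irq >= 0)
                do_IRQ(irq, regs);              /* ordinary hard IRQ */
        else
                /* NMI and friends arrive via INTEVT but want exception
                 * dispatch; index the same table the EXPEVT path uses. */
                exception_handling_table[intevt >> 5]();
}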
- .align 2
ENTRY(exception_none)
rts
nop
+
+ .align L1_CACHE_SHIFT
+exception_data:
+0: .long 0x000080f0 ! FD=1, IMASK=15
+1: .long 0xcfffffff ! RB=0, BL=0
+2: .long INTEVT
+3: .long do_IRQ
+4: .long ret_from_irq
+5: .long EXPEVT
+6: .long exception_handling_table
+7: .long ret_from_exception
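
Taken together with save_low_regs, the first two constants in exception_data reduce the SR rewrite to the following (a sketch):

static unsigned long exception_sr(unsigned long sr)
{
        sr |= 0x000080f0;       /* set FD=1 (FPU disabled) and IMASK=15 */
        sr &= 0xcfffffff;       /* clear RB (bank 0) and BL (unblock)   */
        return sr;
}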