Diffstat (limited to 'arch/microblaze/kernel/hw_exception_handler.S')
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S | 232
1 file changed, 148 insertions(+), 84 deletions(-)
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 781195438ee..0b11a4469de 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -75,8 +75,11 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/signal.h>
+#include <asm/registers.h>
#include <asm/asm-offsets.h>
+#undef DEBUG
+
/* Helpful Macros */
#define NUM_TO_REG(num) r ## num
@@ -91,7 +94,7 @@
lwi r6, r1, PT_R6; \
lwi r11, r1, PT_R11; \
lwi r31, r1, PT_R31; \
- lwi r1, r0, TOPHYS(r0_ram + 0);
+ lwi r1, r1, PT_R1;
#endif /* CONFIG_MMU */
#define LWREG_NOP \
@@ -144,19 +147,14 @@
or r3, r0, NUM_TO_REG (regnum);
/* Shift right instruction depending on available configuration */
- #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
- #define BSRLI(rD, rA, imm) \
- bsrli rD, rA, imm
- #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
- #define BSRLI(rD, rA, imm) \
- ori rD, r0, (1 << imm); \
- idivu rD, rD, rA
- #else
- #define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
+ #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0
/* Only the shift constants used here are defined - add more if needed */
#define BSRLI2(rD, rA) \
srl rD, rA; /* << 1 */ \
srl rD, rD; /* << 2 */
+ #define BSRLI4(rD, rA) \
+ BSRLI2(rD, rA); \
+ BSRLI2(rD, rD)
#define BSRLI10(rD, rA) \
srl rD, rA; /* << 1 */ \
srl rD, rD; /* << 2 */ \
@@ -171,7 +169,33 @@
#define BSRLI20(rD, rA) \
BSRLI10(rD, rA); \
BSRLI10(rD, rD)
+
+ .macro bsrli, rD, rA, IMM
+ .if (\IMM) == 2
+ BSRLI2(\rD, \rA)
+ .elseif (\IMM) == 10
+ BSRLI10(\rD, \rA)
+ .elseif (\IMM) == 12
+ BSRLI2(\rD, \rA)
+ BSRLI10(\rD, \rD)
+ .elseif (\IMM) == 14
+ BSRLI4(\rD, \rA)
+ BSRLI10(\rD, \rD)
+ .elseif (\IMM) == 20
+ BSRLI20(\rD, \rA)
+ .elseif (\IMM) == 24
+ BSRLI4(\rD, \rA)
+ BSRLI20(\rD, \rD)
+ .elseif (\IMM) == 28
+ BSRLI4(\rD, \rA)
+ BSRLI4(\rD, \rD)
+ BSRLI20(\rD, \rD)
+ .else
+ .error "BSRLI shift macros \IMM"
+ .endif
+ .endm
#endif
+
#endif /* CONFIG_MMU */
.extern other_exception_handler /* Defined in exception.c */
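
For readers tracing the shift emulation above: on cores configured without a barrel shifter, a multi-bit logical right shift must be built from one-bit srl steps, and the BSRLIn helpers compose fixed widths (2, 4, 10, 20) to cover exactly the shift amounts the handler needs. A minimal C sketch of that composition (helper names invented for this illustration; not kernel code):

    #include <stdint.h>
    #include <assert.h>

    /* One hardware 'srl' step: shift right by exactly one bit. */
    static uint32_t srl(uint32_t x)     { return x >> 1; }

    static uint32_t bsrli2(uint32_t x)  { return srl(srl(x)); }         /* BSRLI2  */
    static uint32_t bsrli4(uint32_t x)  { return bsrli2(bsrli2(x)); }   /* BSRLI4  */
    static uint32_t bsrli10(uint32_t x)                                 /* BSRLI10 */
    {
        for (int i = 0; i < 10; i++)
            x = srl(x);
        return x;
    }
    static uint32_t bsrli20(uint32_t x) { return bsrli10(bsrli10(x)); } /* BSRLI20 */

    int main(void)
    {
        uint32_t v = 0xDEADBEEFu;
        assert(bsrli10(bsrli4(v)) == v >> 14);        /* .if (\IMM) == 14 path */
        assert(bsrli20(bsrli4(bsrli4(v))) == v >> 28); /* .if (\IMM) == 28 path */
        return 0;
    }
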
@@ -194,8 +218,8 @@
* - W S REG EXC
*
*
- * STACK FRAME STRUCTURE (for NO_MMU)
- * ---------------------------------
+ * STACK FRAME STRUCTURE (for CONFIG_MMU=n)
+ * ----------------------------------------
*
* +-------------+ + 0
* | MSR |
@@ -210,8 +234,8 @@
* | . |
* | . |
*
- * NO_MMU kernel use the same r0_ram pointed space - look to vmlinux.lds.S
- * which is used for storing register values - old style was, that value were
+ * MMU kernel uses the same 'pt_pool_space' pointed space,
+ * which is used for storing register values - the noMMU style was that values were
* stored on the stack, but in case of failure you lost the register information.
* Currently you can see the register values at a specific place in memory.
* Compared with the previous solution, the speed should be the same.
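
The idea this comment describes - dumping registers into a fixed, statically allocated pool rather than onto the stack - can be pictured with a small hedged C analogue (names and size below are illustrative, not the kernel's layout):

    /* A register dump written to a fixed pool survives a corrupt stack
     * pointer and always sits at a known address for post-mortem inspection. */
    #define POOL_REGS 32                     /* stand-in for PT_SIZE / 4 */

    static unsigned long pt_pool[POOL_REGS]; /* plays the role of pt_pool_space */

    static void dump_reg(unsigned int num, unsigned long val)
    {
        if (num < POOL_REGS)
            pt_pool[num] = val;              /* no dependence on a valid stack */
    }
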
@@ -230,8 +254,22 @@
*/
/* wrappers to restore state before coming to entry.S */
-
#ifdef CONFIG_MMU
+.section .data
+.align 4
+pt_pool_space:
+	.space PT_SIZE
+
+#ifdef DEBUG
+/* Create space for exception counting. */
+.section .data
+.global exception_debug_table
+.align 4
+exception_debug_table:
+	/* Look at the exception vector table: 32 exceptions * word size */
+	.space (32 * 4)
+#endif /* DEBUG */
+
.section .rodata
.align 4
_MB_HW_ExceptionVectorTable:
@@ -291,10 +329,10 @@ _hw_exception_handler:
#ifndef CONFIG_MMU
addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
#else
- swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */
+ swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */
/* Save data to kernel memory. This is a problem
* when you come from user space */
- ori r1, r0, TOPHYS(r0_ram + 28);
+ ori r1, r0, TOPHYS(pt_pool_space);
#endif
swi r3, r1, PT_R3
swi r4, r1, PT_R4
@@ -333,12 +371,12 @@ not_in_delay_slot:
#ifdef DEBUG
/* counting which exception happen */
- lwi r5, r0, 0x200 + TOPHYS(r0_ram)
+ lwi r5, r0, TOPHYS(exception_debug_table)
addi r5, r5, 1
- swi r5, r0, 0x200 + TOPHYS(r0_ram)
- lwi r5, r6, 0x200 + TOPHYS(r0_ram)
+ swi r5, r0, TOPHYS(exception_debug_table)
+ lwi r5, r6, TOPHYS(exception_debug_table)
addi r5, r5, 1
- swi r5, r6, 0x200 + TOPHYS(r0_ram)
+ swi r5, r6, TOPHYS(exception_debug_table)
#endif
/* end */
/* Load the HW Exception vector */
@@ -478,7 +516,7 @@ ex_lw_tail:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
/* Form load_word jump table offset (lw_table + (8 * regnum)) */
- la r6, r0, TOPHYS(lw_table);
+ addik r6, r0, TOPHYS(lw_table);
addk r5, r5, r5;
addk r5, r5, r5;
addk r5, r5, r5;
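
The three addk instructions double the register number three times, i.e. multiply it by 8 - the stride of one jump-table entry (two 4-byte instructions). A hedged C equivalent of the address arithmetic (the add of the table base happens in lines outside this hunk):

    /* Form the load_word jump target: lw_table + regnum * 8, no multiplier. */
    static unsigned long jump_target(unsigned long lw_table, unsigned int regnum)
    {
        unsigned long off = regnum;
        off += off;                /* addk r5, r5, r5 -> regnum * 2 */
        off += off;                /* addk r5, r5, r5 -> regnum * 4 */
        off += off;                /* addk r5, r5, r5 -> regnum * 8 */
        return lw_table + off;     /* base added outside this hunk */
    }
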
@@ -489,7 +527,7 @@ ex_sw:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
/* Form store_word jump table offset (sw_table + (8 * regnum)) */
- la r6, r0, TOPHYS(sw_table);
+ addik r6, r0, TOPHYS(sw_table);
add r5, r5, r5;
add r5, r5, r5;
add r5, r5, r5;
@@ -569,7 +607,7 @@ ex_handler_done:
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- andi r4, r4, 0x800 /* ESR_Z - zone protection */
+ andi r4, r4, ESR_DIZ /* ESR_Z - zone protection */
bnei r4, ex2
ori r4, r0, swapper_pg_dir
@@ -583,25 +621,25 @@ ex_handler_done:
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- andi r4, r4, 0x800 /* ESR_Z */
+ andi r4, r4, ESR_DIZ /* ESR_Z */
bnei r4, ex2
/* get current task address */
addi r4, CURRENT_TASK, TOPHYS(0);
lwi r4, r4, TASK_THREAD+PGDIR
ex4:
tophys(r4,r4)
- BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
- andi r5, r5, 0xffc
+ /* Create L1 (pgdir/pmd) address */
+ bsrli r5, r3, PGDIR_SHIFT - 2
+ andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
- andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex2 /* Bail if no table */
tophys(r5,r5)
- BSRLI(r6,r3,10) /* Compute PTE address */
- andi r6, r6, 0xffc
- andi r5, r5, 0xfffff003
+ bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
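
This hunk is one of three identical page-table walks in this file. A hedged C model of the address arithmetic, assuming 4K pages (PGDIR_SHIFT = 22, PAGE_SHIFT = 12, PTE_SHIFT = PAGE_SHIFT - 2 = 10) - other page sizes change the constants, not the structure:

    #include <stdint.h>

    /* Model only: the real code runs on physical addresses with the MMU off. */
    static uint32_t walk(uint32_t pgdir_phys, uint32_t vaddr)
    {
        /* bsrli r5, r3, PGDIR_SHIFT - 2; andi r5, r5, PAGE_SIZE - 4 */
        uint32_t l1_off = (vaddr >> (22 - 2)) & (4096 - 4);
        uint32_t l1 = *(uint32_t *)(uintptr_t)(pgdir_phys | l1_off);

        /* andi r5, r4, PAGE_MASK - extract the L2 (pte) table base */
        uint32_t pte_base = l1 & 0xFFFFF000u;
        if (!pte_base)
            return 0;                          /* "Bail if no table" */

        /* bsrli r6, r3, PTE_SHIFT; andi r6, r6, PAGE_SIZE - 4 */
        uint32_t l2_off = (vaddr >> 10) & (4096 - 4);
        return *(uint32_t *)(uintptr_t)(pte_base | l2_off);  /* the Linux PTE */
    }
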
@@ -620,7 +658,9 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
- andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+/* Ignore the memory-coherent bit; only the LSB of ZSEL is used, plus EX/WR */
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
ori r4, r4, _PAGE_HWEXEC /* make it executable */
/* find the TLB index that caused the fault. It has to be here */
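
The change from andni (clear the listed bits) to andi (keep the listed bits) swaps a clear-list for a keep-list; the two agree wherever the keep mask complements the clear mask, and the new form documents which bits are meant to survive. A one-screen C restatement with illustrative stand-in masks:

    #define CLEAR_MASK 0x0ce2u               /* old: bits to force to zero */
    #define KEEP_MASK  (~CLEAR_MASK)         /* new: bits allowed through  */

    static unsigned int old_andni(unsigned int x) { return x & ~CLEAR_MASK; }
    static unsigned int new_andi(unsigned int x)  { return x & KEEP_MASK;   }
    /* old_andni(x) == new_andi(x) for every x when the masks complement */
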
@@ -689,18 +729,18 @@ ex_handler_done:
lwi r4, r4, TASK_THREAD+PGDIR
ex6:
tophys(r4,r4)
- BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
- andi r5, r5, 0xffc
+ /* Create L1 (pgdir/pmd) address */
+ bsrli r5, r3, PGDIR_SHIFT - 2
+ andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
- andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex7 /* Bail if no table */
tophys(r5,r5)
- BSRLI(r6,r3,10) /* Compute PTE address */
- andi r6, r6, 0xffc
- andi r5, r5, 0xfffff003
+ bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
@@ -719,7 +759,8 @@ ex_handler_done:
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
- andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -759,18 +800,18 @@ ex_handler_done:
lwi r4, r4, TASK_THREAD+PGDIR
ex9:
tophys(r4,r4)
- BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
- andi r5, r5, 0xffc
+ /* Create L1 (pgdir/pmd) address */
+ bsrli r5, r3, PGDIR_SHIFT - 2
+ andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
- andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex10 /* Bail if no table */
tophys(r5,r5)
- BSRLI(r6,r3,10) /* Compute PTE address */
- andi r6, r6, 0xffc
- andi r5, r5, 0xfffff003
+ bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
@@ -789,7 +830,8 @@ ex_handler_done:
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
- andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -808,19 +850,26 @@ ex_handler_done:
* Upon exit, we reload everything and RFI.
* A common place to load the TLB.
*/
+.section .data
+.align 4
+.global tlb_skip
+tlb_skip:
+	.long MICROBLAZE_TLB_SKIP
tlb_index:
- .long 1 /* MS: storing last used tlb index */
+ /* MS: storing last used tlb index */
+ .long MICROBLAZE_TLB_SIZE/2
+.previous
finish_tlb_load:
/* MS: load the last used TLB index. */
lwi r5, r0, TOPHYS(tlb_index)
addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
/* MS: FIXME this is a potential fault, because this is a mask, not a count */
- andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
+ andi r5, r5, MICROBLAZE_TLB_SIZE - 1
ori r6, r0, 1
cmp r31, r5, r6
blti r31, ex12
- addik r5, r6, 1
+ lwi r5, r0, TOPHYS(tlb_skip)
ex12:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
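
Putting this hunk's arithmetic in one place: the index is incremented, wrapped with a power-of-two mask (hence the FIXME - it is a mask, not a count), and pushed past the pinned entries when it wraps. A hedged C model with placeholder sizes (real values come from <asm/mmu.h> and the tlb_skip word above):

    enum { MB_TLB_SIZE = 64, MB_TLB_SKIP = 2 };   /* placeholders */

    static unsigned int tlb_index_model = MB_TLB_SIZE / 2;  /* matches .long above */

    static unsigned int next_victim(void)
    {
        tlb_index_model = (tlb_index_model + 1) & (MB_TLB_SIZE - 1); /* wrap (mask!) */
        if (tlb_index_model <= 1)          /* wrapped into the pinned low entries */
            tlb_index_model = MB_TLB_SKIP; /* resume after the skipped entries   */
        return tlb_index_model;
    }
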
@@ -835,8 +884,14 @@ ex_handler_done:
* set of bits. These are size, valid, E, U0, and ensure
* bits 20 and 21 are zero.
*/
- andi r3, r3, 0xfffff000
- ori r3, r3, 0x0c0
+ andi r3, r3, PAGE_MASK
+#ifdef CONFIG_MICROBLAZE_64K_PAGES
+ ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K)
+#elif defined(CONFIG_MICROBLAZE_16K_PAGES)
+ ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K)
+#else
+ ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
+#endif
mts rtlbhi, r3 /* Load TLB HI */
nop
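
What the andi/ori pair builds here is the TLB HI word: physical page frame, valid bit, and the configured page-size field. A hedged sketch using the 40x-style encodings these macros resolve to (0x40 for TLB_VALID, size field at bits 7..9, so the old literal 0x0c0 equals VALID | PAGESZ(4K)); treat the values as illustrative, the authoritative definitions are in <asm/mmu.h>:

    #define TLB_VALID     0x00000040u           /* as in the 40x-style headers */
    #define TLB_PAGESZ(x) (((x) & 0x7u) << 7)
    #define PAGESZ_4K     1

    static unsigned int make_tlb_hi(unsigned int pte)
    {
        return (pte & 0xFFFFF000u)                 /* andi r3, r3, PAGE_MASK */
             | TLB_VALID | TLB_PAGESZ(PAGESZ_4K);  /* ori - 4K-page build    */
    }
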
@@ -892,7 +947,7 @@ ex_handler_done:
.ent _unaligned_data_exception
_unaligned_data_exception:
andi r8, r3, 0x3E0; /* Mask and extract the register operand */
- BSRLI(r8,r8,2); /* r8 >> 2 = register operand * 8 */
+ bsrli r8, r8, 2; /* r8 >> 2 = register operand * 8 */
andi r6, r3, 0x400; /* Extract ESR[S] */
bneid r6, ex_sw_vm;
andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
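
A hedged decode of the ESR fields masked above: 0x3E0 selects the destination-register field, 0x400 is ESR[S] (store), 0x800 is ESR[W] (word). The >> 2 turns (reg << 5) into reg * 8, the stride of the 8-byte jump-table entries:

    struct unaligned_op {
        unsigned int table_off;  /* destination register * 8: jump-table stride */
        int is_store;            /* ESR[S] */
        int is_word;             /* ESR[W] */
    };

    static struct unaligned_op decode_esr(unsigned int esr)
    {
        struct unaligned_op op;
        op.table_off = (esr & 0x3E0) >> 2; /* (reg << 5) >> 2  ==  reg * 8 */
        op.is_store  = esr & 0x400;
        op.is_word   = esr & 0x800;
        return op;
    }
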
@@ -900,7 +955,7 @@ ex_lw_vm:
beqid r6, ex_lhw_vm;
load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */
/* Load a word, byte-by-byte from destination address and save it in tmp space */
- la r6, r0, ex_tmp_data_loc_0;
+ addik r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
load2: lbui r5, r4, 1;
sbi r5, r6, 1;
@@ -914,7 +969,7 @@ load4: lbui r5, r4, 3;
ex_lhw_vm:
/* Load a half-word, byte-by-byte from destination address and
* save it in tmp space */
- la r6, r0, ex_tmp_data_loc_0;
+ addik r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
load5: lbui r5, r4, 1;
sbi r5, r6, 1;
@@ -930,7 +985,7 @@ ex_sw_vm:
addik r5, r8, sw_table_vm;
bra r5;
ex_sw_tail_vm:
- la r5, r0, ex_tmp_data_loc_0;
+ addik r5, r0, ex_tmp_data_loc_0;
beqid r6, ex_shw_vm;
swi r3, r5, 0; /* Get the word - delay slot */
/* Store the word, byte-by-byte into destination address */
@@ -945,11 +1000,20 @@ store3: sbi r3, r4, 2;
store4: sbi r3, r4, 3; /* Delay slot */
ex_shw_vm:
/* Store the lower half-word, byte-by-byte into destination address */
+#ifdef __MICROBLAZEEL__
+ lbui r3, r5, 0;
+store5: sbi r3, r4, 0;
+ lbui r3, r5, 1;
+ brid ret_from_exc;
+store6: sbi r3, r4, 1; /* Delay slot */
+#else
lbui r3, r5, 2;
store5: sbi r3, r4, 0;
lbui r3, r5, 3;
brid ret_from_exc;
store6: sbi r3, r4, 1; /* Delay slot */
+#endif
+
ex_sw_end_vm: /* Exception handling of store word, ends. */
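
An illustration of why the little-endian build above stores bytes 0 and 1 of the temporary word while the big-endian build stores bytes 2 and 3: the low half-word of a 32-bit value occupies opposite ends of memory. A hedged C model (host_is_little must describe the machine the model runs on for the memcpy view to match what the MicroBlaze core would see):

    #include <stdint.h>
    #include <string.h>

    static void store_low_halfword(uint32_t word, uint8_t dst[2], int host_is_little)
    {
        uint8_t tmp[4];
        memcpy(tmp, &word, 4);           /* like 'swi r3, r5, 0' into tmp space */
        if (host_is_little) {            /* __MICROBLAZEEL__: low half = bytes 0,1 */
            dst[0] = tmp[0];
            dst[1] = tmp[1];
        } else {                         /* big-endian: low half = bytes 2,3 */
            dst[0] = tmp[2];
            dst[1] = tmp[3];
        }
    }
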
/* We have to handle cases where the get/put_user macros get an unaligned pointer
@@ -964,7 +1028,7 @@ ex_unaligned_fixup:
addik r7, r0, SIGSEGV
/* call bad_page_fault to find the aligned fixup; the fixup address is saved
* in PT_PC, which is used as the return address from the exception */
- la r15, r0, ret_from_exc-8 /* setup return address */
+ addik r15, r0, ret_from_exc-8 /* setup return address */
brid bad_page_fault
nop
@@ -1092,23 +1156,23 @@ lw_r10_vm: R3_TO_LWREG_VM_V (10);
lw_r11_vm: R3_TO_LWREG_VM_V (11);
lw_r12_vm: R3_TO_LWREG_VM_V (12);
lw_r13_vm: R3_TO_LWREG_VM_V (13);
-lw_r14_vm: R3_TO_LWREG_VM (14);
+lw_r14_vm: R3_TO_LWREG_VM_V (14);
lw_r15_vm: R3_TO_LWREG_VM_V (15);
-lw_r16_vm: R3_TO_LWREG_VM (16);
+lw_r16_vm: R3_TO_LWREG_VM_V (16);
lw_r17_vm: R3_TO_LWREG_VM_V (17);
lw_r18_vm: R3_TO_LWREG_VM_V (18);
-lw_r19_vm: R3_TO_LWREG_VM (19);
-lw_r20_vm: R3_TO_LWREG_VM (20);
-lw_r21_vm: R3_TO_LWREG_VM (21);
-lw_r22_vm: R3_TO_LWREG_VM (22);
-lw_r23_vm: R3_TO_LWREG_VM (23);
-lw_r24_vm: R3_TO_LWREG_VM (24);
-lw_r25_vm: R3_TO_LWREG_VM (25);
-lw_r26_vm: R3_TO_LWREG_VM (26);
-lw_r27_vm: R3_TO_LWREG_VM (27);
-lw_r28_vm: R3_TO_LWREG_VM (28);
-lw_r29_vm: R3_TO_LWREG_VM (29);
-lw_r30_vm: R3_TO_LWREG_VM (30);
+lw_r19_vm: R3_TO_LWREG_VM_V (19);
+lw_r20_vm: R3_TO_LWREG_VM_V (20);
+lw_r21_vm: R3_TO_LWREG_VM_V (21);
+lw_r22_vm: R3_TO_LWREG_VM_V (22);
+lw_r23_vm: R3_TO_LWREG_VM_V (23);
+lw_r24_vm: R3_TO_LWREG_VM_V (24);
+lw_r25_vm: R3_TO_LWREG_VM_V (25);
+lw_r26_vm: R3_TO_LWREG_VM_V (26);
+lw_r27_vm: R3_TO_LWREG_VM_V (27);
+lw_r28_vm: R3_TO_LWREG_VM_V (28);
+lw_r29_vm: R3_TO_LWREG_VM_V (29);
+lw_r30_vm: R3_TO_LWREG_VM_V (30);
lw_r31_vm: R3_TO_LWREG_VM_V (31);
sw_table_vm:
@@ -1126,23 +1190,23 @@ sw_r10_vm: SWREG_TO_R3_VM_V (10);
sw_r11_vm: SWREG_TO_R3_VM_V (11);
sw_r12_vm: SWREG_TO_R3_VM_V (12);
sw_r13_vm: SWREG_TO_R3_VM_V (13);
-sw_r14_vm: SWREG_TO_R3_VM (14);
+sw_r14_vm: SWREG_TO_R3_VM_V (14);
sw_r15_vm: SWREG_TO_R3_VM_V (15);
-sw_r16_vm: SWREG_TO_R3_VM (16);
+sw_r16_vm: SWREG_TO_R3_VM_V (16);
sw_r17_vm: SWREG_TO_R3_VM_V (17);
sw_r18_vm: SWREG_TO_R3_VM_V (18);
-sw_r19_vm: SWREG_TO_R3_VM (19);
-sw_r20_vm: SWREG_TO_R3_VM (20);
-sw_r21_vm: SWREG_TO_R3_VM (21);
-sw_r22_vm: SWREG_TO_R3_VM (22);
-sw_r23_vm: SWREG_TO_R3_VM (23);
-sw_r24_vm: SWREG_TO_R3_VM (24);
-sw_r25_vm: SWREG_TO_R3_VM (25);
-sw_r26_vm: SWREG_TO_R3_VM (26);
-sw_r27_vm: SWREG_TO_R3_VM (27);
-sw_r28_vm: SWREG_TO_R3_VM (28);
-sw_r29_vm: SWREG_TO_R3_VM (29);
-sw_r30_vm: SWREG_TO_R3_VM (30);
+sw_r19_vm: SWREG_TO_R3_VM_V (19);
+sw_r20_vm: SWREG_TO_R3_VM_V (20);
+sw_r21_vm: SWREG_TO_R3_VM_V (21);
+sw_r22_vm: SWREG_TO_R3_VM_V (22);
+sw_r23_vm: SWREG_TO_R3_VM_V (23);
+sw_r24_vm: SWREG_TO_R3_VM_V (24);
+sw_r25_vm: SWREG_TO_R3_VM_V (25);
+sw_r26_vm: SWREG_TO_R3_VM_V (26);
+sw_r27_vm: SWREG_TO_R3_VM_V (27);
+sw_r28_vm: SWREG_TO_R3_VM_V (28);
+sw_r29_vm: SWREG_TO_R3_VM_V (29);
+sw_r30_vm: SWREG_TO_R3_VM_V (30);
sw_r31_vm: SWREG_TO_R3_VM_V (31);
#endif /* CONFIG_MMU */