Diffstat (limited to 'arch/microblaze/kernel/hw_exception_handler.S')
-rw-r--r--	arch/microblaze/kernel/hw_exception_handler.S	115
1 file changed, 79 insertions, 36 deletions
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index e62be837960..0b11a4469de 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -75,6 +75,7 @@
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
 #include <asm/signal.h>
+#include <asm/registers.h>
 #include <asm/asm-offsets.h>
 
 #undef DEBUG
@@ -146,15 +147,14 @@ or r3, r0, NUM_TO_REG (regnum);
 
 	/* Shift right instruction depending on available configuration */
-	#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
-	#define BSRLI(rD, rA, imm)	\
-		bsrli rD, rA, imm
-	#else
-	#define BSRLI(rD, rA, imm)	BSRLI ## imm (rD, rA)
+	#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0
 	/* Only the used shift constants defined here - add more if needed */
 	#define BSRLI2(rD, rA)		\
 		srl rD, rA;		/* << 1 */	\
 		srl rD, rD;		/* << 2 */
+	#define BSRLI4(rD, rA)		\
+		BSRLI2(rD, rA);		\
+		BSRLI2(rD, rD)
 	#define BSRLI10(rD, rA)		\
 		srl rD, rA;		/* << 1 */	\
 		srl rD, rD;		/* << 2 */	\
@@ -169,7 +169,33 @@
 	#define BSRLI20(rD, rA)		\
 		BSRLI10(rD, rA);	\
 		BSRLI10(rD, rD)
+
+	.macro	bsrli, rD, rA, IMM
+	.if (\IMM) == 2
+		BSRLI2(\rD, \rA)
+	.elseif (\IMM) == 10
+		BSRLI10(\rD, \rA)
+	.elseif (\IMM) == 12
+		BSRLI2(\rD, \rA)
+		BSRLI10(\rD, \rD)
+	.elseif (\IMM) == 14
+		BSRLI4(\rD, \rA)
+		BSRLI10(\rD, \rD)
+	.elseif (\IMM) == 20
+		BSRLI20(\rD, \rA)
+	.elseif (\IMM) == 24
+		BSRLI4(\rD, \rA)
+		BSRLI20(\rD, \rD)
+	.elseif (\IMM) == 28
+		BSRLI4(\rD, \rA)
+		BSRLI4(\rD, \rD)
+		BSRLI20(\rD, \rD)
+	.else
+	.error "BSRLI shift macros \IMM"
+	.endif
+	.endm
 	#endif
+
 	#endif /* CONFIG_MMU */
 
 .extern other_exception_handler /* Defined in exception.c */
@@ -192,8 +218,8 @@
  *	  - W	S REG	EXC
  *
  *
- * STACK FRAME STRUCTURE (for NO_MMU)
- * ---------------------------------
+ * STACK FRAME STRUCTURE (for CONFIG_MMU=n)
+ * ----------------------------------------
  *
  * +-------------+          + 0
  * | MSR         |
@@ -581,7 +607,7 @@ ex_handler_done:
 		 * tried to access a kernel or read-protected page - always
 		 * a SEGV). All other faults here must be stores, so no
 		 * need to check ESR_S as well. */
-		andi	r4, r4, 0x800		/* ESR_Z - zone protection */
+		andi	r4, r4, ESR_DIZ		/* ESR_Z - zone protection */
 		bnei	r4, ex2
 
 		ori	r4, r0, swapper_pg_dir
@@ -595,25 +621,25 @@ ex_handler_done:
 		 * tried to access a kernel or read-protected page - always
 		 * a SEGV). All other faults here must be stores, so no
 		 * need to check ESR_S as well. */
-		andi	r4, r4, 0x800		/* ESR_Z */
+		andi	r4, r4, ESR_DIZ		/* ESR_Z */
 		bnei	r4, ex2
 		/* get current task address */
 		addi	r4 ,CURRENT_TASK, TOPHYS(0);
 		lwi	r4, r4, TASK_THREAD+PGDIR
 	ex4:
 		tophys(r4,r4)
-		BSRLI(r5,r3,20)		/* Create L1 (pgdir/pmd) address */
-		andi	r5, r5, 0xffc
+		/* Create L1 (pgdir/pmd) address */
+		bsrli	r5, r3, PGDIR_SHIFT - 2
+		andi	r5, r5, PAGE_SIZE - 4
 /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
 		or	r4, r4, r5
 		lwi	r4, r4, 0	/* Get L1 entry */
-		andi	r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+		andi	r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
 		beqi	r5, ex2		/* Bail if no table */
 
 		tophys(r5,r5)
-		BSRLI(r6,r3,10)		/* Compute PTE address */
-		andi	r6, r6, 0xffc
-		andi	r5, r5, 0xfffff003
+		bsrli	r6, r3, PTE_SHIFT /* Compute PTE address */
+		andi	r6, r6, PAGE_SIZE - 4
 		or	r5, r5, r6
 		lwi	r4, r5, 0	/* Get Linux PTE */
@@ -632,7 +658,9 @@ ex_handler_done:
 		 * Many of these bits are software only. Bits we don't set
 		 * here we (properly should) assume have the appropriate value.
 		 */
-		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
+/* Ignore memory coherent, just LSB on ZSEL is used + EX/WR */
+		andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+						TLB_ZSEL(1) | TLB_ATTR_MASK
 		ori	r4, r4, _PAGE_HWEXEC	/* make it executable */
 
 		/* find the TLB index that caused the fault. It has to be here*/
@@ -701,18 +729,18 @@ ex_handler_done:
 		lwi	r4, r4, TASK_THREAD+PGDIR
 	ex6:
 		tophys(r4,r4)
-		BSRLI(r5,r3,20)		/* Create L1 (pgdir/pmd) address */
-		andi	r5, r5, 0xffc
+		/* Create L1 (pgdir/pmd) address */
+		bsrli	r5, r3, PGDIR_SHIFT - 2
+		andi	r5, r5, PAGE_SIZE - 4
 /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
 		or	r4, r4, r5
 		lwi	r4, r4, 0	/* Get L1 entry */
-		andi	r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+		andi	r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
 		beqi	r5, ex7		/* Bail if no table */
 
 		tophys(r5,r5)
-		BSRLI(r6,r3,10)		/* Compute PTE address */
-		andi	r6, r6, 0xffc
-		andi	r5, r5, 0xfffff003
+		bsrli	r6, r3, PTE_SHIFT /* Compute PTE address */
+		andi	r6, r6, PAGE_SIZE - 4
 		or	r5, r5, r6
 		lwi	r4, r5, 0	/* Get Linux PTE */
@@ -731,7 +759,8 @@ ex_handler_done:
 		 * here we (properly should) assume have the appropriate value.
 		 */
 		brid	finish_tlb_load
-		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
+		andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+						TLB_ZSEL(1) | TLB_ATTR_MASK
 	ex7:
 		/* The bailout. Restore registers to pre-exception conditions
 		 * and call the heavyweights to help us out.
@@ -771,18 +800,18 @@ ex_handler_done:
 		lwi	r4, r4, TASK_THREAD+PGDIR
 	ex9:
 		tophys(r4,r4)
-		BSRLI(r5,r3,20)		/* Create L1 (pgdir/pmd) address */
-		andi	r5, r5, 0xffc
+		/* Create L1 (pgdir/pmd) address */
+		bsrli	r5, r3, PGDIR_SHIFT - 2
+		andi	r5, r5, PAGE_SIZE - 4
 /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
 		or	r4, r4, r5
 		lwi	r4, r4, 0	/* Get L1 entry */
-		andi	r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+		andi	r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
 		beqi	r5, ex10	/* Bail if no table */
 
 		tophys(r5,r5)
-		BSRLI(r6,r3,10)		/* Compute PTE address */
-		andi	r6, r6, 0xffc
-		andi	r5, r5, 0xfffff003
+		bsrli	r6, r3, PTE_SHIFT /* Compute PTE address */
+		andi	r6, r6, PAGE_SIZE - 4
 		or	r5, r5, r6
 		lwi	r4, r5, 0	/* Get Linux PTE */
@@ -801,7 +830,8 @@ ex_handler_done:
 		 * here we (properly should) assume have the appropriate value.
 		 */
 		brid	finish_tlb_load
-		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
+		andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+						TLB_ZSEL(1) | TLB_ATTR_MASK
 	ex10:
 		/* The bailout. Restore registers to pre-exception conditions
 		 * and call the heavyweights to help us out.
@@ -820,19 +850,26 @@ ex_handler_done:
 	 * Upon exit, we reload everything and RFI.
 	 * A common place to load the TLB.
 	 */
+.section .data
+.align 4
+.global tlb_skip
+	tlb_skip:
+		.long	MICROBLAZE_TLB_SKIP
 	tlb_index:
-		.long	1 /* MS: storing last used tlb index */
+		/* MS: storing last used tlb index */
+		.long	MICROBLAZE_TLB_SIZE/2
+.previous
 	finish_tlb_load:
 		/* MS: load the last used TLB index. */
 		lwi	r5, r0, TOPHYS(tlb_index)
 		addik	r5, r5, 1 /* MS: inc tlb_index -> use next one */
 
 	/* MS: FIXME this is potential fault, because this is mask not count */
-		andi	r5, r5, (MICROBLAZE_TLB_SIZE-1)
+		andi	r5, r5, MICROBLAZE_TLB_SIZE - 1
 		ori	r6, r0, 1
 		cmp	r31, r5, r6
 		blti	r31, ex12
-		addik	r5, r6, 1
+		lwi	r5, r0, TOPHYS(tlb_skip)
 	ex12:
 		/* MS: save back current TLB index */
 		swi	r5, r0, TOPHYS(tlb_index)
@@ -847,8 +884,14 @@ ex_handler_done:
 	 * set of bits. These are size, valid, E, U0, and ensure
 	 * bits 20 and 21 are zero.
 	 */
-		andi	r3, r3, 0xfffff000
-		ori	r3, r3, 0x0c0
+		andi	r3, r3, PAGE_MASK
+#ifdef CONFIG_MICROBLAZE_64K_PAGES
+		ori	r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K)
+#elif CONFIG_MICROBLAZE_16K_PAGES
+		ori	r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K)
+#else
+		ori	r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
+#endif
 		mts	rtlbhi, r3 /* Load TLB HI */
 		nop
@@ -904,7 +947,7 @@ ex_handler_done:
 .ent _unaligned_data_exception
 _unaligned_data_exception:
 	andi	r8, r3, 0x3E0;	/* Mask and extract the register operand */
-	BSRLI(r8,r8,2);		/* r8 >> 2 = register operand * 8 */
+	bsrli	r8, r8, 2;	/* r8 >> 2 = register operand * 8 */
 	andi	r6, r3, 0x400;	/* Extract ESR[S] */
 	bneid	r6, ex_sw_vm;
 	andi	r6, r3, 0x800;	/* Extract ESR[W] - delay slot */
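
Note on the symbolic offsets introduced above: for the default 4K-page configuration the rewritten L1/L2 lookups compute the same byte offsets as the old hard-coded 20/0xffc and 10/0xffc pair. The following is a minimal sketch, not part of the patch; PAGE_SHIFT = 12, PGDIR_SHIFT = 22 and PTE_SHIFT = PAGE_SHIFT - 2 are assumed values used only to show the equivalence.

	/*
	 * Hedged sketch (not kernel code): page-table byte offsets written
	 * out in C. The constants below are assumptions for the 4K-page
	 * case; the helper names are illustrative only.
	 */
	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12u
	#define PAGE_SIZE	(1u << PAGE_SHIFT)
	#define PGDIR_SHIFT	22u
	#define PTE_SHIFT	(PAGE_SHIFT - 2u)	/* 10 */

	static uint32_t l1_offset(uint32_t fault_addr)
	{
		/* bsrli r5, r3, PGDIR_SHIFT - 2; andi r5, r5, PAGE_SIZE - 4 */
		return (fault_addr >> (PGDIR_SHIFT - 2)) & (PAGE_SIZE - 4);
	}

	static uint32_t l2_offset(uint32_t fault_addr)
	{
		/* bsrli r6, r3, PTE_SHIFT; andi r6, r6, PAGE_SIZE - 4 */
		return (fault_addr >> PTE_SHIFT) & (PAGE_SIZE - 4);
	}

	int main(void)
	{
		uint32_t addr = 0x12345678u;

		/* matches the pre-patch form: (addr >> 20) & 0xffc */
		assert(l1_offset(addr) == ((addr >> 20) & 0xffcu));
		/* matches the pre-patch form: (addr >> 10) & 0xffc */
		assert(l2_offset(addr) == ((addr >> 10) & 0xffcu));
		return 0;
	}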
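The new bsrli assembler macro covers only the shift amounts the handler actually uses (2, 10, 12, 14, 20, 24, 28) by chaining the BSRLI2/BSRLI4/BSRLI10/BSRLI20 building blocks, since a core configured without the barrel shifter only provides 1-bit shifts. A minimal sketch of that decomposition, with illustrative helper names that are not kernel code:

	/* Hedged sketch: model each building block as repeated 1-bit srl. */
	#include <assert.h>
	#include <stdint.h>

	static uint32_t srl1(uint32_t x)    { return x >> 1; }		/* one "srl" */
	static uint32_t bsrli2(uint32_t x)  { return srl1(srl1(x)); }	/* BSRLI2 */
	static uint32_t bsrli4(uint32_t x)  { return bsrli2(bsrli2(x)); }
	static uint32_t bsrli10(uint32_t x)
	{
		for (int i = 0; i < 10; i++)
			x = srl1(x);
		return x;
	}
	static uint32_t bsrli20(uint32_t x) { return bsrli10(bsrli10(x)); }

	int main(void)
	{
		uint32_t x = 0xdeadbeefu;

		/* .elseif (\IMM) == 12 -> BSRLI2 then BSRLI10 */
		assert(bsrli10(bsrli2(x)) == x >> 12);
		/* .elseif (\IMM) == 14 -> BSRLI4 then BSRLI10 */
		assert(bsrli10(bsrli4(x)) == x >> 14);
		/* .elseif (\IMM) == 24 -> BSRLI4 then BSRLI20 */
		assert(bsrli20(bsrli4(x)) == x >> 24);
		/* .elseif (\IMM) == 28 -> BSRLI4, BSRLI4, BSRLI20 */
		assert(bsrli20(bsrli4(bsrli4(x))) == x >> 28);
		return 0;
	}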
