path: root/arch/microblaze/kernel/head.S
Diffstat (limited to 'arch/microblaze/kernel/head.S')
-rw-r--r--  arch/microblaze/kernel/head.S | 161
1 file changed, 134 insertions(+), 27 deletions(-)
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 77320b8fc16..4655ff342c6 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -63,9 +63,11 @@ ENTRY(_start)
real_start:
#endif
- mfs r1, rmsr
- andi r1, r1, ~2
- mts rmsr, r1
+ mts rmsr, r0
+/* Disable stack protection from bootloader */
+ mts rslr, r0
+ addi r8, r0, 0xFFFFFFFF
+ mts rshr, r8
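The three writes above clear MSR and widen the hardware stack-protection window left by the bootloader to the whole address space. Roughly, the stack check behaves like the sketch below (SLR/SHR are the stack low/high limit registers; the exception helper name is made up):

	/* conceptual model of the stack check applied to r1 updates;
	 * raise_stack_protection_exception() is an illustrative name */
	if (r1 < SLR || r1 > SHR)
		raise_stack_protection_exception();
	/* with SLR = 0 and SHR = 0xFFFFFFFF the check can never fire */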
/*
* According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
* if the msrclr instruction is not enabled. We use this to detect
@@ -73,6 +75,7 @@ real_start:
* r8 == 0 - msr instructions are implemented
* r8 != 0 - msr instructions are not implemented
*/
+ mfs r1, rmsr
msrclr r8, 0 /* clear nothing - just read msr for test */
cmpu r8, r8, r1 /* r1 must contain msr reg content */
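A C sketch of this probe, using made-up mfs_rmsr() and msrclr() helpers for the two instructions; on cores without MSR instructions the msrclr opcode executes like "mfs rX, rpc", so the result differs from the MSR value read just before:

	extern unsigned int mfs_rmsr(void);          /* stand-in for "mfs rX, rmsr" */
	extern unsigned int msrclr(unsigned int m);  /* stand-in for "msrclr rX, m" */

	unsigned int msr   = mfs_rmsr();
	unsigned int probe = msrclr(0);              /* clears no bits */
	/* implemented: probe == old MSR == msr -> difference is zero
	 * missing:     probe == PC             -> difference is nonzero */
	int msr_instr_missing = (probe != msr);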
@@ -96,7 +99,7 @@ big_endian:
_prepare_copy_fdt:
or r11, r0, r0 /* incremment */
ori r4, r0, TOPHYS(_fdt_start)
- ori r3, r0, (0x4000 - 4)
+ ori r3, r0, (0x8000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */
sw r12, r4, r11 /* addr[r4 + r11] = r12 */
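The loop above copies the device tree blob passed in r7 word by word to _fdt_start; with the constant bumped from 0x4000 to 0x8000 it now covers a 32KB blob instead of 16KB. In C terms, roughly (fdt stands in for the r7 pointer):

	/* rough equivalent of _copy_fdt */
	unsigned int i;
	for (i = 0; i < 0x8000; i += 4)
		*(u32 *)((char *)_fdt_start + i) = *(u32 *)((char *)fdt + i);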
@@ -110,31 +113,35 @@ no_fdt_arg:
#ifndef CONFIG_CMDLINE_BOOL
/*
* handling command line
- * copy command line to __init_end. There is space for storing command line.
+ * copy command line directly to cmd_line placed in data section.
*/
- or r6, r0, r0 /* incremment */
- ori r4, r0, __init_end /* load address of command line */
+ beqid r5, skip /* Skip if NULL pointer */
+ or r11, r0, r0 /* increment */
+ ori r4, r0, cmd_line /* load address of command line */
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
- lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
- sb r2, r4, r6 /* addr[r4+r6]= r2*/
- addik r6, r6, 1 /* increment counting */
+ /* r2 = r5 + r11 - r5 contains the pointer to the command line */
+ lbu r2, r5, r11
+ beqid r2, skip /* Skip if no data */
+ sb r2, r4, r11 /* addr[r4 + r11] = r2 */
+ addik r11, r11, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
- addik r3, r3, -1 /* descrement loop */
+ addik r3, r3, -1 /* decrement loop */
addik r5, r4, 0 /* add new space for command line */
tovirt(r5,r5)
+skip:
#endif /* CONFIG_CMDLINE_BOOL */
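The rewritten loop copies the bootloader command line (r5) straight into the kernel's cmd_line buffer instead of the area after __init_end, skips the copy for a NULL pointer, and stops once the terminating NUL has been stored. A C sketch of the same logic, with src standing in for the r5 pointer:

	unsigned int i;

	if (src) {
		for (i = 0; i < COMMAND_LINE_SIZE; i++) {
			cmd_line[i] = src[i];	/* the NUL itself is copied too */
			if (!src[i])
				break;
		}
	}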
#ifdef NOT_COMPILE
/* save bram context */
- or r6, r0, r0 /* incremment */
+ or r11, r0, r0 /* increment */
ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
ori r3, r0, (LMB_SIZE - 4)
_copy_bram:
- lw r7, r0, r6 /* r7 = r0 + r6 */
- sw r7, r4, r6 /* addr[r4 + r6] = r7*/
- addik r6, r6, 4 /* increment counting */
+ lw r7, r0, r11 /* r7 = r0 + r11 */
+ sw r7, r4, r11 /* addr[r4 + r11] = r7 */
+ addik r11, r11, 4 /* increment counting */
bgtid r3, _copy_bram /* loop for all entries */
addik r3, r3, -4 /* descrement loop */
#endif
@@ -150,6 +157,7 @@ _copy_bram:
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */
+ mts rtlblo, r0
bgtid r3, _invalidate /* loop for all entries */
addik r3, r3, -1
/* sync */
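With the added mts rtlblo, r0 the loop now clears both halves of every UTLB entry rather than only the tag word. Schematically, with a made-up mts() wrapper and an illustrative entry count:

	int i;
	for (i = NUM_TLB_ENTRIES - 1; i >= 0; i--) {	/* NUM_TLB_ENTRIES is illustrative */
		mts(rtlbx,  i);		/* select entry i */
		mts(rtlbhi, 0);		/* tag word cleared, V bit off */
		mts(rtlblo, 0);		/* data word cleared as well (new here) */
	}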
@@ -169,6 +177,53 @@ _invalidate:
addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
tophys(r4,r3) /* Load the kernel physical address */
+ /* start to do TLB calculation */
+ addik r12, r0, _end
+ rsub r12, r3, r12
+ addik r12, r12, CONFIG_LOWMEM_SIZE >> PTE_SHIFT /* that's the pad */
+
+ or r9, r0, r0 /* TLB0 = 0 */
+ or r10, r0, r0 /* TLB1 = 0 */
+
+ addik r11, r12, -0x1000000
+ bgei r11, GT16 /* size is greater than 16MB */
+ addik r11, r12, -0x0800000
+ bgei r11, GT8 /* size is greater than 8MB */
+ addik r11, r12, -0x0400000
+ bgei r11, GT4 /* size is greater than 4MB */
+ /* size is less than 4MB */
+ addik r11, r12, -0x0200000
+ bgei r11, GT2 /* size is greater than 2MB */
+ addik r9, r0, 0x0100000 /* TLB0 must be 1MB */
+ addik r11, r12, -0x0100000
+ bgei r11, GT1 /* size is greater than 1MB */
+ /* TLB1 stays 0, as set up above */
+ bri tlb_end
+GT4: /* r11 contains the rest - will be either 1 or 4 */
+ ori r9, r0, 0x400000 /* TLB0 is 4MB */
+ bri TLB1
+GT16: /* TLB0 is 16MB */
+ addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
+TLB1:
+ /* r2 must be used because r11 is still needed if this subtract goes negative */
+ addik r2, r11, -0x0400000
+ bgei r2, GT20 /* size is greater than 20MB */
+ /* size is >16MB and <20MB */
+ addik r11, r11, -0x0100000
+ bgei r11, GT17 /* size is greater than 17MB */
+ /* kernel is >16MB and < 17MB */
+GT1:
+ addik r10, r0, 0x0100000 /* means TLB1 is 1MB */
+ bri tlb_end
+GT2: /* TLB0 is 0 and TLB1 will be 4MB */
+GT17: /* TLB1 is 4MB - kernel size <20MB */
+ addik r10, r0, 0x0400000 /* means TLB1 is 4MB */
+ bri tlb_end
+GT8: /* TLB0 is still zero, so only TLB1 is used */
+GT20: /* TLB1 is 16MB - kernel size >20MB */
+ addik r10, r0, 0x1000000 /* means TLB1 is 16MB */
+tlb_end:
+
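The branch ladder above picks the sizes of up to two pinned mappings (each 1MB, 4MB, or 16MB) so that together they cover r12, i.e. the kernel image size plus the lowmem page-table pad. A plain C sketch of the same decision tree (not part of the patch):

	static void pick_pinned_tlbs(unsigned long size,
				     unsigned long *tlb0, unsigned long *tlb1)
	{
		unsigned long rest;

		*tlb0 = 0;
		*tlb1 = 0;
		if (size >= 0x1000000) {		/* at least 16MB */
			*tlb0 = 0x1000000;
			rest = size - 0x1000000;
			if (rest >= 0x400000)
				*tlb1 = 0x1000000;	/* GT20: 16MB + 16MB */
			else if (rest >= 0x100000)
				*tlb1 = 0x400000;	/* GT17: 16MB + 4MB */
			else
				*tlb1 = 0x100000;	/* GT1:  16MB + 1MB */
		} else if (size >= 0x800000) {		/* 8MB..16MB */
			*tlb1 = 0x1000000;		/* GT8: single 16MB entry */
		} else if (size >= 0x400000) {		/* 4MB..8MB */
			*tlb0 = 0x400000;
			*tlb1 = (size - 0x400000 >= 0x100000) ? 0x400000 : 0x100000;
		} else if (size >= 0x200000) {		/* 2MB..4MB */
			*tlb1 = 0x400000;		/* GT2: single 4MB entry */
		} else {				/* below 2MB */
			*tlb0 = 0x100000;
			if (size >= 0x100000)
				*tlb1 = 0x100000;	/* GT1: 1MB + 1MB */
		}
		/* the code after tlb_end moves tlb1 into tlb0 when tlb0 is zero */
	}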
/*
* Configure and load two entries into TLB slots 0 and 1.
* In case we are pinning TLBs, these are reserved in by the
@@ -178,29 +233,82 @@ _invalidate:
andi r4,r4,0xfffffc00 /* Mask off the real page number */
ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
+ /*
+ * TLB0 is always used - check that it is not zero (r9 holds the TLB0 size);
+ * if it is zero, use the TLB1 value instead and clear TLB1 (r10 holds the TLB1 size)
+ */
+ bnei r9, tlb0_not_zero
+ add r9, r10, r0
+ add r10, r0, r0
+tlb0_not_zero:
+
+ /* compute the TLBHI page-size field for the TLB0 size in r9 (1MB/4MB/16MB) */
+ ori r30, r0, 0x200
+ andi r29, r9, 0x100000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r9, 0x400000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r9, 0x1000000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+1:
andi r3,r3,0xfffffc00 /* Mask off the effective page number */
- ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
+ ori r3,r3,(TLB_VALID)
+ or r3, r3, r30
- mts rtlbx,r0 /* TLB slow 0 */
+ /* Load tlb_skip, the index of the first unused TLB entry */
+ lwi r11, r0, TOPHYS(tlb_skip)
+ mts rtlbx,r11 /* select the TLB slot given by tlb_skip */
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
- addik r4, r4, 0x01000000 /* Map next 16 M entries */
- addik r3, r3, 0x01000000
+ /* Increase tlb_skip size */
+ addik r11, r11, 1
+ swi r11, r0, TOPHYS(tlb_skip)
+
+ /* TLB1 can be zero, in which case it is not set up */
+ beqi r10, jump_over2
+
+ /* compute the TLBHI page-size field for the TLB1 size in r10 */
+ ori r30, r0, 0x200
+ andi r29, r10, 0x100000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r10, 0x400000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r10, 0x1000000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+1:
+ addk r4, r4, r9 /* previous addr + TLB0 size */
+ addk r3, r3, r9
+
+ andi r3,r3,0xfffffc00 /* Mask off the effective page number */
+ ori r3,r3,(TLB_VALID)
+ or r3, r3, r30
- ori r6,r0,1 /* TLB slot 1 */
- mts rtlbx,r6
+ lwi r11, r0, TOPHYS(tlb_skip)
+ mts rtlbx, r11 /* r11 again holds tlb_skip, as for TLB0 */
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
+ /* Increase tlb_skip size */
+ addik r11, r11, 1
+ swi r11, r0, TOPHYS(tlb_skip)
+
+jump_over2:
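Each of the two mappings is loaded into the slot indexed by tlb_skip, which is then advanced, and the r30 sequences marked "compute the TLBHI page-size field" translate the mapping size into the SIZE bits of the tag word. A C sketch of one such entry load, with a made-up write_tlb() helper standing in for the rtlbx/rtlblo/rtlbhi writes:

	/* size -> TLBHI SIZE field, matching the r30 computation above */
	static unsigned int tlb_size_field(unsigned long size)
	{
		if (size == 0x1000000)
			return 0x380;		/* TLB_PAGESZ(PAGESZ_16M) */
		if (size == 0x400000)
			return 0x300;		/* TLB_PAGESZ(PAGESZ_4M) */
		return 0x280;			/* TLB_PAGESZ(PAGESZ_1M) */
	}

	/* one pinned entry */
	slot = tlb_skip;
	write_tlb(slot,
		  phys | TLB_WR | TLB_EX,			/* data word */
		  virt | TLB_VALID | tlb_size_field(size));	/* tag word  */
	tlb_skip = slot + 1;
	phys += size;	/* TLB1, if present, maps the range right after TLB0 */
	virt += size;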
/*
* Load a TLB entry for LMB, since we need access to
* the exception vectors, using a 4k real==virtual mapping.
*/
- ori r6,r0,3 /* TLB slot 3 */
- mts rtlbx,r6
+ /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
+ ori r11, r0, MICROBLAZE_LMB_TLB_ID
+ mts rtlbx,r11
ori r4,r0,(TLB_WR | TLB_EX)
ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
@@ -238,8 +346,8 @@ start_here:
* Please see $(ARCH)/mach-$(SUBARCH)/setup.c for
* the function.
*/
- addik r9, r0, machine_early_init
- brald r15, r9
+ addik r11, r0, machine_early_init
+ brald r15, r11
nop
#ifndef CONFIG_MMU
@@ -268,8 +376,7 @@ start_here:
/* Load up the kernel context */
kernel_load_context:
- # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
- ori r5,r0,3
+ ori r5, r0, MICROBLAZE_LMB_TLB_ID
mts rtlbx,r5
nop
mts rtlbhi,r0
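This mirrors the temporary mapping set up earlier: once the kernel context is loaded, the reserved MICROBLAZE_LMB_TLB_ID slot is selected and its tag word is zeroed, which clears the valid bit and drops the temporary LMB mapping. With the same made-up mts() wrapper as above:

	mts(rtlbx,  MICROBLAZE_LMB_TLB_ID);	/* select the reserved LMB slot */
	mts(rtlbhi, 0);				/* V bit cleared -> mapping gone */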