Diffstat (limited to 'arch/arm/boot/compressed/head.S')
-rw-r--r--  arch/arm/boot/compressed/head.S  637
1 file changed, 417 insertions(+), 220 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6825c34646d..3a8b32df6b3 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -9,7 +9,9 @@
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .arch armv7-a
/*
* Debugging stuff
*
@@ -21,20 +23,12 @@
#if defined(CONFIG_DEBUG_ICEDCC)
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
.macro loadsp, rb, tmp
.endm
.macro writeb, ch, rb
mcr p14, 0, \ch, c0, c5, 0
.endm
-#elif defined(CONFIG_CPU_V7)
- .macro loadsp, rb, tmp
- .endm
- .macro writeb, ch, rb
-wait: mrc p14, 0, pc, c0, c1, 0
- bcs wait
- mcr p14, 0, \ch, c0, c5, 0
- .endm
#elif defined(CONFIG_CPU_XSCALE)
.macro loadsp, rb, tmp
.endm
@@ -51,7 +45,7 @@ wait: mrc p14, 0, pc, c0, c1, 0
#else
-#include <mach/debug-macro.S>
+#include CONFIG_DEBUG_LL_INCLUDE
.macro writeb, ch, rb
senduart \ch, \rb
@@ -66,11 +60,6 @@ wait: mrc p14, 0, pc, c0, c1, 0
add \rb, \rb, #0x00010000 @ Ser1
#endif
.endm
-#elif defined(CONFIG_ARCH_S3C2410)
- .macro loadsp, rb, tmp
- mov \rb, #0x50000000
- add \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
- .endm
#else
.macro loadsp, rb, tmp
addruart \rb, \tmp
@@ -125,20 +114,30 @@ wait: mrc p14, 0, pc, c0, c1, 0
* sort out different calling conventions
*/
.align
+ .arm @ Always enter in ARM state
start:
.type start,#function
- .rept 8
+ .rept 7
mov r0, r0
.endr
+ ARM( mov r0, r0 )
+ ARM( b 1f )
+ THUMB( adr r12, BSYM(1f) )
+ THUMB( bx r12 )
- b 1f
.word 0x016f2818 @ Magic numbers to help the loader
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
-1: mov r7, r1 @ save architecture ID
+ THUMB( .thumb )
+1:
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
+ mrs r9, cpsr
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install @ get into SVC mode, reversibly
+#endif
+ mov r7, r1 @ save architecture ID
mov r8, r2 @ save atags pointer
-#ifndef __ARM_ARCH_2__
/*
* Booting from Angel - need to enter SVC mode and disable
* FIQs/IRQs (numeric definitions from angel arm.h source).
@@ -151,13 +150,9 @@ start:
ARM( swi 0x123456 ) @ angel_SWI_ARM
THUMB( svc 0xab ) @ angel_SWI_THUMB
not_angel:
- mrs r2, cpsr @ turn off interrupts to
- orr r2, r2, #0xc0 @ prevent angel from running
- msr cpsr_c, r2
-#else
- teqp pc, #0x0c000003 @ turn off interrupts
-#endif
-
+ safe_svcmode_maskall r0
+ msr spsr_cxsf, r9 @ Save the CPU boot mode in
+ @ SPSR
/*
* Note that some cache flushing and other stuff may
* be needed here - is there an Angel SWI call for this?
@@ -169,54 +164,290 @@ not_angel:
*/
.text
- adr r0, LC0
- ldmia r0, {r1, r2, r3, r5, r6, r11, ip}
- ldr sp, [r0, #28]
+
#ifdef CONFIG_AUTO_ZRELADDR
@ determine final kernel image address
- and r4, pc, #0xf8000000
+ mov r4, pc
+ and r4, r4, #0xf8000000
add r4, r4, #TEXT_OFFSET
#else
ldr r4, =zreladdr
#endif
- subs r0, r0, r1 @ calculate the delta offset
- @ if delta is zero, we are
- beq not_relocated @ running at the address we
- @ were linked at.
+ /*
+ * Set up a page table only if it won't overwrite ourselves.
+ * That means r4 < pc || r4 - 16k page directory > &_end.
+ * Given that r4 > &_end is rather uncommon, we add a rough
+ * additional 1MB of room for a possible appended DTB.
+ */
+ mov r0, pc
+ cmp r0, r4
+ ldrcc r0, LC0+32
+ addcc r0, r0, pc
+ cmpcc r4, r0
+ orrcc r4, r4, #1 @ remember we skipped cache_on
+ blcs cache_on
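
[Editor's note: a minimal C sketch of the guard above, with illustrative names
that are not in the kernel sources. The 'span' value corresponds to the word
loaded from LC0+32, i.e. _end - restart + 16384 + 1024*1024: the image size
plus the 16k page directory plus rough slack for an appended DTB.]

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: may cache_on (and the page directory __setup_mmu places
     * 16k below the kernel address) run without trampling the running
     * zImage? */
    static bool can_turn_cache_on(uintptr_t pc, uintptr_t kernel_addr,
                                  uintptr_t span)
    {
        if (kernel_addr <= pc)           /* target at or below us */
            return true;
        return kernel_addr >= pc + span; /* page dir clears our end */
    }
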
+
+restart: adr r0, LC0
+ ldmia r0, {r1, r2, r3, r6, r10, r11, r12}
+ ldr sp, [r0, #28]
/*
- * We're running at a different address. We need to fix
- * up various pointers:
- * r5 - zImage base address (_start)
- * r6 - size of decompressed image
- * r11 - GOT start
- * ip - GOT end
+ * We might be running at a different address. We need
+ * to fix up various pointers.
*/
- add r5, r5, r0
+ sub r0, r0, r1 @ calculate the delta offset
+ add r6, r6, r0 @ _edata
+ add r10, r10, r0 @ inflated kernel size location
+
+ /*
+ * The kernel build system appends the size of the
+ * decompressed kernel at the end of the compressed data
+ * in little-endian form.
+ */
+ ldrb r9, [r10, #0]
+ ldrb lr, [r10, #1]
+ orr r9, r9, lr, lsl #8
+ ldrb lr, [r10, #2]
+ ldrb r10, [r10, #3]
+ orr r9, r9, lr, lsl #16
+ orr r9, r9, r10, lsl #24
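
[Editor's note: the same load written as C, a sketch only. The size is read one
byte at a time so it works regardless of the CPU's endianness and of the
value's alignment; only the stored byte order is fixed.]

    #include <stdint.h>

    /* Sketch: read a 32-bit little-endian value byte-wise, as the
     * ldrb/orr sequence above does with r10 at input_data_end - 4. */
    static uint32_t read_le32(const uint8_t *p)
    {
        return (uint32_t)p[0]
             | ((uint32_t)p[1] << 8)
             | ((uint32_t)p[2] << 16)
             | ((uint32_t)p[3] << 24);
    }
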
+
+#ifndef CONFIG_ZBOOT_ROM
+ /* malloc space is above the relocated stack (64k max) */
+ add sp, sp, r0
+ add r10, sp, #0x10000
+#else
+ /*
+ * With ZBOOT_ROM the bss/stack is non-relocatable,
+ * but someone could still run this code from RAM,
+ * in which case our reference is _edata.
+ */
+ mov r10, r6
+#endif
+
+ mov r5, #0 @ init dtb size to 0
+#ifdef CONFIG_ARM_APPENDED_DTB
+/*
+ * r0 = delta
+ * r2 = BSS start
+ * r3 = BSS end
+ * r4 = final kernel address (possibly with LSB set)
+ * r5 = appended dtb size (still unknown)
+ * r6 = _edata
+ * r7 = architecture ID
+ * r8 = atags/device tree pointer
+ * r9 = size of decompressed image
+ * r10 = end of this image, including bss/stack/malloc space if non-XIP
+ * r11 = GOT start
+ * r12 = GOT end
+ * sp = stack pointer
+ *
+ * if there are device trees (dtb) appended to zImage, advance r10 so that the
+ * dtb data will get relocated along with the kernel if necessary.
+ */
+
+ ldr lr, [r6, #0]
+#ifndef __ARMEB__
+ ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
+#else
+ ldr r1, =0xd00dfeed
+#endif
+ cmp lr, r1
+ bne dtb_check_done @ not found
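
[Editor's note: a hedged C rendering of the test above. The FDT header magic is
0xd00dfeed stored big-endian, so a little-endian CPU loading the word directly
must compare against the byte-reversed constant.]

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: does a flattened device tree start at _edata?  Assumes
     * the word is naturally aligned, as the zImage layout ensures. */
    static bool has_appended_dtb(const uint32_t *edata)
    {
    #ifdef __ARMEB__
        return *edata == 0xd00dfeedu;   /* native big-endian load       */
    #else
        return *edata == 0xedfe0dd0u;   /* 0xd00dfeed seen byte-swapped */
    #endif
    }
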
+
+#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
+ /*
+ * OK... Let's do some funky business here.
+ * If we do have a DTB appended to zImage, and we do have
+ * an ATAG list around, we want the latter to be translated
+ * and folded into the former here. To be on the safe side,
+ * let's temporarily move the stack away into the malloc
+ * area. No GOT fixup has occurred yet, but none of the
+ * code we're about to call uses any global variable.
+ */
+ add sp, sp, #0x10000
+ stmfd sp!, {r0-r3, ip, lr}
+ mov r0, r8
+ mov r1, r6
+ sub r2, sp, r6
+ bl atags_to_fdt
+
+ /*
+ * If the returned value is 1, there is no ATAG at the location
+ * pointed by r8. Try the typical 0x100 offset from start
+ * of RAM and hope for the best.
+ */
+ cmp r0, #1
+ sub r0, r4, #TEXT_OFFSET
+ bic r0, r0, #1
+ add r0, r0, #0x100
+ mov r1, r6
+ sub r2, sp, r6
+ bleq atags_to_fdt
+
+ ldmfd sp!, {r0-r3, ip, lr}
+ sub sp, sp, #0x10000
+#endif
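
[Editor's note: for reference, the register setup above corresponds roughly to
the following calls of the C helper from arch/arm/boot/compressed/atags_to_fdt.c.
The wrapper and its parameter names are illustrative, not from the sources.]

    #include <stdint.h>

    int atags_to_fdt(void *atag_list, void *fdt, int total_space);

    /* Illustrative wrapper mirroring the two attempts above.  'sp' is
     * the temporarily raised stack bounding the growable FDT;
     * 'ram_start' stands for r4 - TEXT_OFFSET with the LSB cleared. */
    static void fold_atags_into_fdt(void *atags, void *fdt, char *sp,
                                    uintptr_t ram_start)
    {
        if (atags_to_fdt(atags, fdt, sp - (char *)fdt) == 1) {
            /* no ATAGs at r8: try the conventional RAM start + 0x100 */
            atags_to_fdt((void *)(ram_start + 0x100), fdt,
                         sp - (char *)fdt);
        }
    }
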
+
+ mov r8, r6 @ use the appended device tree
+
+ /*
+ * Make sure that the DTB doesn't end up in the final
+ * kernel's .bss area. To do so, we adjust the decompressed
+ * kernel size to compensate if that .bss size is larger
+ * than the relocated code.
+ */
+ ldr r5, =_kernel_bss_size
+ adr r1, wont_overwrite
+ sub r1, r6, r1
+ subs r1, r5, r1
+ addhi r9, r9, r1
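
[Editor's note: the adjustment above restated as a C sketch with illustrative
names. The DTB travels just behind the code from wont_overwrite to _edata, so
if the final kernel's .bss outgrows that trailing chunk, the decompressed size
used by the overlap check is padded to push the relocated DTB clear of .bss.]

    #include <stddef.h>

    /* Sketch: pad the decompressed size (r9) so the appended DTB
     * cannot end up inside the final kernel's .bss.
     * tail_code_size corresponds to _edata - wont_overwrite. */
    static size_t pad_for_kernel_bss(size_t decompressed_size,
                                     size_t kernel_bss_size,
                                     size_t tail_code_size)
    {
        if (kernel_bss_size > tail_code_size)
            decompressed_size += kernel_bss_size - tail_code_size;
        return decompressed_size;
    }
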
+
+ /* Get the dtb's size */
+ ldr r5, [r6, #4]
+#ifndef __ARMEB__
+ /* convert r5 (dtb size) to little endian */
+ eor r1, r5, r5, ror #16
+ bic r1, r1, #0x00ff0000
+ mov r5, r5, ror #8
+ eor r5, r5, r1, lsr #8
+#endif
+
+ /* preserve 64-bit alignment */
+ add r5, r5, #7
+ bic r5, r5, #7
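
[Editor's note: both steps have simple C equivalents, sketched below with
illustrative names. The eor/ror sequence is the classic two-register ARM byte
swap; the add/bic pair rounds up to an 8-byte boundary.]

    #include <stdint.h>

    static uint32_t swab32(uint32_t x)      /* the eor/ror swap, in C */
    {
        return  (x >> 24)               |
               ((x >>  8) & 0x0000ff00u) |
               ((x <<  8) & 0x00ff0000u) |
                (x << 24);
    }

    static uint32_t align_up8(uint32_t n)   /* add #7 ; bic #7 */
    {
        return (n + 7) & ~7u;
    }
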
+
+ /* relocate some pointers past the appended dtb */
+ add r6, r6, r5
+ add r10, r10, r5
+ add sp, sp, r5
+dtb_check_done:
+#endif
+
+/*
+ * Check to see if we will overwrite ourselves.
+ * r4 = final kernel address (possibly with LSB set)
+ * r9 = size of decompressed image
+ * r10 = end of this image, including bss/stack/malloc space if non-XIP
+ * We basically want:
+ * r4 - 16k page directory >= r10 -> OK
+ * r4 + image length <= address of wont_overwrite -> OK
+ * Note: the possible LSB in r4 is harmless here.
+ */
+ add r10, r10, #16384
+ cmp r4, r10
+ bhs wont_overwrite
+ add r10, r4, r9
+ adr r9, wont_overwrite
+ cmp r10, r9
+ bls wont_overwrite
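
[Editor's note: the two branches above implement the conditions listed in the
comment; a C sketch with illustrative names follows. The possible LSB flag in
r4 is far below the 16 KiB slack, so it cannot flip either comparison.]

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: true when the image must move itself before decompressing. */
    static bool must_relocate(uintptr_t kernel_addr,        /* r4  */
                              uintptr_t decompressed_size,  /* r9  */
                              uintptr_t image_end,          /* r10 */
                              uintptr_t wont_overwrite_addr)
    {
        if (kernel_addr >= image_end + 16384)  /* room for page dir */
            return false;
        if (kernel_addr + decompressed_size <= wont_overwrite_addr)
            return false;
        return true;
    }
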
+
+/*
+ * Relocate ourselves past the end of the decompressed kernel.
+ * r6 = _edata
+ * r10 = end of the decompressed kernel
+ * Because we always copy ahead, we need to do it from the end and go
+ * backward in case the source and destination overlap.
+ */
+ /*
+ * Bump to the next 256-byte boundary with the size of
+ * the relocation code added. This avoids overwriting
+ * ourselves when the offset is small.
+ */
+ add r10, r10, #((reloc_code_end - restart + 256) & ~255)
+ bic r10, r10, #255
+
+ /* Get start of code we want to copy and align it down. */
+ adr r5, restart
+ bic r5, r5, #31
+
+/* Relocate the hyp vector base if necessary */
+#ifdef CONFIG_ARM_VIRT_EXT
+ mrs r0, spsr
+ and r0, r0, #MODE_MASK
+ cmp r0, #HYP_MODE
+ bne 1f
+
+ bl __hyp_get_vectors
+ sub r0, r0, r5
+ add r0, r0, r10
+ bl __hyp_set_vectors
+1:
+#endif
+
+ sub r9, r6, r5 @ size to copy
+ add r9, r9, #31 @ rounded up to a multiple
+ bic r9, r9, #31 @ ... of 32 bytes
+ add r6, r9, r5
+ add r9, r9, r10
+
+1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
+ cmp r6, r5
+ stmdb r9!, {r0 - r3, r10 - r12, lr}
+ bhi 1b
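
[Editor's note: the loop above is the backward half of a memmove. The
destination is strictly higher than the source, so copying from the top down is
safe even when the regions overlap. A C sketch of its shape; each asm
iteration moves eight registers, 32 bytes, via ldmdb/stmdb.]

    #include <stdint.h>

    /* Sketch: copy [src_start, src_end) to a higher, possibly
     * overlapping destination ending at dst_end, walking downwards.
     * Assumes 32-byte multiples, as the asm rounds r9 up. */
    static void copy_down(const uint32_t *src_start,
                          const uint32_t *src_end, uint32_t *dst_end)
    {
        while (src_end > src_start) {
            src_end -= 8;
            dst_end -= 8;
            for (int i = 0; i < 8; i++)     /* one ldmdb/stmdb pair */
                dst_end[i] = src_end[i];
        }
    }
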
+
+ /* Preserve offset to relocated code. */
+ sub r6, r9, r6
+
+#ifndef CONFIG_ZBOOT_ROM
+ /* cache_clean_flush may use the stack, so relocate it */
+ add sp, sp, r6
+#endif
+
+ tst r4, #1
+ bleq cache_clean_flush
+
+ adr r0, BSYM(restart)
+ add r0, r0, r6
+ mov pc, r0
+
+wont_overwrite:
+/*
+ * If delta is zero, we are running at the address we were linked at.
+ * r0 = delta
+ * r2 = BSS start
+ * r3 = BSS end
+ * r4 = kernel execution address (possibly with LSB set)
+ * r5 = appended dtb size (0 if not present)
+ * r7 = architecture ID
+ * r8 = atags pointer
+ * r11 = GOT start
+ * r12 = GOT end
+ * sp = stack pointer
+ */
+ orrs r1, r0, r5
+ beq not_relocated
+
add r11, r11, r0
- add ip, ip, r0
+ add r12, r12, r0
#ifndef CONFIG_ZBOOT_ROM
/*
* If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM=n),
* we need to fix up pointers into the BSS region.
- * r2 - BSS start
- * r3 - BSS end
- * sp - stack pointer
+ * Note that the stack pointer has already been fixed up.
*/
add r2, r2, r0
add r3, r3, r0
- add sp, sp, r0
/*
* Relocate all entries in the GOT table.
+ * Bump bss entries to _edata + dtb size
*/
1: ldr r1, [r11, #0] @ relocate entries in the GOT
- add r1, r1, r0 @ table. This fixes up the
- str r1, [r11], #4 @ C references.
- cmp r11, ip
+ add r1, r1, r0 @ This fixes up C references
+ cmp r1, r2 @ if entry >= bss_start &&
+ cmphs r3, r1 @ bss_end > entry
+ addhi r1, r1, r5 @ entry += dtb size
+ str r1, [r11], #4 @ next entry
+ cmp r11, r12
blo 1b
+
+ /* bump our bss pointers too */
+ add r2, r2, r5
+ add r3, r3, r5
+
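[Editor's note: in C, the fixup loop above looks roughly like this sketch.
Every GOT entry gets the load delta, and entries that land inside .bss get an
extra bump of the DTB size, because .bss now begins after the appended DTB at
_edata. Names are illustrative.]

    #include <stdint.h>

    static void relocate_got(uintptr_t *got, uintptr_t *got_end,
                             uintptr_t delta, uintptr_t bss_start,
                             uintptr_t bss_end, uintptr_t dtb_size)
    {
        for (; got < got_end; got++) {
            uintptr_t entry = *got + delta;      /* fix up C reference */
            if (entry >= bss_start && entry < bss_end)
                entry += dtb_size;               /* skip appended DTB  */
            *got = entry;
        }
    }
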
#else
/*
@@ -228,7 +459,7 @@ not_angel:
cmphs r3, r1 @ _end < entry
addlo r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references.
- cmp r11, ip
+ cmp r11, r12
blo 1b
#endif
@@ -241,87 +472,63 @@ not_relocated: mov r0, #0
blo 1b
/*
- * The C runtime environment should now be setup
- * sufficiently. Turn the cache on, set up some
- * pointers, and start decompressing.
+ * Did we skip the cache setup earlier?
+ * That is indicated by the LSB in r4.
+ * Do it now if so.
*/
- bl cache_on
-
- mov r1, sp @ malloc space above stack
- add r2, sp, #0x10000 @ 64k max
+ tst r4, #1
+ bic r4, r4, #1
+ blne cache_on
/*
- * Check to see if we will overwrite ourselves.
- * r4 = final kernel address
- * r5 = start of this image
- * r6 = size of decompressed image
- * r2 = end of malloc space (and therefore this image)
- * We basically want:
- * r4 >= r2 -> OK
- * r4 + image length <= r5 -> OK
+ * The C runtime environment should now be set up sufficiently.
+ * Set up some pointers, and start decompressing.
+ * r4 = kernel execution address
+ * r7 = architecture ID
+ * r8 = atags pointer
*/
- cmp r4, r2
- bhs wont_overwrite
- add r0, r4, r6
- cmp r0, r5
- bls wont_overwrite
-
- mov r5, r2 @ decompress after malloc space
- mov r0, r5
+ mov r0, r4
+ mov r1, sp @ malloc space above stack
+ add r2, sp, #0x10000 @ 64k max
mov r3, r7
bl decompress_kernel
+ bl cache_clean_flush
+ bl cache_off
+ mov r1, r7 @ restore architecture number
+ mov r2, r8 @ restore atags pointer
- add r0, r0, #127 + 128 @ alignment + stack
- bic r0, r0, #127 @ align the kernel length
-/*
- * r0 = decompressed kernel length
- * r1-r3 = unused
- * r4 = kernel execution address
- * r5 = decompressed kernel start
- * r7 = architecture ID
- * r8 = atags pointer
- * r9-r12,r14 = corrupted
- */
- add r1, r5, r0 @ end of decompressed kernel
- adr r2, reloc_start
- ldr r3, LC1
- add r3, r2, r3
-1: ldmia r2!, {r9 - r12, r14} @ copy relocation code
- stmia r1!, {r9 - r12, r14}
- ldmia r2!, {r9 - r12, r14}
- stmia r1!, {r9 - r12, r14}
- cmp r2, r3
- blo 1b
- mov sp, r1
- add sp, sp, #128 @ relocate the stack
+#ifdef CONFIG_ARM_VIRT_EXT
+ mrs r0, spsr @ Get saved CPU boot mode
+ and r0, r0, #MODE_MASK
+ cmp r0, #HYP_MODE @ if not booted in HYP mode...
+ bne __enter_kernel @ boot kernel directly
- bl cache_clean_flush
- ARM( add pc, r5, r0 ) @ call relocation code
- THUMB( add r12, r5, r0 )
- THUMB( mov pc, r12 ) @ call relocation code
+ adr r12, .L__hyp_reentry_vectors_offset
+ ldr r0, [r12]
+ add r0, r0, r12
-/*
- * We're not in danger of overwriting ourselves. Do this the simple way.
- *
- * r4 = kernel execution address
- * r7 = architecture ID
- */
-wont_overwrite: mov r0, r4
- mov r3, r7
- bl decompress_kernel
- b call_kernel
+ bl __hyp_set_vectors
+ __HVC(0) @ otherwise bounce to hyp mode
+
+ b . @ should never be reached
+
+ .align 2
+.L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - .
+#else
+ b __enter_kernel
+#endif
.align 2
.type LC0, #object
LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
- .word _start @ r5
- .word _image_size @ r6
+ .word _edata @ r6
+ .word input_data_end - 4 @ r10 (inflated size location)
.word _got_start @ r11
.word _got_end @ ip
- .word user_stack_end @ sp
-LC1: .word reloc_end - reloc_start
+ .word .L_user_stack_end @ sp
+ .word _end - restart + 16384 + 1024*1024
.size LC0, . - LC0
#ifdef CONFIG_ARCH_RPC
@@ -347,7 +554,7 @@ params: ldr r0, =0x10000100 @ params_phys for RPC
* On exit,
* r0, r1, r2, r3, r9, r10, r12 corrupted
* This routine must preserve:
- * r4, r5, r6, r7, r8
+ * r4, r7, r8
*/
.align 5
cache_on: mov r3, #8 @ cache_on function
@@ -415,6 +622,12 @@ __armv3_mpu_cache_on:
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
__setup_mmu: sub r3, r4, #16384 @ Page directory size
bic r3, r3, #0xff @ Align the pointer
bic r3, r3, #0x3f00
@@ -426,13 +639,14 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
mov r9, r0, lsr #18
mov r9, r9, lsl #18 @ start of RAM
add r10, r9, #0x10000000 @ a reasonable RAM size
- mov r1, #0x12
- orr r1, r1, #3 << 10
+ mov r1, #0x12 @ XN|U + section mapping
+ orr r1, r1, #3 << 10 @ AP=11
add r2, r3, #16384
1: cmp r1, r9 @ if virt > start of RAM
- orrhs r1, r1, #0x0c @ set cacheable, bufferable
- cmp r1, r10 @ if virt > end of RAM
- bichs r1, r1, #0x0c @ clear cacheable, bufferable
+ cmphs r10, r1 @ && end of RAM > virt
+ bic r1, r1, #0x1c @ clear XN|U + C + B
+ orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
+ orrhs r1, r1, r6 @ set RAM section settings
str r1, [r0], #4 @ 1:1 mapping
add r1, r1, #1048576
teq r0, r2
@@ -443,9 +657,10 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
* so there is no map overlap problem for up to 1 MB compressed kernel.
* If the execution is in RAM then we would only be duplicating the above.
*/
- mov r1, #0x1e
+ orr r1, r6, #0x04 @ ensure B is set for this
orr r1, r1, #3 << 10
- mov r2, pc, lsr #20
+ mov r2, pc
+ mov r2, r2, lsr #20
orr r1, r1, r2, lsl #20
add r0, r3, r2, lsl #2
str r1, [r0], #4
@@ -454,9 +669,25 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
mov pc, lr
ENDPROC(__setup_mmu)
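
[Editor's note: what __setup_mmu builds, restated as a C sketch with
illustrative names. It is a flat 16 KiB first-level table of 4096 one-MiB
section entries; the 256 MiB window around the current PC is treated as RAM
and gets the cacheability bits passed in r6, everything else is mapped
uncacheable (XN where the architecture honours it).]

    #include <stdint.h>

    /* Sketch: build the 1:1 section map.  pgdir must be 16 KiB
     * aligned; ram_flags corresponds to r6 (CB_BITS plus mode bits). */
    static void setup_identity_map(uint32_t *pgdir,
                                   uint32_t ram_start, uint32_t ram_end,
                                   uint32_t ram_flags)
    {
        uint32_t base = 0x12 | (3u << 10);  /* section + AP=11, like r1 */
        for (uint32_t i = 0; i < 4096; i++) {
            uint32_t addr  = i << 20;       /* 1 MiB per entry */
            uint32_t entry = (addr | base) & ~0x1cu; /* clear XN|U,C,B */
            if (addr >= ram_start && addr <= ram_end)
                entry |= ram_flags;         /* cacheable RAM section */
            else
                entry |= 0x10;              /* XN|U for non-RAM      */
            pgdir[i] = entry;
        }
    }
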
+@ Enable unaligned access on v6, to allow better code generation
+@ for the decompressor C code:
+__armv6_mmu_cache_on:
+ mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ mcr p15, 0, r0, c1, c0, 0 @ write SCTLR
+ b __armv4_mmu_cache_on
+
+__arm926ejs_mmu_cache_on:
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+ mov r0, #4 @ put dcache in WT mode
+ mcr p15, 7, r0, c15, c0, 0
+#endif
+
__armv4_mmu_cache_on:
mov r12, lr
#ifdef CONFIG_MMU
+ mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -464,9 +695,7 @@ __armv4_mmu_cache_on:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
@@ -478,6 +707,7 @@ __armv7_mmu_cache_on:
#ifdef CONFIG_MMU
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
tst r11, #0xf @ VMSA
+ movne r6, #CB_BITS | 0x02 @ !XN
blne __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -485,17 +715,24 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ @ (needed for ARM1176)
#ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
+ mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
- movne r1, #-1
+ movne r1, #0xfffffffd @ domain 0 = client
+ bic r6, r6, #1 << 31 @ 32-bit translation system
+ bic r6, r6, #3 << 0 @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+ mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
+ mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back
mov r0, #0
@@ -504,6 +741,7 @@ __armv7_mmu_cache_on:
__fa526_cache_on:
mov r12, lr
+ mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
@@ -516,18 +754,6 @@ __fa526_cache_on:
mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
mov pc, r12
-__arm6_mmu_cache_on:
- mov r12, lr
- bl __setup_mmu
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
- mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
- mov r0, #0x30
- bl __common_mmu_cache_on
- mov r0, #0
- mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
- mov pc, r12
-
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
@@ -543,42 +769,7 @@ __common_mmu_cache_on:
sub pc, lr, r0, lsr #32 @ properly flush pipeline
#endif
-/*
- * All code following this line is relocatable. It is relocated by
- * the above code to the end of the decompressed kernel image and
- * executed there. During this time, we have no stacks.
- *
- * r0 = decompressed kernel length
- * r1-r3 = unused
- * r4 = kernel execution address
- * r5 = decompressed kernel start
- * r7 = architecture ID
- * r8 = atags pointer
- * r9-r12,r14 = corrupted
- */
- .align 5
-reloc_start: add r9, r5, r0
- sub r9, r9, #128 @ do not copy the stack
- debug_reloc_start
- mov r1, r4
-1:
- .rept 4
- ldmia r5!, {r0, r2, r3, r10 - r12, r14} @ relocate kernel
- stmia r1!, {r0, r2, r3, r10 - r12, r14}
- .endr
-
- cmp r5, r9
- blo 1b
- mov sp, r1
- add sp, sp, #128 @ relocate the stack
- debug_reloc_end
-
-call_kernel: bl cache_clean_flush
- bl cache_off
- mov r0, #0 @ must be zero
- mov r1, r7 @ restore architecture number
- mov r2, r8 @ restore atags pointer
- mov pc, r4 @ call kernel
+#define PROC_ENTRY_SIZE (4*5)
/*
* Here follow the relocatable cache support functions for the
@@ -607,7 +798,7 @@ call_cache_fn: adr r12, proc_types
ARM( addeq pc, r12, r3 ) @ call cache function
THUMB( addeq r12, r3 )
THUMB( moveq pc, r12 ) @ call cache function
- add r12, r12, #4*5
+ add r12, r12, #PROC_ENTRY_SIZE
b 1b
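
[Editor's note: the table being scanned has a fixed entry layout, which is why
the stride is now the named PROC_ENTRY_SIZE (4*5 bytes) rather than a magic
number. A C sketch of the match logic, with illustrative types.]

    #include <stdint.h>

    /* Sketch: each proc_types entry is PROC_ENTRY_SIZE = 20 bytes. */
    struct proc_entry {
        uint32_t id;        /* CPU ID value to match      */
        uint32_t mask;      /* which ID bits are relevant */
        uint32_t ops[3];    /* W(b) slots: on, off, flush */
    };

    static const struct proc_entry *
    find_proc(const struct proc_entry *t, const struct proc_entry *end,
              uint32_t cpu_id)
    {
        for (; t < end; t++)
            if ((cpu_id & t->mask) == t->id)
                return t;
        return 0;           /* fell off the end: no match */
    }
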
/*
@@ -627,18 +818,8 @@ call_cache_fn: adr r12, proc_types
.align 2
.type proc_types,#object
proc_types:
- .word 0x41560600 @ ARM6/610
- .word 0xffffffe0
- W(b) __arm6_mmu_cache_off @ works, but slow
- W(b) __arm6_mmu_cache_off
- mov pc, lr
- THUMB( nop )
-@ b __arm6_mmu_cache_on @ untested
-@ b __arm6_mmu_cache_off
-@ b __armv3_mmu_cache_flush
-
- .word 0x00000000 @ old ARM ID
- .word 0x0000f000
+ .word 0x41000000 @ old ARM ID
+ .word 0xff00f000
mov pc, lr
THUMB( nop )
mov pc, lr
@@ -648,8 +829,10 @@ proc_types:
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
- W(b) __arm7_mmu_cache_off
- W(b) __arm7_mmu_cache_off
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
mov pc, lr
THUMB( nop )
@@ -672,6 +855,12 @@ proc_types:
W(b) __armv4_mpu_cache_off
W(b) __armv4_mpu_cache_flush
+ .word 0x41069260 @ ARM926EJ-S (v5TEJ)
+ .word 0xff0ffff0
+ W(b) __arm926ejs_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
+
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
mov pc, lr
@@ -750,13 +939,7 @@ proc_types:
.word 0x0007b000 @ ARMv6
.word 0x000ff000
- W(b) __armv4_mmu_cache_on
- W(b) __armv4_mmu_cache_off
- W(b) __armv6_mmu_cache_flush
-
- .word 0x560f5810 @ Marvell PJ4 ARMv6
- .word 0xff0ffff0
- W(b) __armv4_mmu_cache_on
+ W(b) __armv6_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv6_mmu_cache_flush
@@ -777,6 +960,16 @@ proc_types:
.size proc_types, . - proc_types
+ /*
+ * If you get a "non-constant expression in ".if" statement"
+ * error from the assembler on this line, check that you have
+ * not accidentally written a "b" instruction where you should
+ * have written W(b).
+ */
+ .if (. - proc_types) % PROC_ENTRY_SIZE != 0
+ .error "The size of one or more proc_types entries is wrong."
+ .endif
+
/*
* Turn off the Cache and MMU. ARMv3 does not support
* reading the control register, but ARMv4 does.
@@ -784,7 +977,7 @@ proc_types:
* On exit,
* r0, r1, r2, r3, r9, r12 corrupted
* This routine must preserve:
- * r4, r6, r7
+ * r4, r7, r8
*/
.align 5
cache_off: mov r3, #12 @ cache_off function
@@ -838,28 +1031,13 @@ __armv7_mmu_cache_off:
mcr p15, 0, r0, c7, c5, 4 @ ISB
mov pc, r12
-__arm6_mmu_cache_off:
- mov r0, #0x00000030 @ ARM6 control reg.
- b __armv3_mmu_cache_off
-
-__arm7_mmu_cache_off:
- mov r0, #0x00000070 @ ARM7 control reg.
- b __armv3_mmu_cache_off
-
-__armv3_mmu_cache_off:
- mcr p15, 0, r0, c1, c0, 0 @ turn MMU and cache off
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
- mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
- mov pc, lr
-
/*
* Clean and flush the cache to maintain consistency.
*
* On exit,
* r1, r2, r3, r9, r10, r11, r12 corrupted
* This routine must preserve:
- * r0, r4, r5, r6, r7
+ * r4, r6, r7, r8
*/
.align 5
cache_clean_flush:
@@ -1081,9 +1259,28 @@ memdump: mov r12, r0
#endif
.ltorg
-reloc_end:
+
+#ifdef CONFIG_ARM_VIRT_EXT
+.align 5
+__hyp_reentry_vectors:
+ W(b) . @ reset
+ W(b) . @ undef
+ W(b) . @ svc
+ W(b) . @ pabort
+ W(b) . @ dabort
+ W(b) __enter_kernel @ hyp
+ W(b) . @ irq
+ W(b) . @ fiq
+#endif /* CONFIG_ARM_VIRT_EXT */
+
+__enter_kernel:
+ mov r0, #0 @ must be 0
+ ARM( mov pc, r4 ) @ call kernel
+ THUMB( bx r4 ) @ entry point is always ARM
+
+reloc_code_end:
.align
- .section ".stack", "w"
-user_stack: .space 4096
-user_stack_end:
+ .section ".stack", "aw", %nobits
+.L_user_stack: .space 4096
+.L_user_stack_end: