Diffstat (limited to 'arch/arm/boot/compressed/head.S')
 arch/arm/boot/compressed/head.S | 419 +++++++++++++++++++++++++++-----------
 1 file changed, 318 insertions(+), 101 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index adf583cd0c3..3a8b32df6b3 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -9,7 +9,9 @@
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .arch armv7-a
/*
* Debugging stuff
*
@@ -43,7 +45,7 @@
#else
-#include <mach/debug-macro.S>
+#include CONFIG_DEBUG_LL_INCLUDE
.macro writeb, ch, rb
senduart \ch, \rb
@@ -58,11 +60,6 @@
add \rb, \rb, #0x00010000 @ Ser1
#endif
.endm
-#elif defined(CONFIG_ARCH_S3C2410)
- .macro loadsp, rb, tmp
- mov \rb, #0x50000000
- add \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
- .endm
#else
.macro loadsp, rb, tmp
addruart \rb, \tmp
@@ -132,10 +129,15 @@ start:
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
THUMB( .thumb )
-1: mov r7, r1 @ save architecture ID
+1:
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
+ mrs r9, cpsr
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install @ get into SVC mode, reversibly
+#endif
+ mov r7, r1 @ save architecture ID
mov r8, r2 @ save atags pointer
-#ifndef __ARM_ARCH_2__
/*
* Booting from Angel - need to enter SVC mode and disable
* FIQs/IRQs (numeric definitions from angel arm.h source).
@@ -148,13 +150,9 @@ start:
ARM( swi 0x123456 ) @ angel_SWI_ARM
THUMB( svc 0xab ) @ angel_SWI_THUMB
not_angel:
- mrs r2, cpsr @ turn off interrupts to
- orr r2, r2, #0xc0 @ prevent angel from running
- msr cpsr_c, r2
-#else
- teqp pc, #0x0c000003 @ turn off interrupts
-#endif
-
+ safe_svcmode_maskall r0
+ msr spsr_cxsf, r9 @ Save the CPU boot mode in
+ @ SPSR
/*
* Note that some cache flushing and other stuff may
* be needed here - is there an Angel SWI call for this?
@@ -176,19 +174,44 @@ not_angel:
ldr r4, =zreladdr
#endif
- bl cache_on
+ /*
+ * Set up a page table only if it won't overwrite ourself.
+ * That means r4 < pc && r4 - 16k page directory > &_end.
+ * Given that r4 > &_end is most infrequent, we add a rough
+ * additional 1MB of room for a possible appended DTB.
+ */
+ mov r0, pc
+ cmp r0, r4
+ ldrcc r0, LC0+32
+ addcc r0, r0, pc
+ cmpcc r4, r0
+ orrcc r4, r4, #1 @ remember we skipped cache_on
+ blcs cache_on
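
A minimal C sketch of the check above, with illustrative names standing in for pc, r4 and the LC0+32 constant:

#include <stdbool.h>

/* Skip cache_on (and hence __setup_mmu) when the 16 KiB page directory
 * placed just below the final kernel address could land on this image.
 * image_room stands for the constant _end - restart + 16384 + 1M. */
static bool skip_cache_on(unsigned long pc, unsigned long final_addr,
                          unsigned long image_room)
{
    return pc < final_addr && final_addr < pc + image_room;
}
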
restart: adr r0, LC0
- ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12}
- ldr sp, [r0, #32]
+ ldmia r0, {r1, r2, r3, r6, r10, r11, r12}
+ ldr sp, [r0, #28]
/*
* We might be running at a different address. We need
* to fix up various pointers.
*/
sub r0, r0, r1 @ calculate the delta offset
- add r5, r5, r0 @ _start
add r6, r6, r0 @ _edata
+ add r10, r10, r0 @ inflated kernel size location
+
+ /*
+ * The kernel build system appends the size of the
+ * decompressed kernel at the end of the compressed data
+ * in little-endian form.
+ */
+ ldrb r9, [r10, #0]
+ ldrb lr, [r10, #1]
+ orr r9, r9, lr, lsl #8
+ ldrb lr, [r10, #2]
+ ldrb r10, [r10, #3]
+ orr r9, r9, lr, lsl #16
+ orr r9, r9, r10, lsl #24
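
The four ldrb/orr steps amount to a plain little-endian 32-bit load; in C, with an illustrative helper name:

#include <stdint.h>

/* Combine four bytes into a little-endian 32-bit value regardless of
 * the CPU's own byte order -- exactly what the ldrb/orr sequence does. */
static uint32_t read_le32(const uint8_t *p)
{
    return (uint32_t)p[0]
         | (uint32_t)p[1] << 8
         | (uint32_t)p[2] << 16
         | (uint32_t)p[3] << 24;
}
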
#ifndef CONFIG_ZBOOT_ROM
/* malloc space is above the relocated stack (64k max) */
@@ -203,34 +226,156 @@ restart: adr r0, LC0
mov r10, r6
#endif
+ mov r5, #0 @ init dtb size to 0
+#ifdef CONFIG_ARM_APPENDED_DTB
+/*
+ * r0 = delta
+ * r2 = BSS start
+ * r3 = BSS end
+ * r4 = final kernel address (possibly with LSB set)
+ * r5 = appended dtb size (still unknown)
+ * r6 = _edata
+ * r7 = architecture ID
+ * r8 = atags/device tree pointer
+ * r9 = size of decompressed image
+ * r10 = end of this image, including bss/stack/malloc space if non XIP
+ * r11 = GOT start
+ * r12 = GOT end
+ * sp = stack pointer
+ *
+ * if a device tree (dtb) is appended to zImage, advance r10 so that the
+ * dtb data will get relocated along with the kernel if necessary.
+ */
+
+ ldr lr, [r6, #0]
+#ifndef __ARMEB__
+ ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
+#else
+ ldr r1, =0xd00dfeed
+#endif
+ cmp lr, r1
+ bne dtb_check_done @ not found
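
The magic check restated as a C sketch (helper name illustrative): the FDT magic 0xd00dfeed is stored big-endian in the blob, so a little-endian CPU reads the raw word as 0xedfe0dd0.

#include <stdint.h>

/* Nonzero if the first word at 'blob' looks like a flattened device
 * tree, comparing in the CPU's native byte order as the asm does. */
static int has_appended_dtb(const uint32_t *blob)
{
#ifdef __ARMEB__
    return *blob == 0xd00dfeedU;
#else
    return *blob == 0xedfe0dd0U;
#endif
}
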
+
+#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
+ /*
+ * OK... Let's do some funky business here.
+ * If we do have a DTB appended to zImage, and we do have
+ * an ATAG list around, we want the latter to be translated
+ * and folded into the former here. To be on the safe side,
+ * let's temporarily move the stack away into the malloc
+ * area. No GOT fixup has occurred yet, but none of the
+ * code we're about to call uses any global variable.
+ */
+ add sp, sp, #0x10000
+ stmfd sp!, {r0-r3, ip, lr}
+ mov r0, r8
+ mov r1, r6
+ sub r2, sp, r6
+ bl atags_to_fdt
+
+ /*
+ * If the returned value is 1, there is no ATAG at the location
+ * pointed to by r8. Try the typical 0x100 offset from start
+ * of RAM and hope for the best.
+ */
+ cmp r0, #1
+ sub r0, r4, #TEXT_OFFSET
+ bic r0, r0, #1
+ add r0, r0, #0x100
+ mov r1, r6
+ sub r2, sp, r6
+ bleq atags_to_fdt
+
+ ldmfd sp!, {r0-r3, ip, lr}
+ sub sp, sp, #0x10000
+#endif
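
In C terms the call-and-retry sequence looks roughly like this; atags_to_fdt() is the real helper from arch/arm/boot/compressed/atags_to_fdt.c, while the wrapper and its argument names are illustrative:

extern int atags_to_fdt(void *atag_list, void *fdt, int total_space);

/* Fold the ATAG list into the appended DTB; on failure (return 1),
 * retry at the conventional RAM-start + 0x100 ATAG location. */
static void fold_atags(void *atags, void *fdt, int space,
                       unsigned long final_kernel_addr, /* r4 */
                       unsigned long text_offset)       /* TEXT_OFFSET */
{
    if (atags_to_fdt(atags, fdt, space) == 1) {
        /* mask the cache-skip LSB carried in r4, as the bic does */
        void *fallback = (void *)(((final_kernel_addr - text_offset) & ~1UL)
                                  + 0x100);
        atags_to_fdt(fallback, fdt, space);
    }
}
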
+
+ mov r8, r6 @ use the appended device tree
+
+ /*
+ * Make sure that the DTB doesn't end up in the final
+ * kernel's .bss area. To do so, we adjust the decompressed
+ * kernel size to compensate if that .bss size is larger
+ * than the relocated code.
+ */
+ ldr r5, =_kernel_bss_size
+ adr r1, wont_overwrite
+ sub r1, r6, r1
+ subs r1, r5, r1
+ addhi r9, r9, r1
+
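
A sketch of the size adjustment above, with illustrative names:

/* If the kernel's .bss is larger than the code carried along from
 * wont_overwrite to _edata, grow the decompressed-size estimate so
 * the DTB also ends up beyond the final .bss. */
static unsigned long pad_size_for_bss(unsigned long decompressed_size,
                                      unsigned long kernel_bss_size,
                                      unsigned long edata,
                                      unsigned long wont_overwrite_addr)
{
    unsigned long relocated_code = edata - wont_overwrite_addr;
    if (kernel_bss_size > relocated_code)
        decompressed_size += kernel_bss_size - relocated_code;
    return decompressed_size;
}
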
+ /* Get the dtb's size */
+ ldr r5, [r6, #4]
+#ifndef __ARMEB__
+ /* convert r5 (dtb size) to little endian */
+ eor r1, r5, r5, ror #16
+ bic r1, r1, #0x00ff0000
+ mov r5, r5, ror #8
+ eor r5, r5, r1, lsr #8
+#endif
+
+ /* preserve 64-bit alignment */
+ add r5, r5, #7
+ bic r5, r5, #7
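
The eor/bic/ror sequence is the classic pre-ARMv6 byte swap (no rev instruction assumed available), and the add/bic pair is a power-of-two round-up; in C, with illustrative helper names:

#include <stdint.h>

/* Byte-reverse the big-endian DTB size field, mirroring the asm. */
static uint32_t swab32(uint32_t x)
{
    uint32_t t = x ^ ((x >> 16) | (x << 16)); /* eor r1, r5, r5, ror #16 */
    t &= ~0x00ff0000U;                        /* bic r1, r1, #0x00ff0000 */
    x = (x >> 8) | (x << 24);                 /* mov r5, r5, ror #8 */
    return x ^ (t >> 8);                      /* eor r5, r5, r1, lsr #8 */
}

/* Round up to the next multiple of 8 to preserve 64-bit alignment. */
static uint32_t align8(uint32_t size)
{
    return (size + 7) & ~7U;
}
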
+
+ /* relocate some pointers past the appended dtb */
+ add r6, r6, r5
+ add r10, r10, r5
+ add sp, sp, r5
+dtb_check_done:
+#endif
+
/*
* Check to see if we will overwrite ourselves.
- * r4 = final kernel address
- * r5 = start of this image
+ * r4 = final kernel address (possibly with LSB set)
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want:
- * r4 >= r10 -> OK
- * r4 + image length <= r5 -> OK
+ * r4 - 16k page directory >= r10 -> OK
+ * r4 + image length <= address of wont_overwrite -> OK
+ * Note: the possible LSB in r4 is harmless here.
*/
+ add r10, r10, #16384
cmp r4, r10
bhs wont_overwrite
add r10, r4, r9
- cmp r10, r5
+ adr r9, wont_overwrite
+ cmp r10, r9
bls wont_overwrite
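
The two branches above encode the following overlap test; a C sketch with illustrative names (final_addr is r4, whose LSB is harmless here, and image_end is r10 before the 16 KiB bump):

#include <stdbool.h>

/* true if the decompressed kernel would overwrite this image and we
 * must relocate ourselves first. */
static bool must_relocate(unsigned long final_addr,
                          unsigned long image_end,
                          unsigned long decompressed_size,
                          unsigned long wont_overwrite_addr)
{
    if (final_addr >= image_end + 16384)   /* room for page directory */
        return false;                      /* kernel entirely above us */
    if (final_addr + decompressed_size <= wont_overwrite_addr)
        return false;                      /* kernel entirely below us */
    return true;
}
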
/*
* Relocate ourselves past the end of the decompressed kernel.
- * r5 = start of this image
* r6 = _edata
* r10 = end of the decompressed kernel
* Because we always copy ahead, we need to do it from the end and go
* backward in case the source and destination overlap.
*/
- /* Round up to next 256-byte boundary. */
- add r10, r10, #256
+ /*
+ * Bump to the next 256-byte boundary with the size of
+ * the relocation code added. This avoids overwriting
+ * ourself when the offset is small.
+ */
+ add r10, r10, #((reloc_code_end - restart + 256) & ~255)
bic r10, r10, #255
+ /* Get start of code we want to copy and align it down. */
+ adr r5, restart
+ bic r5, r5, #31
+
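
The destination/source arithmetic in C, mirroring the asm's immediates (names illustrative):

/* Destination: past the decompressed kernel, rounded to a 256-byte
 * boundary with room for the relocation code itself added first. */
static unsigned long reloc_dest(unsigned long kernel_end,
                                unsigned long reloc_code_size)
{
    unsigned long bump = (reloc_code_size + 256) & ~255UL; /* asm immediate */
    return (kernel_end + bump) & ~255UL;
}

/* Source: the restart label, aligned down to a 32-byte cache line. */
static unsigned long reloc_src(unsigned long restart_addr)
{
    return restart_addr & ~31UL;
}
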
+/* Relocate the hyp vector base if necessary */
+#ifdef CONFIG_ARM_VIRT_EXT
+ mrs r0, spsr
+ and r0, r0, #MODE_MASK
+ cmp r0, #HYP_MODE
+ bne 1f
+
+ bl __hyp_get_vectors
+ sub r0, r0, r5
+ add r0, r0, r10
+ bl __hyp_set_vectors
+1:
+#endif
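
The HYP vector fixup is plain pointer arithmetic: the vectors move by the same distance as the code about to be copied. A sketch, names illustrative:

/* copy_src = r5 (aligned source), copy_dest = r10 (destination) */
static unsigned long moved_hyp_vectors(unsigned long old_vectors,
                                       unsigned long copy_src,
                                       unsigned long copy_dest)
{
    return old_vectors - copy_src + copy_dest;
}
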
+
sub r9, r6, r5 @ size to copy
add r9, r9, #31 @ rounded up to a multiple
bic r9, r9, #31 @ ... of 32 bytes
@@ -245,7 +390,13 @@ restart: adr r0, LC0
/* Preserve offset to relocated code. */
sub r6, r9, r6
- bl cache_clean_flush
+#ifndef CONFIG_ZBOOT_ROM
+ /* cache_clean_flush may use the stack, so relocate it */
+ add sp, sp, r6
+#endif
+
+ tst r4, #1
+ bleq cache_clean_flush
adr r0, BSYM(restart)
add r0, r0, r6
@@ -257,15 +408,17 @@ wont_overwrite:
* r0 = delta
* r2 = BSS start
* r3 = BSS end
- * r4 = kernel execution address
+ * r4 = kernel execution address (possibly with LSB set)
+ * r5 = appended dtb size (0 if not present)
* r7 = architecture ID
* r8 = atags pointer
* r11 = GOT start
* r12 = GOT end
* sp = stack pointer
*/
- teq r0, #0
+ orrs r1, r0, r5
beq not_relocated
+
add r11, r11, r0
add r12, r12, r0
@@ -280,12 +433,21 @@ wont_overwrite:
/*
* Relocate all entries in the GOT table.
+ * Bump bss entries to _edata + dtb size
*/
1: ldr r1, [r11, #0] @ relocate entries in the GOT
- add r1, r1, r0 @ table. This fixes up the
- str r1, [r11], #4 @ C references.
+ add r1, r1, r0 @ This fixes up C references
+ cmp r1, r2 @ if entry >= bss_start &&
+ cmphs r3, r1 @ bss_end > entry
+ addhi r1, r1, r5 @ entry += dtb size
+ str r1, [r11], #4 @ next entry
cmp r11, r12
blo 1b
+
+ /* bump our bss pointers too */
+ add r2, r2, r5
+ add r3, r3, r5
+
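
The GOT fixup loop restated as a C sketch (names illustrative; the asm works on raw words rather than typed pointers):

/* Shift every GOT entry by the load-address delta; entries that then
 * point into .bss are additionally pushed past the appended DTB. */
static void relocate_got(unsigned long *got_start, unsigned long *got_end,
                         unsigned long delta,
                         unsigned long bss_start, unsigned long bss_end,
                         unsigned long dtb_size)
{
    for (unsigned long *p = got_start; p < got_end; p++) {
        unsigned long entry = *p + delta;
        if (entry >= bss_start && entry < bss_end)
            entry += dtb_size;
        *p = entry;
    }
}
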
#else
/*
@@ -309,6 +471,15 @@ not_relocated: mov r0, #0
cmp r2, r3
blo 1b
+ /*
+ * Did we skip the cache setup earlier?
+ * That is indicated by the LSB in r4.
+ * Do it now if so.
+ */
+ tst r4, #1
+ bic r4, r4, #1
+ blne cache_on
+
/*
* The C runtime environment should now be setup sufficiently.
* Set up some pointers, and start decompressing.
@@ -323,22 +494,41 @@ not_relocated: mov r0, #0
bl decompress_kernel
bl cache_clean_flush
bl cache_off
- mov r0, #0 @ must be zero
mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
- mov pc, r4 @ call kernel
+
+#ifdef CONFIG_ARM_VIRT_EXT
+ mrs r0, spsr @ Get saved CPU boot mode
+ and r0, r0, #MODE_MASK
+ cmp r0, #HYP_MODE @ if not booted in HYP mode...
+ bne __enter_kernel @ boot kernel directly
+
+ adr r12, .L__hyp_reentry_vectors_offset
+ ldr r0, [r12]
+ add r0, r0, r12
+
+ bl __hyp_set_vectors
+ __HVC(0) @ otherwise bounce to hyp mode
+
+ b . @ should never be reached
+
+ .align 2
+.L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - .
+#else
+ b __enter_kernel
+#endif
.align 2
.type LC0, #object
LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
- .word _start @ r5
.word _edata @ r6
- .word _image_size @ r9
+ .word input_data_end - 4 @ r10 (inflated size location)
.word _got_start @ r11
.word _got_end @ ip
- .word user_stack_end @ sp
+ .word .L_user_stack_end @ sp
+ .word _end - restart + 16384 + 1024*1024
.size LC0, . - LC0
#ifdef CONFIG_ARCH_RPC
@@ -432,6 +622,12 @@ __armv3_mpu_cache_on:
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
__setup_mmu: sub r3, r4, #16384 @ Page directory size
bic r3, r3, #0xff @ Align the pointer
bic r3, r3, #0x3f00
@@ -443,13 +639,14 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
mov r9, r0, lsr #18
mov r9, r9, lsl #18 @ start of RAM
add r10, r9, #0x10000000 @ a reasonable RAM size
- mov r1, #0x12
- orr r1, r1, #3 << 10
+ mov r1, #0x12 @ XN|U + section mapping
+ orr r1, r1, #3 << 10 @ AP=11
add r2, r3, #16384
1: cmp r1, r9 @ if virt > start of RAM
- orrhs r1, r1, #0x0c @ set cacheable, bufferable
- cmp r1, r10 @ if virt > end of RAM
- bichs r1, r1, #0x0c @ clear cacheable, bufferable
+ cmphs r10, r1 @ && end of RAM > virt
+ bic r1, r1, #0x1c @ clear XN|U + C + B
+ orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
+ orrhs r1, r1, r6 @ set RAM section settings
str r1, [r0], #4 @ 1:1 mapping
add r1, r1, #1048576
teq r0, r2
@@ -460,7 +657,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
* so there is no map overlap problem for up to 1 MB compressed kernel.
* If the execution is in RAM then we would only be duplicating the above.
*/
- mov r1, #0x1e
+ orr r1, r6, #0x04 @ ensure B is set for this
orr r1, r1, #3 << 10
mov r2, pc
mov r2, r2, lsr #20
@@ -472,9 +669,25 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
mov pc, lr
ENDPROC(__setup_mmu)
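
A rough C model of the flat section map __setup_mmu builds; the descriptor bit positions (section type and XN/U in bits 0-4, C/B in bits 2-3, AP=11 at bits 10-11) follow the constants in the asm, and all names are illustrative:

#include <stdint.h>

/* Fill a 16 KiB page directory (4096 entries) with 1:1 1 MiB section
 * mappings: RAM gets 'ram_bits' (the r6 value, section type plus C/B
 * policy), everything else is mapped uncached with XN|U set. */
static void setup_flat_map(uint32_t *pgdir, uint32_t ram_start,
                           uint32_t ram_size, uint32_t ram_bits)
{
    for (uint32_t i = 0; i < 4096; i++) {
        uint32_t virt = i << 20;
        uint32_t desc = virt | (3 << 10) | 0x12;  /* AP=11, XN|U, section */
        if (virt >= ram_start && virt - ram_start < ram_size) {
            desc &= ~0x1cU;   /* clear XN|U + C + B */
            desc |= ram_bits; /* e.g. CB_BITS | 0x12 on ARMv4 */
        }
        pgdir[i] = desc;
    }
}
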
+@ Enable unaligned access on v6, to allow better code generation
+@ for the decompressor C code:
+__armv6_mmu_cache_on:
+ mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ mcr p15, 0, r0, c1, c0, 0 @ write SCTLR
+ b __armv4_mmu_cache_on
+
+__arm926ejs_mmu_cache_on:
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+ mov r0, #4 @ put dcache in WT mode
+ mcr p15, 7, r0, c15, c0, 0
+#endif
+
__armv4_mmu_cache_on:
mov r12, lr
#ifdef CONFIG_MMU
+ mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -482,9 +695,7 @@ __armv4_mmu_cache_on:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
@@ -496,6 +707,7 @@ __armv7_mmu_cache_on:
#ifdef CONFIG_MMU
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
tst r11, #0xf @ VMSA
+ movne r6, #CB_BITS | 0x02 @ !XN
blne __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -503,17 +715,24 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ @ (needed for ARM1176)
#ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
+ mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
- movne r1, #-1
+ movne r1, #0xfffffffd @ domain 0 = client
+ bic r6, r6, #1 << 31 @ 32-bit translation system
+ bic r6, r6, #3 << 0 @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+ mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
+ mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back
mov r0, #0
@@ -522,6 +741,7 @@ __armv7_mmu_cache_on:
__fa526_cache_on:
mov r12, lr
+ mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
@@ -534,18 +754,6 @@ __fa526_cache_on:
mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
mov pc, r12
-__arm6_mmu_cache_on:
- mov r12, lr
- bl __setup_mmu
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
- mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
- mov r0, #0x30
- bl __common_mmu_cache_on
- mov r0, #0
- mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
- mov pc, r12
-
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
@@ -561,6 +769,8 @@ __common_mmu_cache_on:
sub pc, lr, r0, lsr #32 @ properly flush pipeline
#endif
+#define PROC_ENTRY_SIZE (4*5)
+
/*
* Here follow the relocatable cache support functions for the
* various processors. This is a generic hook for locating an
@@ -588,7 +798,7 @@ call_cache_fn: adr r12, proc_types
ARM( addeq pc, r12, r3 ) @ call cache function
THUMB( addeq r12, r3 )
THUMB( moveq pc, r12 ) @ call cache function
- add r12, r12, #4*5
+ add r12, r12, #PROC_ENTRY_SIZE
b 1b
/*
@@ -608,18 +818,8 @@ call_cache_fn: adr r12, proc_types
.align 2
.type proc_types,#object
proc_types:
- .word 0x41560600 @ ARM6/610
- .word 0xffffffe0
- W(b) __arm6_mmu_cache_off @ works, but slow
- W(b) __arm6_mmu_cache_off
- mov pc, lr
- THUMB( nop )
-@ b __arm6_mmu_cache_on @ untested
-@ b __arm6_mmu_cache_off
-@ b __armv3_mmu_cache_flush
-
- .word 0x00000000 @ old ARM ID
- .word 0x0000f000
+ .word 0x41000000 @ old ARM ID
+ .word 0xff00f000
mov pc, lr
THUMB( nop )
mov pc, lr
@@ -629,8 +829,10 @@ proc_types:
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
- W(b) __arm7_mmu_cache_off
- W(b) __arm7_mmu_cache_off
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
mov pc, lr
THUMB( nop )
@@ -653,6 +855,12 @@ proc_types:
W(b) __armv4_mpu_cache_off
W(b) __armv4_mpu_cache_flush
+ .word 0x41069260 @ ARM926EJ-S (v5TEJ)
+ .word 0xff0ffff0
+ W(b) __arm926ejs_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
+
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
mov pc, lr
@@ -731,13 +939,7 @@ proc_types:
.word 0x0007b000 @ ARMv6
.word 0x000ff000
- W(b) __armv4_mmu_cache_on
- W(b) __armv4_mmu_cache_off
- W(b) __armv6_mmu_cache_flush
-
- .word 0x560f5810 @ Marvell PJ4 ARMv6
- .word 0xff0ffff0
- W(b) __armv4_mmu_cache_on
+ W(b) __armv6_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv6_mmu_cache_flush
@@ -758,6 +960,16 @@ proc_types:
.size proc_types, . - proc_types
+ /*
+ * If you get a "non-constant expression in ".if" statement"
+ * error from the assembler on this line, check that you have
+ * not accidentally written a "b" instruction where you should
+ * have written W(b).
+ */
+ .if (. - proc_types) % PROC_ENTRY_SIZE != 0
+ .error "The size of one or more proc_types entries is wrong."
+ .endif
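
The proc_types scan in call_cache_fn is a masked-ID lookup over fixed-size records; a C sketch where the struct stands in for one PROC_ENTRY_SIZE (20-byte) entry and the function pointers stand in for the three W(b)/mov-pc slots:

#include <stddef.h>
#include <stdint.h>

struct proc_entry {
    uint32_t id;               /* expected CPU ID bits  */
    uint32_t mask;             /* which bits to compare */
    void (*cache_on)(void);    /* slot 1: W(b) ..._cache_on    */
    void (*cache_off)(void);   /* slot 2: W(b) ..._cache_off   */
    void (*cache_flush)(void); /* slot 3: W(b) ..._cache_flush */
};

/* First entry whose masked ID matches wins; the asm falls through to
 * an "unrecognised" path if nothing matches. */
static const struct proc_entry *find_proc(const struct proc_entry *table,
                                          size_t n, uint32_t cpuid)
{
    for (size_t i = 0; i < n; i++)
        if ((cpuid & table[i].mask) == (table[i].id & table[i].mask))
            return &table[i];
    return NULL;
}
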
+
/*
* Turn off the Cache and MMU. ARMv3 does not support
* reading the control register, but ARMv4 does.
@@ -819,21 +1031,6 @@ __armv7_mmu_cache_off:
mcr p15, 0, r0, c7, c5, 4 @ ISB
mov pc, r12
-__arm6_mmu_cache_off:
- mov r0, #0x00000030 @ ARM6 control reg.
- b __armv3_mmu_cache_off
-
-__arm7_mmu_cache_off:
- mov r0, #0x00000070 @ ARM7 control reg.
- b __armv3_mmu_cache_off
-
-__armv3_mmu_cache_off:
- mcr p15, 0, r0, c1, c0, 0 @ turn MMU and cache off
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
- mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
- mov pc, lr
-
/*
* Clean and flush the cache to maintain consistency.
*
@@ -1063,7 +1260,27 @@ memdump: mov r12, r0
.ltorg
+#ifdef CONFIG_ARM_VIRT_EXT
+.align 5
+__hyp_reentry_vectors:
+ W(b) . @ reset
+ W(b) . @ undef
+ W(b) . @ svc
+ W(b) . @ pabort
+ W(b) . @ dabort
+ W(b) __enter_kernel @ hyp
+ W(b) . @ irq
+ W(b) . @ fiq
+#endif /* CONFIG_ARM_VIRT_EXT */
+
+__enter_kernel:
+ mov r0, #0 @ must be 0
+ ARM( mov pc, r4 ) @ call kernel
+ THUMB( bx r4 ) @ entry point is always ARM
+
+reloc_code_end:
+
.align
.section ".stack", "aw", %nobits
-user_stack: .space 4096
-user_stack_end:
+.L_user_stack: .space 4096
+.L_user_stack_end: