Diffstat (limited to 'arch/powerpc/kernel/head_44x.S')
-rw-r--r--  arch/powerpc/kernel/head_44x.S  209
1 file changed, 161 insertions, 48 deletions
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 562305b40a8..c334f53453f 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
#include <asm/synch.h>
#include "head_booke.h"
@@ -60,15 +61,37 @@ _ENTRY(_start);
* of abatron_pteptrs
*/
nop
+ mr r31,r3 /* save device tree ptr */
+ li r24,0 /* CPU number */
+
+#ifdef CONFIG_RELOCATABLE
/*
- * Save parameters we are passed
+ * Relocate ourselves to the current runtime address.
+ * This is called only by the Boot CPU.
+ * "relocate" is called with our current runtime virtual
+ * address.
+ * r21 will be loaded with the physical runtime address of _stext
*/
- mr r31,r3
- mr r30,r4
- mr r29,r5
- mr r28,r6
- mr r27,r7
- li r24,0 /* CPU number */
+ bl 0f /* Get our runtime address */
+0: mflr r21 /* Make it accessible */
+ addis r21,r21,(_stext - 0b)@ha
+ addi r21,r21,(_stext - 0b)@l /* Get our current runtime base */
+
+ /*
+ * We have the runtime (virtual) address of our base.
+ * We calculate our offset from the start of the 256M page we are in.
+ * We then map the 256M page we belong to at PAGE_OFFSET and
+ * get going from there.
+ */
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ rlwinm r6,r21,0,4,31 /* r6 = PHYS_START % 256M */
+ rlwinm r5,r4,0,4,31 /* r5 = KERNELBASE % 256M */
+ subf r3,r5,r6 /* r3 = r6 - r5 */
+ add r3,r4,r3 /* Required Virtual Address */
+
+ bl relocate
+#endif
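
The CONFIG_RELOCATABLE block above packs its address arithmetic into rlwinm masks: after the bl 0f / mflr trick recovers the current PC, the code keeps the kernel's offset within whatever 256M page it was loaded into and swaps the page base for KERNELBASE's. A minimal C sketch of the same math (reloc_target is a hypothetical name, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define MASK_256M 0x0fffffffu	/* low 28 bits: offset within a 256 MB page */

/* Mirrors the rlwinm/subf/add sequence: keep our offset within the
 * 256 MB page, substitute KERNELBASE's page base. */
static uint32_t reloc_target(uint32_t stext_run, uint32_t kernelbase)
{
	uint32_t run_off  = stext_run  & MASK_256M;	/* r6 = PHYS_START % 256M */
	uint32_t link_off = kernelbase & MASK_256M;	/* r5 = KERNELBASE % 256M */

	return kernelbase + (run_off - link_off);	/* r4 + (r6 - r5) */
}

int main(void)
{
	/* e.g. running 4 MB into a 256 MB page, KERNELBASE = 0xc0000000 */
	printf("0x%08x\n", reloc_target(0x00400000, 0xc0000000)); /* 0xc0400000 */
	return 0;
}
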
bl init_cpu_state
@@ -92,14 +115,94 @@ _ENTRY(_start);
bl early_init
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * Relocatable kernel support based on processing of dynamic
+ * relocation entries.
+ *
+ * r25 will contain RPN/ERPN for the start address of memory
+ * r21 will contain the current offset of _stext
+ */
+ lis r3,kernstart_addr@ha
+ la r3,kernstart_addr@l(r3)
+
+ /*
+ * Compute the kernstart_addr.
+ * kernstart_addr => (r6,r8)
+ * kernstart_addr & ~0xfffffff => (r6,r7)
+ */
+ rlwinm r6,r25,0,28,31 /* ERPN. Bits 32-35 of Address */
+ rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */
+ rlwinm r8,r21,0,4,31 /* r8 = (_stext & 0xfffffff) */
+ or r8,r7,r8 /* Compute the lower 32bit of kernstart_addr */
+
+ /* Store kernstart_addr */
+ stw r6,0(r3) /* higher 32bit */
+ stw r8,4(r3) /* lower 32bit */
+
+ /*
+ * Compute the virt_phys_offset:
+ * virt_phys_offset = stext.run - kernstart_addr
+ *
+ * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
+ * When we relocate, we have:
+ *
+ * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
+ *
+ * hence:
+ * virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
+ *
+ */
+
+ /* KERNELBASE&~0xfffffff => (r4,r5) */
+ li r4, 0 /* higher 32bit */
+ lis r5,KERNELBASE@h
+ rlwinm r5,r5,0,0,3 /* Align to 256M, lower 32bit */
+
+ /*
+ * 64bit subtraction.
+ */
+ subfc r5,r7,r5
+ subfe r4,r6,r4
+
+ /* Store virt_phys_offset */
+ lis r3,virt_phys_offset@ha
+ la r3,virt_phys_offset@l(r3)
+
+ stw r4,0(r3)
+ stw r5,4(r3)
+
+#elif defined(CONFIG_DYNAMIC_MEMSTART)
+ /*
+ * Mapping-based, page-aligned dynamic kernel loading.
+ *
+ * r25 will contain RPN/ERPN for the start address of memory
+ *
+ * Add the difference between KERNELBASE and PAGE_OFFSET to the
+ * start of physical memory to get kernstart_addr.
+ */
+ lis r3,kernstart_addr@ha
+ la r3,kernstart_addr@l(r3)
+
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ lis r5,PAGE_OFFSET@h
+ ori r5,r5,PAGE_OFFSET@l
+ subf r4,r5,r4
+
+ rlwinm r6,r25,0,28,31 /* ERPN */
+ rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */
+ add r7,r7,r4
+
+ stw r6,0(r3)
+ stw r7,4(r3)
+#endif
+
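
Both branches above reduce to address arithmetic on the RPN/ERPN pair from the boot TLB entry. A C sketch of the CONFIG_RELOCATABLE case, with compute_kernstart as a hypothetical helper; the field masks follow the comments in the assembly, and the unsigned wrap-around of the subtraction matches the subfc/subfe pair:

#include <stdint.h>
#include <stdio.h>

#define MASK_256M 0x0fffffffu

/* xlat is TLB word 1 (r25): RPN in the top bits, ERPN in the low 4 bits.
 * stext_run is the runtime address of _stext (r21). */
static void compute_kernstart(uint32_t xlat, uint32_t stext_run,
			      uint32_t kernelbase)
{
	uint64_t erpn = xlat & 0xfu;		/* r6: phys address bits 32-35 */
	uint32_t rpn  = xlat & ~MASK_256M;	/* r7: 256 MB-aligned RPN */

	uint64_t kernstart_addr = (erpn << 32) | rpn
				| (stext_run & MASK_256M);	/* (r6,r8) */

	/* virt_phys_offset = (KERNELBASE & ~0xfffffff)
	 *		    - (kernstart_addr & ~0xfffffff) */
	uint64_t virt_phys_offset = (uint64_t)(kernelbase & ~MASK_256M)
				  - (kernstart_addr & ~(uint64_t)MASK_256M);

	printf("kernstart_addr   = 0x%09llx\n",
	       (unsigned long long)kernstart_addr);
	printf("virt_phys_offset = 0x%016llx\n",
	       (unsigned long long)virt_phys_offset);
}

int main(void)
{
	/* e.g. RAM at physical 0x110000000 (ERPN 1, RPN 0x10000000),
	 * kernel loaded at its base, KERNELBASE = 0xc0000000 */
	compute_kernstart(0x10000001u, 0xc0000000u, 0xc0000000u);
	return 0;
}
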
/*
* Decide what sort of machine this is and initialize the MMU.
*/
- mr r3,r31
- mr r4,r30
- mr r5,r29
- mr r6,r28
- mr r7,r27
+ li r3,0
+ mr r4,r31
bl machine_init
bl MMU_init
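
One note on the machine_init() call: the new li r3,0 / mr r4,r31 pair is the 32-bit ABI passing a single 64-bit argument in a register pair, high word in r3 and the saved device tree pointer in r4. A runnable sketch of that convention (machine_init_sketch and the FDT address are illustrative, assuming a machine_init(u64 dt_ptr) prototype on the C side):

#include <stdint.h>
#include <stdio.h>

/* On 32-bit PowerPC a u64 argument occupies r3 (high word) and r4
 * (low word), which is why the assembly zeroes r3 and puts the
 * device tree pointer in r4. */
static void machine_init_sketch(uint64_t dt_ptr)
{
	printf("dt_ptr = 0x%llx\n", (unsigned long long)dt_ptr);
}

int main(void)
{
	uint32_t r3 = 0, r4 = 0x01c00000;	/* hypothetical FDT address */
	machine_init_sketch(((uint64_t)r3 << 32) | r4);
	return 0;
}
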
@@ -145,10 +248,11 @@ _ENTRY(_start);
interrupt_base:
/* Critical Input Interrupt */
- CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+ CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
- CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+ CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+ machine_check_exception)
MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
/* Data Storage Interrupt */
@@ -158,7 +262,8 @@ interrupt_base:
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
+ do_IRQ, EXC_XFER_LITE)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -170,29 +275,32 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
- EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
+ FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
- /* Auxillary Processor Unavailable Interrupt */
- EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+ /* Auxiliary Processor Unavailable Interrupt */
+ EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
+ AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
+ unknown_exception, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
- CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
+ CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
+ CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
/* Data TLB Error Interrupt */
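
The mechanical change running through this hunk is that every exception macro now names its interrupt: EXCEPTION() takes an explicit BOOKE_INTERRUPT_* value, NORMAL_EXCEPTION_PROLOG() takes it as an argument, and the critical-level macros take a shorthand (CRITICAL, MACHINE_CHECK, WATCHDOG) that the header expands. A sketch of the reworked EXCEPTION() in head_booke.h, reconstructed from the call sites above (the body shown is an assumption, not quoted from the patch):

/* head_booke.h (sketch): the interrupt number is threaded into the
 * prolog so lower-level code can tell which vector is being serviced. */
#define EXCEPTION(n, intno, label, hdlr, xfer)		\
	START_EXCEPTION(label);				\
	NORMAL_EXCEPTION_PROLOG(intno);			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	xfer(n, hdlr)

The critical-level macros presumably follow the same pattern with their own prolog, which is why the call sites only gained one extra argument.
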
@@ -661,6 +769,8 @@ finish_tlb_load_47x:
*/
DEBUG_CRIT_EXCEPTION
+interrupt_end:
+
/*
* Global functions
*/
@@ -674,24 +784,6 @@ _GLOBAL(__fixup_440A_mcheck)
sync
blr
-/*
- * extern void giveup_altivec(struct task_struct *prev)
- *
- * The 44x core does not have an AltiVec unit.
- */
-_GLOBAL(giveup_altivec)
- blr
-
-/*
- * extern void giveup_fpu(struct task_struct *prev)
- *
- * The 44x core does not have an FPU.
- */
-#ifndef CONFIG_PPC_FPU
-_GLOBAL(giveup_fpu)
- blr
-#endif
-
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
@@ -717,6 +809,8 @@ _GLOBAL(init_cpu_state)
/* We use the PVR to differentiate 44x cores from 476 */
mfspr r3,SPRN_PVR
srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476FPE@h
+ beq head_start_47x
cmplwi cr0,r3,PVR_476@h
beq head_start_47x
cmplwi cr0,r3,PVR_476_ISS@h
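
The dispatch above keys on the top half of the PVR: any of the three 476 variants, now including the new 476FPE part, is sent to head_start_47x. Equivalent C, with placeholder PVR values (the real constants live in asm/reg.h):

#include <stdint.h>

/* Placeholder values for illustration only -- the real PVR_476FPE,
 * PVR_476 and PVR_476_ISS are defined in arch/powerpc/include/asm/reg.h. */
#define PVR_476FPE	0x7ff50000u
#define PVR_476		0x11a52000u
#define PVR_476_ISS	0x00052000u

static int is_47x_core(uint32_t pvr)
{
	uint32_t top = pvr >> 16;		/* srwi r3,r3,16 */

	return top == (PVR_476FPE >> 16)	/* cmplwi ...,PVR_*@h */
	    || top == (PVR_476     >> 16)
	    || top == (PVR_476_ISS >> 16);
}
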
@@ -785,12 +879,29 @@ skpinv: addi r4,r4,1 /* Increment */
/*
* Configure and load pinned entry into TLB slot 63.
*/
+#ifdef CONFIG_NONSTATIC_KERNEL
+ /*
+ * In the case of a NONSTATIC_KERNEL we reuse the TLB XLAT
+ * entry of the initial mapping set up by the boot loader.
+ * The XLAT entry is stored in r25.
+ */
+
+ /* Read the XLAT entry for our current mapping */
+ tlbre r25,r23,PPC44x_TLB_XLAT
+
+ lis r3,KERNELBASE@h
+ ori r3,r3,KERNELBASE@l
+
+ /* Use our current RPN entry */
+ mr r4,r25
+#else
lis r3,PAGE_OFFSET@h
ori r3,r3,PAGE_OFFSET@l
/* Kernel is at the base of RAM */
li r4, 0 /* Load the kernel physical address */
+#endif
/* Load the kernel PID = 0 */
li r0,0
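
The point of the NONSTATIC_KERNEL branch is that TLB word 1 (the "XLAT" word) already describes where the boot loader put us: RPN in the upper bits, ERPN (physical address bits 32-35) in the low nibble. Reusing r25 verbatim pins the kernel at its actual load address, including loads above 4 GB via a nonzero ERPN, instead of hard-wiring physical zero; the 47x path further down does the same when it writes r25 as word 1 of the bolted entry. A small decoder sketch (field masks assumed from the comments above, for a 256 MB mapping):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xlat = 0x10000001;	/* example TLB word 1 (r25) */

	/* For the 256 MB kernel mapping only the 256 MB-aligned part of
	 * the RPN matters; ERPN extends the physical address to 36 bits. */
	printf("RPN  = 0x%08x\n", xlat & 0xf0000000u);
	printf("ERPN = 0x%x\n",   xlat & 0xfu);
	return 0;
}
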
@@ -1000,9 +1111,6 @@ clear_utlb_entry:
lis r3,PAGE_OFFSET@h
ori r3,r3,PAGE_OFFSET@l
- /* Kernel is at the base of RAM */
- li r4, 0 /* Load the kernel physical address */
-
/* Load the kernel PID = 0 */
li r0,0
mtspr SPRN_PID,r0
@@ -1012,9 +1120,8 @@ clear_utlb_entry:
clrrwi r3,r3,12 /* Mask off the effective page number */
ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
- /* Word 1 */
- clrrwi r4,r4,12 /* Mask off the real page number */
- /* ERPN is 0 for first 4GB page */
+ /* Word 1 - use r25. RPN is the same as the original entry */
+
/* Word 2 */
li r5,0
ori r5,r5,PPC47x_TLB2_S_RWX
@@ -1025,7 +1132,7 @@ clear_utlb_entry:
/* We write to way 0 and bolted 0 */
lis r0,0x8800
tlbwe r3,r0,0
- tlbwe r4,r0,1
+ tlbwe r25,r0,1
tlbwe r5,r0,2
/*
@@ -1123,7 +1230,13 @@ head_start_common:
lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
mtspr SPRN_IVPR,r4
- addis r22,r22,KERNELBASE@h
+ /*
+ * If the kernel was loaded into a 256 MB page other than the first,
+ * mask off the most significant 4 bits to get its offset from the
+ * start of physical memory.
+ */
+ rlwinm r22,r22,0,4,31
+ addis r22,r22,PAGE_OFFSET@h
mtlr r22
isync
blr
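
The fixup in this last hunk is the same 256 MB-page trick as at boot: the saved return address still holds a physical-side value, so its top four bits (the 256 MB page number) are dropped and PAGE_OFFSET's are put in their place. In C terms (a sketch, not patch code):

#include <stdint.h>
#include <stdio.h>

/* virt = PAGE_OFFSET + (phys & 0x0fffffff): drop the 256 MB page bits
 * and substitute the kernel mapping's.  addis adds only the high half
 * of PAGE_OFFSET, which suffices because its low 16 bits are zero. */
static uint32_t phys_to_boot_virt(uint32_t addr, uint32_t page_offset)
{
	return (addr & 0x0fffffffu)	/* rlwinm r22,r22,0,4,31 */
	     + page_offset;		/* addis r22,r22,PAGE_OFFSET@h */
}

int main(void)
{
	printf("0x%08x\n", phys_to_boot_virt(0x10001234, 0xc0000000));
	return 0;
}
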