Diffstat (limited to 'arch/powerpc/kernel/misc_32.S')
 arch/powerpc/kernel/misc_32.S | 705 ++++++++++++++++++++++++++++++-----------
 1 file changed, 482 insertions(+), 223 deletions(-)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 9d2c56621f1..7c6bb4b17b4 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -8,6 +8,8 @@
  * kexec bits:
  * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
  * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ * PPC44x port. Copyright (C) 2011, IBM Corporation
+ * Author: Suzuki Poulose <suzuki@in.ibm.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -29,10 +31,53 @@
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
 #include <asm/kexec.h>
+#include <asm/bug.h>
+#include <asm/ptrace.h>
 
 	.text
 
 /*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
+_GLOBAL(call_do_softirq)
+	mflr	r0
+	stw	r0,4(r1)
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r3,THREAD_INFO_GAP
+	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
+	mr	r1,r3
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
+	bl	__do_softirq
+	lwz	r10,8(r1)
+	lwz	r1,0(r1)
+	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
+	mtlr	r0
+	blr
+
+/*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+ */
+_GLOBAL(call_do_irq)
+	mflr	r0
+	stw	r0,4(r1)
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r4,THREAD_INFO_GAP
+	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
+	bl	__do_irq
+	lwz	r10,8(r1)
+	lwz	r1,0(r1)
+	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
+	mtlr	r0
+	blr
+
+/*
  * This returns the high 64 bits of the product of two 64-bit numbers.
  */
 _GLOBAL(mulhdu)
@@ -152,7 +197,7 @@ _GLOBAL(low_choose_750fx_pll)
 	mtspr	SPRN_HID1,r4
 
 	/* Store new HID1 image */
-	rlwinm	r6,r1,0,0,18
+	CURRENT_THREAD_INFO(r6, r1)
 	lwz	r6,TI_CPU(r6)
 	slwi	r6,r6,2
 	addis	r6,r6,nap_save_hid1@ha
@@ -246,170 +291,6 @@ _GLOBAL(real_writeb)
 
 #endif /* CONFIG_40x */
 
-/*
- * Flush MMU TLB
- */
-_GLOBAL(_tlbia)
-#if defined(CONFIG_40x)
-	sync			/* Flush to memory before changing mapping */
-	tlbia
-	isync			/* Flush shadow TLB */
-#elif defined(CONFIG_44x)
-	li	r3,0
-	sync
-
-	/* Load high watermark */
-	lis	r4,tlb_44x_hwater@ha
-	lwz	r5,tlb_44x_hwater@l(r4)
-
-1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
-	addi	r3,r3,1
-	cmpw	0,r3,r5
-	ble	1b
-
-	isync
-#elif defined(CONFIG_FSL_BOOKE)
-	/* Invalidate all entries in TLB0 */
-	li	r3, 0x04
-	tlbivax	0,3
-	/* Invalidate all entries in TLB1 */
-	li	r3, 0x0c
-	tlbivax	0,3
-	msync
-#ifdef CONFIG_SMP
-	tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
-	rlwinm	r8,r1,0,0,18
-	lwz	r8,TI_CPU(r8)
-	oris	r8,r8,10
-	mfmsr	r10
-	SYNC
-	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
-	rlwinm	r0,r0,0,28,26	/* clear DR */
-	mtmsr	r0
-	SYNC_601
-	isync
-	lis	r9,mmu_hash_lock@h
-	ori	r9,r9,mmu_hash_lock@l
-	tophys(r9,r9)
-10:	lwarx	r7,0,r9
-	cmpwi	0,r7,0
-	bne-	10b
-	stwcx.	r8,0,r9
-	bne-	10b
-	sync
-	tlbia
-	sync
-	TLBSYNC
-	li	r0,0
-	stw	r0,0(r9)	/* clear mmu_hash_lock */
-	mtmsr	r10
-	SYNC_601
-	isync
-#else /* CONFIG_SMP */
-	sync
-	tlbia
-	sync
-#endif /* CONFIG_SMP */
-#endif /* ! defined(CONFIG_40x) */
-	blr
-
-/*
- * Flush MMU TLB for a particular address
- */
-_GLOBAL(_tlbie)
-#if defined(CONFIG_40x)
-	/* We run the search with interrupts disabled because we have to change
-	 * the PID and I don't want to preempt when that happens.
-	 */
-	mfmsr	r5
-	mfspr	r6,SPRN_PID
-	wrteei	0
-	mtspr	SPRN_PID,r4
-	tlbsx.	r3, 0, r3
-	mtspr	SPRN_PID,r6
-	wrtee	r5
-	bne	10f
-	sync
-	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
-	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
-	 * the TLB entry. */
-	tlbwe	r3, r3, TLB_TAG
-	isync
-10:
-
-#elif defined(CONFIG_44x)
-	mfspr	r5,SPRN_MMUCR
-	rlwimi	r5,r4,0,24,31			/* Set TID */
-
-	/* We have to run the search with interrupts disabled, even critical
-	 * and debug interrupts (in fact the only critical exceptions we have
-	 * are debug and machine check).  Otherwise an interrupt which causes
-	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r4
-	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
-	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r4,r6
-	mtmsr	r6
-	mtspr	SPRN_MMUCR,r5
-	tlbsx.	r3, 0, r3
-	mtmsr	r4
-	bne	10f
-	sync
-	/* There are only 64 TLB entries, so r3 < 64,
-	 * which means bit 22, is clear.  Since 22 is
-	 * the V bit in the TLB_PAGEID, loading this
-	 * value will invalidate the TLB entry.
-	 */
-	tlbwe	r3, r3, PPC44x_TLB_PAGEID
-	isync
-10:
-#elif defined(CONFIG_FSL_BOOKE)
-	rlwinm	r4, r3, 0, 0, 19
-	ori	r5, r4, 0x08	/* TLBSEL = 1 */
-	tlbivax	0, r4
-	tlbivax	0, r5
-	msync
-#if defined(CONFIG_SMP)
-	tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
-	rlwinm	r8,r1,0,0,18
-	lwz	r8,TI_CPU(r8)
-	oris	r8,r8,11
-	mfmsr	r10
-	SYNC
-	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
-	rlwinm	r0,r0,0,28,26	/* clear DR */
-	mtmsr	r0
-	SYNC_601
-	isync
-	lis	r9,mmu_hash_lock@h
-	ori	r9,r9,mmu_hash_lock@l
-	tophys(r9,r9)
-10:	lwarx	r7,0,r9
-	cmpwi	0,r7,0
-	bne-	10b
-	stwcx.	r8,0,r9
-	bne-	10b
-	eieio
-	tlbie	r3
-	sync
-	TLBSYNC
-	li	r0,0
-	stw	r0,0(r9)	/* clear mmu_hash_lock */
-	mtmsr	r10
-	SYNC_601
-	isync
-#else /* CONFIG_SMP */
-	tlbie	r3
-	sync
-#endif /* CONFIG_SMP */
-#endif /* ! CONFIG_40x */
-	blr
 
 /*
  * Flush instruction cache.
@@ -464,8 +345,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
  *
  * flush_icache_range(unsigned long start, unsigned long stop)
  */
-_GLOBAL(__flush_icache_range)
+_KPROBE(flush_icache_range)
 BEGIN_FTR_SECTION
+	PURGE_PREFETCHED_INS
 	blr				/* for 601, do nothing */
 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	li	r5,L1_CACHE_BYTES-1
@@ -480,10 +362,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	1b
 	sync				/* wait for dcbst's to get to ram */
+#ifndef CONFIG_44x
 	mtctr	r4
 2:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	2b
+#else
+	/* Flash invalidate on 44x because we are passed kmapped addresses and
+	   this doesn't work for userspace pages due to the virtually tagged
+	   icache. Sigh. */
+	iccci	0, r0
+#endif
 	sync				/* additional sync needed on g4 */
 	isync
 	blr
@@ -562,17 +451,18 @@ _GLOBAL(invalidate_dcache_range)
  */
 _GLOBAL(__flush_dcache_icache)
 BEGIN_FTR_SECTION
+	PURGE_PREFETCHED_INS
 	blr
 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
-	rlwinm	r3,r3,0,0,19			/* Get page base address */
-	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
+	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
+	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
 	mr	r6,r3
 0:	dcbst	0,r3				/* Write line to ram */
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
-#ifndef CONFIG_44x
+#ifdef CONFIG_44x
 	/* We don't flush the icache on 44x. Those have a virtual icache
 	 * and we don't have access to the virtual address here (it's
 	 * not the page vaddr but where it's mapped in user space). The
@@ -580,15 +470,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	 * a change in the address space occurs, before returning to
 	 * user space
 	 */
+BEGIN_MMU_FTR_SECTION
+	blr
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
+#endif /* CONFIG_44x */
 	mtctr	r4
 1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
-#endif /* CONFIG_44x */
 	blr
 
+#ifndef CONFIG_BOOKE
 /*
  * Flush a particular page from the data cache to RAM, identified
  * by its physical address.  We turn off the MMU so we can just use
@@ -599,14 +493,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
  */
 _GLOBAL(__flush_dcache_icache_phys)
 BEGIN_FTR_SECTION
+	PURGE_PREFETCHED_INS
 	blr					/* for 601, do nothing */
 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	mfmsr	r10
 	rlwinm	r0,r10,0,28,26			/* clear DR */
 	mtmsr	r0
 	isync
-	rlwinm	r3,r3,0,0,19			/* Get page base address */
-	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
+	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
+	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
 	mr	r6,r3
 0:	dcbst	0,r3				/* Write line to ram */
@@ -621,6 +516,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	mtmsr	r10				/* restore DR */
 	isync
 	blr
+#endif /* CONFIG_BOOKE */
 
 /*
  * Clear pages using the dcbz instruction, which doesn't cause any
@@ -630,18 +526,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
  * void clear_pages(void *page, int order) ;
  */
 _GLOBAL(clear_pages)
-	li	r0,4096/L1_CACHE_BYTES
+	li	r0,PAGE_SIZE/L1_CACHE_BYTES
 	slw	r0,r0,r4
 	mtctr	r0
-#ifdef CONFIG_8xx
-	li	r4, 0
-1:	stw	r4, 0(r3)
-	stw	r4, 4(r3)
-	stw	r4, 8(r3)
-	stw	r4, 12(r3)
-#else
 1:	dcbz	0,r3
-#endif
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	1b
 	blr
@@ -666,15 +554,6 @@ _GLOBAL(copy_page)
 	addi	r3,r3,-4
 	addi	r4,r4,-4
 
-#ifdef CONFIG_8xx
-	/* don't use prefetch on 8xx */
-	li	r0,4096/L1_CACHE_BYTES
-	mtctr	r0
-1:	COPY_16_BYTES
-	bdnz	1b
-	blr
-
-#else	/* not 8xx, we can prefetch */
 	li	r5,4
 
 #if MAX_COPY_PREFETCH > 1
@@ -688,7 +567,7 @@ _GLOBAL(copy_page)
 	dcbt	r5,r4
 	li	r11,L1_CACHE_BYTES+4
 #endif /* MAX_COPY_PREFETCH */
-	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
 	crclr	4*cr0+eq
 2:
 	mtctr	r0
@@ -715,7 +594,6 @@ _GLOBAL(copy_page)
 	li	r0,MAX_COPY_PREFETCH
 	li	r11,4
 	b	2b
-#endif	/* CONFIG_8xx */
 
 /*
  * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
@@ -786,6 +664,20 @@ _GLOBAL(__lshrdi3)
 	blr
 
 /*
+ * 64-bit comparison: __cmpdi2(s64 a, s64 b)
+ * Returns 0 if a < b, 1 if a == b, 2 if a > b.
+ */
+_GLOBAL(__cmpdi2)
+	cmpw	r3,r5
+	li	r3,1
+	bne	1f
+	cmplw	r4,r6
+	beqlr
+1:	li	r3,0
+	bltlr
+	li	r3,2
+	blr
+/*
  * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
  * Returns 0 if a < b, 1 if a == b, 2 if a > b.
  */
@@ -800,43 +692,34 @@ _GLOBAL(__ucmpdi2)
 	li	r3,2
 	blr
 
+_GLOBAL(__bswapdi2)
+	rotlwi	r9,r4,8
+	rotlwi	r10,r3,8
+	rlwimi	r9,r4,24,0,7
+	rlwimi	r10,r3,24,0,7
+	rlwimi	r9,r4,24,16,23
+	rlwimi	r10,r3,24,16,23
+	mr	r3,r9
+	mr	r4,r10
+	blr
+
 _GLOBAL(abs)
 	srawi	r4,r3,31
 	xor	r3,r3,r4
 	sub	r3,r3,r4
 	blr
 
-/*
- * Create a kernel thread
- *   kernel_thread(fn, arg, flags)
- */
-_GLOBAL(kernel_thread)
-	stwu	r1,-16(r1)
-	stw	r30,8(r1)
-	stw	r31,12(r1)
-	mr	r30,r3		/* function */
-	mr	r31,r4		/* argument */
-	ori	r3,r5,CLONE_VM	/* flags */
-	oris	r3,r3,CLONE_UNTRACED>>16
-	li	r4,0		/* new sp (unused) */
-	li	r0,__NR_clone
-	sc
-	cmpwi	0,r3,0		/* parent or child? */
-	bne	1f		/* return if parent */
-	li	r0,0		/* make top-level stack frame */
-	stwu	r0,-16(r1)
-	mtlr	r30		/* fn addr in lr */
-	mr	r3,r31		/* load arg and call fn */
-	PPC440EP_ERR42
-	blrl
-	li	r0,__NR_exit	/* exit if function returns */
+#ifdef CONFIG_SMP
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	CURRENT_THREAD_INFO(r1, r1)
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 	li	r3,0
-	sc
-1:	lwz	r30,8(r1)
-	lwz	r31,12(r1)
-	addi	r1,r1,16
-	blr
-
+	stw	r3,0(r1)	/* Zero the stack frame pointer	*/
+	bl	start_secondary
+	b	.
+#endif /* CONFIG_SMP */
+
 /*
  * This routine is just here to keep GCC happy - sigh...
  */
@@ -853,6 +736,378 @@ relocate_new_kernel:
 	/* r4 = reboot_code_buffer */
 	/* r5 = start_address      */
 
+#ifdef CONFIG_FSL_BOOKE
+
+	mr	r29, r3
+	mr	r30, r4
+	mr	r31, r5
+
+#define ENTRY_MAPPING_KEXEC_SETUP
+#include "fsl_booke_entry_mapping.S"
+#undef ENTRY_MAPPING_KEXEC_SETUP
+
+	mr	r3, r29
+	mr	r4, r30
+	mr	r5, r31
+
+	li	r0, 0
+#elif defined(CONFIG_44x)
+
+	/* Save our parameters */
+	mr	r29, r3
+	mr	r30, r4
+	mr	r31, r5
+
+#ifdef CONFIG_PPC_47x
+	/* Check for 47x cores */
+	mfspr	r3,SPRN_PVR
+	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476@h
+	beq	setup_map_47x
+	cmplwi	cr0,r3,PVR_476_ISS@h
+	beq	setup_map_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Code for setting up 1:1 mapping for PPC440x for KEXEC
+ *
+ * We cannot switch off the MMU on PPC44x.
+ * So we:
+ * 1) Invalidate all the mappings except the one we are running from.
+ * 2) Create a tmp mapping for our code in the other address space(TS) and
+ *    jump to it. Invalidate the entry we started in.
+ * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
+ * 4) Jump to the 1:1 mapping in original TS.
+ * 5) Invalidate the tmp mapping.
+ *
+ * - Based on the kexec support code for FSL BookE
+ *
+ */
+
+	/*
+	 * Load the PID with kernel PID (0).
+	 * Also load our MSR_IS and TID to MMUCR for TLB search.
+	 */
+	li	r3, 0
+	mtspr	SPRN_PID, r3
+	mfmsr	r4
+	andi.	r4,r4,MSR_IS@l
+	beq	wmmucr
+	oris	r3,r3,PPC44x_MMUCR_STS@h
+wmmucr:
+	mtspr	SPRN_MMUCR,r3
+	sync
+
+	/*
+	 * Invalidate all the TLB entries except the current entry
+	 * where we are running from
+	 */
+	bl	0f				/* Find our address */
+0:	mflr	r5				/* Make it accessible */
+	tlbsx	r23,0,r5			/* Find entry we are in */
+	li	r4,0				/* Start at TLB entry 0 */
+	li	r3,0				/* Set PAGEID inval value */
+1:	cmpw	r23,r4				/* Is this our entry? */
+	beq	skip				/* If so, skip the inval */
+	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
+skip:
+	addi	r4,r4,1				/* Increment */
+	cmpwi	r4,64				/* Are we done? */
+	bne	1b				/* If not, repeat */
+	isync
+
+	/* Create a temp mapping and jump to it */
+	andi.	r6, r23, 1		/* Find the index to use */
+	addi	r24, r6, 1		/* r24 will contain 1 or 2 */
+
+	mfmsr	r9			/* get the MSR */
+	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
+	xori	r7, r5, 1		/* Use the other address space */
+
+	/* Read the current mapping entries */
+	tlbre	r3, r23, PPC44x_TLB_PAGEID
+	tlbre	r4, r23, PPC44x_TLB_XLAT
+	tlbre	r5, r23, PPC44x_TLB_ATTRIB
+
+	/* Save our current XLAT entry */
+	mr	r25, r4
+
+	/* Extract the TLB PageSize */
+	li	r10, 1			/* r10 will hold PageSize */
+	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */
+
+	/* XXX: As of now we use 256M, 4K pages */
+	cmpwi	r11, PPC44x_TLB_256M
+	bne	tlb_4k
+	rotlwi	r10, r10, 28		/* r10 = 256M */
+	b	write_out
+tlb_4k:
+	cmpwi	r11, PPC44x_TLB_4K
+	bne	default
+	rotlwi	r10, r10, 12		/* r10 = 4K */
+	b	write_out
+default:
+	rotlwi	r10, r10, 10		/* r10 = 1K */
+
+write_out:
+	/*
+	 * Write out the tmp 1:1 mapping for this code in other address space
+	 * Fixup  EPN = RPN , TS=other address space
+	 */
+	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */
+
+	/* Write out the tmp mapping entries */
+	tlbwe	r3, r24, PPC44x_TLB_PAGEID
+	tlbwe	r4, r24, PPC44x_TLB_XLAT
+	tlbwe	r5, r24, PPC44x_TLB_ATTRIB
+
+	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
+	not	r10, r11		/* Mask for PageNum */
+
+	/* Switch to other address space in MSR */
+	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */
+
+	bl	1f
+1:	mflr	r8
+	addi	r8, r8, (2f-1b)		/* Find the target offset */
+
+	/* Jump to the tmp mapping */
+	mtspr	SPRN_SRR0, r8
+	mtspr	SPRN_SRR1, r9
+	rfi
+
+2:
+	/* Invalidate the entry we were executing from */
+	li	r3, 0
+	tlbwe	r3, r23, PPC44x_TLB_PAGEID
+
+	/* attribute fields. rwx for SUPERVISOR mode */
+	li	r5, 0
+	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+	/* Create 1:1 mapping in 256M pages */
+	xori	r7, r7, 1		/* Revert back to Original TS */
+
+	li	r8, 0			/* PageNumber */
+	li	r6, 3			/* TLB Index, start at 3  */
+
+next_tlb:
+	rotlwi	r3, r8, 28		/* Create EPN (bits 0-3) */
+	mr	r4, r3			/* RPN = EPN  */
+	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
+	insrwi	r3, r7, 1, 23		/* Set TS from r7 */
+
+	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
+	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN   */
+	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */
+
+	addi	r8, r8, 1		/* Increment PN */
+	addi	r6, r6, 1		/* Increment TLB Index */
+	cmpwi	r8, 8			/* Are we done ? */
+	bne	next_tlb
+	isync
+
+	/* Jump to the new mapping 1:1 */
+	li	r9,0
+	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */
+
+	bl	1f
+1:	mflr	r8
+	and	r8, r8, r11		/* Get our offset within page */
+	addi	r8, r8, (2f-1b)
+
+	and	r5, r25, r10		/* Get our target PageNum */
+	or	r8, r8, r5		/* Target jump address */
+
+	mtspr	SPRN_SRR0, r8
+	mtspr	SPRN_SRR1, r9
+	rfi
+2:
+	/* Invalidate the tmp entry we used */
+	li	r3, 0
+	tlbwe	r3, r24, PPC44x_TLB_PAGEID
+	sync
+	b	ppc44x_map_done
+
+#ifdef CONFIG_PPC_47x
+
+	/* 1:1 mapping for 47x */
+
+setup_map_47x:
+
+	/*
+	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
+	 * Also set the MSR IS->MMUCR STS
+	 */
+	li	r3, 0
+	mtspr	SPRN_PID, r3		/* Set PID */
+	mfmsr	r4			/* Get MSR */
+	andi.	r4, r4, MSR_IS@l	/* TS=1? */
+	beq	1f			/* If not, leave STS=0 */
+	oris	r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
+1:	mtspr	SPRN_MMUCR, r3		/* Put MMUCR */
+	sync
+
+	/* Find the entry we are running from */
+	bl	2f
+2:	mflr	r23
+	tlbsx	r23, 0, r23
+	tlbre	r24, r23, 0		/* TLB Word 0 */
+	tlbre	r25, r23, 1		/* TLB Word 1 */
+	tlbre	r26, r23, 2		/* TLB Word 2 */
+
+
+	/*
+	 * Invalidates all the tlb entries by writing to 256 RPNs(r4)
+	 * of 4k page size in all  4 ways (0-3 in r3).
+	 * This would invalidate the entire UTLB including the one we are
+	 * running from. However the shadow TLB entries would help us
+	 * to continue the execution, until we flush them (rfi/isync).
+	 */
+	addis	r3, 0, 0x8000		/* specify the way */
+	addi	r4, 0, 0		/* TLB Word0 = (EPN=0, VALID = 0) */
+	addi	r5, 0, 0
+	b	clear_utlb_entry
+
+	/* Align the loop to speed things up. from head_44x.S */
+	.align	6
+
+clear_utlb_entry:
+
+	tlbwe	r4, r3, 0
+	tlbwe	r5, r3, 1
+	tlbwe	r5, r3, 2
+	addis	r3, r3, 0x2000		/* Increment the way */
+	cmpwi	r3, 0
+	bne	clear_utlb_entry
+	addis	r3, 0, 0x8000
+	addis	r4, r4, 0x100		/* Increment the EPN */
+	cmpwi	r4, 0
+	bne	clear_utlb_entry
+
+	/* Create the entries in the other address space */
+	mfmsr	r5
+	rlwinm	r7, r5, 27, 31, 31	/* Get the TS (Bit 26) from MSR */
+	xori	r7, r7, 1		/* r7 = !TS */
+
+	insrwi	r24, r7, 1, 21		/* Change the TS in the saved TLB word 0 */
+
+	/*
+	 * write out the TLB entries for the tmp mapping
+	 * Use way '0' so that we could easily invalidate it later.
+	 */
+	lis	r3, 0x8000		/* Way '0' */
+
+	tlbwe	r24, r3, 0
+	tlbwe	r25, r3, 1
+	tlbwe	r26, r3, 2
+
+	/* Update the msr to the new TS */
+	insrwi	r5, r7, 1, 26
+
+	bl	1f
+1:	mflr	r6
+	addi	r6, r6, (2f-1b)
+
+	mtspr	SPRN_SRR0, r6
+	mtspr	SPRN_SRR1, r5
+	rfi
+
+	/*
+	 * Now we are in the tmp address space.
+	 * Create a 1:1 mapping for 0-2GiB in the original TS.
+	 */
+2:
+	li	r3, 0
+	li	r4, 0			/* TLB Word 0 */
+	li	r5, 0			/* TLB Word 1 */
+	li	r6, 0
+	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */
+
+	li	r8, 0			/* PageIndex */
+
+	xori	r7, r7, 1		/* revert back to original TS */
+
+write_utlb:
+	rotlwi	r5, r8, 28		/* RPN = PageIndex * 256M */
+					/* ERPN = 0 as we don't use memory above 2G */
+
+	mr	r4, r5			/* EPN = RPN */
+	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
+	insrwi	r4, r7, 1, 21		/* Insert the TS to Word 0 */
+
+	tlbwe	r4, r3, 0		/* Write out the entries */
+	tlbwe	r5, r3, 1
+	tlbwe	r6, r3, 2
+	addi	r8, r8, 1
+	cmpwi	r8, 8			/* Have we completed ? */
+	bne	write_utlb
+
+	/* make sure we complete the TLB write up */
+	isync
+
+	/*
+	 * Prepare to jump to the 1:1 mapping.
+	 * 1) Extract page size of the tmp mapping
+	 *    DSIZ = TLB_Word0[22:27]
+	 * 2) Calculate the physical address of the address
+	 *    to jump to.
+	 */
+	rlwinm	r10, r24, 0, 22, 27
+
+	cmpwi	r10, PPC47x_TLB0_4K
+	bne	0f
+	li	r10, 0x1000		/* r10 = 4k */
+	bl	1f
+
+0:
+	/* Defaults to 256M */
+	lis	r10, 0x1000
+
+	bl	1f
+1:	mflr	r4
+	addi	r4, r4, (2f-1b)		/* virtual address  of 2f */
+
+	subi	r11, r10, 1		/* offsetmask = Pagesize - 1 */
+	not	r10, r11		/* Pagemask = ~(offsetmask) */
+
+	and	r5, r25, r10		/* Physical page */
+	and	r6, r4, r11		/* offset within the current page */
+
+	or	r5, r5, r6		/* Physical address for 2f */
+
+	/* Switch the TS in MSR to the original one */
+	mfmsr	r8
+	insrwi	r8, r7, 1, 26
+
+	mtspr	SPRN_SRR1, r8
+	mtspr	SPRN_SRR0, r5
+	rfi
+
+2:
+	/* Invalidate the tmp mapping */
+	lis	r3, 0x8000		/* Way '0' */
+
+	clrrwi	r24, r24, 12		/* Clear the valid bit */
+	tlbwe	r24, r3, 0
+	tlbwe	r25, r3, 1
+	tlbwe	r26, r3, 2
+
+	/* Make sure we complete the TLB write and flush the shadow TLB */
+	isync
+
+#endif
+
+ppc44x_map_done:
+
+
+	/* Restore the parameters */
+	mr	r3, r29
+	mr	r4, r30
+	mr	r5, r31
+
+	li	r0, 0
+#else
 	li	r0, 0
 
 	/*
@@ -869,12 +1124,13 @@ relocate_new_kernel:
 	rfi
 
 1:
+#endif
 	/* from this point address translation is turned off */
 	/* and interrupts are disabled */
 
 	/* set a new stack at the bottom of our page... */
 	/* (not really needed now) */
-	addi	r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */
+	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
 	stw	r0, 0(r1)
 
 	/* Do the copies */
@@ -937,6 +1193,9 @@ relocate_new_kernel:
 	isync
 	sync
 
+	mfspr	r3, SPRN_PIR /* current core we are running on */
+	mr	r4, r5 /* load physical address of chunk called */
+
 	/* jump to the entry point, usually the setup routine */
 	mtlr	r5
 	blrl
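Notes on the changes above follow; they are reading aids for the diff, not part of the patch.

Several hunks replace hardcoded 4096 constants with PAGE_SIZE (`rlwinm r3,r3,0,0,31-PAGE_SHIFT` for the page base, `li r4,PAGE_SIZE/L1_CACHE_BYTES` for the loop count), so the cache-flush loops work for any configured page size. A minimal userspace C model of that arithmetic, with illustrative values for PAGE_SHIFT and L1_CACHE_BYTES (the kernel derives both from the build configuration):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* illustrative; set by the kernel config */
#define PAGE_SIZE	(1u << PAGE_SHIFT)
#define L1_CACHE_BYTES	32			/* illustrative cache line size */

int main(void)
{
	uintptr_t addr = 0x12345678;

	/* rlwinm r3,r3,0,0,31-PAGE_SHIFT: clear the low PAGE_SHIFT bits */
	uintptr_t base = addr & ~(uintptr_t)(PAGE_SIZE - 1);

	/* li r4,PAGE_SIZE/L1_CACHE_BYTES: cache lines per page (loop count) */
	unsigned int lines = PAGE_SIZE / L1_CACHE_BYTES;

	printf("page base 0x%08lx, %u lines\n", (unsigned long)base, lines);
	return 0;	/* prints: page base 0x12345000, 128 lines */
}
```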
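The new `__cmpdi2` mirrors the existing `__ucmpdi2` libgcc helper. A 64-bit argument arrives in a register pair (high word in r3/r5, low word in r4/r6): the high words are compared first, signed for `__cmpdi2` (`cmpw`) and unsigned for `__ucmpdi2` (`cmplw`), and only on a tie are the low words compared, always unsigned. A small C model of that sequence, with hypothetical names and test values:

```c
#include <stdio.h>

/* Userspace model, not the kernel build: 0 if a < b, 1 if a == b, 2 if a > b. */
static int cmpdi2_model(long long a, long long b)
{
	int ah = (int)(a >> 32), bh = (int)(b >> 32);	/* cmpw  r3,r5 (signed)   */
	unsigned al = (unsigned)a, bl = (unsigned)b;	/* cmplw r4,r6 (unsigned) */

	if (ah != bh)			/* high words decide... */
		return ah < bh ? 0 : 2;
	if (al == bl)			/* ...else the low words */
		return 1;
	return al < bl ? 0 : 2;
}

int main(void)
{
	/* -1 < 0, 5 == 5, 1 > -1: prints "0 1 2" */
	printf("%d %d %d\n", cmpdi2_model(-1, 0),
	       cmpdi2_model(5, 5), cmpdi2_model(1, -1));
	return 0;
}
```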
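`__bswapdi2` byte-reverses a 64-bit value held in two 32-bit registers: each half is reversed with one rotate plus two `rlwimi` (rotate-then-insert-under-mask) steps, and the halves are exchanged, so the reversed low word (built in r9 from r4) becomes the new high word (r3). The sketch below models the same rotate-and-insert steps in C; the helper names are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t w, int n)	/* rotlwi */
{
	return (w << n) | (w >> (32 - n));
}

/* Byte-reverse one 32-bit half: one rotate plus two masked insertions. */
static uint32_t bswap32_model(uint32_t w)
{
	uint32_t r = rol32(w, 8);				/* rotlwi r9,r4,8        */
	r = (r & 0x00ffffffu) | (rol32(w, 24) & 0xff000000u);	/* rlwimi r9,r4,24,0,7   */
	r = (r & 0xffff00ffu) | (rol32(w, 24) & 0x0000ff00u);	/* rlwimi r9,r4,24,16,23 */
	return r;
}

/* Exchange the halves: the reversed low word becomes the new high word. */
static uint64_t bswapdi2_model(uint64_t x)
{
	return ((uint64_t)bswap32_model((uint32_t)x) << 32) |
	       bswap32_model((uint32_t)(x >> 32));
}

int main(void)
{
	printf("%016llx\n",
	       (unsigned long long)bswapdi2_model(0x0102030405060708ULL));
	return 0;	/* prints 0807060504030201 */
}
```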
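The 44x kexec path cannot simply turn the MMU off, so the comment block in the hunk describes a five-step dance that ends with a 1:1 mapping of 0-2GiB built from eight 256MiB TLB entries, written starting at TLB index 3: `rotlwi r3,r8,28` forms EPN = PageNumber << 28, and RPN = EPN. A C model of just that loop's arithmetic (the slot numbering follows the diff; the printout is illustrative only):

```c
#include <stdio.h>

/*
 * Arithmetic model of the 440x loop labelled next_tlb: eight 256MiB
 * entries at TLB indices 3..10 cover 0-2GiB with EPN == RPN.
 */
int main(void)
{
	for (unsigned int pn = 0, idx = 3; pn < 8; pn++, idx++) {
		unsigned int epn = pn << 28;	/* rotlwi r3,r8,28 */
		unsigned int rpn = epn;		/* 1:1 mapping: RPN = EPN */

		printf("TLB[%2u]: EPN 0x%08x -> RPN 0x%08x (256MiB)\n",
		       idx, epn, rpn);
	}
	return 0;
}
```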
