| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d | /arch/ppc/kernel/head.S |

Tags: Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ppc/kernel/head.S')
-rw-r--r-- | arch/ppc/kernel/head.S | 1710 |
1 files changed, 1710 insertions, 0 deletions
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
new file mode 100644
index 00000000000..1a89a71e0ac
--- /dev/null
+++ b/arch/ppc/kernel/head.S
@@ -0,0 +1,1710 @@
+/*
+ *  PowerPC version
+ *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  This file contains the low-level support and setup for the
+ *  PowerPC platform, including trap and interrupt dispatch.
+ *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/offsets.h>
+
+#ifdef CONFIG_APUS
+#include <asm/amigappc.h>
+#endif
+
+#ifdef CONFIG_PPC64BRIDGE
+#define LOAD_BAT(n, reg, RA, RB)	\
+	ld	RA,(n*32)+0(reg);	\
+	ld	RB,(n*32)+8(reg);	\
+	mtspr	SPRN_IBAT##n##U,RA;	\
+	mtspr	SPRN_IBAT##n##L,RB;	\
+	ld	RA,(n*32)+16(reg);	\
+	ld	RB,(n*32)+24(reg);	\
+	mtspr	SPRN_DBAT##n##U,RA;	\
+	mtspr	SPRN_DBAT##n##L,RB;	\
+
+#else /* CONFIG_PPC64BRIDGE */
+
+/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
+#define LOAD_BAT(n, reg, RA, RB)	\
+	/* see the comment for clear_bats() -- Cort */ \
+	li	RA,0;			\
+	mtspr	SPRN_IBAT##n##U,RA;	\
+	mtspr	SPRN_DBAT##n##U,RA;	\
+	lwz	RA,(n*16)+0(reg);	\
+	lwz	RB,(n*16)+4(reg);	\
+	mtspr	SPRN_IBAT##n##U,RA;	\
+	mtspr	SPRN_IBAT##n##L,RB;	\
+	beq	1f;			\
+	lwz	RA,(n*16)+8(reg);	\
+	lwz	RB,(n*16)+12(reg);	\
+	mtspr	SPRN_DBAT##n##U,RA;	\
+	mtspr	SPRN_DBAT##n##L,RB;	\
+1:
+#endif /* CONFIG_PPC64BRIDGE */
+
+	.text
+	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
+	.stabs	"head.S",N_SO,0,0,0f
+0:
+	.globl	_stext
+_stext:
+
+/*
+ * _start is defined this way because the XCOFF loader in the OpenFirmware
+ * on the powermac expects the entry point to be a procedure descriptor.
+ */
+	.text
+	.globl	_start
+_start:
+	/*
+	 * These are here for legacy reasons, the kernel used to
+	 * need to look like a coff function entry for the pmac
+	 * but we're always started by some kind of bootloader now.
+	 *  -- Cort
+	 */
+	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
+	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
+	nop
+
+/* PMAC
+ * Enter here with the kernel text, data and bss loaded starting at
+ * 0, running with virtual == physical mapping.
+ * r5 points to the prom entry point (the client interface handler
+ * address).  Address translation is turned on, with the prom
+ * managing the hash table.  Interrupts are disabled.  The stack
+ * pointer (r1) points to just below the end of the half-meg region
+ * from 0x380000 - 0x400000, which is mapped in already.
+ *
+ * If we are booted from MacOS via BootX, we enter with the kernel
+ * image loaded somewhere, and the following values in registers:
+ *  r3: 'BooX' (0x426f6f58)
+ *  r4: virtual address of boot_infos_t
+ *  r5: 0
+ *
+ * APUS
+ *   r3: 'APUS'
+ *   r4: physical address of memory base
+ *   Linux/m68k style BootInfo structure at &_end.
+ *
+ * PREP
+ * This is jumped to on prep systems right after the kernel is relocated
+ * to its proper place in memory by the boot loader.  The expected layout
+ * of the regs is:
+ *   r3: ptr to residual data
+ *   r4: initrd_start or if no initrd then 0
+ *   r5: initrd_end - unused if r4 is 0
+ *   r6: Start of command line string
+ *   r7: End of command line string
+ *
+ * This just gets a minimal mmu environment setup so we can call
+ * start_here() to do the real work.
+ * -- Cort
+ */
+
+	.globl	__start
+__start:
+/*
+ * We have to do any OF calls before we map ourselves to KERNELBASE,
+ * because OF may have I/O devices mapped into that area
+ * (particularly on CHRP).
+ */
+	mr	r31,r3			/* save parameters */
+	mr	r30,r4
+	mr	r29,r5
+	mr	r28,r6
+	mr	r27,r7
+	li	r24,0			/* cpu # */
+
+/*
+ * early_init() does the early machine identification and does
+ * the necessary low-level setup and clears the BSS
+ *  -- Cort <cort@fsmlabs.com>
+ */
+	bl	early_init
+
+/*
+ * On POWER4, we first need to tweak some CPU configuration registers
+ * like real mode cache inhibit or exception base
+ */
+#ifdef CONFIG_POWER4
+	bl	__970_cpu_preinit
+#endif /* CONFIG_POWER4 */
+
+#ifdef CONFIG_APUS
+/* On APUS the __va/__pa constants need to be set to the correct
+ * values before continuing.
+ */
+	mr	r4,r30
+	bl	fix_mem_constants
+#endif /* CONFIG_APUS */
+
+/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
+ * the physical address we are running at, returned by early_init()
+ */
+	bl	mmu_off
+__after_mmu_off:
+#ifndef CONFIG_POWER4
+	bl	clear_bats
+	bl	flush_tlbs
+
+	bl	initial_bats
+#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
+	bl	setup_disp_bat
+#endif
+#else /* CONFIG_POWER4 */
+	bl	reloc_offset
+	bl	initial_mm_power4
+#endif /* CONFIG_POWER4 */
+
+/*
+ * Call setup_cpu for CPU 0 and initialize 6xx Idle
+ */
+	bl	reloc_offset
+	li	r24,0			/* cpu# */
+	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+	bl	reloc_offset
+	bl	init_idle_6xx
+#endif /* CONFIG_6xx */
+#ifdef CONFIG_POWER4
+	bl	reloc_offset
+	bl	init_idle_power4
+#endif /* CONFIG_POWER4 */
+
+
+#ifndef CONFIG_APUS
+/*
+ * We need to run with _start at physical address 0.
+ * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
+ * the exception vectors at 0 (and therefore this copy
+ * overwrites OF's exception vectors with our own).
+ * If the MMU is already turned on, we copy stuff to KERNELBASE,
+ * otherwise we copy it to 0.
+ */
+	bl	reloc_offset
+	mr	r26,r3
+	addis	r4,r3,KERNELBASE@h	/* current address of _start */
+	cmpwi	0,r4,0			/* are we already running at 0? */
+	bne	relocate_kernel
+#endif /* CONFIG_APUS */
+/*
+ * we now have the 1st 16M of ram mapped with the bats.
+ * prep needs the mmu to be turned on here, but pmac already has it on.
+ * this shouldn't bother the pmac since it just gets turned on again
+ * as we jump to our code at KERNELBASE. -- Cort
+ * Actually no, pmac doesn't have it on any more. BootX enters with MMU
+ * off, and in other cases, we now turn it off before changing BATs above.
+ */
+turn_on_mmu:
+	mfmsr	r0
+	ori	r0,r0,MSR_DR|MSR_IR
+	mtspr	SPRN_SRR1,r0
+	lis	r0,start_here@h
+	ori	r0,r0,start_here@l
+	mtspr	SPRN_SRR0,r0
+	SYNC
+	RFI				/* enables MMU */
+
+/*
+ * We need __secondary_hold as a place to hold the other cpus on
+ * an SMP machine, even when we are running a UP kernel.
+ */
+	. = 0xc0			/* for prep bootloader */
+	li	r3,1			/* MTX only has 1 cpu */
+	.globl	__secondary_hold
+__secondary_hold:
+	/* tell the master we're here */
+	stw	r3,4(0)
+#ifdef CONFIG_SMP
+100:	lwz	r4,0(0)
+	/* wait until we're told to start */
+	cmpw	0,r4,r3
+	bne	100b
+	/* our cpu # was at addr 0 - go */
+	mr	r24,r3			/* cpu # */
+	b	__secondary_start
+#else
+	b	.
+#endif /* CONFIG_SMP */
+
+/*
+ * Exception entry code.  This code runs with address translation
+ * turned off, i.e. using physical addresses.
+ * We assume sprg3 has the physical address of the current
+ * task's thread_struct.
+ */
+#define EXCEPTION_PROLOG	\
+	mtspr	SPRN_SPRG0,r10;	\
+	mtspr	SPRN_SPRG1,r11;	\
+	mfcr	r10;		\
+	EXCEPTION_PROLOG_1;	\
+	EXCEPTION_PROLOG_2
+
+#define EXCEPTION_PROLOG_1	\
+	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
+	andi.	r11,r11,MSR_PR;	\
+	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
+	beq	1f;		\
+	mfspr	r11,SPRN_SPRG3;	\
+	lwz	r11,THREAD_INFO-THREAD(r11);	\
+	addi	r11,r11,THREAD_SIZE;	\
+	tophys(r11,r11);	\
+1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
+
+
+#define EXCEPTION_PROLOG_2	\
+	CLR_TOP32(r11);		\
+	stw	r10,_CCR(r11);		/* save registers */ \
+	stw	r12,GPR12(r11);	\
+	stw	r9,GPR9(r11);	\
+	mfspr	r10,SPRN_SPRG0;	\
+	stw	r10,GPR10(r11);	\
+	mfspr	r12,SPRN_SPRG1;	\
+	stw	r12,GPR11(r11);	\
+	mflr	r10;		\
+	stw	r10,_LINK(r11);	\
+	mfspr	r12,SPRN_SRR0;	\
+	mfspr	r9,SPRN_SRR1;	\
+	stw	r1,GPR1(r11);	\
+	stw	r1,0(r11);	\
+	tovirt(r1,r11);			/* set new kernel sp */	\
+	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
+	MTMSRD(r10);			/* (except for mach check in rtas) */ \
+	stw	r0,GPR0(r11);	\
+	SAVE_4GPRS(3, r11);	\
+	SAVE_2GPRS(7, r11)
+
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r11, r12 (SRR0), and r9 (SRR1).
+ *
+ * Note2: once we have set r1 we are in a position to take exceptions
+ * again, and we could thus set MSR:RI at that point.
+ */
+
+/*
+ * Exception vectors.
+ */
+#define EXCEPTION(n, label, hdlr, xfer)		\
+	. = n;					\
+label:						\
+	EXCEPTION_PROLOG;			\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
+	xfer(n, hdlr)
+
+#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
+	li	r10,trap;					\
+	stw	r10,TRAP(r11);					\
+	li	r10,MSR_KERNEL;					\
+	copyee(r10, r9);					\
+	bl	tfer;						\
+i##n:								\
+	.long	hdlr;						\
+	.long	ret
+
+#define COPY_EE(d, s)		rlwimi d,s,0,16,16
+#define NOCOPY(d, s)
+
+#define EXC_XFER_STD(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
+			  ret_from_except_full)
+
+#define EXC_XFER_LITE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
+			  ret_from_except)
+
+#define EXC_XFER_EE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
+			  ret_from_except_full)
+
+#define EXC_XFER_EE_LITE(n, hdlr)	\
+	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
+			  ret_from_except)
+
+/* System reset */
+/* core99 pmac starts the secondary here by changing the vector, and
+   putting it back to what it was (UnknownException) when done.  */
+#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
+	. = 0x100
+	b	__secondary_start_gemini
+#else
+	EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
+#endif
+
+/* Machine check */
+/*
+ * On CHRP, this is complicated by the fact that we could get a
+ * machine check inside RTAS, and we have no guarantee that certain
+ * critical registers will have the values we expect.  The set of
+ * registers that might have bad values includes all the GPRs
+ * and all the BATs.  We indicate that we are in RTAS by putting
+ * a non-zero value, the address of the exception frame to use,
+ * in SPRG2.  The machine check handler checks SPRG2 and uses its
+ * value if it is non-zero.  If we ever needed to free up SPRG2,
+ * we could use a field in the thread_info or thread_struct instead.
+ * (Other exception handlers assume that r1 is a valid kernel stack
+ * pointer when we take an exception from supervisor mode.)
+ *	-- paulus.
+ */
+	. = 0x200
+	mtspr	SPRN_SPRG0,r10
+	mtspr	SPRN_SPRG1,r11
+	mfcr	r10
+#ifdef CONFIG_PPC_CHRP
+	mfspr	r11,SPRN_SPRG2
+	cmpwi	0,r11,0
+	bne	7f
+#endif /* CONFIG_PPC_CHRP */
+	EXCEPTION_PROLOG_1
+7:	EXCEPTION_PROLOG_2
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_CHRP
+	mfspr	r4,SPRN_SPRG2
+	cmpwi	cr1,r4,0
+	bne	cr1,1f
+#endif
+	EXC_XFER_STD(0x200, MachineCheckException)
+#ifdef CONFIG_PPC_CHRP
+1:	b	machine_check_in_rtas
+#endif
+
+/* Data access exception. */
+	. = 0x300
+#ifdef CONFIG_PPC64BRIDGE
+	b	DataAccess
+DataAccessCont:
+#else
+DataAccess:
+	EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
+	mfspr	r10,SPRN_DSISR
+	andis.	r0,r10,0xa470		/* weird error? */
+	bne	1f			/* if not, try to put a PTE */
+	mfspr	r4,SPRN_DAR		/* into the hash table */
+	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
+	bl	hash_page
+1:	stw	r10,_DSISR(r11)
+	mr	r5,r10
+	mfspr	r4,SPRN_DAR
+	EXC_XFER_EE_LITE(0x300, handle_page_fault)
+
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on data access. */
+	. = 0x380
+	b	DataSegment
+#endif /* CONFIG_PPC64BRIDGE */
+
+/* Instruction access exception. */
+	. = 0x400
+#ifdef CONFIG_PPC64BRIDGE
+	b	InstructionAccess
+InstructionAccessCont:
+#else
+InstructionAccess:
+	EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
+	andis.	r0,r9,0x4000		/* no pte found? */
+	beq	1f			/* if so, try to put a PTE */
+	li	r3,0			/* into the hash table */
+	mr	r4,r12			/* SRR0 is fault address */
+	bl	hash_page
+1:	mr	r4,r12
+	mr	r5,r9
+	EXC_XFER_EE_LITE(0x400, handle_page_fault)
+
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on instruction access. */
+	. = 0x480
+	b	InstructionSegment
+#endif /* CONFIG_PPC64BRIDGE */
+
+/* External interrupt */
+	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+
+/* Alignment exception */
+	. = 0x600
+Alignment:
+	EXCEPTION_PROLOG
+	mfspr	r4,SPRN_DAR
+	stw	r4,_DAR(r11)
+	mfspr	r5,SPRN_DSISR
+	stw	r5,_DSISR(r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE(0x600, AlignmentException)
+
+/* Program check exception */
+	EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
+
+/* Floating-point unavailable */
+	. = 0x800
+FPUnavailable:
+	EXCEPTION_PROLOG
+	bne	load_up_fpu		/* if from user, just load it up */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE_LITE(0x800, KernelFP)
+
+/* Decrementer */
+	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
+
+	EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
+
+/* System call */
+	. = 0xc00
+SystemCall:
+	EXCEPTION_PROLOG
+	EXC_XFER_EE_LITE(0xc00, DoSyscall)
+
+/* Single step - not used on 601 */
+	EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
+	EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
+
+/*
+ * The Altivec unavailable trap is at 0x0f20.  Foo.
+ * We effectively remap it to 0x3000.
+ * We include an altivec unavailable exception vector even if
+ * not configured for Altivec, so that you can't panic a
+ * non-altivec kernel running on a machine with altivec just
+ * by executing an altivec instruction.
+ */
+	. = 0xf00
+	b	Trap_0f
+
+	. = 0xf20
+	b	AltiVecUnavailable
+
+Trap_0f:
+	EXCEPTION_PROLOG
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE(0xf00, UnknownException)
+
+/*
+ * Handle TLB miss for instruction on 603/603e.
+ * Note: we get an alternate set of r0 - r3 to use automatically.
+ */
+	. = 0x1000
+InstructionTLBMiss:
+/*
+ * r0:	stored ctr
+ * r1:	linux style pte ( later becomes ppc hardware pte )
+ * r2:	ptr to linux-style pte
+ * r3:	scratch
+ */
+	mfctr	r0
+	/* Get PTE (linux-style) and check access */
+	mfspr	r3,SPRN_IMISS
+	lis	r1,KERNELBASE@h		/* check if kernel address */
+	cmplw	0,r3,r1
+	mfspr	r2,SPRN_SPRG3
+	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
+	lwz	r2,PGDIR(r2)
+	blt+	112f
+	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
+	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
+	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
+	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
+112:	tophys(r2,r2)
+	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+	lwz	r2,0(r2)		/* get pmd entry */
+	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
+	beq-	InstructionAddressInvalid	/* return if no mapping */
+	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
+	lwz	r3,0(r2)		/* get linux-style pte */
+	andc.	r1,r1,r3		/* check access & ~permission */
+	bne-	InstructionAddressInvalid /* return if access not permitted */
+	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
+	/*
+	 * NOTE! We are assuming this is not an SMP system, otherwise
+	 * we would need to update the pte atomically with lwarx/stwcx.
+	 */
+	stw	r3,0(r2)		/* update PTE (accessed bit) */
+	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
+	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
+	and	r1,r1,r2		/* writable if _RW and _DIRTY */
+	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
+	ori	r1,r1,0xe14		/* clear out reserved bits and M */
+	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
+	mtspr	SPRN_RPA,r1
+	mfspr	r3,SPRN_IMISS
+	tlbli	r3
+	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
+	mtcrf	0x80,r3
+	rfi
+InstructionAddressInvalid:
+	mfspr	r3,SPRN_SRR1
+	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
+
+	addis	r1,r1,0x2000
+	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
+	mtctr	r0		/* Restore CTR */
+	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
+	or	r2,r2,r1
+	mtspr	SPRN_SRR1,r2
+	mfspr	r1,SPRN_IMISS	/* Get failing address */
+	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
+	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
+	xor	r1,r1,r2
+	mtspr	SPRN_DAR,r1	/* Set fault address */
+	mfmsr	r0		/* Restore "normal" registers */
+	xoris	r0,r0,MSR_TGPR>>16
+	mtcrf	0x80,r3		/* Restore CR0 */
+	mtmsr	r0
+	b	InstructionAccess
+
+/*
+ * Handle TLB miss for DATA Load operation on 603/603e
+ */
+	. = 0x1100
+DataLoadTLBMiss:
+/*
+ * r0:	stored ctr
+ * r1:	linux style pte ( later becomes ppc hardware pte )
+ * r2:	ptr to linux-style pte
+ * r3:	scratch
+ */
+	mfctr	r0
+	/* Get PTE (linux-style) and check access */
+	mfspr	r3,SPRN_DMISS
+	lis	r1,KERNELBASE@h		/* check if kernel address */
+	cmplw	0,r3,r1
+	mfspr	r2,SPRN_SPRG3
+	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
+	lwz	r2,PGDIR(r2)
+	blt+	112f
+	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
+	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
+	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
+	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
+112:	tophys(r2,r2)
+	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+	lwz	r2,0(r2)		/* get pmd entry */
+	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
+	beq-	DataAddressInvalid	/* return if no mapping */
+	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
+	lwz	r3,0(r2)		/* get linux-style pte */
+	andc.	r1,r1,r3		/* check access & ~permission */
+	bne-	DataAddressInvalid	/* return if access not permitted */
+	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
+	/*
+	 * NOTE! We are assuming this is not an SMP system, otherwise
+	 * we would need to update the pte atomically with lwarx/stwcx.
+	 */
+	stw	r3,0(r2)		/* update PTE (accessed bit) */
+	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
+	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
+	and	r1,r1,r2		/* writable if _RW and _DIRTY */
+	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
+	ori	r1,r1,0xe14		/* clear out reserved bits and M */
+	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
+	mtspr	SPRN_RPA,r1
+	mfspr	r3,SPRN_DMISS
+	tlbld	r3
+	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
+	mtcrf	0x80,r3
+	rfi
+DataAddressInvalid:
+	mfspr	r3,SPRN_SRR1
+	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
+	addis	r1,r1,0x2000
+	mtspr	SPRN_DSISR,r1
+	mtctr	r0		/* Restore CTR */
+	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
+	mtspr	SPRN_SRR1,r2
+	mfspr	r1,SPRN_DMISS	/* Get failing address */
+	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
+	beq	20f		/* Jump if big endian */
+	xori	r1,r1,3
+20:	mtspr	SPRN_DAR,r1	/* Set fault address */
+	mfmsr	r0		/* Restore "normal" registers */
+	xoris	r0,r0,MSR_TGPR>>16
+	mtcrf	0x80,r3		/* Restore CR0 */
+	mtmsr	r0
+	b	DataAccess
+
+/*
+ * Handle TLB miss for DATA Store on 603/603e
+ */
+	. = 0x1200
+DataStoreTLBMiss:
+/*
+ * r0:	stored ctr
+ * r1:	linux style pte ( later becomes ppc hardware pte )
+ * r2:	ptr to linux-style pte
+ * r3:	scratch
+ */
+	mfctr	r0
+	/* Get PTE (linux-style) and check access */
+	mfspr	r3,SPRN_DMISS
+	lis	r1,KERNELBASE@h		/* check if kernel address */
+	cmplw	0,r3,r1
+	mfspr	r2,SPRN_SPRG3
+	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
+	lwz	r2,PGDIR(r2)
+	blt+	112f
+	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
+	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
+	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
+	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
+112:	tophys(r2,r2)
+	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+	lwz	r2,0(r2)		/* get pmd entry */
+	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
+	beq-	DataAddressInvalid	/* return if no mapping */
+	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
+	lwz	r3,0(r2)		/* get linux-style pte */
+	andc.	r1,r1,r3		/* check access & ~permission */
+	bne-	DataAddressInvalid	/* return if access not permitted */
+	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
+	/*
+	 * NOTE! We are assuming this is not an SMP system, otherwise
+	 * we would need to update the pte atomically with lwarx/stwcx.
+	 */
+	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
+	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
+	li	r1,0xe15		/* clear out reserved bits and M */
+	andc	r1,r3,r1		/* PP = user? 2: 0 */
+	mtspr	SPRN_RPA,r1
+	mfspr	r3,SPRN_DMISS
+	tlbld	r3
+	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
+	mtcrf	0x80,r3
+	rfi
+
+#ifndef CONFIG_ALTIVEC
+#define AltivecAssistException	UnknownException
+#endif
+
+	EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE)
+	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
+	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
+#ifdef CONFIG_POWER4
+	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE)
+	EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
+#else /* !CONFIG_POWER4 */
+	EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE)
+	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
+	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
+#endif /* CONFIG_POWER4 */
+	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
+	EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE)
+
+	.globl	mol_trampoline
+	.set	mol_trampoline, i0x2f00
+
+	. = 0x3000
+
+AltiVecUnavailable:
+	EXCEPTION_PROLOG
+#ifdef CONFIG_ALTIVEC
+	bne	load_up_altivec		/* if from user, just load it up */
+#endif /* CONFIG_ALTIVEC */
+	EXC_XFER_EE_LITE(0xf20, AltivecUnavailException)
+
+#ifdef CONFIG_PPC64BRIDGE
+DataAccess:
+	EXCEPTION_PROLOG
+	b	DataAccessCont
+
+InstructionAccess:
+	EXCEPTION_PROLOG
+	b	InstructionAccessCont
+
+DataSegment:
+	EXCEPTION_PROLOG
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	mfspr	r4,SPRN_DAR
+	stw	r4,_DAR(r11)
+	EXC_XFER_STD(0x380, UnknownException)
+
+InstructionSegment:
+	EXCEPTION_PROLOG
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_STD(0x480, UnknownException)
+#endif /* CONFIG_PPC64BRIDGE */
+
+/*
+ * This task wants to use the FPU now.
+ * On UP, disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Load up this task's FP registers from its thread_struct,
+ * enable the FPU for the current task and return to the task.
+ */
+load_up_fpu:
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+#ifdef CONFIG_PPC64BRIDGE
+	clrldi	r5,r5,1			/* turn off 64-bit mode */
+#endif /* CONFIG_PPC64BRIDGE */
+	SYNC
+	MTMSRD(r5)			/* enable use of fpu now */
+	isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_fpu in switch_to.
+ */
+#ifndef CONFIG_SMP
+	tophys(r6,0)			/* get __pa constant */
+	addis	r3,r6,last_task_used_math@ha
+	lwz	r4,last_task_used_math@l(r3)
+	cmpwi	0,r4,0
+	beq	1f
+	add	r4,r4,r6
+	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
+	SAVE_32FPRS(0, r4)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR-4(r4)
+	lwz	r5,PT_REGS(r4)
+	add	r5,r5,r6
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r10,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r10		/* disable FP for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of FP after return */
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	lwz	r4,THREAD_FPEXC_MODE(r5)
+	ori	r9,r9,MSR_FP		/* enable FP for current */
+	or	r9,r9,r4
+	lfd	fr0,THREAD_FPSCR-4(r5)
+	mtfsf	0xff,fr0
+	REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+	subi	r4,r5,THREAD
+	sub	r4,r4,r6
+	stw	r4,last_task_used_math@l(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	/* we haven't used ctr or xer or lr */
+	/* fall through to fast_exception_return */
+
+	.globl	fast_exception_return
+fast_exception_return:
+	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
+	beq	1f			/* if not, we've got problems */
+2:	REST_4GPRS(3, r11)
+	lwz	r10,_CCR(r11)
+	REST_GPR(1, r11)
+	mtcr	r10
+	lwz	r10,_LINK(r11)
+	mtlr	r10
+	REST_GPR(10, r11)
+	mtspr	SPRN_SRR1,r9
+	mtspr	SPRN_SRR0,r12
+	REST_GPR(9, r11)
+	REST_GPR(12, r11)
+	lwz	r11,GPR11(r11)
+	SYNC
+	RFI
+
+/* check if the exception happened in a restartable section */
+1:	lis	r3,exc_exit_restart_end@ha
+	addi	r3,r3,exc_exit_restart_end@l
+	cmplw	r12,r3
+	bge	3f
+	lis	r4,exc_exit_restart@ha
+	addi	r4,r4,exc_exit_restart@l
+	cmplw	r12,r4
+	blt	3f
+	lis	r3,fee_restarts@ha
+	tophys(r3,r3)
+	lwz	r5,fee_restarts@l(r3)
+	addi	r5,r5,1
+	stw	r5,fee_restarts@l(r3)
+	mr	r12,r4			/* restart at exc_exit_restart */
+	b	2b
+
+	.comm	fee_restarts,4
+
+/* aargh, a nonrecoverable interrupt, panic */
+/* aargh, we don't know which trap this is */
+/* but the 601 doesn't implement the RI bit, so assume it's OK */
+3:
+BEGIN_FTR_SECTION
+	b	2b
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+	li	r10,-1
+	stw	r10,TRAP(r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	li	r10,MSR_KERNEL
+	bl	transfer_to_handler_full
+	.long	nonrecoverable_exception
+	.long	ret_from_except
+
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+KernelFP:
+	lwz	r3,_MSR(r1)
+	ori	r3,r3,MSR_FP
+	stw	r3,_MSR(r1)		/* enable use of FP after return */
+	lis	r3,86f@h
+	ori	r3,r3,86f@l
+	mr	r4,r2			/* current */
+	lwz	r5,_NIP(r1)
+	bl	printk
+	b	ret_from_except
+86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
+	.align	4,0
+
+#ifdef CONFIG_ALTIVEC
+/* Note that the AltiVec support is closely modeled after the FP
+ * support.  Changes to one are likely to be applicable to the
+ * other!  */
+load_up_altivec:
+/*
+ * Disable AltiVec for the task which had AltiVec previously,
+ * and save its AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ * On SMP we know the AltiVec units are free, since we give it up every
+ * switch.  -- Kumar
+ */
+	mfmsr	r5
+	oris	r5,r5,MSR_VEC@h
+	MTMSRD(r5)			/* enable use of AltiVec now */
+	isync
+/*
+ * For SMP, we don't do lazy AltiVec switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altivec in switch_to.
+ */
+#ifndef CONFIG_SMP
+	tophys(r6,0)
+	addis	r3,r6,last_task_used_altivec@ha
+	lwz	r4,last_task_used_altivec@l(r3)
+	cmpwi	0,r4,0
+	beq	1f
+	add	r4,r4,r6
+	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
+	SAVE_32VR(0,r10,r4)
+	mfvscr	vr0
+	li	r10,THREAD_VSCR
+	stvx	vr0,r10,r4
+	lwz	r5,PT_REGS(r4)
+	add	r5,r5,r6
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r10,MSR_VEC@h
+	andc	r4,r4,r10	/* disable altivec for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of AltiVec after return */
+	oris	r9,r9,MSR_VEC@h
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	li	r4,1
+	li	r10,THREAD_VSCR
+	stw	r4,THREAD_USED_VR(r5)
+	lvx	vr0,r10,r5
+	mtvscr	vr0
+	REST_32VR(0,r10,r5)
+#ifndef CONFIG_SMP
+	subi	r4,r5,THREAD
+	sub	r4,r4,r6
+	stw	r4,last_task_used_altivec@l(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	/* we haven't used ctr or xer or lr */
+	b	fast_exception_return
+
+/*
+ * AltiVec unavailable trap from kernel - print a message, but let
+ * the task use AltiVec in the kernel until it returns to user mode.
+ */
+KernelAltiVec:
+	lwz	r3,_MSR(r1)
+	oris	r3,r3,MSR_VEC@h
+	stw	r3,_MSR(r1)		/* enable use of AltiVec after return */
+	lis	r3,87f@h
+	ori	r3,r3,87f@l
+	mr	r4,r2			/* current */
+	lwz	r5,_NIP(r1)
+	bl	printk
+	b	ret_from_except
+87:	.string	"AltiVec used in kernel (task=%p, pc=%x) \n"
+	.align	4,0
+
+/*
+ * giveup_altivec(tsk)
+ * Disable AltiVec for the task given as the argument,
+ * and save the AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ */
+
+	.globl	giveup_altivec
+giveup_altivec:
+	mfmsr	r5
+	oris	r5,r5,MSR_VEC@h
+	SYNC
+	MTMSRD(r5)			/* enable use of AltiVec now */
+	isync
+	cmpwi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	lwz	r5,PT_REGS(r3)
+	cmpwi	0,r5,0
+	SAVE_32VR(0, r4, r3)
+	mfvscr	vr0
+	li	r4,THREAD_VSCR
+	stvx	vr0,r4,r3
+	beq	1f
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_VEC@h
+	andc	r4,r4,r3		/* disable AltiVec for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	lis	r4,last_task_used_altivec@ha
+	stw	r5,last_task_used_altivec@l(r4)
+#endif /* CONFIG_SMP */
+	blr
+#endif /* CONFIG_ALTIVEC */
+
+/*
+ * giveup_fpu(tsk)
+ * Disable FP for the task given as the argument,
+ * and save the floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ */
+	.globl	giveup_fpu
+giveup_fpu:
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+	SYNC_601
+	ISYNC_601
+	MTMSRD(r5)			/* enable use of fpu now */
+	SYNC_601
+	isync
+	cmpwi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	lwz	r5,PT_REGS(r3)
+	cmpwi	0,r5,0
+	SAVE_32FPRS(0, r3)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR-4(r3)
+	beq	1f
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r3,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r3		/* disable FP for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	lis	r4,last_task_used_math@ha
+	stw	r5,last_task_used_math@l(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+/*
+ * This code is jumped to from the startup code to copy
+ * the kernel image to physical address 0.
+ */
+relocate_kernel:
+	addis	r9,r26,klimit@ha	/* fetch klimit */
+	lwz	r25,klimit@l(r9)
+	addis	r25,r25,-KERNELBASE@h
+	li	r3,0			/* Destination base address */
+	li	r6,0			/* Destination offset */
+	li	r5,0x4000		/* # bytes of memory to copy */
+	bl	copy_and_flush		/* copy the first 0x4000 bytes */
+	addi	r0,r3,4f@l		/* jump to the address of 4f */
+	mtctr	r0			/* in copy and do the rest. */
+	bctr				/* jump to the copy */
+4:	mr	r5,r25
+	bl	copy_and_flush		/* copy the rest */
+	b	turn_on_mmu
+
+/*
+ * Copy routine used to copy the kernel to start at physical address 0
+ * and flush and invalidate the caches as needed.
+ * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
+ * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
+ */
+copy_and_flush:
+	addi	r5,r5,-4
+	addi	r6,r6,-4
+4:	li	r0,L1_CACHE_LINE_SIZE/4
+	mtctr	r0
+3:	addi	r6,r6,4			/* copy a cache line */
+	lwzx	r0,r6,r4
+	stwx	r0,r6,r3
+	bdnz	3b
+	dcbst	r6,r3			/* write it to memory */
+	sync
+	icbi	r6,r3			/* flush the icache line */
+	cmplw	0,r6,r5
+	blt	4b
+	sync				/* additional sync needed on g4 */
+	isync
+	addi	r5,r5,4
+	addi	r6,r6,4
+	blr
+
+#ifdef CONFIG_APUS
+/*
+ * On APUS the physical base address of the kernel is not known at compile
+ * time, which means the __pa/__va constants used are incorrect. In the
+ * __init section is recorded the virtual addresses of instructions using
+ * these constants, so all that has to be done is fix these before
+ * continuing the kernel boot.
+ *
+ * r4 = The physical address of the kernel base.
+ */
+fix_mem_constants:
+	mr	r10,r4
+	addis	r10,r10,-KERNELBASE@h	/* virt_to_phys constant */
+	neg	r11,r10			/* phys_to_virt constant */
+
+	lis	r12,__vtop_table_begin@h
+	ori	r12,r12,__vtop_table_begin@l
+	add	r12,r12,r10		/* table begin phys address */
+	lis	r13,__vtop_table_end@h
+	ori	r13,r13,__vtop_table_end@l
+	add	r13,r13,r10		/* table end phys address */
+	subi	r12,r12,4
+	subi	r13,r13,