Diffstat (limited to 'arch/powerpc/mm/ppc_mmu_32.c')
-rw-r--r--   arch/powerpc/mm/ppc_mmu_32.c   89
1 file changed, 43 insertions, 46 deletions
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ec1421a20aa..11571e11883 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -11,7 +11,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -27,33 +26,30 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/mmu.h>
 #include <asm/machdep.h>
-#include <asm/lmb.h>
 
 #include "mmu_decl.h"
 
-PTE *Hash, *Hash_end;
+struct hash_pte *Hash, *Hash_end;
 unsigned long Hash_size, Hash_mask;
 unsigned long _SDR1;
 
-union ubat {                    /* BAT register values to be loaded */
-        BAT     bat;
-        u32     word[2];
-} BATS[8][2];                   /* 8 pairs of IBAT, DBAT */
+struct ppc_bat BATS[8][2];      /* 8 pairs of IBAT, DBAT */
 
 struct batrange {               /* stores address ranges mapped by BATs */
         unsigned long start;
         unsigned long limit;
-        unsigned long phys;
+        phys_addr_t phys;
 } bat_addrs[8];
 
 /*
  * Return PA for this VA if it is mapped by a BAT, or 0
  */
-unsigned long v_mapped_by_bats(unsigned long va)
+phys_addr_t v_mapped_by_bats(unsigned long va)
 {
         int b;
         for (b = 0; b < 4; ++b)
@@ -65,7 +61,7 @@ unsigned long v_mapped_by_bats(unsigned long va)
 /*
  * Return VA for a given PA or 0 if not mapped
  */
-unsigned long p_mapped_by_bats(unsigned long pa)
+unsigned long p_mapped_by_bats(phys_addr_t pa)
 {
         int b;
         for (b = 0; b < 4; ++b)
@@ -76,14 +72,10 @@ unsigned long p_mapped_by_bats(unsigned long pa)
         return 0;
 }
 
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
 {
-#ifdef CONFIG_POWER4
-        return 0;
-#else
         unsigned long tot, bl, done;
         unsigned long max_size = (256<<20);
-        unsigned long align;
 
         if (__map_without_bats) {
                 printk(KERN_DEBUG "RAM mapped without BATs\n");
@@ -94,32 +86,25 @@ unsigned long __init mmu_mapin_ram(void)
 
         /* Make sure we don't map a block larger than the
            smallest alignment of the physical address. */
-        /* alignment of PPC_MEMSTART */
-        align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
-        /* set BAT block size to MIN(max_size, align) */
-        if (align && align < max_size)
-                max_size = align;
-
-        tot = total_lowmem;
+        tot = top;
         for (bl = 128<<10; bl < max_size; bl <<= 1) {
                 if (bl * 2 > tot)
                         break;
         }
 
-        setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
-        done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
+        setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X);
+        done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
         if ((done < tot) && !bat_addrs[3].limit) {
                 /* use BAT3 to cover a bit more */
                 tot -= done;
                 for (bl = 128<<10; bl < max_size; bl <<= 1)
                         if (bl * 2 > tot)
                                 break;
-                setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
-                done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
+                setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X);
+                done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
         }
 
         return done;
-#endif
 }
 
 /*
@@ -127,16 +112,16 @@ unsigned long __init mmu_mapin_ram(void)
  * The parameters are not checked; in particular size must be a power
  * of 2 between 128k and 256M.
  */
-void __init setbat(int index, unsigned long virt, unsigned long phys,
+void __init setbat(int index, unsigned long virt, phys_addr_t phys,
                    unsigned int size, int flags)
 {
         unsigned int bl;
         int wimgxpp;
-        union ubat *bat = BATS[index];
+        struct ppc_bat *bat = BATS[index];
 
-        if (((flags & _PAGE_NO_CACHE) == 0) &&
-            cpu_has_feature(CPU_FTR_NEED_COHERENT))
-                flags |= _PAGE_COHERENT;
+        if ((flags & _PAGE_NO_CACHE) ||
+            (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
+                flags &= ~_PAGE_COHERENT;
 
         bl = (size >> 17) - 1;
         if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
@@ -145,15 +130,13 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
                 wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
                                    | _PAGE_COHERENT | _PAGE_GUARDED);
                 wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
-                bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
-                bat[1].word[1] = phys | wimgxpp;
-#ifndef CONFIG_KGDB     /* want user access for breakpoints */
+                bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+                bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
                 if (flags & _PAGE_USER)
-#endif
-                        bat[1].bat.batu.vp = 1;
+                        bat[1].batu |= 1;       /* Vp = 1 */
                 if (flags & _PAGE_GUARDED) {
                         /* G bit must be zero in IBATs */
-                        bat[0].word[0] = bat[0].word[1] = 0;
+                        bat[0].batu = bat[0].batl = 0;
                 } else {
                         /* make IBAT same as DBAT */
                         bat[0] = bat[1];
@@ -166,8 +149,8 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
                                    | _PAGE_COHERENT);
                 wimgxpp |= (flags & _PAGE_RW)?
                         ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
-                bat->word[0] = virt | wimgxpp | 4;      /* Ks=0, Ku=1 */
-                bat->word[1] = phys | bl | 0x40;        /* V=1 */
+                bat->batu = virt | wimgxpp | 4;         /* Ks=0, Ku=1 */
+                bat->batl = phys | bl | 0x40;           /* V=1 */
         }
 
         bat_addrs[index].start = virt;
@@ -203,7 +186,7 @@ void __init MMU_init_hw(void)
         extern unsigned int hash_page[];
         extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
 
-        if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
+        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                 /*
                  * Put a blr (procedure return) instruction at the
                  * start of hash_page, since we can still get DSI
@@ -240,15 +223,14 @@ void __init MMU_init_hw(void)
          * Find some memory for the hash table.
          */
        if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-       Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
-                                  __initial_memory_limit));
+       Hash = __va(memblock_alloc(Hash_size, Hash_size));
        cacheable_memzero(Hash, Hash_size);
        _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
-       Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
+       Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
 
-       printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
-              total_memory >> 20, Hash_size >> 10, Hash);
+       printk("Total memory = %lldMB; using %ldkB for hash table (at %p)\n",
+              (unsigned long long)(total_memory >> 20), Hash_size >> 10, Hash);
 
 
        /*
@@ -289,3 +271,18 @@ void __init MMU_init_hw(void)
 
        if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+       /* We don't currently support the first MEMBLOCK not mapping 0
+        * physical on those processors
+        */
+       BUG_ON(first_memblock_base != 0);
+
+       /* 601 can only access 16MB at the moment */
+       if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+               memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
+       else /* Anything else has 256M mapped */
+               memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
+}
