Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
 -rw-r--r--  arch/powerpc/mm/pgtable_32.c  190
 1 file changed, 131 insertions(+), 59 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index ac3390f8190..343a87fa78b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,10 +26,14 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/fixmap.h>
 #include <asm/io.h>
+#include <asm/setup.h>
 
 #include "mmu_decl.h"
 
@@ -47,14 +51,10 @@ EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
 extern char etext[], _stext[];
 
-#ifdef CONFIG_SMP
-extern void hash_page_sync(void);
-#endif
-
 #ifdef HAVE_BATS
-extern unsigned long v_mapped_by_bats(unsigned long va);
-extern unsigned long p_mapped_by_bats(unsigned long pa);
-void setbat(int index, unsigned long virt, unsigned long phys,
+extern phys_addr_t v_mapped_by_bats(unsigned long va);
+extern unsigned long p_mapped_by_bats(phys_addr_t pa);
+void setbat(int index, unsigned long virt, phys_addr_t phys,
 	    unsigned int size, int flags);
 
 #else /* !HAVE_BATS */
@@ -64,31 +64,36 @@ void setbat(int index, unsigned long virt, unsigned long phys,
 
 #ifdef HAVE_TLBCAM
 extern unsigned int tlbcam_index;
-extern unsigned long v_mapped_by_tlbcam(unsigned long va);
-extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
+extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
+extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
 #else /* !HAVE_TLBCAM */
 #define v_mapped_by_tlbcam(x)	(0UL)
 #define p_mapped_by_tlbcam(x)	(0UL)
 #endif /* HAVE_TLBCAM */
 
-#ifdef CONFIG_PTE_64BIT
-/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
-#define PGDIR_ORDER	1
-#else
-#define PGDIR_ORDER	0
-#endif
+#define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *ret;
 
-	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
+	/* pgdir take page or two with 4K pages and a page fraction otherwise */
+#ifndef CONFIG_PPC_4K_PAGES
+	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+#else
+	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+			PGDIR_ORDER - PAGE_SHIFT);
+#endif
 	return ret;
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	free_pages((unsigned long)pgd, PGDIR_ORDER);
+#ifndef CONFIG_PPC_4K_PAGES
+	kfree((void *)pgd);
+#else
+	free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
+#endif
 }
 
 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
@@ -111,57 +116,78 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *ptepage;
 
-#ifdef CONFIG_HIGHPTE
-	gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
-#else
 	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
-#endif
 
 	ptepage = alloc_pages(flags, 0);
 	if (!ptepage)
 		return NULL;
-	pgtable_page_ctor(ptepage);
+	if (!pgtable_page_ctor(ptepage)) {
+		__free_page(ptepage);
+		return NULL;
+	}
 	return ptepage;
 }
 
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+void __iomem *
+ioremap(phys_addr_t addr, unsigned long size)
 {
-#ifdef CONFIG_SMP
-	hash_page_sync();
-#endif
-	free_page((unsigned long)pte);
+	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
+				__builtin_return_address(0));
 }
+EXPORT_SYMBOL(ioremap);
 
-void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+void __iomem *
+ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-#ifdef CONFIG_SMP
-	hash_page_sync();
-#endif
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
+	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
				__builtin_return_address(0));
 }
+EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
+ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
-	return __ioremap(addr, size, _PAGE_NO_CACHE);
+	/* writeable implies dirty for kernel addresses */
+	if (flags & _PAGE_RW)
+		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
+
+	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
+	flags &= ~(_PAGE_USER | _PAGE_EXEC);
+
+#ifdef _PAGE_BAP_SR
+	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+	 * which means that we just cleared supervisor access... oops ;-) This
+	 * restores it
+	 */
+	flags |= _PAGE_BAP_SR;
+#endif
+
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_prot);
 
 void __iomem *
-ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
+__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
-	return __ioremap(addr, size, flags);
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(ioremap_flags);
 
 void __iomem *
-__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
+__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
+		 void *caller)
 {
 	unsigned long v, i;
 	phys_addr_t p;
 	int err;
 
+	/* Make sure we have the base flags */
+	if ((flags & _PAGE_PRESENT) == 0)
+		flags |= PAGE_KERNEL;
+
+	/* Non-cacheable page cannot be coherent */
+	if (flags & _PAGE_NO_CACHE)
+		flags &= ~_PAGE_COHERENT;
+
 	/*
 	 * Choose an address to map it to.
 	 * Once the vmalloc system is running, we use it.
@@ -178,15 +204,18 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	if (p < 16*1024*1024)
 		p += _ISA_MEM_BASE;
 
+#ifndef CONFIG_CRASH_DUMP
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using.
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
-	if (mem_init_done && (p < virt_to_phys(high_memory))) {
-		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
+	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
+		printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
 	}
+#endif
 
 	if (size == 0)
 		return NULL;
@@ -210,19 +239,15 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	if (mem_init_done) {
 		struct vm_struct *area;
-		area = get_vm_area(size, VM_IOREMAP);
+		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
 			return NULL;
+		area->phys_addr = p;
 		v = (unsigned long) area->addr;
 	} else {
 		v = (ioremap_bot -= size);
 	}
 
-	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= _PAGE_KERNEL;
-	if (flags & _PAGE_NO_CACHE)
-		flags |= _PAGE_GUARDED;
-
 	/*
 	 * Should check if it is a candidate for a BAT mapping
 	 */
@@ -269,27 +294,30 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		/* The PTE should never be already set nor present in the
 		 * hash table
 		 */
-		BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE));
+		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
+		       flags);
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 						     __pgprot(flags)));
 	}
+	smp_wmb();
 	return err;
 }
 
 /*
- * Map in all of physical memory starting at KERNELBASE.
+ * Map in a chunk of physical memory starting at start.
  */
-void __init mapin_ram(void)
+void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
-	unsigned long v, p, s, f;
+	unsigned long v, s, f;
+	phys_addr_t p;
 	int ktext;
 
-	s = mmu_mapin_ram();
-	v = KERNELBASE + s;
-	p = PPC_MEMSTART + s;
-	for (; s < total_lowmem; s += PAGE_SIZE) {
+	s = offset;
+	v = PAGE_OFFSET + s;
+	p = memstart_addr + s;
+	for (; s < top; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
-		f = ktext ?_PAGE_RAM_TEXT : _PAGE_RAM;
+		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
 		map_page(v, p, f);
 #ifdef CONFIG_PPC_STD_MMU_32
 		if (ktext)
@@ -300,6 +328,30 @@ void __init mapin_ram(void)
 	}
 }
 
+void __init mapin_ram(void)
+{
+	unsigned long s, top;
+
+#ifndef CONFIG_WII
+	top = total_lowmem;
+	s = mmu_mapin_ram(top);
+	__mapin_ram_chunk(s, top);
+#else
+	if (!wii_hole_size) {
+		s = mmu_mapin_ram(total_lowmem);
+		__mapin_ram_chunk(s, total_lowmem);
+	} else {
+		top = wii_hole_start;
+		s = mmu_mapin_ram(top);
+		__mapin_ram_chunk(s, top);
+
+		top = memblock_end_of_DRAM();
+		s = wii_mmu_mapin_mem2(top);
+		__mapin_ram_chunk(s, top);
+	}
+#endif
+}
+
 /* Scan the real Linux page tables and return a PTE pointer for
  * a virtual address in a context.
  * Returns true (1) if PTE was found, zero otherwise. The pointer to
@@ -349,9 +401,9 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 		return 0;
 	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
 		return -EINVAL;
-	set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
 	wmb();
-	flush_HPTE(0, address, pmd_val(*kpmd));
+	flush_tlb_page(NULL, address);
 	pte_unmap(kpte);
 
 	return 0;
@@ -386,3 +438,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+static int fixmaps;
+
+void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	map_page(address, phys, pgprot_val(flags));
+	fixmaps++;
+}
+
+void __this_fixmap_does_not_exist(void)
+{
+	WARN_ON(1);
+}
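Note on the ioremap rework above: every public variant (ioremap, ioremap_wc, ioremap_prot, and the legacy __ioremap) is now a thin wrapper around __ioremap_caller(), which applies the flag fixups in one place and forwards the caller's return address to get_vm_area_caller(), so the mapping is attributed to its real caller in /proc/vmallocinfo. A minimal sketch of driver-side usage follows; the physical address, size, and register offset are hypothetical placeholders, not values from this diff:

#include <linux/errno.h>
#include <linux/io.h>

#define DEV_REGS_PHYS	0xfe000000UL	/* hypothetical MMIO block */
#define DEV_REGS_SIZE	0x1000
#define DEV_CTRL	0x04		/* hypothetical register offset */

static int dev_map_sketch(void)
{
	void __iomem *regs;

	/* Guarded, non-cacheable mapping; __ioremap_caller() records us */
	regs = ioremap(DEV_REGS_PHYS, DEV_REGS_SIZE);
	if (!regs)
		return -ENOMEM;

	/* MMIO must go through the accessors, never plain loads/stores */
	writel(1, regs + DEV_CTRL);

	iounmap(regs);
	return 0;
}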
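It may also help to spell out the arithmetic behind the new PGDIR_ORDER, which replaces the old hard-coded 0/1 page order with log2 of the pgdir size in bytes: a pgdir holds 1 << (32 - PGDIR_SHIFT) entries of sizeof(pgd_t) bytes each. The concrete numbers below assume the usual ppc32 layouts (4-byte pgd entries, so PGD_T_LOG2 = 2); they are illustrative, not taken from this diff:

/*
 * pgdir size = (1 << (32 - PGDIR_SHIFT)) * sizeof(pgd_t)
 *            = 1 << (32 + PGD_T_LOG2 - PGDIR_SHIFT)
 *            = 1 << PGDIR_ORDER			(in bytes)
 *
 * 4K pages, 4-byte PTEs: PGDIR_SHIFT = 22
 *   PGDIR_ORDER = 32 + 2 - 22 = 12  ->  4KB pgdir
 *   __get_free_pages() order = PGDIR_ORDER - PAGE_SHIFT = 0 (one page)
 *
 * 4K pages, 8-byte PTEs (CONFIG_PTE_64BIT): PGDIR_SHIFT = 21
 *   PGDIR_ORDER = 32 + 2 - 21 = 13  ->  8KB pgdir (order 1),
 *   matching the old "44x uses an 8kB pgdir" special case
 *
 * 16K/64K pages: the pgdir is only a fraction of a page, which is
 * why the !CONFIG_PPC_4K_PAGES path uses kzalloc() instead.
 */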
