Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 61 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 430d0908fa5..343a87fa78b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,11 +26,14 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
 #include <asm/io.h>
+#include <asm/setup.h>
 
 #include "mmu_decl.h"
 
@@ -76,7 +79,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	/* pgdir take page or two with 4K pages and a page fraction otherwise */
 #ifndef CONFIG_PPC_4K_PAGES
-	ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
 #else
 	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 			PGDIR_ORDER - PAGE_SHIFT);
@@ -113,16 +116,15 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *ptepage;
 
-#ifdef CONFIG_HIGHPTE
-	gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
-#else
 	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
-#endif
 
 	ptepage = alloc_pages(flags, 0);
 	if (!ptepage)
 		return NULL;
-	pgtable_page_ctor(ptepage);
+	if (!pgtable_page_ctor(ptepage)) {
+		__free_page(ptepage);
+		return NULL;
+	}
 	return ptepage;
 }
 
@@ -135,18 +137,34 @@ ioremap(phys_addr_t addr, unsigned long size)
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
-ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
+ioremap_wc(phys_addr_t addr, unsigned long size)
+{
+	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
+				__builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iomem *
+ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
 	/* writeable implies dirty for kernel addresses */
 	if (flags & _PAGE_RW)
 		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
 
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-	flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);
+	flags &= ~(_PAGE_USER | _PAGE_EXEC);
+
+#ifdef _PAGE_BAP_SR
+	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+	 * which means that we just cleared supervisor access... oops ;-) This
+	 * restores it
+	 */
+	flags |= _PAGE_BAP_SR;
+#endif
 
 	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_prot);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
@@ -191,8 +209,9 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 * Don't allow anybody to remap normal RAM that we're using.
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
-	if (mem_init_done && (p < virt_to_phys(high_memory))) {
-		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
+	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
+		printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
 	}
@@ -223,6 +242,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
 			return NULL;
+		area->phys_addr = p;
 		v = (unsigned long) area->addr;
 	} else {
 		v = (ioremap_bot -= size);
@@ -279,22 +299,23 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 						     __pgprot(flags)));
 	}
+	smp_wmb();
 	return err;
 }
 
 /*
- * Map in a big chunk of physical memory starting at PAGE_OFFSET.
+ * Map in a chunk of physical memory starting at start.
  */
-void __init mapin_ram(void)
+void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
 	unsigned long v, s, f;
 	phys_addr_t p;
 	int ktext;
 
-	s = mmu_mapin_ram();
+	s = offset;
 	v = PAGE_OFFSET + s;
 	p = memstart_addr + s;
-	for (; s < total_lowmem; s += PAGE_SIZE) {
+	for (; s < top; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
 		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
 		map_page(v, p, f);
@@ -307,6 +328,30 @@ void __init mapin_ram(void)
 	}
 }
 
+void __init mapin_ram(void)
+{
+	unsigned long s, top;
+
+#ifndef CONFIG_WII
+	top = total_lowmem;
+	s = mmu_mapin_ram(top);
+	__mapin_ram_chunk(s, top);
+#else
+	if (!wii_hole_size) {
+		s = mmu_mapin_ram(total_lowmem);
+		__mapin_ram_chunk(s, total_lowmem);
+	} else {
+		top = wii_hole_start;
+		s = mmu_mapin_ram(top);
+		__mapin_ram_chunk(s, top);
+
+		top = memblock_end_of_DRAM();
+		s = wii_mmu_mapin_mem2(top);
+		__mapin_ram_chunk(s, top);
+	}
+#endif
+}
+
 /* Scan the real Linux page tables and return a PTE pointer for
  * a virtual address in a context.
  * Returns true (1) if PTE was found, zero otherwise. The pointer to
@@ -356,13 +401,9 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 		return 0;
 	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
 		return -EINVAL;
-	set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
 	wmb();
-#ifdef CONFIG_PPC_STD_MMU
-	flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
 	flush_tlb_page(NULL, address);
-#endif
 	pte_unmap(kpte);
 
 	return 0;
@@ -399,8 +440,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 static int fixmaps;
-unsigned long FIXADDR_TOP = (-PAGE_SIZE);
-EXPORT_SYMBOL(FIXADDR_TOP);
 
 void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
 {
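Note: in the pte_alloc_one() hunk above, pgtable_page_ctor() is now checked for failure; the constructor can fail (for example when a split page-table lock needs a separate allocation), so the freshly allocated page must be freed rather than returned half-initialised. A minimal sketch of the same pattern in generic kernel C follows; it is not the powerpc code itself, and the helper name alloc_pte_page() is made up for illustration:

#include <linux/mm.h>
#include <linux/gfp.h>

/* Sketch of the ctor-can-fail pattern from the hunk above;
 * alloc_pte_page() is a hypothetical name, not a kernel API.
 */
static struct page *alloc_pte_page(void)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		/* ctor failed: release the page instead of leaking it */
		__free_page(page);
		return NULL;
	}
	return page;
}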

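Similarly, ioremap_flags() is renamed to ioremap_prot(), matching the generic name used by other architectures, and an ioremap_wc() variant is added that maps with _PAGE_NO_CACHE only. A driver-side sketch of the new entry point, where the base address and size are invented for illustration:

#include <linux/io.h>
#include <linux/errno.h>

#define DEMO_MMIO_BASE	0xc8000000UL	/* hypothetical bus address */
#define DEMO_MMIO_SIZE	0x1000UL	/* hypothetical aperture size */

static void __iomem *demo_regs;

static int demo_map_regs(void)
{
	/* write-combining-style mapping, e.g. for a framebuffer aperture */
	demo_regs = ioremap_wc(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);
	if (!demo_regs)
		return -ENOMEM;
	return 0;
}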