Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 47
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 573b3bd1c45..343a87fa78b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,12 +26,14 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/io.h>
+#include <asm/setup.h>
#include "mmu_decl.h"
@@ -77,7 +79,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
/* pgdir take page or two with 4K pages and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
- ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+ ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);
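
The comment above explains the split: with 4K base pages the pgdir fills a whole page or two, otherwise it is only a fraction of a page, so kzalloc() is the better fit. Below is a standalone C sketch of just that size comparison; the shift values and the 4-byte entry size are assumed toy numbers, not taken from any real PPC32 configuration.

#include <stdio.h>

/* assumed toy values: 4-byte pgd entries, each mapping a 4MB region */
#define PGD_ENTRY_LOG2  2
#define PGDIR_SHIFT_X   22
#define PGDIR_ORDER_X   (32 + PGD_ENTRY_LOG2 - PGDIR_SHIFT_X)

int main(void)
{
        unsigned long pgd_bytes = 1UL << PGDIR_ORDER_X;  /* 4096 here */
        unsigned int page_shift[] = { 12, 16 };          /* 4K and 64K pages */

        for (int i = 0; i < 2; i++) {
                unsigned long page_bytes = 1UL << page_shift[i];

                printf("%luK pages: %lu-byte pgd -> %s\n",
                       page_bytes >> 10, pgd_bytes,
                       pgd_bytes >= page_bytes ?
                       "whole page(s), page-allocator path" :
                       "page fraction, kzalloc() path");
        }
        return 0;
}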
@@ -114,16 +116,15 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
-#ifdef CONFIG_HIGHPTE
- gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
-#else
gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
-#endif
ptepage = alloc_pages(flags, 0);
if (!ptepage)
return NULL;
- pgtable_page_ctor(ptepage);
+ if (!pgtable_page_ctor(ptepage)) {
+ __free_page(ptepage);
+ return NULL;
+ }
return ptepage;
}
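
The new error path matters because pgtable_page_ctor() can fail after alloc_pages() has already succeeded; the old code ignored the return value. Here is a hedged userspace analogue of the same pattern, with invented names (ctor_may_fail(), make_pte_page()) and calloc()/free() standing in for the page allocator:

#include <stdio.h>
#include <stdlib.h>

/* stand-in for pgtable_page_ctor(): pretend it can fail */
static int ctor_may_fail(void *page)
{
        return page != NULL;    /* non-zero means success in this toy */
}

/* stand-in for pte_alloc_one(): allocate, construct, clean up on failure */
static void *make_pte_page(size_t size)
{
        void *page = calloc(1, size);   /* zeroed, like __GFP_ZERO */

        if (!page)
                return NULL;
        if (!ctor_may_fail(page)) {
                free(page);             /* undo the allocation, don't leak */
                return NULL;
        }
        return page;
}

int main(void)
{
        void *p = make_pte_page(4096);

        printf("allocation %s\n", p ? "succeeded" : "failed");
        free(p);
        return 0;
}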
@@ -136,7 +137,15 @@ ioremap(phys_addr_t addr, unsigned long size)
EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
+ioremap_wc(phys_addr_t addr, unsigned long size)
+{
+ return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iomem *
+ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
/* writeable implies dirty for kernel addresses */
if (flags & _PAGE_RW)
@@ -145,9 +154,17 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);
+#ifdef _PAGE_BAP_SR
+ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+ * which means that we just cleared supervisor access... oops ;-) This
+ * restores it
+ */
+ flags |= _PAGE_BAP_SR;
+#endif
+
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_prot);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
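
The added comment is the key to the flags |= _PAGE_BAP_SR line: on BookE with the new PTE format, _PAGE_USER overlaps the supervisor-access bit, so masking out the user and exec bits also drops supervisor access, which then has to be put back. A standalone sketch of just that bit manipulation, using made-up FAKE_PAGE_* values rather than the real powerpc definitions:

#include <stdio.h>

#define FAKE_PAGE_RW      0x001
#define FAKE_PAGE_DIRTY   0x002
#define FAKE_PAGE_EXEC    0x004
#define FAKE_PAGE_BAP_SR  0x010                        /* supervisor access */
#define FAKE_PAGE_USER    (0x020 | FAKE_PAGE_BAP_SR)   /* overlaps on purpose */

static unsigned long sanitize(unsigned long flags)
{
        if (flags & FAKE_PAGE_RW)
                flags |= FAKE_PAGE_DIRTY;              /* writeable implies dirty */
        flags &= ~(FAKE_PAGE_USER | FAKE_PAGE_EXEC);   /* drop user/exec bits */
        flags |= FAKE_PAGE_BAP_SR;                     /* restore supervisor access */
        return flags;
}

int main(void)
{
        unsigned long in = FAKE_PAGE_RW | FAKE_PAGE_USER | FAKE_PAGE_EXEC;
        unsigned long out = sanitize(in);

        printf("in  = 0x%03lx\nout = 0x%03lx\n", in, out);
        return 0;
}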
@@ -193,8 +210,8 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
* mem_init() sets high_memory so only do the check after that.
*/
if (mem_init_done && (p < virt_to_phys(high_memory)) &&
- !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
- printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
+ !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
+ printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
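
The condition reads a little awkwardly because of the double negation: once mem_init() has set high_memory, ioremap of normal RAM is refused unless the region is memblock-reserved and the caller opted in via __allow_ioremap_reserved. A small sketch of the same guard shape, with invented helpers (is_ram(), region_reserved()) and made-up address boundaries:

#include <stdbool.h>
#include <stdio.h>

/* toy boundaries: "RAM" below 256MB, a "reserved" region in the last 16MB */
static bool is_ram(unsigned long long p)          { return p < 0x10000000ULL; }
static bool region_reserved(unsigned long long p) { return p >= 0x0f000000ULL; }

static bool may_ioremap(unsigned long long p, bool mem_init_done,
                        bool allow_reserved)
{
        /* same shape as the check in the patch */
        if (mem_init_done && is_ram(p) &&
            !(allow_reserved && region_reserved(p)))
                return false;
        return true;
}

int main(void)
{
        printf("plain RAM:    %d\n", may_ioremap(0x01000000ULL, true, false));
        printf("reserved RAM: %d\n", may_ioremap(0x0f800000ULL, true, true));
        return 0;
}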
@@ -225,6 +242,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (area == 0)
return NULL;
+ area->phys_addr = p;
v = (unsigned long) area->addr;
} else {
v = (ioremap_bot -= size);
@@ -281,6 +299,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
}
+ smp_wmb();
return err;
}
@@ -326,7 +345,7 @@ void __init mapin_ram(void)
s = mmu_mapin_ram(top);
__mapin_ram_chunk(s, top);
- top = lmb_end_of_DRAM();
+ top = memblock_end_of_DRAM();
s = wii_mmu_mapin_mem2(top);
__mapin_ram_chunk(s, top);
}
@@ -384,11 +403,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return -EINVAL;
__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
wmb();
-#ifdef CONFIG_PPC_STD_MMU
- flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
flush_tlb_page(NULL, address);
-#endif
pte_unmap(kpte);
return 0;