Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/Makefile          |  2
-rw-r--r--  arch/powerpc/mm/fault.c           | 25
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  |  2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   |  2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c     | 18
-rw-r--r--  arch/powerpc/mm/init_64.c         |  8
-rw-r--r--  arch/powerpc/mm/numa.c            | 65
-rw-r--r--  arch/powerpc/mm/pgtable_32.c      | 33
-rw-r--r--  arch/powerpc/mm/pgtable_64.c      | 52
-rw-r--r--  arch/powerpc/mm/slb.c             | 13
10 files changed, 143 insertions(+), 77 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 93441e7a292..38a81967ca0 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -8,7 +8,7 @@ endif
obj-y := fault.o mem.o lmb.o
obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
-hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
+hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
hash_utils_64.o hash_low_64.o tlb_64.o \
slb_low.o slb.o stab.o mmap.o imalloc.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e8fa50624b7..03aeb3a4607 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -426,18 +426,21 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* kernel has accessed a bad area */
- printk(KERN_ALERT "Unable to handle kernel paging request for ");
switch (regs->trap) {
- case 0x300:
- case 0x380:
- printk("data at address 0x%08lx\n", regs->dar);
- break;
- case 0x400:
- case 0x480:
- printk("instruction fetch\n");
- break;
- default:
- printk("unknown fault\n");
+ case 0x300:
+ case 0x380:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "data at address 0x%08lx\n", regs->dar);
+ break;
+ case 0x400:
+ case 0x480:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "instruction fetch\n");
+ break;
+ default:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "unknown fault\n");
+ break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c90f124f3c7..6f1016acdbf 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -123,7 +123,7 @@ static inline void native_unlock_hpte(hpte_t *hptep)
clear_bit(HPTE_LOCK_BIT, word);
}
-long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize)
{
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1915661c2c8..c0d2a694fa3 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -277,7 +277,7 @@ static void __init htab_init_page_sizes(void)
* Not in the device-tree, let's fallback on known size
* list for 16M capable GP & GR
*/
- if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
+ if (cpu_has_feature(CPU_FTR_16M_PAGE))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
found:
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5615acc2952..89c836d5480 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -146,6 +146,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
return hugepte_offset(hpdp, addr);
}
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
pte_t *hugepte = hugepd_page(*hpdp);
@@ -480,9 +485,6 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
mm->context.high_htlb_areas |= newareas;
- /* update the paca copy of the context struct */
- get_paca()->context = mm->context;
-
/* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */
mb();
@@ -494,11 +496,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
return 0;
}
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
{
int err = 0;
- if ( (addr+len) < addr )
+ if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+ return -EINVAL;
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
return -EINVAL;
if (addr < 0x100000000UL)
@@ -1041,7 +1047,7 @@ repeat:
return err;
}
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
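Note on the prepare_hugepage_range() change above: the new pgoff/len/addr tests reject any mapping that is not hugepage-aligned before any low/high areas are opened. A minimal standalone sketch of the same arithmetic follows, assuming 16M huge pages (HPAGE_SHIFT = 24) and 4K base pages (PAGE_SHIFT = 12); the EX_* names and ex_hugepage_range_ok() are hypothetical, not part of the patch.

/* Illustration only: the alignment checks as a self-contained helper. */
#define EX_PAGE_SHIFT	12
#define EX_HPAGE_SHIFT	24
#define EX_HPAGE_MASK	(~((1UL << EX_HPAGE_SHIFT) - 1))

static int ex_hugepage_range_ok(unsigned long addr, unsigned long len,
				unsigned long pgoff)
{
	/* ~EX_HPAGE_MASK >> EX_PAGE_SHIFT == 0x0fff: pgoff must be a
	 * multiple of 4096 base pages, i.e. one whole huge page. */
	if (pgoff & (~EX_HPAGE_MASK >> EX_PAGE_SHIFT))
		return 0;	/* file offset not on a huge page boundary */
	if (len & ~EX_HPAGE_MASK)
		return 0;	/* length not a multiple of 16M */
	if (addr & ~EX_HPAGE_MASK)
		return 0;	/* address not 16M aligned */
	return 1;
}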
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3ff374697e3..d12a87ec5ae 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -130,7 +130,7 @@ static int __init setup_kcore(void)
/* GFP_ATOMIC to avoid might_sleep warnings during boot */
kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
if (!kcore_mem)
- panic("mem_init: kmalloc failed\n");
+ panic("%s: kmalloc failed\n", __FUNCTION__);
kclist_add(kcore_mem, __va(base), size);
}
@@ -141,7 +141,7 @@ static int __init setup_kcore(void)
}
module_init(setup_kcore);
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
@@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
/* Hugepages need one extra cache, initialized in hugetlbpage.c. We
* can't put into the tables above, because HPAGE_SHIFT is not compile
* time constant. */
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
void pgtable_cache_init(void)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9da01dc8cfd..262790910ff 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -295,6 +295,63 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
return lmb_end_of_DRAM() - start;
}
+/*
+ * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
+ * node. This assumes n_mem_{addr,size}_cells have been set.
+ */
+static void __init parse_drconf_memory(struct device_node *memory)
+{
+ const unsigned int *lm, *dm, *aa;
+ unsigned int ls, ld, la;
+ unsigned int n, aam, aalen;
+ unsigned long lmb_size, size;
+ int nid, default_nid = 0;
+ unsigned int start, ai, flags;
+
+ lm = get_property(memory, "ibm,lmb-size", &ls);
+ dm = get_property(memory, "ibm,dynamic-memory", &ld);
+ aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
+ if (!lm || !dm || !aa ||
+ ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
+ la < 2 * sizeof(unsigned int))
+ return;
+
+ lmb_size = read_n_cells(n_mem_size_cells, &lm);
+ n = *dm++; /* number of LMBs */
+ aam = *aa++; /* number of associativity lists */
+ aalen = *aa++; /* length of each associativity list */
+ if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
+ la < (aam * aalen + 2) * sizeof(unsigned int))
+ return;
+
+ for (; n != 0; --n) {
+ start = read_n_cells(n_mem_addr_cells, &dm);
+ ai = dm[2];
+ flags = dm[3];
+ dm += 4;
+ /* 0x80 == reserved, 0x8 = assigned to us */
+ if ((flags & 0x80) || !(flags & 0x8))
+ continue;
+ nid = default_nid;
+ /* flags & 0x40 means associativity index is invalid */
+ if (min_common_depth > 0 && min_common_depth <= aalen &&
+ (flags & 0x40) == 0 && ai < aam) {
+ /* this is like of_node_to_nid_single */
+ nid = aa[ai * aalen + min_common_depth - 1];
+ if (nid == 0xffff || nid >= MAX_NUMNODES)
+ nid = default_nid;
+ }
+ node_set_online(nid);
+
+ size = numa_enforce_memory_limit(start, lmb_size);
+ if (!size)
+ continue;
+
+ add_active_range(nid, start >> PAGE_SHIFT,
+ (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+ }
+}
+
static int __init parse_numa_properties(void)
{
struct device_node *cpu = NULL;
@@ -385,6 +442,14 @@ new_range:
goto new_range;
}
+ /*
+ * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+ * property in the ibm,dynamic-reconfiguration-memory node.
+ */
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory)
+ parse_drconf_memory(memory);
+
return 0;
}
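Note on parse_drconf_memory() above: it walks the flat ibm,dynamic-memory property cell by cell, reading n_mem_addr_cells of starting address and then four more cells per LMB, of which only dm[2] (the associativity-array index) and dm[3] (the flags word) are used. The sketch below spells out that per-record layout for n_mem_addr_cells == 2; the field names for the two skipped cells (drc_index, reserved) are an assumption about the firmware layout, not something the patch itself states.

/* Sketch (assumption): one ibm,dynamic-memory record as read above. */
struct ex_drconf_mem_record {
	unsigned int	start_hi;	/* read_n_cells(n_mem_addr_cells, &dm) */
	unsigned int	start_lo;
	unsigned int	drc_index;	/* skipped by the parser (dm[0]) */
	unsigned int	reserved;	/* skipped by the parser (dm[1]) */
	unsigned int	assoc_index;	/* ai = dm[2]; row in the lookup array */
	unsigned int	flags;		/* flags = dm[3]; 0x80 reserved LMB,
					 * 0x8 assigned to this partition,
					 * 0x40 associativity index invalid */
};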
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8fcacb0239d..1891dbeeb8e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -141,29 +141,19 @@ void pte_free(struct page *ptepage)
__free_page(ptepage);
}
-#ifndef CONFIG_PHYS_64BIT
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
-#else /* CONFIG_PHYS_64BIT */
-void __iomem *
-ioremap64(unsigned long long addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE);
-}
-EXPORT_SYMBOL(ioremap64);
+EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
+ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
{
- phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
-
- return ioremap64(addr64, size);
+ return __ioremap(addr, size, flags);
}
-#endif /* CONFIG_PHYS_64BIT */
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
@@ -264,20 +254,7 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
-void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
- return (void __iomem *) (port + _IO_BASE);
-}
-
-void ioport_unmap(void __iomem *addr)
-{
- /* Nothing to do */
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-int
-map_page(unsigned long va, phys_addr_t pa, int flags)
+int map_page(unsigned long va, phys_addr_t pa, int flags)
{
pmd_t *pd;
pte_t *pg;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4aaa50..16e4ee1c231 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -113,7 +113,7 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
}
-static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
unsigned long ea, unsigned long size,
unsigned long flags)
{
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
-void __iomem * __ioremap(unsigned long addr, unsigned long size,
+void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
unsigned long pa, ea;
void __iomem *ret;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return (void __iomem *)addr;
-
/*
* Choose an address to map it to.
* Once the imalloc system is running, we use it.
@@ -178,9 +168,28 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
return ret;
}
+
+void __iomem * ioremap(phys_addr_t addr, unsigned long size)
+{
+ unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags);
+ return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
+ unsigned long flags)
+{
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags);
+ return __ioremap(addr, size, flags);
+}
+
+
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-int __ioremap_explicit(unsigned long pa, unsigned long ea,
+int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
unsigned long size, unsigned long flags)
{
struct vm_struct *area;
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
*
* XXX what about calls before mem_init_done (ie python_countermeasures())
*/
-void iounmap(volatile void __iomem *token)
+void __iounmap(volatile void __iomem *token)
{
void *addr;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return;
-
if (!mem_init_done)
return;
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
im_free(addr);
}
+void iounmap(volatile void __iomem *token)
+{
+ if (ppc_md.iounmap)
+ ppc_md.iounmap(token);
+ else
+ __iounmap(token);
+}
+
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
return 0;
}
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
struct vm_struct *area;
unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
}
EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
void __iomem * reserve_phb_iospace(unsigned long size)
{
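Note on the ioremap()/iounmap() changes above: the generic entry points now dispatch through ppc_md.ioremap and ppc_md.iounmap when a platform supplies them, which is what lets the firmware_has_feature(FW_FEATURE_ISERIES) special cases be dropped from __ioremap() and iounmap(). A hedged sketch of how a platform might hook these follows; the example_* names are hypothetical, only the hook signatures are taken from the calls in the patch.

/* Sketch: platform-private ioremap/iounmap handlers. */
static void __iomem *example_ioremap(phys_addr_t addr, unsigned long size,
				     unsigned long flags)
{
	/* e.g. an identity-mapped I/O space needs no page-table setup */
	return (void __iomem *)addr;
}

static void example_iounmap(volatile void __iomem *token)
{
	/* nothing to undo for an identity mapping */
}

/* In the platform's machdep setup code (assumed usage):
 *	ppc_md.ioremap = example_ioremap;
 *	ppc_md.iounmap = example_iounmap;
 */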
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index d3733912adb..224e960650a 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -23,6 +23,7 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
+#include <asm/firmware.h>
#include <linux/compiler.h>
#ifdef DEBUG
@@ -193,6 +194,7 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
void slb_initialize(void)
{
unsigned long linear_llp, vmalloc_llp, io_llp;
+ unsigned long lflags, vflags;
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
@@ -225,11 +227,12 @@ void slb_initialize(void)
#endif
}
+ get_paca()->stab_rr = SLB_NUM_BOLTED;
+
/* On iSeries the bolted entries have already been set up by
* the hypervisor from the lparMap data in head.S */
-#ifndef CONFIG_PPC_ISERIES
- {
- unsigned long lflags, vflags;
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ return;
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
@@ -247,8 +250,4 @@ void slb_initialize(void)
* elsewhere, we'll call _switch() which will bolt in the new
* one. */
asm volatile("isync":::"memory");
- }
-#endif /* CONFIG_PPC_ISERIES */
-
- get_paca()->stab_rr = SLB_NUM_BOLTED;
}