Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--  arch/ppc64/mm/fault.c         5
-rw-r--r--  arch/ppc64/mm/hash_low.S      2
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c   6
-rw-r--r--  arch/ppc64/mm/init.c         28
-rw-r--r--  arch/ppc64/mm/numa.c         43
-rw-r--r--  arch/ppc64/mm/slb.c           4
-rw-r--r--  arch/ppc64/mm/slb_low.S       2
7 files changed, 61 insertions, 29 deletions
diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c
index 20b0f37e8bf..772f0714a5b 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/ppc64/mm/fault.c
@@ -29,6 +29,7 @@
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
+#include <linux/kprobes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -84,8 +85,8 @@ static int store_updates_sp(struct pt_regs *regs)
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
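
Annotating do_page_fault() with __kprobes keeps the fault handler itself off-limits to kprobes: the attribute places the function in the dedicated .kprobes.text section, and the kprobes core refuses to plant probes at addresses inside that section, so a probe can never recursively trigger the page-fault path it depends on. A minimal sketch of the mechanism (the section name is the kernel's; the helper below is illustrative):

	/* __kprobes relocates the function into a no-probe section. */
	#define __kprobes	__attribute__((__section__(".kprobes.text")))

	/* Linker-provided bounds of that section. */
	extern char __kprobes_text_start[], __kprobes_text_end[];

	/* Illustrative check applied when registering a probe. */
	static int addr_in_kprobes_text(unsigned long addr)
	{
		return addr >= (unsigned long)__kprobes_text_start &&
		       addr <  (unsigned long)__kprobes_text_end;
	}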
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index 35eb49e1b89..ee5a5d36bfa 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -16,7 +16,7 @@
#include <asm/page.h>
#include <asm/types.h>
#include <asm/ppc_asm.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
#include <asm/cputable.h>
.text
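
This hunk (and the matching one in slb_low.S below) tracks the rename of the generated header from asm/offsets.h to asm/asm-offsets.h. The file is not written by hand: a build-time C program emits structure offsets as assembler-visible constants, which a script turns into #define lines, roughly along these lines (a sketch of the usual DEFINE() pattern, not the exact ppc64 generator):

	#include <linux/sched.h>

	/* The compiler emits "->SYM value" markers into its assembly
	 * output; the build scripts turn them into #defines. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	int main(void)
	{
		DEFINE(THREAD, offsetof(struct task_struct, thread));
		return 0;
	}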
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index e7833c80eb6..338771ec70d 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
for (i = 0; i < NUM_LOW_AREAS; i++) {
if (! (areas & (1U << i)))
continue;
- asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+ asm volatile("slbie %0"
+ : : "r" ((i << SID_SHIFT) | SLBIE_C));
}
asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
continue;
for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
asm volatile("slbie %0"
- :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+ :: "r" (((i << HTLB_AREA_SHIFT)
+ + (j << SID_SHIFT)) | SLBIE_C));
}
asm volatile("isync" : : : "memory");
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index c02dc9809ca..c2157c9c3ac 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -392,6 +392,7 @@ void free_initmem(void)
addr = (unsigned long)__init_begin;
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
+ memset((void *)addr, 0xcc, PAGE_SIZE);
ClearPageReserved(virt_to_page(addr));
set_page_count(virt_to_page(addr), 1);
free_page(addr);
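
free_initmem() now fills every init page with 0xcc before releasing it, so anything that later jumps into or reads freed init memory hits an unmistakable poison pattern instead of silently using stale contents. The same pattern as a standalone helper (the helper name and poison macro are hypothetical):

	#define INIT_POISON	0xcc	/* recognizable junk byte */

	static void poison_and_free_page(unsigned long addr)
	{
		memset((void *)addr, INIT_POISON, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
	}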
@@ -552,27 +553,18 @@ void __init do_init_bootmem(void)
/* Add all physical memory to the bootmem map, mark each area
* present.
*/
- for (i=0; i < lmb.memory.cnt; i++) {
- unsigned long base, size;
- unsigned long start_pfn, end_pfn;
-
- base = lmb.memory.region[i].base;
- size = lmb.memory.region[i].size;
-
- start_pfn = base >> PAGE_SHIFT;
- end_pfn = start_pfn + (size >> PAGE_SHIFT);
- memory_present(0, start_pfn, end_pfn);
-
- free_bootmem(base, size);
- }
+ for (i=0; i < lmb.memory.cnt; i++)
+ free_bootmem(lmb.memory.region[i].base,
+ lmb_size_bytes(&lmb.memory, i));
/* reserve the sections we're already using */
- for (i=0; i < lmb.reserved.cnt; i++) {
- unsigned long base = lmb.reserved.region[i].base;
- unsigned long size = lmb.reserved.region[i].size;
+ for (i=0; i < lmb.reserved.cnt; i++)
+ reserve_bootmem(lmb.reserved.region[i].base,
+ lmb_size_bytes(&lmb.reserved, i));
- reserve_bootmem(base, size);
- }
+ for (i=0; i < lmb.memory.cnt; i++)
+ memory_present(0, lmb_start_pfn(&lmb.memory, i),
+ lmb_end_pfn(&lmb.memory, i));
}
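
Besides switching to the lmb accessor helpers, the rewrite moves memory_present() after the reserve_bootmem() pass: with sparsemem, memory_present() may allocate section bookkeeping out of bootmem, and doing that before the reserved regions are marked risks handing out memory that is already in use. The helpers themselves are thin wrappers over the region array, roughly (a sketch of the lmb.h accessors this hunk assumes):

	static inline unsigned long
	lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
	{
		return type->region[region_nr].size;
	}

	static inline unsigned long
	lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
	{
		return type->region[region_nr].base >> PAGE_SHIFT;
	}

	static inline unsigned long
	lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
	{
		return lmb_start_pfn(type, region_nr) +
		       (lmb_size_bytes(type, region_nr) >> PAGE_SHIFT);
	}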
/*
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index c3116f0d788..cb864b8f275 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -440,8 +440,6 @@ new_range:
for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
numa_domain;
- memory_present(numa_domain, start >> PAGE_SHIFT,
- (start + size) >> PAGE_SHIFT);
if (--ranges)
goto new_range;
@@ -483,7 +481,6 @@ static void __init setup_nonnuma(void)
for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
- memory_present(0, 0, init_node_data[0].node_end_pfn);
}
static void __init dump_numa_topology(void)
@@ -695,6 +692,46 @@ new_range:
size);
}
}
+ /*
+ * This loop may look familiar, but we have to do it again
+ * after marking our reserved memory to mark memory present
+ * for sparsemem.
+ */
+ addr_cells = get_mem_addr_cells();
+ size_cells = get_mem_size_cells();
+ memory = NULL;
+ while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+ unsigned long mem_start, mem_size;
+ int numa_domain, ranges;
+ unsigned int *memcell_buf;
+ unsigned int len;
+
+ memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+ if (!memcell_buf || len <= 0)
+ continue;
+
+ ranges = memory->n_addrs; /* ranges in cell */
+new_range2:
+ mem_start = read_n_cells(addr_cells, &memcell_buf);
+ mem_size = read_n_cells(size_cells, &memcell_buf);
+ if (numa_enabled) {
+ numa_domain = of_node_numa_domain(memory);
+ if (numa_domain >= MAX_NUMNODES)
+ numa_domain = 0;
+ } else
+ numa_domain = 0;
+
+ if (numa_domain != nid)
+ continue;
+
+ mem_size = numa_enforce_memory_limit(mem_start, mem_size);
+ memory_present(numa_domain, mem_start >> PAGE_SHIFT,
+ (mem_start + mem_size) >> PAGE_SHIFT);
+
+ if (--ranges) /* process all ranges in cell */
+ goto new_range2;
+ }
+
}
}
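
The added loop re-walks the device tree's "memory" nodes, repeating the parse done earlier in this function, because memory_present() has to run only once the reserved regions are known (the same ordering issue fixed in do_init_bootmem() above, and the reason the calls were dropped from the first parse and from setup_nonnuma()). read_n_cells() consumes one #address-cells- or #size-cells-sized value from the property buffer and advances the cursor, along these lines (a sketch matching its usual definition in this file):

	/* Read an n-cell (n x 32-bit) big-endian value from the
	 * property buffer and step the cursor past it. */
	static unsigned long read_n_cells(int n, unsigned int **buf)
	{
		unsigned long result = 0;

		while (n--) {
			result = (result << 32) | **buf;
			(*buf)++;
		}
		return result;
	}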
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 244150a0bc1..0473953f6a3 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
int i;
asm volatile("isync" : : : "memory");
for (i = 0; i < offset; i++) {
- esid_data = (unsigned long)get_paca()->slb_cache[i]
- << SID_SHIFT;
+ esid_data = ((unsigned long)get_paca()->slb_cache[i]
+ << SID_SHIFT) | SLBIE_C;
asm volatile("slbie %0" : : "r" (esid_data));
}
asm volatile("isync" : : : "memory");
diff --git a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S
index 698d6b9ed6d..a3a03da503b 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/ppc64/mm/slb_low.S
@@ -21,7 +21,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
#include <asm/cputable.h>
/* void slb_allocate(unsigned long ea);