Diffstat (limited to 'arch/powerpc/mm/subpage-prot.c')
-rw-r--r--  arch/powerpc/mm/subpage-prot.c  60
1 file changed, 55 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index e4f8f1fc81a..6c0b1f5f8d2 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
for (; npages > 0; --npages) {
- pte_update(mm, addr, pte, 0, 0);
+ pte_update(mm, addr, pte, 0, 0, 0);
addr += PAGE_SIZE;
++pte;
}
@@ -95,7 +95,8 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
struct mm_struct *mm = current->mm;
struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
- int i, nw;
+ unsigned long i;
+ size_t nw;
unsigned long next, limit;
down_write(&mm->mmap_sem);
@@ -104,7 +105,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
limit = spt->maxaddr;
for (; addr < limit; addr = next) {
next = pmd_addr_end(addr, limit);
- if (addr < 0x100000000) {
+ if (addr < 0x100000000UL) {
spm = spt->low_prot;
} else {
spm = spt->protptrs[addr >> SBP_L3_SHIFT];
@@ -129,6 +130,53 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
up_write(&mm->mmap_sem);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct vm_area_struct *vma = walk->private;
+ split_huge_page_pmd(vma, addr, pmd);
+ return 0;
+}
+
+static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
+ unsigned long len)
+{
+ struct vm_area_struct *vma;
+ struct mm_walk subpage_proto_walk = {
+ .mm = mm,
+ .pmd_entry = subpage_walk_pmd_entry,
+ };
+
+ /*
+ * We don't try too hard; we just mark all the VMAs in that range
+ * VM_NOHUGEPAGE and split them.
+ */
+ vma = find_vma(mm, addr);
+ /*
+ * If the range falls entirely in an unmapped area, just return
+ */
+ if (vma && ((addr + len) <= vma->vm_start))
+ return;
+
+ while (vma) {
+ if (vma->vm_start >= (addr + len))
+ break;
+ vma->vm_flags |= VM_NOHUGEPAGE;
+ subpage_proto_walk.private = vma;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &subpage_proto_walk);
+ vma = vma->vm_next;
+ }
+}
+#else
+static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
+ unsigned long len)
+{
+ return;
+}
+#endif
+
/*
* Copy in a subpage protection map for an address range.
* The map has 2 bits per 4k subpage, so 32 bits per 64k page.
@@ -144,7 +192,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
struct mm_struct *mm = current->mm;
struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
- int i, nw;
+ unsigned long i;
+ size_t nw;
unsigned long next, limit;
int err;
@@ -166,10 +215,11 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
return -EFAULT;
down_write(&mm->mmap_sem);
+ subpage_mark_vma_nohuge(mm, addr, len);
for (limit = addr + len; addr < limit; addr = next) {
next = pmd_addr_end(addr, limit);
err = -ENOMEM;
- if (addr < 0x100000000) {
+ if (addr < 0x100000000UL) {
spm = spt->low_prot;
} else {
spm = spt->protptrs[addr >> SBP_L3_SHIFT];
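
For context, a minimal sketch of how userspace might drive this interface: one 32-bit word per 64k page, 2 bits per 4k subpage, as the comment in the file describes. This is not part of the patch; the syscall number, the 2-bit encoding (0 = full access, 1 = deny writes, 2 and 3 = deny all access) and the addresses used are assumptions drawn from the powerpc subpage_prot documentation rather than from this diff.

/*
 * Hypothetical example: make every 4k subpage of one 64k page read-only.
 * __NR_subpage_prot (assumed to be 310 on powerpc) has no libc wrapper,
 * so it is invoked through syscall(2).
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_subpage_prot
#define __NR_subpage_prot 310	/* powerpc-specific; assumed value */
#endif

int main(void)
{
	/* 16 two-bit fields per word, one per 4k subpage; 0x55555555
	 * sets every field to 1, assumed to mean "deny writes". */
	uint32_t map = 0x55555555;

	/* Illustrative address and length; both are assumed to need
	 * 64k (base page size) alignment. */
	unsigned long addr = 0x10000000UL;
	unsigned long len  = 0x10000UL;		/* one 64k page */

	if (syscall(__NR_subpage_prot, addr, len, &map) < 0) {
		perror("subpage_prot");
		return 1;
	}
	return 0;
}

With the patch above applied, such a call also walks the affected VMAs, marks them VM_NOHUGEPAGE and splits any transparent huge pages in the range (the new subpage_mark_vma_nohuge()), since subpage protection works at 4k granularity and cannot coexist with a huge page mapping the same range.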