Diffstat (limited to 'mm/fremap.c')
-rw-r--r--  mm/fremap.c  320
1 file changed, 175 insertions, 145 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index d862be3bc3e..72b8fa36143 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -5,7 +5,8 @@
*
* started by Ingo Molnar, Copyright (C) 2002, 2003
*/
-
+#include <linux/export.h>
+#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
@@ -13,171 +14,146 @@
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
-#include <linux/module.h>
#include <linux/syscalls.h>
+#include <linux/mmu_notifier.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+#include "internal.h"
+
+static int mm_counter(struct page *page)
+{
+ return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
+}
+
+static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
- struct page *page = NULL;
+ struct page *page;
+ swp_entry_t entry;
if (pte_present(pte)) {
- unsigned long pfn = pte_pfn(pte);
- flush_cache_page(vma, addr, pfn);
+ flush_cache_page(vma, addr, pte_pfn(pte));
pte = ptep_clear_flush(vma, addr, ptep);
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, pte, addr);
- goto out;
+ page = vm_normal_page(vma, addr, pte);
+ if (page) {
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+ update_hiwater_rss(mm);
+ dec_mm_counter(mm, mm_counter(page));
+ page_remove_rmap(page);
+ page_cache_release(page);
+ }
+ } else { /* zap_pte() is not called when pte_none() */
+ if (!pte_file(pte)) {
+ update_hiwater_rss(mm);
+ entry = pte_to_swp_entry(pte);
+ if (non_swap_entry(entry)) {
+ if (is_migration_entry(entry)) {
+ page = migration_entry_to_page(entry);
+ dec_mm_counter(mm, mm_counter(page));
+ }
+ } else {
+ free_swap_and_cache(entry);
+ dec_mm_counter(mm, MM_SWAPENTS);
+ }
}
- page = pfn_to_page(pfn);
- if (pte_dirty(pte))
- set_page_dirty(page);
- page_remove_rmap(page);
- page_cache_release(page);
- } else {
- if (!pte_file(pte))
- free_swap_and_cache(pte_to_swp_entry(pte));
- pte_clear(mm, addr, ptep);
+ pte_clear_not_present_full(mm, addr, ptep, 0);
}
-out:
- return !!page;
}
/*
- * Install a file page to a given virtual memory address, release any
+ * Install a file pte to a given virtual memory address, release any
* previously existing mapping.
*/
-int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, struct page *page, pgprot_t prot)
+static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
- struct inode *inode;
- pgoff_t size;
int err = -ENOMEM;
- pte_t *pte;
- pmd_t *pmd;
- pud_t *pud;
- pgd_t *pgd;
- pte_t pte_val;
+ pte_t *pte, ptfile;
spinlock_t *ptl;
- BUG_ON(vma->vm_flags & VM_RESERVED);
-
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
- goto out;
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- goto out;
- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
+ ptfile = pgoff_to_pte(pgoff);
+
+ if (!pte_none(*pte))
+ zap_pte(mm, vma, addr, pte);
+
+ set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
/*
- * This page may have been truncated. Tell the
- * caller about it.
+ * We don't need to run update_mmu_cache() here because the "file pte"
+ * being installed by install_file_pte() is not a real pte - it's a
+ * non-present entry (like a swap entry), noting what file offset should
+ * be mapped there when there's a fault (in a non-linear vma where
+ * that's not obvious).
*/
- err = -EINVAL;
- inode = vma->vm_file->f_mapping->host;
- size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (!page->mapping || page->index >= size)
- goto unlock;
- err = -ENOMEM;
- if (page_mapcount(page) > INT_MAX/2)
- goto unlock;
-
- if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
- inc_mm_counter(mm, file_rss);
-
- flush_icache_page(vma, page);
- set_pte_at(mm, addr, pte, mk_pte(page, prot));
- page_add_file_rmap(page);
- pte_val = *pte;
- update_mmu_cache(vma, addr, pte_val);
- err = 0;
-unlock:
pte_unmap_unlock(pte, ptl);
+ err = 0;
out:
return err;
}
-EXPORT_SYMBOL(install_page);
-/*
- * Install a file pte to a given virtual memory address, release any
- * previously existing mapping.
- */
-int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, unsigned long pgoff, pgprot_t prot)
+int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long size, pgoff_t pgoff)
{
- int err = -ENOMEM;
- pte_t *pte;
- pmd_t *pmd;
- pud_t *pud;
- pgd_t *pgd;
- pte_t pte_val;
- spinlock_t *ptl;
+ struct mm_struct *mm = vma->vm_mm;
+ int err;
- BUG_ON(vma->vm_flags & VM_RESERVED);
+ do {
+ err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
+ if (err)
+ return err;
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
- goto out;
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- goto out;
- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (!pte)
- goto out;
+ size -= PAGE_SIZE;
+ addr += PAGE_SIZE;
+ pgoff++;
+ } while (size);
- if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
- update_hiwater_rss(mm);
- dec_mm_counter(mm, file_rss);
- }
-
- set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
- pte_val = *pte;
- update_mmu_cache(vma, addr, pte_val);
- pte_unmap_unlock(pte, ptl);
- err = 0;
-out:
- return err;
+ return 0;
}
+EXPORT_SYMBOL(generic_file_remap_pages);
-/***
- * sys_remap_file_pages - remap arbitrary pages of a shared backing store
- * file within an existing vma.
+/**
+ * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
* @start: start of the remapped virtual memory range
* @size: size of the remapped virtual memory range
- * @prot: new protection bits of the range
- * @pgoff: to be mapped page of the backing store file
+ * @prot: new protection bits of the range (see NOTE)
+ * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
*
- * this syscall works purely via pagetables, so it's the most efficient
+ * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
+ * (shared backing store file).
+ *
+ * This syscall works purely via pagetables, so it's the most efficient
* way to map the same (large) file into a given virtual window. Unlike
* mmap()/mremap() it does not create any new vmas. The new mappings are
* also safe across swapout.
*
- * NOTE: the 'prot' parameter right now is ignored, and the vma's default
- * protection is used. Arbitrary protections might be implemented in the
- * future.
+ * NOTE: the @prot parameter right now is ignored (but must be zero),
+ * and the vma's default protection is used. Arbitrary protections
+ * might be implemented in the future.
*/
-asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
- unsigned long __prot, unsigned long pgoff, unsigned long flags)
+SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
struct mm_struct *mm = current->mm;
struct address_space *mapping;
- unsigned long end = start + size;
struct vm_area_struct *vma;
int err = -EINVAL;
int has_write_lock = 0;
+ vm_flags_t vm_flags = 0;
+
+ pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
+ "See Documentation/vm/remap_file_pages.txt.\n",
+ current->comm, current->pid);
- if (__prot)
+ if (prot)
return err;
/*
* Sanitize the syscall parameters:
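To make the kernel-doc above concrete, here is a minimal userspace sketch of the syscall (the file path and sizes are hypothetical, error handling is elided): a file is mapped MAP_SHARED, then one page of the window is rewired to a different file page without creating a new vma.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/data", O_RDWR);	/* hypothetical backing file */
	char *win = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);

	/*
	 * Rewire page 1 of the window to file page 3. As documented
	 * above: prot must be 0, pgoff is in pages, no new vma results.
	 */
	remap_file_pages(win + psz, psz, 0, 3, 0);

	munmap(win, 4 * psz);
	close(fd);
	return 0;
}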
@@ -189,6 +165,10 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
if (start + size <= start)
return err;
+ /* Does pgoff wrap? */
+ if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+ return err;
+
/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
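The two wrap checks above use the same unsigned-overflow idiom: if an unsigned sum wraps past zero, it compares smaller than either operand. A standalone sketch of the pattern, with illustrative values only:

#include <stdio.h>

/*
 * "start + size <= start" detects unsigned wraparound (or a zero-sized
 * span), since unsigned overflow is well defined modulo arithmetic in C.
 */
static int range_wraps(unsigned long start, unsigned long size)
{
	return start + size <= start;
}

int main(void)
{
	printf("%d\n", range_wraps(0x1000, 0x2000));	   /* 0: valid range */
	printf("%d\n", range_wraps(~0UL - 0x100, 0x2000)); /* 1: wraps */
	printf("%d\n", range_wraps(0x1000, 0));		   /* 1: empty span */
	return 0;
}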
@@ -203,51 +183,101 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
/*
* Make sure the vma is shared, that it supports prefaulting,
* and that the remapped range is valid and fully within
- * the single existing vma. vm_private_data is used as a
- * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
- * or VM_LOCKED, but VM_LOCKED could be revoked later on).
+ * the single existing vma.
*/
- if (vma && (vma->vm_flags & VM_SHARED) &&
- (!vma->vm_private_data ||
- (vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
- vma->vm_ops && vma->vm_ops->populate &&
- end > start && start >= vma->vm_start &&
- end <= vma->vm_end) {
-
- /* Must set VM_NONLINEAR before any pages are populated. */
- if (pgoff != linear_page_index(vma, start) &&
- !(vma->vm_flags & VM_NONLINEAR)) {
- if (!has_write_lock) {
- up_read(&mm->mmap_sem);
- down_write(&mm->mmap_sem);
- has_write_lock = 1;
- goto retry;
- }
- mapping = vma->vm_file->f_mapping;
- spin_lock(&mapping->i_mmap_lock);
- flush_dcache_mmap_lock(mapping);
- vma->vm_flags |= VM_NONLINEAR;
- vma_prio_tree_remove(vma, &mapping->i_mmap);
- vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
- flush_dcache_mmap_unlock(mapping);
- spin_unlock(&mapping->i_mmap_lock);
+ if (!vma || !(vma->vm_flags & VM_SHARED))
+ goto out;
+
+ if (!vma->vm_ops || !vma->vm_ops->remap_pages)
+ goto out;
+
+ if (start < vma->vm_start || start + size > vma->vm_end)
+ goto out;
+
+ /* Must set VM_NONLINEAR before any pages are populated. */
+ if (!(vma->vm_flags & VM_NONLINEAR)) {
+ /*
+ * vm_private_data is used as a swapout cursor
+ * in a VM_NONLINEAR vma.
+ */
+ if (vma->vm_private_data)
+ goto out;
+
+ /* Don't need a nonlinear mapping, exit success */
+ if (pgoff == linear_page_index(vma, start)) {
+ err = 0;
+ goto out;
+ }
+
+ if (!has_write_lock) {
+get_write_lock:
+ up_read(&mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ has_write_lock = 1;
+ goto retry;
}
+ mapping = vma->vm_file->f_mapping;
+ /*
+ * page_mkclean doesn't work on nonlinear vmas, so if
+ * dirty pages need to be accounted, emulate with linear
+ * vmas.
+ */
+ if (mapping_cap_account_dirty(mapping)) {
+ unsigned long addr;
+ struct file *file = get_file(vma->vm_file);
+ /* mmap_region may free vma; grab the info now */
+ vm_flags = vma->vm_flags;
- err = vma->vm_ops->populate(vma, start, size,
- vma->vm_page_prot,
- pgoff, flags & MAP_NONBLOCK);
+ addr = mmap_region(file, start, size, vm_flags, pgoff);
+ fput(file);
+ if (IS_ERR_VALUE(addr)) {
+ err = addr;
+ } else {
+ BUG_ON(addr != start);
+ err = 0;
+ }
+ goto out_freed;
+ }
+ mutex_lock(&mapping->i_mmap_mutex);
+ flush_dcache_mmap_lock(mapping);
+ vma->vm_flags |= VM_NONLINEAR;
+ vma_interval_tree_remove(vma, &mapping->i_mmap);
+ vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+ flush_dcache_mmap_unlock(mapping);
+ mutex_unlock(&mapping->i_mmap_mutex);
+ }
+ if (vma->vm_flags & VM_LOCKED) {
/*
- * We can't clear VM_NONLINEAR because we'd have to do
- * it after ->populate completes, and that would prevent
- * downgrading the lock. (Locks can't be upgraded).
+ * drop PG_Mlocked flag for over-mapped range
*/
+ if (!has_write_lock)
+ goto get_write_lock;
+ vm_flags = vma->vm_flags;
+ munlock_vma_pages_range(vma, start, start + size);
+ vma->vm_flags = vm_flags;
}
+
+ mmu_notifier_invalidate_range_start(mm, start, start + size);
+ err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
+ mmu_notifier_invalidate_range_end(mm, start, start + size);
+
+ /*
+ * We can't clear VM_NONLINEAR because we'd have to do
+ * it after ->remap_pages completes, and that would prevent
+ * downgrading the lock. (Locks can't be upgraded).
+ */
+
+out:
+ if (vma)
+ vm_flags = vma->vm_flags;
+out_freed:
if (likely(!has_write_lock))
up_read(&mm->mmap_sem);
else
up_write(&mm->mmap_sem);
+ if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
+ mm_populate(start, size);
return err;
}
-
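Since generic_file_remap_pages() is exported above, a filesystem or driver opts into nonlinear remapping by pointing .remap_pages at it from its ->mmap handler, as mm/filemap.c does for page-cache-backed files. A minimal sketch; the mydrv_* names are hypothetical, while the mm helpers are the real kernel APIs:

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Hypothetical driver vm_ops: page-cache faults plus the generic
 * nonlinear remap helper exported by this patch.
 */
static const struct vm_operations_struct mydrv_vm_ops = {
	.fault		= filemap_fault,
	.remap_pages	= generic_file_remap_pages,
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &mydrv_vm_ops;
	return 0;
}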