Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2466d125023..57361708d1a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
tlb->mm = mm;
tlb->fullmm = fullmm;
+ tlb->start = -1UL;
+ tlb->end = 0;
tlb->need_flush = 0;
tlb->fast_mode = (num_possible_cpus() == 1);
tlb->local.next = NULL;
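
Note on the hunk above: tlb->start is primed to -1UL and tlb->end to 0, which reads as an intentionally "empty" range (start above any address, end below any address), so later code can record the span it actually touched and flush only that. A minimal userspace sketch of that convention follows; range_model/range_note are hypothetical names, not kernel APIs.

/*
 * Userspace model of the "empty range" convention above: start = -1UL
 * and end = 0 mean "nothing gathered yet"; each recorded address then
 * widens the range, so one flush at the end covers exactly the span
 * that was touched.  Hypothetical names, not kernel code.
 */
#include <stdio.h>

struct range_model {
	unsigned long start;
	unsigned long end;
};

static void range_init(struct range_model *r)
{
	r->start = -1UL;	/* above any address, so the first recorded start replaces it */
	r->end = 0;		/* below any address, so the first recorded end replaces it */
}

static void range_note(struct range_model *r, unsigned long addr, unsigned long len)
{
	if (addr < r->start)
		r->start = addr;
	if (addr + len > r->end)
		r->end = addr + len;
}

int main(void)
{
	struct range_model r;

	range_init(&r);
	range_note(&r, 0x2000, 0x1000);
	range_note(&r, 0x8000, 0x1000);
	printf("flush %#lx-%#lx\n", r.start, r.end);	/* flush 0x2000-0x9000 */
	return 0;
}
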
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
{
struct mmu_gather_batch *batch, *next;
+ tlb->start = start;
+ tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ again:
*/
if (force_flush) {
force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+ tlb->start = addr;
+ tlb->end = end;
+#endif
tlb_flush_mmu(tlb);
if (addr != end)
goto again;
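
Note: this hunk records the range only in the force_flush case, i.e. when the gather batch fills up mid-zap and has to be flushed before the walk resumes at "goto again". A rough userspace model of that flush-early-then-continue shape follows; model_flush/model_zap are made-up helpers, not kernel code.

/*
 * Userspace model (made-up helpers, not kernel code) of the pattern in
 * the hunk above: when a batch fills up mid-walk, record the range
 * walked so far, flush it, and resume from where the walk stopped.
 */
#include <stdio.h>

static void model_flush(unsigned long start, unsigned long end)
{
	printf("flush %#lx-%#lx\n", start, end);
}

static void model_zap(unsigned long start, unsigned long end, unsigned long batch)
{
	unsigned long addr = start;

	while (addr < end) {
		unsigned long stop = addr + batch < end ? addr + batch : end;

		/* ... pages in [addr, stop) released here ... */
		model_flush(addr, stop);	/* mirrors setting tlb->start/end, then tlb_flush_mmu() */
		addr = stop;			/* mirrors "goto again" with the advanced addr */
	}
}

int main(void)
{
	model_zap(0x1000, 0x6000, 0x2000);
	return 0;
}
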
@@ -1334,8 +1343,11 @@ static void unmap_single_vma(struct mmu_gather *tlb,
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
*/
- if (vma->vm_file)
- unmap_hugepage_range(vma, start, end, NULL);
+ if (vma->vm_file) {
+ mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
+ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ }
} else
unmap_page_range(tlb, vma, start, end, details);
}
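
Note: the hugetlb teardown now runs as __unmap_hugepage_range_final under the file's i_mmap_mutex, presumably so the final teardown cannot race with other users of the same mapping. The shape of that pattern, as a self-contained pthreads sketch with stand-in names (shared_file, final_unmap), none of which are kernel identifiers:

/*
 * Self-contained pthreads sketch (stand-in names, not kernel code) of
 * the locking shape above: the final teardown happens under the same
 * per-file lock that other users of the mapping take, so none of them
 * can observe it half done.
 */
#include <pthread.h>
#include <stdio.h>

struct shared_file {
	pthread_mutex_t i_mmap_lock;	/* stand-in for f_mapping->i_mmap_mutex */
	int page_tables_present;	/* stand-in for the shared state being torn down */
};

static void final_unmap(struct shared_file *f)
{
	pthread_mutex_lock(&f->i_mmap_lock);
	f->page_tables_present = 0;	/* tear down while the lock is held */
	pthread_mutex_unlock(&f->i_mmap_lock);
}

int main(void)
{
	struct shared_file f = { PTHREAD_MUTEX_INITIALIZER, 1 };

	final_unmap(&f);
	printf("tables present: %d\n", f.page_tables_present);
	return 0;
}
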
@@ -2638,6 +2650,9 @@ reuse:
if (!page_mkwrite) {
wait_on_page_locked(dirty_page);
set_page_dirty_balance(dirty_page, page_mkwrite);
+ /* file_update_time outside page_lock */
+ if (vma->vm_file)
+ file_update_time(vma->vm_file);
}
put_page(dirty_page);
if (page_mkwrite) {
@@ -2655,10 +2670,6 @@ reuse:
}
}
- /* file_update_time outside page_lock */
- if (vma->vm_file)
- file_update_time(vma->vm_file);
-
return ret;
}
@@ -3327,12 +3338,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (dirty_page) {
struct address_space *mapping = page->mapping;
+ int dirtied = 0;
if (set_page_dirty(dirty_page))
- page_mkwrite = 1;
+ dirtied = 1;
unlock_page(dirty_page);
put_page(dirty_page);
- if (page_mkwrite && mapping) {
+ if ((dirtied || page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping but still
* dirty their pages
@@ -3341,7 +3353,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
/* file_update_time outside page_lock */
- if (vma->vm_file)
+ if (vma->vm_file && !page_mkwrite)
file_update_time(vma->vm_file);
} else {
unlock_page(vmf.page);
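
Note on the reuse: and __do_fault hunks above: the mapping-related work is now keyed on whether this fault actually dirtied the page or a ->page_mkwrite handler ran (the body of that branch is elided in this excerpt), while file_update_time() is called from the fault path only when the filesystem supplied no ->page_mkwrite of its own. A condensed userspace model of that decision follows; fault_dirty_account is a hypothetical helper, not kernel code.

/*
 * Condensed userspace model (hypothetical helper, not kernel code) of
 * the decision made in the hunks above: do the mapping-related
 * balancing if this fault dirtied the page or ->page_mkwrite ran, but
 * refresh file times from the fault path only when the filesystem has
 * no ->page_mkwrite of its own.
 */
#include <stdbool.h>
#include <stdio.h>

static void fault_dirty_account(bool dirtied, bool page_mkwrite, bool has_file)
{
	if (dirtied || page_mkwrite)
		printf("balance dirty pages against the mapping\n");
	if (has_file && !page_mkwrite)
		printf("file_update_time()\n");
}

int main(void)
{
	fault_dirty_account(true, false, true);	/* plain write fault: balance + update times */
	fault_dirty_account(true, true, true);	/* fs has ->page_mkwrite: it owns the timestamps */
	return 0;
}
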
@@ -3929,7 +3941,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
free_page((unsigned long)buf);
}
}
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
}
#ifdef CONFIG_PROVE_LOCKING
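
Note on the last hunk: up_read() now releases mmap_sem through the same mm pointer that was presumably used for the matching down_read() earlier in print_vma_addr(), instead of re-reading current->mm at unlock time. A tiny pthreads sketch of that pairing discipline; task_model and walk_under_lock are hypothetical, not kernel identifiers.

/*
 * Tiny pthreads sketch (hypothetical struct, not the kernel's) of the
 * pairing rule the last hunk follows: release the lock through the
 * same cached pointer that was used to take it, rather than re-reading
 * a global/current field at unlock time.
 */
#include <pthread.h>

struct task_model {
	pthread_rwlock_t mmap_sem;
};

static void walk_under_lock(struct task_model *t)
{
	pthread_rwlock_t *sem = &t->mmap_sem;	/* cache the lock being taken... */

	pthread_rwlock_rdlock(sem);
	/* ... inspect the address space ... */
	pthread_rwlock_unlock(sem);		/* ...and release exactly that one */
}

int main(void)
{
	struct task_model t = { PTHREAD_RWLOCK_INITIALIZER };

	walk_under_lock(&t);
	return 0;
}
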