author     Hugh Dickins <hugh.dickins@tiscali.co.uk>   2009-12-30 23:00:30 +0000
committer  Greg Kroah-Hartman <gregkh@suse.de>         2010-01-06 15:05:22 -0800
commit     8ac9e802007e99534ec66af850ef7b099df27499 (patch)
tree       fde720a1f13b82f58f10edd51d583eb293e305e6 /mm
parent     b2ea8cb9c8f1937cb80b9beb50548a05bfc37819 (diff)
ksm: fix mlockfreed to munlocked
2.6.33-rc1 commit 73848b4684e84a84cfd1555af78d41158f31e16b, adjusted
to include 31e855ea7173bdb0520f9684580423a9560f66e0's movement of
the unlock_page(oldpage), but omitting other intervening cleanups.
When KSM merges an mlocked page, it has been forgetting to munlock it:
that's been left to free_page_mlock(), which reports it in /proc/vmstat
as unevictable_pgs_mlockfreed instead of unevictable_pgs_munlocked,
which indicates that such pages _might_ be left unevictable for long
after they should be evictable. Call munlock_vma_page() to fix that.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
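The two counters named in the message are readable from /proc/vmstat, so
the misaccounting can be observed from userspace. Below is a minimal test
sketch, not part of the patch: it assumes a KSM-enabled kernel with ksmd
already running (echo 1 > /sys/kernel/mm/ksm/run, as root), and crudely
sleeps to give ksmd time to scan. On a fixed kernel, merging mlocked pages
should bump unevictable_pgs_munlocked; on an unfixed kernel the work is
left to free_page_mlock() and surfaces as unevictable_pgs_mlockfreed
instead.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Return the named /proc/vmstat counter, or -1 if it cannot be read. */
static long vmstat_counter(const char *name)
{
	char key[64];
	long v, val = -1;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return -1;
	while (fscanf(f, "%63s %ld", key, &v) == 2)
		if (strcmp(key, name) == 0)
			val = v;
	fclose(f);
	return val;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	long munlocked = vmstat_counter("unevictable_pgs_munlocked");
	long mlockfreed = vmstat_counter("unevictable_pgs_mlockfreed");
	char *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 'x', 2 * page);		/* two identical pages */
	mlock(buf, 2 * page);			/* both become mlocked */
	madvise(buf, 2 * page, MADV_MERGEABLE);	/* offer them to KSM */

	sleep(10);	/* crude: give ksmd time to scan and merge */
	printf("munlocked grew by:  %ld\n",
	       vmstat_counter("unevictable_pgs_munlocked") - munlocked);
	printf("mlockfreed grew by: %ld\n",
	       vmstat_counter("unevictable_pgs_mlockfreed") - mlockfreed);
	return 0;
}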
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h |  3
-rw-r--r--  mm/ksm.c      | 14
-rw-r--r--  mm/mlock.c    |  4
3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb..17bc0df273b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -107,9 +107,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
 }
 
 /*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked().  This can be useful in a situation where
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -34,6 +34,7 @@
 #include <linux/ksm.h>
 
 #include <asm/tlbflush.h>
+#include "internal.h"
 
 /*
  * A few notes about the KSM scanning process,
@@ -767,15 +768,14 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
-	if (write_protect_page(vma, oldpage, &orig_pte)) {
-		unlock_page(oldpage);
-		goto out_putpage;
-	}
-	unlock_page(oldpage);
-
-	if (pages_identical(oldpage, newpage))
+	if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
+	    pages_identical(oldpage, newpage))
 		err = replace_page(vma, oldpage, newpage, orig_pte);
 
+	if ((vma->vm_flags & VM_LOCKED) && !err)
+		munlock_vma_page(oldpage);
+
+	unlock_page(oldpage);
 out_putpage:
 	put_page(oldpage);
 	put_page(newpage);
diff --git a/mm/mlock.c b/mm/mlock.c
index bd6f0e466f6..2e05c97b04f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -99,14 +99,14 @@ void mlock_vma_page(struct page *page)
  * not get another chance to clear PageMlocked.  If we successfully
  * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
  * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
+ * is mapped in a non-linear vma.  So, we go ahead and ClearPageMlocked(),
  * perhaps redundantly.
  * If we lose the isolation race, and the page is mapped by other VM_LOCKED
  * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
  * either of which will restore the PageMlocked state by calling
  * mlock_vma_page() above, if it can grab the vma's mmap sem.
  */
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
 {
 	BUG_ON(!PageLocked(page));
 
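Read back out of diff form, the merge tail of try_to_merge_one_page()
after this patch looks like the sketch below (reassembled from the ksm.c
hunk above, surrounding code elided, comments added):

	/* Merge only if write-protect succeeded and contents still match. */
	if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
	    pages_identical(oldpage, newpage))
		err = replace_page(vma, oldpage, newpage, orig_pte);

	/*
	 * The page was merged while mlocked: munlock it here, while the
	 * page lock is still held, as munlock_vma_page() requires.
	 */
	if ((vma->vm_flags & VM_LOCKED) && !err)
		munlock_vma_page(oldpage);

	unlock_page(oldpage);
out_putpage:
	put_page(oldpage);
	put_page(newpage);

Two things changed besides the munlock call itself: the failure path no
longer needs its own unlock_page()/goto, since there is now a single
unlock at the end, and that unlock is deliberately placed after
munlock_vma_page(), which per the mm/internal.h comment updated above must
be called with mmap_sem held and the page still locked.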