author    Cyrill Gorcunov <gorcunov@gmail.com>  2013-10-16 13:46:51 -0700
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-11-04 04:35:20 -0800
commit    72d2ddf5f2218861f41b7d1a9da4c130d6d81911 (patch)
tree      7e92030ef3dcdfa94eddfcd74370a4ae251f39b1 /mm
parent    c0329006f4b3c7ab4dde75aa003de50982a44b91 (diff)
mm: migration: do not lose soft dirty bit if page is in migration state
commit c3d16e16522fe3fe8759735850a0676da18f4b1d upstream.

If page migration is turned on in config and the page is migrating, we
may lose the soft dirty bit. If fork and mprotect are called on
migrating pages (once migration is complete) pages do not obtain the
soft dirty bit in the corresponding pte entries. Fix it by adding an
appropriate test on swap entries.

Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
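For context (a sketch, not part of this patch): the soft dirty bit preserved here is the one userspace reads through the documented soft-dirty interface (Documentation/vm/soft-dirty.txt), where writing "4" to /proc/<pid>/clear_refs clears the bits and bit 55 of each 64-bit /proc/<pid>/pagemap entry reports them. A minimal tracker built only on that documented behaviour might look like:

/*
 * Illustrative sketch only, not part of this patch.  It relies solely on
 * the documented soft-dirty interface: writing "4" to /proc/self/clear_refs
 * clears the bits, and bit 55 of each 64-bit /proc/self/pagemap entry
 * reports them.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

/* Clear soft-dirty bits for the whole address space; 0 on success. */
static int clear_soft_dirty(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, "4", 1) == 1)
		ret = 0;
	close(fd);
	return ret;
}

/* Return 1 if the page backing addr is soft-dirty, 0 if clean, -1 on error. */
static int page_soft_dirty(const void *addr)
{
	uint64_t ent;
	long psize = sysconf(_SC_PAGESIZE);
	off_t off = ((uintptr_t)addr / psize) * sizeof(ent);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (pread(fd, &ent, sizeof(ent), off) == sizeof(ent))
		ret = (ent >> 55) & 1;	/* bit 55 = soft-dirty */
	close(fd);
	return ret;
}

Without the fix below, a pte whose soft dirty bit was set could come back from migration (or be rewritten by the fork and mprotect paths patched here) with the bit cleared, so a tracker like this would wrongly report the page as clean.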
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/migrate.c	2
-rw-r--r--	mm/mprotect.c	7
3 files changed, 9 insertions, 2 deletions
diff --git a/mm/memory.c b/mm/memory.c
index af84bc0ec17..440986e5721 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -861,6 +861,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 					 */
 					make_migration_entry_read(&entry);
 					pte = swp_entry_to_pte(entry);
+					if (pte_swp_soft_dirty(*src_pte))
+						pte = pte_swp_mksoft_dirty(pte);
 					set_pte_at(src_mm, addr, src_pte, pte);
 				}
 			}
diff --git a/mm/migrate.c b/mm/migrate.c
index 25ca7caf909..81af4e67810 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -157,6 +157,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+	if (pte_swp_soft_dirty(*ptep))
+		pte = pte_mksoft_dirty(pte);
 	if (is_write_migration_entry(entry))
 		pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 94722a4d6b4..a3af058f68e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -94,13 +94,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
 			if (is_write_migration_entry(entry)) {
+				pte_t newpte;
 				/*
 				 * A protection check is difficult so
 				 * just be safe and disable write
 				 */
 				make_migration_entry_read(&entry);
-				set_pte_at(mm, addr, pte,
-					   swp_entry_to_pte(entry));
+				newpte = swp_entry_to_pte(entry);
+				if (pte_swp_soft_dirty(oldpte))
+					newpte = pte_swp_mksoft_dirty(newpte);
+				set_pte_at(mm, addr, pte, newpte);
 			}
 			pages++;
 		}