| author | Mel Gorman <mgorman@suse.de> | 2014-01-07 14:00:45 +0000 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-01-09 12:25:14 -0800 |
| commit | fd5df8002f7668ac04a53b1b6d75296ad191a136 (patch) | |
| tree | b5e1de83ba1d5e14e1f57ddeb79e5da3ec2d6bee /mm | |
| parent | 3d792d616ba408ab55a54c1bb75a9367d997acfa (diff) | |
mm: numa: avoid unnecessary disruption of NUMA hinting during migration
commit de466bd628e8d663fdf3f791bc8db318ee85c714 upstream.
do_huge_pmd_numa_page() handles the case where there is parallel THP
migration. However, by the time it is checked, the NUMA hinting
information has already been disrupted. This patch adds an earlier
check with some helpers.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/huge_memory.c | 22 |
| -rw-r--r-- | mm/migrate.c | 12 |
2 files changed, 28 insertions, 6 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40f4ceb979f..47962456ed8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -884,6 +884,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		ret = 0;
 		goto out_unlock;
 	}
+
+	/* mmap_sem prevents this happening but warn if that changes */
+	WARN_ON(pmd_trans_migrating(pmd));
+
 	if (unlikely(pmd_trans_splitting(pmd))) {
 		/* split huge page running from under us */
 		spin_unlock(&src_mm->page_table_lock);
@@ -1294,6 +1298,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
+	/*
+	 * If there are potential migrations, wait for completion and retry
+	 * without disrupting NUMA hinting information. Do not relock and
+	 * check_same as the page may no longer be mapped.
+	 */
+	if (unlikely(pmd_trans_migrating(*pmdp))) {
+		spin_unlock(&mm->page_table_lock);
+		wait_migrate_huge_page(vma->anon_vma, pmdp);
+		goto out;
+	}
+
 	page = pmd_page(pmd);
 	page_nid = page_to_nid(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
@@ -1312,12 +1327,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 	}
 
-	/*
-	 * If there are potential migrations, wait for completion and retry. We
-	 * do not relock and check_same as the page may no longer be mapped.
-	 * Furtermore, even if the page is currently misplaced, there is no
-	 * guarantee it is still misplaced after the migration completes.
-	 */
+	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
 		spin_unlock(&mm->page_table_lock);
 		wait_on_page_locked(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index e33f0d11634..6609413b742 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1597,6 +1597,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	return 1;
 }
 
+bool pmd_trans_migrating(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+	return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+	struct page *page = pmd_page(*pmd);
+	wait_on_page_locked(page);
+}
+
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
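For readers less familiar with these code paths, below is a minimal userspace sketch of the pattern the patch applies: check whether a migration is in flight *before* touching any per-page state, and wait for it to finish rather than disrupting that state and only then discovering the race. The mutex stands in for the THP's page lock, and `trans_migrating()` / `wait_migration()` loosely mirror the roles of the new `pmd_trans_migrating()` / `wait_migrate_huge_page()` helpers. This is not kernel code; all names and the threading scaffolding are invented for illustration.

```c
/*
 * Userspace model of the "check before disrupting" pattern (illustrative
 * only). A mutex plays the role of the huge page's page lock: a migration
 * holds it for the duration, and the fault path tests/waits on it before
 * it touches the NUMA-hint state.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ THP page lock */
static int numa_hint_state = 1;                                /* ~ NUMA hinting info */

/* Roughly pmd_trans_migrating(): a migration is in flight iff the "page" is locked */
static bool trans_migrating(void)
{
	if (pthread_mutex_trylock(&page_lock) == 0) {
		pthread_mutex_unlock(&page_lock);
		return false;
	}
	return true;
}

/* Roughly wait_migrate_huge_page(): block until the migration drops the lock */
static void wait_migration(void)
{
	pthread_mutex_lock(&page_lock);
	pthread_mutex_unlock(&page_lock);
}

static void *migrator(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&page_lock);   /* migration starts: "page" is locked */
	sleep(1);                         /* pretend to copy the huge page */
	pthread_mutex_unlock(&page_lock); /* migration complete */
	return NULL;
}

static void fault_handler(void)
{
	/* The early check added by the patch: bail out before touching state */
	if (trans_migrating()) {
		wait_migration();
		return; /* retry later; hinting info left intact */
	}
	numa_hint_state = 0; /* only safe to consume/clear the hint now */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, migrator, NULL);
	usleep(100 * 1000);  /* let the migration get under way */
	fault_handler();     /* detects the migration and waits instead */
	pthread_join(t, NULL);

	/* Prints 1: the fault path did not disrupt the hint state */
	printf("hint state after fault: %d\n", numa_hint_state);
	return 0;
}
```

Build with `cc -pthread`. The point mirrors the patch's design choice: the old code only noticed a parallel THP migration after it had already started disrupting the hinting information, whereas the new `pmd_trans_migrating()` check lets `do_huge_pmd_numa_page()` back off and wait while that information is still intact.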