-rw-r--r--   include/linux/migrate.h | 10
-rw-r--r--   mm/huge_memory.c        | 22
-rw-r--r--   mm/migrate.c            | 12
3 files changed, 37 insertions(+), 7 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 8d3c57fdf22..804651c6229 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -90,10 +90,18 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page, int node);
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page, int node);
extern bool migrate_ratelimited(int node);
#else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+ return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
static inline int migrate_misplaced_page(struct page *page, int node)
{
return -EAGAIN; /* can't migrate now */
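
The header change follows the usual kernel idiom of pairing real declarations under CONFIG_NUMA_BALANCING with no-op inline stubs, so call sites never need #ifdef guards. A minimal caller sketch follows (the function name is hypothetical, not part of this patch); with NUMA balancing disabled, pmd_trans_migrating() is the constant-false stub above and the compiler discards the branch entirely:

/*
 * Hypothetical caller, for illustration only. Compiles identically
 * with and without CONFIG_NUMA_BALANCING thanks to the stubs above.
 */
static void thp_migration_barrier_sketch(struct vm_area_struct *vma,
					 pmd_t *pmdp)
{
	if (pmd_trans_migrating(*pmdp))
		wait_migrate_huge_page(vma->anon_vma, pmdp);
}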
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40f4ceb979f..47962456ed8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -884,6 +884,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = 0;
goto out_unlock;
}
+
+ /* mmap_sem prevents this happening but warn if that changes */
+ WARN_ON(pmd_trans_migrating(pmd));
+
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(&src_mm->page_table_lock);
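
The WARN_ON() encodes a locking assumption rather than handling the case: copy_huge_pmd() runs from fork() with mmap_sem held for write, while a THP NUMA migration is only started from a hinting fault that runs with mmap_sem held for read, so the two cannot overlap today. A sketch of the idiom (the function name is hypothetical):

/*
 * Illustration only: assert the locking invariant loudly so a future
 * change that breaks it produces a backtrace instead of silent
 * corruption. The write-side of mmap_sem excludes the hinting faults
 * that start a migration, so this should never fire.
 */
static void copy_huge_pmd_entry_check(pmd_t pmd)
{
	WARN_ON(pmd_trans_migrating(pmd));	/* see locking note above */
}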
@@ -1294,6 +1298,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
+ /*
+ * If there are potential migrations, wait for completion and retry
+ * without disrupting NUMA hinting information. Do not relock and
+ * check_same as the page may no longer be mapped.
+ */
+ if (unlikely(pmd_trans_migrating(*pmdp))) {
+ spin_unlock(&mm->page_table_lock);
+ wait_migrate_huge_page(vma->anon_vma, pmdp);
+ goto out;
+ }
+
page = pmd_page(pmd);
page_nid = page_to_nid(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
@@ -1312,12 +1327,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto clear_pmdnuma;
}
- /*
- * If there are potential migrations, wait for completion and retry. We
- * do not relock and check_same as the page may no longer be mapped.
- * Furtermore, even if the page is currently misplaced, there is no
- * guarantee it is still misplaced after the migration completes.
- */
+ /* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
spin_unlock(&mm->page_table_lock);
wait_on_page_locked(page);
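
Taken together, the two hunks in do_huge_pmd_numa_page() implement a "drop the lock, sleep, retry the whole fault" pattern: the handler must not sleep while holding page_table_lock, and after waking it must not relock and recheck, because the THP may have been migrated and unmapped in the meantime. A condensed sketch of that shape (hypothetical function name; statistics, refcounting, and the locked-page path are omitted):

/*
 * Condensed sketch of the wait-and-retry shape above; illustration
 * only, not the kernel code.
 */
static int numa_fault_sketch(struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     pmd_t *pmdp, pmd_t orig_pmd)
{
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(orig_pmd, *pmdp)))
		goto out_unlock;	/* raced with another update */
	if (unlikely(pmd_trans_migrating(*pmdp))) {
		/* never sleep on the page lock with page_table_lock held */
		spin_unlock(&mm->page_table_lock);
		wait_migrate_huge_page(vma->anon_vma, pmdp);
		return 0;	/* fault is retried from scratch by caller */
	}
	/* ... normal NUMA hinting fault handling ... */
out_unlock:
	spin_unlock(&mm->page_table_lock);
	return 0;
}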
diff --git a/mm/migrate.c b/mm/migrate.c
index e33f0d11634..6609413b742 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1597,6 +1597,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 1;
}
+bool pmd_trans_migrating(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+ struct page *page = pmd_page(*pmd);
+ wait_on_page_locked(page);
+}
+
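
Both helpers ride on the page lock: the THP migration path holds the huge page's lock for the duration of the move, so a locked page behind a NUMA-hinting pmd means a migration may be in flight, and waiting for the migration to finish is simply waiting for the lock to drop. pmd_page() is valid here because a huge pmd maps the head page of the compound page. Open-coded, the pair amounts to the following (illustrative sketch, not part of the patch):

/*
 * The two helpers above, open-coded at an imaginary call site.
 * PageLocked() can be true for lock holders other than migration, so
 * the check is conservative: it may wait when no migration is running,
 * but it can never miss a migration already in progress.
 */
static void wait_if_migrating_sketch(pmd_t *pmdp)
{
	struct page *page = pmd_page(*pmdp);	/* THP head page */

	if (PageLocked(page))			/* pmd_trans_migrating() */
		wait_on_page_locked(page);	/* wait_migrate_huge_page() */
}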
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on