author | Hugh Dickins <hughd@google.com> | 2014-06-23 13:22:07 -0700
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-07-09 10:51:22 -0700
commit | 50a28baa75d832d8f74e1f6bf7ca247eb92b77be (patch)
tree | 47e17bde702b33a4ee93d3eff00315f4501bce35
parent | 2bcdd4933ff4dc46445dcae93cb37c648283b782 (diff)
mm: fix crashes from mbind() merging vmas
commit d05f0cdcbe6388723f1900c549b4850360545201 upstream.
In v2.6.34 commit 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
introduced vma merging to mbind(), but it should have also changed the
convention of passing start vma from queue_pages_range() (formerly
check_range()) to new_vma_page(): vma merging may have already freed
that structure, resulting in BUG at mm/mempolicy.c:1738 and probably
worse crashes.
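To make the lifetime bug concrete: the old code returned the start vma from check_range() and smuggled that pointer through migrate_pages() into new_vma_page(), but mbind_range() may merge vmas in between, freeing the very structure the pointer names. The fix carries the stable start address instead and repeats the lookup with find_vma() at time of use. Below is a minimal userspace analogue of the two conventions (illustrative only; struct node, find_node() and merge() are invented for this sketch and are not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Toy analogue of a vma list whose nodes can be merged (freed and
 * replaced), the way vma_merge() may free a vma during mbind_range(). */
struct node {
	unsigned long start;
	struct node *next;
};

static struct node *list;

/* Like find_vma(): resolve an address to a node at time of use. */
static struct node *find_node(unsigned long start)
{
	struct node *n;

	for (n = list; n; n = n->next)
		if (n->start == start)
			return n;
	return NULL;
}

/* Like a merge: frees the old node and installs a replacement,
 * invalidating every pointer taken before the call. */
static void merge(void)
{
	struct node *old = list;
	struct node *new = malloc(sizeof(*new));

	new->start = old->start;
	new->next = old->next;
	list = new;
	free(old);
}

int main(void)
{
	unsigned long start = 0x1000;

	list = malloc(sizeof(*list));
	list->start = start;
	list->next = NULL;

	struct node *cached = find_node(start);	/* old convention: cache pointer */

	merge();

	/* printf("%lx\n", cached->start); -- use-after-free, the crash above */
	(void)cached;
	printf("%lx\n", find_node(start)->start);	/* new convention: fresh lookup */
	return 0;
}

The general rule the patch applies: across any operation that may free and reallocate nodes, pass a stable key, never a pointer into the structure.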
Fixes: 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- | mm/mempolicy.c | 53
1 file changed, 24 insertions(+), 29 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 82f1b027ba1..ee50c256fdc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -566,24 +566,24 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
  * If pagelist != NULL then isolate pages from the LRU and
  * put them on the pagelist.
  */
-static struct vm_area_struct *
+static int
 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
+	int err = 0;
+	struct vm_area_struct *vma, *prev;
 
 
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
 	prev = NULL;
-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 			if (!vma->vm_next && vma->vm_end < end)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 			if (prev && prev->vm_end < vma->vm_start)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 		}
 		if (!is_vm_hugetlb_page(vma) &&
 		    ((flags & MPOL_MF_STRICT) ||
@@ -597,14 +597,12 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 				start = vma->vm_start;
 			err = check_pgd_range(vma, start, endvma, nodes,
 						flags, private);
-			if (err) {
-				first = ERR_PTR(err);
+			if (err)
 				break;
-			}
 		}
 		prev = vma;
 	}
-	return first;
+	return err;
 }
 
 /*
@@ -945,16 +943,15 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 {
 	nodemask_t nmask;
 	LIST_HEAD(pagelist);
-	int err = 0;
-	struct vm_area_struct *vma;
+	int err;
 
 	nodes_clear(nmask);
 	node_set(source, nmask);
 
-	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+	err = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	if (err)
+		return err;
 
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_node_page, dest,
@@ -1058,16 +1055,17 @@ out:
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1093,7 +1091,7 @@ int do_migrate_pages(struct mm_struct *mm,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1103,7 +1101,6 @@ static long do_mbind(unsigned long start, unsigned long len,
 		     unsigned short mode, unsigned short mode_flags,
 		     nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1167,19 +1164,17 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = check_range(mm, start, end, nmask,
+	err = check_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
-	err = PTR_ERR(vma);
-	if (!IS_ERR(vma)) {
+	if (!err) {
 		int nr_failed = 0;
 
 		err = mbind_range(mm, start, end, new);
 
 		if (!list_empty(&pagelist)) {
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-						(unsigned long)vma,
-						false, true);
+			nr_failed = migrate_pages(&pagelist, new_page,
						  start, false, true);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}
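A side note on the rewritten comment above new_page(): its forward search is only correct because the pages handed to migrate_pages() arrive in virtual address order, so the vma walk never has to back up. A standalone sketch of that forward-only scan under the same sortedness assumption (illustrative areas and addresses, not kernel code):

#include <stdio.h>

/* Sorted "vmas" as [start, end) ranges, and addresses in ascending
 * order, mirroring the invariant new_page() depends on. */
struct area {
	unsigned long start, end;
};

int main(void)
{
	struct area areas[] = {
		{ 0x1000, 0x2000 },
		{ 0x5000, 0x9000 },
		{ 0xa000, 0xb000 },
	};
	unsigned long addrs[] = { 0x1800, 0x5100, 0x8000, 0xa040 };
	size_t na = sizeof(areas) / sizeof(areas[0]);
	size_t i = 0;

	for (size_t p = 0; p < sizeof(addrs) / sizeof(addrs[0]); p++) {
		/* Addresses ascend, so never re-scan earlier areas:
		 * resume from where the previous lookup stopped. */
		while (i < na && addrs[p] >= areas[i].end)
			i++;
		if (i < na && addrs[p] >= areas[i].start)
			printf("%#lx -> area %zu\n", addrs[p], i);
	}
	return 0;
}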