Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig              |   3
-rw-r--r--  mm/Makefile             |   3
-rw-r--r--  mm/backing-dev.c        |   5
-rw-r--r--  mm/compaction.c         |  26
-rw-r--r--  mm/debug-pagealloc.c    |  56
-rw-r--r--  mm/highmem.c            |   2
-rw-r--r--  mm/huge_memory.c        |  54
-rw-r--r--  mm/ksm.c                |   3
-rw-r--r--  mm/memblock.c           |  11
-rw-r--r--  mm/memcontrol.c         |   3
-rw-r--r--  mm/memory-failure.c     |  12
-rw-r--r--  mm/mempolicy.c          |   2
-rw-r--r--  mm/migrate.c            |  75
-rw-r--r--  mm/mlock.c              |  13
-rw-r--r--  mm/mmap.c               |   9
-rw-r--r--  mm/mremap.c             |  42
-rw-r--r--  mm/oom_kill.c           |  53
-rw-r--r--  mm/page-writeback.c     |   4
-rw-r--r--  mm/page_alloc.c         |  17
-rw-r--r--  mm/process_vm_access.c  | 496
-rw-r--r--  mm/rmap.c               |   2
-rw-r--r--  mm/shmem.c              |   6
-rw-r--r--  mm/slub.c               |  47
-rw-r--r--  mm/swapfile.c           |   2
-rw-r--r--  mm/thrash.c             |   2
-rw-r--r--  mm/vmalloc.c            |  80
-rw-r--r--  mm/vmscan.c             | 325
-rw-r--r--  mm/vmstat.c             |   3
28 files changed, 981 insertions, 375 deletions
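The largest single addition in the diffstat above is the new mm/process_vm_access.c (496 lines), which implements the process_vm_readv()/process_vm_writev() "cross memory attach" syscalls; its diff is not reproduced in the portion quoted below. Purely as an illustrative sketch of how userspace would drive the new interface (the pid and remote address arguments are placeholders, and it assumes a libc that already exposes the process_vm_readv() wrapper — otherwise syscall(__NR_process_vm_readv, ...) would be needed):

	#define _GNU_SOURCE
	#include <sys/types.h>
	#include <sys/uio.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		pid_t pid;
		void *remote_addr;
		char buf[4096];
		struct iovec local, remote;
		ssize_t n;

		if (argc != 3) {
			fprintf(stderr, "usage: %s <pid> <hex-address>\n", argv[0]);
			return 1;
		}
		pid = (pid_t)atoi(argv[1]);
		remote_addr = (void *)strtoul(argv[2], NULL, 0);

		local.iov_base = buf;
		local.iov_len = sizeof(buf);
		remote.iov_base = remote_addr;
		remote.iov_len = sizeof(buf);

		/* Copy up to 4 KiB from the target's address space into buf,
		 * without going through ptrace() or /proc/<pid>/mem. */
		n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
		if (n < 0) {
			perror("process_vm_readv");
			return 1;
		}
		printf("copied %zd bytes from pid %d\n", n, (int)pid);
		return 0;
	}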
diff --git a/mm/Kconfig b/mm/Kconfig index f2f1ca19ed5..011b110365c 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -131,6 +131,9 @@ config SPARSEMEM_VMEMMAP config HAVE_MEMBLOCK boolean +config NO_BOOTMEM + boolean + # eventually, we can have this option just 'select SPARSEMEM' config MEMORY_HOTPLUG bool "Allow for memory hot-add" diff --git a/mm/Makefile b/mm/Makefile index 836e4163c1b..50ec00ef2a0 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -5,7 +5,8 @@ mmu-y := nommu.o mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ - vmalloc.o pagewalk.o pgtable-generic.o + vmalloc.o pagewalk.o pgtable-generic.o \ + process_vm_access.o obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page_alloc.o page-writeback.o \ diff --git a/mm/backing-dev.c b/mm/backing-dev.c index a87da524a4a..7520ef0bfd4 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -404,9 +404,8 @@ static int bdi_forker_thread(void *ptr) /* * In the following loop we are going to check whether we have * some work to do without any synchronization with tasks - * waking us up to do work for them. So we have to set task - * state already here so that we don't miss wakeups coming - * after we verify some condition. + * waking us up to do work for them. Set the task state here + * so that we don't miss wakeups after verifying conditions. */ set_current_state(TASK_INTERRUPTIBLE); diff --git a/mm/compaction.c b/mm/compaction.c index 6cc604bd564..899d9563858 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -35,10 +35,6 @@ struct compact_control { unsigned long migrate_pfn; /* isolate_migratepages search base */ bool sync; /* Synchronous migration */ - /* Account for isolated anon and file pages */ - unsigned long nr_anon; - unsigned long nr_file; - unsigned int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; @@ -223,17 +219,13 @@ static void isolate_freepages(struct zone *zone, static void acct_isolated(struct zone *zone, struct compact_control *cc) { struct page *page; - unsigned int count[NR_LRU_LISTS] = { 0, }; + unsigned int count[2] = { 0, }; - list_for_each_entry(page, &cc->migratepages, lru) { - int lru = page_lru_base_type(page); - count[lru]++; - } + list_for_each_entry(page, &cc->migratepages, lru) + count[!!page_is_file_cache(page)]++; - cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON]; - cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE]; - __mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon); - __mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file); + __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); + __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); } /* Similar to reclaim, but different enough that they don't share logic */ @@ -269,6 +261,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, unsigned long last_pageblock_nr = 0, pageblock_nr; unsigned long nr_scanned = 0, nr_isolated = 0; struct list_head *migratelist = &cc->migratepages; + isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE; /* Do not scan outside zone boundaries */ low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); @@ -356,8 +349,11 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, continue; } + if (!cc->sync) + mode |= ISOLATE_CLEAN; + /* Try isolate the page */ - if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0) + if (__isolate_lru_page(page, mode, 0) != 0) continue; VM_BUG_ON(PageTransCompound(page)); @@ -586,7 
+582,7 @@ out: return ret; } -unsigned long compact_zone_order(struct zone *zone, +static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, bool sync) { diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c index a1e3324de2b..7cea557407f 100644 --- a/mm/debug-pagealloc.c +++ b/mm/debug-pagealloc.c @@ -1,7 +1,10 @@ #include <linux/kernel.h> +#include <linux/string.h> #include <linux/mm.h> +#include <linux/highmem.h> #include <linux/page-debug-flags.h> #include <linux/poison.h> +#include <linux/ratelimit.h> static inline void set_page_poison(struct page *page) { @@ -18,28 +21,13 @@ static inline bool page_poison(struct page *page) return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags); } -static void poison_highpage(struct page *page) -{ - /* - * Page poisoning for highmem pages is not implemented. - * - * This can be called from interrupt contexts. - * So we need to create a new kmap_atomic slot for this - * application and it will need interrupt protection. - */ -} - static void poison_page(struct page *page) { - void *addr; + void *addr = kmap_atomic(page); - if (PageHighMem(page)) { - poison_highpage(page); - return; - } set_page_poison(page); - addr = page_address(page); memset(addr, PAGE_POISON, PAGE_SIZE); + kunmap_atomic(addr); } static void poison_pages(struct page *page, int n) @@ -59,14 +47,12 @@ static bool single_bit_flip(unsigned char a, unsigned char b) static void check_poison_mem(unsigned char *mem, size_t bytes) { + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10); unsigned char *start; unsigned char *end; - for (start = mem; start < mem + bytes; start++) { - if (*start != PAGE_POISON) - break; - } - if (start == mem + bytes) + start = memchr_inv(mem, PAGE_POISON, bytes); + if (!start) return; for (end = mem + bytes - 1; end > start; end--) { @@ -74,7 +60,7 @@ static void check_poison_mem(unsigned char *mem, size_t bytes) break; } - if (!printk_ratelimit()) + if (!__ratelimit(&ratelimit)) return; else if (start == end && single_bit_flip(*start, PAGE_POISON)) printk(KERN_ERR "pagealloc: single bit error\n"); @@ -86,27 +72,17 @@ static void check_poison_mem(unsigned char *mem, size_t bytes) dump_stack(); } -static void unpoison_highpage(struct page *page) -{ - /* - * See comment in poison_highpage(). 
- * Highmem pages should not be poisoned for now - */ - BUG_ON(page_poison(page)); -} - static void unpoison_page(struct page *page) { - if (PageHighMem(page)) { - unpoison_highpage(page); + void *addr; + + if (!page_poison(page)) return; - } - if (page_poison(page)) { - void *addr = page_address(page); - check_poison_mem(addr, PAGE_SIZE); - clear_page_poison(page); - } + addr = kmap_atomic(page); + check_poison_mem(addr, PAGE_SIZE); + clear_page_poison(page); + kunmap_atomic(addr); } static void unpoison_pages(struct page *page, int n) diff --git a/mm/highmem.c b/mm/highmem.c index 5ef672c07f7..e159a7b1cc2 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -250,7 +250,7 @@ void *kmap_high_get(struct page *page) #endif /** - * kunmap_high - map a highmem page into memory + * kunmap_high - unmap a highmem page into memory * @page: &struct page to unmap * * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e2d1587be26..860ec211ddd 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -89,7 +89,8 @@ struct khugepaged_scan { struct list_head mm_head; struct mm_slot *mm_slot; unsigned long address; -} khugepaged_scan = { +}; +static struct khugepaged_scan khugepaged_scan = { .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), }; @@ -829,7 +830,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, for (i = 0; i < HPAGE_PMD_NR; i++) { copy_user_highpage(pages[i], page + i, - haddr + PAGE_SHIFT*i, vma); + haddr + PAGE_SIZE * i, vma); __SetPageUptodate(pages[i]); cond_resched(); } @@ -1052,6 +1053,51 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, return ret; } +int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, + unsigned long old_addr, + unsigned long new_addr, unsigned long old_end, + pmd_t *old_pmd, pmd_t *new_pmd) +{ + int ret = 0; + pmd_t pmd; + + struct mm_struct *mm = vma->vm_mm; + + if ((old_addr & ~HPAGE_PMD_MASK) || + (new_addr & ~HPAGE_PMD_MASK) || + old_end - old_addr < HPAGE_PMD_SIZE || + (new_vma->vm_flags & VM_NOHUGEPAGE)) + goto out; + + /* + * The destination pmd shouldn't be established, free_pgtables() + * should have release it. 
+ */ + if (WARN_ON(!pmd_none(*new_pmd))) { + VM_BUG_ON(pmd_trans_huge(*new_pmd)); + goto out; + } + + spin_lock(&mm->page_table_lock); + if (likely(pmd_trans_huge(*old_pmd))) { + if (pmd_trans_splitting(*old_pmd)) { + spin_unlock(&mm->page_table_lock); + wait_split_huge_page(vma->anon_vma, old_pmd); + ret = -1; + } else { + pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); + VM_BUG_ON(!pmd_none(*new_pmd)); + set_pmd_at(mm, new_addr, new_pmd, pmd); + spin_unlock(&mm->page_table_lock); + ret = 1; + } + } else { + spin_unlock(&mm->page_table_lock); + } +out: + return ret; +} + int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot) { @@ -1906,7 +1952,7 @@ static void collapse_huge_page(struct mm_struct *mm, BUG_ON(!pmd_none(*pmd)); page_add_new_anon_rmap(new_page, vma, address); set_pmd_at(mm, address, pmd, _pmd); - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, _pmd); prepare_pmd_huge_pte(pgtable, mm); mm->nr_ptes--; spin_unlock(&mm->page_table_lock); @@ -2024,6 +2070,8 @@ static void collect_mm_slot(struct mm_slot *mm_slot) static unsigned int khugepaged_scan_mm_slot(unsigned int pages, struct page **hpage) + __releases(&khugepaged_mm_lock) + __acquires(&khugepaged_mm_lock) { struct mm_slot *mm_slot; struct mm_struct *mm; @@ -1905,7 +1905,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); err = unmerge_and_remove_all_rmap_items(); - test_set_oom_score_adj(oom_score_adj); + compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, + oom_score_adj); if (err) { ksm_run = KSM_RUN_STOP; count = err; diff --git a/mm/memblock.c b/mm/memblock.c index ccbf9733959..84bec4969ed 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -58,7 +58,8 @@ static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, p return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); } -long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) +static long __init_memblock memblock_overlaps_region(struct memblock_type *type, + phys_addr_t base, phys_addr_t size) { unsigned long i; @@ -267,7 +268,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) return 0; } -extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, +int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, phys_addr_t addr2, phys_addr_t size2) { return 1; @@ -626,6 +627,12 @@ phys_addr_t __init memblock_phys_mem_size(void) return memblock.memory_size; } +/* lowest address */ +phys_addr_t __init_memblock memblock_start_of_DRAM(void) +{ + return memblock.memory.regions[0].base; +} + phys_addr_t __init_memblock memblock_end_of_DRAM(void) { int idx = memblock.memory.cnt - 1; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3508777837c..2d5755544af 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1185,7 +1185,8 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page) unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, - int mode, struct zone *z, + isolate_mode_t mode, + struct zone *z, struct mem_cgroup *mem_cont, int active, int file) { diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 2b43ba051ac..edc388db730 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1310,7 +1310,7 @@ int unpoison_memory(unsigned long pfn) * to the end. 
*/ if (PageHuge(page)) { - pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn); + pr_info("MCE: Memory failure is now running on free hugepage %#lx\n", pfn); return 0; } if (TestClearPageHWPoison(p)) @@ -1419,7 +1419,7 @@ static int soft_offline_huge_page(struct page *page, int flags) if (PageHWPoison(hpage)) { put_page(hpage); - pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn); + pr_info("soft offline: %#lx hugepage already poisoned\n", pfn); return -EBUSY; } @@ -1433,8 +1433,8 @@ static int soft_offline_huge_page(struct page *page, int flags) list_for_each_entry_safe(page1, page2, &pagelist, lru) put_page(page1); - pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", - pfn, ret, page->flags); + pr_info("soft offline: %#lx: migration failed %d, type %lx\n", + pfn, ret, page->flags); if (ret > 0) ret = -EIO; return ret; @@ -1505,7 +1505,7 @@ int soft_offline_page(struct page *page, int flags) } if (!PageLRU(page)) { pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", - pfn, page->flags); + pfn, page->flags); return -EIO; } @@ -1566,7 +1566,7 @@ int soft_offline_page(struct page *page, int flags) } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", - pfn, ret, page_count(page), page->flags); + pfn, ret, page_count(page), page->flags); } if (ret) return ret; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 9c51f9f58ca..cd237f47830 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -111,7 +111,7 @@ enum zone_type policy_zone = 0; /* * run-time system-wide default policy => local allocation */ -struct mempolicy default_policy = { +static struct mempolicy default_policy = { .refcnt = ATOMIC_INIT(1), /* never free it */ .mode = MPOL_PREFERRED, .flags = MPOL_F_LOCAL, diff --git a/mm/migrate.c b/mm/migrate.c index 14d0a6a632f..33358f87811 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -621,38 +621,18 @@ static int move_to_new_page(struct page *newpage, struct page *page, return rc; } -/* - * Obtain the lock on page, remove all ptes and migrate the page - * to the newly allocated page in newpage. - */ -static int unmap_and_move(new_page_t get_new_page, unsigned long private, - struct page *page, int force, bool offlining, bool sync) +static int __unmap_and_move(struct page *page, struct page *newpage, + int force, bool offlining, bool sync) { - int rc = 0; - int *result = NULL; - struct page *newpage = get_new_page(page, private, &result); + int rc = -EAGAIN; int remap_swapcache = 1; int charge = 0; struct mem_cgroup *mem; struct anon_vma *anon_vma = NULL; - if (!newpage) - return -ENOMEM; - - if (page_count(page) == 1) { - /* page was freed from under us. So we are done. */ - goto move_newpage; - } - if (unlikely(PageTransHuge(page))) - if (unlikely(split_huge_page(page))) - goto move_newpage; - - /* prepare cgroup just returns 0 or -ENOMEM */ - rc = -EAGAIN; - if (!trylock_page(page)) { if (!force || !sync) - goto move_newpage; + goto out; /* * It's not safe for direct compaction to call lock_page. @@ -668,7 +648,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, * altogether. */ if (current->flags & PF_MEMALLOC) - goto move_newpage; + goto out; lock_page(page); } @@ -785,27 +765,52 @@ uncharge: mem_cgroup_end_migration(mem, page, newpage, rc == 0); unlock: unlock_page(page); +out: + return rc; +} -move_newpage: +/* + * Obtain the lock on page, remove all ptes and migrate the page + * to the newly allocated page in newpage. 
+ */ +static int unmap_and_move(new_page_t get_new_page, unsigned long private, + struct page *page, int force, bool offlining, bool sync) +{ + int rc = 0; + int *result = NULL; + struct page *newpage = get_new_page(page, private, &result); + + if (!newpage) + return -ENOMEM; + + if (page_count(page) == 1) { + /* page was freed from under us. So we are done. */ + goto out; + } + + if (unlikely(PageTransHuge(page))) + if (unlikely(split_huge_page(page))) + goto out; + + rc = __unmap_and_move(page, newpage, force, offlining, sync); +out: if (rc != -EAGAIN) { - /* - * A page that has been migrated has all references - * removed and will be freed. A page that has not been - * migrated will have kepts its references and be - * restored. - */ - list_del(&page->lru); + /* + * A page that has been migrated has all references + * removed and will be freed. A page that has not been + * migrated will have kepts its references and be + * restored. + */ + list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); putback_lru_page(page); } - /* * Move the new page to the LRU. If migration was not successful * then this will free the page. */ putback_lru_page(newpage); - if (result) { if (rc) *result = rc; diff --git a/mm/mlock.c b/mm/mlock.c index 048260c4e02..bd34b3a1085 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -110,7 +110,15 @@ void munlock_vma_page(struct page *page) if (TestClearPageMlocked(page)) { dec_zone_page_state(page, NR_MLOCK); if (!isolate_lru_page(page)) { - int ret = try_to_munlock(page); + int ret = SWAP_AGAIN; + + /* + * Optimization: if the page was mapped just once, + * that's our mapping and we don't need to check all the + * other vmas. + */ + if (page_mapcount(page) > 1) + ret = try_to_munlock(page); /* * did try_to_unlock() succeed or punt? 
*/ @@ -549,7 +557,8 @@ SYSCALL_DEFINE1(mlockall, int, flags) if (!can_do_mlock()) goto out; - lru_add_drain_all(); /* flush pagevec */ + if (flags & MCL_CURRENT) + lru_add_drain_all(); /* flush pagevec */ down_write(¤t->mm->mmap_sem); diff --git a/mm/mmap.c b/mm/mmap.c index a65efd4db3e..3c0061f744f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2558,7 +2558,6 @@ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; - int ret = -EINTR; BUG_ON(down_read_trylock(&mm->mmap_sem)); @@ -2579,13 +2578,11 @@ int mm_take_all_locks(struct mm_struct *mm) vm_lock_anon_vma(mm, avc->anon_vma); } - ret = 0; + return 0; out_unlock: - if (ret) - mm_drop_all_locks(mm); - - return ret; + mm_drop_all_locks(mm); + return -EINTR; } static void vm_unlock_anon_vma(struct anon_vma *anon_vma) diff --git a/mm/mremap.c b/mm/mremap.c index 506fa44403d..d6959cb4df5 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -41,8 +41,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) return NULL; pmd = pmd_offset(pud, addr); - split_huge_page_pmd(mm, pmd); - if (pmd_none_or_clear_bad(pmd)) + if (pmd_none(*pmd)) return NULL; return pmd; @@ -65,8 +64,6 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, return NULL; VM_BUG_ON(pmd_trans_huge(*pmd)); - if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr)) - return NULL; return pmd; } @@ -80,11 +77,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; spinlock_t *old_ptl, *new_ptl; - unsigned long old_start; - old_start = old_addr; - mmu_notifier_invalidate_range_start(vma->vm_mm, - old_start, old_end); if (vma->vm_file) { /* * Subtle point from Rajesh Venkatasubramanian: before @@ -111,7 +104,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, new_pte++, new_addr += PAGE_SIZE) { if (pte_none(*old_pte)) continue; - pte = ptep_clear_flush(vma, old_addr, old_pte); + pte = ptep_get_and_clear(mm, old_addr, old_pte); pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); set_pte_at(mm, new_addr, new_pte, pte); } @@ -123,7 +116,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, pte_unmap_unlock(old_pte - 1, old_ptl); if (mapping) mutex_unlock(&mapping->i_mmap_mutex); - mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end); } #define LATENCY_LIMIT (64 * PAGE_SIZE) @@ -134,22 +126,43 @@ unsigned long move_page_tables(struct vm_area_struct *vma, { unsigned long extent, next, old_end; pmd_t *old_pmd, *new_pmd; + bool need_flush = false; old_end = old_addr + len; flush_cache_range(vma, old_addr, old_end); + mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end); + for (; old_addr < old_end; old_addr += extent, new_addr += extent) { cond_resched(); next = (old_addr + PMD_SIZE) & PMD_MASK; - if (next - 1 > old_end) - next = old_end; + /* even if next overflowed, extent below will be ok */ extent = next - old_addr; + if (extent > old_end - old_addr) + extent = old_end - old_addr; old_pmd = get_old_pmd(vma->vm_mm, old_addr); if (!old_pmd) continue; new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; + if (pmd_trans_huge(*old_pmd)) { + int err = 0; + if (extent == HPAGE_PMD_SIZE) + err = move_huge_pmd(vma, new_vma, old_addr, + new_addr, old_end, + old_pmd, new_pmd); + if (err > 0) { + need_flush = true; + continue; + } else if (!err) { + split_huge_page_pmd(vma->vm_mm, old_pmd); + } + VM_BUG_ON(pmd_trans_huge(*old_pmd)); + } + if 
(pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, + new_pmd, new_addr)) + break; next = (new_addr + PMD_SIZE) & PMD_MASK; if (extent > next - new_addr) extent = next - new_addr; @@ -157,7 +170,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma, extent = LATENCY_LIMIT; move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, new_pmd, new_addr); + need_flush = true; } + if (likely(need_flush)) + flush_tlb_range(vma, old_end-len, old_addr); + + mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end); return len + old_addr - old_end; /* how much done */ } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 626303b52f3..e916168b6e0 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -32,12 +32,32 @@ #include <linux/mempolicy.h> #include <linux/security.h> #include <linux/ptrace.h> +#include <linux/freezer.h> int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; int sysctl_oom_dump_tasks = 1; static DEFINE_SPINLOCK(zone_scan_lock); +/* + * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj + * @old_val: old oom_score_adj for compare + * @new_val: new oom_score_adj for swap + * + * Sets the oom_score_adj value for current to @new_val iff its present value is + * @old_val. Usually used to reinstate a previous value to prevent racing with + * userspacing tuning the value in the interim. + */ +void compare_swap_oom_score_adj(int old_val, int new_val) +{ + struct sighand_struct *sighand = current->sighand; + + spin_lock_irq(&sighand->siglock); + if (current->signal->oom_score_adj == old_val) + current->signal->oom_score_adj = new_val; + spin_unlock_irq(&sighand->siglock); +} + /** * test_set_oom_score_adj() - set current's oom_score_adj and return old value * @new_val: new oom_score_adj value @@ -53,13 +73,7 @@ int test_set_oom_score_adj(int new_val) spin_lock_irq(&sighand->siglock); old_val = current->signal->oom_score_adj; - if (new_val != old_val) { - if (new_val == OOM_SCORE_ADJ_MIN) - atomic_inc(¤t->mm->oom_disable_count); - else if (old_val == OOM_SCORE_ADJ_MIN) - atomic_dec(¤t->mm->oom_disable_count); - current->signal->oom_score_adj = new_val; - } + current->signal->oom_score_adj = new_val; spin_unlock_irq(&sighand->siglock); return old_val; @@ -172,16 +186,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, return 0; /* - * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN - * so the entire heuristic doesn't need to be executed for something - * that cannot be killed. - */ - if (atomic_read(&p->mm->oom_disable_count)) { - task_unlock(p); - return 0; - } - - /* * The memory controller may have a limit of 0 bytes, so avoid a divide * by zero, if necessary. */ @@ -317,8 +321,11 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, * blocked waiting for another task which itself is waiting * for memory. Is there a better alternative? */ - if (test_tsk_thread_flag(p, TIF_MEMDIE)) + if (test_tsk_thread_flag(p, TIF_MEMDIE)) { + if (unlikely(frozen(p))) + thaw_process(p); return ERR_PTR(-1UL); + } if (!p->mm) continue; @@ -435,7 +442,7 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) task_unlock(p); /* - * Kill all processes sharing p->mm in other thread groups, if any. + * Kill all user processes sharing p->mm in other thread groups, if any. * They don't get access to memory reserves or a higher scheduler * priority, though, to avoid depletion of all memory or task * starvation. 
This prevents mm->mmap_sem livelock when an oom killed @@ -445,7 +452,11 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) * signal. */ for_each_process(q) - if (q->mm == mm && !same_thread_group(q, p)) { + if (q->mm == mm && !same_thread_group(q, p) && + !(q->flags & PF_KTHREAD)) { + if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) + continue; + task_lock(q); /* Protect ->comm from prctl() */ pr_err("Kill process %d (%s) sharing same memory\n", task_pid_nr(q), q->comm); @@ -722,7 +733,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, read_lock(&tasklist_lock); if (sysctl_oom_kill_allocating_task && !oom_unkillable_task(current, NULL, nodemask) && - current->mm && !atomic_read(¤t->mm->oom_disable_count)) { + current->mm) { /* * oom_kill_process() needs tasklist_lock held. If it returns * non-zero, current could not be killed so we must fallback to diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0e309cd1b5b..793e9874de5 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -305,7 +305,9 @@ static unsigned long task_min_dirty_limit(unsigned long bdi_dirty) } /* - * + * bdi_min_ratio keeps the sum of the minimum dirty shares of all + * registered backing devices, which, for obvious reasons, can not + * exceed 100%. */ static unsigned int bdi_min_ratio; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6e8ecb6e021..9dd443d89d8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -318,6 +318,7 @@ static void bad_page(struct page *page) current->comm, page_to_pfn(page)); dump_page(page); + print_modules(); dump_stack(); out: /* Leave bad fields for debug, except PageBuddy could make trouble */ @@ -1753,7 +1754,6 @@ static DEFINE_RATELIMIT_STATE(nopage_rs, void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) { - va_list args; unsigned int filter = SHOW_MEM_FILTER_NODES; if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) @@ -1772,14 +1772,21 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) filter &= ~SHOW_MEM_FILTER_NODES; if (fmt) { - printk(KERN_WARNING); + struct va_format vaf; + va_list args; + va_start(args, fmt); - vprintk(fmt, args); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_warn("%pV", &vaf); + va_end(args); } |