Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig          |   2
-rw-r--r--   mm/huge_memory.c    |   3
-rw-r--r--   mm/internal.h       |   2
-rw-r--r--   mm/kmemleak.c       |   9
-rw-r--r--   mm/ksm.c            |  15
-rw-r--r--   mm/memblock.c       |  50
-rw-r--r--   mm/mlock.c          |  34
-rw-r--r--   mm/mmap.c           |  27
-rw-r--r--   mm/mmu_notifier.c   |  18
-rw-r--r--   mm/page-writeback.c |   4
-rw-r--r--   mm/page_alloc.c     | 285
11 files changed, 76 insertions, 373 deletions
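
A large share of the one-line churn below (mm/huge_memory.c, mm/kmemleak.c, mm/ksm.c, mm/mmu_notifier.c) is mechanical fallout from the hlist iterator rework: hlist_for_each_entry(), hlist_for_each_entry_rcu() and hash_for_each_possible() no longer take a separate struct hlist_node * cursor and hand back the containing object directly. A minimal sketch of the new calling convention, using a made-up foo structure and hash table rather than anything from this diff:

#include <linux/hashtable.h>
#include <linux/list.h>

struct foo {
        unsigned long key;
        struct hlist_node link;
};

static DEFINE_HASHTABLE(foo_hash, 6);          /* 2^6 = 64 buckets */

/* New-style lookup: the iterator yields 'f' directly, no cursor variable. */
static struct foo *foo_lookup(unsigned long key)
{
        struct foo *f;

        hash_for_each_possible(foo_hash, f, link, key)
                if (f->key == key)
                        return f;
        return NULL;
}

/*
 * Pre-change equivalent, for comparison:
 *
 *      struct hlist_node *node;
 *
 *      hash_for_each_possible(foo_hash, f, node, link, key)
 *              if (f->key == key)
 *                      return f;
 */

The _safe variant still carries one struct hlist_node *tmp, but only to allow deletion while walking; that is why the kmemleak hunk switches from hlist_del(elem) to hlist_del(&area->node).
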
diff --git a/mm/Kconfig b/mm/Kconfig index 2c7aea7106f..ae55c1e04d1 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -287,7 +287,7 @@ config NR_QUICK config VIRT_TO_BUS def_bool y - depends on !ARCH_NO_VIRT_TO_BUS + depends on HAVE_VIRT_TO_BUS config MMU_NOTIFIER bool diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bfa142e67b1..e2f7f5aaaaf 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1906,9 +1906,8 @@ static inline void free_mm_slot(struct mm_slot *mm_slot) static struct mm_slot *get_mm_slot(struct mm_struct *mm) { struct mm_slot *mm_slot; - struct hlist_node *node; - hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm) + hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) if (mm == mm_slot->mm) return mm_slot; diff --git a/mm/internal.h b/mm/internal.h index 1c0c4cc0fcf..8562de0a519 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma, * must be called with vma's mmap_sem held for read or write, and page locked. */ extern void mlock_vma_page(struct page *page); -extern void munlock_vma_page(struct page *page); +extern unsigned int munlock_vma_page(struct page *page); /* * Clear the page's PageMlocked(). This can be useful in a situation where diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 83dd5fbf5e6..c8d7f3110fd 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -436,7 +436,7 @@ static int get_object(struct kmemleak_object *object) */ static void free_object_rcu(struct rcu_head *rcu) { - struct hlist_node *elem, *tmp; + struct hlist_node *tmp; struct kmemleak_scan_area *area; struct kmemleak_object *object = container_of(rcu, struct kmemleak_object, rcu); @@ -445,8 +445,8 @@ static void free_object_rcu(struct rcu_head *rcu) * Once use_count is 0 (guaranteed by put_object), there is no other * code accessing this object, hence no need for locking. 
*/ - hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) { - hlist_del(elem); + hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { + hlist_del(&area->node); kmem_cache_free(scan_area_cache, area); } kmem_cache_free(object_cache, object); @@ -1177,7 +1177,6 @@ static void scan_block(void *_start, void *_end, static void scan_object(struct kmemleak_object *object) { struct kmemleak_scan_area *area; - struct hlist_node *elem; unsigned long flags; /* @@ -1205,7 +1204,7 @@ static void scan_object(struct kmemleak_object *object) spin_lock_irqsave(&object->lock, flags); } } else - hlist_for_each_entry(area, elem, &object->area_list, node) + hlist_for_each_entry(area, &object->area_list, node) scan_block((void *)area->start, (void *)(area->start + area->size), object, 0); @@ -320,10 +320,9 @@ static inline void free_mm_slot(struct mm_slot *mm_slot) static struct mm_slot *get_mm_slot(struct mm_struct *mm) { - struct hlist_node *node; struct mm_slot *slot; - hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm) + hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm) if (slot->mm == mm) return slot; @@ -496,9 +495,8 @@ static inline int get_kpfn_nid(unsigned long kpfn) static void remove_node_from_stable_tree(struct stable_node *stable_node) { struct rmap_item *rmap_item; - struct hlist_node *hlist; - hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { if (rmap_item->hlist.next) ksm_pages_sharing--; else @@ -1898,7 +1896,6 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, { struct stable_node *stable_node; struct rmap_item *rmap_item; - struct hlist_node *hlist; unsigned int mapcount = page_mapcount(page); int referenced = 0; int search_new_forks = 0; @@ -1910,7 +1907,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, if (!stable_node) return 0; again: - hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; @@ -1952,7 +1949,6 @@ out: int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) { struct stable_node *stable_node; - struct hlist_node *hlist; struct rmap_item *rmap_item; int ret = SWAP_AGAIN; int search_new_forks = 0; @@ -1964,7 +1960,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) if (!stable_node) return SWAP_FAIL; again: - hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; @@ -2005,7 +2001,6 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, struct vm_area_struct *, unsigned long, void *), void *arg) { struct stable_node *stable_node; - struct hlist_node *hlist; struct rmap_item *rmap_item; int ret = SWAP_AGAIN; int search_new_forks = 0; @@ -2017,7 +2012,7 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, if (!stable_node) return ret; again: - hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; diff --git a/mm/memblock.c b/mm/memblock.c index 1bcd9b97056..b8d9147e5c0 100644 --- 
a/mm/memblock.c +++ b/mm/memblock.c @@ -92,58 +92,9 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type, * * Find @size free area aligned to @align in the specified range and node. * - * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check if the - * memory we found if not in hotpluggable ranges. - * * RETURNS: * Found address on success, %0 on failure. */ -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP -phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, - phys_addr_t end, phys_addr_t size, - phys_addr_t align, int nid) -{ - phys_addr_t this_start, this_end, cand; - u64 i; - int curr = movablemem_map.nr_map - 1; - - /* pump up @end */ - if (end == MEMBLOCK_ALLOC_ACCESSIBLE) - end = memblock.current_limit; - - /* avoid allocating the first page */ - start = max_t(phys_addr_t, start, PAGE_SIZE); - end = max(start, end); - - for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) { - this_start = clamp(this_start, start, end); - this_end = clamp(this_end, start, end); - -restart: - if (this_end <= this_start || this_end < size) - continue; - - for (; curr >= 0; curr--) { - if ((movablemem_map.map[curr].start_pfn << PAGE_SHIFT) - < this_end) - break; - } - - cand = round_down(this_end - size, align); - if (curr >= 0 && - cand < movablemem_map.map[curr].end_pfn << PAGE_SHIFT) { - this_end = movablemem_map.map[curr].start_pfn - << PAGE_SHIFT; - goto restart; - } - - if (cand >= this_start) - return cand; - } - - return 0; -} -#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align, int nid) @@ -172,7 +123,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, } return 0; } -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ /** * memblock_find_in_range - find free area in given range diff --git a/mm/mlock.c b/mm/mlock.c index e6638f565d4..1c5e33fce63 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page) * can't isolate the page, we leave it for putback_lru_page() and vmscan * [page_referenced()/try_to_unmap()] to deal with. */ -void munlock_vma_page(struct page *page) +unsigned int munlock_vma_page(struct page *page) { + unsigned int page_mask = 0; + BUG_ON(!PageLocked(page)); if (TestClearPageMlocked(page)) { - mod_zone_page_state(page_zone(page), NR_MLOCK, - -hpage_nr_pages(page)); + unsigned int nr_pages = hpage_nr_pages(page); + mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); + page_mask = nr_pages - 1; if (!isolate_lru_page(page)) { int ret = SWAP_AGAIN; @@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page) count_vm_event(UNEVICTABLE_PGMUNLOCKED); } } + + return page_mask; } /** @@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; - unsigned long addr = start; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; @@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma, * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. 
*/ - return __get_user_pages(current, mm, addr, nr_pages, gup_flags, + return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } @@ -226,13 +230,12 @@ static int __mlock_posix_error_return(long retval) void munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned long addr; - - lru_add_drain(); vma->vm_flags &= ~VM_LOCKED; - for (addr = start; addr < end; addr += PAGE_SIZE) { + while (start < end) { struct page *page; + unsigned int page_mask, page_increm; + /* * Although FOLL_DUMP is intended for get_dump_page(), * it just so happens that its special treatment of the @@ -240,13 +243,22 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, * suits munlock very well (and if somehow an abnormal page * has sneaked into the range, we won't oops here: great). */ - page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); + page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, + &page_mask); if (page && !IS_ERR(page)) { lock_page(page); - munlock_vma_page(page); + lru_add_drain(); + /* + * Any THP page found by follow_page_mask() may have + * gotten split before reaching munlock_vma_page(), + * so we need to recompute the page_mask here. + */ + page_mask = munlock_vma_page(page); unlock_page(page); put_page(page); } + page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask); + start += page_increm * PAGE_SIZE; cond_resched(); } } diff --git a/mm/mmap.c b/mm/mmap.c index 37a1fcac029..2664a47cec9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2185,9 +2185,28 @@ int expand_downwards(struct vm_area_struct *vma, return error; } +/* + * Note how expand_stack() refuses to expand the stack all the way to + * abut the next virtual mapping, *unless* that mapping itself is also + * a stack mapping. We want to leave room for a guard page, after all + * (the guard page itself is not added here, that is done by the + * actual page faulting logic) + * + * This matches the behavior of the guard page logic (see mm/memory.c: + * check_stack_guard_page()), which only allows the guard page to be + * removed under these circumstances. 
+ */ #ifdef CONFIG_STACK_GROWSUP int expand_stack(struct vm_area_struct *vma, unsigned long address) { + struct vm_area_struct *next; + + address &= PAGE_MASK; + next = vma->vm_next; + if (next && next->vm_start == address + PAGE_SIZE) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + } return expand_upwards(vma, address); } @@ -2209,6 +2228,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { + struct vm_area_struct *prev; + + address &= PAGE_MASK; + prev = vma->vm_prev; + if (prev && prev->vm_end == address) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + } return expand_downwards(vma, address); } diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 2175fb0d501..be04122fb27 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -95,11 +95,10 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long address) { struct mmu_notifier *mn; - struct hlist_node *n; int young = 0, id; id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->clear_flush_young) young |= mn->ops->clear_flush_young(mn, mm, address); } @@ -112,11 +111,10 @@ int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address) { struct mmu_notifier *mn; - struct hlist_node *n; int young = 0, id; id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->test_young) { young = mn->ops->test_young(mn, mm, address); if (young) @@ -132,11 +130,10 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte) { struct mmu_notifier *mn; - struct hlist_node *n; int id; id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->change_pte) mn->ops->change_pte(mn, mm, address, pte); } @@ -147,11 +144,10 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address) { struct mmu_notifier *mn; - struct hlist_node *n; int id; id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->invalidate_page) mn->ops->invalidate_page(mn, mm, address); } @@ -162,11 +158,10 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) { struct mmu_notifier *mn; - struct hlist_node *n; int id; id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->invalidate_range_start) mn->ops->invalidate_range_start(mn, mm, start, end); } @@ -178,11 +173,10 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, unsigned long start, unsigned long end) { struct mmu_notifier *mn; - struct hlist_node *n; int id; id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->invalidate_range_end) mn->ops->invalidate_range_end(mn, mm, start, end); } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index cdc377c456c..efe68148f62 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -696,7 +696,7 
@@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, * => fast response on large errors; small oscillation near setpoint */ setpoint = (freerun + limit) / 2; - x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT, + x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, limit - setpoint + 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; @@ -1986,6 +1986,8 @@ int __set_page_dirty_no_writeback(struct page *page) */ void account_page_dirtied(struct page *page, struct address_space *mapping) { + trace_writeback_dirty_page(page, mapping); + if (mapping_cap_account_dirty(mapping)) { __inc_zone_page_state(page, NR_FILE_DIRTY); __inc_zone_page_state(page, NR_DIRTIED); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0dade3f18f7..8fcced7823f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -202,18 +202,11 @@ static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP -/* Movable memory ranges, will also be used by memblock subsystem. */ -struct movablemem_map movablemem_map = { - .acpi = false, - .nr_map = 0, -}; - static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; static unsigned long __initdata required_kernelcore; static unsigned long __initdata required_movablecore; static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; -static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES]; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; @@ -4412,77 +4405,6 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); } -/** - * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array. - * - * zone_movable_limit is initialized as 0. This function will try to get - * the first ZONE_MOVABLE pfn of each node from movablemem_map, and - * assigne them to zone_movable_limit. - * zone_movable_limit[nid] == 0 means no limit for the node. - * - * Note: Each range is represented as [start_pfn, end_pfn) - */ -static void __meminit sanitize_zone_movable_limit(void) -{ - int map_pos = 0, i, nid; - unsigned long start_pfn, end_pfn; - - if (!movablemem_map.nr_map) - return; - - /* Iterate all ranges from minimum to maximum */ - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { - /* - * If we have found lowest pfn of ZONE_MOVABLE of the node - * specified by user, just go on to check next range. - */ - if (zone_movable_limit[nid]) - continue; - -#ifdef CONFIG_ZONE_DMA - /* Skip DMA memory. */ - if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA]) - start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA]; -#endif - -#ifdef CONFIG_ZONE_DMA32 - /* Skip DMA32 memory. */ - if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32]) - start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32]; -#endif - -#ifdef CONFIG_HIGHMEM - /* Skip lowmem if ZONE_MOVABLE is highmem. 
*/ - if (zone_movable_is_highmem() && - start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]) - start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]; -#endif - - if (start_pfn >= end_pfn) - continue; - - while (map_pos < movablemem_map.nr_map) { - if (end_pfn <= movablemem_map.map[map_pos].start_pfn) - break; - - if (start_pfn >= movablemem_map.map[map_pos].end_pfn) { - map_pos++; - continue; - } - - /* - * The start_pfn of ZONE_MOVABLE is either the minimum - * pfn specified by movablemem_map, or 0, which means - * the node has no ZONE_MOVABLE. - */ - zone_movable_limit[nid] = max(start_pfn, - movablemem_map.map[map_pos].start_pfn); - - break; - } - } -} - #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long zone_type, @@ -4500,6 +4422,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid, return zholes_size[zone_type]; } + #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, @@ -4941,19 +4864,12 @@ static void __init find_zone_movable_pfns_for_nodes(void) required_kernelcore = max(required_kernelcore, corepages); } - /* - * If neither kernelcore/movablecore nor movablemem_map is specified, - * there is no ZONE_MOVABLE. But if movablemem_map is specified, the - * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[]. - */ - if (!required_kernelcore) { - if (movablemem_map.nr_map) - memcpy(zone_movable_pfn, zone_movable_limit, - sizeof(zone_movable_pfn)); + /* If kernelcore was not specified, there is no ZONE_MOVABLE */ + if (!required_kernelcore) goto out; - } /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ + find_usable_zone_for_movable(); usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; restart: @@ -4981,24 +4897,10 @@ restart: for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { unsigned long size_pages; - /* - * Find more memory for kernelcore in - * [zone_movable_pfn[nid], zone_movable_limit[nid]). - */ start_pfn = max(start_pfn, zone_movable_pfn[nid]); if (start_pfn >= end_pfn) continue; - if (zone_movable_limit[nid]) { - end_pfn = min(end_pfn, zone_movable_limit[nid]); - /* No range left for kernelcore in this node */ - if (start_pfn >= end_pfn) { - zone_movable_pfn[nid] = - zone_movable_limit[nid]; - break; - } - } - /* Account for what is only usable for kernelcore */ if (start_pfn < usable_startpfn) { unsigned long kernel_pages; @@ -5058,12 +4960,12 @@ restart: if (usable_nodes && required_kernelcore > usable_nodes) goto restart; -out: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ for (nid = 0; nid < MAX_NUMNODES; nid++) zone_movable_pfn[nid] = roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); +out: /* restore the node_state */ node_states[N_MEMORY] = saved_node_state; } @@ -5126,8 +5028,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) /* Find the PFNs that ZONE_MOVABLE begins at in each node */ memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); - find_usable_zone_for_movable(); - sanitize_zone_movable_limit(); find_zone_movable_pfns_for_nodes(); /* Print out the zone ranges */ @@ -5211,181 +5111,6 @@ static int __init cmdline_parse_movablecore(char *p) early_param("kernelcore", cmdline_parse_kernelcore); early_param("movablecore", cmdline_parse_movablecore); -/** - * movablemem_map_overlap() - Check if a range overlaps movablemem_map.map[]. 
- * @start_pfn: start pfn of the range to be checked - * @end_pfn: end pfn of the range to be checked (exclusive) - * - * This function checks if a given memory range [start_pfn, end_pfn) overlaps - * the movablemem_map.map[] array. - * - * Return: index of the first overlapped element in movablemem_map.map[] - * or -1 if they don't overlap each other. - */ -int __init movablemem_map_overlap(unsigned long start_pfn, - unsigned long end_pfn) -{ - int overlap; - - if (!movablemem_map.nr_map) - return -1; - - for (overlap = 0; overlap < movablemem_map.nr_map; overlap++) - if (start_pfn < movablemem_map.map[overlap].end_pfn) - break; - - if (overlap == movablemem_map.nr_map || - end_pfn <= movablemem_map.map[overlap].start_pfn) - return -1; - - return overlap; -} - -/** - * insert_movablemem_map - Insert a memory range in to movablemem_map.map. - * @start_pfn: start pfn of the range - * @end_pfn: end pfn of the range - * - * This function will also merge the overlapped ranges, and sort the array - * by start_pfn in monotonic increasing order. - */ -void __init insert_movablemem_map(unsigned long start_pfn, - unsigned long end_pfn) -{ - int pos, overlap; - - /* - * pos will be at the 1st overlapped range, or the position - * where the element should be inserted. - */ - for (pos = 0; pos < movablemem_map.nr_map; pos++) - if (start_pfn <= movablemem_map.map[pos].end_pfn) - break; - - /* If there is no overlapped range, just insert the element. */ - if (pos == movablemem_map.nr_map || - end_pfn < movablemem_map.map[pos].start_pfn) { - /* - * If pos is not the end of array, we need to move all - * the rest elements backward. - */ - if (pos < movablemem_map.nr_map) - memmove(&movablemem_map.map[pos+1], - &movablemem_map.map[pos], - sizeof(struct movablemem_entry) * - (movablemem_map.nr_map - pos)); - movablemem_map.map[pos].start_pfn = start_pfn; - movablemem_map.map[pos].end_pfn = end_pfn; - movablemem_map.nr_map++; - return; - } - - /* overlap will be at the last overlapped range */ - for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++) - if (end_pfn < movablemem_map.map[overlap].start_pfn) - break; - - /* - * If there are more ranges overlapped, we need to merge them, - * and move the rest elements forward. - */ - overlap--; - movablemem_map.map[pos].start_pfn = min(start_pfn, - movablemem_map.map[pos].start_pfn); - movablemem_map.map[pos].end_pfn = max(end_pfn, - movablemem_map.map[overlap].end_pfn); - - if (pos != overlap && overlap + 1 != movablemem_map.nr_map) - memmove(&movablemem_map.map[pos+1], - &movablemem_map.map[overlap+1], - sizeof(struct movablemem_entry) * - (movablemem_map.nr_map - overlap - 1)); - - movablemem_map.nr_map -= overlap - pos; -} - -/** - * movablemem_map_add_region - Add a memory range into movablemem_map. - * @start: physical start address of range - * @end: physical end address of range - * - * This function transform the physical address into pfn, and then add the - * range into movablemem_map by calling insert_movablemem_map(). 
- */ -static void __init movablemem_map_add_region(u64 start, u64 size) -{ - unsigned long start_pfn, end_pfn; - - /* In case size == 0 or start + size overflows */ - if (start + size <= start) - return; - - if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) { - pr_err("movablemem_map: too many entries;" - " ignoring [mem %#010llx-%#010llx]\n", - (unsigned long long) start, - (unsigned long long) (start + size - 1)); - return; - } - - start_pfn = PFN_DOWN(start); - end_pfn = PFN_UP(start + size); - insert_movablemem_map(start_pfn, end_pfn); -} - -/* - * cmdline_parse_movablemem_map - Parse boot option movablemem_map. - * @p: The boot option of the following format: - * movablemem_map=nn[KMG]@ss[KMG] - * - * This option sets the memory range [ss, ss+nn) to be used as movable memory. - * - * Return: 0 on success or -EINVAL on failure. - */ -static int __init cmdline_parse_movablemem_map(char *p) -{ - char *oldp; - u64 start_at, mem_size; - - if (!p) - goto err; - - if (!strcmp(p, "acpi")) - movablemem_map.acpi = true; - - /* - * If user decide to use info from BIOS, all the other user specified - * ranges will be ingored. - */ - if (movablemem_map.acpi) { - if (movablemem_map.nr_map) { - memset(movablemem_map.map, 0, - sizeof(struct movablemem_entry) - * movablemem_map.nr_map); - movablemem_map.nr_map = 0; - } - return 0; - } - - oldp = p; - mem_size = memparse(p, &p); - if (p == oldp) - goto err; - - if (*p == '@') { - oldp = ++p; - start_at = memparse(p, &p); - if (p == oldp || *p != '\0') - goto err; - - movablemem_map_add_region(start_at, mem_size); - return 0; - } -err: - return -EINVAL; -} -early_param("movablemem_map", cmdline_parse_movablemem_map); - #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ /** |
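
Two of the non-mechanical changes above are easier to follow with the arithmetic written out. First, the reworked munlock_vma_pages_range() no longer steps one page at a time: munlock_vma_page() now returns page_mask (nr_pages - 1 for a THP head page, 0 for a small page), and the loop advances by page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask), i.e. up to the next huge-page boundary. A standalone sketch of just that calculation, assuming 4 KB pages and a 512-page (2 MB) THP; the helper is hypothetical and only mirrors the expression used in the loop:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/*
 * Stride used by the rewritten munlock loop: number of pages from 'addr'
 * up to the boundary that follows the (huge) page containing it.
 * page_mask is nr_pages - 1 for a THP head page, 0 for a small page.
 */
static unsigned long page_increm(unsigned long addr, unsigned int page_mask)
{
        return 1 + (~(addr >> PAGE_SHIFT) & page_mask);
}

int main(void)
{
        unsigned long thp_base = 0x200000;                 /* 2 MB aligned */
        unsigned long addr = thp_base + 37 * PAGE_SIZE;    /* inside the THP */

        /* 512-page THP: prints 475, and 37 + 475 == 512 (next 2 MB boundary). */
        printf("THP stride:   %lu pages\n", page_increm(addr, 511));

        /* Small page (mask 0): always advances by exactly one page. */
        printf("small stride: %lu pages\n", page_increm(addr, 0));
        return 0;
}

Second, the mm/page-writeback.c hunk casts setpoint and dirty to s64 before subtracting, because both are unsigned long and the difference otherwise wraps around rather than going negative once the dirty count exceeds the setpoint; whether the wrapped value later happens to convert back to the intended negative number depends on the width of unsigned long, so the cast is applied before the scaling and division. A tiny userspace illustration with arbitrary values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Arbitrary page counts with the dirty count above the setpoint. */
        unsigned long setpoint = 1000, dirty = 1500;

        /* Unsigned subtraction wraps to a huge positive value ... */
        unsigned long wrong = setpoint - dirty;

        /* ... while the signed casts give the negative error the
         * feedback formula expects. */
        int64_t right = (int64_t)setpoint - (int64_t)dirty;

        printf("without casts: %lu\n", wrong);              /* 2^64 - 500 on 64-bit */
        printf("with casts:    %lld\n", (long long)right);  /* -500 */
        return 0;
}
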