Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 159
1 file changed, 81 insertions, 78 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2838c24e58..37576b822f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -537,7 +537,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	if (order == 0) {
 		__ClearPageReserved(page);
@@ -890,31 +890,51 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 }
 #endif
 
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
 	struct zone *zone;
-	int i;
 
 	for_each_zone(zone) {
 		struct per_cpu_pageset *pset;
+		struct per_cpu_pages *pcp;
 
 		if (!populated_zone(zone))
 			continue;
 
 		pset = zone_pcp(zone, cpu);
-		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-			struct per_cpu_pages *pcp;
-
-			pcp = &pset->pcp[i];
-			local_irq_save(flags);
-			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-			pcp->count = 0;
-			local_irq_restore(flags);
-		}
+
+		pcp = &pset->pcp;
+		local_irq_save(flags);
+		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+		pcp->count = 0;
+		local_irq_restore(flags);
 	}
 }
 
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+	drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+	on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
 #ifdef CONFIG_HIBERNATION
 
 void mark_free_pages(struct zone *zone)
@@ -952,40 +972,9 @@ void mark_free_pages(struct zone *zone)
 #endif /* CONFIG_PM */
 
 /*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
-	drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-
-	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
-/*
  * Free a 0-order page
  */
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -1001,10 +990,13 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	arch_free_page(page, 0);
 	kernel_map_pages(page, 1, 0);
 
-	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	local_irq_save(flags);
 	__count_vm_event(PGFREE);
-	list_add(&page->lru, &pcp->list);
+	if (cold)
+		list_add_tail(&page->lru, &pcp->list);
+	else
+		list_add(&page->lru, &pcp->list);
 	set_page_private(page, get_pageblock_migratetype(page));
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
@@ -1015,12 +1007,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	put_cpu();
 }
 
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
 {
 	free_hot_cold_page(page, 0);
 }
 
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
 {
 	free_hot_cold_page(page, 1);
 }
@@ -1062,7 +1054,7 @@ again:
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
 
-		pcp = &zone_pcp(zone, cpu)->pcp[cold];
+		pcp = &zone_pcp(zone, cpu)->pcp;
 		local_irq_save(flags);
 		if (!pcp->count) {
 			pcp->count = rmqueue_bulk(zone, 0,
@@ -1072,9 +1064,15 @@ again:
 		}
 
 		/* Find a page of the appropriate migrate type */
-		list_for_each_entry(page, &pcp->list, lru)
-			if (page_private(page) == migratetype)
-				break;
+		if (cold) {
+			list_for_each_entry_reverse(page, &pcp->list, lru)
+				if (page_private(page) == migratetype)
+					break;
+		} else {
+			list_for_each_entry(page, &pcp->list, lru)
+				if (page_private(page) == migratetype)
+					break;
+		}
 
 		/* Allocate more to the pcp list if necessary */
 		if (unlikely(&page->lru == &pcp->list)) {
@@ -1569,7 +1567,7 @@ nofail_alloc:
 	cond_resched();
 
 	if (order != 0)
-		drain_all_local_pages();
+		drain_all_pages();
 
 	if (likely(did_some_progress)) {
 		page = get_page_from_freelist(gfp_mask, order,
@@ -1643,7 +1641,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page * page;
 	page = alloc_pages(gfp_mask, order);
@@ -1654,7 +1652,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
 	struct page * page;
 
@@ -1680,7 +1678,7 @@ void __pagevec_free(struct pagevec *pvec)
 		free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
 
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
@@ -1692,7 +1690,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
 
 EXPORT_SYMBOL(__free_pages);
 
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
 {
 	if (addr != 0) {
 		VM_BUG_ON(!virt_addr_valid((void *)addr));
@@ -1801,12 +1799,9 @@ void show_free_areas(void)
 
 		pageset = zone_pcp(zone, cpu);
 
-		printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
-			"Cold: hi:%5d, btch:%4d usd:%4d\n",
-			cpu, pageset->pcp[0].high,
-			pageset->pcp[0].batch, pageset->pcp[0].count,
-			pageset->pcp[1].high, pageset->pcp[1].batch,
-			pageset->pcp[1].count);
+		printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
+			cpu, pageset->pcp.high,
+			pageset->pcp.batch, pageset->pcp.count);
 	}
 }
 
@@ -1879,6 +1874,8 @@ void show_free_areas(void)
 		printk("= %lukB\n", K(total));
 	}
 
+	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
+
 	show_swap_cache_info();
 }
 
@@ -2551,8 +2548,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	}
 }
 
-static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
-				struct zone *zone, unsigned long size)
+static void __meminit zone_init_free_lists(struct zone *zone)
 {
 	int order, t;
 	for_each_migratetype_order(order, t) {
@@ -2604,17 +2600,11 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 
 	memset(p, 0, sizeof(*p));
 
-	pcp = &p->pcp[0];		/* hot */
+	pcp = &p->pcp;
 	pcp->count = 0;
 	pcp->high = 6 * batch;
 	pcp->batch = max(1UL, 1 * batch);
 	INIT_LIST_HEAD(&pcp->list);
-
-	pcp = &p->pcp[1];		/* cold*/
-	pcp->count = 0;
-	pcp->high = 2 * batch;
-	pcp->batch = max(1UL, batch/2);
-	INIT_LIST_HEAD(&pcp->list);
 }
 
 /*
@@ -2627,7 +2617,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 {
 	struct per_cpu_pages *pcp;
 
-	pcp = &p->pcp[0]; /* hot list */
+	pcp = &p->pcp;
 	pcp->high = high;
 	pcp->batch = max(1UL, high/4);
 	if ((high/4) > (PAGE_SHIFT * 8))
@@ -2831,7 +2821,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
 	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
 
-	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+	zone_init_free_lists(zone);
 
 	return 0;
 }
@@ -3978,10 +3968,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 	int cpu = (unsigned long)hcpu;
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		local_irq_disable();
-		__drain_pages(cpu);
+		drain_pages(cpu);
+
+		/*
+		 * Spill the event counters of the dead processor
+		 * into the current processors event counters.
+		 * This artificially elevates the count of the current
+		 * processor.
+		 */
 		vm_events_fold_cpu(cpu);
-		local_irq_enable();
+
+		/*
+		 * Zero the differential counters of the dead processor
+		 * so that the vm statistics are consistent.
+		 *
+		 * This is only okay since the processor is dead and cannot
+		 * race with what we are doing.
+		 */
 		refresh_cpu_vm_stats(cpu);
 	}
 	return NOTIFY_OK;
@@ -4480,7 +4483,7 @@ int set_migratetype_isolate(struct page *page)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (!ret)
-		drain_all_local_pages();
+		drain_all_pages();
 	return ret;
 }
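To make the hot/cold change above easier to follow, here is a minimal user-space sketch (not kernel code; pcp_list, pcp_free and pcp_alloc are invented names, and the migratetype matching done by the real allocator is omitted). It models the single per-cpu list this patch leaves behind: free_hot_cold_page() now puts hot pages at the head (list_add) and cold pages at the tail (list_add_tail), and a cold allocation walks the list from the tail, as the list_for_each_entry_reverse() loop above does.

/*
 * Illustrative user-space model of the single per-CPU page list:
 * hot frees go to the head, cold frees to the tail, and a cold
 * allocation request takes pages from the tail.  Not kernel code.
 */
#include <stdio.h>

struct pcp_page {
	int id;                          /* stands in for struct page */
	struct pcp_page *prev, *next;
};

struct pcp_list {
	struct pcp_page head;            /* circular list, like struct list_head */
	int count;
};

static void pcp_init(struct pcp_list *l)
{
	l->head.prev = l->head.next = &l->head;
	l->count = 0;
}

/* Mirrors free_hot_cold_page(): cold pages are queued at the tail. */
static void pcp_free(struct pcp_list *l, struct pcp_page *p, int cold)
{
	struct pcp_page *at = cold ? l->head.prev : &l->head;

	p->next = at->next;              /* insert p right after 'at' */
	p->prev = at;
	at->next->prev = p;
	at->next = p;
	l->count++;
}

/* Mirrors the allocation path: cold requests take from the tail. */
static struct pcp_page *pcp_alloc(struct pcp_list *l, int cold)
{
	struct pcp_page *p = cold ? l->head.prev : l->head.next;

	if (p == &l->head)
		return NULL;             /* empty: the kernel would refill via rmqueue_bulk() */
	p->prev->next = p->next;         /* unlink, as list_del() would */
	p->next->prev = p->prev;
	l->count--;
	return p;
}

int main(void)
{
	struct pcp_page pages[4] = { { .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 } };
	struct pcp_list l;

	pcp_init(&l);
	pcp_free(&l, &pages[0], 0);      /* hot  -> head */
	pcp_free(&l, &pages[1], 1);      /* cold -> tail */
	pcp_free(&l, &pages[2], 0);      /* hot  -> head */
	pcp_free(&l, &pages[3], 1);      /* cold -> tail */

	printf("hot alloc  -> page %d\n", pcp_alloc(&l, 0)->id);   /* 2: most recently freed hot page */
	printf("cold alloc -> page %d\n", pcp_alloc(&l, 1)->id);   /* 3: taken from the tail */
	return 0;
}

Built with any C99 compiler, the sketch prints page 2 for the hot request and page 3 for the cold request, which is the behaviour the single-list patch relies on: one list, with temperature expressed purely by which end is used.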