From 4ed7e02222aba062bd0ed3ab12dfc8e9fc0467b5 Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Tue, 31 Jul 2012 16:43:35 -0700
Subject: mm/hotplug: mark memory hotplug code in page_alloc.c as __meminit

Mark functions used by both boot and memory hotplug as __meminit to
reduce memory footprint when memory hotplug is disabled.

Also guard zone_pcp_update() with CONFIG_MEMORY_HOTPLUG because it's
only used by memory hotplug code.

Signed-off-by: Jiang Liu
Cc: Wei Wang
Cc: Mel Gorman
Cc: Michal Hocko
Cc: Minchan Kim
Cc: Rusty Russell
Cc: Yinghai Lu
Cc: Tony Luck
Cc: KAMEZAWA Hiroyuki
Cc: KOSAKI Motohiro
Cc: David Rientjes
Cc: Keping Chen
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 66 +++++++++++++++++++++++++++++----------------------------
 1 file changed, 34 insertions(+), 32 deletions(-)

(limited to 'mm')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9c9a31665a7..667338e80e9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3411,7 +3411,7 @@ static void setup_zone_pageset(struct zone *zone);
 DEFINE_MUTEX(zonelists_mutex);
 
 /* return values int ....just for stop_machine() */
-static __init_refok int __build_all_zonelists(void *data)
+static int __build_all_zonelists(void *data)
 {
 	int nid;
 	int cpu;
@@ -3755,7 +3755,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int zone_batchsize(struct zone *zone)
+static int __meminit zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
 	int batch;
@@ -3837,7 +3837,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 		pcp->batch = PAGE_SHIFT * 8;
 }
 
-static void setup_zone_pageset(struct zone *zone)
+static void __meminit setup_zone_pageset(struct zone *zone)
 {
 	int cpu;
 
@@ -3910,33 +3910,6 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	return 0;
 }
 
-static int __zone_pcp_update(void *data)
-{
-	struct zone *zone = data;
-	int cpu;
-	unsigned long batch = zone_batchsize(zone), flags;
-
-	for_each_possible_cpu(cpu) {
-		struct per_cpu_pageset *pset;
-		struct per_cpu_pages *pcp;
-
-		pset = per_cpu_ptr(zone->pageset, cpu);
-		pcp = &pset->pcp;
-
-		local_irq_save(flags);
-		if (pcp->count > 0)
-			free_pcppages_bulk(zone, pcp->count, pcp);
-		setup_pageset(pset, batch);
-		local_irq_restore(flags);
-	}
-	return 0;
-}
-
-void zone_pcp_update(struct zone *zone)
-{
-	stop_machine(__zone_pcp_update, zone, NULL);
-}
-
 static __meminit void zone_pcp_init(struct zone *zone)
 {
 	/*
@@ -3952,7 +3925,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
 					 zone_batchsize(zone));
 }
 
-__meminit int init_currently_empty_zone(struct zone *zone,
+int __meminit init_currently_empty_zone(struct zone *zone,
 					unsigned long zone_start_pfn,
 					unsigned long size,
 					enum memmap_context context)
@@ -4765,7 +4738,7 @@ out:
 }
 
 /* Any regular memory on that node ? */
-static void check_for_regular_memory(pg_data_t *pgdat)
+static void __init check_for_regular_memory(pg_data_t *pgdat)
 {
 #ifdef CONFIG_HIGHMEM
 	enum zone_type zone_type;
@@ -5893,6 +5866,35 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 }
 #endif
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int __meminit __zone_pcp_update(void *data)
+{
+	struct zone *zone = data;
+	int cpu;
+	unsigned long batch = zone_batchsize(zone), flags;
+
+	for_each_possible_cpu(cpu) {
+		struct per_cpu_pageset *pset;
+		struct per_cpu_pages *pcp;
+
+		pset = per_cpu_ptr(zone->pageset, cpu);
+		pcp = &pset->pcp;
+
+		local_irq_save(flags);
+		if (pcp->count > 0)
+			free_pcppages_bulk(zone, pcp->count, pcp);
+		setup_pageset(pset, batch);
+		local_irq_restore(flags);
+	}
+	return 0;
+}
+
+void __meminit zone_pcp_update(struct zone *zone)
+{
+	stop_machine(__zone_pcp_update, zone, NULL);
+}
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 void zone_pcp_reset(struct zone *zone)
 {
-- 
cgit v1.2.3-18-g5258
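Background on why the annotation saves memory: __meminit does not change a function's body, only the section the linker places it in. A rough sketch of the idea follows, using a made-up __meminit_sketch macro and a hypothetical zone_batchsize_like() in place of the kernel's real definitions (which live in include/linux/init.h and the linker scripts and differ in detail):

/*
 * Simplified sketch, not the kernel's verbatim macros: tag the function
 * into a dedicated section.  When CONFIG_MEMORY_HOTPLUG is enabled the
 * linker script keeps that section as ordinary text, because hot-added
 * memory still needs the code after boot; when it is disabled the
 * section is folded into the init region that the kernel frees once
 * boot finishes, which is the footprint saving the changelog describes.
 */
#define __meminit_sketch __attribute__((__section__(".meminit.text")))

static int __meminit_sketch zone_batchsize_like(void)
{
	/* The body is unaffected; only the code's placement, and hence
	 * how long it stays resident, depends on the config option. */
	return 0;
}

zone_pcp_update(), by contrast, is reachable only from the hotplug path, so wrapping it in #ifdef CONFIG_MEMORY_HOTPLUG removes it entirely when the option is off rather than merely relocating it, which is why the patch moves it under that guard instead of only annotating it.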