Diffstat (limited to 'mm/vmstat.c')
 -rw-r--r--   mm/vmstat.c | 674
 1 file changed, 463 insertions, 211 deletions
diff --git a/mm/vmstat.c b/mm/vmstat.c index 42eac4d3321..b37bd49bfd5 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -19,6 +19,9 @@  #include <linux/math64.h>  #include <linux/writeback.h>  #include <linux/compaction.h> +#include <linux/mm_inline.h> + +#include "internal.h"  #ifdef CONFIG_VM_EVENT_COUNTERS  DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; @@ -52,7 +55,6 @@ void all_vm_events(unsigned long *ret)  }  EXPORT_SYMBOL_GPL(all_vm_events); -#ifdef CONFIG_HOTPLUG  /*   * Fold the foreign cpu events into our own.   * @@ -69,7 +71,6 @@ void vm_events_fold_cpu(int cpu)  		fold_state->event[i] = 0;  	}  } -#endif /* CONFIG_HOTPLUG */  #endif /* CONFIG_VM_EVENT_COUNTERS */ @@ -78,12 +79,36 @@ void vm_events_fold_cpu(int cpu)   *   * vm_stat contains the global counters   */ -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; +atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;  EXPORT_SYMBOL(vm_stat);  #ifdef CONFIG_SMP -static int calculate_threshold(struct zone *zone) +int calculate_pressure_threshold(struct zone *zone) +{ +	int threshold; +	int watermark_distance; + +	/* +	 * As vmstats are not up to date, there is drift between the estimated +	 * and real values. For high thresholds and a high number of CPUs, it +	 * is possible for the min watermark to be breached while the estimated +	 * value looks fine. The pressure threshold is a reduced value such +	 * that even the maximum amount of drift will not accidentally breach +	 * the min watermark +	 */ +	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); +	threshold = max(1, (int)(watermark_distance / num_online_cpus())); + +	/* +	 * Maximum threshold is 125 +	 */ +	threshold = min(125, threshold); + +	return threshold; +} + +int calculate_normal_threshold(struct zone *zone)  {  	int threshold;  	int mem;	/* memory in 128 MB units */ @@ -118,7 +143,7 @@ static int calculate_threshold(struct zone *zone)  	 * 125		1024		10	16-32 GB	9  	 */ -	mem = zone->present_pages >> (27 - PAGE_SHIFT); +	mem = zone->managed_pages >> (27 - PAGE_SHIFT);  	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); @@ -133,7 +158,7 @@ static int calculate_threshold(struct zone *zone)  /*   * Refresh the thresholds for each zone.   */ -static void refresh_zone_stat_thresholds(void) +void refresh_zone_stat_thresholds(void)  {  	struct zone *zone;  	int cpu; @@ -142,7 +167,7 @@ static void refresh_zone_stat_thresholds(void)  	for_each_populated_zone(zone) {  		unsigned long max_drift, tolerate_drift; -		threshold = calculate_threshold(zone); +		threshold = calculate_normal_threshold(zone);  		for_each_online_cpu(cpu)  			per_cpu_ptr(zone->pageset, cpu)->stat_threshold @@ -161,42 +186,52 @@ static void refresh_zone_stat_thresholds(void)  	}  } +void set_pgdat_percpu_threshold(pg_data_t *pgdat, +				int (*calculate_pressure)(struct zone *)) +{ +	struct zone *zone; +	int cpu; +	int threshold; +	int i; + +	for (i = 0; i < pgdat->nr_zones; i++) { +		zone = &pgdat->node_zones[i]; +		if (!zone->percpu_drift_mark) +			continue; + +		threshold = (*calculate_pressure)(zone); +		for_each_possible_cpu(cpu) +			per_cpu_ptr(zone->pageset, cpu)->stat_threshold +							= threshold; +	} +} +  /* - * For use when we know that interrupts are disabled. + * For use when we know that interrupts are disabled, + * or when we know that preemption is disabled and that + * particular counter cannot be updated from interrupt context.   
*/  void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,  				int delta)  { -	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); - -	s8 *p = pcp->vm_stat_diff + item; +	struct per_cpu_pageset __percpu *pcp = zone->pageset; +	s8 __percpu *p = pcp->vm_stat_diff + item;  	long x; +	long t; -	x = delta + *p; +	x = delta + __this_cpu_read(*p); -	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) { +	t = __this_cpu_read(pcp->stat_threshold); + +	if (unlikely(x > t || x < -t)) {  		zone_page_state_add(x, zone, item);  		x = 0;  	} -	*p = x; +	__this_cpu_write(*p, x);  }  EXPORT_SYMBOL(__mod_zone_page_state);  /* - * For an unknown interrupt state - */ -void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, -					int delta) -{ -	unsigned long flags; - -	local_irq_save(flags); -	__mod_zone_page_state(zone, item, delta); -	local_irq_restore(flags); -} -EXPORT_SYMBOL(mod_zone_page_state); - -/*   * Optimized increment and decrement functions.   *   * These are only for a single page and therefore can take a struct page * @@ -221,16 +256,17 @@ EXPORT_SYMBOL(mod_zone_page_state);   */  void __inc_zone_state(struct zone *zone, enum zone_stat_item item)  { -	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); -	s8 *p = pcp->vm_stat_diff + item; - -	(*p)++; +	struct per_cpu_pageset __percpu *pcp = zone->pageset; +	s8 __percpu *p = pcp->vm_stat_diff + item; +	s8 v, t; -	if (unlikely(*p > pcp->stat_threshold)) { -		int overstep = pcp->stat_threshold / 2; +	v = __this_cpu_inc_return(*p); +	t = __this_cpu_read(pcp->stat_threshold); +	if (unlikely(v > t)) { +		s8 overstep = t >> 1; -		zone_page_state_add(*p + overstep, zone, item); -		*p = -overstep; +		zone_page_state_add(v + overstep, zone, item); +		__this_cpu_write(*p, -overstep);  	}  } @@ -242,16 +278,17 @@ EXPORT_SYMBOL(__inc_zone_page_state);  void __dec_zone_state(struct zone *zone, enum zone_stat_item item)  { -	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); -	s8 *p = pcp->vm_stat_diff + item; +	struct per_cpu_pageset __percpu *pcp = zone->pageset; +	s8 __percpu *p = pcp->vm_stat_diff + item; +	s8 v, t; -	(*p)--; +	v = __this_cpu_dec_return(*p); +	t = __this_cpu_read(pcp->stat_threshold); +	if (unlikely(v < - t)) { +		s8 overstep = t >> 1; -	if (unlikely(*p < - pcp->stat_threshold)) { -		int overstep = pcp->stat_threshold / 2; - -		zone_page_state_add(*p - overstep, zone, item); -		*p = overstep; +		zone_page_state_add(v - overstep, zone, item); +		__this_cpu_write(*p, overstep);  	}  } @@ -261,6 +298,95 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)  }  EXPORT_SYMBOL(__dec_zone_page_state); +#ifdef CONFIG_HAVE_CMPXCHG_LOCAL +/* + * If we have cmpxchg_local support then we do not need to incur the overhead + * that comes with local_irq_save/restore if we use this_cpu_cmpxchg. + * + * mod_state() modifies the zone counter state through atomic per cpu + * operations. + * + * Overstep mode specifies how overstep should handled: + *     0       No overstepping + *     1       Overstepping half of threshold + *     -1      Overstepping minus half of threshold +*/ +static inline void mod_state(struct zone *zone, +       enum zone_stat_item item, int delta, int overstep_mode) +{ +	struct per_cpu_pageset __percpu *pcp = zone->pageset; +	s8 __percpu *p = pcp->vm_stat_diff + item; +	long o, n, t, z; + +	do { +		z = 0;  /* overflow to zone counters */ + +		/* +		 * The fetching of the stat_threshold is racy. 
We may apply +		 * a counter threshold to the wrong the cpu if we get +		 * rescheduled while executing here. However, the next +		 * counter update will apply the threshold again and +		 * therefore bring the counter under the threshold again. +		 * +		 * Most of the time the thresholds are the same anyways +		 * for all cpus in a zone. +		 */ +		t = this_cpu_read(pcp->stat_threshold); + +		o = this_cpu_read(*p); +		n = delta + o; + +		if (n > t || n < -t) { +			int os = overstep_mode * (t >> 1) ; + +			/* Overflow must be added to zone counters */ +			z = n + os; +			n = -os; +		} +	} while (this_cpu_cmpxchg(*p, o, n) != o); + +	if (z) +		zone_page_state_add(z, zone, item); +} + +void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, +					int delta) +{ +	mod_state(zone, item, delta, 0); +} +EXPORT_SYMBOL(mod_zone_page_state); + +void inc_zone_state(struct zone *zone, enum zone_stat_item item) +{ +	mod_state(zone, item, 1, 1); +} + +void inc_zone_page_state(struct page *page, enum zone_stat_item item) +{ +	mod_state(page_zone(page), item, 1, 1); +} +EXPORT_SYMBOL(inc_zone_page_state); + +void dec_zone_page_state(struct page *page, enum zone_stat_item item) +{ +	mod_state(page_zone(page), item, -1, -1); +} +EXPORT_SYMBOL(dec_zone_page_state); +#else +/* + * Use interrupt disable to serialize counter updates + */ +void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, +					int delta) +{ +	unsigned long flags; + +	local_irq_save(flags); +	__mod_zone_page_state(zone, item, delta); +	local_irq_restore(flags); +} +EXPORT_SYMBOL(mod_zone_page_state); +  void inc_zone_state(struct zone *zone, enum zone_stat_item item)  {  	unsigned long flags; @@ -291,13 +417,19 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)  	local_irq_restore(flags);  }  EXPORT_SYMBOL(dec_zone_page_state); +#endif + +static inline void fold_diff(int *diff) +{ +	int i; + +	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) +		if (diff[i]) +			atomic_long_add(diff[i], &vm_stat[i]); +}  /* - * Update the zone counters for one cpu. - * - * The cpu specified must be either the current cpu or a processor that - * is not online. If it is the current cpu then the execution thread must - * be pinned to the current cpu. + * Update the zone counters for the current cpu.   *   * Note that refresh_cpu_vm_stats strives to only access   * node local memory. The per cpu pagesets on remote zones are placed @@ -310,33 +442,29 @@ EXPORT_SYMBOL(dec_zone_page_state);   * with the global counters. These could cause remote node cache line   * bouncing and will have to be only done when necessary.   
*/ -void refresh_cpu_vm_stats(int cpu) +static void refresh_cpu_vm_stats(void)  {  	struct zone *zone;  	int i;  	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };  	for_each_populated_zone(zone) { -		struct per_cpu_pageset *p; +		struct per_cpu_pageset __percpu *p = zone->pageset; -		p = per_cpu_ptr(zone->pageset, cpu); +		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { +			int v; -		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) -			if (p->vm_stat_diff[i]) { -				unsigned long flags; -				int v; +			v = this_cpu_xchg(p->vm_stat_diff[i], 0); +			if (v) { -				local_irq_save(flags); -				v = p->vm_stat_diff[i]; -				p->vm_stat_diff[i] = 0; -				local_irq_restore(flags);  				atomic_long_add(v, &zone->vm_stat[i]);  				global_diff[i] += v;  #ifdef CONFIG_NUMA  				/* 3 seconds idle till flush */ -				p->expire = 3; +				__this_cpu_write(p->expire, 3);  #endif  			} +		}  		cond_resched();  #ifdef CONFIG_NUMA  		/* @@ -346,31 +474,75 @@ void refresh_cpu_vm_stats(int cpu)  		 * Check if there are pages remaining in this pageset  		 * if not then there is nothing to expire.  		 */ -		if (!p->expire || !p->pcp.count) +		if (!__this_cpu_read(p->expire) || +			       !__this_cpu_read(p->pcp.count))  			continue;  		/*  		 * We never drain zones local to this processor.  		 */  		if (zone_to_nid(zone) == numa_node_id()) { -			p->expire = 0; +			__this_cpu_write(p->expire, 0);  			continue;  		} -		p->expire--; -		if (p->expire) + +		if (__this_cpu_dec_return(p->expire))  			continue; -		if (p->pcp.count) -			drain_zone_pages(zone, &p->pcp); +		if (__this_cpu_read(p->pcp.count)) +			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));  #endif  	} +	fold_diff(global_diff); +} -	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) -		if (global_diff[i]) -			atomic_long_add(global_diff[i], &vm_stat[i]); +/* + * Fold the data for an offline cpu into the global array. + * There cannot be any access by the offline cpu and therefore + * synchronization is simplified. + */ +void cpu_vm_stats_fold(int cpu) +{ +	struct zone *zone; +	int i; +	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; + +	for_each_populated_zone(zone) { +		struct per_cpu_pageset *p; + +		p = per_cpu_ptr(zone->pageset, cpu); + +		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) +			if (p->vm_stat_diff[i]) { +				int v; + +				v = p->vm_stat_diff[i]; +				p->vm_stat_diff[i] = 0; +				atomic_long_add(v, &zone->vm_stat[i]); +				global_diff[i] += v; +			} +	} + +	fold_diff(global_diff);  } +/* + * this is only called if !populated_zone(zone), which implies no other users of + * pset->vm_stat_diff[] exsist. + */ +void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) +{ +	int i; + +	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) +		if (pset->vm_stat_diff[i]) { +			int v = pset->vm_stat_diff[i]; +			pset->vm_stat_diff[i] = 0; +			atomic_long_add(v, &zone->vm_stat[i]); +			atomic_long_add(v, &vm_stat[i]); +		} +}  #endif  #ifdef CONFIG_NUMA @@ -379,8 +551,12 @@ void refresh_cpu_vm_stats(int cpu)   * z 	    = the zone from which the allocation occurred.   *   * Must be called with interrupts disabled. + * + * When __GFP_OTHER_NODE is set assume the node of the preferred + * zone is the local node. This is useful for daemons who allocate + * memory on behalf of other processes.   
*/ -void zone_statistics(struct zone *preferred_zone, struct zone *z) +void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)  {  	if (z->zone_pgdat == preferred_zone->zone_pgdat) {  		__inc_zone_state(z, NUMA_HIT); @@ -388,7 +564,8 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z)  		__inc_zone_state(z, NUMA_MISS);  		__inc_zone_state(preferred_zone, NUMA_FOREIGN);  	} -	if (z->node == numa_node_id()) +	if (z->node == ((flags & __GFP_OTHER_NODE) ? +			preferred_zone->node : numa_node_id()))  		__inc_zone_state(z, NUMA_LOCAL);  	else  		__inc_zone_state(z, NUMA_OTHER); @@ -484,7 +661,12 @@ static char * const migratetype_names[MIGRATE_TYPES] = {  	"Reclaimable",  	"Movable",  	"Reserve", +#ifdef CONFIG_CMA +	"CMA", +#endif +#ifdef CONFIG_MEMORY_ISOLATION  	"Isolate", +#endif  };  static void *frag_start(struct seq_file *m, loff_t *pos) @@ -530,6 +712,171 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,  }  #endif +#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA) +#ifdef CONFIG_ZONE_DMA +#define TEXT_FOR_DMA(xx) xx "_dma", +#else +#define TEXT_FOR_DMA(xx) +#endif + +#ifdef CONFIG_ZONE_DMA32 +#define TEXT_FOR_DMA32(xx) xx "_dma32", +#else +#define TEXT_FOR_DMA32(xx) +#endif + +#ifdef CONFIG_HIGHMEM +#define TEXT_FOR_HIGHMEM(xx) xx "_high", +#else +#define TEXT_FOR_HIGHMEM(xx) +#endif + +#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ +					TEXT_FOR_HIGHMEM(xx) xx "_movable", + +const char * const vmstat_text[] = { +	/* Zoned VM counters */ +	"nr_free_pages", +	"nr_alloc_batch", +	"nr_inactive_anon", +	"nr_active_anon", +	"nr_inactive_file", +	"nr_active_file", +	"nr_unevictable", +	"nr_mlock", +	"nr_anon_pages", +	"nr_mapped", +	"nr_file_pages", +	"nr_dirty", +	"nr_writeback", +	"nr_slab_reclaimable", +	"nr_slab_unreclaimable", +	"nr_page_table_pages", +	"nr_kernel_stack", +	"nr_unstable", +	"nr_bounce", +	"nr_vmscan_write", +	"nr_vmscan_immediate_reclaim", +	"nr_writeback_temp", +	"nr_isolated_anon", +	"nr_isolated_file", +	"nr_shmem", +	"nr_dirtied", +	"nr_written", + +#ifdef CONFIG_NUMA +	"numa_hit", +	"numa_miss", +	"numa_foreign", +	"numa_interleave", +	"numa_local", +	"numa_other", +#endif +	"workingset_refault", +	"workingset_activate", +	"workingset_nodereclaim", +	"nr_anon_transparent_hugepages", +	"nr_free_cma", +	"nr_dirty_threshold", +	"nr_dirty_background_threshold", + +#ifdef CONFIG_VM_EVENT_COUNTERS +	"pgpgin", +	"pgpgout", +	"pswpin", +	"pswpout", + +	TEXTS_FOR_ZONES("pgalloc") + +	"pgfree", +	"pgactivate", +	"pgdeactivate", + +	"pgfault", +	"pgmajfault", + +	TEXTS_FOR_ZONES("pgrefill") +	TEXTS_FOR_ZONES("pgsteal_kswapd") +	TEXTS_FOR_ZONES("pgsteal_direct") +	TEXTS_FOR_ZONES("pgscan_kswapd") +	TEXTS_FOR_ZONES("pgscan_direct") +	"pgscan_direct_throttle", + +#ifdef CONFIG_NUMA +	"zone_reclaim_failed", +#endif +	"pginodesteal", +	"slabs_scanned", +	"kswapd_inodesteal", +	"kswapd_low_wmark_hit_quickly", +	"kswapd_high_wmark_hit_quickly", +	"pageoutrun", +	"allocstall", + +	"pgrotated", + +	"drop_pagecache", +	"drop_slab", + +#ifdef CONFIG_NUMA_BALANCING +	"numa_pte_updates", +	"numa_huge_pte_updates", +	"numa_hint_faults", +	"numa_hint_faults_local", +	"numa_pages_migrated", +#endif +#ifdef CONFIG_MIGRATION +	"pgmigrate_success", +	"pgmigrate_fail", +#endif +#ifdef CONFIG_COMPACTION +	"compact_migrate_scanned", +	"compact_free_scanned", +	"compact_isolated", +	"compact_stall", +	"compact_fail", +	"compact_success", +#endif + +#ifdef CONFIG_HUGETLB_PAGE +	
"htlb_buddy_alloc_success", +	"htlb_buddy_alloc_fail", +#endif +	"unevictable_pgs_culled", +	"unevictable_pgs_scanned", +	"unevictable_pgs_rescued", +	"unevictable_pgs_mlocked", +	"unevictable_pgs_munlocked", +	"unevictable_pgs_cleared", +	"unevictable_pgs_stranded", + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +	"thp_fault_alloc", +	"thp_fault_fallback", +	"thp_collapse_alloc", +	"thp_collapse_alloc_failed", +	"thp_split", +	"thp_zero_page_alloc", +	"thp_zero_page_alloc_failed", +#endif +#ifdef CONFIG_DEBUG_TLBFLUSH +#ifdef CONFIG_SMP +	"nr_tlb_remote_flush", +	"nr_tlb_remote_flush_received", +#endif /* CONFIG_SMP */ +	"nr_tlb_local_flush_all", +	"nr_tlb_local_flush_one", +#endif /* CONFIG_DEBUG_TLBFLUSH */ + +#ifdef CONFIG_DEBUG_VM_VMACACHE +	"vmacache_find_calls", +	"vmacache_find_hits", +#endif +#endif /* CONFIG_VM_EVENTS_COUNTERS */ +}; +#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ + +  #ifdef CONFIG_PROC_FS  static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,  						struct zone *zone) @@ -600,7 +947,7 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,  	int mtype;  	unsigned long pfn;  	unsigned long start_pfn = zone->zone_start_pfn; -	unsigned long end_pfn = start_pfn + zone->spanned_pages; +	unsigned long end_pfn = zone_end_pfn(zone);  	unsigned long count[MIGRATE_TYPES] = { 0, };  	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { @@ -652,7 +999,7 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)  	pg_data_t *pgdat = (pg_data_t *)arg;  	/* check memoryless node */ -	if (!node_state(pgdat->node_id, N_HIGH_MEMORY)) +	if (!node_state(pgdat->node_id, N_MEMORY))  		return 0;  	seq_printf(m, "Page block order: %d\n", pageblock_order); @@ -702,125 +1049,6 @@ static const struct file_operations pagetypeinfo_file_ops = {  	.release	= seq_release,  }; -#ifdef CONFIG_ZONE_DMA -#define TEXT_FOR_DMA(xx) xx "_dma", -#else -#define TEXT_FOR_DMA(xx) -#endif - -#ifdef CONFIG_ZONE_DMA32 -#define TEXT_FOR_DMA32(xx) xx "_dma32", -#else -#define TEXT_FOR_DMA32(xx) -#endif - -#ifdef CONFIG_HIGHMEM -#define TEXT_FOR_HIGHMEM(xx) xx "_high", -#else -#define TEXT_FOR_HIGHMEM(xx) -#endif - -#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ -					TEXT_FOR_HIGHMEM(xx) xx "_movable", - -static const char * const vmstat_text[] = { -	/* Zoned VM counters */ -	"nr_free_pages", -	"nr_inactive_anon", -	"nr_active_anon", -	"nr_inactive_file", -	"nr_active_file", -	"nr_unevictable", -	"nr_mlock", -	"nr_anon_pages", -	"nr_mapped", -	"nr_file_pages", -	"nr_dirty", -	"nr_writeback", -	"nr_slab_reclaimable", -	"nr_slab_unreclaimable", -	"nr_page_table_pages", -	"nr_kernel_stack", -	"nr_unstable", -	"nr_bounce", -	"nr_vmscan_write", -	"nr_writeback_temp", -	"nr_isolated_anon", -	"nr_isolated_file", -	"nr_shmem", -	"nr_dirtied", -	"nr_written", -	"nr_dirty_threshold", -	"nr_dirty_background_threshold", - -#ifdef CONFIG_NUMA -	"numa_hit", -	"numa_miss", -	"numa_foreign", -	"numa_interleave", -	"numa_local", -	"numa_other", -#endif - -#ifdef CONFIG_VM_EVENT_COUNTERS -	"pgpgin", -	"pgpgout", -	"pswpin", -	"pswpout", - -	TEXTS_FOR_ZONES("pgalloc") - -	"pgfree", -	"pgactivate", -	"pgdeactivate", - -	"pgfault", -	"pgmajfault", - -	TEXTS_FOR_ZONES("pgrefill") -	TEXTS_FOR_ZONES("pgsteal") -	TEXTS_FOR_ZONES("pgscan_kswapd") -	TEXTS_FOR_ZONES("pgscan_direct") - -#ifdef CONFIG_NUMA -	"zone_reclaim_failed", -#endif -	"pginodesteal", -	"slabs_scanned", -	"kswapd_steal", -	"kswapd_inodesteal", -	"kswapd_low_wmark_hit_quickly", -	
"kswapd_high_wmark_hit_quickly", -	"kswapd_skip_congestion_wait", -	"pageoutrun", -	"allocstall", - -	"pgrotated", - -#ifdef CONFIG_COMPACTION -	"compact_blocks_moved", -	"compact_pages_moved", -	"compact_pagemigrate_failed", -	"compact_stall", -	"compact_fail", -	"compact_success", -#endif - -#ifdef CONFIG_HUGETLB_PAGE -	"htlb_buddy_alloc_success", -	"htlb_buddy_alloc_fail", -#endif -	"unevictable_pgs_culled", -	"unevictable_pgs_scanned", -	"unevictable_pgs_rescued", -	"unevictable_pgs_mlocked", -	"unevictable_pgs_munlocked", -	"unevictable_pgs_cleared", -	"unevictable_pgs_stranded", -	"unevictable_pgs_mlockfreed", -#endif -}; -  static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,  							struct zone *zone)  { @@ -833,14 +1061,16 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,  		   "\n        high     %lu"  		   "\n        scanned  %lu"  		   "\n        spanned  %lu" -		   "\n        present  %lu", -		   zone_nr_free_pages(zone), +		   "\n        present  %lu" +		   "\n        managed  %lu", +		   zone_page_state(zone, NR_FREE_PAGES),  		   min_wmark_pages(zone),  		   low_wmark_pages(zone),  		   high_wmark_pages(zone),  		   zone->pages_scanned,  		   zone->spanned_pages, -		   zone->present_pages); +		   zone->present_pages, +		   zone->managed_pages);  	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)  		seq_printf(m, "\n    %-12s %lu", vmstat_text[i], @@ -876,7 +1106,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,  		   "\n  all_unreclaimable: %u"  		   "\n  start_pfn:         %lu"  		   "\n  inactive_ratio:    %u", -		   zone->all_unreclaimable, +		   !zone_reclaimable(zone),  		   zone->zone_start_pfn,  		   zone->inactive_ratio);  	seq_putc(m, '\n'); @@ -1001,24 +1231,38 @@ int sysctl_stat_interval __read_mostly = HZ;  static void vmstat_update(struct work_struct *w)  { -	refresh_cpu_vm_stats(smp_processor_id()); -	schedule_delayed_work(&__get_cpu_var(vmstat_work), +	refresh_cpu_vm_stats(); +	schedule_delayed_work(this_cpu_ptr(&vmstat_work),  		round_jiffies_relative(sysctl_stat_interval));  } -static void __cpuinit start_cpu_timer(int cpu) +static void start_cpu_timer(int cpu)  {  	struct delayed_work *work = &per_cpu(vmstat_work, cpu); -	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update); +	INIT_DEFERRABLE_WORK(work, vmstat_update);  	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));  } +static void vmstat_cpu_dead(int node) +{ +	int cpu; + +	get_online_cpus(); +	for_each_online_cpu(cpu) +		if (cpu_to_node(cpu) == node) +			goto end; + +	node_clear_state(node, N_CPU); +end: +	put_online_cpus(); +} +  /*   * Use the cpu notifier to insure that the thresholds are recalculated   * when necessary.   
*/ -static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, +static int vmstat_cpuup_callback(struct notifier_block *nfb,  		unsigned long action,  		void *hcpu)  { @@ -1033,7 +1277,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,  		break;  	case CPU_DOWN_PREPARE:  	case CPU_DOWN_PREPARE_FROZEN: -		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu)); +		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));  		per_cpu(vmstat_work, cpu).work.func = NULL;  		break;  	case CPU_DOWN_FAILED: @@ -1043,6 +1287,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,  	case CPU_DEAD:  	case CPU_DEAD_FROZEN:  		refresh_zone_stat_thresholds(); +		vmstat_cpu_dead(cpu_to_node(cpu));  		break;  	default:  		break; @@ -1050,7 +1295,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,  	return NOTIFY_OK;  } -static struct notifier_block __cpuinitdata vmstat_notifier = +static struct notifier_block vmstat_notifier =  	{ &vmstat_cpuup_callback, NULL, 0 };  #endif @@ -1059,11 +1304,14 @@ static int __init setup_vmstat(void)  #ifdef CONFIG_SMP  	int cpu; -	refresh_zone_stat_thresholds(); -	register_cpu_notifier(&vmstat_notifier); +	cpu_notifier_register_begin(); +	__register_cpu_notifier(&vmstat_notifier); -	for_each_online_cpu(cpu) +	for_each_online_cpu(cpu) {  		start_cpu_timer(cpu); +		node_set_state(cpu_to_node(cpu), N_CPU); +	} +	cpu_notifier_register_done();  #endif  #ifdef CONFIG_PROC_FS  	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); @@ -1078,7 +1326,6 @@ module_init(setup_vmstat)  #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)  #include <linux/debugfs.h> -static struct dentry *extfrag_debug_root;  /*   * Return an index indicating how much of the available free memory is @@ -1135,7 +1382,7 @@ static int unusable_show(struct seq_file *m, void *arg)  	pg_data_t *pgdat = (pg_data_t *)arg;  	/* check memoryless node */ -	if (!node_state(pgdat->node_id, N_HIGH_MEMORY)) +	if (!node_state(pgdat->node_id, N_MEMORY))  		return 0;  	walk_zones_in_node(m, pgdat, unusable_show_print); @@ -1216,19 +1463,24 @@ static const struct file_operations extfrag_file_ops = {  static int __init extfrag_debug_init(void)  { +	struct dentry *extfrag_debug_root; +  	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);  	if (!extfrag_debug_root)  		return -ENOMEM;  	if (!debugfs_create_file("unusable_index", 0444,  			extfrag_debug_root, NULL, &unusable_file_ops)) -		return -ENOMEM; +		goto fail;  	if (!debugfs_create_file("extfrag_index", 0444,  			extfrag_debug_root, NULL, &extfrag_file_ops)) -		return -ENOMEM; +		goto fail;  	return 0; +fail: +	debugfs_remove_recursive(extfrag_debug_root); +	return -ENOMEM;  }  module_init(extfrag_debug_init);  | 
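The hunks above split the old calculate_threshold() into calculate_normal_threshold() and a new calculate_pressure_threshold(), which keeps the allowed per-cpu drift small enough that the min watermark cannot be breached unnoticed while reclaim is running. The following is a standalone userspace sketch of those two formulas only, not the kernel code: struct toy_zone, the watermark numbers, the page shift and the CPU count are illustrative assumptions, and fls() is open-coded.

/*
 * Sketch of the two threshold formulas from the patch.  "struct toy_zone"
 * is a stand-in carrying only the fields the formulas need.
 */
#include <stdio.h>

struct toy_zone {
	unsigned long managed_pages;	/* pages backing the zone */
	unsigned long min_wmark;	/* min watermark, in pages */
	unsigned long low_wmark;	/* low watermark, in pages */
};

/* find last (most significant) set bit, 1-based; fls(0) == 0 */
static int fls_ulong(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int calculate_normal_threshold(struct toy_zone *z, int online_cpus,
				      int page_shift)
{
	int mem = z->managed_pages >> (27 - page_shift);	/* 128MB units */
	int threshold = 2 * fls_ulong(online_cpus) * (1 + fls_ulong(mem));

	return threshold > 125 ? 125 : threshold;		/* cap at 125 */
}

static int calculate_pressure_threshold(struct toy_zone *z, int online_cpus)
{
	/* spread the low->min watermark gap across all CPUs */
	int threshold = (int)(z->low_wmark - z->min_wmark) / online_cpus;

	if (threshold < 1)
		threshold = 1;
	return threshold > 125 ? 125 : threshold;		/* same cap */
}

int main(void)
{
	struct toy_zone z = {
		.managed_pages = 4UL << 18,	/* roughly 4GB of 4KB pages */
		.min_wmark = 5741,
		.low_wmark = 7176,
	};

	printf("normal threshold:   %d\n", calculate_normal_threshold(&z, 8, 12));
	printf("pressure threshold: %d\n", calculate_pressure_threshold(&z, 8));
	return 0;
}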
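__mod_zone_page_state() and __inc_zone_state()/__dec_zone_state() keep a small signed per-cpu delta and only touch the shared zone counter once that delta crosses stat_threshold; the inc/dec paths additionally overstep by half a threshold so that a run of increments does not fold on every call. Below is a single-CPU userspace model of that bookkeeping, a sketch under the assumption of one uncontended counter slot; names such as pcp_counter are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long global_counter;	/* plays the role of zone->vm_stat[item] */

struct pcp_counter {
	signed char diff;		/* per-cpu delta, like vm_stat_diff[item] */
	signed char stat_threshold;
};

/* Fold a delta: only touch the shared counter when the drift gets too big. */
static void mod_counter(struct pcp_counter *pcp, int delta)
{
	long x = delta + pcp->diff;
	long t = pcp->stat_threshold;

	if (x > t || x < -t) {
		atomic_fetch_add(&global_counter, x);
		x = 0;
	}
	pcp->diff = (signed char)x;
}

/* Increment with overstep: leave the diff at -threshold/2 after folding. */
static void inc_counter(struct pcp_counter *pcp)
{
	signed char v = ++pcp->diff;
	signed char t = pcp->stat_threshold;

	if (v > t) {
		signed char overstep = t / 2;

		atomic_fetch_add(&global_counter, v + overstep);
		pcp->diff = -overstep;
	}
}

int main(void)
{
	struct pcp_counter pcp = { .diff = 0, .stat_threshold = 32 };

	for (int i = 0; i < 100; i++)
		inc_counter(&pcp);
	mod_counter(&pcp, -5);

	/* global plus the local diff is the approximate value readers see */
	printf("global=%ld local=%d approx=%ld\n",
	       atomic_load(&global_counter), pcp.diff,
	       atomic_load(&global_counter) + pcp.diff);
	return 0;
}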
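Under CONFIG_HAVE_CMPXCHG_LOCAL the patch replaces the local_irq_save() variants of mod_zone_page_state(), inc_zone_page_state() and dec_zone_page_state() with mod_state(), which retries a this_cpu_cmpxchg() until the per-cpu delta has been updated consistently and only then pushes any overflow into the zone counter. The sketch below mimics that retry loop with a C11 atomic standing in for the per-cpu slot, and overstep_mode follows the 0/+1/-1 convention documented in the diff; it illustrates the algorithm, not the kernel's per-cpu primitives.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long zone_counter;		/* global zone->vm_stat[item] */
static _Atomic signed char pcp_diff;		/* per-cpu vm_stat_diff[item] */
static signed char stat_threshold = 32;

static void mod_state(int delta, int overstep_mode)
{
	signed char o, n;
	long z;

	do {
		long t = stat_threshold;

		z = 0;				/* overflow for the global counter */
		o = atomic_load(&pcp_diff);
		n = (signed char)(delta + o);

		if (n > t || n < -t) {
			int os = overstep_mode * (int)(t / 2);

			z = n + os;		/* push the overflow out ... */
			n = (signed char)-os;	/* ... and keep a small residue */
		}
	} while (!atomic_compare_exchange_weak(&pcp_diff, &o, n));

	if (z)
		atomic_fetch_add(&zone_counter, z);
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		mod_state(+1, 1);		/* inc_zone_page_state() analogue */
	for (int i = 0; i < 200; i++)
		mod_state(-1, -1);		/* dec_zone_page_state() analogue */

	printf("global=%ld residue=%d total=%ld\n",
	       atomic_load(&zone_counter), (int)atomic_load(&pcp_diff),
	       atomic_load(&zone_counter) + atomic_load(&pcp_diff));
	return 0;
}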
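refresh_cpu_vm_stats(), cpu_vm_stats_fold() and drain_zonestat() all end the same way: every non-zero per-cpu delta is added to its zone counter and collected into a local global_diff[] array, and fold_diff() then applies that array to vm_stat[] so each global counter is touched at most once. A compressed model of that two-level fold follows (one zone, two fake CPUs, illustrative values, and no locking because it models the offline-CPU case where no races are possible).

#include <stdio.h>

#define NR_ITEMS 4
#define NR_CPUS  2

static long vm_stat[NR_ITEMS];				/* global counters */
static long zone_stat[NR_ITEMS];			/* one zone, for brevity */
static int  vm_stat_diff[NR_CPUS][NR_ITEMS] = {		/* per-cpu deltas */
	{ 3, 0, -2,  1 },
	{ 0, 5,  0, -1 },
};

static void fold_diff(const int *diff)
{
	for (int i = 0; i < NR_ITEMS; i++)
		if (diff[i])
			vm_stat[i] += diff[i];
}

static void cpu_vm_stats_fold(int cpu)
{
	int global_diff[NR_ITEMS] = { 0 };

	for (int i = 0; i < NR_ITEMS; i++) {
		int v = vm_stat_diff[cpu][i];

		if (!v)
			continue;
		vm_stat_diff[cpu][i] = 0;	/* CPU is offline: no concurrent users */
		zone_stat[i] += v;
		global_diff[i] += v;
	}
	fold_diff(global_diff);
}

int main(void)
{
	cpu_vm_stats_fold(0);
	cpu_vm_stats_fold(1);

	for (int i = 0; i < NR_ITEMS; i++)
		printf("item %d: zone=%ld global=%ld\n", i, zone_stat[i], vm_stat[i]);
	return 0;
}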
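Finally, the zone_statistics() change makes the NUMA_LOCAL/NUMA_OTHER accounting depend on the caller: with __GFP_OTHER_NODE set, "local" is judged from the preferred zone's node rather than the allocating CPU's node, so a daemon allocating on behalf of a process on another node does not skew the numbers. A toy decision function is shown below; the flag value and node ids are stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define GFP_OTHER_NODE 0x1u	/* stand-in for __GFP_OTHER_NODE */

/* Return true if the allocation should be counted as NUMA_LOCAL. */
static bool counts_as_local(int zone_node, int preferred_node,
			    int this_cpu_node, unsigned int flags)
{
	int local_node = (flags & GFP_OTHER_NODE) ? preferred_node
						  : this_cpu_node;
	return zone_node == local_node;
}

int main(void)
{
	/* daemon on node 0 allocating for a process whose preferred node is 1 */
	printf("plain:      %s\n",
	       counts_as_local(1, 1, 0, 0) ? "NUMA_LOCAL" : "NUMA_OTHER");
	printf("other_node: %s\n",
	       counts_as_local(1, 1, 0, GFP_OTHER_NODE) ? "NUMA_LOCAL" : "NUMA_OTHER");
	return 0;
}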
