| author | Wim Van Sebroeck <wim@iguana.be> | 2007-05-11 19:03:13 +0000 | 
|---|---|---|
| committer | Wim Van Sebroeck <wim@iguana.be> | 2007-05-11 19:03:13 +0000 | 
| commit | 5c34202b8bf942da411b6599668a76b07449bbfd | |
| tree | 5719c361321eaddc8e4f1b0c8a7994f0e9a6fdd3 /mm/page_alloc.c | |
| parent | 0d4804b31f91cfbcff6d62af0bc09a893a1c8ae0 | |
| parent | 1f8a6b658a943b4f04a1fc7b3a420360202c86cd | |
Merge /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'mm/page_alloc.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/page_alloc.c | 130 |

1 file changed, 61 insertions, 69 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 353ce9039a8..ae96dd84443 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -103,7 +103,7 @@ int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
-static unsigned long __initdata dma_reserve;
+static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
   /*
@@ -126,10 +126,10 @@ static unsigned long __initdata dma_reserve;
     #endif
   #endif
 
-  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
-  int __initdata nr_nodemap_entries;
-  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+  struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
+  int __meminitdata nr_nodemap_entries;
+  unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+  unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
   unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
   unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
@@ -156,10 +156,8 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 static int page_is_consistent(struct zone *zone, struct page *page)
 {
-#ifdef CONFIG_HOLES_IN_ZONE
-	if (!pfn_valid(page_to_pfn(page)))
+	if (!pfn_valid_within(page_to_pfn(page)))
 		return 0;
-#endif
 	if (zone != page_zone(page))
 		return 0;
 
@@ -227,7 +225,7 @@ static void bad_page(struct page *page)
 
 static void free_compound_page(struct page *page)
 {
-	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
+	__free_pages_ok(page, compound_order(page));
 }
 
 static void prep_compound_page(struct page *page, unsigned long order)
@@ -236,12 +234,13 @@ static void prep_compound_page(struct page *page, unsigned long order)
 	int nr_pages = 1 << order;
 
 	set_compound_page_dtor(page, free_compound_page);
-	page[1].lru.prev = (void *)order;
-	for (i = 0; i < nr_pages; i++) {
+	set_compound_order(page, order);
+	__SetPageHead(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		__SetPageCompound(p);
-		set_page_private(p, (unsigned long)page);
+		__SetPageTail(p);
+		p->first_page = page;
 	}
 }
@@ -250,16 +249,19 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	int i;
 	int nr_pages = 1 << order;
 
-	if (unlikely((unsigned long)page[1].lru.prev != order))
+	if (unlikely(compound_order(page) != order))
 		bad_page(page);
 
-	for (i = 0; i < nr_pages; i++) {
+	if (unlikely(!PageHead(page)))
+			bad_page(page);
+	__ClearPageHead(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageCompound(p) |
-				(page_private(p) != (unsigned long)page)))
+		if (unlikely(!PageTail(p) |
+				(p->first_page != page)))
 			bad_page(page);
-		__ClearPageCompound(p);
+		__ClearPageTail(p);
 	}
 }
@@ -346,10 +348,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
 static inline int page_is_buddy(struct page *page, struct page *buddy,
 								int order)
 {
-#ifdef CONFIG_HOLES_IN_ZONE
-	if (!pfn_valid(page_to_pfn(buddy)))
+	if (!pfn_valid_within(page_to_pfn(buddy)))
 		return 0;
-#endif
 
 	if (page_zone_id(page) != page_zone_id(buddy))
 		return 0;
@@ -433,13 +433,18 @@ static inline int free_pages_check(struct page *page)
 			1 << PG_private |
 			1 << PG_locked	|
 			1 << PG_active	|
-			1 << PG_reclaim	|
 			1 << PG_slab	|
 			1 << PG_swapcache |
 			1 << PG_writeback |
 			1 << PG_reserved |
 			1 << PG_buddy ))))
 		bad_page(page);
+	/*
+	 * PageReclaim == PageTail. It is only an error
+	 * for PageReclaim to be set if PageCompound is clear.
+	 */
+	if (unlikely(!PageCompound(page) && PageReclaim(page)))
+		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
 	/*
@@ -665,7 +670,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 }
 
 #if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly;
+int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 
 /*
@@ -686,43 +691,26 @@ static void __init setup_nr_node_ids(void) {}
 
 #ifdef CONFIG_NUMA
 /*
- * Called from the slab reaper to drain pagesets on a particular node that
- * belongs to the currently executing processor.
+ * Called from the vmstat counter updater to drain pagesets of this
+ * currently executing processor on remote nodes after they have
+ * expired.
+ *
  * Note that this function must be called with the thread pinned to
  * a single processor.
  */
-void drain_node_pages(int nodeid)
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
-	int i;
-	enum zone_type z;
 	unsigned long flags;
+	int to_drain;
 
-	for (z = 0; z < MAX_NR_ZONES; z++) {
-		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
-		struct per_cpu_pageset *pset;
-
-		if (!populated_zone(zone))
-			continue;
-
-		pset = zone_pcp(zone, smp_processor_id());
-		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-			struct per_cpu_pages *pcp;
-
-			pcp = &pset->pcp[i];
-			if (pcp->count) {
-				int to_drain;
-
-				local_irq_save(flags);
-				if (pcp->count >= pcp->batch)
-					to_drain = pcp->batch;
-				else
-					to_drain = pcp->count;
-				free_pages_bulk(zone, to_drain, &pcp->list, 0);
-				pcp->count -= to_drain;
-				local_irq_restore(flags);
-			}
-		}
-	}
+	local_irq_save(flags);
+	if (pcp->count >= pcp->batch)
+		to_drain = pcp->batch;
+	else
+		to_drain = pcp->count;
+	free_pages_bulk(zone, to_drain, &pcp->list, 0);
+	pcp->count -= to_drain;
+	local_irq_restore(flags);
 }
 #endif
@@ -770,8 +758,8 @@ void mark_free_pages(struct zone *zone)
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
 
-			if (!PageNosave(page))
-				ClearPageNosaveFree(page);
+			if (!swsusp_page_is_forbidden(page))
+				swsusp_unset_page_free(page);
 		}
 
 	for (order = MAX_ORDER - 1; order >= 0; --order)
@@ -780,7 +768,7 @@ void mark_free_pages(struct zone *zone)
 
 			pfn = page_to_pfn(list_entry(curr, struct page, lru));
 			for (i = 0; i < (1UL << order); i++)
-				SetPageNosaveFree(pfn_to_page(pfn + i));
+				swsusp_set_page_free(pfn_to_page(pfn + i));
 		}
 
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -2143,11 +2131,14 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		if (process_zones(cpu))
 			ret = NOTIFY_BAD;
 		break;
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		free_zone_pagesets(cpu);
 		break;
 	default:
@@ -2174,7 +2165,7 @@ void __init setup_per_cpu_pageset(void)
 
 #endif
 
-static __meminit
+static __meminit noinline
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
@@ -2262,7 +2253,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
  * Basic iterator support. Return the first range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns first region regardless of node
  */
-static int __init first_active_region_index_in_nid(int nid)
+static int __meminit first_active_region_index_in_nid(int nid)
 {
 	int i;
 
@@ -2277,7 +2268,7 @@ static int __init first_active_region_index_in_nid(int nid)
  * Basic iterator support. Return the next active range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns next region regardles of node
  */
-static int __init next_active_region_index_in_nid(int index, int nid)
+static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
 	for (index = index + 1; index < nr_nodemap_entries; index++)
 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
@@ -2293,7 +2284,7 @@ static int __init next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __init early_pfn_to_nid(unsigned long pfn)
+int __meminit early_pfn_to_nid(unsigned long pfn)
 {
 	int i;
 
@@ -2430,7 +2421,7 @@ static void __init account_node_boundary(unsigned int nid,
  * with no available memory, a warning is printed and the start and end
  * PFNs will be 0.
  */
-void __init get_pfn_range_for_nid(unsigned int nid,
+void __meminit get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
 {
 	int i;
@@ -2455,7 +2446,7 @@ void __init get_pfn_range_for_nid(unsigned int nid,
  * Return the number of pages a zone spans in a node, including holes
  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  */
-unsigned long __init zone_spanned_pages_in_node(int nid,
+unsigned long __meminit zone_spanned_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
@@ -2483,7 +2474,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
-unsigned long __init __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -2543,7 +2534,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 }
 
 /* Return the number of page frames in holes in a zone on a node */
-unsigned long __init zone_absent_pages_in_node(int nid,
+unsigned long __meminit zone_absent_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
@@ -2579,7 +2570,7 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
 
 #endif
 
-static void __init calculate_node_totalpages(struct pglist_data *pgdat,
+static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long realtotalpages, totalpages = 0;
@@ -2687,7 +2678,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 	}
 }
 
-static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
 {
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
@@ -3007,7 +2998,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 {
 	int cpu = (unsigned long)hcpu;
 
-	if (action == CPU_DEAD) {
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		local_irq_disable();
 		__drain_pages(cpu);
 		vm_events_fold_cpu(cpu);
@@ -3203,7 +3194,8 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, file, buffer, length, ppos);
-	setup_per_zone_pages_min();
+	if (write)
+		setup_per_zone_pages_min();
 	return 0;
 }
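A note for readers skimming the largest hunks above: the compound-page bookkeeping moves from stashing the order in page[1].lru.prev and flagging every subpage PG_compound, to explicit head/tail state, where the head page records the order (set_compound_order()/compound_order()) and each tail page keeps a first_page back-pointer to the head. Below is a minimal user-space sketch of that layout; the struct is a toy stand-in for the kernel's struct page, and the per-page order field is a simplification (the kernel parks the order in page[1]).

```c
#include <assert.h>

/*
 * Toy model of the compound-page layout the patch introduces: the head
 * page carries the allocation order and a head flag; every tail page
 * is flagged tail and points back at the head through first_page.
 */
struct page {
	int head;                /* models PageHead()            */
	int tail;                /* models PageTail()            */
	unsigned long order;     /* models set_compound_order()  */
	struct page *first_page; /* back-pointer from tail pages */
};

static void prep_compound_page(struct page *page, unsigned long order)
{
	int nr_pages = 1 << order;

	page->order = order;	/* kernel: set_compound_order(page, order) */
	page->head = 1;		/* kernel: __SetPageHead(page)             */
	for (int i = 1; i < nr_pages; i++) {
		page[i].tail = 1;	/* kernel: __SetPageTail(p) */
		page[i].first_page = page;
	}
}

int main(void)
{
	struct page pages[8] = { 0 };

	prep_compound_page(pages, 3);	/* order-3 block: 8 pages */
	assert(pages[0].head && !pages[0].tail);
	assert(pages[5].tail && pages[5].first_page == &pages[0]);
	return 0;
}
```

destroy_compound_page() in the hunk above walks the same layout in reverse, which is why it now checks PageHead() on the first page and PageTail()/first_page on the rest, instead of PageCompound()/page_private() on all of them.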
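Similarly, the CONFIG_HOLES_IN_ZONE hunks in page_is_consistent() and page_is_buddy() drop the open-coded #ifdef blocks in favour of pfn_valid_within(). A sketch of that helper's shape, as I recall it from include/linux/mmzone.h of this era (verify against your tree):

```c
/*
 * Within a MAX_ORDER-aligned block, a pfn can only be invalid on
 * architectures that permit holes inside zones; everywhere else the
 * check collapses to a constant.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn)	pfn_valid(pfn)
#else
#define pfn_valid_within(pfn)	(1)
#endif
```

On configurations without holes inside zones the check compiles away entirely, so the buddy-checking fast path pays nothing for the cleanup.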
