Diffstat (limited to 'mm/bootmem.c')
-rw-r--r--	mm/bootmem.c	463
1 file changed, 154 insertions(+), 309 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 13b0caa9793..90bd3507b41 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -12,7 +12,7 @@
 #include <linux/pfn.h>
 #include <linux/slab.h>
 #include <linux/bootmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kmemleak.h>
 #include <linux/range.h>
 #include <linux/memblock.h>
@@ -23,19 +23,17 @@
 
 #include "internal.h"
 
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data = {
+	.bdata = &bootmem_node_data[0]
+};
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
 unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
 
-#ifdef CONFIG_CRASH_DUMP
-/*
- * If we have booted due to a crash, max_pfn will be a very low value. We need
- * to know the amount of memory that the previous kernel used.
- */
-unsigned long saved_max_pfn;
-#endif
-
-#ifndef CONFIG_NO_BOOTMEM
 bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
 
 static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -58,7 +56,7 @@ early_param("bootmem_debug", bootmem_debug_setup);
 
 static unsigned long __init bootmap_bytes(unsigned long pages)
 {
-	unsigned long bytes = (pages + 7) / 8;
+	unsigned long bytes = DIV_ROUND_UP(pages, 8);
 
 	return ALIGN(bytes, sizeof(long));
 }
@@ -79,16 +77,16 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages)
  */
 static void __init link_bootmem(bootmem_data_t *bdata)
 {
-	struct list_head *iter;
-
-	list_for_each(iter, &bdata_list) {
-		bootmem_data_t *ent;
+	bootmem_data_t *ent;
 
-		ent = list_entry(iter, bootmem_data_t, list);
-		if (bdata->node_min_pfn < ent->node_min_pfn)
-			break;
+	list_for_each_entry(ent, &bdata_list, list) {
+		if (bdata->node_min_pfn < ent->node_min_pfn) {
+			list_add_tail(&bdata->list, &ent->list);
+			return;
+		}
 	}
-	list_add_tail(&bdata->list, iter);
+
+	list_add_tail(&bdata->list, &bdata_list);
 }
 
 /*
@@ -146,24 +144,24 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 	min_low_pfn = start;
 	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
 }
-#endif
+
 /*
  * free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
 	unsigned long cursor, end;
 
-	kmemleak_free_part(__va(addr), size);
+	kmemleak_free_part(__va(physaddr), size);
 
-	cursor = PFN_UP(addr);
-	end = PFN_DOWN(addr + size);
+	cursor = PFN_UP(physaddr);
+	end = PFN_DOWN(physaddr + size);
 
 	for (; cursor < end; cursor++) {
 		__free_pages_bootmem(pfn_to_page(cursor), 0);
@@ -171,100 +169,64 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 	}
 }
 
-#ifdef CONFIG_NO_BOOTMEM
-static void __init __free_pages_memory(unsigned long start, unsigned long end)
-{
-	int i;
-	unsigned long start_aligned, end_aligned;
-	int order = ilog2(BITS_PER_LONG);
-
-	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
-	end_aligned = end & ~(BITS_PER_LONG - 1);
-
-	if (end_aligned <= start_aligned) {
-		for (i = start; i < end; i++)
-			__free_pages_bootmem(pfn_to_page(i), 0);
-
-		return;
-	}
-
-	for (i = start; i < start_aligned; i++)
-		__free_pages_bootmem(pfn_to_page(i), 0);
-
-	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
-		__free_pages_bootmem(pfn_to_page(i), order);
-
-	for (i = end_aligned; i < end; i++)
-		__free_pages_bootmem(pfn_to_page(i), 0);
-}
-
-unsigned long __init free_all_memory_core_early(int nodeid)
-{
-	int i;
-	u64 start, end;
-	unsigned long count = 0;
-	struct range *range = NULL;
-	int nr_range;
-
-	nr_range = get_free_all_memory_range(&range, nodeid);
-
-	for (i = 0; i < nr_range; i++) {
-		start = range[i].start;
-		end = range[i].end;
-		count += end - start;
-		__free_pages_memory(start, end);
-	}
-
-	return count;
-}
-#else
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
-	int aligned;
 	struct page *page;
-	unsigned long start, end, pages, count = 0;
+	unsigned long *map, start, end, pages, count = 0;
 
 	if (!bdata->node_bootmem_map)
 		return 0;
 
+	map = bdata->node_bootmem_map;
 	start = bdata->node_min_pfn;
 	end = bdata->node_low_pfn;
 
-	/*
-	 * If the start is aligned to the machines wordsize, we might
-	 * be able to free pages in bulks of that order.
-	 */
-	aligned = !(start & (BITS_PER_LONG - 1));
-
-	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
-		bdata - bootmem_node_data, start, end, aligned);
+	bdebug("nid=%td start=%lx end=%lx\n",
+		bdata - bootmem_node_data, start, end);
 
 	while (start < end) {
-		unsigned long *map, idx, vec;
+		unsigned long idx, vec;
+		unsigned shift;
 
-		map = bdata->node_bootmem_map;
 		idx = start - bdata->node_min_pfn;
+		shift = idx & (BITS_PER_LONG - 1);
+		/*
+		 * vec holds at most BITS_PER_LONG map bits,
+		 * bit 0 corresponds to start.
+		 */
 		vec = ~map[idx / BITS_PER_LONG];
-		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
+
+		if (shift) {
+			vec >>= shift;
+			if (end - start >= BITS_PER_LONG)
+				vec |= ~map[idx / BITS_PER_LONG + 1] <<
+					(BITS_PER_LONG - shift);
+		}
+		/*
+		 * If we have a properly aligned and fully unreserved
+		 * BITS_PER_LONG block of pages in front of us, free
+		 * it in one go.
+		 */
+		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
 			int order = ilog2(BITS_PER_LONG);
 
 			__free_pages_bootmem(pfn_to_page(start), order);
 			count += BITS_PER_LONG;
+			start += BITS_PER_LONG;
 		} else {
-			unsigned long off = 0;
+			unsigned long cur = start;
 
-			while (vec && off < BITS_PER_LONG) {
+			start = ALIGN(start + 1, BITS_PER_LONG);
+			while (vec && cur != start) {
 				if (vec & 1) {
-					page = pfn_to_page(start + off);
+					page = pfn_to_page(cur);
 					__free_pages_bootmem(page, 0);
 					count++;
 				}
 				vec >>= 1;
-				off++;
+				++cur;
 			}
 		}
-		start += BITS_PER_LONG;
 	}
 
 	page = virt_to_page(bdata->node_bootmem_map);
@@ -278,23 +240,27 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 	return count;
 }
-#endif
 
-/**
- * free_all_bootmem_node - release a node's free pages to the buddy allocator
- * @pgdat: node to be released
- *
- * Returns the number of pages actually released.
- */
-unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
 {
-	register_page_bootmem_info_node(pgdat);
-#ifdef CONFIG_NO_BOOTMEM
-	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
-	return 0;
-#else
-	return free_all_bootmem_core(pgdat->bdata);
-#endif
+	struct zone *z;
+
+	if (reset_managed_pages_done)
+		return;
+
+	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+		z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+	struct pglist_data *pgdat;
+
+	for_each_online_pgdat(pgdat)
+		reset_node_managed_pages(pgdat);
+	reset_managed_pages_done = 1;
 }
 
 /**
@@ -304,27 +270,19 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
  */
 unsigned long __init free_all_bootmem(void)
 {
-#ifdef CONFIG_NO_BOOTMEM
-	/*
-	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
-	 *  because in some case like Node0 doesnt have RAM installed
-	 *  low ram will be on Node1
-	 * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
-	 *  will be used instead of only Node0 related
-	 */
-	return free_all_memory_core_early(MAX_NUMNODES);
-#else
 	unsigned long total_pages = 0;
 	bootmem_data_t *bdata;
 
+	reset_all_zones_managed_pages();
+
 	list_for_each_entry(bdata, &bdata_list, list)
 		total_pages += free_all_bootmem_core(bdata);
 
+	totalram_pages += total_pages;
+
 	return total_pages;
-#endif
 }
 
-#ifndef CONFIG_NO_BOOTMEM
 static void __init __free(bootmem_data_t *bdata,
 			unsigned long sidx, unsigned long eidx)
 {
@@ -419,7 +377,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
 	}
 	BUG();
 }
-#endif
 
 /**
  * free_bootmem_node - mark a page range as usable
@@ -434,10 +391,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
 void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 			      unsigned long size)
 {
-#ifdef CONFIG_NO_BOOTMEM
-	kmemleak_free_part(__va(physaddr), size);
-	memblock_x86_free_range(physaddr, physaddr + size);
-#else
 	unsigned long start, end;
 
 	kmemleak_free_part(__va(physaddr), size);
@@ -446,33 +399,27 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 	end = PFN_DOWN(physaddr + size);
 
 	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
-#endif
 }
 
 /**
  * free_bootmem - mark a page range as usable
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
-void __init free_bootmem(unsigned long addr, unsigned long size)
+void __init free_bootmem(unsigned long physaddr, unsigned long size)
 {
-#ifdef CONFIG_NO_BOOTMEM
-	kmemleak_free_part(__va(addr), size);
-	memblock_x86_free_range(addr, addr + size);
-#else
 	unsigned long start, end;
 
-	kmemleak_free_part(__va(addr), size);
+	kmemleak_free_part(__va(physaddr), size);
 
-	start = PFN_UP(addr);
-	end = PFN_DOWN(addr + size);
+	start = PFN_UP(physaddr);
+	end = PFN_DOWN(physaddr + size);
 
 	mark_bootmem(start, end, 0, 0);
-#endif
 }
 
 /**
@@ -489,21 +436,16 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 				 unsigned long size, int flags)
 {
-#ifdef CONFIG_NO_BOOTMEM
-	panic("no bootmem");
-	return 0;
-#else
 	unsigned long start, end;
 
 	start = PFN_DOWN(physaddr);
 	end = PFN_UP(physaddr + size);
 
 	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
-#endif
 }
 
 /**
- * reserve_bootmem - mark a page range as usable
+ * reserve_bootmem - mark a page range as reserved
  * @addr: starting address of the range
  * @size: size of the range in bytes
  * @flags: reservation flags (see linux/bootmem.h)
@@ -515,24 +457,12 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 int __init reserve_bootmem(unsigned long addr, unsigned long size,
 			    int flags)
 {
-#ifdef CONFIG_NO_BOOTMEM
-	panic("no bootmem");
-	return 0;
-#else
 	unsigned long start, end;
 
 	start = PFN_DOWN(addr);
 	end = PFN_UP(addr + size);
 
 	return mark_bootmem(start, end, 1, flags);
-#endif
-}
-
-#ifndef CONFIG_NO_BOOTMEM
-int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
-				   int flags)
-{
-	return reserve_bootmem(phys, len, flags);
 }
 
 static unsigned long __init align_idx(struct bootmem_data *bdata,
@@ -558,7 +488,7 @@ static unsigned long __init align_off(struct bootmem_data *bdata,
 	return ALIGN(base + off, align) - base;
 }
 
-static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
+static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
 					unsigned long size, unsigned long align,
 					unsigned long goal, unsigned long limit)
 {
@@ -665,60 +595,16 @@ find_block:
 	return NULL;
 }
 
-static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
-					unsigned long size, unsigned long align,
-					unsigned long goal, unsigned long limit)
-{
-	if (WARN_ON_ONCE(slab_is_available()))
-		return kzalloc(size, GFP_NOWAIT);
-
-#ifdef CONFIG_HAVE_ARCH_BOOTMEM
-	{
-		bootmem_data_t *p_bdata;
-
-		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
-							goal, limit);
-		if (p_bdata)
-			return alloc_bootmem_core(p_bdata, size, align,
-							goal, limit);
-	}
-#endif
-	return NULL;
-}
-#endif
-
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+static void * __init alloc_bootmem_core(unsigned long size,
 					unsigned long align,
 					unsigned long goal,
 					unsigned long limit)
 {
-#ifdef CONFIG_NO_BOOTMEM
-	void *ptr;
-
-	if (WARN_ON_ONCE(slab_is_available()))
-		return kzalloc(size, GFP_NOWAIT);
-
-restart:
-
-	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
-
-	if (ptr)
-		return ptr;
-
-	if (goal != 0) {
-		goal = 0;
-		goto restart;
-	}
-
-	return NULL;
-#else
 	bootmem_data_t *bdata;
 	void *region;
 
-restart:
-	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
-	if (region)
-		return region;
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc(size, GFP_NOWAIT);
 
 	list_for_each_entry(bdata, &bdata_list, list) {
 		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
@@ -726,18 +612,31 @@ restart:
 		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
 			break;
 
-		region = alloc_bootmem_core(bdata, size, align, goal, limit);
+		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
 		if (region)
 			return region;
 	}
 
+	return NULL;
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+					      unsigned long align,
+					      unsigned long goal,
+					      unsigned long limit)
+{
+	void *ptr;
+
+restart:
+	ptr = alloc_bootmem_core(size, align, goal, limit);
+	if (ptr)
+		return ptr;
+
 	if (goal) {
 		goal = 0;
 		goto restart;
 	}
 
 	return NULL;
-#endif
 }
 
 /**
@@ -758,10 +657,6 @@ void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
 {
 	unsigned long limit = 0;
 
-#ifdef CONFIG_NO_BOOTMEM
-	limit = -1UL;
-#endif
-
 	return ___alloc_bootmem_nopanic(size, align, goal, limit);
 }
 
@@ -798,31 +693,62 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
 {
 	unsigned long limit = 0;
 
-#ifdef CONFIG_NO_BOOTMEM
-	limit = -1UL;
-#endif
-
 	return ___alloc_bootmem(size, align, goal, limit);
 }
 
-#ifndef CONFIG_NO_BOOTMEM
-static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				unsigned long size, unsigned long align,
 				unsigned long goal, unsigned long limit)
 {
 	void *ptr;
 
-	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc(size, GFP_NOWAIT);
+again:
+
+	/* do not panic in alloc_bootmem_bdata() */
+	if (limit && goal + size > limit)
+		limit = 0;
+
+	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
 	if (ptr)
 		return ptr;
 
-	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
+	ptr = alloc_bootmem_core(size, align, goal, limit);
 	if (ptr)
 		return ptr;
 
-	return ___alloc_bootmem(size, align, goal, limit);
+	if (goal) {
+		goal = 0;
+		goto again;
+	}
+
+	return NULL;
+}
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+				   unsigned long align, unsigned long goal)
+{
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+}
+
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+				    unsigned long align, unsigned long goal,
+				    unsigned long limit)
+{
+	void *ptr;
+
+	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+	if (ptr)
+		return ptr;
+
+	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+	panic("Out of memory");
+	return NULL;
 }
-#endif
 
 /**
  * __alloc_bootmem_node - allocate boot memory from a specific node
@@ -842,24 +768,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 				   unsigned long align, unsigned long goal)
 {
-	void *ptr;
-
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-#ifdef CONFIG_NO_BOOTMEM
-	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-					 goal, -1ULL);
-	if (ptr)
-		return ptr;
-
-	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
-					 goal, -1ULL);
-#else
-	ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
-#endif
-
-	return ptr;
+	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -872,7 +784,7 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
 	/* update goal according ...MAX_DMA32_PFN */
-	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(pgdat);
 
 	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
 	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
@@ -880,13 +792,8 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 		unsigned long new_goal;
 
 		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
-#ifdef CONFIG_NO_BOOTMEM
-		ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
-						 new_goal, -1ULL);
-#else
-		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
 						 new_goal, 0);
-#endif
 		if (ptr)
 			return ptr;
 	}
@@ -896,64 +803,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-				    unsigned long section_nr)
-{
-#ifdef CONFIG_NO_BOOTMEM
-	unsigned long pfn, goal, limit;
-
-	pfn = section_nr_to_pfn(section_nr);
-	goal = pfn << PAGE_SHIFT;
-	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-
-	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
-					 SMP_CACHE_BYTES, goal, limit);
-#else
-	bootmem_data_t *bdata;
-	unsigned long pfn, goal, limit;
-
-	pfn = section_nr_to_pfn(section_nr);
-	goal = pfn << PAGE_SHIFT;
-	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
-
-	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
-#endif
-}
-#endif
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
-				   unsigned long align, unsigned long goal)
-{
-	void *ptr;
-
-	if (WARN_ON_ONCE(slab_is_available()))
-		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
-#ifdef CONFIG_NO_BOOTMEM
-	ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
-						 goal, -1ULL);
-#else
-	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
-	if (ptr)
-		return ptr;
-
-	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
-#endif
-	if (ptr)
-		return ptr;
-
-	return __alloc_bootmem_nopanic(size, align, goal);
-}
-
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
 #endif
@@ -977,6 +826,14 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
 }
 
+void * __init __alloc_bootmem_low_nopanic(unsigned long size,
+					  unsigned long align,
+					  unsigned long goal)
+{
+	return ___alloc_bootmem_nopanic(size, align, goal,
+					ARCH_LOW_ADDRESS_LIMIT);
+}
+
 /**
  * __alloc_bootmem_low_node - allocate low boot memory from a specific node
  * @pgdat: node to allocate from
@@ -995,21 +852,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
 				       unsigned long align, unsigned long goal)
 {
-	void *ptr;
-
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-#ifdef CONFIG_NO_BOOTMEM
-	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-				goal, ARCH_LOW_ADDRESS_LIMIT);
-	if (ptr)
-		return ptr;
-	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
-				goal, ARCH_LOW_ADDRESS_LIMIT);
-#else
-	ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
-				goal, ARCH_LOW_ADDRESS_LIMIT);
-#endif
-	return ptr;
+	return ___alloc_bootmem_node(pgdat, size, align,
+				     goal, ARCH_LOW_ADDRESS_LIMIT);
 }
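
A few notes on the less obvious changes above, each with a small userspace sketch. These are illustrative programs written against the patterns in the diff, not kernel code.

The bootmap_bytes() change is a pure readability fix: (pages + 7) / 8 and DIV_ROUND_UP(pages, 8) compute the same ceiling division, one map bit per page, rounded up to whole bytes and then to whole words. A minimal sketch of the sizing, using the kernel's actual definition of DIV_ROUND_UP; bootmap_bytes() here is a local copy for illustration, not the kernel symbol:

/*
 * Userspace sketch of bootmem's bitmap sizing.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long bootmap_bytes(unsigned long pages)
{
	/* one bit per page, rounded up to whole bytes... */
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	/* ...then to whole words, so the map can be walked long-at-a-time */
	return ALIGN(bytes, sizeof(long));
}

int main(void)
{
	/* 1000 pages -> 125 map bytes -> 128 after word alignment (64-bit host) */
	printf("%lu\n", bootmap_bytes(1000));
	return 0;
}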
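The link_bootmem() rewrite replaces the open-coded list_for_each()/list_entry() pair with list_for_each_entry() and makes the two insertion cases explicit: insert in front of the first entry with a larger node_min_pfn, or append at the tail. A self-contained sketch of the same sorted-insert pattern; the miniature list helpers below only mimic <linux/list.h> (the real list_for_each_entry does not take an explicit type argument), and struct bdata stands in for bootmem_data_t:

/*
 * Userspace sketch of the sorted insertion in link_bootmem().
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, type, member)		\
	for (pos = container_of((head)->next, type, member);	\
	     &pos->member != (head);				\
	     pos = container_of(pos->member.next, type, member))

/* insert new just before head, i.e. at the tail of head's list */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct bdata { unsigned long node_min_pfn; struct list_head list; };

static struct list_head bdata_list = { &bdata_list, &bdata_list };

static void link_bootmem(struct bdata *bdata)
{
	struct bdata *ent;

	list_for_each_entry(ent, &bdata_list, struct bdata, list) {
		if (bdata->node_min_pfn < ent->node_min_pfn) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}

int main(void)
{
	struct bdata a = { .node_min_pfn = 300 };
	struct bdata b = { .node_min_pfn = 100 };
	struct bdata c = { .node_min_pfn = 200 };
	struct bdata *ent;

	link_bootmem(&a);
	link_bootmem(&b);
	link_bootmem(&c);
	list_for_each_entry(ent, &bdata_list, struct bdata, list)
		printf("%lu\n", ent->node_min_pfn);	/* 100 200 300 */
	return 0;
}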
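The free_all_bootmem_core() rework is the meat of the diff: instead of requiring node_min_pfn to be word-aligned before taking the bulk-free fast path, it builds vec by splicing bits from two adjacent bitmap words whenever start is offset within a word (shift != 0), so the order-ilog2(BITS_PER_LONG) frees also work on nodes that do not start on a BITS_PER_LONG boundary. A runnable model of that walk, assuming bootmem's convention that a set map bit means "reserved"; walk(), free_one_page() and free_block() are invented stand-ins for the kernel's loop and its __free_pages_bootmem() calls, and the demo map covers the walked range exactly:

/*
 * Standalone model of the bitmap walk in the new free_all_bootmem_core().
 */
#include <stdio.h>

#define BITS_PER_LONG	((unsigned long)(8 * sizeof(unsigned long)))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static void free_one_page(unsigned long pfn)	/* order-0 free */
{
	printf("free pfn %lu\n", pfn);
}

static void free_block(unsigned long pfn)	/* order-ilog2(BITS_PER_LONG) free */
{
	printf("free pfns %lu..%lu in one go\n", pfn, pfn + BITS_PER_LONG - 1);
}

static void walk(const unsigned long *map, unsigned long min_pfn,
		 unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long idx = start - min_pfn;
		unsigned long shift = idx & (BITS_PER_LONG - 1);
		/* vec: inverted map window, bit 0 corresponds to start */
		unsigned long vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			/* splice the tail of this word with the head of the next */
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			/* aligned, fully unreserved block: free it in one go */
			free_block(start);
			start += BITS_PER_LONG;
		} else {
			/* free page by page up to the next word boundary */
			unsigned long cur = start;

			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1)
					free_one_page(cur);
				vec >>= 1;
				++cur;
			}
		}
	}
}

int main(void)
{
	/* two map words; pages min+0 and min+2 reserved; node starts at pfn 3 */
	unsigned long map[2] = { 0x5UL, 0x0UL };

	walk(map, 3, 3, 3 + 2 * BITS_PER_LONG);
	return 0;
}

With the unaligned start, the first word is drained bit by bit, the middle word is freed as one block, and the final partial word is again drained bit by bit, which is exactly the behavior the new kernel loop aims for.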
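Finally, the allocator reshuffle: the old ___alloc_bootmem_nopanic() mixed the per-node search with the retry policy under #ifdef, while the new code puts one full pass over bdata_list into alloc_bootmem_core() and keeps only the "drop the goal and retry" loop in ___alloc_bootmem_nopanic(). A toy model of that split; try_one_pass() and the addresses are invented for the example:

/*
 * Toy model of the goal-dropping retry around a single allocation pass.
 */
#include <stdio.h>

/* stand-in for one pass over all nodes: pretend memory ends at 0x4000 */
static void *try_one_pass(unsigned long size, unsigned long goal)
{
	if (goal && goal + size > 0x4000)
		return NULL;
	return (void *)(goal ? goal : 0x1000);
}

static void *alloc_nopanic(unsigned long size, unsigned long goal)
{
	void *ptr;

restart:
	ptr = try_one_pass(size, goal);
	if (ptr)
		return ptr;
	if (goal) {		/* preferred address failed: retry anywhere */
		goal = 0;
		goto restart;
	}
	return NULL;		/* _nopanic: failure is the caller's problem */
}

int main(void)
{
	/* the 0x8000 goal is unsatisfiable; the retry without a goal succeeds */
	printf("%p\n", alloc_nopanic(64, 0x8000));
	return 0;
}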
