Diffstat (limited to 'mm/sparse.c')
-rw-r--r-- | mm/sparse.c | 86
1 file changed, 43 insertions, 43 deletions
diff --git a/mm/sparse.c b/mm/sparse.c
index 4ac1d7ef548..d1b48b691ac 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -5,10 +5,12 @@
 #include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
+#include <linux/compiler.h>
 #include <linux/highmem.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+
 #include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
@@ -69,7 +71,7 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 		else
 			section = kzalloc(array_size, GFP_KERNEL);
 	} else {
-		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+		section = memblock_virt_alloc_node(array_size, nid);
 	}

 	return section;
@@ -268,7 +270,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	/*
 	 * A page may contain usemaps for other sections preventing the
 	 * page being freed and making a section unremovable while
-	 * other sections referencing the usemap retmain active. Similarly,
+	 * other sections referencing the usemap remain active. Similarly,
 	 * a pgdat can prevent a section being removed. If section A
 	 * contains a pgdat and section B contains the usemap, both
 	 * sections become inter-dependent. This allocates usemaps
@@ -279,8 +281,9 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	limit = goal + (1UL << PA_SECTION_SHIFT);
 	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
-					  SMP_CACHE_BYTES, goal, limit);
+	p = memblock_virt_alloc_try_nid_nopanic(size,
+						SMP_CACHE_BYTES, goal, limit,
+						nid);
 	if (!p && limit) {
 		limit = 0;
 		goto again;
@@ -331,7 +334,7 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 					 unsigned long size)
 {
-	return alloc_bootmem_node_nopanic(pgdat, size);
+	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
 }

 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -376,8 +379,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 		return map;

 	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
-	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
-					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	map = memblock_virt_alloc_try_nid(size,
+					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
 	return map;
 }
 void __init sparse_mem_maps_populate_node(struct page **map_map,
@@ -401,8 +405,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	}

 	size = PAGE_ALIGN(size);
-	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
-					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	map = memblock_virt_alloc_try_nid(size * map_count,
+					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
 	if (map) {
 		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 			if (!present_section_nr(pnum))
@@ -458,7 +463,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 }
 #endif

-void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
+void __weak __meminit vmemmap_populate_print_last(void)
 {
 }

@@ -545,7 +550,7 @@ void __init sparse_init(void)
 	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
 	 */
 	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
-	usemap_map = alloc_bootmem(size);
+	usemap_map = memblock_virt_alloc(size, 0);
 	if (!usemap_map)
 		panic("can not allocate usemap_map\n");
 	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
@@ -553,7 +558,7 @@ void __init sparse_init(void)

 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
-	map_map = alloc_bootmem(size2);
+	map_map = memblock_virt_alloc(size2, 0);
 	if (!map_map)
 		panic("can not allocate map_map\n");
 	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
@@ -583,40 +588,39 @@ void __init sparse_init(void)
 	vmemmap_populate_print_last();

 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-	free_bootmem(__pa(map_map), size2);
+	memblock_free_early(__pa(map_map), size2);
 #endif
-	free_bootmem(__pa(usemap_map), size);
+	memblock_free_early(__pa(usemap_map), size);
 }

 #ifdef CONFIG_MEMORY_HOTPLUG
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						 unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
 	/* This will make the necessary allocations eventually. */
 	return sparse_mem_map_populate(pnum, nid);
 }
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
+	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

 	vmemmap_free(start, end);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
+	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

 	vmemmap_free(start, end);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+static struct page *__kmalloc_section_memmap(void)
 {
 	struct page *page, *ret;
-	unsigned long memmap_size = sizeof(struct page) * nr_pages;
+	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

 	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
 	if (page)
@@ -634,28 +638,30 @@ got_map_ptr:
 	return ret;
 }

-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						  unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
-	return __kmalloc_section_memmap(nr_pages);
+	return __kmalloc_section_memmap();
 }

-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
 		free_pages((unsigned long)memmap,
-			   get_order(sizeof(struct page) * nr_pages));
+			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
 }

 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
-	unsigned long magic;
+	unsigned long magic, nr_pages;
 	struct page *page = virt_to_page(memmap);

+	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+		>> PAGE_SHIFT;
+
 	for (i = 0; i < nr_pages; i++, page++) {
 		magic = (unsigned long) page->lru.next;

@@ -684,8 +690,7 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-			   int nr_pages)
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct pglist_data *pgdat = zone->zone_pgdat;
@@ -702,12 +707,12 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 		return -ENOMEM;
 	}

@@ -719,7 +724,7 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 		goto out;
 	}

-	memset(memmap, 0, sizeof(struct page) * nr_pages);
+	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

 	ms->section_mem_map |= SECTION_MARKED_PRESENT;

@@ -729,7 +734,7 @@ out:
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 	}
 	return ret;
 }
@@ -759,7 +764,6 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
 	struct page *usemap_page;
-	unsigned long nr_pages;

 	if (!usemap)
 		return;
@@ -771,7 +775,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
+			__kfree_section_memmap(memmap);
 		return;
 	}

@@ -780,12 +784,8 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	 * on the section which has pgdat at boot time. Just keep it as is now.
 	 */

-	if (memmap) {
-		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
-			>> PAGE_SHIFT;
-
-		free_map_bootmem(memmap, nr_pages);
-	}
+	if (memmap)
+		free_map_bootmem(memmap);
 }

 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
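
The allocator changes above all follow one substitution pattern: every boot-time bootmem call is replaced by the matching memblock_virt_alloc_*() helper with the same size, alignment, address-range and node arguments. The side-by-side sketch below is only a summary of the hunks above (names such as nid, size, goal and limit are the ones already used in mm/sparse.c); it is illustrative, not a compilable unit.

	/* node-local allocation, panics on failure */
	section = alloc_bootmem_node(NODE_DATA(nid), array_size);	/* old */
	section = memblock_virt_alloc_node(array_size, nid);		/* new */

	/* node-local, non-panicking, restricted to [goal, limit) */
	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
					  SMP_CACHE_BYTES, goal, limit);	/* old */
	p = memblock_virt_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
						goal, limit, nid);		/* new */

	/* node-local, any directly addressable memory */
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));	/* old */
	map = memblock_virt_alloc_try_nid(size, PAGE_SIZE,
					  __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);	/* new */

	/* plain allocation and early free of the bookkeeping arrays */
	usemap_map = alloc_bootmem(size);				/* old */
	usemap_map = memblock_virt_alloc(size, 0);			/* new */
	free_bootmem(__pa(usemap_map), size);				/* old */
	memblock_free_early(__pa(usemap_map), size);			/* new */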

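The memory-hotplug hunks carry the patch's second change: the nr_pages argument is dropped from kmalloc_section_memmap(), __kfree_section_memmap(), free_map_bootmem() and sparse_add_one_section(), because a section memmap always covers exactly PAGES_PER_SECTION pages; free_map_bootmem() now derives the page count itself via PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) >> PAGE_SHIFT, as added above. A minimal sketch of how a hotplug caller (outside this diff) would now use the entry point, assuming it already holds the zone and a section-aligned start_pfn:

	/* Section size is implied by PAGES_PER_SECTION; only the zone and the
	 * section-aligned first pfn of the new memory are passed in. */
	ret = sparse_add_one_section(zone, start_pfn);
	if (ret < 0)
		return ret;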