Diffstat (limited to 'mm/memory_hotplug.c')
 -rw-r--r--  mm/memory_hotplug.c | 1437
 1 file changed, 1250 insertions(+), 187 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9260314a221..469bbf505f8 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -9,9 +9,8 @@  #include <linux/swap.h>  #include <linux/interrupt.h>  #include <linux/pagemap.h> -#include <linux/bootmem.h>  #include <linux/compiler.h> -#include <linux/module.h> +#include <linux/export.h>  #include <linux/pagevec.h>  #include <linux/writeback.h>  #include <linux/slab.h> @@ -29,11 +28,103 @@  #include <linux/suspend.h>  #include <linux/mm_inline.h>  #include <linux/firmware-map.h> +#include <linux/stop_machine.h> +#include <linux/hugetlb.h> +#include <linux/memblock.h>  #include <asm/tlbflush.h>  #include "internal.h" +/* + * online_page_callback contains pointer to current page onlining function. + * Initially it is generic_online_page(). If it is required it could be + * changed by calling set_online_page_callback() for callback registration + * and restore_online_page_callback() for generic callback restore. + */ + +static void generic_online_page(struct page *page); + +static online_page_callback_t online_page_callback = generic_online_page; +static DEFINE_MUTEX(online_page_callback_lock); + +/* The same as the cpu_hotplug lock, but for memory hotplug. */ +static struct { +	struct task_struct *active_writer; +	struct mutex lock; /* Synchronizes accesses to refcount, */ +	/* +	 * Also blocks the new readers during +	 * an ongoing mem hotplug operation. +	 */ +	int refcount; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +	struct lockdep_map dep_map; +#endif +} mem_hotplug = { +	.active_writer = NULL, +	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock), +	.refcount = 0, +#ifdef CONFIG_DEBUG_LOCK_ALLOC +	.dep_map = {.name = "mem_hotplug.lock" }, +#endif +}; + +/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */ +#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map) +#define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map) +#define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map) + +void get_online_mems(void) +{ +	might_sleep(); +	if (mem_hotplug.active_writer == current) +		return; +	memhp_lock_acquire_read(); +	mutex_lock(&mem_hotplug.lock); +	mem_hotplug.refcount++; +	mutex_unlock(&mem_hotplug.lock); + +} + +void put_online_mems(void) +{ +	if (mem_hotplug.active_writer == current) +		return; +	mutex_lock(&mem_hotplug.lock); + +	if (WARN_ON(!mem_hotplug.refcount)) +		mem_hotplug.refcount++; /* try to fix things up */ + +	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer)) +		wake_up_process(mem_hotplug.active_writer); +	mutex_unlock(&mem_hotplug.lock); +	memhp_lock_release(); + +} + +static void mem_hotplug_begin(void) +{ +	mem_hotplug.active_writer = current; + +	memhp_lock_acquire(); +	for (;;) { +		mutex_lock(&mem_hotplug.lock); +		if (likely(!mem_hotplug.refcount)) +			break; +		__set_current_state(TASK_UNINTERRUPTIBLE); +		mutex_unlock(&mem_hotplug.lock); +		schedule(); +	} +} + +static void mem_hotplug_done(void) +{ +	mem_hotplug.active_writer = NULL; +	mutex_unlock(&mem_hotplug.lock); +	memhp_lock_release(); +} +  /* add this memory to iomem resource */  static struct resource *register_memory_resource(u64 start, u64 size)  { @@ -46,8 +137,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)  	res->end = start + size - 1;  	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;  	if (request_resource(&iomem_resource, res) < 0) { -		printk("System RAM resource %llx - %llx cannot be added\n", -		(unsigned long 
long)res->start, (unsigned long long)res->end); +		pr_debug("System RAM resource %pR cannot be added\n", res);  		kfree(res);  		res = NULL;  	} @@ -64,42 +154,39 @@ static void release_memory_resource(struct resource *res)  }  #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE -#ifndef CONFIG_SPARSEMEM_VMEMMAP -static void get_page_bootmem(unsigned long info,  struct page *page, int type) +void get_page_bootmem(unsigned long info,  struct page *page, +		      unsigned long type)  { -	atomic_set(&page->_mapcount, type); +	page->lru.next = (struct list_head *) type;  	SetPagePrivate(page);  	set_page_private(page, info);  	atomic_inc(&page->_count);  } -/* reference to __meminit __free_pages_bootmem is valid - * so use __ref to tell modpost not to generate a warning */ -void __ref put_page_bootmem(struct page *page) +void put_page_bootmem(struct page *page)  { -	int type; +	unsigned long type; -	type = atomic_read(&page->_mapcount); -	BUG_ON(type >= -1); +	type = (unsigned long) page->lru.next; +	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || +	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);  	if (atomic_dec_return(&page->_count) == 1) {  		ClearPagePrivate(page);  		set_page_private(page, 0); -		reset_page_mapcount(page); -		__free_pages_bootmem(page, 0); +		INIT_LIST_HEAD(&page->lru); +		free_reserved_page(page);  	} -  } +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE +#ifndef CONFIG_SPARSEMEM_VMEMMAP  static void register_page_bootmem_info_section(unsigned long start_pfn)  {  	unsigned long *usemap, mapsize, section_nr, i;  	struct mem_section *ms;  	struct page *page, *memmap; -	if (!pfn_valid(start_pfn)) -		return; -  	section_nr = pfn_to_section_nr(start_pfn);  	ms = __nr_to_section(section_nr); @@ -127,6 +214,32 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)  		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);  } +#else /* CONFIG_SPARSEMEM_VMEMMAP */ +static void register_page_bootmem_info_section(unsigned long start_pfn) +{ +	unsigned long *usemap, mapsize, section_nr, i; +	struct mem_section *ms; +	struct page *page, *memmap; + +	if (!pfn_valid(start_pfn)) +		return; + +	section_nr = pfn_to_section_nr(start_pfn); +	ms = __nr_to_section(section_nr); + +	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); + +	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); + +	usemap = __nr_to_section(section_nr)->pageblock_flags; +	page = virt_to_page(usemap); + +	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; + +	for (i = 0; i < mapsize; i++, page++) +		get_page_bootmem(section_nr, page, MIX_SECTION_INFO); +} +#endif /* !CONFIG_SPARSEMEM_VMEMMAP */  void register_page_bootmem_info_node(struct pglist_data *pgdat)  { @@ -143,7 +256,7 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)  	zone = &pgdat->node_zones[0];  	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { -		if (zone->wait_table) { +		if (zone_is_initialized(zone)) {  			nr_pages = zone->wait_table_hash_nr_entries  				* sizeof(wait_queue_head_t);  			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT; @@ -155,14 +268,21 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)  	}  	pfn = pgdat->node_start_pfn; -	end_pfn = pfn + pgdat->node_spanned_pages; - -	/* register_section info */ -	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) -		register_page_bootmem_info_section(pfn); +	end_pfn = pgdat_end_pfn(pgdat); +	/* register section info */ +	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { +		/* +		 * Some platforms can assign the same pfn to multiple nodes 
- on +		 * node0 as well as nodeN.  To avoid registering a pfn against +		 * multiple nodes we check that this pfn does not already +		 * reside in some other nodes. +		 */ +		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) +			register_page_bootmem_info_section(pfn); +	}  } -#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ +#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */  static void grow_zone_span(struct zone *zone, unsigned long start_pfn,  			   unsigned long end_pfn) @@ -171,8 +291,8 @@ static void grow_zone_span(struct zone *zone, unsigned long start_pfn,  	zone_span_writelock(zone); -	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; -	if (start_pfn < zone->zone_start_pfn) +	old_zone_end_pfn = zone_end_pfn(zone); +	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)  		zone->zone_start_pfn = start_pfn;  	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) - @@ -181,13 +301,138 @@ static void grow_zone_span(struct zone *zone, unsigned long start_pfn,  	zone_span_writeunlock(zone);  } +static void resize_zone(struct zone *zone, unsigned long start_pfn, +		unsigned long end_pfn) +{ +	zone_span_writelock(zone); + +	if (end_pfn - start_pfn) { +		zone->zone_start_pfn = start_pfn; +		zone->spanned_pages = end_pfn - start_pfn; +	} else { +		/* +		 * make it consist as free_area_init_core(), +		 * if spanned_pages = 0, then keep start_pfn = 0 +		 */ +		zone->zone_start_pfn = 0; +		zone->spanned_pages = 0; +	} + +	zone_span_writeunlock(zone); +} + +static void fix_zone_id(struct zone *zone, unsigned long start_pfn, +		unsigned long end_pfn) +{ +	enum zone_type zid = zone_idx(zone); +	int nid = zone->zone_pgdat->node_id; +	unsigned long pfn; + +	for (pfn = start_pfn; pfn < end_pfn; pfn++) +		set_page_links(pfn_to_page(pfn), zid, nid, pfn); +} + +/* Can fail with -ENOMEM from allocating a wait table with vmalloc() or + * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */ +static int __ref ensure_zone_is_initialized(struct zone *zone, +			unsigned long start_pfn, unsigned long num_pages) +{ +	if (!zone_is_initialized(zone)) +		return init_currently_empty_zone(zone, start_pfn, num_pages, +						 MEMMAP_HOTPLUG); +	return 0; +} + +static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2, +		unsigned long start_pfn, unsigned long end_pfn) +{ +	int ret; +	unsigned long flags; +	unsigned long z1_start_pfn; + +	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn); +	if (ret) +		return ret; + +	pgdat_resize_lock(z1->zone_pgdat, &flags); + +	/* can't move pfns which are higher than @z2 */ +	if (end_pfn > zone_end_pfn(z2)) +		goto out_fail; +	/* the move out part must be at the left most of @z2 */ +	if (start_pfn > z2->zone_start_pfn) +		goto out_fail; +	/* must included/overlap */ +	if (end_pfn <= z2->zone_start_pfn) +		goto out_fail; + +	/* use start_pfn for z1's start_pfn if z1 is empty */ +	if (!zone_is_empty(z1)) +		z1_start_pfn = z1->zone_start_pfn; +	else +		z1_start_pfn = start_pfn; + +	resize_zone(z1, z1_start_pfn, end_pfn); +	resize_zone(z2, end_pfn, zone_end_pfn(z2)); + +	pgdat_resize_unlock(z1->zone_pgdat, &flags); + +	fix_zone_id(z1, start_pfn, end_pfn); + +	return 0; +out_fail: +	pgdat_resize_unlock(z1->zone_pgdat, &flags); +	return -1; +} + +static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2, +		unsigned long start_pfn, unsigned long end_pfn) +{ +	int ret; +	unsigned long flags; +	unsigned long z2_end_pfn; + +	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn); +	if (ret) +		return ret; + +	
pgdat_resize_lock(z1->zone_pgdat, &flags); + +	/* can't move pfns which are lower than @z1 */ +	if (z1->zone_start_pfn > start_pfn) +		goto out_fail; +	/* the move out part mast at the right most of @z1 */ +	if (zone_end_pfn(z1) >  end_pfn) +		goto out_fail; +	/* must included/overlap */ +	if (start_pfn >= zone_end_pfn(z1)) +		goto out_fail; + +	/* use end_pfn for z2's end_pfn if z2 is empty */ +	if (!zone_is_empty(z2)) +		z2_end_pfn = zone_end_pfn(z2); +	else +		z2_end_pfn = end_pfn; + +	resize_zone(z1, z1->zone_start_pfn, start_pfn); +	resize_zone(z2, start_pfn, z2_end_pfn); + +	pgdat_resize_unlock(z1->zone_pgdat, &flags); + +	fix_zone_id(z2, start_pfn, end_pfn); + +	return 0; +out_fail: +	pgdat_resize_unlock(z1->zone_pgdat, &flags); +	return -1; +} +  static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,  			    unsigned long end_pfn)  { -	unsigned long old_pgdat_end_pfn = -		pgdat->node_start_pfn + pgdat->node_spanned_pages; +	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat); -	if (start_pfn < pgdat->node_start_pfn) +	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)  		pgdat->node_start_pfn = start_pfn;  	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) - @@ -201,16 +446,13 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)  	int nid = pgdat->node_id;  	int zone_type;  	unsigned long flags; +	int ret;  	zone_type = zone - pgdat->node_zones; -	if (!zone->wait_table) { -		int ret; +	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages); +	if (ret) +		return ret; -		ret = init_currently_empty_zone(zone, phys_start_pfn, -						nr_pages, MEMMAP_HOTPLUG); -		if (ret) -			return ret; -	}  	pgdat_resize_lock(zone->zone_pgdat, &flags);  	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);  	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn, @@ -224,13 +466,12 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)  static int __meminit __add_section(int nid, struct zone *zone,  					unsigned long phys_start_pfn)  { -	int nr_pages = PAGES_PER_SECTION;  	int ret;  	if (pfn_valid(phys_start_pfn))  		return -EEXIST; -	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); +	ret = sparse_add_one_section(zone, phys_start_pfn);  	if (ret < 0)  		return ret; @@ -243,36 +484,6 @@ static int __meminit __add_section(int nid, struct zone *zone,  	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));  } -#ifdef CONFIG_SPARSEMEM_VMEMMAP -static int __remove_section(struct zone *zone, struct mem_section *ms) -{ -	/* -	 * XXX: Freeing memmap with vmemmap is not implement yet. -	 *      This should be removed later. -	 */ -	return -EBUSY; -} -#else -static int __remove_section(struct zone *zone, struct mem_section *ms) -{ -	unsigned long flags; -	struct pglist_data *pgdat = zone->zone_pgdat; -	int ret = -EINVAL; - -	if (!valid_section(ms)) -		return ret; - -	ret = unregister_memory_section(ms); -	if (ret) -		return ret; - -	pgdat_resize_lock(pgdat, &flags); -	sparse_remove_one_section(zone, ms); -	pgdat_resize_unlock(pgdat, &flags); -	return 0; -} -#endif -  /*   * Reasonably generic function for adding memory.  
It is   * expected that archs that support memory hotplug will @@ -306,6 +517,230 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,  }  EXPORT_SYMBOL_GPL(__add_pages); +#ifdef CONFIG_MEMORY_HOTREMOVE +/* find the smallest valid pfn in the range [start_pfn, end_pfn) */ +static int find_smallest_section_pfn(int nid, struct zone *zone, +				     unsigned long start_pfn, +				     unsigned long end_pfn) +{ +	struct mem_section *ms; + +	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) { +		ms = __pfn_to_section(start_pfn); + +		if (unlikely(!valid_section(ms))) +			continue; + +		if (unlikely(pfn_to_nid(start_pfn) != nid)) +			continue; + +		if (zone && zone != page_zone(pfn_to_page(start_pfn))) +			continue; + +		return start_pfn; +	} + +	return 0; +} + +/* find the biggest valid pfn in the range [start_pfn, end_pfn). */ +static int find_biggest_section_pfn(int nid, struct zone *zone, +				    unsigned long start_pfn, +				    unsigned long end_pfn) +{ +	struct mem_section *ms; +	unsigned long pfn; + +	/* pfn is the end pfn of a memory section. */ +	pfn = end_pfn - 1; +	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) { +		ms = __pfn_to_section(pfn); + +		if (unlikely(!valid_section(ms))) +			continue; + +		if (unlikely(pfn_to_nid(pfn) != nid)) +			continue; + +		if (zone && zone != page_zone(pfn_to_page(pfn))) +			continue; + +		return pfn; +	} + +	return 0; +} + +static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, +			     unsigned long end_pfn) +{ +	unsigned long zone_start_pfn = zone->zone_start_pfn; +	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ +	unsigned long zone_end_pfn = z; +	unsigned long pfn; +	struct mem_section *ms; +	int nid = zone_to_nid(zone); + +	zone_span_writelock(zone); +	if (zone_start_pfn == start_pfn) { +		/* +		 * If the section is smallest section in the zone, it need +		 * shrink zone->zone_start_pfn and zone->zone_spanned_pages. +		 * In this case, we find second smallest valid mem_section +		 * for shrinking zone. +		 */ +		pfn = find_smallest_section_pfn(nid, zone, end_pfn, +						zone_end_pfn); +		if (pfn) { +			zone->zone_start_pfn = pfn; +			zone->spanned_pages = zone_end_pfn - pfn; +		} +	} else if (zone_end_pfn == end_pfn) { +		/* +		 * If the section is biggest section in the zone, it need +		 * shrink zone->spanned_pages. +		 * In this case, we find second biggest valid mem_section for +		 * shrinking zone. +		 */ +		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn, +					       start_pfn); +		if (pfn) +			zone->spanned_pages = pfn - zone_start_pfn + 1; +	} + +	/* +	 * The section is not biggest or smallest mem_section in the zone, it +	 * only creates a hole in the zone. So in this case, we need not +	 * change the zone. But perhaps, the zone has only hole data. Thus +	 * it check the zone has only hole or not. 
+	 */ +	pfn = zone_start_pfn; +	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) { +		ms = __pfn_to_section(pfn); + +		if (unlikely(!valid_section(ms))) +			continue; + +		if (page_zone(pfn_to_page(pfn)) != zone) +			continue; + +		 /* If the section is current section, it continues the loop */ +		if (start_pfn == pfn) +			continue; + +		/* If we find valid section, we have nothing to do */ +		zone_span_writeunlock(zone); +		return; +	} + +	/* The zone has no valid section */ +	zone->zone_start_pfn = 0; +	zone->spanned_pages = 0; +	zone_span_writeunlock(zone); +} + +static void shrink_pgdat_span(struct pglist_data *pgdat, +			      unsigned long start_pfn, unsigned long end_pfn) +{ +	unsigned long pgdat_start_pfn = pgdat->node_start_pfn; +	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */ +	unsigned long pgdat_end_pfn = p; +	unsigned long pfn; +	struct mem_section *ms; +	int nid = pgdat->node_id; + +	if (pgdat_start_pfn == start_pfn) { +		/* +		 * If the section is smallest section in the pgdat, it need +		 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages. +		 * In this case, we find second smallest valid mem_section +		 * for shrinking zone. +		 */ +		pfn = find_smallest_section_pfn(nid, NULL, end_pfn, +						pgdat_end_pfn); +		if (pfn) { +			pgdat->node_start_pfn = pfn; +			pgdat->node_spanned_pages = pgdat_end_pfn - pfn; +		} +	} else if (pgdat_end_pfn == end_pfn) { +		/* +		 * If the section is biggest section in the pgdat, it need +		 * shrink pgdat->node_spanned_pages. +		 * In this case, we find second biggest valid mem_section for +		 * shrinking zone. +		 */ +		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn, +					       start_pfn); +		if (pfn) +			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1; +	} + +	/* +	 * If the section is not biggest or smallest mem_section in the pgdat, +	 * it only creates a hole in the pgdat. So in this case, we need not +	 * change the pgdat. +	 * But perhaps, the pgdat has only hole data. Thus it check the pgdat +	 * has only hole or not. 
+	 */ +	pfn = pgdat_start_pfn; +	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) { +		ms = __pfn_to_section(pfn); + +		if (unlikely(!valid_section(ms))) +			continue; + +		if (pfn_to_nid(pfn) != nid) +			continue; + +		 /* If the section is current section, it continues the loop */ +		if (start_pfn == pfn) +			continue; + +		/* If we find valid section, we have nothing to do */ +		return; +	} + +	/* The pgdat has no valid section */ +	pgdat->node_start_pfn = 0; +	pgdat->node_spanned_pages = 0; +} + +static void __remove_zone(struct zone *zone, unsigned long start_pfn) +{ +	struct pglist_data *pgdat = zone->zone_pgdat; +	int nr_pages = PAGES_PER_SECTION; +	int zone_type; +	unsigned long flags; + +	zone_type = zone - pgdat->node_zones; + +	pgdat_resize_lock(zone->zone_pgdat, &flags); +	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); +	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages); +	pgdat_resize_unlock(zone->zone_pgdat, &flags); +} + +static int __remove_section(struct zone *zone, struct mem_section *ms) +{ +	unsigned long start_pfn; +	int scn_nr; +	int ret = -EINVAL; + +	if (!valid_section(ms)) +		return ret; + +	ret = unregister_memory_section(ms); +	if (ret) +		return ret; + +	scn_nr = __section_nr(ms); +	start_pfn = section_nr_to_pfn(scn_nr); +	__remove_zone(zone, start_pfn); + +	sparse_remove_one_section(zone, ms); +	return 0; +} +  /**   * __remove_pages() - remove sections of pages from a zone   * @zone: zone from which pages need to be removed @@ -320,8 +755,10 @@ EXPORT_SYMBOL_GPL(__add_pages);  int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,  		 unsigned long nr_pages)  { -	unsigned long i, ret = 0; +	unsigned long i;  	int sections_to_remove; +	resource_size_t start, size; +	int ret = 0;  	/*  	 * We can only remove entire sections @@ -329,11 +766,19 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,  	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);  	BUG_ON(nr_pages % PAGES_PER_SECTION); +	start = phys_start_pfn << PAGE_SHIFT; +	size = nr_pages * PAGE_SIZE; +	ret = release_mem_region_adjustable(&iomem_resource, start, size); +	if (ret) { +		resource_size_t endres = start + size - 1; + +		pr_warn("Unable to release resource <%pa-%pa> (%d)\n", +				&start, &endres, ret); +	} +  	sections_to_remove = nr_pages / PAGES_PER_SECTION;  	for (i = 0; i < sections_to_remove; i++) {  		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION; -		release_mem_region(pfn << PAGE_SHIFT, -				   PAGES_PER_SECTION << PAGE_SHIFT);  		ret = __remove_section(zone, __pfn_to_section(pfn));  		if (ret)  			break; @@ -341,27 +786,68 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,  	return ret;  }  EXPORT_SYMBOL_GPL(__remove_pages); +#endif /* CONFIG_MEMORY_HOTREMOVE */ -void online_page(struct page *page) +int set_online_page_callback(online_page_callback_t callback)  { -	unsigned long pfn = page_to_pfn(page); +	int rc = -EINVAL; -	totalram_pages++; -	if (pfn >= num_physpages) -		num_physpages = pfn + 1; +	get_online_mems(); +	mutex_lock(&online_page_callback_lock); -#ifdef CONFIG_HIGHMEM -	if (PageHighMem(page)) -		totalhigh_pages++; -#endif +	if (online_page_callback == generic_online_page) { +		online_page_callback = callback; +		rc = 0; +	} -#ifdef CONFIG_FLATMEM -	max_mapnr = max(page_to_pfn(page), max_mapnr); -#endif +	mutex_unlock(&online_page_callback_lock); +	put_online_mems(); + +	return rc; +} +EXPORT_SYMBOL_GPL(set_online_page_callback); -	ClearPageReserved(page); -	init_page_count(page); -	
__free_page(page); +int restore_online_page_callback(online_page_callback_t callback) +{ +	int rc = -EINVAL; + +	get_online_mems(); +	mutex_lock(&online_page_callback_lock); + +	if (online_page_callback == callback) { +		online_page_callback = generic_online_page; +		rc = 0; +	} + +	mutex_unlock(&online_page_callback_lock); +	put_online_mems(); + +	return rc; +} +EXPORT_SYMBOL_GPL(restore_online_page_callback); + +void __online_page_set_limits(struct page *page) +{ +} +EXPORT_SYMBOL_GPL(__online_page_set_limits); + +void __online_page_increment_counters(struct page *page) +{ +	adjust_managed_page_count(page, 1); +} +EXPORT_SYMBOL_GPL(__online_page_increment_counters); + +void __online_page_free(struct page *page) +{ +	__free_reserved_page(page); +} +EXPORT_SYMBOL_GPL(__online_page_free); + +static void generic_online_page(struct page *page) +{ +	__online_page_set_limits(page); +	__online_page_increment_counters(page); +	__online_page_free(page);  }  static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, @@ -373,16 +859,108 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,  	if (PageReserved(pfn_to_page(start_pfn)))  		for (i = 0; i < nr_pages; i++) {  			page = pfn_to_page(start_pfn + i); -			online_page(page); +			(*online_page_callback)(page);  			onlined_pages++;  		}  	*(unsigned long *)arg = onlined_pages;  	return 0;  } +#ifdef CONFIG_MOVABLE_NODE +/* + * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have + * normal memory. + */ +static bool can_online_high_movable(struct zone *zone) +{ +	return true; +} +#else /* CONFIG_MOVABLE_NODE */ +/* ensure every online node has NORMAL memory */ +static bool can_online_high_movable(struct zone *zone) +{ +	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY); +} +#endif /* CONFIG_MOVABLE_NODE */ -int online_pages(unsigned long pfn, unsigned long nr_pages) +/* check which state of node_states will be changed when online memory */ +static void node_states_check_changes_online(unsigned long nr_pages, +	struct zone *zone, struct memory_notify *arg)  { +	int nid = zone_to_nid(zone); +	enum zone_type zone_last = ZONE_NORMAL; + +	/* +	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] +	 * contains nodes which have zones of 0...ZONE_NORMAL, +	 * set zone_last to ZONE_NORMAL. +	 * +	 * If we don't have HIGHMEM nor movable node, +	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of +	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. +	 */ +	if (N_MEMORY == N_NORMAL_MEMORY) +		zone_last = ZONE_MOVABLE; + +	/* +	 * if the memory to be online is in a zone of 0...zone_last, and +	 * the zones of 0...zone_last don't have memory before online, we will +	 * need to set the node to node_states[N_NORMAL_MEMORY] after +	 * the memory is online. +	 */ +	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY)) +		arg->status_change_nid_normal = nid; +	else +		arg->status_change_nid_normal = -1; + +#ifdef CONFIG_HIGHMEM +	/* +	 * If we have movable node, node_states[N_HIGH_MEMORY] +	 * contains nodes which have zones of 0...ZONE_HIGHMEM, +	 * set zone_last to ZONE_HIGHMEM. +	 * +	 * If we don't have movable node, node_states[N_NORMAL_MEMORY] +	 * contains nodes which have zones of 0...ZONE_MOVABLE, +	 * set zone_last to ZONE_MOVABLE. 
+	 */ +	zone_last = ZONE_HIGHMEM; +	if (N_MEMORY == N_HIGH_MEMORY) +		zone_last = ZONE_MOVABLE; + +	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY)) +		arg->status_change_nid_high = nid; +	else +		arg->status_change_nid_high = -1; +#else +	arg->status_change_nid_high = arg->status_change_nid_normal; +#endif + +	/* +	 * if the node don't have memory befor online, we will need to +	 * set the node to node_states[N_MEMORY] after the memory +	 * is online. +	 */ +	if (!node_state(nid, N_MEMORY)) +		arg->status_change_nid = nid; +	else +		arg->status_change_nid = -1; +} + +static void node_states_set_node(int node, struct memory_notify *arg) +{ +	if (arg->status_change_nid_normal >= 0) +		node_set_state(node, N_NORMAL_MEMORY); + +	if (arg->status_change_nid_high >= 0) +		node_set_state(node, N_HIGH_MEMORY); + +	node_set_state(node, N_MEMORY); +} + + +int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type) +{ +	unsigned long flags;  	unsigned long onlined_pages = 0;  	struct zone *zone;  	int need_zonelists_rebuild = 0; @@ -390,68 +968,98 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)  	int ret;  	struct memory_notify arg; +	mem_hotplug_begin(); +	/* +	 * This doesn't need a lock to do pfn_to_page(). +	 * The section can't be removed here because of the +	 * memory_block->state_mutex. +	 */ +	zone = page_zone(pfn_to_page(pfn)); + +	ret = -EINVAL; +	if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) && +	    !can_online_high_movable(zone)) +		goto out; + +	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) { +		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) +			goto out; +	} +	if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) { +		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) +			goto out; +	} + +	/* Previous code may changed the zone of the pfn range */ +	zone = page_zone(pfn_to_page(pfn)); +  	arg.start_pfn = pfn;  	arg.nr_pages = nr_pages; -	arg.status_change_nid = -1; +	node_states_check_changes_online(nr_pages, zone, &arg); -	nid = page_to_nid(pfn_to_page(pfn)); -	if (node_present_pages(nid) == 0) -		arg.status_change_nid = nid; +	nid = pfn_to_nid(pfn);  	ret = memory_notify(MEM_GOING_ONLINE, &arg);  	ret = notifier_to_errno(ret);  	if (ret) {  		memory_notify(MEM_CANCEL_ONLINE, &arg); -		return ret; +		goto out;  	}  	/* -	 * This doesn't need a lock to do pfn_to_page(). -	 * The section can't be removed here because of the -	 * memory_block->state_mutex. -	 */ -	zone = page_zone(pfn_to_page(pfn)); -	/*  	 * If this zone is not populated, then it is not in zonelist.  	 * This means the page allocator ignores this zone.  	 * So, zonelist must be updated after online.  	 
*/  	mutex_lock(&zonelists_mutex); -	if (!populated_zone(zone)) +	if (!populated_zone(zone)) {  		need_zonelists_rebuild = 1; +		build_all_zonelists(NULL, zone); +	}  	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,  		online_pages_range);  	if (ret) { +		if (need_zonelists_rebuild) +			zone_pcp_reset(zone);  		mutex_unlock(&zonelists_mutex); -		printk(KERN_DEBUG "online_pages %lx at %lx failed\n", -			nr_pages, pfn); +		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n", +		       (unsigned long long) pfn << PAGE_SHIFT, +		       (((unsigned long long) pfn + nr_pages) +			    << PAGE_SHIFT) - 1);  		memory_notify(MEM_CANCEL_ONLINE, &arg); -		return ret; +		goto out;  	}  	zone->present_pages += onlined_pages; + +	pgdat_resize_lock(zone->zone_pgdat, &flags);  	zone->zone_pgdat->node_present_pages += onlined_pages; -	if (need_zonelists_rebuild) -		build_all_zonelists(zone); -	else -		zone_pcp_update(zone); +	pgdat_resize_unlock(zone->zone_pgdat, &flags); -	mutex_unlock(&zonelists_mutex); -	setup_per_zone_wmarks(); -	calculate_zone_inactive_ratio(zone);  	if (onlined_pages) { -		kswapd_run(zone_to_nid(zone)); -		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY); +		node_states_set_node(zone_to_nid(zone), &arg); +		if (need_zonelists_rebuild) +			build_all_zonelists(NULL, NULL); +		else +			zone_pcp_update(zone);  	} +	mutex_unlock(&zonelists_mutex); + +	init_per_zone_wmark_min(); + +	if (onlined_pages) +		kswapd_run(zone_to_nid(zone)); +  	vm_total_pages = nr_free_pagecache_pages();  	writeback_set_ratelimit();  	if (onlined_pages)  		memory_notify(MEM_ONLINE, &arg); - -	return 0; +out: +	mem_hotplug_done(); +	return ret;  }  #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ @@ -461,19 +1069,30 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)  	struct pglist_data *pgdat;  	unsigned long zones_size[MAX_NR_ZONES] = {0};  	unsigned long zholes_size[MAX_NR_ZONES] = {0}; -	unsigned long start_pfn = start >> PAGE_SHIFT; +	unsigned long start_pfn = PFN_DOWN(start); -	pgdat = arch_alloc_nodedata(nid); -	if (!pgdat) -		return NULL; +	pgdat = NODE_DATA(nid); +	if (!pgdat) { +		pgdat = arch_alloc_nodedata(nid); +		if (!pgdat) +			return NULL; -	arch_refresh_nodedata(nid, pgdat); +		arch_refresh_nodedata(nid, pgdat); +	}  	/* we can use NODE_DATA(nid) from here */  	/* init node's zones as empty zones, we don't have any present pages.*/  	free_area_init_node(nid, zones_size, start_pfn, zholes_size); +	/* +	 * The node we allocated has no zone fallback lists. For avoiding +	 * to access not-initialized zonelist, build here. +	 */ +	mutex_lock(&zonelists_mutex); +	build_all_zonelists(pgdat, NULL); +	mutex_unlock(&zonelists_mutex); +  	return pgdat;  } @@ -485,17 +1104,23 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)  } -/* +/** + * try_online_node - online a node if offlined + *   * called by cpu_up() to online a node without onlined memory.   
*/ -int mem_online_node(int nid) +int try_online_node(int nid)  {  	pg_data_t	*pgdat;  	int	ret; -	lock_system_sleep(); +	if (node_online(nid)) +		return 0; + +	mem_hotplug_begin();  	pgdat = hotadd_new_pgdat(nid, 0); -	if (pgdat) { +	if (!pgdat) { +		pr_err("Cannot online node %d due to NULL pgdat\n", nid);  		ret = -ENOMEM;  		goto out;  	} @@ -503,32 +1128,65 @@ int mem_online_node(int nid)  	ret = register_one_node(nid);  	BUG_ON(ret); +	if (pgdat->node_zonelists->_zonerefs->zone == NULL) { +		mutex_lock(&zonelists_mutex); +		build_all_zonelists(NULL, NULL); +		mutex_unlock(&zonelists_mutex); +	} +  out: -	unlock_system_sleep(); +	mem_hotplug_done();  	return ret;  } +static int check_hotplug_memory_range(u64 start, u64 size) +{ +	u64 start_pfn = PFN_DOWN(start); +	u64 nr_pages = size >> PAGE_SHIFT; + +	/* Memory range must be aligned with section */ +	if ((start_pfn & ~PAGE_SECTION_MASK) || +	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) { +		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n", +				(unsigned long long)start, +				(unsigned long long)size); +		return -EINVAL; +	} + +	return 0; +} +  /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */  int __ref add_memory(int nid, u64 start, u64 size)  {  	pg_data_t *pgdat = NULL; -	int new_pgdat = 0; +	bool new_pgdat; +	bool new_node;  	struct resource *res;  	int ret; -	lock_system_sleep(); +	ret = check_hotplug_memory_range(start, size); +	if (ret) +		return ret;  	res = register_memory_resource(start, size);  	ret = -EEXIST;  	if (!res) -		goto out; +		return ret; + +	{	/* Stupid hack to suppress address-never-null warning */ +		void *p = NODE_DATA(nid); +		new_pgdat = !p; +	} + +	mem_hotplug_begin(); -	if (!node_online(nid)) { +	new_node = !node_online(nid); +	if (new_node) {  		pgdat = hotadd_new_pgdat(nid, start);  		ret = -ENOMEM;  		if (!pgdat) -			goto out; -		new_pgdat = 1; +			goto error;  	}  	/* call arch's memory hotadd */ @@ -540,7 +1198,7 @@ int __ref add_memory(int nid, u64 start, u64 size)  	/* we online node here. we can't roll back from here. */  	node_set_online(nid); -	if (new_pgdat) { +	if (new_node) {  		ret = register_one_node(nid);  		/*  		 * If sysfs file of new node can't create, cpu on the node @@ -559,11 +1217,10 @@ error:  	/* rollback pgdat allocation and others */  	if (new_pgdat)  		rollback_node_hotadd(nid, pgdat); -	if (res) -		release_memory_resource(res); +	release_memory_resource(res);  out: -	unlock_system_sleep(); +	mem_hotplug_done();  	return ret;  }  EXPORT_SYMBOL_GPL(add_memory); @@ -643,10 +1300,12 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)  }  /* - * Scanning pfn is much easier than scanning lru list. - * Scan pfn from start to end and Find LRU page. + * Scan pfn range [start,end) to find movable/migratable pages (LRU pages + * and hugepages). We scan pfn because it's much easier than scanning over + * linked list. This function returns the pfn of the first found movable + * page if it's found, otherwise 0.   
*/ -static unsigned long scan_lru_pages(unsigned long start, unsigned long end) +static unsigned long scan_movable_pages(unsigned long start, unsigned long end)  {  	unsigned long pfn;  	struct page *page; @@ -655,18 +1314,18 @@ static unsigned long scan_lru_pages(unsigned long start, unsigned long end)  			page = pfn_to_page(pfn);  			if (PageLRU(page))  				return pfn; +			if (PageHuge(page)) { +				if (is_hugepage_active(page)) +					return pfn; +				else +					pfn = round_up(pfn + 1, +						1 << compound_order(page)) - 1; +			}  		}  	}  	return 0;  } -static struct page * -hotremove_migrate_alloc(struct page *page, unsigned long private, int **x) -{ -	/* This should be improooooved!! */ -	return alloc_page(GFP_HIGHUSER_MOVABLE); -} -  #define NR_OFFLINE_AT_ONCE_PAGES	(256)  static int  do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) @@ -682,7 +1341,20 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)  		if (!pfn_valid(pfn))  			continue;  		page = pfn_to_page(pfn); -		if (!page_count(page)) + +		if (PageHuge(page)) { +			struct page *head = compound_head(page); +			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1; +			if (compound_order(head) > PFN_SECTION_SHIFT) { +				ret = -EBUSY; +				break; +			} +			if (isolate_huge_page(page, &source)) +				move_pages -= 1 << compound_order(head); +			continue; +		} + +		if (!get_page_unless_zero(page))  			continue;  		/*  		 * We can skip free pages. And we can only deal with pages on @@ -690,6 +1362,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)  		 */  		ret = isolate_lru_page(page);  		if (!ret) { /* Success */ +			put_page(page);  			list_add_tail(&page->lru, &source);  			move_pages--;  			inc_zone_page_state(page, NR_ISOLATED_ANON + @@ -699,9 +1372,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)  #ifdef CONFIG_DEBUG_VM  			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",  			       pfn); -			dump_page(page); +			dump_page(page, "failed to remove from LRU");  #endif -			/* Becasue we don't have big zone->lock. we should +			put_page(page); +			/* Because we don't have big zone->lock. we should  			   check this again here. */  			if (page_count(page)) {  				not_managed++; @@ -712,13 +1386,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)  	}  	if (!list_empty(&source)) {  		if (not_managed) { -			putback_lru_pages(&source); +			putback_movable_pages(&source);  			goto out;  		} -		/* this function returns # of failed pages */ -		ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1); + +		/* +		 * alloc_migrate_target should be improooooved!! +		 * migrate_pages returns # of failed pages. +		 */ +		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0, +					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);  		if (ret) -			putback_lru_pages(&source); +			putback_movable_pages(&source);  	}  out:  	return ret; @@ -751,7 +1430,7 @@ check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,  {  	int ret;  	long offlined = *(long *)data; -	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages); +	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);  	offlined = nr_pages;  	if (!ret)  		*(long *)data += offlined; @@ -771,16 +1450,173 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)  	return offlined;  } -static int offline_pages(unsigned long start_pfn, +#ifdef CONFIG_MOVABLE_NODE +/* + * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have + * normal memory. 
+ */ +static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) +{ +	return true; +} +#else /* CONFIG_MOVABLE_NODE */ +/* ensure the node has NORMAL memory if it is still online */ +static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) +{ +	struct pglist_data *pgdat = zone->zone_pgdat; +	unsigned long present_pages = 0; +	enum zone_type zt; + +	for (zt = 0; zt <= ZONE_NORMAL; zt++) +		present_pages += pgdat->node_zones[zt].present_pages; + +	if (present_pages > nr_pages) +		return true; + +	present_pages = 0; +	for (; zt <= ZONE_MOVABLE; zt++) +		present_pages += pgdat->node_zones[zt].present_pages; + +	/* +	 * we can't offline the last normal memory until all +	 * higher memory is offlined. +	 */ +	return present_pages == 0; +} +#endif /* CONFIG_MOVABLE_NODE */ + +static int __init cmdline_parse_movable_node(char *p) +{ +#ifdef CONFIG_MOVABLE_NODE +	/* +	 * Memory used by the kernel cannot be hot-removed because Linux +	 * cannot migrate the kernel pages. When memory hotplug is +	 * enabled, we should prevent memblock from allocating memory +	 * for the kernel. +	 * +	 * ACPI SRAT records all hotpluggable memory ranges. But before +	 * SRAT is parsed, we don't know about it. +	 * +	 * The kernel image is loaded into memory at very early time. We +	 * cannot prevent this anyway. So on NUMA system, we set any +	 * node the kernel resides in as un-hotpluggable. +	 * +	 * Since on modern servers, one node could have double-digit +	 * gigabytes memory, we can assume the memory around the kernel +	 * image is also un-hotpluggable. So before SRAT is parsed, just +	 * allocate memory near the kernel image to try the best to keep +	 * the kernel away from hotpluggable memory. +	 */ +	memblock_set_bottom_up(true); +	movable_node_enabled = true; +#else +	pr_warn("movable_node option not supported\n"); +#endif +	return 0; +} +early_param("movable_node", cmdline_parse_movable_node); + +/* check which state of node_states will be changed when offline memory */ +static void node_states_check_changes_offline(unsigned long nr_pages, +		struct zone *zone, struct memory_notify *arg) +{ +	struct pglist_data *pgdat = zone->zone_pgdat; +	unsigned long present_pages = 0; +	enum zone_type zt, zone_last = ZONE_NORMAL; + +	/* +	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] +	 * contains nodes which have zones of 0...ZONE_NORMAL, +	 * set zone_last to ZONE_NORMAL. +	 * +	 * If we don't have HIGHMEM nor movable node, +	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of +	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. +	 */ +	if (N_MEMORY == N_NORMAL_MEMORY) +		zone_last = ZONE_MOVABLE; + +	/* +	 * check whether node_states[N_NORMAL_MEMORY] will be changed. +	 * If the memory to be offline is in a zone of 0...zone_last, +	 * and it is the last present memory, 0...zone_last will +	 * become empty after offline , thus we can determind we will +	 * need to clear the node from node_states[N_NORMAL_MEMORY]. +	 */ +	for (zt = 0; zt <= zone_last; zt++) +		present_pages += pgdat->node_zones[zt].present_pages; +	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) +		arg->status_change_nid_normal = zone_to_nid(zone); +	else +		arg->status_change_nid_normal = -1; + +#ifdef CONFIG_HIGHMEM +	/* +	 * If we have movable node, node_states[N_HIGH_MEMORY] +	 * contains nodes which have zones of 0...ZONE_HIGHMEM, +	 * set zone_last to ZONE_HIGHMEM. 
+	 * +	 * If we don't have movable node, node_states[N_NORMAL_MEMORY] +	 * contains nodes which have zones of 0...ZONE_MOVABLE, +	 * set zone_last to ZONE_MOVABLE. +	 */ +	zone_last = ZONE_HIGHMEM; +	if (N_MEMORY == N_HIGH_MEMORY) +		zone_last = ZONE_MOVABLE; + +	for (; zt <= zone_last; zt++) +		present_pages += pgdat->node_zones[zt].present_pages; +	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) +		arg->status_change_nid_high = zone_to_nid(zone); +	else +		arg->status_change_nid_high = -1; +#else +	arg->status_change_nid_high = arg->status_change_nid_normal; +#endif + +	/* +	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE +	 */ +	zone_last = ZONE_MOVABLE; + +	/* +	 * check whether node_states[N_HIGH_MEMORY] will be changed +	 * If we try to offline the last present @nr_pages from the node, +	 * we can determind we will need to clear the node from +	 * node_states[N_HIGH_MEMORY]. +	 */ +	for (; zt <= zone_last; zt++) +		present_pages += pgdat->node_zones[zt].present_pages; +	if (nr_pages >= present_pages) +		arg->status_change_nid = zone_to_nid(zone); +	else +		arg->status_change_nid = -1; +} + +static void node_states_clear_node(int node, struct memory_notify *arg) +{ +	if (arg->status_change_nid_normal >= 0) +		node_clear_state(node, N_NORMAL_MEMORY); + +	if ((N_MEMORY != N_NORMAL_MEMORY) && +	    (arg->status_change_nid_high >= 0)) +		node_clear_state(node, N_HIGH_MEMORY); + +	if ((N_MEMORY != N_HIGH_MEMORY) && +	    (arg->status_change_nid >= 0)) +		node_clear_state(node, N_MEMORY); +} + +static int __ref __offline_pages(unsigned long start_pfn,  		  unsigned long end_pfn, unsigned long timeout)  {  	unsigned long pfn, nr_pages, expire;  	long offlined_pages;  	int ret, drain, retry_max, node; +	unsigned long flags;  	struct zone *zone;  	struct memory_notify arg; -	BUG_ON(start_pfn >= end_pfn);  	/* at least, alignment against pageblock is necessary */  	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))  		return -EINVAL; @@ -791,22 +1627,25 @@ static int offline_pages(unsigned long start_pfn,  	if (!test_pages_in_a_zone(start_pfn, end_pfn))  		return -EINVAL; -	lock_system_sleep(); +	mem_hotplug_begin();  	zone = page_zone(pfn_to_page(start_pfn));  	node = zone_to_nid(zone);  	nr_pages = end_pfn - start_pfn; +	ret = -EINVAL; +	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages)) +		goto out; +  	/* set above range as isolated */ -	ret = start_isolate_page_range(start_pfn, end_pfn); +	ret = start_isolate_page_range(start_pfn, end_pfn, +				       MIGRATE_MOVABLE, true);  	if (ret)  		goto out;  	arg.start_pfn = start_pfn;  	arg.nr_pages = nr_pages; -	arg.status_change_nid = -1; -	if (nr_pages >= node_present_pages(node)) -		arg.status_change_nid = node; +	node_states_check_changes_offline(nr_pages, zone, &arg);  	ret = memory_notify(MEM_GOING_OFFLINE, &arg);  	ret = notifier_to_errno(ret); @@ -832,8 +1671,8 @@ repeat:  		drain_all_pages();  	} -	pfn = scan_lru_pages(start_pfn, end_pfn); -	if (pfn) { /* We have page on LRU */ +	pfn = scan_movable_pages(start_pfn, end_pfn); +	if (pfn) { /* We have movable pages */  		ret = do_migrate_range(pfn, end_pfn);  		if (!ret) {  			drain = 1; @@ -847,11 +1686,16 @@ repeat:  			goto repeat;  		}  	} -	/* drain all zone's lru pagevec, this is asyncronous... */ +	/* drain all zone's lru pagevec, this is asynchronous... */  	lru_add_drain_all();  	yield(); -	/* drain pcp pages , this is synchrouns. */ +	/* drain pcp pages, this is synchronous. 
*/  	drain_all_pages(); +	/* +	 * dissolve free hugepages in the memory block before doing offlining +	 * actually in order to make hugetlbfs's object counting consistent. +	 */ +	dissolve_free_huge_pages(start_pfn, end_pfn);  	/* check again */  	offlined_pages = check_pages_isolated(start_pfn, end_pfn);  	if (offlined_pages < 0) { @@ -859,54 +1703,273 @@ repeat:  		goto failed_removal;  	}  	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages); -	/* Ok, all of our target is islaoted. +	/* Ok, all of our target is isolated.  	   We cannot do rollback at this point. */  	offline_isolated_pages(start_pfn, end_pfn);  	/* reset pagetype flags and makes migrate type to be MOVABLE */ -	undo_isolate_page_range(start_pfn, end_pfn); +	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);  	/* removal success */ +	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);  	zone->present_pages -= offlined_pages; + +	pgdat_resize_lock(zone->zone_pgdat, &flags);  	zone->zone_pgdat->node_present_pages -= offlined_pages; -	totalram_pages -= offlined_pages; +	pgdat_resize_unlock(zone->zone_pgdat, &flags); -	setup_per_zone_wmarks(); -	calculate_zone_inactive_ratio(zone); -	if (!node_present_pages(node)) { -		node_clear_state(node, N_HIGH_MEMORY); +	init_per_zone_wmark_min(); + +	if (!populated_zone(zone)) { +		zone_pcp_reset(zone); +		mutex_lock(&zonelists_mutex); +		build_all_zonelists(NULL, NULL); +		mutex_unlock(&zonelists_mutex); +	} else +		zone_pcp_update(zone); + +	node_states_clear_node(node, &arg); +	if (arg.status_change_nid >= 0)  		kswapd_stop(node); -	}  	vm_total_pages = nr_free_pagecache_pages();  	writeback_set_ratelimit();  	memory_notify(MEM_OFFLINE, &arg); -	unlock_system_sleep(); +	mem_hotplug_done();  	return 0;  failed_removal: -	printk(KERN_INFO "memory offlining %lx to %lx failed\n", -		start_pfn, end_pfn); +	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n", +	       (unsigned long long) start_pfn << PAGE_SHIFT, +	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);  	memory_notify(MEM_CANCEL_OFFLINE, &arg);  	/* pushback to free area */ -	undo_isolate_page_range(start_pfn, end_pfn); +	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);  out: -	unlock_system_sleep(); +	mem_hotplug_done();  	return ret;  } -int remove_memory(u64 start, u64 size) +int offline_pages(unsigned long start_pfn, unsigned long nr_pages) +{ +	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ); +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ + +/** + * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn) + * @start_pfn: start pfn of the memory range + * @end_pfn: end pfn of the memory range + * @arg: argument passed to func + * @func: callback for each memory section walked + * + * This function walks through all present mem sections in range + * [start_pfn, end_pfn) and call func on each mem section. + * + * Returns the return value of func. + */ +int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, +		void *arg, int (*func)(struct memory_block *, void *))  { -	unsigned long start_pfn, end_pfn; +	struct memory_block *mem = NULL; +	struct mem_section *section; +	unsigned long pfn, section_nr; +	int ret; + +	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { +		section_nr = pfn_to_section_nr(pfn); +		if (!present_section_nr(section_nr)) +			continue; + +		section = __nr_to_section(section_nr); +		/* same memblock? 
*/ +		if (mem) +			if ((section_nr >= mem->start_section_nr) && +			    (section_nr <= mem->end_section_nr)) +				continue; + +		mem = find_memory_block_hinted(section, mem); +		if (!mem) +			continue; + +		ret = func(mem, arg); +		if (ret) { +			kobject_put(&mem->dev.kobj); +			return ret; +		} +	} -	start_pfn = PFN_DOWN(start); -	end_pfn = start_pfn + PFN_DOWN(size); -	return offline_pages(start_pfn, end_pfn, 120 * HZ); +	if (mem) +		kobject_put(&mem->dev.kobj); + +	return 0;  } -#else -int remove_memory(u64 start, u64 size) + +#ifdef CONFIG_MEMORY_HOTREMOVE +static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)  { -	return -EINVAL; +	int ret = !is_memblock_offlined(mem); + +	if (unlikely(ret)) { +		phys_addr_t beginpa, endpa; + +		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); +		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1; +		pr_warn("removing memory fails, because memory " +			"[%pa-%pa] is onlined\n", +			&beginpa, &endpa); +	} + +	return ret; +} + +static int check_cpu_on_node(pg_data_t *pgdat) +{ +	int cpu; + +	for_each_present_cpu(cpu) { +		if (cpu_to_node(cpu) == pgdat->node_id) +			/* +			 * the cpu on this node isn't removed, and we can't +			 * offline this node. +			 */ +			return -EBUSY; +	} + +	return 0; +} + +static void unmap_cpu_on_node(pg_data_t *pgdat) +{ +#ifdef CONFIG_ACPI_NUMA +	int cpu; + +	for_each_possible_cpu(cpu) +		if (cpu_to_node(cpu) == pgdat->node_id) +			numa_clear_node(cpu); +#endif +} + +static int check_and_unmap_cpu_on_node(pg_data_t *pgdat) +{ +	int ret; + +	ret = check_cpu_on_node(pgdat); +	if (ret) +		return ret; + +	/* +	 * the node will be offlined when we come here, so we can clear +	 * the cpu_to_node() now. +	 */ + +	unmap_cpu_on_node(pgdat); +	return 0; +} + +/** + * try_offline_node + * + * Offline a node if all memory sections and cpus of the node are removed. + * + * NOTE: The caller must call lock_device_hotplug() to serialize hotplug + * and online/offline operations before this call. + */ +void try_offline_node(int nid) +{ +	pg_data_t *pgdat = NODE_DATA(nid); +	unsigned long start_pfn = pgdat->node_start_pfn; +	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; +	unsigned long pfn; +	struct page *pgdat_page = virt_to_page(pgdat); +	int i; + +	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { +		unsigned long section_nr = pfn_to_section_nr(pfn); + +		if (!present_section_nr(section_nr)) +			continue; + +		if (pfn_to_nid(pfn) != nid) +			continue; + +		/* +		 * some memory sections of this node are not removed, and we +		 * can't offline node now. +		 */ +		return; +	} + +	if (check_and_unmap_cpu_on_node(pgdat)) +		return; + +	/* +	 * all memory/cpu of this node are removed, we can offline this +	 * node now. +	 */ +	node_set_offline(nid); +	unregister_one_node(nid); + +	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page)) +		/* node data is allocated from boot memory */ +		return; + +	/* free waittable in each zone */ +	for (i = 0; i < MAX_NR_ZONES; i++) { +		struct zone *zone = pgdat->node_zones + i; + +		/* +		 * wait_table may be allocated from boot memory, +		 * here only free if it's allocated by vmalloc. +		 */ +		if (is_vmalloc_addr(zone->wait_table)) +			vfree(zone->wait_table); +	} + +	/* +	 * Since there is no way to guarentee the address of pgdat/zone is not +	 * on stack of any kernel threads or used by other kernel objects +	 * without reference counting or other symchronizing method, do not +	 * reset node_data and free pgdat here. 
Just reset it to 0 and reuse +	 * the memory when the node is online again. +	 */ +	memset(pgdat, 0, sizeof(*pgdat)); +} +EXPORT_SYMBOL(try_offline_node); + +/** + * remove_memory + * + * NOTE: The caller must call lock_device_hotplug() to serialize hotplug + * and online/offline operations before this call, as required by + * try_offline_node(). + */ +void __ref remove_memory(int nid, u64 start, u64 size) +{ +	int ret; + +	BUG_ON(check_hotplug_memory_range(start, size)); + +	mem_hotplug_begin(); + +	/* +	 * All memory blocks must be offlined before removing memory.  Check +	 * whether all memory blocks in question are offline and trigger a BUG() +	 * if this is not the case. +	 */ +	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL, +				check_memblock_offlined_cb); +	if (ret) +		BUG(); + +	/* remove memmap entry */ +	firmware_map_remove(start, start + size, "System RAM"); + +	arch_remove_memory(start, size); + +	try_offline_node(nid); + +	mem_hotplug_done();  } -#endif /* CONFIG_MEMORY_HOTREMOVE */  EXPORT_SYMBOL_GPL(remove_memory); +#endif /* CONFIG_MEMORY_HOTREMOVE */  | 