Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--  mm/swap_state.c  207
1 file changed, 153 insertions(+), 54 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e10f5833167..2972eee184a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -6,7 +6,6 @@
  *
  *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  */
-#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/kernel_stat.h>
@@ -14,8 +13,8 @@
 #include <linux/swapops.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
-#include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
+#include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/migrate.h>
 #include <linux/page_cgroup.h>
@@ -24,28 +23,25 @@
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
- * future use of radix_tree tags in the swap cache.
+ * vmscan's shrink_page_list.
  */
 static const struct address_space_operations swap_aops = {
 	.writepage	= swap_writepage,
-	.sync_page	= block_sync_page,
-	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.set_page_dirty	= swap_set_page_dirty,
 	.migratepage	= migrate_page,
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
 	.name		= "swap",
 	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-	.unplug_io_fn	= swap_unplug_io_fn,
 };
 
-struct address_space swapper_space = {
-	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
-	.a_ops		= &swap_aops,
-	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
-	.backing_dev_info = &swap_backing_dev_info,
+struct address_space swapper_spaces[MAX_SWAPFILES] = {
+	[0 ... MAX_SWAPFILES - 1] = {
+		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
+		.a_ops		= &swap_aops,
+		.backing_dev_info = &swap_backing_dev_info,
+	}
 };
 
 #define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
@@ -57,13 +53,26 @@ static struct {
 	unsigned long find_total;
 } swap_cache_info;
 
+unsigned long total_swapcache_pages(void)
+{
+	int i;
+	unsigned long ret = 0;
+
+	for (i = 0; i < MAX_SWAPFILES; i++)
+		ret += swapper_spaces[i].nrpages;
+	return ret;
+}
+
+static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+
 void show_swap_cache_info(void)
 {
-	printk("%lu pages in swap cache\n", total_swapcache_pages);
+	printk("%lu pages in swap cache\n", total_swapcache_pages());
 	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
 		swap_cache_info.add_total, swap_cache_info.del_total,
 		swap_cache_info.find_success, swap_cache_info.find_total);
-	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
+	printk("Free swap  = %ldkB\n",
+		get_nr_swap_pages() << (PAGE_SHIFT - 10));
 	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
@@ -71,26 +80,29 @@ void show_swap_cache_info(void)
  * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
 	int error;
+	struct address_space *address_space;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageSwapCache(page));
-	VM_BUG_ON(!PageSwapBacked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(PageSwapCache(page), page);
+	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 	page_cache_get(page);
 	SetPageSwapCache(page);
 	set_page_private(page, entry.val);
 
-	spin_lock_irq(&swapper_space.tree_lock);
-	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
+	address_space = swap_address_space(entry);
+	spin_lock_irq(&address_space->tree_lock);
+	error = radix_tree_insert(&address_space->page_tree,
+					entry.val, page);
 	if (likely(!error)) {
-		total_swapcache_pages++;
+		address_space->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
 		INC_CACHE_INFO(add_total);
 	}
-	spin_unlock_irq(&swapper_space.tree_lock);
+	spin_unlock_irq(&address_space->tree_lock);
 
 	if (unlikely(error)) {
 		/*
@@ -112,7 +124,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 {
 	int error;
 
-	error = radix_tree_preload(gfp_mask);
+	error = radix_tree_maybe_preload(gfp_mask);
 	if (!error) {
 		error = __add_to_swap_cache(page, entry);
 		radix_tree_preload_end();
@@ -126,14 +138,19 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
  */
 void __delete_from_swap_cache(struct page *page)
 {
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(!PageSwapCache(page));
-	VM_BUG_ON(PageWriteback(page));
+	swp_entry_t entry;
+	struct address_space *address_space;
 
-	radix_tree_delete(&swapper_space.page_tree, page_private(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+	VM_BUG_ON_PAGE(PageWriteback(page), page);
+
+	entry.val = page_private(page);
+	address_space = swap_address_space(entry);
+	radix_tree_delete(&address_space->page_tree, page_private(page));
 	set_page_private(page, 0);
 	ClearPageSwapCache(page);
-	total_swapcache_pages--;
+	address_space->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
 }
@@ -145,18 +162,24 @@ void __delete_from_swap_cache(struct page *page)
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock. 
  */
-int add_to_swap(struct page *page)
+int add_to_swap(struct page *page, struct list_head *list)
 {
 	swp_entry_t entry;
 	int err;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(!PageUptodate(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageUptodate(page), page);
 
 	entry = get_swap_page();
 	if (!entry.val)
 		return 0;
 
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page_to_list(page, list))) {
+			swapcache_free(entry, NULL);
+			return 0;
+		}
+
 	/*
 	 * Radix-tree node allocations from PF_MEMALLOC contexts could
 	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
@@ -193,12 +216,14 @@ int add_to_swap(struct page *page)
 void delete_from_swap_cache(struct page *page)
 {
 	swp_entry_t entry;
+	struct address_space *address_space;
 
 	entry.val = page_private(page);
-	spin_lock_irq(&swapper_space.tree_lock);
+	address_space = swap_address_space(entry);
+	spin_lock_irq(&address_space->tree_lock);
 	__delete_from_swap_cache(page);
-	spin_unlock_irq(&swapper_space.tree_lock);
+	spin_unlock_irq(&address_space->tree_lock);
 
 	swapcache_free(entry, page);
 	page_cache_release(page);
 }
@@ -245,7 +270,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
 
 		for (i = 0; i < todo; i++)
 			free_swap_cache(pagep[i]);
-		release_pages(pagep, todo, 0);
+		release_pages(pagep, todo, false);
 		pagep += todo;
 		nr -= todo;
 	}
@@ -261,10 +286,13 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 {
 	struct page *page;
 
-	page = find_get_page(&swapper_space, entry.val);
+	page = find_get_page(swap_address_space(entry), entry.val);
 
-	if (page)
+	if (page) {
 		INC_CACHE_INFO(find_success);
+		if (TestClearPageReadahead(page))
+			atomic_inc(&swapin_readahead_hits);
+	}
 
 	INC_CACHE_INFO(find_total);
 	return page;
@@ -288,7 +316,8 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * called after lookup_swap_cache() failed, re-calling
 		 * that would confuse statistics.
 		 */
-		found_page = find_get_page(&swapper_space, entry.val);
+		found_page = find_get_page(swap_address_space(entry),
+					entry.val);
 		if (found_page)
 			break;
 
@@ -304,7 +333,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		/*
 		 * call radix_tree_preload() while we can wait.
 		 */
-		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
 		if (err)
 			break;
 
@@ -312,8 +341,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * Swap entry may have been freed since our caller observed it.
 		 */
 		err = swapcache_prepare(entry);
-		if (err == -EEXIST) {	/* seems racy */
+		if (err == -EEXIST) {
 			radix_tree_preload_end();
+			/*
+			 * We might race against get_swap_page() and stumble
+			 * across a SWAP_HAS_CACHE swap_map entry whose page
+			 * has not been brought into the swapcache yet, while
+			 * the other end is scheduled away waiting on discard
+			 * I/O completion at scan_swap_map().
+			 *
+			 * In order to avoid turning this transitory state
+			 * into a permanent loop around this -EEXIST case
+			 * if !CONFIG_PREEMPT and the I/O completion happens
+			 * to be waiting on the CPU waitqueue where we are now
+			 * busy looping, we just conditionally invoke the
+			 * scheduler here, if there are some more important
+			 * tasks to run.
+			 */
+			cond_resched();
 			continue;
 		}
 		if (err) {		/* swp entry is obsolete ? */
@@ -349,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	return found_page;
 }
 
+static unsigned long swapin_nr_pages(unsigned long offset)
+{
+	static unsigned long prev_offset;
+	unsigned int pages, max_pages, last_ra;
+	static atomic_t last_readahead_pages;
+
+	max_pages = 1 << ACCESS_ONCE(page_cluster);
+	if (max_pages <= 1)
+		return 1;
+
+	/*
+	 * This heuristic has been found to work well on both sequential and
+	 * random loads, swapping to hard disk or to SSD: please don't ask
+	 * what the "+ 2" means, it just happens to work well, that's all.
+	 */
+	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
+	if (pages == 2) {
+		/*
+		 * We can have no readahead hits to judge by: but must not get
+		 * stuck here forever, so check for an adjacent offset instead
+		 * (and don't even bother to check whether swap type is same).
+		 */
+		if (offset != prev_offset + 1 && offset != prev_offset - 1)
+			pages = 1;
+		prev_offset = offset;
+	} else {
+		unsigned int roundup = 4;
+		while (roundup < pages)
+			roundup <<= 1;
+		pages = roundup;
+	}
+
+	if (pages > max_pages)
+		pages = max_pages;
+
+	/* Don't shrink readahead too fast */
+	last_ra = atomic_read(&last_readahead_pages) / 2;
+	if (pages < last_ra)
+		pages = last_ra;
+	atomic_set(&last_readahead_pages, pages);
+
+	return pages;
+}
+
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
@@ -371,27 +460,37 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	int nr_pages;
 	struct page *page;
-	unsigned long offset;
-	unsigned long end_offset;
-
-	/*
-	 * Get starting offset for readaround, and number of pages to read.
-	 * Adjust starting address by readbehind (for NUMA interleave case)?
-	 * No, it's very unlikely that swap layout would follow vma layout,
-	 * more likely that neighbouring swap pages came from the same node:
-	 * so use the same "addr" to choose the same node for each swap read.
-	 */
-	nr_pages = valid_swaphandles(entry, &offset);
-	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
+	unsigned long entry_offset = swp_offset(entry);
+	unsigned long offset = entry_offset;
+	unsigned long start_offset, end_offset;
+	unsigned long mask;
+	struct blk_plug plug;
+
+	mask = swapin_nr_pages(offset) - 1;
+	if (!mask)
+		goto skip;
+
+	/* Read a page_cluster sized and aligned cluster around offset. */
+	start_offset = offset & ~mask;
+	end_offset = offset | mask;
+	if (!start_offset)	/* First page is swap header. */
+		start_offset++;
+
+	blk_start_plug(&plug);
+	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
 		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
 						gfp_mask, vma, addr);
 		if (!page)
-			break;
+			continue;
+		if (offset != entry_offset)
+			SetPageReadahead(page);
 		page_cache_release(page);
 	}
+	blk_finish_plug(&plug);
+
 	lru_add_drain();	/* Push any new pages onto the LRU now */
+skip:
 	return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
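Note: the lookups in this diff go through swap_address_space() rather than
the old global swapper_space. That helper is not defined in this file; in
this era of the tree it lives in include/linux/swap.h. A minimal sketch of
the mapping the code above assumes (verify against the header; reproduced
here only for reference):

	/*
	 * Each swap "type" (each swapfile or swap partition) gets its own
	 * address_space, so each gets its own radix tree and tree_lock,
	 * instead of every swap entry contending on one global lock.
	 */
	#define swap_address_space(entry) \
		(&swapper_spaces[swp_type(entry)])

This is also why total_swapcache_pages() above must sum nrpages across all
MAX_SWAPFILES entries rather than read a single counter.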
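Note: the readahead window computed at the end of swapin_readahead() is
easiest to follow with concrete numbers. The standalone walk-through below
compiles as ordinary userspace C; the offset, hit count and page_cluster
value are made up, and swapin_nr_pages_demo() is a simplified model of the
heuristic above (the prev_offset adjacency fallback and the "don't shrink
readahead too fast" damping are omitted):

	#include <stdio.h>

	/* Simplified model of swapin_nr_pages(): hits + 2, rounded up to a
	 * power of two, clamped to 1 << page_cluster. */
	static unsigned long swapin_nr_pages_demo(unsigned long hits,
						  unsigned int page_cluster)
	{
		unsigned int pages, max_pages;

		max_pages = 1 << page_cluster;
		if (max_pages <= 1)
			return 1;

		pages = hits + 2;
		if (pages > 2) {
			unsigned int roundup = 4;
			while (roundup < pages)
				roundup <<= 1;
			pages = roundup;
		}
		if (pages > max_pages)
			pages = max_pages;
		return pages;
	}

	int main(void)
	{
		unsigned long offset = 0x2f3;	/* hypothetical faulting offset */
		unsigned long pages = swapin_nr_pages_demo(5, 3); /* 5+2=7 -> 8 */
		unsigned long mask = pages - 1;		/* 0x7 */
		unsigned long start = offset & ~mask;	/* 0x2f0: aligned start */
		unsigned long end = offset | mask;	/* 0x2f7: aligned end */

		if (!start)	/* offset 0 holds the swap header */
			start++;

		printf("read offsets %#lx..%#lx (%lu pages) around %#lx\n",
		       start, end, pages, offset);
		return 0;
	}

Because the window is aligned rather than centred on the fault, repeated
faults inside one cluster cover the same aligned span, and only the pages
other than the faulting one are flagged with SetPageReadahead(); a later
hit on such a page is what increments swapin_readahead_hits and grows the
next window.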
