| author | Ingo Molnar <mingo@elte.hu> | 2009-05-11 14:44:27 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-05-11 14:44:31 +0200 |
| commit | 41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51 | |
| tree | 51c50bcb67a5039448ddfa1869d7948cab1217e9 /fs/btrfs/extent_io.c | |
| parent | 19c1a6f5764d787113fa323ffb18be7991208f82 | |
| parent | 091bf7624d1c90cec9e578a18529f615213ff847 | |
Merge commit 'v2.6.30-rc5' into core/iommu
Merge reason: core/iommu was on an .30-rc1 base,
              update it to .30-rc5 to refresh.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/btrfs/extent_io.c')
| -rw-r--r-- | fs/btrfs/extent_io.c | 167 |
|---|---|---|
1 file changed, 69 insertions, 98 deletions
```diff
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eb2bee8b7fb..fe9eb990e44 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -17,12 +17,6 @@
 #include "ctree.h"
 #include "btrfs_inode.h"
 
-/* temporary define until extent_map moves out of btrfs */
-struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
-				       unsigned long extra_flags,
-				       void (*ctor)(void *, struct kmem_cache *,
-						    unsigned long));
-
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
@@ -50,20 +44,23 @@ struct extent_page_data {
 	/* tells writepage not to lock the state bits for this range
 	 * it still does the unlocking
 	 */
-	int extent_locked;
+	unsigned int extent_locked:1;
+
+	/* tells the submit_bio code to use a WRITE_SYNC */
+	unsigned int sync_io:1;
 };
 
 int __init extent_io_init(void)
 {
-	extent_state_cache = btrfs_cache_create("extent_state",
-					    sizeof(struct extent_state), 0,
-					    NULL);
+	extent_state_cache = kmem_cache_create("extent_state",
+			sizeof(struct extent_state), 0,
+			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 	if (!extent_state_cache)
 		return -ENOMEM;
 
-	extent_buffer_cache = btrfs_cache_create("extent_buffers",
-					    sizeof(struct extent_buffer), 0,
-					    NULL);
+	extent_buffer_cache = kmem_cache_create("extent_buffers",
+			sizeof(struct extent_buffer), 0,
+			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 	if (!extent_buffer_cache)
 		goto free_state_cache;
 	return 0;
@@ -1404,69 +1401,6 @@ out:
 	return total_bytes;
 }
 
-#if 0
-/*
- * helper function to lock both pages and extents in the tree.
- * pages must be locked first.
- */
-static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-	int err;
-
-	while (index <= end_index) {
-		page = grab_cache_page(tree->mapping, index);
-		if (!page) {
-			err = -ENOMEM;
-			goto failed;
-		}
-		if (IS_ERR(page)) {
-			err = PTR_ERR(page);
-			goto failed;
-		}
-		index++;
-	}
-	lock_extent(tree, start, end, GFP_NOFS);
-	return 0;
-
-failed:
-	/*
-	 * we failed above in getting the page at 'index', so we undo here
-	 * up to but not including the page at 'index'
-	 */
-	end_index = index;
-	index = start >> PAGE_CACHE_SHIFT;
-	while (index < end_index) {
-		page = find_get_page(tree->mapping, index);
-		unlock_page(page);
-		page_cache_release(page);
-		index++;
-	}
-	return err;
-}
-
-/*
- * helper function to unlock both pages and extents in the tree.
- */
-static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		unlock_page(page);
-		page_cache_release(page);
-		index++;
-	}
-	unlock_extent(tree, start, end, GFP_NOFS);
-	return 0;
-}
-#endif
-
 /*
  * set the private field for a given byte offset in the tree.  If there isn't
  * an extent_state there already, this does nothing.
@@ -2101,6 +2035,16 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 	return ret;
 }
 
+static noinline void update_nr_written(struct page *page,
+				      struct writeback_control *wbc,
+				      unsigned long nr_written)
+{
+	wbc->nr_to_write -= nr_written;
+	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
+	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
+		page->mapping->writeback_index = page->index + nr_written;
+}
+
 /*
  * the writepage semantics are similar to regular writepage.  extent
  * records are inserted to lock ranges in the tree, and as dirty areas
@@ -2136,8 +2080,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	u64 delalloc_end;
 	int page_started;
 	int compressed;
+	int write_flags;
 	unsigned long nr_written = 0;
 
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		write_flags = WRITE_SYNC_PLUG;
+	else
+		write_flags = WRITE;
+
 	WARN_ON(!PageLocked(page));
 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 	if (page->index > end_index ||
@@ -2164,6 +2114,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	delalloc_end = 0;
 	page_started = 0;
 	if (!epd->extent_locked) {
+		/*
+		 * make sure the wbc mapping index is at least updated
+		 * to this page.
+		 */
+		update_nr_written(page, wbc, 0);
+
 		while (delalloc_end < page_end) {
 			nr_delalloc = find_lock_delalloc_range(inode, tree,
 						       page,
@@ -2185,7 +2141,13 @@
 		 */
 		if (page_started) {
 			ret = 0;
-			goto update_nr_written;
+			/*
+			 * we've unlocked the page, so we can't update
+			 * the mapping's writeback index, just update
+			 * nr_to_write.
+			 */
+			wbc->nr_to_write -= nr_written;
+			goto done_unlocked;
 		}
 	}
 	lock_extent(tree, start, page_end, GFP_NOFS);
@@ -2198,13 +2160,18 @@
 		if (ret == -EAGAIN) {
 			unlock_extent(tree, start, page_end, GFP_NOFS);
 			redirty_page_for_writepage(wbc, page);
+			update_nr_written(page, wbc, nr_written);
 			unlock_page(page);
 			ret = 0;
-			goto update_nr_written;
+			goto done_unlocked;
 		}
 	}
 
-	nr_written++;
+	/*
+	 * we don't want to touch the inode after unlocking the page,
+	 * so we update the mapping writeback index now
+	 */
+	update_nr_written(page, wbc, nr_written + 1);
 
 	end = page_end;
 	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
@@ -2314,9 +2281,9 @@
 				       (unsigned long long)end);
 			}
 
-			ret = submit_extent_page(WRITE, tree, page, sector,
-						 iosize, pg_offset, bdev,
-						 &epd->bio, max_nr,
+			ret = submit_extent_page(write_flags, tree, page,
+						 sector, iosize, pg_offset,
+						 bdev, &epd->bio, max_nr,
 						 end_bio_extent_writepage,
 						 0, 0, 0);
 			if (ret)
@@ -2336,11 +2303,8 @@ done:
 		unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
 	unlock_page(page);
 
-update_nr_written:
-	wbc->nr_to_write -= nr_written;
-	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
-	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
-		page->mapping->writeback_index = page->index + nr_written;
+done_unlocked:
+
 	return 0;
 }
 
@@ -2460,15 +2424,23 @@ retry:
 	return ret;
 }
 
-static noinline void flush_write_bio(void *data)
+static void flush_epd_write_bio(struct extent_page_data *epd)
 {
-	struct extent_page_data *epd = data;
 	if (epd->bio) {
-		submit_one_bio(WRITE, epd->bio, 0, 0);
+		if (epd->sync_io)
+			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
+		else
+			submit_one_bio(WRITE, epd->bio, 0, 0);
 		epd->bio = NULL;
 	}
 }
 
+static noinline void flush_write_bio(void *data)
+{
+	struct extent_page_data *epd = data;
+	flush_epd_write_bio(epd);
+}
+
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent,
 			  struct writeback_control *wbc)
@@ -2480,23 +2452,22 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
+		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
 		.bdi		= wbc->bdi,
-		.sync_mode	= WB_SYNC_NONE,
+		.sync_mode	= wbc->sync_mode,
 		.older_than_this = NULL,
 		.nr_to_write	= 64,
 		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
 		.range_end	= (loff_t)-1,
 	};
 
-
 	ret = __extent_writepage(page, wbc, &epd);
 
 	extent_write_cache_pages(tree, mapping, &wbc_writepages,
 				 __extent_writepage, &epd, flush_write_bio);
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 
@@ -2515,6 +2486,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 1,
+		.sync_io = mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
 		.bdi		= inode->i_mapping->backing_dev_info,
@@ -2540,8 +2512,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		start += PAGE_CACHE_SIZE;
 	}
 
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 
@@ -2556,13 +2527,13 @@ int extent_writepages(struct extent_io_tree *tree,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
+		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 
 	ret = extent_write_cache_pages(tree, mapping, wbc,
 				       __extent_writepage, &epd,
 				       flush_write_bio);
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
```
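The btrfs side of this merge keeps returning to one idea: record at setup time whether the writeback is synchronous (`wbc->sync_mode == WB_SYNC_ALL`) in the new `sync_io` bit of `extent_page_data`, then let a single flush helper pick `WRITE_SYNC` versus `WRITE` when the bio is finally submitted. The sketch below is a minimal, compilable illustration of that pattern; the kernel types and flag values (`struct bio`, `WRITE_SYNC`, `WB_SYNC_ALL`) are stubbed out here and are not the real definitions.

```c
#include <stdio.h>

/* stand-ins for kernel definitions -- stubbed for illustration only */
#define WRITE      0
#define WRITE_SYNC 1
enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };

struct bio { int stub; };

struct extent_page_data {
	struct bio *bio;
	/* tells writepage not to lock the state bits for this range;
	 * it still does the unlocking */
	unsigned int extent_locked:1;
	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static void submit_one_bio(int rw, struct bio *bio)
{
	printf("submitting bio as %s\n",
	       rw == WRITE_SYNC ? "WRITE_SYNC" : "WRITE");
}

/* the single flush path the patch introduces: callers no longer
 * open-code "if (epd.bio) submit_one_bio(WRITE, ...)" */
static void flush_epd_write_bio(struct extent_page_data *epd)
{
	if (epd->bio) {
		submit_one_bio(epd->sync_io ? WRITE_SYNC : WRITE, epd->bio);
		epd->bio = NULL;
	}
}

int main(void)
{
	struct bio b = { 0 };
	int sync_mode = WB_SYNC_ALL;	/* would come from wbc->sync_mode */
	struct extent_page_data epd = {
		.bio = &b,
		.extent_locked = 0,
		.sync_io = sync_mode == WB_SYNC_ALL,
	};

	flush_epd_write_bio(&epd);	/* prints: submitting bio as WRITE_SYNC */
	return 0;
}
```

The design point is that the synchronous/asynchronous decision is made once, where the `writeback_control` is in scope, and then travels with the `extent_page_data`; the submission sites never re-derive it, which is what lets `extent_write_full_page`, `extent_write_locked_range`, and `extent_writepages` all share `flush_epd_write_bio`.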
