Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--  fs/btrfs/disk-io.c | 3633
1 file changed, 2607 insertions, 1026 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fb827d0d718..08e65e9cf2a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -26,11 +26,15 @@  #include <linux/workqueue.h>  #include <linux/kthread.h>  #include <linux/freezer.h> -#include <linux/crc32c.h>  #include <linux/slab.h> -#include "compat.h" +#include <linux/migrate.h> +#include <linux/ratelimit.h> +#include <linux/uuid.h> +#include <linux/semaphore.h> +#include <asm/unaligned.h>  #include "ctree.h"  #include "disk-io.h" +#include "hash.h"  #include "transaction.h"  #include "btrfs_inode.h"  #include "volumes.h" @@ -39,10 +43,36 @@  #include "locking.h"  #include "tree-log.h"  #include "free-space-cache.h" +#include "inode-map.h" +#include "check-integrity.h" +#include "rcu-string.h" +#include "dev-replace.h" +#include "raid56.h" +#include "sysfs.h" +#include "qgroup.h" + +#ifdef CONFIG_X86 +#include <asm/cpufeature.h> +#endif  static struct extent_io_ops btree_extent_io_ops;  static void end_workqueue_fn(struct btrfs_work *work);  static void free_fs_root(struct btrfs_root *root); +static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, +				    int read_only); +static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, +					     struct btrfs_root *root); +static void btrfs_destroy_ordered_extents(struct btrfs_root *root); +static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, +				      struct btrfs_root *root); +static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root); +static int btrfs_destroy_marked_extents(struct btrfs_root *root, +					struct extent_io_tree *dirty_pages, +					int mark); +static int btrfs_destroy_pinned_extent(struct btrfs_root *root, +				       struct extent_io_tree *pinned_extents); +static int btrfs_cleanup_transaction(struct btrfs_root *root); +static void btrfs_error_commit_super(struct btrfs_root *root);  /*   * end_io_wq structs are used to do processing in task context when an IO is @@ -80,40 +110,87 @@ struct async_submit_bio {  	 */  	u64 bio_offset;  	struct btrfs_work work; +	int error;  }; -/* These are used to set the lockdep class on the extent buffer locks. - * The class is set by the readpage_end_io_hook after the buffer has - * passed csum validation but before the pages are unlocked. +/* + * Lockdep class keys for extent_buffer->lock's in this root.  For a given + * eb, the lockdep key is determined by the btrfs_root it belongs to and + * the level the eb occupies in the tree.   * - * The lockdep class is also set by btrfs_init_new_buffer on freshly - * allocated blocks. + * Different roots are used for different purposes and may nest inside each + * other and they require separate keysets.  As lockdep keys should be + * static, assign keysets according to the purpose of the root as indicated + * by btrfs_root->objectid.  This ensures that all special purpose roots + * have separate keysets.   * - * The class is based on the level in the tree block, which allows lockdep - * to know that lower nodes nest inside the locks of higher nodes. + * Lock-nesting across peer nodes is always done with the immediate parent + * node locked thus preventing deadlock.  As lockdep doesn't know this, use + * subclass to avoid triggering lockdep warning in such cases.   * - * We also add a check to make sure the highest level of the tree is - * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this - * code needs update as well. 
+ * The key is set by the readpage_end_io_hook after the buffer has passed + * csum validation but before the pages are unlocked.  It is also set by + * btrfs_init_new_buffer on freshly allocated blocks. + * + * We also add a check to make sure the highest level of the tree is the + * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code + * needs update as well.   */  #ifdef CONFIG_DEBUG_LOCK_ALLOC  # if BTRFS_MAX_LEVEL != 8  #  error  # endif -static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1]; -static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = { -	/* leaf */ -	"btrfs-extent-00", -	"btrfs-extent-01", -	"btrfs-extent-02", -	"btrfs-extent-03", -	"btrfs-extent-04", -	"btrfs-extent-05", -	"btrfs-extent-06", -	"btrfs-extent-07", -	/* highest possible level */ -	"btrfs-extent-08", + +static struct btrfs_lockdep_keyset { +	u64			id;		/* root objectid */ +	const char		*name_stem;	/* lock name stem */ +	char			names[BTRFS_MAX_LEVEL + 1][20]; +	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1]; +} btrfs_lockdep_keysets[] = { +	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	}, +	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	}, +	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	}, +	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	}, +	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	}, +	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	}, +	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	}, +	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	}, +	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	}, +	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	}, +	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	}, +	{ .id = 0,				.name_stem = "tree"	},  }; + +void __init btrfs_init_lockdep(void) +{ +	int i, j; + +	/* initialize lockdep class names */ +	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) { +		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i]; + +		for (j = 0; j < ARRAY_SIZE(ks->names); j++) +			snprintf(ks->names[j], sizeof(ks->names[j]), +				 "btrfs-%s-%02d", ks->name_stem, j); +	} +} + +void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, +				    int level) +{ +	struct btrfs_lockdep_keyset *ks; + +	BUG_ON(level >= ARRAY_SIZE(ks->keys)); + +	/* find the matching keyset, id 0 is the default entry */ +	for (ks = btrfs_lockdep_keysets; ks->id; ks++) +		if (ks->id == objectid) +			break; + +	lockdep_set_class_and_name(&eb->lock, +				   &ks->keys[level], ks->names[level]); +} +  #endif  /* @@ -121,7 +198,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {   * that covers the entire device   */  static struct extent_map *btree_get_extent(struct inode *inode, -		struct page *page, size_t page_offset, u64 start, u64 len, +		struct page *page, size_t pg_offset, u64 start, u64 len,  		int create)  {  	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; @@ -138,7 +215,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,  	}  	read_unlock(&em_tree->lock); -	em = alloc_extent_map(GFP_NOFS); +	em = alloc_extent_map();  	if (!em) {  		em = ERR_PTR(-ENOMEM);  		goto out; @@ -150,40 +227,30 @@ static struct extent_map *btree_get_extent(struct inode *inode,  	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;  	write_lock(&em_tree->lock); -	ret = add_extent_mapping(em_tree, em); +	ret = add_extent_mapping(em_tree, em, 0);  	if (ret == -EEXIST) { -		u64 failed_start = em->start; -		u64 failed_len = em->len; -  		
free_extent_map(em);  		em = lookup_extent_mapping(em_tree, start, len); -		if (em) { -			ret = 0; -		} else { -			em = lookup_extent_mapping(em_tree, failed_start, -						   failed_len); -			ret = -EIO; -		} +		if (!em) +			em = ERR_PTR(-EIO);  	} else if (ret) {  		free_extent_map(em); -		em = NULL; +		em = ERR_PTR(ret);  	}  	write_unlock(&em_tree->lock); -	if (ret) -		em = ERR_PTR(ret);  out:  	return em;  } -u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len) +u32 btrfs_csum_data(char *data, u32 seed, size_t len)  { -	return crc32c(seed, data, len); +	return btrfs_crc32c(seed, data, len);  }  void btrfs_csum_final(u32 crc, char *result)  { -	*(__le32 *)result = ~cpu_to_le32(crc); +	put_unaligned_le32(~crc, result);  }  /* @@ -193,13 +260,11 @@ void btrfs_csum_final(u32 crc, char *result)  static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,  			   int verify)  { -	u16 csum_size = -		btrfs_super_csum_size(&root->fs_info->super_copy); +	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);  	char *result = NULL;  	unsigned long len;  	unsigned long cur_len;  	unsigned long offset = BTRFS_CSUM_SIZE; -	char *map_token = NULL;  	char *kaddr;  	unsigned long map_start;  	unsigned long map_len; @@ -210,16 +275,14 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,  	len = buf->len - offset;  	while (len > 0) {  		err = map_private_extent_buffer(buf, offset, 32, -					&map_token, &kaddr, -					&map_start, &map_len, KM_USER0); +					&kaddr, &map_start, &map_len);  		if (err)  			return 1;  		cur_len = min(len, map_len - (offset - map_start)); -		crc = btrfs_csum_data(root, kaddr + offset - map_start, +		crc = btrfs_csum_data(kaddr + offset - map_start,  				      crc, cur_len);  		len -= cur_len;  		offset += cur_len; -		unmap_extent_buffer(buf, map_token, KM_USER0);  	}  	if (csum_size > sizeof(inline_result)) {  		result = kzalloc(csum_size * sizeof(char), GFP_NOFS); @@ -238,14 +301,11 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,  			memcpy(&found, result, csum_size);  			read_extent_buffer(buf, &val, 0, csum_size); -			if (printk_ratelimit()) { -				printk(KERN_INFO "btrfs: %s checksum verify " -				       "failed on %llu wanted %X found %X " -				       "level %d\n", -				       root->fs_info->sb->s_id, -				       (unsigned long long)buf->start, val, found, -				       btrfs_header_level(buf)); -			} +			printk_ratelimited(KERN_INFO +				"BTRFS: %s checksum verify failed on %llu wanted %X found %X " +				"level %d\n", +				root->fs_info->sb->s_id, buf->start, +				val, found, btrfs_header_level(buf));  			if (result != (char *)&inline_result)  				kfree(result);  			return 1; @@ -265,33 +325,96 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,   * in the wrong place.   
*/  static int verify_parent_transid(struct extent_io_tree *io_tree, -				 struct extent_buffer *eb, u64 parent_transid) +				 struct extent_buffer *eb, u64 parent_transid, +				 int atomic)  {  	struct extent_state *cached_state = NULL;  	int ret; +	bool need_lock = (current->journal_info == +			  (void *)BTRFS_SEND_TRANS_STUB);  	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)  		return 0; +	if (atomic) +		return -EAGAIN; + +	if (need_lock) { +		btrfs_tree_read_lock(eb); +		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); +	} +  	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, -			 0, &cached_state, GFP_NOFS); -	if (extent_buffer_uptodate(io_tree, eb, cached_state) && +			 0, &cached_state); +	if (extent_buffer_uptodate(eb) &&  	    btrfs_header_generation(eb) == parent_transid) {  		ret = 0;  		goto out;  	} -	if (printk_ratelimit()) { -		printk("parent transid verify failed on %llu wanted %llu " +	printk_ratelimited("parent transid verify failed on %llu wanted %llu "  		       "found %llu\n", -		       (unsigned long long)eb->start, -		       (unsigned long long)parent_transid, -		       (unsigned long long)btrfs_header_generation(eb)); -	} +		       eb->start, parent_transid, btrfs_header_generation(eb));  	ret = 1; -	clear_extent_buffer_uptodate(io_tree, eb, &cached_state); + +	/* +	 * Things reading via commit roots that don't have normal protection, +	 * like send, can have a really old block in cache that may point at a +	 * block that has been free'd and re-allocated.  So don't clear uptodate +	 * if we find an eb that is under IO (dirty/writeback) because we could +	 * end up reading in the stale data and then writing it back out and +	 * making everybody very sad. +	 */ +	if (!extent_buffer_under_io(eb)) +		clear_extent_buffer_uptodate(eb);  out:  	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,  			     &cached_state, GFP_NOFS); +	if (need_lock) +		btrfs_tree_read_unlock_blocking(eb); +	return ret; +} + +/* + * Return 0 if the superblock checksum type matches the checksum value of that + * algorithm. Pass the raw disk superblock data. + */ +static int btrfs_check_super_csum(char *raw_disk_sb) +{ +	struct btrfs_super_block *disk_sb = +		(struct btrfs_super_block *)raw_disk_sb; +	u16 csum_type = btrfs_super_csum_type(disk_sb); +	int ret = 0; + +	if (csum_type == BTRFS_CSUM_TYPE_CRC32) { +		u32 crc = ~(u32)0; +		const int csum_size = sizeof(crc); +		char result[csum_size]; + +		/* +		 * The super_block structure does not span the whole +		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space +		 * is filled with zeros and is included in the checkum. 
+		 */ +		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, +				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); +		btrfs_csum_final(crc, result); + +		if (memcmp(raw_disk_sb, result, csum_size)) +			ret = 1; + +		if (ret && btrfs_super_generation(disk_sb) < 10) { +			printk(KERN_WARNING +				"BTRFS: super block crcs don't match, older mkfs detected\n"); +			ret = 0; +		} +	} + +	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) { +		printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n", +				csum_type); +		ret = 1; +	} +  	return ret;  } @@ -304,28 +427,56 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,  					  u64 start, u64 parent_transid)  {  	struct extent_io_tree *io_tree; +	int failed = 0;  	int ret;  	int num_copies = 0;  	int mirror_num = 0; +	int failed_mirror = 0; +	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);  	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;  	while (1) { -		ret = read_extent_buffer_pages(io_tree, eb, start, 1, +		ret = read_extent_buffer_pages(io_tree, eb, start, +					       WAIT_COMPLETE,  					       btree_get_extent, mirror_num); -		if (!ret && -		    !verify_parent_transid(io_tree, eb, parent_transid)) -			return ret; +		if (!ret) { +			if (!verify_parent_transid(io_tree, eb, +						   parent_transid, 0)) +				break; +			else +				ret = -EIO; +		} + +		/* +		 * This buffer's crc is fine, but its contents are corrupted, so +		 * there is no reason to read the other copies, they won't be +		 * any less wrong. +		 */ +		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) +			break; -		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, +		num_copies = btrfs_num_copies(root->fs_info,  					      eb->start, eb->len);  		if (num_copies == 1) -			return ret; +			break; + +		if (!failed_mirror) { +			failed = 1; +			failed_mirror = eb->read_mirror; +		}  		mirror_num++; +		if (mirror_num == failed_mirror) +			mirror_num++; +  		if (mirror_num > num_copies) -			return ret; +			break;  	} -	return -EIO; + +	if (failed && !ret && failed_mirror) +		repair_eb_io_failure(root, eb, failed_mirror); + +	return ret;  }  /* @@ -335,43 +486,17 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,  static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)  { -	struct extent_io_tree *tree; -	u64 start = (u64)page->index << PAGE_CACHE_SHIFT; +	u64 start = page_offset(page);  	u64 found_start; -	unsigned long len;  	struct extent_buffer *eb; -	int ret; -	tree = &BTRFS_I(page->mapping->host)->io_tree; - -	if (page->private == EXTENT_PAGE_PRIVATE) -		goto out; -	if (!page->private) -		goto out; -	len = page->private >> 2; -	WARN_ON(len == 0); - -	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); -	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, -					     btrfs_header_generation(eb)); -	BUG_ON(ret); +	eb = (struct extent_buffer *)page->private; +	if (page != eb->pages[0]) +		return 0;  	found_start = btrfs_header_bytenr(eb); -	if (found_start != start) { -		WARN_ON(1); -		goto err; -	} -	if (eb->first_page != page) { -		WARN_ON(1); -		goto err; -	} -	if (!PageUptodate(page)) { -		WARN_ON(1); -		goto err; -	} +	if (WARN_ON(found_start != start || !PageUptodate(page))) +		return 0;  	csum_tree_block(root, eb, 0); -err: -	free_extent_buffer(eb); -out:  	return 0;  } @@ -382,8 +507,7 @@ static int check_tree_block_fsid(struct btrfs_root *root,  	u8 fsid[BTRFS_UUID_SIZE];  	int ret = 1; -	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb), -			   
BTRFS_FSID_SIZE); +	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);  	while (fs_devices) {  		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {  			ret = 0; @@ -394,79 +518,179 @@ static int check_tree_block_fsid(struct btrfs_root *root,  	return ret;  } -#ifdef CONFIG_DEBUG_LOCK_ALLOC -void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level) +#define CORRUPT(reason, eb, root, slot)				\ +	btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu,"	\ +		   "root=%llu, slot=%d", reason,			\ +	       btrfs_header_bytenr(eb),	root->objectid, slot) + +static noinline int check_leaf(struct btrfs_root *root, +			       struct extent_buffer *leaf)  { -	lockdep_set_class_and_name(&eb->lock, -			   &btrfs_eb_class[level], -			   btrfs_eb_name[level]); +	struct btrfs_key key; +	struct btrfs_key leaf_key; +	u32 nritems = btrfs_header_nritems(leaf); +	int slot; + +	if (nritems == 0) +		return 0; + +	/* Check the 0 item */ +	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != +	    BTRFS_LEAF_DATA_SIZE(root)) { +		CORRUPT("invalid item offset size pair", leaf, root, 0); +		return -EIO; +	} + +	/* +	 * Check to make sure each items keys are in the correct order and their +	 * offsets make sense.  We only have to loop through nritems-1 because +	 * we check the current slot against the next slot, which verifies the +	 * next slot's offset+size makes sense and that the current's slot +	 * offset is correct. +	 */ +	for (slot = 0; slot < nritems - 1; slot++) { +		btrfs_item_key_to_cpu(leaf, &leaf_key, slot); +		btrfs_item_key_to_cpu(leaf, &key, slot + 1); + +		/* Make sure the keys are in the right order */ +		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) { +			CORRUPT("bad key order", leaf, root, slot); +			return -EIO; +		} + +		/* +		 * Make sure the offset and ends are right, remember that the +		 * item data starts at the end of the leaf and grows towards the +		 * front. +		 */ +		if (btrfs_item_offset_nr(leaf, slot) != +			btrfs_item_end_nr(leaf, slot + 1)) { +			CORRUPT("slot offset bad", leaf, root, slot); +			return -EIO; +		} + +		/* +		 * Check to make sure that we don't point outside of the leaf, +		 * just incase all the items are consistent to eachother, but +		 * all point outside of the leaf. +		 */ +		if (btrfs_item_end_nr(leaf, slot) > +		    BTRFS_LEAF_DATA_SIZE(root)) { +			CORRUPT("slot end outside of leaf", leaf, root, slot); +			return -EIO; +		} +	} + +	return 0;  } -#endif -static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, -			       struct extent_state *state) +static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, +				      u64 phy_offset, struct page *page, +				      u64 start, u64 end, int mirror)  { -	struct extent_io_tree *tree;  	u64 found_start;  	int found_level; -	unsigned long len;  	struct extent_buffer *eb;  	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;  	int ret = 0; +	int reads_done; -	tree = &BTRFS_I(page->mapping->host)->io_tree; -	if (page->private == EXTENT_PAGE_PRIVATE) -		goto out;  	if (!page->private)  		goto out; -	len = page->private >> 2; -	WARN_ON(len == 0); +	eb = (struct extent_buffer *)page->private; -	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); +	/* the pending IO might have been the only thing that kept this buffer +	 * in memory.  
Make sure we have a ref for all this other checks +	 */ +	extent_buffer_get(eb); -	found_start = btrfs_header_bytenr(eb); -	if (found_start != start) { -		if (printk_ratelimit()) { -			printk(KERN_INFO "btrfs bad tree block start " -			       "%llu %llu\n", -			       (unsigned long long)found_start, -			       (unsigned long long)eb->start); -		} +	reads_done = atomic_dec_and_test(&eb->io_pages); +	if (!reads_done) +		goto err; + +	eb->read_mirror = mirror; +	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {  		ret = -EIO;  		goto err;  	} -	if (eb->first_page != page) { -		printk(KERN_INFO "btrfs bad first page %lu %lu\n", -		       eb->first_page->index, page->index); -		WARN_ON(1); + +	found_start = btrfs_header_bytenr(eb); +	if (found_start != eb->start) { +		printk_ratelimited(KERN_INFO "BTRFS: bad tree block start " +			       "%llu %llu\n", +			       found_start, eb->start);  		ret = -EIO;  		goto err;  	}  	if (check_tree_block_fsid(root, eb)) { -		if (printk_ratelimit()) { -			printk(KERN_INFO "btrfs bad fsid on block %llu\n", -			       (unsigned long long)eb->start); -		} +		printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n", +			       eb->start);  		ret = -EIO;  		goto err;  	}  	found_level = btrfs_header_level(eb); +	if (found_level >= BTRFS_MAX_LEVEL) { +		btrfs_info(root->fs_info, "bad tree block level %d", +			   (int)btrfs_header_level(eb)); +		ret = -EIO; +		goto err; +	} -	btrfs_set_buffer_lockdep_class(eb, found_level); +	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), +				       eb, found_level);  	ret = csum_tree_block(root, eb, 1); -	if (ret) +	if (ret) {  		ret = -EIO; +		goto err; +	} -	end = min_t(u64, eb->len, PAGE_CACHE_SIZE); -	end = eb->start + end - 1; +	/* +	 * If this is a leaf block and it is corrupt, set the corrupt bit so +	 * that we don't try and read the other copies of this block, just +	 * return -EIO. 
+	 */ +	if (found_level == 0 && check_leaf(root, eb)) { +		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); +		ret = -EIO; +	} + +	if (!ret) +		set_extent_buffer_uptodate(eb);  err: +	if (reads_done && +	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) +		btree_readahead_hook(root, eb, eb->start, ret); + +	if (ret) { +		/* +		 * our io error hook is going to dec the io pages +		 * again, we have to make sure it has something +		 * to decrement +		 */ +		atomic_inc(&eb->io_pages); +		clear_extent_buffer_uptodate(eb); +	}  	free_extent_buffer(eb);  out:  	return ret;  } +static int btree_io_failed_hook(struct page *page, int failed_mirror) +{ +	struct extent_buffer *eb; +	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; + +	eb = (struct extent_buffer *)page->private; +	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); +	eb->read_mirror = failed_mirror; +	atomic_dec(&eb->io_pages); +	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) +		btree_readahead_hook(root, eb, eb->start, -EIO); +	return -EIO;	/* we fixed nothing */ +} +  static void end_workqueue_bio(struct bio *bio, int err)  {  	struct end_io_wq *end_io_wq = bio->bi_private; @@ -474,26 +698,31 @@ static void end_workqueue_bio(struct bio *bio, int err)  	fs_info = end_io_wq->info;  	end_io_wq->error = err; -	end_io_wq->work.func = end_workqueue_fn; -	end_io_wq->work.flags = 0; +	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);  	if (bio->bi_rw & REQ_WRITE) { -		if (end_io_wq->metadata == 1) -			btrfs_queue_worker(&fs_info->endio_meta_write_workers, -					   &end_io_wq->work); -		else if (end_io_wq->metadata == 2) -			btrfs_queue_worker(&fs_info->endio_freespace_worker, -					   &end_io_wq->work); +		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) +			btrfs_queue_work(fs_info->endio_meta_write_workers, +					 &end_io_wq->work); +		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) +			btrfs_queue_work(fs_info->endio_freespace_worker, +					 &end_io_wq->work); +		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) +			btrfs_queue_work(fs_info->endio_raid56_workers, +					 &end_io_wq->work);  		else -			btrfs_queue_worker(&fs_info->endio_write_workers, -					   &end_io_wq->work); +			btrfs_queue_work(fs_info->endio_write_workers, +					 &end_io_wq->work);  	} else { -		if (end_io_wq->metadata) -			btrfs_queue_worker(&fs_info->endio_meta_workers, -					   &end_io_wq->work); +		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) +			btrfs_queue_work(fs_info->endio_raid56_workers, +					 &end_io_wq->work); +		else if (end_io_wq->metadata) +			btrfs_queue_work(fs_info->endio_meta_workers, +					 &end_io_wq->work);  		else -			btrfs_queue_worker(&fs_info->endio_workers, -					   &end_io_wq->work); +			btrfs_queue_work(fs_info->endio_workers, +					 &end_io_wq->work);  	}  } @@ -503,6 +732,7 @@ static void end_workqueue_bio(struct bio *bio, int err)   * 0 - if data   * 1 - if normal metadta   * 2 - if writing to the free space cache area + * 3 - raid parity work   */  int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,  			int metadata) @@ -527,25 +757,22 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,  unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)  {  	unsigned long limit = min_t(unsigned long, -				    info->workers.max_workers, +				    info->thread_pool_size,  				    info->fs_devices->open_devices);  	return 256 * limit;  } -int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) -{ -	return 
atomic_read(&info->nr_async_bios) > -		btrfs_async_submit_limit(info); -} -  static void run_one_async_start(struct btrfs_work *work)  {  	struct async_submit_bio *async; +	int ret;  	async = container_of(work, struct  async_submit_bio, work); -	async->submit_bio_start(async->inode, async->rw, async->bio, -			       async->mirror_num, async->bio_flags, -			       async->bio_offset); +	ret = async->submit_bio_start(async->inode, async->rw, async->bio, +				      async->mirror_num, async->bio_flags, +				      async->bio_offset); +	if (ret) +		async->error = ret;  }  static void run_one_async_done(struct btrfs_work *work) @@ -560,12 +787,16 @@ static void run_one_async_done(struct btrfs_work *work)  	limit = btrfs_async_submit_limit(fs_info);  	limit = limit * 2 / 3; -	atomic_dec(&fs_info->nr_async_submits); - -	if (atomic_read(&fs_info->nr_async_submits) < limit && +	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&  	    waitqueue_active(&fs_info->async_submit_wait))  		wake_up(&fs_info->async_submit_wait); +	/* If an error occured we just want to clean up the bio and move on */ +	if (async->error) { +		bio_endio(async->bio, async->error); +		return; +	} +  	async->submit_bio_done(async->inode, async->rw, async->bio,  			       async->mirror_num, async->bio_flags,  			       async->bio_offset); @@ -599,20 +830,20 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,  	async->submit_bio_start = submit_bio_start;  	async->submit_bio_done = submit_bio_done; -	async->work.func = run_one_async_start; -	async->work.ordered_func = run_one_async_done; -	async->work.ordered_free = run_one_async_free; +	btrfs_init_work(&async->work, run_one_async_start, +			run_one_async_done, run_one_async_free); -	async->work.flags = 0;  	async->bio_flags = bio_flags;  	async->bio_offset = bio_offset; +	async->error = 0; +  	atomic_inc(&fs_info->nr_async_submits);  	if (rw & REQ_SYNC) -		btrfs_set_work_high_prio(&async->work); +		btrfs_set_work_high_priority(&async->work); -	btrfs_queue_worker(&fs_info->workers, &async->work); +	btrfs_queue_work(fs_info->workers, &async->work);  	while (atomic_read(&fs_info->async_submit_draining) &&  	      atomic_read(&fs_info->nr_async_submits)) { @@ -625,18 +856,18 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,  static int btree_csum_one_bio(struct bio *bio)  { -	struct bio_vec *bvec = bio->bi_io_vec; -	int bio_index = 0; +	struct bio_vec *bvec;  	struct btrfs_root *root; +	int i, ret = 0; -	WARN_ON(bio->bi_vcnt <= 0); -	while (bio_index < bio->bi_vcnt) { +	bio_for_each_segment_all(bvec, bio, i) {  		root = BTRFS_I(bvec->bv_page->mapping->host)->root; -		csum_dirty_buffer(root, bvec->bv_page); -		bio_index++; -		bvec++; +		ret = csum_dirty_buffer(root, bvec->bv_page); +		if (ret) +			break;  	} -	return 0; + +	return ret;  }  static int __btree_submit_bio_start(struct inode *inode, int rw, @@ -648,157 +879,179 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,  	 * when we're called for a write, we're already in the async  	 * submission context.  Just jump into btrfs_map_bio  	 */ -	btree_csum_one_bio(bio); -	return 0; +	return btree_csum_one_bio(bio);  }  static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,  				 int mirror_num, unsigned long bio_flags,  				 u64 bio_offset)  { +	int ret; +  	/*  	 * when we're called for a write, we're already in the async  	 * submission context.  
Just jump into btrfs_map_bio  	 */ -	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); +	ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); +	if (ret) +		bio_endio(bio, ret); +	return ret; +} + +static int check_async_write(struct inode *inode, unsigned long bio_flags) +{ +	if (bio_flags & EXTENT_BIO_TREE_LOG) +		return 0; +#ifdef CONFIG_X86 +	if (cpu_has_xmm4_2) +		return 0; +#endif +	return 1;  }  static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,  				 int mirror_num, unsigned long bio_flags,  				 u64 bio_offset)  { +	int async = check_async_write(inode, bio_flags);  	int ret; -	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, -					  bio, 1); -	BUG_ON(ret); -  	if (!(rw & REQ_WRITE)) {  		/*  		 * called for a read, do the setup so that checksum validation  		 * can happen in the async kernel threads  		 */ -		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, -				     mirror_num, 0); +		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, +					  bio, 1); +		if (ret) +			goto out_w_error; +		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, +				    mirror_num, 0); +	} else if (!async) { +		ret = btree_csum_one_bio(bio); +		if (ret) +			goto out_w_error; +		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, +				    mirror_num, 0); +	} else { +		/* +		 * kthread helpers are used to submit writes so that +		 * checksumming can happen in parallel across all CPUs +		 */ +		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, +					  inode, rw, bio, mirror_num, 0, +					  bio_offset, +					  __btree_submit_bio_start, +					  __btree_submit_bio_done);  	} -	/* -	 * kthread helpers are used to submit writes so that checksumming -	 * can happen in parallel across all CPUs -	 */ -	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, -				   inode, rw, bio, mirror_num, 0, -				   bio_offset, -				   __btree_submit_bio_start, -				   __btree_submit_bio_done); +	if (ret) { +out_w_error: +		bio_endio(bio, ret); +	} +	return ret;  } -static int btree_writepage(struct page *page, struct writeback_control *wbc) +#ifdef CONFIG_MIGRATION +static int btree_migratepage(struct address_space *mapping, +			struct page *newpage, struct page *page, +			enum migrate_mode mode)  { -	struct extent_io_tree *tree; -	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; -	struct extent_buffer *eb; -	int was_dirty; - -	tree = &BTRFS_I(page->mapping->host)->io_tree; -	if (!(current->flags & PF_MEMALLOC)) { -		return extent_write_full_page(tree, page, -					      btree_get_extent, wbc); -	} - -	redirty_page_for_writepage(wbc, page); -	eb = btrfs_find_tree_block(root, page_offset(page), -				      PAGE_CACHE_SIZE); -	WARN_ON(!eb); - -	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); -	if (!was_dirty) { -		spin_lock(&root->fs_info->delalloc_lock); -		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE; -		spin_unlock(&root->fs_info->delalloc_lock); -	} -	free_extent_buffer(eb); - -	unlock_page(page); -	return 0; +	/* +	 * we can't safely write a btree page from here, +	 * we haven't done the locking hook +	 */ +	if (PageDirty(page)) +		return -EAGAIN; +	/* +	 * Buffers may be managed in a filesystem specific way. +	 * We must have no buffers or drop them. 
+	 */ +	if (page_has_private(page) && +	    !try_to_release_page(page, GFP_KERNEL)) +		return -EAGAIN; +	return migrate_page(mapping, newpage, page, mode);  } +#endif +  static int btree_writepages(struct address_space *mapping,  			    struct writeback_control *wbc)  { -	struct extent_io_tree *tree; -	tree = &BTRFS_I(mapping->host)->io_tree; +	struct btrfs_fs_info *fs_info; +	int ret; +  	if (wbc->sync_mode == WB_SYNC_NONE) { -		struct btrfs_root *root = BTRFS_I(mapping->host)->root; -		u64 num_dirty; -		unsigned long thresh = 32 * 1024 * 1024;  		if (wbc->for_kupdate)  			return 0; +		fs_info = BTRFS_I(mapping->host)->root->fs_info;  		/* this is a bit racy, but that's ok */ -		num_dirty = root->fs_info->dirty_metadata_bytes; -		if (num_dirty < thresh) +		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes, +					     BTRFS_DIRTY_METADATA_THRESH); +		if (ret < 0)  			return 0;  	} -	return extent_writepages(tree, mapping, btree_get_extent, wbc); +	return btree_write_cache_pages(mapping, wbc);  }  static int btree_readpage(struct file *file, struct page *page)  {  	struct extent_io_tree *tree;  	tree = &BTRFS_I(page->mapping->host)->io_tree; -	return extent_read_full_page(tree, page, btree_get_extent); +	return extent_read_full_page(tree, page, btree_get_extent, 0);  }  static int btree_releasepage(struct page *page, gfp_t gfp_flags)  { -	struct extent_io_tree *tree; -	struct extent_map_tree *map; -	int ret; -  	if (PageWriteback(page) || PageDirty(page))  		return 0; -	tree = &BTRFS_I(page->mapping->host)->io_tree; -	map = &BTRFS_I(page->mapping->host)->extent_tree; - -	ret = try_release_extent_state(map, tree, page, gfp_flags); -	if (!ret) -		return 0; - -	ret = try_release_extent_buffer(tree, page); -	if (ret == 1) { -		ClearPagePrivate(page); -		set_page_private(page, 0); -		page_cache_release(page); -	} - -	return ret; +	return try_release_extent_buffer(page);  } -static void btree_invalidatepage(struct page *page, unsigned long offset) +static void btree_invalidatepage(struct page *page, unsigned int offset, +				 unsigned int length)  {  	struct extent_io_tree *tree;  	tree = &BTRFS_I(page->mapping->host)->io_tree;  	extent_invalidatepage(tree, page, offset);  	btree_releasepage(page, GFP_NOFS);  	if (PagePrivate(page)) { -		printk(KERN_WARNING "btrfs warning page private not zero " -		       "on page %llu\n", (unsigned long long)page_offset(page)); +		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info, +			   "page private not zero on page %llu", +			   (unsigned long long)page_offset(page));  		ClearPagePrivate(page);  		set_page_private(page, 0);  		page_cache_release(page);  	}  } +static int btree_set_page_dirty(struct page *page) +{ +#ifdef DEBUG +	struct extent_buffer *eb; + +	BUG_ON(!PagePrivate(page)); +	eb = (struct extent_buffer *)page->private; +	BUG_ON(!eb); +	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); +	BUG_ON(!atomic_read(&eb->refs)); +	btrfs_assert_tree_locked(eb); +#endif +	return __set_page_dirty_nobuffers(page); +} +  static const struct address_space_operations btree_aops = {  	.readpage	= btree_readpage, -	.writepage	= btree_writepage,  	.writepages	= btree_writepages,  	.releasepage	= btree_releasepage,  	.invalidatepage = btree_invalidatepage, -	.sync_page	= block_sync_page, +#ifdef CONFIG_MIGRATION +	.migratepage	= btree_migratepage, +#endif +	.set_page_dirty = btree_set_page_dirty,  };  int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, @@ -812,42 +1065,70 @@ int readahead_tree_block(struct btrfs_root *root, 
u64 bytenr, u32 blocksize,  	if (!buf)  		return 0;  	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, -				 buf, 0, 0, btree_get_extent, 0); +				 buf, 0, WAIT_NONE, btree_get_extent, 0);  	free_extent_buffer(buf);  	return ret;  } +int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, +			 int mirror_num, struct extent_buffer **eb) +{ +	struct extent_buffer *buf = NULL; +	struct inode *btree_inode = root->fs_info->btree_inode; +	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; +	int ret; + +	buf = btrfs_find_create_tree_block(root, bytenr, blocksize); +	if (!buf) +		return 0; + +	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); + +	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK, +				       btree_get_extent, mirror_num); +	if (ret) { +		free_extent_buffer(buf); +		return ret; +	} + +	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { +		free_extent_buffer(buf); +		return -EIO; +	} else if (extent_buffer_uptodate(buf)) { +		*eb = buf; +	} else { +		free_extent_buffer(buf); +	} +	return 0; +} +  struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,  					    u64 bytenr, u32 blocksize)  { -	struct inode *btree_inode = root->fs_info->btree_inode; -	struct extent_buffer *eb; -	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, -				bytenr, blocksize, GFP_NOFS); -	return eb; +	return find_extent_buffer(root->fs_info, bytenr);  }  struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,  						 u64 bytenr, u32 blocksize)  { -	struct inode *btree_inode = root->fs_info->btree_inode; -	struct extent_buffer *eb; - -	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, -				 bytenr, blocksize, NULL, GFP_NOFS); -	return eb; +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS +	if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state))) +		return alloc_test_extent_buffer(root->fs_info, bytenr, +						blocksize); +#endif +	return alloc_extent_buffer(root->fs_info, bytenr, blocksize);  }  int btrfs_write_tree_block(struct extent_buffer *buf)  { -	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start, +	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,  					buf->start + buf->len - 1);  }  int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)  { -	return filemap_fdatawait_range(buf->first_page->mapping, +	return filemap_fdatawait_range(buf->pages[0]->mapping,  				       buf->start, buf->start + buf->len - 1);  } @@ -862,42 +1143,64 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,  		return NULL;  	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); - -	if (ret == 0) -		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags); +	if (ret) { +		free_extent_buffer(buf); +		return NULL; +	}  	return buf;  } -int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, -		     struct extent_buffer *buf) +void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, +		      struct extent_buffer *buf)  { -	struct inode *btree_inode = root->fs_info->btree_inode; +	struct btrfs_fs_info *fs_info = root->fs_info; +  	if (btrfs_header_generation(buf) == -	    root->fs_info->running_transaction->transid) { +	    fs_info->running_transaction->transid) {  		btrfs_assert_tree_locked(buf);  		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) { -			spin_lock(&root->fs_info->delalloc_lock); -			if (root->fs_info->dirty_metadata_bytes >= buf->len) -				root->fs_info->dirty_metadata_bytes -= buf->len; -		
	else -				WARN_ON(1); -			spin_unlock(&root->fs_info->delalloc_lock); +			__percpu_counter_add(&fs_info->dirty_metadata_bytes, +					     -buf->len, +					     fs_info->dirty_metadata_batch); +			/* ugh, clear_extent_buffer_dirty needs to lock the page */ +			btrfs_set_lock_blocking(buf); +			clear_extent_buffer_dirty(buf);  		} +	} +} + +static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void) +{ +	struct btrfs_subvolume_writers *writers; +	int ret; + +	writers = kmalloc(sizeof(*writers), GFP_NOFS); +	if (!writers) +		return ERR_PTR(-ENOMEM); -		/* ugh, clear_extent_buffer_dirty needs to lock the page */ -		btrfs_set_lock_blocking(buf); -		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, -					  buf); +	ret = percpu_counter_init(&writers->counter, 0); +	if (ret < 0) { +		kfree(writers); +		return ERR_PTR(ret);  	} -	return 0; + +	init_waitqueue_head(&writers->wait); +	return writers;  } -static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, -			u32 stripesize, struct btrfs_root *root, -			struct btrfs_fs_info *fs_info, -			u64 objectid) +static void +btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers) +{ +	percpu_counter_destroy(&writers->counter); +	kfree(writers); +} + +static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, +			 u32 stripesize, struct btrfs_root *root, +			 struct btrfs_fs_info *fs_info, +			 u64 objectid)  {  	root->node = NULL;  	root->commit_root = NULL; @@ -905,85 +1208,178 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,  	root->nodesize = nodesize;  	root->leafsize = leafsize;  	root->stripesize = stripesize; -	root->ref_cows = 0; -	root->track_dirty = 0; -	root->in_radix = 0; -	root->orphan_item_inserted = 0; +	root->state = 0;  	root->orphan_cleanup_state = 0; -	root->fs_info = fs_info;  	root->objectid = objectid;  	root->last_trans = 0;  	root->highest_objectid = 0; +	root->nr_delalloc_inodes = 0; +	root->nr_ordered_extents = 0;  	root->name = NULL; -	root->in_sysfs = 0;  	root->inode_tree = RB_ROOT; +	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);  	root->block_rsv = NULL;  	root->orphan_block_rsv = NULL;  	INIT_LIST_HEAD(&root->dirty_list); -	INIT_LIST_HEAD(&root->orphan_list);  	INIT_LIST_HEAD(&root->root_list); -	spin_lock_init(&root->node_lock); +	INIT_LIST_HEAD(&root->delalloc_inodes); +	INIT_LIST_HEAD(&root->delalloc_root); +	INIT_LIST_HEAD(&root->ordered_extents); +	INIT_LIST_HEAD(&root->ordered_root); +	INIT_LIST_HEAD(&root->logged_list[0]); +	INIT_LIST_HEAD(&root->logged_list[1]);  	spin_lock_init(&root->orphan_lock);  	spin_lock_init(&root->inode_lock); +	spin_lock_init(&root->delalloc_lock); +	spin_lock_init(&root->ordered_extent_lock);  	spin_lock_init(&root->accounting_lock); +	spin_lock_init(&root->log_extents_lock[0]); +	spin_lock_init(&root->log_extents_lock[1]);  	mutex_init(&root->objectid_mutex);  	mutex_init(&root->log_mutex); +	mutex_init(&root->ordered_extent_mutex); +	mutex_init(&root->delalloc_mutex);  	init_waitqueue_head(&root->log_writer_wait);  	init_waitqueue_head(&root->log_commit_wait[0]);  	init_waitqueue_head(&root->log_commit_wait[1]); +	INIT_LIST_HEAD(&root->log_ctxs[0]); +	INIT_LIST_HEAD(&root->log_ctxs[1]);  	atomic_set(&root->log_commit[0], 0);  	atomic_set(&root->log_commit[1], 0);  	atomic_set(&root->log_writers, 0); -	root->log_batch = 0; +	atomic_set(&root->log_batch, 0); +	atomic_set(&root->orphan_inodes, 0); +	atomic_set(&root->refs, 1); +	atomic_set(&root->will_be_snapshoted, 0);  	root->log_transid = 0; +	
root->log_transid_committed = -1;  	root->last_log_commit = 0; -	extent_io_tree_init(&root->dirty_log_pages, -			     fs_info->btree_inode->i_mapping, GFP_NOFS); +	if (fs_info) +		extent_io_tree_init(&root->dirty_log_pages, +				     fs_info->btree_inode->i_mapping);  	memset(&root->root_key, 0, sizeof(root->root_key));  	memset(&root->root_item, 0, sizeof(root->root_item));  	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));  	memset(&root->root_kobj, 0, sizeof(root->root_kobj)); -	root->defrag_trans_start = fs_info->generation; +	if (fs_info) +		root->defrag_trans_start = fs_info->generation; +	else +		root->defrag_trans_start = 0;  	init_completion(&root->kobj_unregister); -	root->defrag_running = 0;  	root->root_key.objectid = objectid; -	root->anon_super.s_root = NULL; -	root->anon_super.s_dev = 0; -	INIT_LIST_HEAD(&root->anon_super.s_list); -	INIT_LIST_HEAD(&root->anon_super.s_instances); -	init_rwsem(&root->anon_super.s_umount); +	root->anon_dev = 0; -	return 0; +	spin_lock_init(&root->root_item_lock);  } -static int find_and_setup_root(struct btrfs_root *tree_root, -			       struct btrfs_fs_info *fs_info, -			       u64 objectid, -			       struct btrfs_root *root) +static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)  { -	int ret; -	u32 blocksize; -	u64 generation; +	struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS); +	if (root) +		root->fs_info = fs_info; +	return root; +} + +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS +/* Should only be used by the testing infrastructure */ +struct btrfs_root *btrfs_alloc_dummy_root(void) +{ +	struct btrfs_root *root; + +	root = btrfs_alloc_root(NULL); +	if (!root) +		return ERR_PTR(-ENOMEM); +	__setup_root(4096, 4096, 4096, 4096, root, NULL, 1); +	set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state); +	root->alloc_bytenr = 0; + +	return root; +} +#endif + +struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, +				     struct btrfs_fs_info *fs_info, +				     u64 objectid) +{ +	struct extent_buffer *leaf; +	struct btrfs_root *tree_root = fs_info->tree_root; +	struct btrfs_root *root; +	struct btrfs_key key; +	int ret = 0; +	uuid_le uuid; + +	root = btrfs_alloc_root(fs_info); +	if (!root) +		return ERR_PTR(-ENOMEM);  	__setup_root(tree_root->nodesize, tree_root->leafsize,  		     tree_root->sectorsize, tree_root->stripesize,  		     root, fs_info, objectid); -	ret = btrfs_find_last_root(tree_root, objectid, -				   &root->root_item, &root->root_key); -	if (ret > 0) -		return -ENOENT; -	BUG_ON(ret); +	root->root_key.objectid = objectid; +	root->root_key.type = BTRFS_ROOT_ITEM_KEY; +	root->root_key.offset = 0; + +	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, +				      0, objectid, NULL, 0, 0, 0); +	if (IS_ERR(leaf)) { +		ret = PTR_ERR(leaf); +		leaf = NULL; +		goto fail; +	} + +	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header)); +	btrfs_set_header_bytenr(leaf, leaf->start); +	btrfs_set_header_generation(leaf, trans->transid); +	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV); +	btrfs_set_header_owner(leaf, objectid); +	root->node = leaf; + +	write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(), +			    BTRFS_FSID_SIZE); +	write_extent_buffer(leaf, fs_info->chunk_tree_uuid, +			    btrfs_header_chunk_tree_uuid(leaf), +			    BTRFS_UUID_SIZE); +	btrfs_mark_buffer_dirty(leaf); -	generation = btrfs_root_generation(&root->root_item); -	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); -	root->node = read_tree_block(root, 
btrfs_root_bytenr(&root->root_item), -				     blocksize, generation); -	BUG_ON(!root->node);  	root->commit_root = btrfs_root_node(root); -	return 0; +	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); + +	root->root_item.flags = 0; +	root->root_item.byte_limit = 0; +	btrfs_set_root_bytenr(&root->root_item, leaf->start); +	btrfs_set_root_generation(&root->root_item, trans->transid); +	btrfs_set_root_level(&root->root_item, 0); +	btrfs_set_root_refs(&root->root_item, 1); +	btrfs_set_root_used(&root->root_item, leaf->len); +	btrfs_set_root_last_snapshot(&root->root_item, 0); +	btrfs_set_root_dirid(&root->root_item, 0); +	uuid_le_gen(&uuid); +	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE); +	root->root_item.drop_level = 0; + +	key.objectid = objectid; +	key.type = BTRFS_ROOT_ITEM_KEY; +	key.offset = 0; +	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item); +	if (ret) +		goto fail; + +	btrfs_tree_unlock(leaf); + +	return root; + +fail: +	if (leaf) { +		btrfs_tree_unlock(leaf); +		free_extent_buffer(root->commit_root); +		free_extent_buffer(leaf); +	} +	kfree(root); + +	return ERR_PTR(ret);  }  static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, @@ -993,7 +1389,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,  	struct btrfs_root *tree_root = fs_info->tree_root;  	struct extent_buffer *leaf; -	root = kzalloc(sizeof(*root), GFP_NOFS); +	root = btrfs_alloc_root(fs_info);  	if (!root)  		return ERR_PTR(-ENOMEM); @@ -1004,16 +1400,19 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,  	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;  	root->root_key.type = BTRFS_ROOT_ITEM_KEY;  	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; +  	/* +	 * DON'T set REF_COWS for log trees +	 *  	 * log trees do not get reference counted because they go away  	 * before a real commit is actually done.  They do store pointers  	 * to file data extents, and those reference counts still get  	 * updated (along with back refs to the log tree).  	 
*/ -	root->ref_cows = 0;  	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, -				      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0); +				      BTRFS_TREE_LOG_OBJECTID, NULL, +				      0, 0, 0);  	if (IS_ERR(leaf)) {  		kfree(root);  		return ERR_CAST(leaf); @@ -1027,8 +1426,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,  	root->node = leaf;  	write_extent_buffer(root->node, root->fs_info->fsid, -			    (unsigned long)btrfs_header_fsid(root->node), -			    BTRFS_FSID_SIZE); +			    btrfs_header_fsid(), BTRFS_FSID_SIZE);  	btrfs_mark_buffer_dirty(root->node);  	btrfs_tree_unlock(root->node);  	return root; @@ -1061,96 +1459,169 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,  	log_root->root_key.offset = root->root_key.objectid;  	inode_item = &log_root->root_item.inode; -	inode_item->generation = cpu_to_le64(1); -	inode_item->size = cpu_to_le64(3); -	inode_item->nlink = cpu_to_le32(1); -	inode_item->nbytes = cpu_to_le64(root->leafsize); -	inode_item->mode = cpu_to_le32(S_IFDIR | 0755); +	btrfs_set_stack_inode_generation(inode_item, 1); +	btrfs_set_stack_inode_size(inode_item, 3); +	btrfs_set_stack_inode_nlink(inode_item, 1); +	btrfs_set_stack_inode_nbytes(inode_item, root->leafsize); +	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);  	btrfs_set_root_node(&log_root->root_item, log_root->node);  	WARN_ON(root->log_root);  	root->log_root = log_root;  	root->log_transid = 0; +	root->log_transid_committed = -1;  	root->last_log_commit = 0;  	return 0;  } -struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, -					       struct btrfs_key *location) +static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, +					       struct btrfs_key *key)  {  	struct btrfs_root *root;  	struct btrfs_fs_info *fs_info = tree_root->fs_info;  	struct btrfs_path *path; -	struct extent_buffer *l;  	u64 generation;  	u32 blocksize; -	int ret = 0; +	int ret; -	root = kzalloc(sizeof(*root), GFP_NOFS); -	if (!root) +	path = btrfs_alloc_path(); +	if (!path)  		return ERR_PTR(-ENOMEM); -	if (location->offset == (u64)-1) { -		ret = find_and_setup_root(tree_root, fs_info, -					  location->objectid, root); -		if (ret) { -			kfree(root); -			return ERR_PTR(ret); -		} -		goto out; + +	root = btrfs_alloc_root(fs_info); +	if (!root) { +		ret = -ENOMEM; +		goto alloc_fail;  	}  	__setup_root(tree_root->nodesize, tree_root->leafsize,  		     tree_root->sectorsize, tree_root->stripesize, -		     root, fs_info, location->objectid); +		     root, fs_info, key->objectid); -	path = btrfs_alloc_path(); -	BUG_ON(!path); -	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0); -	if (ret == 0) { -		l = path->nodes[0]; -		read_extent_buffer(l, &root->root_item, -				btrfs_item_ptr_offset(l, path->slots[0]), -				sizeof(root->root_item)); -		memcpy(&root->root_key, location, sizeof(*location)); -	} -	btrfs_free_path(path); +	ret = btrfs_find_root(tree_root, key, path, +			      &root->root_item, &root->root_key);  	if (ret) {  		if (ret > 0)  			ret = -ENOENT; -		return ERR_PTR(ret); +		goto find_fail;  	}  	generation = btrfs_root_generation(&root->root_item);  	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));  	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),  				     blocksize, generation); +	if (!root->node) { +		ret = -ENOMEM; +		goto find_fail; +	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) { +		ret = -EIO; +		goto read_fail; +	}  	root->commit_root = 
btrfs_root_node(root); -	BUG_ON(!root->node);  out: -	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) -		root->ref_cows = 1; - +	btrfs_free_path(path);  	return root; + +read_fail: +	free_extent_buffer(root->node); +find_fail: +	kfree(root); +alloc_fail: +	root = ERR_PTR(ret); +	goto out;  } -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, -					u64 root_objectid) +struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root, +				      struct btrfs_key *location)  {  	struct btrfs_root *root; -	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID) -		return fs_info->tree_root; -	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) -		return fs_info->extent_root; +	root = btrfs_read_tree_root(tree_root, location); +	if (IS_ERR(root)) +		return root; + +	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { +		set_bit(BTRFS_ROOT_REF_COWS, &root->state); +		btrfs_check_and_init_root_item(&root->root_item); +	} + +	return root; +} + +int btrfs_init_fs_root(struct btrfs_root *root) +{ +	int ret; +	struct btrfs_subvolume_writers *writers; + +	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); +	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), +					GFP_NOFS); +	if (!root->free_ino_pinned || !root->free_ino_ctl) { +		ret = -ENOMEM; +		goto fail; +	} + +	writers = btrfs_alloc_subvolume_writers(); +	if (IS_ERR(writers)) { +		ret = PTR_ERR(writers); +		goto fail; +	} +	root->subv_writers = writers; + +	btrfs_init_free_ino_ctl(root); +	spin_lock_init(&root->cache_lock); +	init_waitqueue_head(&root->cache_wait); + +	ret = get_anon_bdev(&root->anon_dev); +	if (ret) +		goto free_writers; +	return 0; + +free_writers: +	btrfs_free_subvolume_writers(root->subv_writers); +fail: +	kfree(root->free_ino_ctl); +	kfree(root->free_ino_pinned); +	return ret; +} +static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, +					       u64 root_id) +{ +	struct btrfs_root *root; + +	spin_lock(&fs_info->fs_roots_radix_lock);  	root = radix_tree_lookup(&fs_info->fs_roots_radix, -				 (unsigned long)root_objectid); +				 (unsigned long)root_id); +	spin_unlock(&fs_info->fs_roots_radix_lock);  	return root;  } -struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, -					      struct btrfs_key *location) +int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, +			 struct btrfs_root *root) +{ +	int ret; + +	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); +	if (ret) +		return ret; + +	spin_lock(&fs_info->fs_roots_radix_lock); +	ret = radix_tree_insert(&fs_info->fs_roots_radix, +				(unsigned long)root->root_key.objectid, +				root); +	if (ret == 0) +		set_bit(BTRFS_ROOT_IN_RADIX, &root->state); +	spin_unlock(&fs_info->fs_roots_radix_lock); +	radix_tree_preload_end(); + +	return ret; +} + +struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, +				     struct btrfs_key *location, +				     bool check_ref)  {  	struct btrfs_root *root;  	int ret; @@ -1165,44 +1636,41 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,  		return fs_info->dev_root;  	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)  		return fs_info->csum_root; +	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID) +		return fs_info->quota_root ? fs_info->quota_root : +					     ERR_PTR(-ENOENT); +	if (location->objectid == BTRFS_UUID_TREE_OBJECTID) +		return fs_info->uuid_root ? 
fs_info->uuid_root : +					    ERR_PTR(-ENOENT);  again: -	spin_lock(&fs_info->fs_roots_radix_lock); -	root = radix_tree_lookup(&fs_info->fs_roots_radix, -				 (unsigned long)location->objectid); -	spin_unlock(&fs_info->fs_roots_radix_lock); -	if (root) +	root = btrfs_lookup_fs_root(fs_info, location->objectid); +	if (root) { +		if (check_ref && btrfs_root_refs(&root->root_item) == 0) +			return ERR_PTR(-ENOENT);  		return root; +	} -	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location); +	root = btrfs_read_fs_root(fs_info->tree_root, location);  	if (IS_ERR(root))  		return root; -	set_anon_super(&root->anon_super, NULL); - -	if (btrfs_root_refs(&root->root_item) == 0) { +	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {  		ret = -ENOENT;  		goto fail;  	} -	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid); -	if (ret < 0) -		goto fail; -	if (ret == 0) -		root->orphan_item_inserted = 1; - -	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); +	ret = btrfs_init_fs_root(root);  	if (ret)  		goto fail; -	spin_lock(&fs_info->fs_roots_radix_lock); -	ret = radix_tree_insert(&fs_info->fs_roots_radix, -				(unsigned long)root->root_key.objectid, -				root); +	ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID, +			location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL); +	if (ret < 0) +		goto fail;  	if (ret == 0) -		root->in_radix = 1; +		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); -	spin_unlock(&fs_info->fs_roots_radix_lock); -	radix_tree_preload_end(); +	ret = btrfs_insert_fs_root(fs_info, root);  	if (ret) {  		if (ret == -EEXIST) {  			free_fs_root(root); @@ -1210,51 +1678,12 @@ again:  		}  		goto fail;  	} - -	ret = btrfs_find_dead_roots(fs_info->tree_root, -				    root->root_key.objectid); -	WARN_ON(ret);  	return root;  fail:  	free_fs_root(root);  	return ERR_PTR(ret);  } -struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, -				      struct btrfs_key *location, -				      const char *name, int namelen) -{ -	return btrfs_read_fs_root_no_name(fs_info, location); -#if 0 -	struct btrfs_root *root; -	int ret; - -	root = btrfs_read_fs_root_no_name(fs_info, location); -	if (!root) -		return NULL; - -	if (root->in_sysfs) -		return root; - -	ret = btrfs_set_root_name(root, name, namelen); -	if (ret) { -		free_extent_buffer(root->node); -		kfree(root); -		return ERR_PTR(ret); -	} - -	ret = btrfs_sysfs_add_root(root); -	if (ret) { -		free_extent_buffer(root->node); -		kfree(root->name); -		kfree(root); -		return ERR_PTR(ret); -	} -	root->in_sysfs = 1; -	return root; -#endif -} -  static int btrfs_congested_fn(void *congested_data, int bdi_bits)  {  	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; @@ -1262,7 +1691,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)  	struct btrfs_device *device;  	struct backing_dev_info *bdi; -	list_for_each_entry(device, &info->fs_devices->devices, dev_list) { +	rcu_read_lock(); +	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {  		if (!device->bdev)  			continue;  		bdi = blk_get_backing_dev_info(device->bdev); @@ -1271,86 +1701,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)  			break;  		}  	} +	rcu_read_unlock();  	return ret;  }  /* - * this unplugs every device on the box, and it is only used when page - * is null - */ -static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) -{ -	struct btrfs_device *device; -	struct btrfs_fs_info *info; - -	info = (struct 
btrfs_fs_info *)bdi->unplug_io_data; -	list_for_each_entry(device, &info->fs_devices->devices, dev_list) { -		if (!device->bdev) -			continue; - -		bdi = blk_get_backing_dev_info(device->bdev); -		if (bdi->unplug_io_fn) -			bdi->unplug_io_fn(bdi, page); -	} -} - -static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) -{ -	struct inode *inode; -	struct extent_map_tree *em_tree; -	struct extent_map *em; -	struct address_space *mapping; -	u64 offset; - -	/* the generic O_DIRECT read code does this */ -	if (1 || !page) { -		__unplug_io_fn(bdi, page); -		return; -	} - -	/* -	 * page->mapping may change at any time.  Get a consistent copy -	 * and use that for everything below -	 */ -	smp_mb(); -	mapping = page->mapping; -	if (!mapping) -		return; - -	inode = mapping->host; - -	/* -	 * don't do the expensive searching for a small number of -	 * devices -	 */ -	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) { -		__unplug_io_fn(bdi, page); -		return; -	} - -	offset = page_offset(page); - -	em_tree = &BTRFS_I(inode)->extent_tree; -	read_lock(&em_tree->lock); -	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); -	read_unlock(&em_tree->lock); -	if (!em) { -		__unplug_io_fn(bdi, page); -		return; -	} - -	if (em->block_start >= EXTENT_MAP_LAST_BYTE) { -		free_extent_map(em); -		__unplug_io_fn(bdi, page); -		return; -	} -	offset = offset - em->start; -	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree, -			  em->block_start + offset, page); -	free_extent_map(em); -} - -/*   * If this fails, caller must call bdi_destroy() to get rid of the   * bdi again.   */ @@ -1364,48 +1719,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)  		return err;  	bdi->ra_pages	= default_backing_dev_info.ra_pages; -	bdi->unplug_io_fn	= btrfs_unplug_io_fn; -	bdi->unplug_io_data	= info;  	bdi->congested_fn	= btrfs_congested_fn;  	bdi->congested_data	= info;  	return 0;  } -static int bio_ready_for_csum(struct bio *bio) -{ -	u64 length = 0; -	u64 buf_len = 0; -	u64 start = 0; -	struct page *page; -	struct extent_io_tree *io_tree = NULL; -	struct bio_vec *bvec; -	int i; -	int ret; - -	bio_for_each_segment(bvec, bio, i) { -		page = bvec->bv_page; -		if (page->private == EXTENT_PAGE_PRIVATE) { -			length += bvec->bv_len; -			continue; -		} -		if (!page->private) { -			length += bvec->bv_len; -			continue; -		} -		length = bvec->bv_len; -		buf_len = page->private >> 2; -		start = page_offset(page) + bvec->bv_offset; -		io_tree = &BTRFS_I(page->mapping->host)->io_tree; -	} -	/* are we fully contained in this bio? */ -	if (buf_len <= length) -		return 1; - -	ret = extent_range_uptodate(io_tree, start + length, -				    start + buf_len - 1); -	return ret; -} -  /*   * called by the kthread helper functions to finally call the bio end_io   * functions.  This is where read checksum verification actually happens @@ -1414,48 +1732,53 @@ static void end_workqueue_fn(struct btrfs_work *work)  {  	struct bio *bio;  	struct end_io_wq *end_io_wq; -	struct btrfs_fs_info *fs_info;  	int error;  	end_io_wq = container_of(work, struct end_io_wq, work);  	bio = end_io_wq->bio; -	fs_info = end_io_wq->info; -	/* metadata bio reads are special because the whole tree block must -	 * be checksummed at once.  This makes sure the entire block is in -	 * ram and up to date before trying to verify things.  
For -	 * blocksize <= pagesize, it is basically a noop -	 */ -	if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata && -	    !bio_ready_for_csum(bio)) { -		btrfs_queue_worker(&fs_info->endio_meta_workers, -				   &end_io_wq->work); -		return; -	}  	error = end_io_wq->error;  	bio->bi_private = end_io_wq->private;  	bio->bi_end_io = end_io_wq->end_io;  	kfree(end_io_wq); -	bio_endio(bio, error); +	bio_endio_nodec(bio, error);  }  static int cleaner_kthread(void *arg)  {  	struct btrfs_root *root = arg; +	int again;  	do { -		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); +		again = 0; + +		/* Make the cleaner go to sleep early. */ +		if (btrfs_need_cleaner_sleep(root)) +			goto sleep; + +		if (!mutex_trylock(&root->fs_info->cleaner_mutex)) +			goto sleep; -		if (!(root->fs_info->sb->s_flags & MS_RDONLY) && -		    mutex_trylock(&root->fs_info->cleaner_mutex)) { -			btrfs_run_delayed_iputs(root); -			btrfs_clean_old_snapshots(root); +		/* +		 * Avoid the case where the status of the fs changes between +		 * the check above and the trylock. +		 */ +		if (btrfs_need_cleaner_sleep(root)) {  			mutex_unlock(&root->fs_info->cleaner_mutex); +			goto sleep;  		} -		if (freezing(current)) { -			refrigerator(); -		} else { +		btrfs_run_delayed_iputs(root); +		again = btrfs_clean_one_deleted_snapshot(root); +		mutex_unlock(&root->fs_info->cleaner_mutex); + +		/* +		 * The defragger has dealt with the R/O remount and umount, +		 * so we needn't do anything special here. +		 */ +		btrfs_run_defrag_inodes(root->fs_info); +sleep: +		if (!try_to_freeze() && !again) {  			set_current_state(TASK_INTERRUPTIBLE);  			if (!kthread_should_stop())  				schedule(); @@ -1473,34 +1796,40 @@ static int transaction_kthread(void *arg)  	u64 transid;  	unsigned long now;  	unsigned long delay; -	int ret; +	bool cannot_commit;  	do { -		delay = HZ * 30; -		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); +		cannot_commit = false; +		delay = HZ * root->fs_info->commit_interval;  		mutex_lock(&root->fs_info->transaction_kthread_mutex); -		spin_lock(&root->fs_info->new_trans_lock); +		spin_lock(&root->fs_info->trans_lock);  		cur = root->fs_info->running_transaction;  		if (!cur) { -			spin_unlock(&root->fs_info->new_trans_lock); +			spin_unlock(&root->fs_info->trans_lock);  			goto sleep;  		}  		now = get_seconds(); -		if (!cur->blocked && -		    (now < cur->start_time || now - cur->start_time < 30)) { -			spin_unlock(&root->fs_info->new_trans_lock); +		if (cur->state < TRANS_STATE_BLOCKED && +		    (now < cur->start_time || +		     now - cur->start_time < root->fs_info->commit_interval)) { +			spin_unlock(&root->fs_info->trans_lock);  			delay = HZ * 5;  			goto sleep;  		}  		transid = cur->transid; -		spin_unlock(&root->fs_info->new_trans_lock); +		spin_unlock(&root->fs_info->trans_lock); -		trans = btrfs_join_transaction(root, 1); +		/* If the file system is aborted, this will always fail. 
*/ +		trans = btrfs_attach_transaction(root); +		if (IS_ERR(trans)) { +			if (PTR_ERR(trans) != -ENOENT) +				cannot_commit = true; +			goto sleep; +		}  		if (transid == trans->transid) { -			ret = btrfs_commit_transaction(trans, root); -			BUG_ON(ret); +			btrfs_commit_transaction(trans, root);  		} else {  			btrfs_end_transaction(trans, root);  		} @@ -1508,12 +1837,14 @@ sleep:  		wake_up_process(root->fs_info->cleaner_kthread);  		mutex_unlock(&root->fs_info->transaction_kthread_mutex); -		if (freezing(current)) { -			refrigerator(); -		} else { +		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, +				      &root->fs_info->fs_state))) +			btrfs_cleanup_transaction(root); +		if (!try_to_freeze()) {  			set_current_state(TASK_INTERRUPTIBLE);  			if (!kthread_should_stop() && -			    !btrfs_transaction_blocked(root->fs_info)) +			    (!btrfs_transaction_blocked(root->fs_info) || +			     cannot_commit))  				schedule_timeout(delay);  			__set_current_state(TASK_RUNNING);  		} @@ -1521,9 +1852,291 @@ sleep:  	return 0;  } -struct btrfs_root *open_ctree(struct super_block *sb, -			      struct btrfs_fs_devices *fs_devices, -			      char *options) +/* + * this will find the highest generation in the array of + * root backups.  The index of the newest backup is returned, + * or -1 if we can't find anything. + * + * We check to make sure the array is valid by comparing the + * generation of the latest root in the array with the generation + * in the super block.  If they don't match we pitch it. + */ +static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) +{ +	u64 cur; +	int newest_index = -1; +	struct btrfs_root_backup *root_backup; +	int i; + +	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { +		root_backup = info->super_copy->super_roots + i; +		cur = btrfs_backup_tree_root_gen(root_backup); +		if (cur == newest_gen) +			newest_index = i; +	} + +	/* check to see if we actually wrapped around */ +	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { +		root_backup = info->super_copy->super_roots; +		cur = btrfs_backup_tree_root_gen(root_backup); +		if (cur == newest_gen) +			newest_index = 0; +	} +	return newest_index; +} + + +/* + * find the oldest backup so we know where to store new entries + * in the backup array.  This will set the backup_root_index + * field in the fs_info struct + */ +static void find_oldest_super_backup(struct btrfs_fs_info *info, +				     u64 newest_gen) +{ +	int newest_index = -1; + +	newest_index = find_newest_super_backup(info, newest_gen); +	/* if there was garbage in there, just move along */ +	if (newest_index == -1) { +		info->backup_root_index = 0; +	} else { +		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; +	} +} + +/* + * copy all the root pointers into the super backup array. 
+ * this will bump the backup pointer by one when it is + * done + */ +static void backup_super_roots(struct btrfs_fs_info *info) +{ +	int next_backup; +	struct btrfs_root_backup *root_backup; +	int last_backup; + +	next_backup = info->backup_root_index; +	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % +		BTRFS_NUM_BACKUP_ROOTS; + +	/* +	 * just overwrite the last backup if we're at the same generation +	 * this happens only at umount +	 */ +	root_backup = info->super_for_commit->super_roots + last_backup; +	if (btrfs_backup_tree_root_gen(root_backup) == +	    btrfs_header_generation(info->tree_root->node)) +		next_backup = last_backup; + +	root_backup = info->super_for_commit->super_roots + next_backup; + +	/* +	 * make sure all of our padding and empty slots get zero filled +	 * regardless of which ones we use today +	 */ +	memset(root_backup, 0, sizeof(*root_backup)); + +	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; + +	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); +	btrfs_set_backup_tree_root_gen(root_backup, +			       btrfs_header_generation(info->tree_root->node)); + +	btrfs_set_backup_tree_root_level(root_backup, +			       btrfs_header_level(info->tree_root->node)); + +	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); +	btrfs_set_backup_chunk_root_gen(root_backup, +			       btrfs_header_generation(info->chunk_root->node)); +	btrfs_set_backup_chunk_root_level(root_backup, +			       btrfs_header_level(info->chunk_root->node)); + +	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); +	btrfs_set_backup_extent_root_gen(root_backup, +			       btrfs_header_generation(info->extent_root->node)); +	btrfs_set_backup_extent_root_level(root_backup, +			       btrfs_header_level(info->extent_root->node)); + +	/* +	 * we might commit during log recovery, which happens before we set +	 * the fs_root.  Make sure it is valid before we fill it in. +	 */ +	if (info->fs_root && info->fs_root->node) { +		btrfs_set_backup_fs_root(root_backup, +					 info->fs_root->node->start); +		btrfs_set_backup_fs_root_gen(root_backup, +			       btrfs_header_generation(info->fs_root->node)); +		btrfs_set_backup_fs_root_level(root_backup, +			       btrfs_header_level(info->fs_root->node)); +	} + +	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); +	btrfs_set_backup_dev_root_gen(root_backup, +			       btrfs_header_generation(info->dev_root->node)); +	btrfs_set_backup_dev_root_level(root_backup, +				       btrfs_header_level(info->dev_root->node)); + +	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); +	btrfs_set_backup_csum_root_gen(root_backup, +			       btrfs_header_generation(info->csum_root->node)); +	btrfs_set_backup_csum_root_level(root_backup, +			       btrfs_header_level(info->csum_root->node)); + +	btrfs_set_backup_total_bytes(root_backup, +			     btrfs_super_total_bytes(info->super_copy)); +	btrfs_set_backup_bytes_used(root_backup, +			     btrfs_super_bytes_used(info->super_copy)); +	btrfs_set_backup_num_devices(root_backup, +			     btrfs_super_num_devices(info->super_copy)); + +	/* +	 * if we don't copy this out to the super_copy, it won't get remembered +	 * for the next commit +	 */ +	memcpy(&info->super_copy->super_roots, +	       &info->super_for_commit->super_roots, +	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); +} + +/* + * this copies info out of the root backup array and back into + * the in-memory super block.  
It is meant to help iterate through + * the array, so you send it the number of backups you've already + * tried and the last backup index you used. + * + * this returns -1 when it has tried all the backups + */ +static noinline int next_root_backup(struct btrfs_fs_info *info, +				     struct btrfs_super_block *super, +				     int *num_backups_tried, int *backup_index) +{ +	struct btrfs_root_backup *root_backup; +	int newest = *backup_index; + +	if (*num_backups_tried == 0) { +		u64 gen = btrfs_super_generation(super); + +		newest = find_newest_super_backup(info, gen); +		if (newest == -1) +			return -1; + +		*backup_index = newest; +		*num_backups_tried = 1; +	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { +		/* we've tried all the backups, all done */ +		return -1; +	} else { +		/* jump to the next oldest backup */ +		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % +			BTRFS_NUM_BACKUP_ROOTS; +		*backup_index = newest; +		*num_backups_tried += 1; +	} +	root_backup = super->super_roots + newest; + +	btrfs_set_super_generation(super, +				   btrfs_backup_tree_root_gen(root_backup)); +	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); +	btrfs_set_super_root_level(super, +				   btrfs_backup_tree_root_level(root_backup)); +	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); + +	/* +	 * fixme: the total bytes and num_devices need to match or we should +	 * require a fsck +	 */ +	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); +	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); +	return 0; +} + +/* helper to clean up workers */ +static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) +{ +	btrfs_destroy_workqueue(fs_info->fixup_workers); +	btrfs_destroy_workqueue(fs_info->delalloc_workers); +	btrfs_destroy_workqueue(fs_info->workers); +	btrfs_destroy_workqueue(fs_info->endio_workers); +	btrfs_destroy_workqueue(fs_info->endio_meta_workers); +	btrfs_destroy_workqueue(fs_info->endio_raid56_workers); +	btrfs_destroy_workqueue(fs_info->rmw_workers); +	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); +	btrfs_destroy_workqueue(fs_info->endio_write_workers); +	btrfs_destroy_workqueue(fs_info->endio_freespace_worker); +	btrfs_destroy_workqueue(fs_info->submit_workers); +	btrfs_destroy_workqueue(fs_info->delayed_workers); +	btrfs_destroy_workqueue(fs_info->caching_workers); +	btrfs_destroy_workqueue(fs_info->readahead_workers); +	btrfs_destroy_workqueue(fs_info->flush_workers); +	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); +	btrfs_destroy_workqueue(fs_info->extent_workers); +} + +static void free_root_extent_buffers(struct btrfs_root *root) +{ +	if (root) { +		free_extent_buffer(root->node); +		free_extent_buffer(root->commit_root); +		root->node = NULL; +		root->commit_root = NULL; +	} +} + +/* helper to clean up tree roots */ +static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) +{ +	free_root_extent_buffers(info->tree_root); + +	free_root_extent_buffers(info->dev_root); +	free_root_extent_buffers(info->extent_root); +	free_root_extent_buffers(info->csum_root); +	free_root_extent_buffers(info->quota_root); +	free_root_extent_buffers(info->uuid_root); +	if (chunk_root) +		free_root_extent_buffers(info->chunk_root); +} + +void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) +{ +	int ret; +	struct btrfs_root *gang[8]; +	int i; + +	while (!list_empty(&fs_info->dead_roots)) { +		gang[0] = list_entry(fs_info->dead_roots.next, +				     struct 
btrfs_root, root_list); +		list_del(&gang[0]->root_list); + +		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) { +			btrfs_drop_and_free_fs_root(fs_info, gang[0]); +		} else { +			free_extent_buffer(gang[0]->node); +			free_extent_buffer(gang[0]->commit_root); +			btrfs_put_fs_root(gang[0]); +		} +	} + +	while (1) { +		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, +					     (void **)gang, 0, +					     ARRAY_SIZE(gang)); +		if (!ret) +			break; +		for (i = 0; i < ret; i++) +			btrfs_drop_and_free_fs_root(fs_info, gang[i]); +	} + +	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { +		btrfs_free_log_root_tree(NULL, fs_info); +		btrfs_destroy_pinned_extent(fs_info->tree_root, +					    fs_info->pinned_extents); +	} +} + +int open_ctree(struct super_block *sb, +	       struct btrfs_fs_devices *fs_devices, +	       char *options)  {  	u32 sectorsize;  	u32 nodesize; @@ -1534,27 +2147,28 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	u64 features;  	struct btrfs_key location;  	struct buffer_head *bh; -	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), -						 GFP_NOFS); -	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), -						 GFP_NOFS); -	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root), -					       GFP_NOFS); -	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info), -						GFP_NOFS); -	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), -						GFP_NOFS); -	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), -					      GFP_NOFS); +	struct btrfs_super_block *disk_super; +	struct btrfs_fs_info *fs_info = btrfs_sb(sb); +	struct btrfs_root *tree_root; +	struct btrfs_root *extent_root; +	struct btrfs_root *csum_root; +	struct btrfs_root *chunk_root; +	struct btrfs_root *dev_root; +	struct btrfs_root *quota_root; +	struct btrfs_root *uuid_root;  	struct btrfs_root *log_tree_root; -  	int ret;  	int err = -EINVAL; - -	struct btrfs_super_block *disk_super; - -	if (!extent_root || !tree_root || !fs_info || -	    !chunk_root || !dev_root || !csum_root) { +	int num_backups_tried = 0; +	int backup_index = 0; +	int max_active; +	int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; +	bool create_uuid_tree; +	bool check_uuid_tree; + +	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info); +	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info); +	if (!tree_root || !chunk_root) {  		err = -ENOMEM;  		goto fail;  	} @@ -1571,63 +2185,129 @@ struct btrfs_root *open_ctree(struct super_block *sb,  		goto fail_srcu;  	} +	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0); +	if (ret) { +		err = ret; +		goto fail_bdi; +	} +	fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * +					(1 + ilog2(nr_cpu_ids)); + +	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0); +	if (ret) { +		err = ret; +		goto fail_dirty_metadata_bytes; +	} + +	ret = percpu_counter_init(&fs_info->bio_counter, 0); +	if (ret) { +		err = ret; +		goto fail_delalloc_bytes; +	} +  	fs_info->btree_inode = new_inode(sb);  	if (!fs_info->btree_inode) {  		err = -ENOMEM; -		goto fail_bdi; +		goto fail_bio_counter;  	} +	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); +  	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); +	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);  	INIT_LIST_HEAD(&fs_info->trans_list);  	INIT_LIST_HEAD(&fs_info->dead_roots);  	INIT_LIST_HEAD(&fs_info->delayed_iputs); -	INIT_LIST_HEAD(&fs_info->hashers); -	INIT_LIST_HEAD(&fs_info->delalloc_inodes); -	
INIT_LIST_HEAD(&fs_info->ordered_operations); +	INIT_LIST_HEAD(&fs_info->delalloc_roots);  	INIT_LIST_HEAD(&fs_info->caching_block_groups); -	spin_lock_init(&fs_info->delalloc_lock); -	spin_lock_init(&fs_info->new_trans_lock); -	spin_lock_init(&fs_info->ref_cache_lock); +	spin_lock_init(&fs_info->delalloc_root_lock); +	spin_lock_init(&fs_info->trans_lock);  	spin_lock_init(&fs_info->fs_roots_radix_lock);  	spin_lock_init(&fs_info->delayed_iput_lock); +	spin_lock_init(&fs_info->defrag_inodes_lock); +	spin_lock_init(&fs_info->free_chunk_lock); +	spin_lock_init(&fs_info->tree_mod_seq_lock); +	spin_lock_init(&fs_info->super_lock); +	spin_lock_init(&fs_info->qgroup_op_lock); +	spin_lock_init(&fs_info->buffer_lock); +	rwlock_init(&fs_info->tree_mod_log_lock); +	mutex_init(&fs_info->reloc_mutex); +	mutex_init(&fs_info->delalloc_root_mutex); +	seqlock_init(&fs_info->profiles_lock);  	init_completion(&fs_info->kobj_unregister); -	fs_info->tree_root = tree_root; -	fs_info->extent_root = extent_root; -	fs_info->csum_root = csum_root; -	fs_info->chunk_root = chunk_root; -	fs_info->dev_root = dev_root; -	fs_info->fs_devices = fs_devices;  	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);  	INIT_LIST_HEAD(&fs_info->space_info); +	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);  	btrfs_mapping_init(&fs_info->mapping_tree); -	btrfs_init_block_rsv(&fs_info->global_block_rsv); -	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv); -	btrfs_init_block_rsv(&fs_info->trans_block_rsv); -	btrfs_init_block_rsv(&fs_info->chunk_block_rsv); -	btrfs_init_block_rsv(&fs_info->empty_block_rsv); -	INIT_LIST_HEAD(&fs_info->durable_block_rsv_list); -	mutex_init(&fs_info->durable_block_rsv_mutex); +	btrfs_init_block_rsv(&fs_info->global_block_rsv, +			     BTRFS_BLOCK_RSV_GLOBAL); +	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv, +			     BTRFS_BLOCK_RSV_DELALLOC); +	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); +	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); +	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); +	btrfs_init_block_rsv(&fs_info->delayed_block_rsv, +			     BTRFS_BLOCK_RSV_DELOPS);  	atomic_set(&fs_info->nr_async_submits, 0);  	atomic_set(&fs_info->async_delalloc_pages, 0);  	atomic_set(&fs_info->async_submit_draining, 0);  	atomic_set(&fs_info->nr_async_bios, 0); +	atomic_set(&fs_info->defrag_running, 0); +	atomic_set(&fs_info->qgroup_op_seq, 0); +	atomic64_set(&fs_info->tree_mod_seq, 0);  	fs_info->sb = sb;  	fs_info->max_inline = 8192 * 1024;  	fs_info->metadata_ratio = 0; +	fs_info->defrag_inodes = RB_ROOT; +	fs_info->free_chunk_space = 0; +	fs_info->tree_mod_log = RB_ROOT; +	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; +	fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64); +	/* readahead state */ +	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); +	spin_lock_init(&fs_info->reada_lock);  	fs_info->thread_pool_size = min_t(unsigned long,  					  num_online_cpus() + 2, 8); -	INIT_LIST_HEAD(&fs_info->ordered_extents); -	spin_lock_init(&fs_info->ordered_extent_lock); +	INIT_LIST_HEAD(&fs_info->ordered_roots); +	spin_lock_init(&fs_info->ordered_root_lock); +	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), +					GFP_NOFS); +	if (!fs_info->delayed_root) { +		err = -ENOMEM; +		goto fail_iput; +	} +	btrfs_init_delayed_root(fs_info->delayed_root); + +	mutex_init(&fs_info->scrub_lock); +	atomic_set(&fs_info->scrubs_running, 0); +	atomic_set(&fs_info->scrub_pause_req, 0); +	
atomic_set(&fs_info->scrubs_paused, 0); +	atomic_set(&fs_info->scrub_cancel_req, 0); +	init_waitqueue_head(&fs_info->replace_wait); +	init_waitqueue_head(&fs_info->scrub_pause_wait); +	fs_info->scrub_workers_refcnt = 0; +#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY +	fs_info->check_integrity_print_mask = 0; +#endif + +	spin_lock_init(&fs_info->balance_lock); +	mutex_init(&fs_info->balance_mutex); +	atomic_set(&fs_info->balance_running, 0); +	atomic_set(&fs_info->balance_pause_req, 0); +	atomic_set(&fs_info->balance_cancel_req, 0); +	fs_info->balance_ctl = NULL; +	init_waitqueue_head(&fs_info->balance_wait_q); +	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);  	sb->s_blocksize = 4096;  	sb->s_blocksize_bits = blksize_bits(4096);  	sb->s_bdi = &fs_info->bdi;  	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; -	fs_info->btree_inode->i_nlink = 1; +	set_nlink(fs_info->btree_inode, 1);  	/*  	 * we set the i_size on the btree inode to the max possible int.  	 * the real end of the address space is determined by all of @@ -1639,40 +2319,58 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);  	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, -			     fs_info->btree_inode->i_mapping, -			     GFP_NOFS); -	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, -			     GFP_NOFS); +			     fs_info->btree_inode->i_mapping); +	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0; +	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);  	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;  	BTRFS_I(fs_info->btree_inode)->root = tree_root;  	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,  	       sizeof(struct btrfs_key)); -	BTRFS_I(fs_info->btree_inode)->dummy_inode = 1; -	insert_inode_hash(fs_info->btree_inode); +	set_bit(BTRFS_INODE_DUMMY, +		&BTRFS_I(fs_info->btree_inode)->runtime_flags); +	btrfs_insert_inode_hash(fs_info->btree_inode);  	spin_lock_init(&fs_info->block_group_cache_lock);  	fs_info->block_group_cache_tree = RB_ROOT; +	fs_info->first_logical_byte = (u64)-1;  	extent_io_tree_init(&fs_info->freed_extents[0], -			     fs_info->btree_inode->i_mapping, GFP_NOFS); +			     fs_info->btree_inode->i_mapping);  	extent_io_tree_init(&fs_info->freed_extents[1], -			     fs_info->btree_inode->i_mapping, GFP_NOFS); +			     fs_info->btree_inode->i_mapping);  	fs_info->pinned_extents = &fs_info->freed_extents[0];  	fs_info->do_barriers = 1; -	mutex_init(&fs_info->trans_mutex);  	mutex_init(&fs_info->ordered_operations_mutex); +	mutex_init(&fs_info->ordered_extent_flush_mutex);  	mutex_init(&fs_info->tree_log_mutex);  	mutex_init(&fs_info->chunk_mutex);  	mutex_init(&fs_info->transaction_kthread_mutex);  	mutex_init(&fs_info->cleaner_mutex);  	mutex_init(&fs_info->volume_mutex); -	init_rwsem(&fs_info->extent_commit_sem); +	init_rwsem(&fs_info->commit_root_sem);  	init_rwsem(&fs_info->cleanup_work_sem);  	init_rwsem(&fs_info->subvol_sem); +	sema_init(&fs_info->uuid_tree_rescan_sem, 1); +	fs_info->dev_replace.lock_owner = 0; +	atomic_set(&fs_info->dev_replace.nesting_level, 0); +	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); +	mutex_init(&fs_info->dev_replace.lock_management_lock); +	mutex_init(&fs_info->dev_replace.lock); + +	spin_lock_init(&fs_info->qgroup_lock); +	mutex_init(&fs_info->qgroup_ioctl_lock); +	fs_info->qgroup_tree = RB_ROOT; +	fs_info->qgroup_op_tree = RB_ROOT; +	INIT_LIST_HEAD(&fs_info->dirty_qgroups); +	fs_info->qgroup_seq = 1; +	
fs_info->quota_enabled = 0; +	fs_info->pending_quota_state = 0; +	fs_info->qgroup_ulist = NULL; +	mutex_init(&fs_info->qgroup_rescan_lock);  	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);  	btrfs_init_free_cluster(&fs_info->data_alloc_cluster); @@ -1682,28 +2380,80 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	init_waitqueue_head(&fs_info->transaction_blocked_wait);  	init_waitqueue_head(&fs_info->async_submit_wait); +	ret = btrfs_alloc_stripe_hash_table(fs_info); +	if (ret) { +		err = ret; +		goto fail_alloc; +	} +  	__setup_root(4096, 4096, 4096, 4096, tree_root,  		     fs_info, BTRFS_ROOT_TREE_OBJECTID); +	invalidate_bdev(fs_devices->latest_bdev); + +	/* +	 * Read super block and check the signature bytes only +	 */  	bh = btrfs_read_dev_super(fs_devices->latest_bdev); -	if (!bh) -		goto fail_iput; +	if (!bh) { +		err = -EINVAL; +		goto fail_alloc; +	} + +	/* +	 * We want to check superblock checksum, the type is stored inside. +	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). +	 */ +	if (btrfs_check_super_csum(bh->b_data)) { +		printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); +		err = -EINVAL; +		goto fail_alloc; +	} -	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); -	memcpy(&fs_info->super_for_commit, &fs_info->super_copy, -	       sizeof(fs_info->super_for_commit)); +	/* +	 * super_copy is zeroed at allocation time and we never touch the +	 * following bytes up to INFO_SIZE, the checksum is calculated from +	 * the whole block of INFO_SIZE +	 */ +	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); +	memcpy(fs_info->super_for_commit, fs_info->super_copy, +	       sizeof(*fs_info->super_for_commit));  	brelse(bh); -	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE); +	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); -	disk_super = &fs_info->super_copy; +	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); +	if (ret) { +		printk(KERN_ERR "BTRFS: superblock contains fatal errors\n"); +		err = -EINVAL; +		goto fail_alloc; +	} + +	disk_super = fs_info->super_copy;  	if (!btrfs_super_root(disk_super)) -		goto fail_iput; +		goto fail_alloc; + +	/* check FS state, whether FS is broken. */ +	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) +		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); + +	/* +	 * run through our array of backup supers and setup +	 * our ring pointer to the oldest one +	 */ +	generation = btrfs_super_generation(disk_super); +	find_oldest_super_backup(fs_info, generation); + +	/* +	 * In the long term, we'll store the compression type in the super +	 * block, and it'll be used for per file compression control. +	 */ +	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;  	ret = btrfs_parse_options(tree_root, options);  	if (ret) {  		err = ret; -		goto fail_iput; +		goto fail_alloc;  	}  	features = btrfs_super_incompat_flags(disk_super) & @@ -1711,101 +2461,156 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	if (features) {  		printk(KERN_ERR "BTRFS: couldn't mount because of "  		       "unsupported optional features (%Lx).\n", -		       (unsigned long long)features); +		       features);  		err = -EINVAL; -		goto fail_iput; +		goto fail_alloc; +	} + +	if (btrfs_super_leafsize(disk_super) != +	    btrfs_super_nodesize(disk_super)) { +		printk(KERN_ERR "BTRFS: couldn't mount because metadata " +		       "blocksizes don't match.  
node %d leaf %d\n", +		       btrfs_super_nodesize(disk_super), +		       btrfs_super_leafsize(disk_super)); +		err = -EINVAL; +		goto fail_alloc; +	} +	if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) { +		printk(KERN_ERR "BTRFS: couldn't mount because metadata " +		       "blocksize (%d) was too large\n", +		       btrfs_super_leafsize(disk_super)); +		err = -EINVAL; +		goto fail_alloc;  	}  	features = btrfs_super_incompat_flags(disk_super); -	if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) { -		features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; -		btrfs_set_super_incompat_flags(disk_super, features); +	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; +	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) +		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; + +	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) +		printk(KERN_ERR "BTRFS: has skinny extents\n"); + +	/* +	 * flag our filesystem as having big metadata blocks if +	 * they are bigger than the page size +	 */ +	if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) { +		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) +			printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); +		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;  	} +	nodesize = btrfs_super_nodesize(disk_super); +	leafsize = btrfs_super_leafsize(disk_super); +	sectorsize = btrfs_super_sectorsize(disk_super); +	stripesize = btrfs_super_stripesize(disk_super); +	fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids)); +	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); + +	/* +	 * mixed block groups end up with duplicate but slightly offset +	 * extent buffers for the same range.  This leads to corruption +	 */ +	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && +	    (sectorsize != leafsize)) { +		printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes " +				"are not allowed for mixed block groups on %s\n", +				sb->s_id); +		goto fail_alloc; +	} + +	/* +	 * No need to take the lock here because there is no other task +	 * that will update the flag. 
+	 */ +	btrfs_set_super_incompat_flags(disk_super, features); +  	features = btrfs_super_compat_ro_flags(disk_super) &  		~BTRFS_FEATURE_COMPAT_RO_SUPP;  	if (!(sb->s_flags & MS_RDONLY) && features) {  		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "  		       "unsupported option features (%Lx).\n", -		       (unsigned long long)features); +		       features);  		err = -EINVAL; -		goto fail_iput; +		goto fail_alloc;  	} -	btrfs_init_workers(&fs_info->generic_worker, -			   "genwork", 1, NULL); +	max_active = fs_info->thread_pool_size; + +	fs_info->workers = +		btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI, +				      max_active, 16); -	btrfs_init_workers(&fs_info->workers, "worker", -			   fs_info->thread_pool_size, -			   &fs_info->generic_worker); +	fs_info->delalloc_workers = +		btrfs_alloc_workqueue("delalloc", flags, max_active, 2); -	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", -			   fs_info->thread_pool_size, -			   &fs_info->generic_worker); +	fs_info->flush_workers = +		btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0); -	btrfs_init_workers(&fs_info->submit_workers, "submit", -			   min_t(u64, fs_devices->num_devices, -			   fs_info->thread_pool_size), -			   &fs_info->generic_worker); +	fs_info->caching_workers = +		btrfs_alloc_workqueue("cache", flags, max_active, 0); -	/* a higher idle thresh on the submit workers makes it much more +	/* +	 * a higher idle thresh on the submit workers makes it much more  	 * likely that bios will be sent down in a sane order to the  	 * devices  	 */ -	fs_info->submit_workers.idle_thresh = 64; - -	fs_info->workers.idle_thresh = 16; -	fs_info->workers.ordered = 1; - -	fs_info->delalloc_workers.idle_thresh = 2; -	fs_info->delalloc_workers.ordered = 1; - -	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1, -			   &fs_info->generic_worker); -	btrfs_init_workers(&fs_info->endio_workers, "endio", -			   fs_info->thread_pool_size, -			   &fs_info->generic_worker); -	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", -			   fs_info->thread_pool_size, -			   &fs_info->generic_worker); -	btrfs_init_workers(&fs_info->endio_meta_write_workers, -			   "endio-meta-write", fs_info->thread_pool_size, -			   &fs_info->generic_worker); -	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", -			   fs_info->thread_pool_size, -			   &fs_info->generic_worker); -	btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", -			   1, &fs_info->generic_worker); +	fs_info->submit_workers = +		btrfs_alloc_workqueue("submit", flags, +				      min_t(u64, fs_devices->num_devices, +					    max_active), 64); + +	fs_info->fixup_workers = +		btrfs_alloc_workqueue("fixup", flags, 1, 0);  	/*  	 * endios are largely parallel and should have a very  	 * low idle thresh  	 */ -	fs_info->endio_workers.idle_thresh = 4; -	fs_info->endio_meta_workers.idle_thresh = 4; - -	fs_info->endio_write_workers.idle_thresh = 2; -	fs_info->endio_meta_write_workers.idle_thresh = 2; - -	btrfs_start_workers(&fs_info->workers, 1); -	btrfs_start_workers(&fs_info->generic_worker, 1); -	btrfs_start_workers(&fs_info->submit_workers, 1); -	btrfs_start_workers(&fs_info->delalloc_workers, 1); -	btrfs_start_workers(&fs_info->fixup_workers, 1); -	btrfs_start_workers(&fs_info->endio_workers, 1); -	btrfs_start_workers(&fs_info->endio_meta_workers, 1); -	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); -	btrfs_start_workers(&fs_info->endio_write_workers, 1); -	btrfs_start_workers(&fs_info->endio_freespace_worker, 1); +	
fs_info->endio_workers = +		btrfs_alloc_workqueue("endio", flags, max_active, 4); +	fs_info->endio_meta_workers = +		btrfs_alloc_workqueue("endio-meta", flags, max_active, 4); +	fs_info->endio_meta_write_workers = +		btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2); +	fs_info->endio_raid56_workers = +		btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4); +	fs_info->rmw_workers = +		btrfs_alloc_workqueue("rmw", flags, max_active, 2); +	fs_info->endio_write_workers = +		btrfs_alloc_workqueue("endio-write", flags, max_active, 2); +	fs_info->endio_freespace_worker = +		btrfs_alloc_workqueue("freespace-write", flags, max_active, 0); +	fs_info->delayed_workers = +		btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0); +	fs_info->readahead_workers = +		btrfs_alloc_workqueue("readahead", flags, max_active, 2); +	fs_info->qgroup_rescan_workers = +		btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0); +	fs_info->extent_workers = +		btrfs_alloc_workqueue("extent-refs", flags, +				      min_t(u64, fs_devices->num_devices, +					    max_active), 8); + +	if (!(fs_info->workers && fs_info->delalloc_workers && +	      fs_info->submit_workers && fs_info->flush_workers && +	      fs_info->endio_workers && fs_info->endio_meta_workers && +	      fs_info->endio_meta_write_workers && +	      fs_info->endio_write_workers && fs_info->endio_raid56_workers && +	      fs_info->endio_freespace_worker && fs_info->rmw_workers && +	      fs_info->caching_workers && fs_info->readahead_workers && +	      fs_info->fixup_workers && fs_info->delayed_workers && +	      fs_info->extent_workers && +	      fs_info->qgroup_rescan_workers)) { +		err = -ENOMEM; +		goto fail_sb_buffer; +	}  	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);  	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,  				    4 * 1024 * 1024 / PAGE_CACHE_SIZE); -	nodesize = btrfs_super_nodesize(disk_super); -	leafsize = btrfs_super_leafsize(disk_super); -	sectorsize = btrfs_super_sectorsize(disk_super); -	stripesize = btrfs_super_stripesize(disk_super);  	tree_root->nodesize = nodesize;  	tree_root->leafsize = leafsize;  	tree_root->sectorsize = sectorsize; @@ -1814,9 +2619,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	sb->s_blocksize = sectorsize;  	sb->s_blocksize_bits = blksize_bits(sectorsize); -	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC, -		    sizeof(disk_super->magic))) { -		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id); +	if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) { +		printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id); +		goto fail_sb_buffer; +	} + +	if (sectorsize != PAGE_SIZE) { +		printk(KERN_WARNING "BTRFS: Incompatible sector size (%lu) " +		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);  		goto fail_sb_buffer;  	} @@ -1824,7 +2634,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	ret = btrfs_read_sys_array(tree_root);  	mutex_unlock(&fs_info->chunk_mutex);  	if (ret) { -		printk(KERN_WARNING "btrfs: failed to read the system " +		printk(KERN_WARNING "BTRFS: failed to read the system "  		       "array on %s\n", sb->s_id);  		goto fail_sb_buffer;  	} @@ -1839,30 +2649,38 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	chunk_root->node = read_tree_block(chunk_root,  					   btrfs_super_chunk_root(disk_super),  					   blocksize, generation); -	BUG_ON(!chunk_root->node); -	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { -		printk(KERN_WARNING "btrfs: failed 
to read chunk root on %s\n", +	if (!chunk_root->node || +	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { +		printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",  		       sb->s_id); -		goto fail_chunk_root; +		goto fail_tree_roots;  	}  	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);  	chunk_root->commit_root = btrfs_root_node(chunk_root);  	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, -	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), -	   BTRFS_UUID_SIZE); +	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); -	mutex_lock(&fs_info->chunk_mutex);  	ret = btrfs_read_chunk_tree(chunk_root); -	mutex_unlock(&fs_info->chunk_mutex);  	if (ret) { -		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n", +		printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",  		       sb->s_id); -		goto fail_chunk_root; +		goto fail_tree_roots;  	} -	btrfs_close_extra_devices(fs_devices); +	/* +	 * keep the device that is marked to be the target device for the +	 * dev_replace procedure +	 */ +	btrfs_close_extra_devices(fs_info, fs_devices, 0); + +	if (!fs_devices->latest_bdev) { +		printk(KERN_CRIT "BTRFS: failed to read devices on %s\n", +		       sb->s_id); +		goto fail_tree_roots; +	} +retry_root_backup:  	blocksize = btrfs_level_size(tree_root,  				     btrfs_super_root_level(disk_super));  	generation = btrfs_super_generation(disk_super); @@ -1870,51 +2688,129 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	tree_root->node = read_tree_block(tree_root,  					  btrfs_super_root(disk_super),  					  blocksize, generation); -	if (!tree_root->node) -		goto fail_chunk_root; -	if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { -		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n", +	if (!tree_root->node || +	    !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { +		printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",  		       sb->s_id); -		goto fail_tree_root; + +		goto recovery_tree_root;  	} +  	btrfs_set_root_node(&tree_root->root_item, tree_root->node);  	tree_root->commit_root = btrfs_root_node(tree_root); +	btrfs_set_root_refs(&tree_root->root_item, 1); -	ret = find_and_setup_root(tree_root, fs_info, -				  BTRFS_EXTENT_TREE_OBJECTID, extent_root); -	if (ret) -		goto fail_tree_root; -	extent_root->track_dirty = 1; +	location.objectid = BTRFS_EXTENT_TREE_OBJECTID; +	location.type = BTRFS_ROOT_ITEM_KEY; +	location.offset = 0; -	ret = find_and_setup_root(tree_root, fs_info, -				  BTRFS_DEV_TREE_OBJECTID, dev_root); -	if (ret) -		goto fail_extent_root; -	dev_root->track_dirty = 1; +	extent_root = btrfs_read_tree_root(tree_root, &location); +	if (IS_ERR(extent_root)) { +		ret = PTR_ERR(extent_root); +		goto recovery_tree_root; +	} +	set_bit(BTRFS_ROOT_TRACK_DIRTY, &extent_root->state); +	fs_info->extent_root = extent_root; -	ret = find_and_setup_root(tree_root, fs_info, -				  BTRFS_CSUM_TREE_OBJECTID, csum_root); -	if (ret) -		goto fail_dev_root; +	location.objectid = BTRFS_DEV_TREE_OBJECTID; +	dev_root = btrfs_read_tree_root(tree_root, &location); +	if (IS_ERR(dev_root)) { +		ret = PTR_ERR(dev_root); +		goto recovery_tree_root; +	} +	set_bit(BTRFS_ROOT_TRACK_DIRTY, &dev_root->state); +	fs_info->dev_root = dev_root; +	btrfs_init_devices_late(fs_info); + +	location.objectid = BTRFS_CSUM_TREE_OBJECTID; +	csum_root = btrfs_read_tree_root(tree_root, &location); +	if (IS_ERR(csum_root)) { +		ret = PTR_ERR(csum_root); +		goto 
recovery_tree_root; +	} +	set_bit(BTRFS_ROOT_TRACK_DIRTY, &csum_root->state); +	fs_info->csum_root = csum_root; -	csum_root->track_dirty = 1; +	location.objectid = BTRFS_QUOTA_TREE_OBJECTID; +	quota_root = btrfs_read_tree_root(tree_root, &location); +	if (!IS_ERR(quota_root)) { +		set_bit(BTRFS_ROOT_TRACK_DIRTY, &quota_root->state); +		fs_info->quota_enabled = 1; +		fs_info->pending_quota_state = 1; +		fs_info->quota_root = quota_root; +	} + +	location.objectid = BTRFS_UUID_TREE_OBJECTID; +	uuid_root = btrfs_read_tree_root(tree_root, &location); +	if (IS_ERR(uuid_root)) { +		ret = PTR_ERR(uuid_root); +		if (ret != -ENOENT) +			goto recovery_tree_root; +		create_uuid_tree = true; +		check_uuid_tree = false; +	} else { +		set_bit(BTRFS_ROOT_TRACK_DIRTY, &uuid_root->state); +		fs_info->uuid_root = uuid_root; +		create_uuid_tree = false; +		check_uuid_tree = +		    generation != btrfs_super_uuid_tree_generation(disk_super); +	}  	fs_info->generation = generation;  	fs_info->last_trans_committed = generation; -	fs_info->data_alloc_profile = (u64)-1; -	fs_info->metadata_alloc_profile = (u64)-1; -	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; -	ret = btrfs_read_block_groups(extent_root); +	ret = btrfs_recover_balance(fs_info); +	if (ret) { +		printk(KERN_WARNING "BTRFS: failed to recover balance\n"); +		goto fail_block_groups; +	} + +	ret = btrfs_init_dev_stats(fs_info);  	if (ret) { -		printk(KERN_ERR "Failed to read block groups: %d\n", ret); +		printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n", +		       ret);  		goto fail_block_groups;  	} +	ret = btrfs_init_dev_replace(fs_info); +	if (ret) { +		pr_err("BTRFS: failed to init dev_replace: %d\n", ret); +		goto fail_block_groups; +	} + +	btrfs_close_extra_devices(fs_info, fs_devices, 1); + +	ret = btrfs_sysfs_add_one(fs_info); +	if (ret) { +		pr_err("BTRFS: failed to init sysfs interface: %d\n", ret); +		goto fail_block_groups; +	} + +	ret = btrfs_init_space_info(fs_info); +	if (ret) { +		printk(KERN_ERR "BTRFS: Failed to initialize space info: %d\n", ret); +		goto fail_sysfs; +	} + +	ret = btrfs_read_block_groups(extent_root); +	if (ret) { +		printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret); +		goto fail_sysfs; +	} +	fs_info->num_tolerated_disk_barrier_failures = +		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); +	if (fs_info->fs_devices->missing_devices > +	     fs_info->num_tolerated_disk_barrier_failures && +	    !(sb->s_flags & MS_RDONLY)) { +		printk(KERN_WARNING "BTRFS: " +			"too many missing devices, writeable mount is not allowed\n"); +		goto fail_sysfs; +	} +  	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,  					       "btrfs-cleaner");  	if (IS_ERR(fs_info->cleaner_kthread)) -		goto fail_block_groups; +		goto fail_sysfs;  	fs_info->transaction_kthread = kthread_run(transaction_kthread,  						   tree_root, @@ -1925,28 +2821,49 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	if (!btrfs_test_opt(tree_root, SSD) &&  	    !btrfs_test_opt(tree_root, NOSSD) &&  	    !fs_info->fs_devices->rotating) { -		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD " +		printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "  		       "mode\n");  		btrfs_set_opt(fs_info->mount_opt, SSD);  	} +	/* Set the real inode map cache flag */ +	if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE)) +		btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE); + +#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY +	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) { +		ret = 
btrfsic_mount(tree_root, fs_devices, +				    btrfs_test_opt(tree_root, +					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? +				    1 : 0, +				    fs_info->check_integrity_print_mask); +		if (ret) +			printk(KERN_WARNING "BTRFS: failed to initialize" +			       " integrity check module %s\n", sb->s_id); +	} +#endif +	ret = btrfs_read_qgroup_config(fs_info); +	if (ret) +		goto fail_trans_kthread; + +	/* do not make disk changes in broken FS */  	if (btrfs_super_log_root(disk_super) != 0) {  		u64 bytenr = btrfs_super_log_root(disk_super);  		if (fs_devices->rw_devices == 0) { -			printk(KERN_WARNING "Btrfs log replay required " +			printk(KERN_WARNING "BTRFS: log replay required "  			       "on RO media\n");  			err = -EIO; -			goto fail_trans_kthread; +			goto fail_qgroup;  		}  		blocksize =  		     btrfs_level_size(tree_root,  				      btrfs_super_log_root_level(disk_super)); -		log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); +		log_tree_root = btrfs_alloc_root(fs_info);  		if (!log_tree_root) {  			err = -ENOMEM; -			goto fail_trans_kthread; +			goto fail_qgroup;  		}  		__setup_root(nodesize, leafsize, sectorsize, stripesize, @@ -1955,54 +2872,119 @@ struct btrfs_root *open_ctree(struct super_block *sb,  		log_tree_root->node = read_tree_block(tree_root, bytenr,  						      blocksize,  						      generation + 1); +		if (!log_tree_root->node || +		    !extent_buffer_uptodate(log_tree_root->node)) { +			printk(KERN_ERR "BTRFS: failed to read log tree\n"); +			free_extent_buffer(log_tree_root->node); +			kfree(log_tree_root); +			goto fail_qgroup; +		} +		/* returns with log_tree_root freed on success */  		ret = btrfs_recover_log_trees(log_tree_root); -		BUG_ON(ret); +		if (ret) { +			btrfs_error(tree_root->fs_info, ret, +				    "Failed to recover log tree"); +			free_extent_buffer(log_tree_root->node); +			kfree(log_tree_root); +			goto fail_qgroup; +		}  		if (sb->s_flags & MS_RDONLY) { -			ret =  btrfs_commit_super(tree_root); -			BUG_ON(ret); +			ret = btrfs_commit_super(tree_root); +			if (ret) +				goto fail_qgroup;  		}  	}  	ret = btrfs_find_orphan_roots(tree_root); -	BUG_ON(ret); +	if (ret) +		goto fail_qgroup;  	if (!(sb->s_flags & MS_RDONLY)) {  		ret = btrfs_cleanup_fs_roots(fs_info); -		BUG_ON(ret); +		if (ret) +			goto fail_qgroup; +		mutex_lock(&fs_info->cleaner_mutex);  		ret = btrfs_recover_relocation(tree_root); +		mutex_unlock(&fs_info->cleaner_mutex);  		if (ret < 0) {  			printk(KERN_WARNING -			       "btrfs: failed to recover relocation\n"); +			       "BTRFS: failed to recover relocation\n");  			err = -EINVAL; -			goto fail_trans_kthread; +			goto fail_qgroup;  		}  	}  	location.objectid = BTRFS_FS_TREE_OBJECTID;  	location.type = BTRFS_ROOT_ITEM_KEY; -	location.offset = (u64)-1; +	location.offset = 0;  	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); -	if (!fs_info->fs_root) -		goto fail_trans_kthread;  	if (IS_ERR(fs_info->fs_root)) {  		err = PTR_ERR(fs_info->fs_root); -		goto fail_trans_kthread; +		goto fail_qgroup;  	} -	if (!(sb->s_flags & MS_RDONLY)) { -		down_read(&fs_info->cleanup_work_sem); -		btrfs_orphan_cleanup(fs_info->fs_root); -		btrfs_orphan_cleanup(fs_info->tree_root); +	if (sb->s_flags & MS_RDONLY) +		return 0; + +	down_read(&fs_info->cleanup_work_sem); +	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || +	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {  		up_read(&fs_info->cleanup_work_sem); +		close_ctree(tree_root); +		return ret;  	} +	up_read(&fs_info->cleanup_work_sem); -	return tree_root; +	
ret = btrfs_resume_balance_async(fs_info); +	if (ret) { +		printk(KERN_WARNING "BTRFS: failed to resume balance\n"); +		close_ctree(tree_root); +		return ret; +	} + +	ret = btrfs_resume_dev_replace_async(fs_info); +	if (ret) { +		pr_warn("BTRFS: failed to resume dev_replace\n"); +		close_ctree(tree_root); +		return ret; +	} + +	btrfs_qgroup_rescan_resume(fs_info); + +	if (create_uuid_tree) { +		pr_info("BTRFS: creating UUID tree\n"); +		ret = btrfs_create_uuid_tree(fs_info); +		if (ret) { +			pr_warn("BTRFS: failed to create the UUID tree %d\n", +				ret); +			close_ctree(tree_root); +			return ret; +		} +	} else if (check_uuid_tree || +		   btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) { +		pr_info("BTRFS: checking UUID tree\n"); +		ret = btrfs_check_uuid_tree(fs_info); +		if (ret) { +			pr_warn("BTRFS: failed to check the UUID tree %d\n", +				ret); +			close_ctree(tree_root); +			return ret; +		} +	} else { +		fs_info->update_uuid_tree_gen = 1; +	} + +	return 0; +fail_qgroup: +	btrfs_free_qgroup_config(fs_info);  fail_trans_kthread:  	kthread_stop(fs_info->transaction_kthread); +	btrfs_cleanup_transaction(fs_info->tree_root); +	btrfs_free_fs_roots(fs_info);  fail_cleaner:  	kthread_stop(fs_info->cleaner_kthread); @@ -2011,71 +2993,75 @@ fail_cleaner:  	 * kthreads  	 */  	filemap_write_and_wait(fs_info->btree_inode->i_mapping); -	invalidate_inode_pages2(fs_info->btree_inode->i_mapping); + +fail_sysfs: +	btrfs_sysfs_remove_one(fs_info);  fail_block_groups: +	btrfs_put_block_group_cache(fs_info);  	btrfs_free_block_groups(fs_info); -	free_extent_buffer(csum_root->node); -	free_extent_buffer(csum_root->commit_root); -fail_dev_root: -	free_extent_buffer(dev_root->node); -	free_extent_buffer(dev_root->commit_root); -fail_extent_root: -	free_extent_buffer(extent_root->node); -	free_extent_buffer(extent_root->commit_root); -fail_tree_root: -	free_extent_buffer(tree_root->node); -	free_extent_buffer(tree_root->commit_root); -fail_chunk_root: -	free_extent_buffer(chunk_root->node); -	free_extent_buffer(chunk_root->commit_root); -fail_sb_buffer: -	btrfs_stop_workers(&fs_info->generic_worker); -	btrfs_stop_workers(&fs_info->fixup_workers); -	btrfs_stop_workers(&fs_info->delalloc_workers); -	btrfs_stop_workers(&fs_info->workers); -	btrfs_stop_workers(&fs_info->endio_workers); -	btrfs_stop_workers(&fs_info->endio_meta_workers); -	btrfs_stop_workers(&fs_info->endio_meta_write_workers); -	btrfs_stop_workers(&fs_info->endio_write_workers); -	btrfs_stop_workers(&fs_info->endio_freespace_worker); -	btrfs_stop_workers(&fs_info->submit_workers); -fail_iput: + +fail_tree_roots: +	free_root_pointers(fs_info, 1);  	invalidate_inode_pages2(fs_info->btree_inode->i_mapping); -	iput(fs_info->btree_inode); -	btrfs_close_devices(fs_info->fs_devices); +fail_sb_buffer: +	btrfs_stop_all_workers(fs_info); +fail_alloc: +fail_iput:  	btrfs_mapping_tree_free(&fs_info->mapping_tree); + +	iput(fs_info->btree_inode); +fail_bio_counter: +	percpu_counter_destroy(&fs_info->bio_counter); +fail_delalloc_bytes: +	percpu_counter_destroy(&fs_info->delalloc_bytes); +fail_dirty_metadata_bytes: +	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);  fail_bdi:  	bdi_destroy(&fs_info->bdi);  fail_srcu:  	cleanup_srcu_struct(&fs_info->subvol_srcu);  fail: -	kfree(extent_root); -	kfree(tree_root); -	kfree(fs_info); -	kfree(chunk_root); -	kfree(dev_root); -	kfree(csum_root); -	return ERR_PTR(err); +	btrfs_free_stripe_hash_table(fs_info); +	btrfs_close_devices(fs_info->fs_devices); +	return err; + +recovery_tree_root: +	if 
(!btrfs_test_opt(tree_root, RECOVERY)) +		goto fail_tree_roots; + +	free_root_pointers(fs_info, 0); + +	/* don't use the log in recovery mode, it won't be valid */ +	btrfs_set_super_log_root(disk_super, 0); + +	/* we can't trust the free space cache either */ +	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); + +	ret = next_root_backup(fs_info, fs_info->super_copy, +			       &num_backups_tried, &backup_index); +	if (ret == -1) +		goto fail_block_groups; +	goto retry_root_backup;  }  static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)  { -	char b[BDEVNAME_SIZE]; -  	if (uptodate) {  		set_buffer_uptodate(bh);  	} else { -		if (printk_ratelimit()) { -			printk(KERN_WARNING "lost page write due to " -					"I/O error on %s\n", -				       bdevname(bh->b_bdev, b)); -		} +		struct btrfs_device *device = (struct btrfs_device *) +			bh->b_private; + +		printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to " +					  "I/O error on %s\n", +					  rcu_str_deref(device->name));  		/* note, we don't set_buffer_write_io_error because we have  		 * our own ways of dealing with the IO errors  		 */  		clear_buffer_uptodate(bh); +		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);  	}  	unlock_buffer(bh);  	put_bh(bh); @@ -2097,16 +3083,17 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)  	 */  	for (i = 0; i < 1; i++) {  		bytenr = btrfs_sb_offset(i); -		if (bytenr + 4096 >= i_size_read(bdev->bd_inode)) +		if (bytenr + BTRFS_SUPER_INFO_SIZE >= +					i_size_read(bdev->bd_inode))  			break; -		bh = __bread(bdev, bytenr / 4096, 4096); +		bh = __bread(bdev, bytenr / 4096, +					BTRFS_SUPER_INFO_SIZE);  		if (!bh)  			continue;  		super = (struct btrfs_super_block *)bh->b_data;  		if (btrfs_super_bytenr(super) != bytenr || -		    strncmp((char *)(&super->magic), BTRFS_MAGIC, -			    sizeof(super->magic))) { +		    btrfs_super_magic(super) != BTRFS_MAGIC) {  			brelse(bh);  			continue;  		} @@ -2143,22 +3130,10 @@ static int write_dev_supers(struct btrfs_device *device,  	int errors = 0;  	u32 crc;  	u64 bytenr; -	int last_barrier = 0;  	if (max_mirrors == 0)  		max_mirrors = BTRFS_SUPER_MIRROR_MAX; -	/* make sure only the last submit_bh does a barrier */ -	if (do_barriers) { -		for (i = 0; i < max_mirrors; i++) { -			bytenr = btrfs_sb_offset(i); -			if (bytenr + BTRFS_SUPER_INFO_SIZE >= -			    device->total_bytes) -				break; -			last_barrier = i; -		} -	} -  	for (i = 0; i < max_mirrors; i++) {  		bytenr = btrfs_sb_offset(i);  		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)  			break; @@ -2167,7 +3142,10 @@ static int write_dev_supers(struct btrfs_device *device,  		if (wait) {  			bh = __find_get_block(device->bdev, bytenr / 4096,  					      BTRFS_SUPER_INFO_SIZE); -			BUG_ON(!bh); +			if (!bh) { +				errors++; +				continue; +			}  			wait_on_buffer(bh);  			if (!buffer_uptodate(bh))  				errors++; @@ -2182,7 +3160,7 @@ static int write_dev_supers(struct btrfs_device *device,  			btrfs_set_super_bytenr(sb, bytenr);  			crc = ~(u32)0; -			crc = btrfs_csum_data(NULL, (char *)sb + +			crc = btrfs_csum_data((char *)sb +  					      BTRFS_CSUM_SIZE, crc,  					      BTRFS_SUPER_INFO_SIZE -  					      BTRFS_CSUM_SIZE); @@ -2194,6 +3172,13 @@ static int write_dev_supers(struct btrfs_device *device,  			 */  			bh = __getblk(device->bdev, bytenr / 4096,  				      BTRFS_SUPER_INFO_SIZE); +			if (!bh) { +				printk(KERN_ERR "BTRFS: couldn't get super " +				       "buffer head for bytenr %Lu\n", bytenr); +				errors++; +				continue; 
 
-int write_all_supers(struct btrfs_root *root, int max_mirrors)
+/*
+ * endio for the write_dev_flush, this will wake anyone waiting
+ * for the barrier when it is done
+ */
+static void btrfs_end_empty_barrier(struct bio *bio, int err)
+{
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	}
+	if (bio->bi_private)
+		complete(bio->bi_private);
+	bio_put(bio);
+}
+
+/*
+ * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
+ * are sent down.  With wait == 1, it waits for the previous flush.
+ *
+ * any device where the flush fails with eopnotsupp is flagged as not-barrier
+ * capable
+ */
+static int write_dev_flush(struct btrfs_device *device, int wait)
+{
+	struct bio *bio;
+	int ret = 0;
+
+	if (device->nobarriers)
+		return 0;
+
+	if (wait) {
+		bio = device->flush_bio;
+		if (!bio)
+			return 0;
+
+		wait_for_completion(&device->flush_wait);
+
+		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+			printk_in_rcu("BTRFS: disabling barriers on dev %s\n",
+				      rcu_str_deref(device->name));
+			device->nobarriers = 1;
+		} else if (!bio_flagged(bio, BIO_UPTODATE)) {
+			ret = -EIO;
+			btrfs_dev_stat_inc_and_print(device,
+				BTRFS_DEV_STAT_FLUSH_ERRS);
+		}
+
+		/* drop the reference from the wait == 0 run */
+		bio_put(bio);
+		device->flush_bio = NULL;
+
+		return ret;
+	}
+
+	/*
+	 * one reference for us, and we leave it for the
+	 * caller
+	 */
+	device->flush_bio = NULL;
+	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_end_io = btrfs_end_empty_barrier;
+	bio->bi_bdev = device->bdev;
+	init_completion(&device->flush_wait);
+	bio->bi_private = &device->flush_wait;
+	device->flush_bio = bio;
+
+	bio_get(bio);
+	btrfsic_submit_bio(WRITE_FLUSH, bio);
+
+	return 0;
+}
+
+/*
+ * send an empty flush down to each device in parallel,
+ * then wait for them
+ */
+static int barrier_all_devices(struct btrfs_fs_info *info)
+{
+	struct list_head *head;
+	struct btrfs_device *dev;
+	int errors_send = 0;
+	int errors_wait = 0;
+	int ret;
+
+	/* send down all the barriers */
+	head = &info->fs_devices->devices;
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (dev->missing)
+			continue;
+		if (!dev->bdev) {
+			errors_send++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 0);
+		if (ret)
+			errors_send++;
+	}
+
+	/* wait for all the barriers */
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (dev->missing)
+			continue;
+		if (!dev->bdev) {
+			errors_wait++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 1);
+		if (ret)
+			errors_wait++;
+	}
+	if (errors_send > info->num_tolerated_disk_barrier_failures ||
+	    errors_wait > info->num_tolerated_disk_barrier_failures)
+		return -EIO;
+	return 0;
+}
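barrier_all_devices() is a two-phase fan-out: one pass queues an empty flush on every device, a second pass reaps the completions, so the device caches drain in parallel instead of serially. The same shape in isolation, with flush() as a hypothetical stand-in for write_dev_flush() and its wait flag:

/* Hedged sketch of the submit-then-wait fan-out used above.  flush(dev, 0)
 * is assumed to only start an asynchronous cache flush; flush(dev, 1) is
 * assumed to wait for the flush started earlier and return its status.
 */
struct dev;	/* opaque device handle, assumed */

static int flush_all(struct dev **devs, int ndev,
		     int (*flush)(struct dev *dev, int wait))
{
	int i, errors = 0;

	for (i = 0; i < ndev; i++)		/* phase 1: all in flight */
		if (flush(devs[i], 0))
			errors++;

	for (i = 0; i < ndev; i++)		/* phase 2: reap results */
		if (flush(devs[i], 1))
			errors++;

	return errors;	/* caller compares against its tolerated count */
}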
+
+int btrfs_calc_num_tolerated_disk_barrier_failures(
+	struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_ioctl_space_info space;
+	struct btrfs_space_info *sinfo;
+	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
+		       BTRFS_BLOCK_GROUP_SYSTEM,
+		       BTRFS_BLOCK_GROUP_METADATA,
+		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
+	int num_types = 4;
+	int i;
+	int c;
+	int num_tolerated_disk_barrier_failures =
+		(int)fs_info->fs_devices->num_devices;
+
+	for (i = 0; i < num_types; i++) {
+		struct btrfs_space_info *tmp;
+
+		sinfo = NULL;
+		rcu_read_lock();
+		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
+			if (tmp->flags == types[i]) {
+				sinfo = tmp;
+				break;
+			}
+		}
+		rcu_read_unlock();
+
+		if (!sinfo)
+			continue;
+
+		down_read(&sinfo->groups_sem);
+		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
+			if (!list_empty(&sinfo->block_groups[c])) {
+				u64 flags;
+
+				btrfs_get_block_group_info(
+					&sinfo->block_groups[c], &space);
+				if (space.total_bytes == 0 ||
+				    space.used_bytes == 0)
+					continue;
+				flags = space.flags;
+				/*
+				 * return
+				 * 0: if dup, single or RAID0 is configured for
+				 *    any of metadata, system or data, else
+				 * 1: if RAID5 is configured, or if RAID1 or
+				 *    RAID10 is configured and only two mirrors
+				 *    are used, else
+				 * 2: if RAID6 is configured, else
+				 * num_mirrors - 1: if RAID1 or RAID10 is
+				 *                  configured and more than
+				 *                  2 mirrors are used.
+				 */
+				if (num_tolerated_disk_barrier_failures > 0 &&
+				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
+					       BTRFS_BLOCK_GROUP_RAID0)) ||
+				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
+				      == 0)))
+					num_tolerated_disk_barrier_failures = 0;
+				else if (num_tolerated_disk_barrier_failures > 1) {
+					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+					    BTRFS_BLOCK_GROUP_RAID5 |
+					    BTRFS_BLOCK_GROUP_RAID10)) {
+						num_tolerated_disk_barrier_failures = 1;
+					} else if (flags &
+						   BTRFS_BLOCK_GROUP_RAID6) {
+						num_tolerated_disk_barrier_failures = 2;
+					}
+				}
+			}
+		}
+		up_read(&sinfo->groups_sem);
+	}
+
+	return num_tolerated_disk_barrier_failures;
+}
+
+static int write_all_supers(struct btrfs_root *root, int max_mirrors)
 {
 	struct list_head *head;
 	struct btrfs_device *dev;
@@ -2227,15 +3420,28 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 	int total_errors = 0;
 	u64 flags;
 
-	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
 	do_barriers = !btrfs_test_opt(root, NOBARRIER);
+	backup_super_roots(root->fs_info);
 
-	sb = &root->fs_info->super_for_commit;
+	sb = root->fs_info->super_for_commit;
 	dev_item = &sb->dev_item;
 
 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
 	head = &root->fs_info->fs_devices->devices;
-	list_for_each_entry(dev, head, dev_list) {
+	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
+
+	if (do_barriers) {
+		ret = barrier_all_devices(root->fs_info);
+		if (ret) {
+			mutex_unlock(
+				&root->fs_info->fs_devices->device_list_mutex);
+			btrfs_error(root->fs_info, ret,
+				    "errors while submitting device barriers.");
+			return ret;
+		}
+	}
+
+	list_for_each_entry_rcu(dev, head, dev_list) {
 		if (!dev->bdev) {
 			total_errors++;
 			continue;
@@ -2262,13 +3468,18 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 			total_errors++;
 	}
 	if (total_errors > max_errors) {
-		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
+		btrfs_err(root->fs_info, "%d errors while writing supers",
 		       total_errors);
-		BUG();
+		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
+		/* FUA is masked off if unsupported and can't be the reason */
+		btrfs_error(root->fs_info, -EIO,
+			    "%d errors while writing supers", total_errors);
+		return -EIO;
 	}
 
 	total_errors = 0;
-	list_for_each_entry(dev, head, dev_list) {
+	list_for_each_entry_rcu(dev, head, dev_list) {
 		if (!dev->bdev)
 			continue;
 		if (!dev->in_fs_metadata || !dev->writeable)
@@ -2280,9 +3491,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 	}
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 	if (total_errors > max_errors) {
-		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
-		       total_errors);
-		BUG();
+		btrfs_error(root->fs_info, -EIO,
+			    "%d errors while writing supers", total_errors);
+		return -EIO;
 	}
 	return 0;
 }
@@ -2290,13 +3501,12 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 int write_ctree_super(struct btrfs_trans_handle *trans,
 		      struct btrfs_root *root, int max_mirrors)
 {
-	int ret;
-
-	ret = write_all_supers(root, max_mirrors);
-	return ret;
+	return write_all_supers(root, max_mirrors);
 }
 
-int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+/* Drop a fs root from the radix tree and free it. */
+void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
+				  struct btrfs_root *root)
 {
 	spin_lock(&fs_info->fs_roots_radix_lock);
 	radix_tree_delete(&fs_info->fs_roots_radix,
@@ -2306,104 +3516,107 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 	if (btrfs_root_refs(&root->root_item) == 0)
 		synchronize_srcu(&fs_info->subvol_srcu);
 
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+		btrfs_free_log(NULL, root);
+
+	if (root->free_ino_pinned)
+		__btrfs_remove_free_space_cache(root->free_ino_pinned);
+	if (root->free_ino_ctl)
+		__btrfs_remove_free_space_cache(root->free_ino_ctl);
 	free_fs_root(root);
-	return 0;
 }
 
 static void free_fs_root(struct btrfs_root *root)
 {
+	iput(root->cache_inode);
 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
-	if (root->anon_super.s_dev) {
-		down_write(&root->anon_super.s_umount);
-		kill_anon_super(&root->anon_super);
-	}
+	btrfs_free_block_rsv(root, root->orphan_block_rsv);
+	root->orphan_block_rsv = NULL;
+	if (root->anon_dev)
+		free_anon_bdev(root->anon_dev);
+	if (root->subv_writers)
+		btrfs_free_subvolume_writers(root->subv_writers);
 	free_extent_buffer(root->node);
 	free_extent_buffer(root->commit_root);
+	kfree(root->free_ino_ctl);
+	kfree(root->free_ino_pinned);
 	kfree(root->name);
-	kfree(root);
+	btrfs_put_fs_root(root);
 }
 
-static int del_fs_roots(struct btrfs_fs_info *fs_info)
+void btrfs_free_fs_root(struct btrfs_root *root)
 {
-	int ret;
-	struct btrfs_root *gang[8];
-	int i;
-
-	while (!list_empty(&fs_info->dead_roots)) {
-		gang[0] = list_entry(fs_info->dead_roots.next,
-				     struct btrfs_root, root_list);
-		list_del(&gang[0]->root_list);
-
-		if (gang[0]->in_radix) {
-			btrfs_free_fs_root(fs_info, gang[0]);
-		} else {
-			free_extent_buffer(gang[0]->node);
-			free_extent_buffer(gang[0]->commit_root);
-			kfree(gang[0]);
-		}
-	}
-
-	while (1) {
-		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
-					     (void **)gang, 0,
-					     ARRAY_SIZE(gang));
-		if (!ret)
-			break;
-		for (i = 0; i < ret; i++)
-			btrfs_free_fs_root(fs_info, gang[i]);
-	}
-	return 0;
+	free_fs_root(root);
 }
 
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
 {
 	u64 root_objectid = 0;
 	struct btrfs_root *gang[8];
-	int i;
-	int ret;
+	int i = 0;
+	int err = 0;
+	unsigned int ret = 0;
+	int index;
 
 	while (1) {
+		index = srcu_read_lock(&fs_info->subvol_srcu);
 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
 					     (void **)gang, root_objectid,
 					     ARRAY_SIZE(gang));
-		if (!ret)
+		if (!ret) {
+			srcu_read_unlock(&fs_info->subvol_srcu, index);
 			break;
-
+		}
 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
+
+		for (i = 0; i < ret; i++) {
+			/* Avoid grabbing roots in dead_roots */
+			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
+				gang[i] = NULL;
+				continue;
+			}
+			/* grab all the search results for later use */
+			gang[i] = btrfs_grab_fs_root(gang[i]);
+		}
+		srcu_read_unlock(&fs_info->subvol_srcu, index);
+
 		for (i = 0; i < ret; i++) {
+			if (!gang[i])
+				continue;
 			root_objectid = gang[i]->root_key.objectid;
-			btrfs_orphan_cleanup(gang[i]);
+			err = btrfs_orphan_cleanup(gang[i]);
+			if (err)
+				break;
+			btrfs_put_fs_root(gang[i]);
 		}
 		root_objectid++;
 	}
-	return 0;
+
+	/* release the uncleaned roots due to error */
+	for (; i < ret; i++) {
+		if (gang[i])
+			btrfs_put_fs_root(gang[i]);
+	}
+	return err;
 }
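btrfs_cleanup_fs_roots() pages through the radix tree in gangs of eight, restarting each lookup at the key of the last hit plus one, and pins each result under SRCU before working on it. The cursor pattern on its own, with hypothetical lookup()/process() callbacks standing in for radix_tree_gang_lookup() and btrfs_orphan_cleanup():

/* Hedged sketch of the gang-lookup cursor used above: fetch up to a
 * batch of items at or after 'cursor', advance the cursor past the
 * last key returned, and stop when a batch comes back empty.
 */
unsigned long item_key(void *item);	/* assumed key accessor */

static int for_each_item(void *tree,
			 int (*lookup)(void *tree, void **results,
				       unsigned long first, int max),
			 int (*process)(void *item))
{
	void *gang[8];
	unsigned long cursor = 0;
	int i, n, err;

	while ((n = lookup(tree, gang, cursor, 8)) > 0) {
		cursor = item_key(gang[n - 1]) + 1;	/* resume after last hit */
		for (i = 0; i < n; i++) {
			err = process(gang[i]);
			if (err)
				return err;
		}
	}
	return 0;
}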
 
 int btrfs_commit_super(struct btrfs_root *root)
 {
 	struct btrfs_trans_handle *trans;
-	int ret;
 
 	mutex_lock(&root->fs_info->cleaner_mutex);
 	btrfs_run_delayed_iputs(root);
-	btrfs_clean_old_snapshots(root);
 	mutex_unlock(&root->fs_info->cleaner_mutex);
+	wake_up_process(root->fs_info->cleaner_kthread);
 
 	/* wait until ongoing cleanup work done */
 	down_write(&root->fs_info->cleanup_work_sem);
 	up_write(&root->fs_info->cleanup_work_sem);
 
-	trans = btrfs_join_transaction(root, 1);
-	ret = btrfs_commit_transaction(trans, root);
-	BUG_ON(ret);
-	/* run commit again to drop the original snapshot */
-	trans = btrfs_join_transaction(root, 1);
-	btrfs_commit_transaction(trans, root);
-	ret = btrfs_write_and_wait_transaction(NULL, root);
-	BUG_ON(ret);
-
-	ret = write_ctree_super(NULL, root, 0);
-	return ret;
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
+	return btrfs_commit_transaction(trans, root);
 }
 
 int close_ctree(struct btrfs_root *root)
@@ -2414,187 +3627,555 @@ int close_ctree(struct btrfs_root *root)
 	fs_info->closing = 1;
 	smp_mb();
 
-	btrfs_put_block_group_cache(fs_info);
+	/* wait for the uuid_scan task to finish */
+	down(&fs_info->uuid_tree_rescan_sem);
+	/* avoid complaints from lockdep et al., set sem back to initial state */
+	up(&fs_info->uuid_tree_rescan_sem);
+
+	/* pause restriper - we want to resume on mount */
+	btrfs_pause_balance(fs_info);
+
+	btrfs_dev_replace_suspend_for_unmount(fs_info);
+
+	btrfs_scrub_cancel(fs_info);
+
+	/* wait for any defraggers to finish */
+	wait_event(fs_info->transaction_wait,
+		   (atomic_read(&fs_info->defrag_running) == 0));
+
+	/* clear out the rbtree of defraggable inodes */
+	btrfs_cleanup_defrag_inodes(fs_info);
+
+	cancel_work_sync(&fs_info->async_reclaim_work);
+
 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
-		ret =  btrfs_commit_super(root);
+		ret = btrfs_commit_super(root);
 		if (ret)
-			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
+			btrfs_err(root->fs_info, "commit super ret %d", ret);
 	}
 
-	kthread_stop(root->fs_info->transaction_kthread);
-	kthread_stop(root->fs_info->cleaner_kthread);
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+		btrfs_error_commit_super(root);
+
+	kthread_stop(fs_info->transaction_kthread);
+	kthread_stop(fs_info->cleaner_kthread);
 
 	fs_info->closing = 2;
 	smp_mb();
 
-	if (fs_info->delalloc_bytes) {
-		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
-		       (unsigned long long)fs_info->delalloc_bytes);
-	}
-	if (fs_info->total_ref_cache_size) {
-		printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
-		       (unsigned long long)fs_info->total_ref_cache_size);
+	btrfs_free_qgroup_config(root->fs_info);
+
+	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
+		btrfs_info(root->fs_info, "at unmount delalloc count %lld",
+		       percpu_counter_sum(&fs_info->delalloc_bytes));
 	}
 
-	free_extent_buffer(fs_info->extent_root->node);
-	free_extent_buffer(fs_info->extent_root->commit_root);
-	free_extent_buffer(fs_info->tree_root->node);
-	free_extent_buffer(fs_info->tree_root->commit_root);
-	free_extent_buffer(root->fs_info->chunk_root->node);
-	free_extent_buffer(root->fs_info->chunk_root->commit_root);
-	free_extent_buffer(root->fs_info->dev_root->node);
-	free_extent_buffer(root->fs_info->dev_root->commit_root);
-	free_extent_buffer(root->fs_info->csum_root->node);
-	free_extent_buffer(root->fs_info->csum_root->commit_root);
+	btrfs_sysfs_remove_one(fs_info);
 
-	btrfs_free_block_groups(root->fs_info);
+	btrfs_free_fs_roots(fs_info);
+
+	btrfs_put_block_group_cache(fs_info);
+
+	btrfs_free_block_groups(fs_info);
+
+	/*
+	 * we must make sure there are no read requests to
+	 * submit after we stop all workers.
+	 */
+	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+	btrfs_stop_all_workers(fs_info);
 
-	del_fs_roots(fs_info);
+	free_root_pointers(fs_info, 1);
 
 	iput(fs_info->btree_inode);
 
-	btrfs_stop_workers(&fs_info->generic_worker);
-	btrfs_stop_workers(&fs_info->fixup_workers);
-	btrfs_stop_workers(&fs_info->delalloc_workers);
-	btrfs_stop_workers(&fs_info->workers);
-	btrfs_stop_workers(&fs_info->endio_workers);
-	btrfs_stop_workers(&fs_info->endio_meta_workers);
-	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
-	btrfs_stop_workers(&fs_info->endio_write_workers);
-	btrfs_stop_workers(&fs_info->endio_freespace_worker);
-	btrfs_stop_workers(&fs_info->submit_workers);
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+	if (btrfs_test_opt(root, CHECK_INTEGRITY))
+		btrfsic_unmount(root, fs_info->fs_devices);
+#endif
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
+	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
+	percpu_counter_destroy(&fs_info->delalloc_bytes);
+	percpu_counter_destroy(&fs_info->bio_counter);
 	bdi_destroy(&fs_info->bdi);
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 
-	kfree(fs_info->extent_root);
-	kfree(fs_info->tree_root);
-	kfree(fs_info->chunk_root);
-	kfree(fs_info->dev_root);
-	kfree(fs_info->csum_root);
+	btrfs_free_stripe_hash_table(fs_info);
+
+	btrfs_free_block_rsv(root, root->orphan_block_rsv);
+	root->orphan_block_rsv = NULL;
+
 	return 0;
 }
 
-int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
+			  int atomic)
 {
 	int ret;
-	struct inode *btree_inode = buf->first_page->mapping->host;
+	struct inode *btree_inode = buf->pages[0]->mapping->host;
 
-	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
-				     NULL);
+	ret = extent_buffer_uptodate(buf);
 	if (!ret)
 		return ret;
 
 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
-				    parent_transid);
+				    parent_transid, atomic);
+	if (ret == -EAGAIN)
+		return ret;
 	return !ret;
 }
 
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
 {
-	struct inode *btree_inode = buf->first_page->mapping->host;
-	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
-					  buf);
+	return set_extent_buffer_uptodate(buf);
 }
 
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 {
-	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct btrfs_root *root;
 	u64 transid = btrfs_header_generation(buf);
-	struct inode *btree_inode = root->fs_info->btree_inode;
 	int was_dirty;
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+	/*
+	 * This is a fast path so only do this check if we have sanity tests
+	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
+	 * outside of the sanity tests.
+	 */
+	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
+		return;
+#endif
+	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
 	btrfs_assert_tree_locked(buf);
-	if (transid != root->fs_info->generation) {
-		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
+	if (transid != root->fs_info->generation)
+		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
 		       "found %llu running %llu\n",
-			(unsigned long long)buf->start,
-			(unsigned long long)transid,
-			(unsigned long long)root->fs_info->generation);
-		WARN_ON(1);
-	}
-	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
-					    buf);
-	if (!was_dirty) {
-		spin_lock(&root->fs_info->delalloc_lock);
-		root->fs_info->dirty_metadata_bytes += buf->len;
-		spin_unlock(&root->fs_info->delalloc_lock);
+			buf->start, transid, root->fs_info->generation);
+	was_dirty = set_extent_buffer_dirty(buf);
+	if (!was_dirty)
+		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+				     buf->len,
+				     root->fs_info->dirty_metadata_batch);
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
+		btrfs_print_leaf(root, buf);
+		ASSERT(0);
 	}
+#endif
 }
 
-void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
+					int flush_delayed)
 {
 	/*
 	 * looks as though older kernels can get into trouble with
 	 * this code; they end up stuck in balance_dirty_pages forever
 	 */
-	u64 num_dirty;
-	unsigned long thresh = 32 * 1024 * 1024;
+	int ret;
 
 	if (current->flags & PF_MEMALLOC)
 		return;
 
-	num_dirty = root->fs_info->dirty_metadata_bytes;
+	if (flush_delayed)
+		btrfs_balance_delayed_items(root);
 
-	if (num_dirty > thresh) {
-		balance_dirty_pages_ratelimited_nr(
-				   root->fs_info->btree_inode->i_mapping, 1);
+	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+				     BTRFS_DIRTY_METADATA_THRESH);
+	if (ret > 0) {
+		balance_dirty_pages_ratelimited(
+				   root->fs_info->btree_inode->i_mapping);
 	}
 	return;
 }
 
+void btrfs_btree_balance_dirty(struct btrfs_root *root)
+{
+	__btrfs_btree_balance_dirty(root, 1);
+}
+
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
+{
+	__btrfs_btree_balance_dirty(root, 0);
+}
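__btrfs_btree_balance_dirty() deliberately checks an approximate counter: percpu_counter_compare() can answer "above or below the threshold?" without folding every per-cpu delta, so the hot path that dirties metadata stays cheap. A toy user-space counter showing the batching idea (this is not the kernel percpu_counter API; NCPUS and the missing locking are simplifications for the sketch):

#define NCPUS 8		/* assumed CPU count for the sketch */

/* Each CPU accumulates into a private delta and only publishes to the
 * shared total once the delta crosses the batch size, so the total is
 * cheap to read but only accurate to within NCPUS * batch -- which is
 * fine for a "roughly past the threshold?" test like the one above.
 */
struct approx_counter {
	long total;		/* shared; would need a lock for real use */
	long delta[NCPUS];	/* per-cpu private batches */
};

static void counter_add(struct approx_counter *c, int cpu, long amount,
			long batch)
{
	c->delta[cpu] += amount;
	if (c->delta[cpu] >= batch || c->delta[cpu] <= -batch) {
		c->total += c->delta[cpu];
		c->delta[cpu] = 0;
	}
}

static int counter_over(struct approx_counter *c, long thresh)
{
	return c->total > thresh;	/* approximate, no summing pass */
}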
+
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 {
-	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-	int ret;
-	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
-	if (ret == 0)
-		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
+	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
+}
+
+static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+			      int read_only)
+{
+	/*
+	 * Placeholder for checks
+	 */
+	return 0;
+}
+
+static void btrfs_error_commit_super(struct btrfs_root *root)
+{
+	mutex_lock(&root->fs_info->cleaner_mutex);
+	btrfs_run_delayed_iputs(root);
+	mutex_unlock(&root->fs_info->cleaner_mutex);
+
+	down_write(&root->fs_info->cleanup_work_sem);
+	up_write(&root->fs_info->cleanup_work_sem);
+
+	/* cleanup FS via transaction */
+	btrfs_cleanup_transaction(root);
+}
+
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+					     struct btrfs_root *root)
+{
+	struct btrfs_inode *btrfs_inode;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	mutex_lock(&root->fs_info->ordered_operations_mutex);
+	spin_lock(&root->fs_info->ordered_root_lock);
+
+	list_splice_init(&t->ordered_operations, &splice);
+	while (!list_empty(&splice)) {
+		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
+					 ordered_operations);
+
+		list_del_init(&btrfs_inode->ordered_operations);
+		spin_unlock(&root->fs_info->ordered_root_lock);
+
+		btrfs_invalidate_inodes(btrfs_inode->root);
+
+		spin_lock(&root->fs_info->ordered_root_lock);
+	}
+
+	spin_unlock(&root->fs_info->ordered_root_lock);
+	mutex_unlock(&root->fs_info->ordered_operations_mutex);
+}
+
+static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
+{
+	struct btrfs_ordered_extent *ordered;
+
+	spin_lock(&root->ordered_extent_lock);
+	/*
+	 * This will just short-circuit the ordered completion stuff which will
+	 * make sure the ordered extent gets properly cleaned up.
+	 */
+	list_for_each_entry(ordered, &root->ordered_extents,
+			    root_extent_list)
+		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
+	spin_unlock(&root->ordered_extent_lock);
+}
+
+static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *root;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&fs_info->ordered_root_lock);
+	list_splice_init(&fs_info->ordered_roots, &splice);
+	while (!list_empty(&splice)) {
+		root = list_first_entry(&splice, struct btrfs_root,
+					ordered_root);
+		list_move_tail(&root->ordered_root,
+			       &fs_info->ordered_roots);
+
+		spin_unlock(&fs_info->ordered_root_lock);
+		btrfs_destroy_ordered_extents(root);
+
+		cond_resched();
+		spin_lock(&fs_info->ordered_root_lock);
+	}
+	spin_unlock(&fs_info->ordered_root_lock);
+}
+
+static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+				      struct btrfs_root *root)
+{
+	struct rb_node *node;
+	struct btrfs_delayed_ref_root *delayed_refs;
+	struct btrfs_delayed_ref_node *ref;
+	int ret = 0;
+
+	delayed_refs = &trans->delayed_refs;
+
+	spin_lock(&delayed_refs->lock);
+	if (atomic_read(&delayed_refs->num_entries) == 0) {
+		spin_unlock(&delayed_refs->lock);
+		btrfs_info(root->fs_info, "delayed_refs has NO entry");
+		return ret;
+	}
+
+	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+		struct btrfs_delayed_ref_head *head;
+		bool pin_bytes = false;
+
+		head = rb_entry(node, struct btrfs_delayed_ref_head,
+				href_node);
+		if (!mutex_trylock(&head->mutex)) {
+			atomic_inc(&head->node.refs);
+			spin_unlock(&delayed_refs->lock);
+
+			mutex_lock(&head->mutex);
+			mutex_unlock(&head->mutex);
+			btrfs_put_delayed_ref(&head->node);
+			spin_lock(&delayed_refs->lock);
+			continue;
+		}
+		spin_lock(&head->lock);
+		while ((node = rb_first(&head->ref_root)) != NULL) {
+			ref = rb_entry(node, struct btrfs_delayed_ref_node,
+				       rb_node);
+			ref->in_tree = 0;
+			rb_erase(&ref->rb_node, &head->ref_root);
+			atomic_dec(&delayed_refs->num_entries);
+			btrfs_put_delayed_ref(ref);
+		}
+		if (head->must_insert_reserved)
+			pin_bytes = true;
+		btrfs_free_delayed_extent_op(head->extent_op);
+		delayed_refs->num_heads--;
+		if (head->processing == 0)
+			delayed_refs->num_heads_ready--;
+		atomic_dec(&delayed_refs->num_entries);
+		head->node.in_tree = 0;
+		rb_erase(&head->href_node, &delayed_refs->href_root);
+		spin_unlock(&head->lock);
+		spin_unlock(&delayed_refs->lock);
+		mutex_unlock(&head->mutex);
+
+		if (pin_bytes)
+			btrfs_pin_extent(root, head->node.bytenr,
+					 head->node.num_bytes, 1);
+		btrfs_put_delayed_ref(&head->node);
+		cond_resched();
+		spin_lock(&delayed_refs->lock);
+	}
+
+	spin_unlock(&delayed_refs->lock);
+
 	return ret;
 }
 
-int btree_lock_page_hook(struct page *page)
+static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+{
+	struct btrfs_inode *btrfs_inode;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&root->delalloc_lock);
+	list_splice_init(&root->delalloc_inodes, &splice);
+
+	while (!list_empty(&splice)) {
+		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
+					       delalloc_inodes);
+
+		list_del_init(&btrfs_inode->delalloc_inodes);
+		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+			  &btrfs_inode->runtime_flags);
+		spin_unlock(&root->delalloc_lock);
+
+		btrfs_invalidate_inodes(btrfs_inode->root);
+
+		spin_lock(&root->delalloc_lock);
+	}
+
+	spin_unlock(&root->delalloc_lock);
+}
+
+static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *root;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&fs_info->delalloc_root_lock);
+	list_splice_init(&fs_info->delalloc_roots, &splice);
+	while (!list_empty(&splice)) {
+		root = list_first_entry(&splice, struct btrfs_root,
+					 delalloc_root);
+		list_del_init(&root->delalloc_root);
+		root = btrfs_grab_fs_root(root);
+		BUG_ON(!root);
+		spin_unlock(&fs_info->delalloc_root_lock);
+
+		btrfs_destroy_delalloc_inodes(root);
+		btrfs_put_fs_root(root);
+
+		spin_lock(&fs_info->delalloc_root_lock);
+	}
+	spin_unlock(&fs_info->delalloc_root_lock);
+}
+
+static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+					struct extent_io_tree *dirty_pages,
+					int mark)
 {
-	struct inode *inode = page->mapping->host;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	int ret;
 	struct extent_buffer *eb;
-	unsigned long len;
-	u64 bytenr = page_offset(page);
+	u64 start = 0;
+	u64 end;
 
-	if (page->private == EXTENT_PAGE_PRIVATE)
-		goto out;
+	while (1) {
+		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
+					    mark, NULL);
+		if (ret)
+			break;
 
-	len = page->private >> 2;
-	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
-	if (!eb)
-		goto out;
+		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+		while (start <= end) {
+			eb = btrfs_find_tree_block(root, start,
+						   root->leafsize);
+			start += root->leafsize;
+			if (!eb)
+				continue;
+			wait_on_extent_buffer_writeback(eb);
+
+			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
+					       &eb->bflags))
+				clear_extent_buffer_dirty(eb);
+			free_extent_buffer_stale(eb);
+		}
+	}
+
+	return ret;
+}
+
+static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+				       struct extent_io_tree *pinned_extents)
+{
+	struct extent_io_tree *unpin;
+	u64 start;
+	u64 end;
+	int ret;
+	bool loop = true;
 
-	btrfs_tree_lock(eb);
-	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+	unpin = pinned_extents;
+again:
+	while (1) {
+		ret = find_first_extent_bit(unpin, 0, &start, &end,
+					    EXTENT_DIRTY, NULL);
+		if (ret)
+			break;
+
+		/* opt_discard */
+		if (btrfs_test_opt(root, DISCARD))
+			ret = btrfs_error_discard_extent(root, start,
+							 end + 1 - start,
+							 NULL);
+
+		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		btrfs_error_unpin_extent_range(root, start, end);
+		cond_resched();
+	}
 
-	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
-		spin_lock(&root->fs_info->delalloc_lock);
-		if (root->fs_info->dirty_metadata_bytes >= eb->len)
-			root->fs_info->dirty_metadata_bytes -= eb->len;
+	if (loop) {
+		if (unpin == &root->fs_info->freed_extents[0])
+			unpin = &root->fs_info->freed_extents[1];
 		else
-			WARN_ON(1);
-		spin_unlock(&root->fs_info->delalloc_lock);
+			unpin = &root->fs_info->freed_extents[0];
+		loop = false;
+		goto again;
 	}
 
-	btrfs_tree_unlock(eb);
-	free_extent_buffer(eb);
-out:
-	lock_page(page);
+	return 0;
+}
+
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+				   struct btrfs_root *root)
+{
+	btrfs_destroy_ordered_operations(cur_trans, root);
+
+	btrfs_destroy_delayed_refs(cur_trans, root);
+
+	cur_trans->state = TRANS_STATE_COMMIT_START;
+	wake_up(&root->fs_info->transaction_blocked_wait);
+
+	cur_trans->state = TRANS_STATE_UNBLOCKED;
+	wake_up(&root->fs_info->transaction_wait);
+
+	btrfs_destroy_delayed_inodes(root);
+	btrfs_assert_delayed_root_empty(root);
+
+	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+				     EXTENT_DIRTY);
+	btrfs_destroy_pinned_extent(root,
+				    root->fs_info->pinned_extents);
+
+	cur_trans->state = TRANS_STATE_COMPLETED;
+	wake_up(&cur_trans->commit_wait);
+
+	/*
+	memset(cur_trans, 0, sizeof(*cur_trans));
+	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+	*/
+}
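btrfs_cleanup_one_transaction() drives the commit state machine by hand: each store to cur_trans->state is followed by a wake_up() on the queue that waiters poll, so anything blocked in the normal commit path re-checks its condition and sees the transaction being torn down. The generic shape of that handshake, as a sketch built on kernel wait-queue primitives rather than btrfs code (the bare store would want proper memory ordering in real use, and the wait queue is assumed to have been set up with init_waitqueue_head()):

#include <linux/wait.h>

enum t_state { T_RUNNING, T_COMMIT_START, T_UNBLOCKED, T_COMPLETED };

struct txn {
	enum t_state state;
	wait_queue_head_t wait;
};

/* publisher side: advance the state, then wake anyone polling it */
static void txn_publish(struct txn *t, enum t_state s)
{
	t->state = s;
	wake_up(&t->wait);
}

/* waiter side: sleep until the published state reaches 's' */
static void txn_wait_for(struct txn *t, enum t_state s)
{
	wait_event(t->wait, t->state >= s);
}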
+
+static int btrfs_cleanup_transaction(struct btrfs_root *root)
+{
+	struct btrfs_transaction *t;
+
+	mutex_lock(&root->fs_info->transaction_kthread_mutex);
+
+	spin_lock(&root->fs_info->trans_lock);
+	while (!list_empty(&root->fs_info->trans_list)) {
+		t = list_first_entry(&root->fs_info->trans_list,
+				     struct btrfs_transaction, list);
+		if (t->state >= TRANS_STATE_COMMIT_START) {
+			atomic_inc(&t->use_count);
+			spin_unlock(&root->fs_info->trans_lock);
+			btrfs_wait_for_commit(root, t->transid);
+			btrfs_put_transaction(t);
+			spin_lock(&root->fs_info->trans_lock);
+			continue;
+		}
+		if (t == root->fs_info->running_transaction) {
+			t->state = TRANS_STATE_COMMIT_DOING;
+			spin_unlock(&root->fs_info->trans_lock);
+			/*
+			 * We wait for 0 num_writers since we don't hold a trans
+			 * handle open currently for this transaction.
+			 */
+			wait_event(t->writer_wait,
+				   atomic_read(&t->num_writers) == 0);
+		} else {
+			spin_unlock(&root->fs_info->trans_lock);
+		}
+		btrfs_cleanup_one_transaction(t, root);
+
+		spin_lock(&root->fs_info->trans_lock);
+		if (t == root->fs_info->running_transaction)
+			root->fs_info->running_transaction = NULL;
+		list_del_init(&t->list);
+		spin_unlock(&root->fs_info->trans_lock);
+
+		btrfs_put_transaction(t);
+		trace_btrfs_transaction_commit(root);
+		spin_lock(&root->fs_info->trans_lock);
+	}
+	spin_unlock(&root->fs_info->trans_lock);
+	btrfs_destroy_all_ordered_extents(root->fs_info);
+	btrfs_destroy_delayed_inodes(root);
+	btrfs_assert_delayed_root_empty(root);
+	btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
+	btrfs_destroy_all_delalloc_inodes(root->fs_info);
+	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+
 	return 0;
 }
 
 static struct extent_io_ops btree_extent_io_ops = {
-	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
+	.readpage_io_failed_hook = btree_io_failed_hook,
 	.submit_bio_hook = btree_submit_bio_hook,
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
