Diffstat (limited to 'fs/btrfs/free-space-cache.c')
 fs/btrfs/free-space-cache.c | 850 ++++++++++++++++++++++++++++++-------------
 1 file changed, 599 insertions(+), 251 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0be7a8742a4..2b0a627cb5f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -104,7 +104,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
spin_lock(&block_group->lock);
if (!((BTRFS_I(inode)->flags & flags) == flags)) {
- printk(KERN_INFO "Old style space inode found, converting.\n");
+ btrfs_info(root->fs_info,
+ "Old style space inode found, converting.");
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
BTRFS_INODE_NODATACOW;
block_group->disk_cache_state = BTRFS_DC_CLEAR;
@@ -119,9 +120,10 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
return inode;
}
-int __create_free_space_inode(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u64 ino, u64 offset)
+static int __create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ u64 ino, u64 offset)
{
struct btrfs_key key;
struct btrfs_disk_key disk_key;
@@ -195,34 +197,33 @@ int create_free_space_inode(struct btrfs_root *root,
block_group->key.objectid);
}
-int btrfs_truncate_free_space_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct inode *inode)
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv)
{
- struct btrfs_block_rsv *rsv;
u64 needed_bytes;
- loff_t oldsize;
- int ret = 0;
-
- rsv = trans->block_rsv;
- trans->block_rsv = &root->fs_info->global_block_rsv;
+ int ret;
/* 1 for slack space, 1 for updating the inode */
needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
btrfs_calc_trans_metadata_size(root, 1);
- spin_lock(&trans->block_rsv->lock);
- if (trans->block_rsv->reserved < needed_bytes) {
- spin_unlock(&trans->block_rsv->lock);
- trans->block_rsv = rsv;
- return -ENOSPC;
- }
- spin_unlock(&trans->block_rsv->lock);
+ spin_lock(&rsv->lock);
+ if (rsv->reserved < needed_bytes)
+ ret = -ENOSPC;
+ else
+ ret = 0;
+ spin_unlock(&rsv->lock);
+ return ret;
+}
+
+int btrfs_truncate_free_space_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct inode *inode)
+{
+ int ret = 0;
- oldsize = i_size_read(inode);
btrfs_i_size_write(inode, 0);
- truncate_pagecache(inode, oldsize, 0);
+ truncate_pagecache(inode, 0);
/*
* We don't need an orphan item because truncating the free space cache
@@ -230,9 +231,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
*/
ret = btrfs_truncate_inode_items(trans, root, inode,
0, BTRFS_EXTENT_DATA_KEY);
-
if (ret) {
- trans->block_rsv = rsv;
btrfs_abort_transaction(trans, root, ret);
return ret;
}
@@ -240,7 +239,6 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
ret = btrfs_update_inode(trans, root, inode);
if (ret)
btrfs_abort_transaction(trans, root, ret);
- trans->block_rsv = rsv;
return ret;
}
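
The truncate helper above no longer juggles trans->block_rsv internally; the reservation check has been split out so callers can run it before committing to the truncate. A sketch of the resulting caller pattern (hedged: the surrounding variables and the out label are assumptions, not code from this diff):

struct btrfs_block_rsv *rsv = trans->block_rsv;
int ret;

/* Check the global reservation first; bail before touching anything. */
ret = btrfs_check_trunc_cache_free_space(root,
					 &root->fs_info->global_block_rsv);
if (ret)
	goto out;	/* -ENOSPC: leave the cache file alone */

/* Point the transaction at the global rsv for the truncate itself. */
trans->block_rsv = &root->fs_info->global_block_rsv;
ret = btrfs_truncate_free_space_cache(root, trans, inode);
trans->block_rsv = rsv;	/* restore the caller's reservation */

This mirrors the rsv save/restore that the removed lines used to do inside the helper, just moved to the call site.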
@@ -276,18 +274,32 @@ struct io_ctl {
};
static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
- struct btrfs_root *root)
+ struct btrfs_root *root, int write)
{
+ int num_pages;
+ int check_crcs = 0;
+
+ num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+
+ if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
+ check_crcs = 1;
+
+ /* Make sure we can fit our crcs into the first page */
+ if (write && check_crcs &&
+ (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+ return -ENOSPC;
+
memset(io_ctl, 0, sizeof(struct io_ctl));
- io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
- io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
- GFP_NOFS);
+
+ io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
if (!io_ctl->pages)
return -ENOMEM;
+
+ io_ctl->num_pages = num_pages;
io_ctl->root = root;
- if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
- io_ctl->check_crcs = 1;
+ io_ctl->check_crcs = check_crcs;
+
return 0;
}
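
io_ctl_init now rejects a cache write up front when the per-page crc table cannot fit in the first page: one u32 checksum per page is stored at the head of page 0. A standalone check of the bound (userspace sketch; PAGE_CACHE_SIZE assumed to be 4096):

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096	/* assumption: 4 KiB pages */

int main(void)
{
	/* num_pages * sizeof(u32) must stay below PAGE_CACHE_SIZE */
	int max_pages = PAGE_CACHE_SIZE / sizeof(uint32_t) - 1;

	printf("crc table fits for up to %d pages (~%d KiB cache file)\n",
	       max_pages, max_pages * PAGE_CACHE_SIZE / 1024);
	return 0;
}

So on 4 KiB pages the -ENOSPC path triggers once the cache file would need 1024 pages or more.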
@@ -307,7 +319,7 @@ static void io_ctl_unmap_page(struct io_ctl *io_ctl)
static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
- BUG_ON(io_ctl->index >= io_ctl->num_pages);
+ ASSERT(io_ctl->index < io_ctl->num_pages);
io_ctl->page = io_ctl->pages[io_ctl->index++];
io_ctl->cur = kmap(io_ctl->page);
io_ctl->orig = io_ctl->cur;
@@ -349,8 +361,8 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
btrfs_readpage(NULL, page);
lock_page(page);
if (!PageUptodate(page)) {
- printk(KERN_ERR "btrfs: error reading free "
- "space cache\n");
+ btrfs_err(BTRFS_I(inode)->root->fs_info,
+ "error reading free space cache");
io_ctl_drop_pages(io_ctl);
return -EIO;
}
@@ -407,7 +419,7 @@ static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
gen = io_ctl->cur;
if (le64_to_cpu(*gen) != generation) {
- printk_ratelimited(KERN_ERR "btrfs: space cache generation "
+ printk_ratelimited(KERN_ERR "BTRFS: space cache generation "
"(%Lu) does not match inode (%Lu)\n", *gen,
generation);
io_ctl_unmap_page(io_ctl);
@@ -431,7 +443,7 @@ static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
- crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+ crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_CACHE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
io_ctl_unmap_page(io_ctl);
@@ -461,11 +473,11 @@ static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
kunmap(io_ctl->pages[0]);
io_ctl_map_page(io_ctl, 0);
- crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+ crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_CACHE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
if (val != crc) {
- printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
+ printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free "
"space cache\n");
io_ctl_unmap_page(io_ctl);
return -EIO;
@@ -624,9 +636,9 @@ next:
spin_unlock(&ctl->tree_lock);
}
-int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
- struct btrfs_free_space_ctl *ctl,
- struct btrfs_path *path, u64 offset)
+static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_path *path, u64 offset)
{
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
@@ -668,18 +680,25 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
generation = btrfs_free_space_generation(leaf, header);
btrfs_release_path(path);
+ if (!BTRFS_I(inode)->generation) {
+ btrfs_info(root->fs_info,
+ "the free space cache file (%llu) is invalid, skipping it",
+ offset);
+ return 0;
+ }
+
if (BTRFS_I(inode)->generation != generation) {
- printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
- " not match free space cache generation (%llu)\n",
- (unsigned long long)BTRFS_I(inode)->generation,
- (unsigned long long)generation);
+ btrfs_err(root->fs_info,
+ "free space inode generation (%llu) "
+ "did not match free space cache generation (%llu)",
+ BTRFS_I(inode)->generation, generation);
return 0;
}
if (!num_entries)
return 0;
- ret = io_ctl_init(&io_ctl, inode, root);
+ ret = io_ctl_init(&io_ctl, inode, root, 0);
if (ret)
return ret;
@@ -721,13 +740,13 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock);
if (ret) {
- printk(KERN_ERR "Duplicate entries in "
- "free space cache, dumping\n");
+ btrfs_err(root->fs_info,
+ "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
} else {
- BUG_ON(!num_bitmaps);
+ ASSERT(num_bitmaps);
num_bitmaps--;
e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
if (!e->bitmap) {
@@ -741,8 +760,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
if (ret) {
- printk(KERN_ERR "Duplicate entries in "
- "free space cache, dumping\n");
+ btrfs_err(root->fs_info,
+ "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
@@ -833,8 +852,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
if (!matched) {
__btrfs_remove_free_space_cache(ctl);
- printk(KERN_ERR "block group %llu has an wrong amount of free "
- "space\n", block_group->key.objectid);
+ btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
+ block_group->key.objectid);
ret = -1;
}
out:
@@ -845,100 +864,52 @@ out:
spin_unlock(&block_group->lock);
ret = 0;
- printk(KERN_ERR "btrfs: failed to load free space cache "
- "for block group %llu\n", block_group->key.objectid);
+ btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
+ block_group->key.objectid);
}
iput(inode);
return ret;
}
-/**
- * __btrfs_write_out_cache - write out cached info to an inode
- * @root - the root the inode belongs to
- * @ctl - the free space cache we are going to write out
- * @block_group - the block_group for this cache if it belongs to a block_group
- * @trans - the trans handle
- * @path - the path to use
- * @offset - the offset for the key we'll insert
- *
- * This function writes out a free space cache struct to disk for quick recovery
- * on mount. This will return 0 if it was successfull in writing the cache out,
- * and -1 if it was not.
- */
-int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
- struct btrfs_free_space_ctl *ctl,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u64 offset)
+static noinline_for_stack
+int write_cache_extent_entries(struct io_ctl *io_ctl,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_block_group_cache *block_group,
+ int *entries, int *bitmaps,
+ struct list_head *bitmap_list)
{
- struct btrfs_free_space_header *header;
- struct extent_buffer *leaf;
- struct rb_node *node;
- struct list_head *pos, *n;
- struct extent_state *cached_state = NULL;
- struct btrfs_free_cluster *cluster = NULL;
- struct extent_io_tree *unpin = NULL;
- struct io_ctl io_ctl;
- struct list_head bitmap_list;
- struct btrfs_key key;
- u64 start, extent_start, extent_end, len;
- int entries = 0;
- int bitmaps = 0;
int ret;
- int err = -1;
-
- INIT_LIST_HEAD(&bitmap_list);
-
- if (!i_size_read(inode))
- return -1;
-
- ret = io_ctl_init(&io_ctl, inode, root);
- if (ret)
- return -1;
+ struct btrfs_free_cluster *cluster = NULL;
+ struct rb_node *node = rb_first(&ctl->free_space_offset);
/* Get the cluster for this block_group if it exists */
- if (block_group && !list_empty(&block_group->cluster_list))
+ if (block_group && !list_empty(&block_group->cluster_list)) {
cluster = list_entry(block_group->cluster_list.next,
struct btrfs_free_cluster,
block_group_list);
+ }
- /* Lock all pages first so we can lock the extent safely. */
- io_ctl_prepare_pages(&io_ctl, inode, 0);
-
- lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- 0, &cached_state);
-
- node = rb_first(&ctl->free_space_offset);
if (!node && cluster) {
node = rb_first(&cluster->root);
cluster = NULL;
}
- /* Make sure we can fit our crcs into the first page */
- if (io_ctl.check_crcs &&
- (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
- WARN_ON(1);
- goto out_nospc;
- }
-
- io_ctl_set_generation(&io_ctl, trans->transid);
-
/* Write out the extent entries */
while (node) {
struct btrfs_free_space *e;
e = rb_entry(node, struct btrfs_free_space, offset_index);
- entries++;
+ *entries += 1;
- ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
+ ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
e->bitmap);
if (ret)
- goto out_nospc;
+ goto fail;
if (e->bitmap) {
- list_add_tail(&e->list, &bitmap_list);
- bitmaps++;
+ list_add_tail(&e->list, bitmap_list);
+ *bitmaps += 1;
}
node = rb_next(node);
if (!node && cluster) {
@@ -946,131 +917,289 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
cluster = NULL;
}
}
+ return 0;
+fail:
+ return -ENOSPC;
+}
+
+static noinline_for_stack int
+update_cache_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode,
+ struct btrfs_path *path, u64 offset,
+ int entries, int bitmaps)
+{
+ struct btrfs_key key;
+ struct btrfs_free_space_header *header;
+ struct extent_buffer *leaf;
+ int ret;
+
+ key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+ key.offset = offset;
+ key.type = 0;
+
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret < 0) {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
+ GFP_NOFS);
+ goto fail;
+ }
+ leaf = path->nodes[0];
+ if (ret > 0) {
+ struct btrfs_key found_key;
+ ASSERT(path->slots[0]);
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
+ found_key.offset != offset) {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
+ inode->i_size - 1,
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
+ NULL, GFP_NOFS);
+ btrfs_release_path(path);
+ goto fail;
+ }
+ }
+
+ BTRFS_I(inode)->generation = trans->transid;
+ header = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_free_space_header);
+ btrfs_set_free_space_entries(leaf, header, entries);
+ btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
+ btrfs_set_free_space_generation(leaf, header, trans->transid);
+ btrfs_mark_buffer_dirty(leaf);
+ btrfs_release_path(path);
+
+ return 0;
+
+fail:
+ return -1;
+}
+
+static noinline_for_stack int
+write_pinned_extent_entries(struct btrfs_root *root,
+ struct btrfs_block_group_cache *block_group,
+ struct io_ctl *io_ctl,
+ int *entries)
+{
+ u64 start, extent_start, extent_end, len;
+ struct extent_io_tree *unpin = NULL;
+ int ret;
+
+ if (!block_group)
+ return 0;
/*
* We want to add any pinned extents to our free space cache
* so we don't leak the space
- */
-
- /*
+ *
* We shouldn't have switched the pinned extents yet so this is the
* right one
*/
unpin = root->fs_info->pinned_extents;
- if (block_group)
- start = block_group->key.objectid;
+ start = block_group->key.objectid;
- while (block_group && (start < block_group->key.objectid +
- block_group->key.offset)) {
+ while (start < block_group->key.objectid + block_group->key.offset) {
ret = find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
EXTENT_DIRTY, NULL);
- if (ret) {
- ret = 0;
- break;
- }
+ if (ret)
+ return 0;
/* This pinned extent is out of our range */
if (extent_start >= block_group->key.objectid +
block_group->key.offset)
- break;
+ return 0;
extent_start = max(extent_start, start);
extent_end = min(block_group->key.objectid +
block_group->key.offset, extent_end + 1);
len = extent_end - extent_start;
- entries++;
- ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
+ *entries += 1;
+ ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
if (ret)
- goto out_nospc;
+ return -ENOSPC;
start = extent_end;
}
+ return 0;
+}
+
+static noinline_for_stack int
+write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
+{
+ struct list_head *pos, *n;
+ int ret;
+
/* Write out the bitmaps */
- list_for_each_safe(pos, n, &bitmap_list) {
+ list_for_each_safe(pos, n, bitmap_list) {
struct btrfs_free_space *entry =
list_entry(pos, struct btrfs_free_space, list);
- ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
+ ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
if (ret)
- goto out_nospc;
+ return -ENOSPC;
list_del_init(&entry->list);
}
- /* Zero out the rest of the pages just to make sure */
- io_ctl_zero_remaining_pages(&io_ctl);
+ return 0;
+}
- ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
- 0, i_size_read(inode), &cached_state);
- io_ctl_drop_pages(&io_ctl);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+static int flush_dirty_cache(struct inode *inode)
+{
+ int ret;
+ ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (ret)
- goto out;
-
-
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
-
- key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = offset;
- key.type = 0;
-
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- if (ret < 0) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
GFP_NOFS);
- goto out;
+
+ return ret;
+}
+
+static void noinline_for_stack
+cleanup_write_cache_enospc(struct inode *inode,
+ struct io_ctl *io_ctl,
+ struct extent_state **cached_state,
+ struct list_head *bitmap_list)
+{
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, bitmap_list) {
+ struct btrfs_free_space *entry =
+ list_entry(pos, struct btrfs_free_space, list);
+ list_del_init(&entry->list);
}
- leaf = path->nodes[0];
- if (ret > 0) {
- struct btrfs_key found_key;
- BUG_ON(!path->slots[0]);
- path->slots[0]--;
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
- found_key.offset != offset) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
- inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
- NULL, GFP_NOFS);
- btrfs_release_path(path);
+ io_ctl_drop_pages(io_ctl);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1, cached_state,
+ GFP_NOFS);
+}
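
Both write_bitmap_entries() and cleanup_write_cache_enospc() walk bitmap_list with list_for_each_safe because every iteration unlinks the current node; the plain iterator would step through freed links. A self-contained userspace sketch of the idiom (minimal re-implementations of the list helpers, not the kernel headers):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)

struct entry { int id; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct entry e[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct list_head *pos, *n;
	int i;

	for (i = 0; i < 3; i++) {	/* link head -> e0 -> e1 -> e2 */
		e[i].list.prev = head.prev;
		e[i].list.next = &head;
		head.prev->next = &e[i].list;
		head.prev = &e[i].list;
	}

	/* Unlinking pos is safe: n was sampled before the del. */
	list_for_each_safe(pos, n, &head) {
		struct entry *cur = container_of(pos, struct entry, list);
		printf("draining entry %d\n", cur->id);
		list_del_init(pos);
	}
	return 0;
}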
+
+/**
+ * __btrfs_write_out_cache - write out cached info to an inode
+ * @root - the root the inode belongs to
+ * @ctl - the free space cache we are going to write out
+ * @block_group - the block_group for this cache if it belongs to a block_group
+ * @trans - the trans handle
+ * @path - the path to use
+ * @offset - the offset for the key we'll insert
+ *
+ * This function writes out a free space cache struct to disk for quick recovery
+ * on mount. This will return 0 if it was successful in writing the cache out,
+ * and -1 if it was not.
+ */
+static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u64 offset)
+{
+ struct extent_state *cached_state = NULL;
+ struct io_ctl io_ctl;
+ LIST_HEAD(bitmap_list);
+ int entries = 0;
+ int bitmaps = 0;
+ int ret;
+
+ if (!i_size_read(inode))
+ return -1;
+
+ ret = io_ctl_init(&io_ctl, inode, root, 1);
+ if (ret)
+ return -1;
+
+ if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
+ down_write(&block_group->data_rwsem);
+ spin_lock(&block_group->lock);
+ if (block_group->delalloc_bytes) {
+ block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+ spin_unlock(&block_group->lock);
+ up_write(&block_group->data_rwsem);
+ BTRFS_I(inode)->generation = 0;
+ ret = 0;
goto out;
}
+ spin_unlock(&block_group->lock);
}
- BTRFS_I(inode)->generation = trans->transid;
- header = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_free_space_header);
- btrfs_set_free_space_entries(leaf, header, entries);
- btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
- btrfs_set_free_space_generation(leaf, header, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(path);
+ /* Lock all pages first so we can lock the extent safely. */
+ io_ctl_prepare_pages(&io_ctl, inode, 0);
+
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ 0, &cached_state);
+
+ io_ctl_set_generation(&io_ctl, trans->transid);
+
+ /* Write out the extent entries in the free space cache */
+ ret = write_cache_extent_entries(&io_ctl, ctl,
+ block_group, &entries, &bitmaps,
+ &bitmap_list);
+ if (ret)
+ goto out_nospc;
+
+ /*
+ * Some spaces that are freed in the current transaction are pinned;
+ * they will be added into the free space cache after the transaction
+ * is committed, and we shouldn't lose them.
+ */
+ ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
+ if (ret)
+ goto out_nospc;
- err = 0;
+ /* At last, we write out all the bitmaps. */
+ ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+ if (ret)
+ goto out_nospc;
+
+ /* Zero out the rest of the pages just to make sure */
+ io_ctl_zero_remaining_pages(&io_ctl);
+
+ /* Everything is written out, now we dirty the pages in the file. */
+ ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+ 0, i_size_read(inode), &cached_state);
+ if (ret)
+ goto out_nospc;
+
+ if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ up_write(&block_group->data_rwsem);
+ /*
+ * Release the pages and unlock the extent; we will flush
+ * them out later.
+ */
+ io_ctl_drop_pages(&io_ctl);
+
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+
+ /* Flush the dirty pages in the cache file. */
+ ret = flush_dirty_cache(inode);
+ if (ret)
+ goto out;
+
+ /* Update the cache item to tell everyone this cache file is valid. */
+ ret = update_cache_item(trans, root, inode, path, offset,
+ entries, bitmaps);
out:
io_ctl_free(&io_ctl);
- if (err) {
+ if (ret) {
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
}
btrfs_update_inode(trans, root, inode);
- return err;
+ return ret;
out_nospc:
- list_for_each_safe(pos, n, &bitmap_list) {
- struct btrfs_free_space *entry =
- list_entry(pos, struct btrfs_free_space, list);
- list_del_init(&entry->list);
- }
- io_ctl_drop_pages(&io_ctl);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+ cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
+
+ if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ up_write(&block_group->data_rwsem);
+
goto out;
}
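
After the refactor, __btrfs_write_out_cache reads as a linear pipeline of the new helpers. An annotated outline of the call sequence (a reading aid distilled from the code above; argument lists elided, so not compilable as-is):

ret = io_ctl_init(&io_ctl, inode, root, 1);	/* size pages, check crc fit   */
io_ctl_prepare_pages(&io_ctl, inode, 0);	/* lock pages, then the extent */
io_ctl_set_generation(&io_ctl, trans->transid);
write_cache_extent_entries(...);		/* extents; bitmaps get queued */
write_pinned_extent_entries(...);		/* pinned space must not leak  */
write_bitmap_entries(...);			/* drain the queued bitmaps    */
io_ctl_zero_remaining_pages(&io_ctl);
btrfs_dirty_pages(...);				/* hand the pages to writeback */
io_ctl_drop_pages(&io_ctl);			/* release pages, unlock extent */
flush_dirty_cache(inode);			/* wait out the ordered extents */
update_cache_item(...);				/* stamp the cache file valid   */

Any -ENOSPC before the pages are dirtied funnels through cleanup_write_cache_enospc(), and any later failure zeroes the inode generation so the stale file is ignored on load.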
@@ -1090,6 +1219,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
spin_unlock(&block_group->lock);
return 0;
}
+
+ if (block_group->delalloc_bytes) {
+ block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+ spin_unlock(&block_group->lock);
+ return 0;
+ }
spin_unlock(&block_group->lock);
inode = lookup_free_space_inode(root, block_group, path);
@@ -1104,8 +1239,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
spin_unlock(&block_group->lock);
ret = 0;
#ifdef DEBUG
- printk(KERN_ERR "btrfs: failed to write free space cache "
- "for block group %llu\n", block_group->key.objectid);
+ btrfs_err(root->fs_info,
+ "failed to write free space cache for block group %llu",
+ block_group->key.objectid);
#endif
}
@@ -1116,7 +1252,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
u64 offset)
{
- BUG_ON(offset < bitmap_start);
+ ASSERT(offset >= bitmap_start);
offset -= bitmap_start;
return (unsigned long)(div_u64(offset, unit));
}
@@ -1271,7 +1407,7 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
if (n) {
entry = rb_entry(n, struct btrfs_free_space,
offset_index);
- BUG_ON(entry->offset > offset);
+ ASSERT(entry->offset <= offset);
} else {
if (fuzzy)
return entry;
@@ -1335,7 +1471,7 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
{
int ret = 0;
- BUG_ON(!info->bitmap && !info->bytes);
+ ASSERT(info->bytes || info->bitmap);
ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
&info->offset_index, (info->bitmap != NULL));
if (ret)
@@ -1356,7 +1492,9 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
- BUG_ON(ctl->total_bitmaps > max_bitmaps);
+ max_bitmaps = max(max_bitmaps, 1);
+
+ ASSERT(ctl->total_bitmaps <= max_bitmaps);
/*
* The goal is to keep the total amount of memory used per 1gb of space
@@ -1400,7 +1538,7 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
start = offset_to_bit(info->offset, ctl->unit, offset);
count = bytes_to_bits(bytes, ctl->unit);
- BUG_ON(start + count > BITS_PER_BITMAP);
+ ASSERT(start + count <= BITS_PER_BITMAP);
bitmap_clear(info->bitmap, start, count);
@@ -1423,7 +1561,7 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
start = offset_to_bit(info->offset, ctl->unit, offset);
count = bytes_to_bits(bytes, ctl->unit);
- BUG_ON(start + count > BITS_PER_BITMAP);
+ ASSERT(start + count <= BITS_PER_BITMAP);
bitmap_set(info->bitmap, start, count);
@@ -1431,13 +1569,19 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
ctl->free_space += bytes;
}
+/*
+ * If we cannot find a suitable extent, we will use *bytes to record
+ * the size of the max extent.
+ */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info, u64 *offset,
u64 *bytes)
{
unsigned long found_bits = 0;
+ unsigned long max_bits = 0;
unsigned long bits, i;
unsigned long next_zero;
+ unsigned long extent_bits;
i = offset_to_bit(bitmap_info->offset, ctl->unit,
max_t(u64, *offset, bitmap_info->offset));
@@ -1446,9 +1590,12 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
next_zero = find_next_zero_bit(bitmap_info->bitmap,
BITS_PER_BITMAP, i);
- if ((next_zero - i) >= bits) {
- found_bits = next_zero - i;
+ extent_bits = next_zero - i;
+ if (extent_bits >= bits) {
+ found_bits = extent_bits;
break;
+ } else if (extent_bits > max_bits) {
+ max_bits = extent_bits;
}
i = next_zero;
}
@@ -1459,40 +1606,74 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
return 0;
}
+ *bytes = (u64)(max_bits) * ctl->unit;
return -1;
}
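
search_bitmap() now reports, through *bytes, the largest run it did see whenever no run was big enough, so callers can record a max_extent_size instead of re-probing hopeless bitmaps. A userspace sketch of the same scan over a toy bitmap (hand-rolled bit walking in place of for_each_set_bit_from; set bits model free space):

#include <stdio.h>

/* Find a run of >= want set bits; on failure report the longest run
 * seen, mirroring the new search_bitmap() contract. */
static int search_runs(unsigned long map, int nbits, int want, int *max_run)
{
	int i = 0, run;

	*max_run = 0;
	while (i < nbits) {
		if (!((map >> i) & 1UL)) {
			i++;
			continue;
		}
		run = 0;
		while (i + run < nbits && ((map >> (i + run)) & 1UL))
			run++;
		if (run >= want)
			return i;	/* found: offset of the run */
		if (run > *max_run)
			*max_run = run;	/* remember the best miss   */
		i += run;
	}
	return -1;
}

int main(void)
{
	unsigned long map = 0xE6;	/* runs of 2 (bit 1) and 3 (bit 5) */
	int max_run;

	if (search_runs(map, 8, 4, &max_run) < 0)
		printf("no 4-bit run; largest free run: %d bits\n", max_run);
	return 0;
}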
+/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+ unsigned long align, u64 *max_extent_size)
{
struct btrfs_free_space *entry;
struct rb_node *node;
+ u64 tmp;
+ u64 align_off;
int ret;
if (!ctl->free_space_offset.rb_node)
- return NULL;
+ goto out;
entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
if (!entry)
- return NULL;
+ goto out;
for (node = &entry->offset_index; node; node = rb_next(node)) {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
- if (entry->bytes < *bytes)
+ if (entry->bytes < *bytes) {
+ if (entry->bytes > *max_extent_size)
+ *max_extent_size = entry->bytes;
+ continue;
+ }
+
+ /* make sure the space returned is big enough
+ * to match our requested alignment
+ */
+ if (*bytes >= align) {
+ tmp = entry->offset - ctl->start + align - 1;
+ do_div(tmp, align);
+ tmp = tmp * align + ctl->start;
+ align_off = tmp - entry->offset;
+ } else {
+ align_off = 0;
+ tmp = entry->offset;
+ }
+
+ if (entry->bytes < *bytes + align_off) {
+ if (entry->bytes > *max_extent_size)
+ *max_extent_size = entry->bytes;
continue;
+ }
if (entry->bitmap) {
- ret = search_bitmap(ctl, entry, offset, bytes);
- if (!ret)
+ u64 size = *bytes;
+
+ ret = search_bitmap(ctl, entry, &tmp, &size);
+ if (!ret) {
+ *offset = tmp;
+ *bytes = size;
return entry;
+ } else if (size > *max_extent_size) {
+ *max_extent_size = size;
+ }
continue;
}
- *offset = entry->offset;
- *bytes = entry->bytes;
+ *offset = tmp;
+ *bytes = entry->bytes - align_off;
return entry;
}
-
+out:
return NULL;
}
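
The new alignment handling rounds an entry's offset up to the next align boundary measured from ctl->start, then re-checks that the entry still covers *bytes past the resulting gap. It is the usual (x + align - 1) / align * align round-up; a standalone check of the arithmetic with sample numbers (userspace; do_div() replaced by plain 64-bit division):

#include <stdio.h>
#include <stdint.h>

/* Mirror find_free_space(): align 'offset' up to the next 'align'
 * boundary relative to 'start'. */
static uint64_t align_up(uint64_t offset, uint64_t start, uint64_t align)
{
	uint64_t tmp = offset - start + align - 1;

	tmp /= align;			/* do_div(tmp, align) in the kernel */
	return tmp * align + start;
}

int main(void)
{
	uint64_t start = 1048576;	/* ctl->start: block group start */
	uint64_t offset = start + 70000;/* the free extent begins here   */
	uint64_t align = 65536;		/* e.g. full_stripe_len          */
	uint64_t aligned = align_up(offset, start, align);

	printf("offset %llu aligns up to %llu, align_off = %llu\n",
	       (unsigned long long)offset,
	       (unsigned long long)aligned,
	       (unsigned long long)(aligned - offset));
	return 0;
}

With these numbers align_off comes out to 61072, so the entry is only usable if entry->bytes >= *bytes + 61072.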
@@ -1539,7 +1720,8 @@ again:
search_bytes = ctl->unit;
search_bytes = min(search_bytes, end - search_start + 1);
ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
- BUG_ON(ret < 0 || search_start != *offset);
+ if (ret < 0 || search_start != *offset)
+ return -EINVAL;
/* We may have found more bits than what we need */
search_bytes = min(search_bytes, *bytes);
@@ -1636,10 +1818,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
}
/*
- * some block groups are so tiny they can't be enveloped by a bitmap, so
- * don't even bother to create a bitmap for this
+ * The original block groups from mkfs can be really small, like 8
+ * megabytes, so don't bother with a bitmap for those entries. However
+ * some block groups can be smaller than what a bitmap would cover but
+ * are still large enough that they could overflow the 32k memory limit,
+ * so allow those block groups to still be allowed to have a bitmap
+ * entry.
*/
- if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+ if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
return false;
return true;
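
The cutoff is halved: a block group now qualifies for bitmap entries once it spans at least half of what a single bitmap covers. Quick arithmetic under the usual assumptions (4 KiB pages, 4 KiB units, BITS_PER_BITMAP being one page of bits):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* assumed         */
	unsigned long bits_per_bitmap = page_size * 8;	/* 32768 bits      */
	unsigned long unit = 4096;			/* data sectorsize */
	unsigned long long covers =
		(unsigned long long)bits_per_bitmap * unit;

	/* Old rule: no bitmaps unless the group spans a full bitmap.
	 * New rule: allow them from half that size. */
	printf("one bitmap covers %llu MiB; bitmaps allowed from %llu MiB\n",
	       covers >> 20, covers >> 21);
	return 0;
}

That is 128 MiB per bitmap, with bitmaps now permitted for groups of 64 MiB and up, which keeps such groups inside the 32k extent-entry memory budget mentioned in the comment.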
@@ -1711,7 +1897,7 @@ no_cluster_bitmap:
bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!bitmap_info) {
- BUG_ON(added);
+ ASSERT(added == 0);
goto new_bitmap;
}
@@ -1850,8 +2036,8 @@ out:
spin_unlock(&ctl->tree_lock);
if (ret) {
- printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
- BUG_ON(ret == -EEXIST);
+ printk(KERN_CRIT "BTRFS: unable to add free space: %d\n", ret);
+ ASSERT(ret != -EEXIST);
}
return ret;
@@ -1941,7 +2127,6 @@ again:
re_search = true;
goto again;
}
- BUG_ON(ret); /* logic error */
out_lock:
spin_unlock(&ctl->tree_lock);
out:
@@ -1960,15 +2145,15 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro)
count++;
- printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
- (unsigned long long)info->offset,
- (unsigned long long)info->bytes,
+ btrfs_crit(block_group->fs_info,
+ "entry offset %llu, bytes %llu, bitmap %s",
+ info->offset, info->bytes,
(info->bitmap) ? "yes" : "no");
}
- printk(KERN_INFO "block group has cluster?: %s\n",
+ btrfs_info(block_group->fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
- printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
- "\n", count);
+ btrfs_info(block_group->fs_info,
+ "%d blocks of free space at or bigger than bytes is", count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
@@ -2035,7 +2220,8 @@ out:
return 0;
}
-void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
+static void __btrfs_remove_free_space_cache_locked(
+ struct btrfs_free_space_ctl *ctl)
{
struct btrfs_free_space *info;
struct rb_node *node;
@@ -2089,15 +2275,19 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
- u64 offset, u64 bytes, u64 empty_size)
+ u64 offset, u64 bytes, u64 empty_size,
+ u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
u64 bytes_search = bytes + empty_size;
u64 ret = 0;
+ u64 align_gap = 0;
+ u64 align_gap_len = 0;
spin_lock(&ctl->tree_lock);
- entry = find_free_space(ctl, &offset, &bytes_search);
+ entry = find_free_space(ctl, &offset, &bytes_search,
+ block_group->full_stripe_len, max_extent_size);
if (!entry)
goto out;
@@ -2108,17 +2298,23 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
free_bitmap(ctl, entry);
} else {
unlink_free_space(ctl, entry);
- entry->offset += bytes;
- entry->bytes -= bytes;
+ align_gap_len = offset - entry->offset;
+ align_gap = entry->offset;
+
+ entry->offset = offset + bytes;
+ WARN_ON(entry->bytes < bytes + align_gap_len);
+
+ entry->bytes -= bytes + align_gap_len;
if (!entry->bytes)
kmem_cache_free(btrfs_free_space_cachep, entry);
else
link_free_space(ctl, entry);
}
-
out:
spin_unlock(&ctl->tree_lock);
+ if (align_gap_len)
+ __btrfs_add_free_space(ctl, align_gap, align_gap_len);
return ret;
}
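
When the aligned allocation leaves a gap at the front of the chosen extent, the entry is shrunk by the gap plus the allocation, and the gap itself is handed back through __btrfs_add_free_space() once tree_lock is dropped. The bookkeeping on sample numbers (standalone arithmetic only; the values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A free extent [offset, offset + bytes) ... */
	uint64_t entry_offset = 70000, entry_bytes = 200000;
	/* ... from which an aligned chunk was carved: */
	uint64_t alloc_offset = 131072, alloc_bytes = 65536;

	uint64_t align_gap = entry_offset;	/* front gap, re-added */
	uint64_t align_gap_len = alloc_offset - entry_offset;

	/* What survives as the shrunken free entry: */
	entry_offset = alloc_offset + alloc_bytes;
	entry_bytes -= alloc_bytes + align_gap_len;

	printf("gap re-added [%llu, +%llu); remainder [%llu, +%llu)\n",
	       (unsigned long long)align_gap,
	       (unsigned long long)align_gap_len,
	       (unsigned long long)entry_offset,
	       (unsigned long long)entry_bytes);
	return 0;
}

The three pieces (gap, allocation, remainder) always sum back to the original entry->bytes, which is what the WARN_ON above guards.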
@@ -2168,7 +2364,8 @@ int btrfs_return_cluster_to_free_space(
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
struct btrfs_free_space *entry,
- u64 bytes, u64 min_start)
+ u64 bytes, u64 min_start,
+ u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
int err;
@@ -2180,8 +2377,11 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
search_bytes = bytes;
err = search_bitmap(ctl, entry, &search_start, &search_bytes);
- if (err)
+ if (err) {
+ if (search_bytes > *max_extent_size)
+ *max_extent_size = search_bytes;
return 0;
+ }
ret = search_start;
__bitmap_clear_bits(ctl, entry, ret, bytes);
@@ -2196,7 +2396,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
*/
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
- u64 min_start)
+ u64 min_start, u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
@@ -2215,7 +2415,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
goto out;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
- while(1) {
+ while (1) {
+ if (entry->bytes < bytes && entry->bytes > *max_extent_size)
+ *max_extent_size = entry->bytes;
+
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
node = rb_next(&entry->offset_index);
@@ -2229,7 +2432,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
if (entry->bitmap) {
ret = btrfs_alloc_from_bitmap(block_group,
cluster, entry, bytes,
- cluster->window_start);
+ cluster->window_start,
+ max_extent_size);
if (ret == 0) {
node = rb_next(&entry->offset_index);
if (!node)
@@ -2329,7 +2533,7 @@ again:
rb_erase(&entry->offset_index, &ctl->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 1);
- BUG_ON(ret); /* -EEXIST; Logic error */
+ ASSERT(!ret); /* -EEXIST; Logic error */
trace_btrfs_setup_cluster(block_group, cluster,
total_found * ctl->unit, 1);
@@ -2352,7 +2556,6 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *entry = NULL;
struct btrfs_free_space *last;
struct rb_node *node;
- u64 window_start;
u64 window_free;
u64 max_extent;
u64 total_size = 0;
@@ -2374,7 +2577,6 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
entry = rb_entry(node, struct btrfs_free_space, offset_index);
}
- window_start = entry->offset;
window_free = entry->bytes;
max_extent = entry->bytes;
first = entry;
@@ -2422,7 +2624,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 0);
total_size += entry->bytes;
- BUG_ON(ret); /* -EEXIST; Logic error */
+ ASSERT(!ret); /* -EEXIST; Logic error */
} while (node && entry != last);
cluster->max_size = max_extent;
@@ -2483,8 +2685,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
* returns zero and sets up cluster if things worked out, otherwise
* it returns -enospc
*/
-int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
@@ -2814,7 +3015,7 @@ u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
ret = search_bitmap(ctl, entry, &offset, &count);
/* Logic error; Should be empty if it can't find anything */
- BUG_ON(ret);
+ ASSERT(!ret);
ino = offset;
bitmap_clear_bits(ctl, entry, offset, 1);
@@ -2891,8 +3092,9 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
ret = __load_free_space_cache(root, inode, ctl, path, 0);
if (ret < 0)
- printk(KERN_ERR "btrfs: failed to load free ino cache for "
- "root %llu\n", root->root_key.objectid);
+ btrfs_err(fs_info,
+ "failed to load free ino cache for root %llu",
+ root->root_key.objectid);
out_put:
iput(inode);
out:
@@ -2902,28 +3104,174 @@ out:
int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ struct inode *inode)
{
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
- struct inode *inode;
int ret;
if (!btrfs_test_opt(root, INODE_MAP_CACHE))
return 0;
- inode = lookup_free_ino_inode(root, path);
- if (IS_ERR(inode))
- return 0;
-
ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
if (ret) {
btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
- printk(KERN_ERR "btrfs: failed to write free ino cache "
- "for root %llu\n", root->root_key.objectid);
+ btrfs_err(root->fs_info,
+ "failed to write free ino cache for root %llu",
+ root->root_key.objectid);
#endif
}
- iput(inode);
return ret;
}
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+/*
+ * Use this if you need to make a bitmap or extent entry specifically. It
+ * doesn't do any of the merging that add_free_space does; it acts a lot like
+ * how the free space cache loading stuff works, so you can get really weird
+ * configurations.
+ */
+int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+ u64 offset, u64 bytes, bool bitmap)
+{
+ struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+ struct btrfs_free_space *info = NULL, *bitmap_info;
+ void *map = NULL;
+ u64 bytes_added;
+ int ret;
+
+again:
+ if (!info) {
+ info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
+ if (!info)
+ return -ENOMEM;
+ }
+
+ if (!bitmap) {
+ spin_lock(&ctl->tree_lock);
+ info->offset = offset;
+ info->bytes = bytes;
+ ret = link_free_space(ctl, info);
+ spin_unlock(&ctl->tree_lock);
+ if (ret)
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ return ret;
+ }
+
+ if (!map) {
+ map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+ if (!map) {
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock(&ctl->tree_lock);
+ bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
+ 1, 0);
+ if (!bitmap_info) {
+ info->bitmap = map;
+ map = NULL;
+ add_new_bitmap(ctl, info, offset);
+ bitmap_info = info;
+ }
+
+ bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+ bytes -= bytes_added;
+ offset += bytes_added;
+ spin_unlock(&ctl->tree_lock);
+
+ if (bytes)
+ goto again;
+
+ if (map)
+ kfree(map);
+ return 0;
+}
+
+/*
+ * Checks to see if the given range is in the free space cache. This is really
+ * just used to check the absence of space, so if there is free space in the
+ * range at all we will return 1.
+ */
+int test_check_exists(struct btrfs_block_group_cache *cache,
+ u64 offset, u64 bytes)
+{
+ struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+ struct btrfs_free_space *info;
+ int ret = 0;
+
+ spin_lock(&ctl->tree_lock);
+ info = tree_search_offset(ctl, offset, 0, 0);
+ if (!info) {
+ info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
+ 1, 0);
+ if (!info)
+ goto out;
+ }
+
+have_info:
+ if (info->bitmap) {
+ u64 bit_off, bit_bytes;
+ struct rb_node *n;
+ struct btrfs_free_space *tmp;
+
+ bit_off = offset;
+ bit_bytes = ctl->unit;
+ ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
+ if (!ret) {
+ if (bit_off == offset) {
+ ret = 1;
+ goto out;
+ } else if (bit_off > offset &&
+ offset + bytes > bit_off) {
+ ret = 1;
+ goto out;
+ }
+ }
+
+ n = rb_prev(&info->offset_index);
+ while (n) {
+ tmp = rb_entry(n, struct btrfs_free_space,
+ offset_index);
+ if (tmp->offset + tmp->bytes < offset)
+ break;
+ if (offset + bytes < tmp->offset) {
+ n = rb_prev(&tmp->offset_index);
+ continue;
+ }
+ info = tmp;
+ goto have_info;
+ }
+
+ n = rb_next(&info->offset_index);
+ while (n) {
+ tmp = rb_entry(n, struct btrfs_free_space,
+ offset_index);
+ if (offset + bytes < tmp->offset)
+ break;
+ if (tmp->offset + tmp->bytes < offset) {
+ n = rb_next(&tmp->offset_index);
+ continue;
+ }
+ info = tmp;
+ goto have_info;
+ }
+
+ goto out;
+ }
+
+ if (info->offset == offset) {
+ ret = 1;
+ goto out;
+ }
+
+ if (offset > info->offset && offset < info->offset + info->bytes)
+ ret = 1;
+out:
+ spin_unlock(&ctl->tree_lock);
+ return ret;
+}
+#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
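
These helpers back the CONFIG_BTRFS_FS_RUN_SANITY_TESTS self-tests: a test seeds the ctl with exact entries, mutates it, and asserts presence or absence of ranges. A hedged sketch of such a driver (the dummy cache setup is assumed to come from the tests' scaffolding; only the two helpers above and btrfs_remove_free_space() are real calls from this file's API):

static int test_extents(struct btrfs_block_group_cache *cache)
{
	int ret;

	/* Seed one 4 MiB extent entry at offset 0... */
	ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 0);
	if (ret)
		return ret;

	/* ...carve out the middle 2 MiB... */
	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024,
				      2 * 1024 * 1024);
	if (ret)
		return ret;

	/* ...and check both what is gone and what remains. */
	if (test_check_exists(cache, 1 * 1024 * 1024, 2 * 1024 * 1024))
		return -EINVAL;	/* removed middle must be absent   */
	if (!test_check_exists(cache, 0, 1 * 1024 * 1024))
		return -EINVAL;	/* front megabyte must still exist */
	return 0;
}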