Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--  fs/btrfs/delayed-ref.c  675
1 files changed, 400 insertions, 275 deletions
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e807b143b85..6d16bea94e1 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -23,6 +23,10 @@
#include "delayed-ref.h"
#include "transaction.h"
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
* delayed back reference update tracking. For subvolume trees
* we queue up extent allocations and backref maintenance for
@@ -36,9 +40,9 @@
* compare two delayed tree backrefs with same bytenr and type
*/
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
- struct btrfs_delayed_tree_ref *ref1)
+ struct btrfs_delayed_tree_ref *ref1, int type)
{
- if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
+ if (type == BTRFS_TREE_BLOCK_REF_KEY) {
if (ref1->root < ref2->root)
return -1;
if (ref1->root > ref2->root)
@@ -85,7 +89,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
* type of the delayed backrefs and content of delayed backrefs.
*/
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
- struct btrfs_delayed_ref_node *ref1)
+ struct btrfs_delayed_ref_node *ref1,
+ bool compare_seq)
{
if (ref1->bytenr < ref2->bytenr)
return -1;
@@ -101,10 +106,22 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
return -1;
if (ref1->type > ref2->type)
return 1;
+ if (ref1->no_quota > ref2->no_quota)
+ return 1;
+ if (ref1->no_quota < ref2->no_quota)
+ return -1;
+ /* merging of sequenced refs is not allowed */
+ if (compare_seq) {
+ if (ref1->seq < ref2->seq)
+ return -1;
+ if (ref1->seq > ref2->seq)
+ return 1;
+ }
if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
- btrfs_delayed_node_to_tree_ref(ref1));
+ btrfs_delayed_node_to_tree_ref(ref1),
+ ref1->type);
} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
@@ -134,7 +151,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
rb_node);
- cmp = comp_entry(entry, ins);
+ cmp = comp_entry(entry, ins, 1);
if (cmp < 0)
p = &(*p)->rb_left;
else if (cmp > 0)
@@ -148,40 +165,72 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
return NULL;
}
+/* insert a new ref to head ref rbtree */
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+ struct rb_node *node)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent_node = NULL;
+ struct btrfs_delayed_ref_head *entry;
+ struct btrfs_delayed_ref_head *ins;
+ u64 bytenr;
+
+ ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
+ bytenr = ins->node.bytenr;
+ while (*p) {
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
+ href_node);
+
+ if (bytenr < entry->node.bytenr)
+ p = &(*p)->rb_left;
+ else if (bytenr > entry->node.bytenr)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ rb_link_node(node, parent_node, p);
+ rb_insert_color(node, root);
+ return NULL;
+}
+
/*
* find an head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot
+ * head if it was able to find one, or NULL if nothing was in that spot.
+ * If return_bigger is given, the next bigger entry is returned if no exact
+ * match is found.
*/
-static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
- u64 bytenr,
- struct btrfs_delayed_ref_node **last)
+static struct btrfs_delayed_ref_head *
+find_ref_head(struct rb_root *root, u64 bytenr,
+ int return_bigger)
{
- struct rb_node *n = root->rb_node;
- struct btrfs_delayed_ref_node *entry;
- int cmp;
+ struct rb_node *n;
+ struct btrfs_delayed_ref_head *entry;
+ n = root->rb_node;
+ entry = NULL;
while (n) {
- entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
- WARN_ON(!entry->in_tree);
- if (last)
- *last = entry;
-
- if (bytenr < entry->bytenr)
- cmp = -1;
- else if (bytenr > entry->bytenr)
- cmp = 1;
- else if (!btrfs_delayed_ref_is_head(entry))
- cmp = 1;
- else
- cmp = 0;
+ entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
- if (cmp < 0)
+ if (bytenr < entry->node.bytenr)
n = n->rb_left;
- else if (cmp > 0)
+ else if (bytenr > entry->node.bytenr)
n = n->rb_right;
else
return entry;
}
+ if (entry && return_bigger) {
+ if (bytenr > entry->node.bytenr) {
+ n = rb_next(&entry->href_node);
+ if (!n)
+ n = rb_first(root);
+ entry = rb_entry(n, struct btrfs_delayed_ref_head,
+ href_node);
+ return entry;
+ }
+ return entry;
+ }
return NULL;
}
@@ -209,115 +258,190 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
return 0;
}
-int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
- struct list_head *cluster, u64 start)
+static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head,
+ struct btrfs_delayed_ref_node *ref)
{
- int count = 0;
- struct btrfs_delayed_ref_root *delayed_refs;
- struct rb_node *node;
- struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_ref_head *head;
-
- delayed_refs = &trans->transaction->delayed_refs;
- if (start == 0) {
- node = rb_first(&delayed_refs->root);
+ if (btrfs_delayed_ref_is_head(ref)) {
+ head = btrfs_delayed_node_to_head(ref);
+ rb_erase(&head->href_node, &delayed_refs->href_root);
} else {
- ref = NULL;
- find_ref_head(&delayed_refs->root, start, &ref);
- if (ref) {
- struct btrfs_delayed_ref_node *tmp;
-
- node = rb_prev(&ref->rb_node);
- while (node) {
- tmp = rb_entry(node,
- struct btrfs_delayed_ref_node,
- rb_node);
- if (tmp->bytenr < start)
- break;
- ref = tmp;
- node = rb_prev(&ref->rb_node);
- }
- node = &ref->rb_node;
- } else
- node = rb_first(&delayed_refs->root);
+ assert_spin_locked(&head->lock);
+ rb_erase(&ref->rb_node, &head->ref_root);
}
-again:
- while (node && count < 32) {
- ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
- if (btrfs_delayed_ref_is_head(ref)) {
- head = btrfs_delayed_node_to_head(ref);
- if (list_empty(&head->cluster)) {
- list_add_tail(&head->cluster, cluster);
- delayed_refs->run_delayed_start =
- head->node.bytenr;
- count++;
-
- WARN_ON(delayed_refs->num_heads_ready == 0);
- delayed_refs->num_heads_ready--;
- } else if (count) {
- /* the goal of the clustering is to find extents
- * that are likely to end up in the same extent
- * leaf on disk. So, we don't want them spread
- * all over the tree. Stop now if we've hit
- * a head that was already in use
- */
- break;
+ ref->in_tree = 0;
+ btrfs_put_delayed_ref(ref);
+ atomic_dec(&delayed_refs->num_entries);
+ if (trans->delayed_ref_updates)
+ trans->delayed_ref_updates--;
+}
+
+static int merge_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head,
+ struct btrfs_delayed_ref_node *ref, u64 seq)
+{
+ struct rb_node *node;
+ int mod = 0;
+ int done = 0;
+
+ node = rb_next(&ref->rb_node);
+ while (!done && node) {
+ struct btrfs_delayed_ref_node *next;
+
+ next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+ node = rb_next(node);
+ if (seq && next->seq >= seq)
+ break;
+ if (comp_entry(ref, next, 0))
+ continue;
+
+ if (ref->action == next->action) {
+ mod = next->ref_mod;
+ } else {
+ if (ref->ref_mod < next->ref_mod) {
+ struct btrfs_delayed_ref_node *tmp;
+
+ tmp = ref;
+ ref = next;
+ next = tmp;
+ done = 1;
}
+ mod = -next->ref_mod;
+ }
+
+ drop_delayed_ref(trans, delayed_refs, head, next);
+ ref->ref_mod += mod;
+ if (ref->ref_mod == 0) {
+ drop_delayed_ref(trans, delayed_refs, head, ref);
+ done = 1;
+ } else {
+ /*
+ * You can't have multiples of the same ref on a tree
+ * block.
+ */
+ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
}
- node = rb_next(node);
- }
- if (count) {
- return 0;
- } else if (start) {
- /*
- * we've gone to the end of the rbtree without finding any
- * clusters. start from the beginning and try again
- */
- start = 0;
- node = rb_first(&delayed_refs->root);
- goto again;
}
- return 1;
+ return done;
}
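
For illustration, the ref_mod arithmetic that merge_ref() performs can be sketched in isolation: refs with the same action accumulate their counts, opposing actions cancel against each other, and a result of zero means both refs can be dropped. A minimal, standalone userspace sketch with made-up values (not part of the patch, not kernel code):

	/*
	 * Standalone sketch of the ref_mod merge arithmetic, with hypothetical
	 * values; the real code above also swaps ref/next so the larger count
	 * survives and then re-checks the merged result for zero.
	 */
	#include <stdio.h>

	enum ref_action { ADD_REF, DROP_REF };

	static int merge_mods(enum ref_action a, int mod_a,
			      enum ref_action b, int mod_b)
	{
		if (a == b)
			return mod_a + mod_b;	/* same action: counts accumulate */
		/* opposing actions: the larger count survives, reduced by the smaller */
		return mod_a > mod_b ? mod_a - mod_b : mod_b - mod_a;
	}

	int main(void)
	{
		int merged = merge_mods(ADD_REF, 2, DROP_REF, 1);

		if (merged == 0)
			printf("refs cancel completely, drop both\n");
		else
			printf("one ref survives with ref_mod = %d\n", merged);
		return 0;
	}
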
-/*
- * This checks to see if there are any delayed refs in the
- * btree for a given bytenr. It returns one if it finds any
- * and zero otherwise.
- *
- * If it only finds a head node, it returns 0.
- *
- * The idea is to use this when deciding if you can safely delete an
- * extent from the extent allocation tree. There may be a pending
- * ref in the rbtree that adds or removes references, so as long as this
- * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
- * allocation tree.
- */
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
+void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
{
- struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_ref_root *delayed_refs;
- struct rb_node *prev_node;
- int ret = 0;
+ struct rb_node *node;
+ u64 seq = 0;
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
+ assert_spin_locked(&head->lock);
+ /*
+ * We don't have too much refs to merge in the case of delayed data
+ * refs.
+ */
+ if (head->is_data)
+ return;
+
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ if (!list_empty(&fs_info->tree_mod_seq_list)) {
+ struct seq_list *elem;
+
+ elem = list_first_entry(&fs_info->tree_mod_seq_list,
+ struct seq_list, list);
+ seq = elem->seq;
+ }
+ spin_unlock(&fs_info->tree_mod_seq_lock);
- ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
- if (ref) {
- prev_node = rb_prev(&ref->rb_node);
- if (!prev_node)
- goto out;
- ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
+ node = rb_first(&head->ref_root);
+ while (node) {
+ struct btrfs_delayed_ref_node *ref;
+
+ ref = rb_entry(node, struct btrfs_delayed_ref_node,
rb_node);
- if (ref->bytenr == bytenr)
+ /* We can't merge refs that are outside of our seq count */
+ if (seq && ref->seq >= seq)
+ break;
+ if (merge_ref(trans, delayed_refs, head, ref, seq))
+ node = rb_first(&head->ref_root);
+ else
+ node = rb_next(&ref->rb_node);
+ }
+}
+
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 seq)
+{
+ struct seq_list *elem;
+ int ret = 0;
+
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ if (!list_empty(&fs_info->tree_mod_seq_list)) {
+ elem = list_first_entry(&fs_info->tree_mod_seq_list,
+ struct seq_list, list);
+ if (seq >= elem->seq) {
+ pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
+ (u32)(seq >> 32), (u32)seq,
+ (u32)(elem->seq >> 32), (u32)elem->seq,
+ delayed_refs);
ret = 1;
+ }
}
-out:
- spin_unlock(&delayed_refs->lock);
+
+ spin_unlock(&fs_info->tree_mod_seq_lock);
return ret;
}
+struct btrfs_delayed_ref_head *
+btrfs_select_ref_head(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_head *head;
+ u64 start;
+ bool loop = false;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+again:
+ start = delayed_refs->run_delayed_start;
+ head = find_ref_head(&delayed_refs->href_root, start, 1);
+ if (!head && !loop) {
+ delayed_refs->run_delayed_start = 0;
+ start = 0;
+ loop = true;
+ head = find_ref_head(&delayed_refs->href_root, start, 1);
+ if (!head)
+ return NULL;
+ } else if (!head && loop) {
+ return NULL;
+ }
+
+ while (head->processing) {
+ struct rb_node *node;
+
+ node = rb_next(&head->href_node);
+ if (!node) {
+ if (loop)
+ return NULL;
+ delayed_refs->run_delayed_start = 0;
+ start = 0;
+ loop = true;
+ goto again;
+ }
+ head = rb_entry(node, struct btrfs_delayed_ref_head,
+ href_node);
+ }
+
+ head->processing = 1;
+ WARN_ON(delayed_refs->num_heads_ready == 0);
+ delayed_refs->num_heads_ready--;
+ delayed_refs->run_delayed_start = head->node.bytenr +
+ head->node.num_bytes;
+ return head;
+}
+
/*
* helper function to update an extent delayed ref in the
* rbtree. existing and update must both have the same
@@ -329,6 +453,7 @@ out:
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *existing,
struct btrfs_delayed_ref_node *update)
{
@@ -340,18 +465,11 @@ update_existing_ref(struct btrfs_trans_handle *trans,
* every changing the extent allocation tree.
*/
existing->ref_mod--;
- if (existing->ref_mod == 0) {
- rb_erase(&existing->rb_node,
- &delayed_refs->root);
- existing->in_tree = 0;
- btrfs_put_delayed_ref(existing);
- delayed_refs->num_entries--;
- if (trans->delayed_ref_updates)
- trans->delayed_ref_updates--;
- } else {
+ if (existing->ref_mod == 0)
+ drop_delayed_ref(trans, delayed_refs, head, existing);
+ else
WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
- }
} else {
WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
@@ -381,6 +499,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
ref = btrfs_delayed_node_to_head(update);
BUG_ON(existing_ref->is_data != ref->is_data);
+ spin_lock(&existing_ref->lock);
if (ref->must_insert_reserved) {
/* if the extent was freed and then
* reallocated before the delayed ref
@@ -414,13 +533,16 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
ref->extent_op->flags_to_set;
existing_ref->extent_op->update_flags = 1;
}
- kfree(ref->extent_op);
+ btrfs_free_delayed_extent_op(ref->extent_op);
}
}
/*
- * update the reference mod on the head to reflect this new operation
+ * update the reference mod on the head to reflect this new operation,
+ * only need the lock for this case cause we could be processing it
+ * currently, for refs we just added we know we're a-ok.
*/
existing->ref_mod += update->ref_mod;
+ spin_unlock(&existing_ref->lock);
}
/*
@@ -428,12 +550,13 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
* this does all the dirty work in terms of maintaining the correct
* overall modification count.
*/
-static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *ref,
- u64 bytenr, u64 num_bytes,
- int action, int is_data)
+static noinline struct btrfs_delayed_ref_head *
+add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_node *ref, u64 bytenr,
+ u64 num_bytes, int action, int is_data)
{
- struct btrfs_delayed_ref_node *existing;
+ struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_head *head_ref = NULL;
struct btrfs_delayed_ref_root *delayed_refs;
int count_mod = 1;
@@ -475,47 +598,59 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
ref->action = 0;
ref->is_head = 1;
ref->in_tree = 1;
+ ref->seq = 0;
head_ref = btrfs_delayed_node_to_head(ref);
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
+ head_ref->ref_root = RB_ROOT;
+ head_ref->processing = 0;
- INIT_LIST_HEAD(&head_ref->cluster);
+ spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
- existing = tree_insert(&delayed_refs->root, &ref->rb_node);
+ trace_add_delayed_ref_head(ref, head_ref, action);
+ existing = htree_insert(&delayed_refs->href_root,
+ &head_ref->href_node);
if (existing) {
- update_existing_head_ref(existing, ref);
+ update_existing_head_ref(&existing->node, ref);
/*
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ head_ref = existing;
} else {
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
- delayed_refs->num_entries++;
+ atomic_inc(&delayed_refs->num_entries);
trans->delayed_ref_updates++;
}
- return 0;
+ return head_ref;
}
/*
* helper to insert a delayed tree ref into the rbtree.
*/
-static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *ref,
- u64 bytenr, u64 num_bytes, u64 parent,
- u64 ref_root, int level, int action)
+static noinline void
+add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_delayed_ref_node *ref, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 ref_root, int level,
+ int action, int no_quota)
{
struct btrfs_delayed_ref_node *existing;
struct btrfs_delayed_tree_ref *full_ref;
struct btrfs_delayed_ref_root *delayed_refs;
+ u64 seq = 0;
if (action == BTRFS_ADD_DELAYED_EXTENT)
action = BTRFS_ADD_DELAYED_REF;
+ if (is_fstree(ref_root))
+ seq = atomic64_read(&fs_info->tree_mod_seq);
delayed_refs = &trans->transaction->delayed_refs;
/* first set the basic ref node struct up */
@@ -526,51 +661,61 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
ref->action = action;
ref->is_head = 0;
ref->in_tree = 1;
+ ref->no_quota = no_quota;
+ ref->seq = seq;
full_ref = btrfs_delayed_node_to_tree_ref(ref);
- if (parent) {
- full_ref->parent = parent;
+ full_ref->parent = parent;
+ full_ref->root = ref_root;
+ if (parent)
ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
- } else {
- full_ref->root = ref_root;
+ else
ref->type = BTRFS_TREE_BLOCK_REF_KEY;
- }
full_ref->level = level;
- existing = tree_insert(&delayed_refs->root, &ref->rb_node);
+ trace_add_delayed_tree_ref(ref, full_ref, action);
+ spin_lock(&head_ref->lock);
+ existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
if (existing) {
- update_existing_ref(trans, delayed_refs, existing, ref);
+ update_existing_ref(trans, delayed_refs, head_ref, existing,
+ ref);
/*
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
} else {
- delayed_refs->num_entries++;
+ atomic_inc(&delayed_refs->num_entries);
trans->delayed_ref_updates++;
}
- return 0;
+ spin_unlock(&head_ref->lock);
}
/*
* helper to insert a delayed data ref into the rbtree.
*/
-static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *ref,
- u64 bytenr, u64 num_bytes, u64 parent,
- u64 ref_root, u64 owner, u64 offset,
- int action)
+static noinline void
+add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_delayed_ref_node *ref, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
+ u64 offset, int action, int no_quota)
{
struct btrfs_delayed_ref_node *existing;
struct btrfs_delayed_data_ref *full_ref;
struct btrfs_delayed_ref_root *delayed_refs;
+ u64 seq = 0;
if (action == BTRFS_ADD_DELAYED_EXTENT)
action = BTRFS_ADD_DELAYED_REF;
delayed_refs = &trans->transaction->delayed_refs;
+ if (is_fstree(ref_root))
+ seq = atomic64_read(&fs_info->tree_mod_seq);
+
/* first set the basic ref node struct up */
atomic_set(&ref->refs, 1);
ref->bytenr = bytenr;
@@ -579,32 +724,37 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
ref->action = action;
ref->is_head = 0;
ref->in_tree = 1;
+ ref->no_quota = no_quota;
+ ref->seq = seq;
full_ref = btrfs_delayed_node_to_data_ref(ref);
- if (parent) {
- full_ref->parent = parent;
+ full_ref->parent = parent;
+ full_ref->root = ref_root;
+ if (parent)
ref->type = BTRFS_SHARED_DATA_REF_KEY;
- } else {
- full_ref->root = ref_root;
+ else
ref->type = BTRFS_EXTENT_DATA_REF_KEY;
- }
+
full_ref->objectid = owner;
full_ref->offset = offset;
- existing = tree_insert(&delayed_refs->root, &ref->rb_node);
+ trace_add_delayed_data_ref(ref, full_ref, action);
+ spin_lock(&head_ref->lock);
+ existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
if (existing) {
- update_existing_ref(trans, delayed_refs, existing, ref);
+ update_existing_ref(trans, delayed_refs, head_ref, existing,
+ ref);
/*
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
} else {
- delayed_refs->num_entries++;
+ atomic_inc(&delayed_refs->num_entries);
trans->delayed_ref_updates++;
}
- return 0;
+ spin_unlock(&head_ref->lock);
}
/*
@@ -612,24 +762,28 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
* to make sure the delayed ref is eventually processed before this
* transaction commits.
*/
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 ref_root, int level, int action,
- struct btrfs_delayed_extent_op *extent_op)
+ struct btrfs_delayed_extent_op *extent_op,
+ int no_quota)
{
struct btrfs_delayed_tree_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
+
+ if (!is_fstree(ref_root) || !fs_info->quota_enabled)
+ no_quota = 0;
BUG_ON(extent_op && extent_op->is_data);
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
return -ENOMEM;
}
@@ -642,39 +796,43 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
- action, 0);
- BUG_ON(ret);
+ head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+ bytenr, num_bytes, action, 0);
- ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
- parent, ref_root, level, action);
- BUG_ON(ret);
+ add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+ num_bytes, parent, ref_root, level, action,
+ no_quota);
spin_unlock(&delayed_refs->lock);
+
return 0;
}
/*
* add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
*/
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, int action,
- struct btrfs_delayed_extent_op *extent_op)
+ struct btrfs_delayed_extent_op *extent_op,
+ int no_quota)
{
struct btrfs_delayed_data_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
+
+ if (!is_fstree(ref_root) || !fs_info->quota_enabled)
+ no_quota = 0;
BUG_ON(extent_op && !extent_op->is_data);
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
return -ENOMEM;
}
@@ -687,26 +845,26 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
- action, 1);
- BUG_ON(ret);
+ head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+ bytenr, num_bytes, action, 1);
- ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
- parent, ref_root, owner, offset, action);
- BUG_ON(ret);
+ add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+ num_bytes, parent, ref_root, owner, offset,
+ action, no_quota);
spin_unlock(&delayed_refs->lock);
+
return 0;
}
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref)
return -ENOMEM;
@@ -715,10 +873,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
+ add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
extent_op->is_data);
- BUG_ON(ret);
spin_unlock(&delayed_refs->lock);
return 0;
@@ -732,88 +889,56 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
- struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_root *delayed_refs;
delayed_refs = &trans->transaction->delayed_refs;
- ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
- if (ref)
- return btrfs_delayed_node_to_head(ref);
- return NULL;
+ return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
-/*
- * add a delayed ref to the tree. This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- *
- * The main point of this call is to add and remove a backreference in a single
- * shot, taking the lock only once, and only searching for the head node once.
- *
- * It is the same as doing a ref add and delete in two separate calls.
- */
-#if 0
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes, u64 orig_parent,
- u64 parent, u64 orig_ref_root, u64 ref_root,
- u64 orig_ref_generation, u64 ref_generation,
- u64 owner_objectid, int pin)
+void btrfs_delayed_ref_exit(void)
{
- struct btrfs_delayed_ref *ref;
- struct btrfs_delayed_ref *old_ref;
- struct btrfs_delayed_ref_head *head_ref;
- struct btrfs_delayed_ref_root *delayed_refs;
- int ret;
-
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
- if (!ref)
- return -ENOMEM;
-
- old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
- if (!old_ref) {
- kfree(ref);
- return -ENOMEM;
- }
-
- /*
- * the parent = 0 case comes from cases where we don't actually
- * know the parent yet. It will get updated later via a add/drop
- * pair.
- */
- if (parent == 0)
- parent = bytenr;
- if (orig_parent == 0)
- orig_parent = bytenr;
+ if (btrfs_delayed_ref_head_cachep)
+ kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+ if (btrfs_delayed_tree_ref_cachep)
+ kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+ if (btrfs_delayed_data_ref_cachep)
+ kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+ if (btrfs_delayed_extent_op_cachep)
+ kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
- if (!head_ref) {
- kfree(ref);
- kfree(old_ref);
- return -ENOMEM;
- }
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
+int btrfs_delayed_ref_init(void)
+{
+ btrfs_delayed_ref_head_cachep = kmem_cache_create(
+ "btrfs_delayed_ref_head",
+ sizeof(struct btrfs_delayed_ref_head), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_ref_head_cachep)
+ goto fail;
+
+ btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+ "btrfs_delayed_tree_ref",
+ sizeof(struct btrfs_delayed_tree_ref), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_tree_ref_cachep)
+ goto fail;
+
+ btrfs_delayed_data_ref_cachep = kmem_cache_create(
+ "btrfs_delayed_data_ref",
+ sizeof(struct btrfs_delayed_data_ref), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_data_ref_cachep)
+ goto fail;
+
+ btrfs_delayed_extent_op_cachep = kmem_cache_create(
+ "btrfs_delayed_extent_op",
+ sizeof(struct btrfs_delayed_extent_op), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_extent_op_cachep)
+ goto fail;
- /*
- * insert both the head node and the new ref without dropping
- * the spin lock
- */
- ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
- (u64)-1, 0, 0, 0,
- BTRFS_UPDATE_DELAYED_HEAD, 0);
- BUG_ON(ret);
-
- ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
- parent, ref_root, ref_generation,
- owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
- BUG_ON(ret);
-
- ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
- orig_parent, orig_ref_root,
- orig_ref_generation, owner_objectid,
- BTRFS_DROP_DELAYED_REF, pin);
- BUG_ON(ret);
- spin_unlock(&delayed_refs->lock);
return 0;
+fail:
+ btrfs_delayed_ref_exit();
+ return -ENOMEM;
}
-#endif
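
As a usage note, the new btrfs_delayed_ref_init()/btrfs_delayed_ref_exit() pair is meant to be called once at module load and unload; the wiring itself lives outside this file. A minimal sketch of such a caller, with hypothetical function names, assuming the standard module init/exit pattern:

	/*
	 * Hypothetical caller, not part of this patch: illustrates the intended
	 * init/exit pairing. In the real tree the equivalent calls live in the
	 * btrfs module setup code, not in this file.
	 */
	static int __init example_btrfs_module_init(void)
	{
		int ret;

		ret = btrfs_delayed_ref_init();	/* creates the four slab caches */
		if (ret)
			return ret;	/* -ENOMEM; the fail: path above already
					 * tore down any caches that were created */

		/* ... register the filesystem, start other subsystems ... */
		return 0;
	}

	static void __exit example_btrfs_module_exit(void)
	{
		/* ... unregister the filesystem ... */
		btrfs_delayed_ref_exit();	/* destroys all four caches; the NULL
						 * checks guard the partial-init case */
	}
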