Commit 097b8a7c authored by Jan Schmidt

Btrfs: join tree mod log code with the code holding back delayed refs

We've got two mechanisms, both required for reliable backref resolving
(the tree mod log and holding back delayed refs). You cannot make use of
one without the other. So instead of requiring the user of these
mechanisms to set both up correctly, we join them into a single
interface.
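
For illustration only (not part of the patch): the resulting caller-side
pattern, using just names from the patched code. One get/put pair now both
holds back newer delayed refs and keeps the needed tree mod log entries
alive while backrefs are resolved:

	struct seq_list tree_mod_seq_elem = {};	/* seq == 0: registers as blocker */

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	/* ... walk backrefs, e.g.: */
	ret = btrfs_find_all_roots(trans, fs_info, bytenr,
				   tree_mod_seq_elem.seq, &roots);
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);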

Additionally, we stop inserting non-blockers into fs_info->tree_mod_seq_list
as we did before, which was of no value.
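
A sketch of the producer side, condensed from the patched
add_delayed_tree_ref() below: callers that only need a sequence number for
ordering draw it from the shared counter via btrfs_inc_tree_mod_seq() and
never touch the list; only a seq_list elem passed to btrfs_get_tree_mod_seq()
with elem->seq == 0 is registered as a blocker:

	if (is_fstree(ref_root))
		seq = btrfs_inc_tree_mod_seq(fs_info);
	ref->seq = seq;
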
Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
parent cf538830
fs/btrfs/backref.c
@@ -773,9 +773,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info, u64 bytenr,
-			     u64 delayed_ref_seq, u64 time_seq,
-			     struct ulist *refs, struct ulist *roots,
-			     const u64 *extent_item_pos)
+			     u64 time_seq, struct ulist *refs,
+			     struct ulist *roots, const u64 *extent_item_pos)
 {
 	struct btrfs_key key;
 	struct btrfs_path *path;
@@ -837,7 +836,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 				btrfs_put_delayed_ref(&head->node);
 				goto again;
 			}
-			ret = __add_delayed_refs(head, delayed_ref_seq,
+			ret = __add_delayed_refs(head, time_seq,
 						 &prefs_delayed);
 			mutex_unlock(&head->mutex);
 			if (ret) {
@@ -981,8 +980,7 @@ static void free_leaf_list(struct ulist *blocks)
  */
 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
 				struct btrfs_fs_info *fs_info, u64 bytenr,
-				u64 delayed_ref_seq, u64 time_seq,
-				struct ulist **leafs,
+				u64 time_seq, struct ulist **leafs,
 				const u64 *extent_item_pos)
 {
 	struct ulist *tmp;
@@ -997,7 +995,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 	}
 
-	ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+	ret = find_parent_nodes(trans, fs_info, bytenr,
 				time_seq, *leafs, tmp, extent_item_pos);
 	ulist_free(tmp);
@@ -1024,8 +1022,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
  */
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 			 struct btrfs_fs_info *fs_info, u64 bytenr,
-			 u64 delayed_ref_seq, u64 time_seq,
-			 struct ulist **roots)
+			 u64 time_seq, struct ulist **roots)
 {
 	struct ulist *tmp;
 	struct ulist_node *node = NULL;
@@ -1043,7 +1040,7 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
-		ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+		ret = find_parent_nodes(trans, fs_info, bytenr,
 					time_seq, tmp, *roots, NULL);
 		if (ret < 0 && ret != -ENOENT) {
 			ulist_free(tmp);
@@ -1376,11 +1373,9 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 	struct ulist *roots = NULL;
 	struct ulist_node *ref_node = NULL;
 	struct ulist_node *root_node = NULL;
-	struct seq_list seq_elem = {};
 	struct seq_list tree_mod_seq_elem = {};
 	struct ulist_iterator ref_uiter;
 	struct ulist_iterator root_uiter;
-	struct btrfs_delayed_ref_root *delayed_refs = NULL;
 
 	pr_debug("resolving all inodes for extent %llu\n",
 		 extent_item_objectid);
@@ -1391,16 +1386,11 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 		trans = btrfs_join_transaction(fs_info->extent_root);
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
-
-		delayed_refs = &trans->transaction->delayed_refs;
-		spin_lock(&delayed_refs->lock);
-		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
-		spin_unlock(&delayed_refs->lock);
 		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 	}
 
 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
-				   seq_elem.seq, tree_mod_seq_elem.seq, &refs,
+				   tree_mod_seq_elem.seq, &refs,
 				   &extent_item_pos);
 	if (ret)
 		goto out;
@@ -1408,7 +1398,6 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 	ULIST_ITER_INIT(&ref_uiter);
 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
 		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
-					   seq_elem.seq,
 					   tree_mod_seq_elem.seq, &roots);
 		if (ret)
 			break;
@@ -1431,7 +1420,6 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 out:
 	if (!search_commit_root) {
 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
-		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
 		btrfs_end_transaction(trans, fs_info->extent_root);
 	}
fs/btrfs/backref.h
@@ -58,8 +58,7 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 			 struct btrfs_fs_info *fs_info, u64 bytenr,
-			 u64 delayed_ref_seq, u64 time_seq,
-			 struct ulist **roots);
+			 u64 time_seq, struct ulist **roots);
 
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
fs/btrfs/ctree.c
@@ -321,7 +321,7 @@ struct tree_mod_root {
 struct tree_mod_elem {
 	struct rb_node node;
 	u64 index;		/* shifted logical */
-	struct seq_list elem;
+	u64 seq;
 	enum mod_log_op op;
 
 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
@@ -341,20 +341,50 @@ struct tree_mod_elem {
 	struct tree_mod_root old_root;
 };
 
-static inline void
-__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
+static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
+{
+	read_lock(&fs_info->tree_mod_log_lock);
+}
+
+static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
+{
+	read_unlock(&fs_info->tree_mod_log_lock);
+}
+
+static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
 {
-	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
-	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+	write_lock(&fs_info->tree_mod_log_lock);
 }
 
-void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
+{
+	write_unlock(&fs_info->tree_mod_log_lock);
+}
+
+/*
+ * This adds a new blocker to the tree mod log's blocker list if the @elem
+ * passed does not already have a sequence number set. So when a caller expects
+ * to record tree modifications, it should ensure to set elem->seq to zero
+ * before calling btrfs_get_tree_mod_seq.
+ * Returns a fresh, unused tree log modification sequence number, even if no new
+ * blocker was added.
+ */
+u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
 			    struct seq_list *elem)
 {
-	elem->flags = 1;
+	u64 seq;
+
+	tree_mod_log_write_lock(fs_info);
 	spin_lock(&fs_info->tree_mod_seq_lock);
-	__get_tree_mod_seq(fs_info, elem);
+	if (!elem->seq) {
+		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
+		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+	}
+	seq = btrfs_inc_tree_mod_seq(fs_info);
 	spin_unlock(&fs_info->tree_mod_seq_lock);
+	tree_mod_log_write_unlock(fs_info);
+
+	return seq;
 }
 
 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
@@ -371,41 +401,46 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 	if (!seq_putting)
 		return;
 
-	BUG_ON(!(elem->flags & 1));
 	spin_lock(&fs_info->tree_mod_seq_lock);
 	list_del(&elem->list);
+	elem->seq = 0;
 
 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
-		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
+		if (cur_elem->seq < min_seq) {
 			if (seq_putting > cur_elem->seq) {
 				/*
 				 * blocker with lower sequence number exists, we
 				 * cannot remove anything from the log
 				 */
-				goto out;
+				spin_unlock(&fs_info->tree_mod_seq_lock);
+				return;
 			}
 			min_seq = cur_elem->seq;
 		}
 	}
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+
+	/*
+	 * we removed the lowest blocker from the blocker list, so there may be
+	 * more processible delayed refs.
+	 */
+	wake_up(&fs_info->tree_mod_seq_wait);
 
 	/*
 	 * anything that's lower than the lowest existing (read: blocked)
 	 * sequence number can be removed from the tree.
 	 */
-	write_lock(&fs_info->tree_mod_log_lock);
+	tree_mod_log_write_lock(fs_info);
 	tm_root = &fs_info->tree_mod_log;
 	for (node = rb_first(tm_root); node; node = next) {
 		next = rb_next(node);
 		tm = container_of(node, struct tree_mod_elem, node);
-		if (tm->elem.seq > min_seq)
+		if (tm->seq > min_seq)
 			continue;
 		rb_erase(node, tm_root);
-		list_del(&tm->elem.list);
 		kfree(tm);
 	}
-	write_unlock(&fs_info->tree_mod_log_lock);
-out:
-	spin_unlock(&fs_info->tree_mod_seq_lock);
+	tree_mod_log_write_unlock(fs_info);
 }
 
 /*
@@ -423,11 +458,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
 	struct rb_node **new;
 	struct rb_node *parent = NULL;
 	struct tree_mod_elem *cur;
-	int ret = 0;
 
-	BUG_ON(!tm || !tm->elem.seq);
+	BUG_ON(!tm || !tm->seq);
 
-	write_lock(&fs_info->tree_mod_log_lock);
 	tm_root = &fs_info->tree_mod_log;
 	new = &tm_root->rb_node;
 	while (*new) {
@@ -437,88 +470,81 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
 			new = &((*new)->rb_left);
 		else if (cur->index > tm->index)
 			new = &((*new)->rb_right);
-		else if (cur->elem.seq < tm->elem.seq)
+		else if (cur->seq < tm->seq)
 			new = &((*new)->rb_left);
-		else if (cur->elem.seq > tm->elem.seq)
+		else if (cur->seq > tm->seq)
 			new = &((*new)->rb_right);
 		else {
 			kfree(tm);
-			ret = -EEXIST;
-			goto unlock;
+			return -EEXIST;
 		}
 	}
 
 	rb_link_node(&tm->node, parent, new);
 	rb_insert_color(&tm->node, tm_root);
-unlock:
-	write_unlock(&fs_info->tree_mod_log_lock);
-	return ret;
+	return 0;
 }
 
+/*
+ * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
+ * returns zero with the tree_mod_log_lock acquired. The caller must hold
+ * this until all tree mod log insertions are recorded in the rb tree and then
+ * call tree_mod_log_write_unlock() to release.
+ */
 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
 				    struct extent_buffer *eb) {
 	smp_mb();
 	if (list_empty(&(fs_info)->tree_mod_seq_list))
 		return 1;
-	if (!eb)
-		return 0;
-	if (btrfs_header_level(eb) == 0)
+	if (eb && btrfs_header_level(eb) == 0)
 		return 1;
+
+	tree_mod_log_write_lock(fs_info);
+	if (list_empty(&fs_info->tree_mod_seq_list)) {
+		/*
+		 * someone emptied the list while we were waiting for the lock.
+		 * we must not add to the list when no blocker exists.
		 */
+		tree_mod_log_write_unlock(fs_info);
+		return 1;
+	}
+
 	return 0;
 }
 
 /*
- * This allocates memory and gets a tree modification sequence number when
- * needed.
+ * This allocates memory and gets a tree modification sequence number.
  *
- * Returns 0 when no sequence number is needed, < 0 on error.
- * Returns 1 when a sequence number was added. In this case,
- * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
- * after inserting into the rb tree.
+ * Returns <0 on error.
+ * Returns >0 (the added sequence number) on success.
  */
 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
 				 struct tree_mod_elem **tm_ret)
 {
 	struct tree_mod_elem *tm;
-	int seq;
 
-	if (tree_mod_dont_log(fs_info, NULL))
-		return 0;
-
-	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
+	/*
+	 * once we switch from spin locks to something different, we should
+	 * honor the flags parameter here.
	 */
+	tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
 	if (!tm)
 		return -ENOMEM;
 
-	tm->elem.flags = 0;
-	spin_lock(&fs_info->tree_mod_seq_lock);
-	if (list_empty(&fs_info->tree_mod_seq_list)) {
-		/*
-		 * someone emptied the list while we were waiting for the lock.
-		 * we must not add to the list, because no blocker exists. items
-		 * are removed from the list only when the existing blocker is
-		 * removed from the list.
-		 */
-		kfree(tm);
-		seq = 0;
-		spin_unlock(&fs_info->tree_mod_seq_lock);
-	} else {
-		__get_tree_mod_seq(fs_info, &tm->elem);
-		seq = tm->elem.seq;
-	}
-
-	return seq;
+	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
+	return tm->seq;
 }
 
-static noinline int
-tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
-			     struct extent_buffer *eb, int slot,
-			     enum mod_log_op op, gfp_t flags)
+static inline int
+__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot,
			  enum mod_log_op op, gfp_t flags)
 {
+	struct tree_mod_elem *tm;
 	int ret;
-	struct tree_mod_elem *tm;
 
 	ret = tree_mod_alloc(fs_info, flags, &tm);
-	if (ret <= 0)
+	if (ret < 0)
 		return ret;
 
 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
@@ -530,8 +556,22 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
 	tm->slot = slot;
 	tm->generation = btrfs_node_ptr_generation(eb, slot);
 
-	ret = __tree_mod_log_insert(fs_info, tm);
-	spin_unlock(&fs_info->tree_mod_seq_lock);
+	return __tree_mod_log_insert(fs_info, tm);
+}
+
+static noinline int
+tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
+			     struct extent_buffer *eb, int slot,
+			     enum mod_log_op op, gfp_t flags)
+{
+	int ret;
+
+	if (tree_mod_dont_log(fs_info, eb))
+		return 0;
+
+	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
+
+	tree_mod_log_write_unlock(fs_info);
 	return ret;
 }
 
@@ -542,6 +582,14 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
 }
 
+static noinline int
+tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
+			       struct extent_buffer *eb, int slot,
+			       enum mod_log_op op)
+{
+	return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
+}
+
 static noinline int
 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *eb, int dst_slot, int src_slot,
@@ -555,14 +603,14 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
 		return 0;
 
 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
-		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
+		ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
 				MOD_LOG_KEY_REMOVE_WHILE_MOVING);
 		BUG_ON(ret < 0);
 	}
 
 	ret = tree_mod_alloc(fs_info, flags, &tm);
-	if (ret <= 0)
-		return ret;
+	if (ret < 0)
+		goto out;
 
 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
 	tm->slot = src_slot;
@@ -571,10 +619,26 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
 	tm->op = MOD_LOG_MOVE_KEYS;
 
 	ret = __tree_mod_log_insert(fs_info, tm);
-	spin_unlock(&fs_info->tree_mod_seq_lock);
+out:
+	tree_mod_log_write_unlock(fs_info);
 	return ret;
 }
 
+static inline void
+__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
+{
+	int i;
+	u32 nritems;
+	int ret;
+
+	nritems = btrfs_header_nritems(eb);
+	for (i = nritems - 1; i >= 0; i--) {
+		ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
+				MOD_LOG_KEY_REMOVE_WHILE_FREEING);
+		BUG_ON(ret < 0);
+	}
+}
+
 static noinline int
 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *old_root,
@@ -583,9 +647,14 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 	struct tree_mod_elem *tm;
 	int ret;
 
+	if (tree_mod_dont_log(fs_info, NULL))
+		return 0;
+
+	__tree_mod_log_free_eb(fs_info, old_root);
+
 	ret = tree_mod_alloc(fs_info, flags, &tm);
-	if (ret <= 0)
-		return ret;
+	if (ret < 0)
+		goto out;
 
 	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
 	tm->old_root.logical = old_root->start;
@@ -594,7 +663,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 	tm->op = MOD_LOG_ROOT_REPLACE;
 
 	ret = __tree_mod_log_insert(fs_info, tm);
-	spin_unlock(&fs_info->tree_mod_seq_lock);
+out:
+	tree_mod_log_write_unlock(fs_info);
 	return ret;
 }
 
@@ -608,7 +678,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
 	struct tree_mod_elem *found = NULL;
 	u64 index = start >> PAGE_CACHE_SHIFT;
 
-	read_lock(&fs_info->tree_mod_log_lock);
+	tree_mod_log_read_lock(fs_info);
 	tm_root = &fs_info->tree_mod_log;
 	node = tm_root->rb_node;
 	while (node) {
@@ -617,18 +687,18 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
 			node = node->rb_left;
 		} else if (cur->index > index) {
 			node = node->rb_right;
-		} else if (cur->elem.seq < min_seq) {
+		} else if (cur->seq < min_seq) {
 			node = node->rb_left;
 		} else if (!smallest) {
 			/* we want the node with the highest seq */
 			if (found)
-				BUG_ON(found->elem.seq > cur->elem.seq);
+				BUG_ON(found->seq > cur->seq);
 			found = cur;
 			node = node->rb_left;
-		} else if (cur->elem.seq > min_seq) {
+		} else if (cur->seq > min_seq) {
 			/* we want the node with the smallest seq */
 			if (found)
-				BUG_ON(found->elem.seq < cur->elem.seq);
+				BUG_ON(found->seq < cur->seq);
 			found = cur;
 			node = node->rb_right;
 		} else {
@@ -636,7 +706,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
 			break;
 		}
 	}
-	read_unlock(&fs_info->tree_mod_log_lock);
+	tree_mod_log_read_unlock(fs_info);
 	return found;
 }
@@ -664,7 +734,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
 }
 
-static inline void
+static noinline void
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 		     struct extent_buffer *src, unsigned long dst_offset,
 		     unsigned long src_offset, int nr_items)
@@ -675,18 +745,23 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 	if (tree_mod_dont_log(fs_info, NULL))
 		return;
 
-	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
+	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
+		tree_mod_log_write_unlock(fs_info);
 		return;
+	}
 
+	/* speed this up by single seq for all operations? */
 	for (i = 0; i < nr_items; i++) {
-		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
-					      MOD_LOG_KEY_REMOVE);
+		ret = tree_mod_log_insert_key_locked(fs_info, src,
+						     i + src_offset,
+						     MOD_LOG_KEY_REMOVE);
 		BUG_ON(ret < 0);
-		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
-					      MOD_LOG_KEY_ADD);
+		ret = tree_mod_log_insert_key_locked(fs_info, dst,
						     i + dst_offset,
						     MOD_LOG_KEY_ADD);
 		BUG_ON(ret < 0);
 	}
+
+	tree_mod_log_write_unlock(fs_info);
 }
 
 static inline void
@@ -699,7 +774,7 @@ tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 	BUG_ON(ret < 0);
 }
 
-static inline void
+static noinline void
 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
 			  struct extent_buffer *eb,
 			  struct btrfs_disk_key *disk_key, int slot, int atomic)
@@ -712,30 +787,22 @@ tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
 	BUG_ON(ret < 0);
 }
 
-static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
-				 struct extent_buffer *eb)
+static noinline void
+tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
 {
-	int i;
-	int ret;
-	u32 nritems;
-
 	if (tree_mod_dont_log(fs_info, eb))
 		return;
 
-	nritems = btrfs_header_nritems(eb);
-	for (i = nritems - 1; i >= 0; i--) {
-		ret = tree_mod_log_insert_key(fs_info, eb, i,
-					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
-		BUG_ON(ret < 0);
-	}
+	__tree_mod_log_free_eb(fs_info, eb);
+
+	tree_mod_log_write_unlock(fs_info);
 }
 
-static inline void
+static noinline void
 tree_mod_log_set_root_pointer(struct btrfs_root *root,
 			      struct extent_buffer *new_root_node)
 {
 	int ret;
-	tree_mod_log_free_eb(root->fs_info, root->node);
 	ret = tree_mod_log_insert_root(root->fs_info, root->node,
 				       new_root_node, GFP_NOFS);
 	BUG_ON(ret < 0);
@@ -1069,7 +1136,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
 
 	n = btrfs_header_nritems(eb);
-	while (tm && tm->elem.seq >= time_seq) {
+	while (tm && tm->seq >= time_seq) {
 		/*
 		 * all the operations are recorded with the operator used for
 		 * the modification. as we're going backwards, we do the
fs/btrfs/ctree.h
@@ -1030,6 +1030,13 @@ struct btrfs_block_group_cache {
 	struct list_head cluster_list;
 };
 
+/* delayed seq elem */
+struct seq_list {
+	struct list_head list;
+	u64 seq;
+};
+
+/* fs_info */
 struct reloc_control;
 struct btrfs_device;
 struct btrfs_fs_devices;
@@ -1144,6 +1151,8 @@ struct btrfs_fs_info {
 	spinlock_t tree_mod_seq_lock;
 	atomic_t tree_mod_seq;
 	struct list_head tree_mod_seq_list;
+	struct seq_list tree_mod_seq_elem;
+	wait_queue_head_t tree_mod_seq_wait;
 
 	/* this protects tree_mod_log */
 	rwlock_t tree_mod_log_lock;
@@ -2798,6 +2807,16 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 	kfree(fs_info);
 }
 
+/* tree mod log functions from ctree.c */
+u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+			   struct seq_list *elem);
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+			    struct seq_list *elem);
+static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
+{
+	return atomic_inc_return(&fs_info->tree_mod_seq);
+}
+
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
 			struct btrfs_path *path,
@@ -3157,18 +3176,6 @@ void btrfs_reada_detach(void *handle);
 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
 			 u64 start, int err);
 
-/* delayed seq elem */
-struct seq_list {
-	struct list_head list;
-	u64 seq;
-	u32 flags;
-};
-
-void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
-			    struct seq_list *elem);
-void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
-			    struct seq_list *elem);
-
 static inline int is_fstree(u64 rootid)
 {
 	if (rootid == BTRFS_FS_TREE_OBJECTID ||
fs/btrfs/delayed-ref.c
@@ -233,22 +233,26 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+			    struct btrfs_delayed_ref_root *delayed_refs,
 			    u64 seq)
 {
 	struct seq_list *elem;
+	int ret = 0;
 
-	assert_spin_locked(&delayed_refs->lock);
-	if (list_empty(&delayed_refs->seq_head))
-		return 0;
-
-	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
-	if (seq >= elem->seq) {
-		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
-			 seq, elem->seq, delayed_refs);
-		return 1;
+	spin_lock(&fs_info->tree_mod_seq_lock);
+	if (!list_empty(&fs_info->tree_mod_seq_list)) {
+		elem = list_first_entry(&fs_info->tree_mod_seq_list,
+					struct seq_list, list);
+		if (seq >= elem->seq) {
+			pr_debug("holding back delayed_ref %llu, lowest is "
+				 "%llu (%p)\n", seq, elem->seq, delayed_refs);
+			ret = 1;
+		}
 	}
-	return 0;
+
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+	return ret;
 }
 
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
@@ -526,7 +530,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	ref->in_tree = 1;
 
 	if (is_fstree(ref_root))
-		seq = inc_delayed_seq(delayed_refs);
+		seq = btrfs_inc_tree_mod_seq(fs_info);
 	ref->seq = seq;
 
 	full_ref = btrfs_delayed_node_to_tree_ref(ref);
@@ -585,7 +589,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	ref->in_tree = 1;
 
 	if (is_fstree(ref_root))
-		seq = inc_delayed_seq(delayed_refs);
+		seq = btrfs_inc_tree_mod_seq(fs_info);
 	ref->seq = seq;
 
 	full_ref = btrfs_delayed_node_to_data_ref(ref);
@@ -659,8 +663,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 				   num_bytes, parent, ref_root, level, action,
 				   for_cow);
 	if (!is_fstree(ref_root) &&
-	    waitqueue_active(&delayed_refs->seq_wait))
-		wake_up(&delayed_refs->seq_wait);
+	    waitqueue_active(&fs_info->tree_mod_seq_wait))
+		wake_up(&fs_info->tree_mod_seq_wait);
 	spin_unlock(&delayed_refs->lock);
 
 	return 0;
@@ -708,8 +712,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 				   num_bytes, parent, ref_root, owner, offset,
 				   action, for_cow);
 	if (!is_fstree(ref_root) &&
-	    waitqueue_active(&delayed_refs->seq_wait))
-		wake_up(&delayed_refs->seq_wait);
+	    waitqueue_active(&fs_info->tree_mod_seq_wait))
+		wake_up(&fs_info->tree_mod_seq_wait);
 	spin_unlock(&delayed_refs->lock);
 
 	return 0;
@@ -736,8 +740,8 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
 				   extent_op->is_data);
 
-	if (waitqueue_active(&delayed_refs->seq_wait))
-		wake_up(&delayed_refs->seq_wait);
+	if (waitqueue_active(&fs_info->tree_mod_seq_wait))
+		wake_up(&fs_info->tree_mod_seq_wait);
 	spin_unlock(&delayed_refs->lock);
 	return 0;
 }
fs/btrfs/delayed-ref.h
@@ -139,26 +139,6 @@ struct btrfs_delayed_ref_root {
 	int flushing;
 
 	u64 run_delayed_start;
-
-	/*
-	 * seq number of delayed refs. We need to know if a backref was being
-	 * added before the currently processed ref or afterwards.
-	 */
-	u64 seq;
-
-	/*
-	 * seq_list holds a list of all seq numbers that are currently being
-	 * added to the list. While walking backrefs (btrfs_find_all_roots,
-	 * qgroups), which might take some time, no newer ref must be processed,
-	 * as it might influence the outcome of the walk.
-	 */
-	struct list_head seq_head;
-
-	/*
-	 * when the only refs we have in the list must not be processed, we want
-	 * to wait for more refs to show up or for the end of backref walking.
-	 */
-	wait_queue_head_t seq_wait;
 };
 
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -195,33 +175,8 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 			   struct list_head *cluster, u64 search_start);
 
-static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
-{
-	assert_spin_locked(&delayed_refs->lock);
-	++delayed_refs->seq;
-	return delayed_refs->seq;
-}
-
-static inline void
-btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
-{
-	assert_spin_locked(&delayed_refs->lock);
-	elem->seq = delayed_refs->seq;
-	list_add_tail(&elem->list, &delayed_refs->seq_head);
-}
-
-static inline void
-btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
-		      struct seq_list *elem)
-{
-	spin_lock(&delayed_refs->lock);
-	list_del(&elem->list);
-	wake_up(&delayed_refs->seq_wait);
-	spin_unlock(&delayed_refs->lock);
-}
-
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+			    struct btrfs_delayed_ref_root *delayed_refs,
 			    u64 seq);
 
 /*
fs/btrfs/disk-io.c
@@ -1944,6 +1944,8 @@ int open_ctree(struct super_block *sb,
 	fs_info->free_chunk_space = 0;
 
 	fs_info->tree_mod_log = RB_ROOT;
+	init_waitqueue_head(&fs_info->tree_mod_seq_wait);
+
 	/* readahead state */
 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
 	spin_lock_init(&fs_info->reada_lock);
fs/btrfs/extent-tree.c
@@ -2217,6 +2217,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_node *ref;
 	struct btrfs_delayed_ref_head *locked_ref = NULL;
 	struct btrfs_delayed_extent_op *extent_op;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	int count = 0;
 	int must_insert_reserved = 0;
@@ -2255,7 +2256,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		ref = select_delayed_ref(locked_ref);
 
 		if (ref && ref->seq &&
-		    btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
+		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
 			/*
 			 * there are still refs with lower seq numbers in the
 			 * process of being added. Don't run this ref yet.
@@ -2337,7 +2338,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		}
 
 next:
-		do_chunk_alloc(trans, root->fs_info->extent_root,
+		do_chunk_alloc(trans, fs_info->extent_root,
 			       2 * 1024 * 1024,
 			       btrfs_get_alloc_profile(root, 0),
 			       CHUNK_ALLOC_NO_FORCE);
@@ -2347,18 +2348,19 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 	return count;
 }
 
-static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
+static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
+			       struct btrfs_delayed_ref_root *delayed_refs,
 			       unsigned long num_refs,
 			       struct list_head *first_seq)
 {
 	spin_unlock(&delayed_refs->lock);
 	pr_debug("waiting for more refs (num %ld, first %p)\n",
 		 num_refs, first_seq);
-	wait_event(delayed_refs->seq_wait,
+	wait_event(fs_info->tree_mod_seq_wait,
 		   num_refs != delayed_refs->num_entries ||
-		   delayed_refs->seq_head.next != first_seq);
+		   fs_info->tree_mod_seq_list.next != first_seq);
 	pr_debug("done waiting for more refs (num %ld, first %p)\n",
-		 delayed_refs->num_entries, delayed_refs->seq_head.next);
+		 delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
 	spin_lock(&delayed_refs->lock);
 }
 
@@ -2403,6 +2405,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 again:
 	consider_waiting = 0;
 	spin_lock(&delayed_refs->lock);
+
 	if (count == 0) {
 		count = delayed_refs->num_entries * 2;
 		run_most = 1;
@@ -2437,7 +2440,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			num_refs = delayed_refs->num_entries;
-			first_seq = delayed_refs->seq_head.next;
+			first_seq = root->fs_info->tree_mod_seq_list.next;
 		} else {
-			wait_for_more_refs(delayed_refs,
+			wait_for_more_refs(root->fs_info, delayed_refs,
 					   num_refs, first_seq);
 			/*
 			 * after waiting, things have changed. we
@@ -5190,8 +5193,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	rb_erase(&head->node.rb_node, &delayed_refs->root);
 
 	delayed_refs->num_entries--;
-	if (waitqueue_active(&delayed_refs->seq_wait))
-		wake_up(&delayed_refs->seq_wait);
+	if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
+		wake_up(&root->fs_info->tree_mod_seq_wait);
 
 	/*
 	 * we don't take a ref on the node because we're removing it from the
fs/btrfs/transaction.c
@@ -38,7 +38,6 @@ void put_transaction(struct btrfs_transaction *transaction)
 	if (atomic_dec_and_test(&transaction->use_count)) {
 		BUG_ON(!list_empty(&transaction->list));
 		WARN_ON(transaction->delayed_refs.root.rb_node);
-		WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
 		memset(transaction, 0, sizeof(*transaction));
 		kmem_cache_free(btrfs_transaction_cachep, transaction);
 	}
@@ -126,7 +125,6 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
 	cur_trans->delayed_refs.num_heads = 0;
 	cur_trans->delayed_refs.flushing = 0;
 	cur_trans->delayed_refs.run_delayed_start = 0;
-	cur_trans->delayed_refs.seq = 1;
 
 	/*
 	 * although the tree mod log is per file system and not per transaction,
@@ -145,10 +143,8 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
 	}
 	atomic_set(&fs_info->tree_mod_seq, 0);
 
-	init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
 	spin_lock_init(&cur_trans->commit_lock);
 	spin_lock_init(&cur_trans->delayed_refs.lock);
-	INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);