Commit 82732ef5 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Improve btree_node_write_if_need()

btree_node_write_if_need() kicks off a btree node write only if
need_write is set; this makes the locking easier to reason about by
moving the check into the cmpxchg loop in __bch2_btree_node_write().
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 39dcace8
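
The interface change in one place: the bool already_started parameter becomes an unsigned flags word, with two flags defined in the header hunk below. A condensed sketch of the before/after calling convention (all names are taken from the diff, nothing invented):

	/* New flags, declared in the header hunk below: */
	#define BTREE_WRITE_ONLY_IF_NEED	(1U << 0)	/* only write if need_write is set */
	#define BTREE_WRITE_ALREADY_STARTED	(1U << 1)	/* this write was already claimed */

	/* before */
	__bch2_btree_node_write(c, b, false);
	bch2_btree_node_write(c, b, SIX_LOCK_intent);

	/* after: flags == 0 means an unconditional write, as before */
	__bch2_btree_node_write(c, b, 0);
	bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);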
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -239,9 +239,9 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 	 * the post write cleanup:
 	 */
 	if (bch2_verify_btree_ondisk)
-		bch2_btree_node_write(c, b, SIX_LOCK_intent);
+		bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
 	else
-		__bch2_btree_node_write(c, b, false);
+		__bch2_btree_node_write(c, b, 0);
 
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
@@ -1064,7 +1064,7 @@ void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
 
 	six_lock_write(&b->c.lock, NULL, NULL);
 	if (btree_node_dirty(b)) {
-		__bch2_btree_node_write(c, b, false);
+		__bch2_btree_node_write(c, b, 0);
 		six_unlock_write(&b->c.lock);
 		six_unlock_intent(&b->c.lock);
 		goto wait_on_io;
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -471,7 +471,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 	};
 
 	if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
-		bch2_btree_node_write(c, b, SIX_LOCK_write);
+		bch2_btree_node_write(c, b, SIX_LOCK_write, 0);
 		reinit_iter = true;
 	}
 }
@@ -1620,7 +1620,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
 	} while ((v = cmpxchg(&b->flags, old, new)) != old);
 
 	if (new & (1U << BTREE_NODE_write_in_flight))
-		__bch2_btree_node_write(c, b, true);
+		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED);
 }
 
 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
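
A note on BTREE_WRITE_ALREADY_STARTED: when __btree_node_write_done() finds write_in_flight still set after a write completes (another write was requested while this one was in flight), it restarts the write itself. The flag tells __bch2_btree_node_write() to skip the claim step, since this caller already owns the in-flight state. Condensed control flow, pieced together from the hunks in this file (function bodies elided):

	void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
	{
		if (flags & BTREE_WRITE_ALREADY_STARTED)
			goto do_write;	/* completion path: write already claimed */

		/* cmpxchg loop: validate flags and claim the write (see below) */
	do_write:
		/* ... assemble and submit the btree node write ... */
	}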
@@ -1741,7 +1741,7 @@ static void btree_write_submit(struct work_struct *work)
 
 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
 }
 
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 {
 	struct btree_write_bio *wbio;
 	struct bset_tree *t;
@@ -1758,7 +1758,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
 	void *data;
 	int ret;
 
-	if (already_started)
+	if (flags & BTREE_WRITE_ALREADY_STARTED)
 		goto do_write;
 
 	/*
@@ -1774,13 +1774,18 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
 	if (!(old & (1 << BTREE_NODE_dirty)))
 		return;
 
+	if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
+	    !(old & (1 << BTREE_NODE_need_write)))
+		return;
+
 	if (!btree_node_may_write(b))
 		return;
 
 	if (old & (1 << BTREE_NODE_never_write))
 		return;
 
-	BUG_ON(old & (1 << BTREE_NODE_write_in_flight));
+	if (old & (1 << BTREE_NODE_write_in_flight))
+		return;
 
 	new &= ~(1 << BTREE_NODE_dirty);
 	new &= ~(1 << BTREE_NODE_need_write);
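
This hunk is the heart of the change: the need_write test (for BTREE_WRITE_ONLY_IF_NEED callers) and the write_in_flight test (formerly a BUG_ON) are evaluated against the same snapshot of b->flags that the cmpxchg will commit. If the compare-and-exchange loses a race, the loop rereads the flags and re-runs every check, so there is no window between deciding to write and claiming the write. A condensed sketch of the loop's shape; surrounding code is elided, and the line setting write_in_flight is an assumption based on the completion path above:

	unsigned long old, new, v = READ_ONCE(b->flags);

	do {
		old = new = v;

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;	/* another write already owns this node */

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);	/* assumed: claim the write */
	} while ((v = cmpxchg(&b->flags, old, new)) != old);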
@@ -2044,12 +2049,13 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
  * Use this one if the node is intent locked:
  */
 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
-			   enum six_lock_type lock_type_held)
+			   enum six_lock_type lock_type_held,
+			   unsigned flags)
 {
 	if (lock_type_held == SIX_LOCK_intent ||
 	    (lock_type_held == SIX_LOCK_read &&
 	     six_lock_tryupgrade(&b->c.lock))) {
-		__bch2_btree_node_write(c, b, false);
+		__bch2_btree_node_write(c, b, flags);
 
 		/* don't cycle lock unnecessarily: */
 		if (btree_node_just_written(b) &&
@@ -2061,7 +2067,7 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 		if (lock_type_held == SIX_LOCK_read)
 			six_lock_downgrade(&b->c.lock);
 	} else {
-		__bch2_btree_node_write(c, b, false);
+		__bch2_btree_node_write(c, b, flags);
 		if (lock_type_held == SIX_LOCK_write &&
 		    btree_node_just_written(b))
 			bch2_btree_post_write_cleanup(c, b);
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -143,20 +143,19 @@ int bch2_btree_root_read(struct bch_fs *, enum btree_id,
 void bch2_btree_complete_write(struct bch_fs *, struct btree *,
 			       struct btree_write *);
 
-void __bch2_btree_node_write(struct bch_fs *, struct btree *, bool);
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 
+#define BTREE_WRITE_ONLY_IF_NEED	(1U << 0)
+#define BTREE_WRITE_ALREADY_STARTED	(1U << 1)
+
+void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
-			   enum six_lock_type);
+			   enum six_lock_type, unsigned);
 
 static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
 					    enum six_lock_type lock_held)
 {
-	if (b->written &&
-	    btree_node_need_write(b) &&
-	    btree_node_may_write(b) &&
-	    !btree_node_write_in_flight(b))
-		bch2_btree_node_write(c, b, lock_held);
+	bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
 }
 
 #define bch2_btree_node_write_cond(_c, _b, cond)				\
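
With the flag in place, btree_node_write_if_need() no longer open-codes its checks; the equivalent tests now run inside __bch2_btree_node_write()'s cmpxchg loop, atomically against the flags word rather than racily in the caller. Callers are unchanged, e.g.:

	/* flush this node if it still needs writing; a no-op otherwise */
	btree_node_write_if_need(c, b, SIX_LOCK_read);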
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1393,8 +1393,8 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
 		six_unlock_write(&n2->c.lock);
 		six_unlock_write(&n1->c.lock);
 
-		bch2_btree_node_write(c, n1, SIX_LOCK_intent);
-		bch2_btree_node_write(c, n2, SIX_LOCK_intent);
+		bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
+		bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
 
 		/*
 		 * Note that on recursive parent_keys == keys, so we
@@ -1413,7 +1413,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
 
 			btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
 
-			bch2_btree_node_write(c, n3, SIX_LOCK_intent);
+			bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
 		}
 	} else {
 		trace_btree_compact(c, b);
@@ -1421,7 +1421,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
 		bch2_btree_build_aux_trees(n1);
 		six_unlock_write(&n1->c.lock);
 
-		bch2_btree_node_write(c, n1, SIX_LOCK_intent);
+		bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
 
 		if (parent)
 			bch2_keylist_add(&as->parent_keys, &n1->key);
@@ -1709,7 +1709,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	bch2_btree_build_aux_trees(n);
 	six_unlock_write(&n->c.lock);
 
-	bch2_btree_node_write(c, n, SIX_LOCK_intent);
+	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
 
 	bkey_init(&delete.k);
 	delete.k.p = prev->key.k.p;
@@ -1783,7 +1783,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
 
 	trace_btree_gc_rewrite_node(c, b);
 
-	bch2_btree_node_write(c, n, SIX_LOCK_intent);
+	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
 
 	if (parent) {
 		bch2_keylist_add(&as->parent_keys, &n->key);