Commit 183bcc89 authored by Kent Overstreet

bcachefs: Clean up btree write buffer write ref handling

__bch2_btree_write_buffer_flush() now assumes a write ref is already
held (as called by the transaction commit path); the wrappers
bch2_btree_write_buffer_flush() and bch2_btree_write_buffer_flush_sync()
take an explicit write ref (BCH_WRITE_REF_btree_write_buffer).

This means that internally the write buffer code can always use
BTREE_INSERT_NOCHECK_RW, instead of the previous code passing flags
around and hoping the NOCHECK_RW flag was always carried through
correctly.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent cf5bacb6
...@@ -672,7 +672,8 @@ struct btree_trans_buf { ...@@ -672,7 +672,8 @@ struct btree_trans_buf {
x(invalidate) \ x(invalidate) \
x(delete_dead_snapshots) \ x(delete_dead_snapshots) \
x(snapshot_delete_pagecache) \ x(snapshot_delete_pagecache) \
x(sysfs) x(sysfs) \
x(btree_write_buffer)
enum bch_write_ref { enum bch_write_ref {
#define x(n) BCH_WRITE_REF_##n, #define x(n) BCH_WRITE_REF_##n,
......
...@@ -961,8 +961,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags, ...@@ -961,8 +961,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
if (wb->state.nr > wb->size * 3 / 4) { if (wb->state.nr > wb->size * 3 / 4) {
bch2_trans_begin(trans); bch2_trans_begin(trans);
ret = __bch2_btree_write_buffer_flush(trans, ret = __bch2_btree_write_buffer_flush(trans, true);
flags|BCH_TRANS_COMMIT_no_check_rw, true);
if (!ret) { if (!ret) {
trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_); trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush); ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
...@@ -1077,8 +1076,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) ...@@ -1077,8 +1076,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
bch2_trans_begin(trans); bch2_trans_begin(trans);
bch2_trans_unlock(trans); bch2_trans_unlock(trans);
ret = __bch2_btree_write_buffer_flush(trans, ret = __bch2_btree_write_buffer_flush(trans, true);
flags|BCH_TRANS_COMMIT_no_check_rw, true);
if (!ret) { if (!ret) {
trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_); trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush); ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
......
...@@ -137,8 +137,7 @@ btree_write_buffered_insert(struct btree_trans *trans, ...@@ -137,8 +137,7 @@ btree_write_buffered_insert(struct btree_trans *trans,
return ret; return ret;
} }
int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags, int __bch2_btree_write_buffer_flush(struct btree_trans *trans, bool locked)
bool locked)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct journal *j = &c->journal; struct journal *j = &c->journal;
...@@ -210,8 +209,8 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f ...@@ -210,8 +209,8 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
iter.path->preserve = false; iter.path->preserve = false;
do { do {
ret = bch2_btree_write_buffer_flush_one(trans, &iter, i, ret = bch2_btree_write_buffer_flush_one(trans, &iter, i, 0,
commit_flags, &write_locked, &fast); &write_locked, &fast);
if (!write_locked) if (!write_locked)
bch2_trans_begin(trans); bch2_trans_begin(trans);
} while (bch2_err_matches(ret, BCH_ERR_transaction_restart)); } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
...@@ -252,9 +251,6 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f ...@@ -252,9 +251,6 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
btree_write_buffered_journal_cmp, btree_write_buffered_journal_cmp,
NULL); NULL);
commit_flags &= ~BCH_WATERMARK_MASK;
commit_flags |= BCH_WATERMARK_reclaim;
for (i = keys; i < keys + nr; i++) { for (i = keys; i < keys + nr; i++) {
if (!i->journal_seq) if (!i->journal_seq)
continue; continue;
...@@ -263,7 +259,8 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f ...@@ -263,7 +259,8 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
bch2_btree_write_buffer_journal_flush); bch2_btree_write_buffer_journal_flush);
ret = commit_do(trans, NULL, NULL, ret = commit_do(trans, NULL, NULL,
commit_flags| BCH_WATERMARK_reclaim|
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_no_enospc| BCH_TRANS_COMMIT_no_enospc|
BCH_TRANS_COMMIT_no_journal_res| BCH_TRANS_COMMIT_no_journal_res|
BCH_TRANS_COMMIT_journal_reclaim, BCH_TRANS_COMMIT_journal_reclaim,
...@@ -279,16 +276,33 @@ int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans) ...@@ -279,16 +276,33 @@ int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
return -BCH_ERR_erofs_no_writes;
trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_); trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
bch2_trans_unlock(trans); bch2_trans_unlock(trans);
mutex_lock(&c->btree_write_buffer.flush_lock); mutex_lock(&trans->c->btree_write_buffer.flush_lock);
return __bch2_btree_write_buffer_flush(trans, 0, true); int ret = __bch2_btree_write_buffer_flush(trans, true);
bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
return ret;
}
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{
return __bch2_btree_write_buffer_flush(trans, false);
} }
int bch2_btree_write_buffer_flush(struct btree_trans *trans) int bch2_btree_write_buffer_flush(struct btree_trans *trans)
{ {
return __bch2_btree_write_buffer_flush(trans, 0, false); struct bch_fs *c = trans->c;
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
return -BCH_ERR_erofs_no_writes;
int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
return ret;
} }
static int bch2_btree_write_buffer_journal_flush(struct journal *j, static int bch2_btree_write_buffer_journal_flush(struct journal *j,
...@@ -300,7 +314,7 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j, ...@@ -300,7 +314,7 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j,
mutex_lock(&wb->flush_lock); mutex_lock(&wb->flush_lock);
return bch2_trans_run(c, return bch2_trans_run(c,
__bch2_btree_write_buffer_flush(trans, BCH_TRANS_COMMIT_no_check_rw, true)); __bch2_btree_write_buffer_flush(trans, true));
} }
static inline u64 btree_write_buffer_ref(int idx) static inline u64 btree_write_buffer_ref(int idx)
......
...@@ -2,7 +2,8 @@ ...@@ -2,7 +2,8 @@
#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H #ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H
#define _BCACHEFS_BTREE_WRITE_BUFFER_H #define _BCACHEFS_BTREE_WRITE_BUFFER_H
int __bch2_btree_write_buffer_flush(struct btree_trans *, unsigned, bool); int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
int __bch2_btree_write_buffer_flush(struct btree_trans *, bool);
int bch2_btree_write_buffer_flush_sync(struct btree_trans *); int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
int bch2_btree_write_buffer_flush(struct btree_trans *); int bch2_btree_write_buffer_flush(struct btree_trans *);
......
...@@ -1005,7 +1005,7 @@ static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s) ...@@ -1005,7 +1005,7 @@ static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
unsigned i, nr_data = v->nr_blocks - v->nr_redundant; unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
int ret = 0; int ret = 0;
ret = bch2_btree_write_buffer_flush(trans); ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
if (ret) if (ret)
goto err; goto err;
......
...@@ -1162,10 +1162,6 @@ int bch2_delete_dead_inodes(struct bch_fs *c) ...@@ -1162,10 +1162,6 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
again: again:
need_another_pass = false; need_another_pass = false;
ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
goto err;
/* /*
* Weird transaction restart handling here because on successful delete, * Weird transaction restart handling here because on successful delete,
* bch2_inode_rm_snapshot() will return a nested transaction restart, * bch2_inode_rm_snapshot() will return a nested transaction restart,
...@@ -1196,8 +1192,12 @@ int bch2_delete_dead_inodes(struct bch_fs *c) ...@@ -1196,8 +1192,12 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
} }
bch2_trans_iter_exit(trans, &iter); bch2_trans_iter_exit(trans, &iter);
if (!ret && need_another_pass) if (!ret && need_another_pass) {
ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
goto err;
goto again; goto again;
}
err: err:
bch2_trans_put(trans); bch2_trans_put(trans);
......
...@@ -154,6 +154,9 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt, ...@@ -154,6 +154,9 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
move_buckets_wait(ctxt, buckets_in_flight, false); move_buckets_wait(ctxt, buckets_in_flight, false);
ret = bch2_btree_write_buffer_flush(trans); ret = bch2_btree_write_buffer_flush(trans);
if (bch2_err_matches(ret, EROFS))
return ret;
if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()", if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
__func__, bch2_err_str(ret))) __func__, bch2_err_str(ret)))
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment