Commit 87ced107 authored by Kent Overstreet

bcachefs: Convert EAGAIN errors to private error codes

More error code cleanup, for better error messages and debugability.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 858536c7
...@@ -1219,7 +1219,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans, ...@@ -1219,7 +1219,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) || if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
bch2_err_matches(ret, BCH_ERR_freelist_empty)) bch2_err_matches(ret, BCH_ERR_freelist_empty))
return cl return cl
? -EAGAIN ? -BCH_ERR_bucket_alloc_blocked
: -BCH_ERR_ENOSPC_bucket_alloc; : -BCH_ERR_ENOSPC_bucket_alloc;
return ret; return ret;
......
...@@ -531,7 +531,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl) ...@@ -531,7 +531,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
} }
trace_and_count(c, btree_cache_cannibalize_lock_fail, c); trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
return -EAGAIN; return -BCH_ERR_btree_cache_cannibalize_lock_blocked;
success: success:
trace_and_count(c, btree_cache_cannibalize_lock, c); trace_and_count(c, btree_cache_cannibalize_lock, c);
...@@ -906,8 +906,6 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr ...@@ -906,8 +906,6 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
* bch_btree_node_get - find a btree node in the cache and lock it, reading it * bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary. * in from disk if necessary.
* *
* If IO is necessary and running under generic_make_request, returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on * The btree node will have either a read or a write lock held, depending on
* the @write parameter. * the @write parameter.
*/ */
......
...@@ -1163,7 +1163,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, ...@@ -1163,7 +1163,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
bch2_trans_unlock(trans); bch2_trans_unlock(trans);
closure_sync(&cl); closure_sync(&cl);
} while (ret == -EAGAIN); } while (bch2_err_matches(ret, BCH_ERR_operation_blocked));
} }
if (ret) { if (ret) {
......
...@@ -332,15 +332,10 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s, ...@@ -332,15 +332,10 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans, static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
unsigned flags) unsigned flags)
{ {
struct bch_fs *c = trans->c; return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
int ret;
ret = bch2_journal_res_get(&c->journal, &trans->journal_res,
trans->journal_u64s, trans->journal_u64s,
flags| flags|
(trans->flags & JOURNAL_WATERMARK_MASK)); (trans->flags & JOURNAL_WATERMARK_MASK));
return ret == -EAGAIN ? -BCH_ERR_btree_insert_need_journal_res : ret;
} }
#define JSET_ENTRY_LOG_U64s 4 #define JSET_ENTRY_LOG_U64s 4
...@@ -864,7 +859,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, ...@@ -864,7 +859,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
&trans->journal_preres, trans->journal_preres_u64s, &trans->journal_preres, trans->journal_preres_u64s,
JOURNAL_RES_GET_NONBLOCK| JOURNAL_RES_GET_NONBLOCK|
(trans->flags & JOURNAL_WATERMARK_MASK)); (trans->flags & JOURNAL_WATERMARK_MASK));
if (unlikely(ret == -EAGAIN)) if (unlikely(ret == -BCH_ERR_journal_preres_get_blocked))
ret = bch2_trans_journal_preres_get_cold(trans, ret = bch2_trans_journal_preres_get_cold(trans,
trans->journal_preres_u64s, trace_ip); trans->journal_preres_u64s, trace_ip);
if (unlikely(ret)) if (unlikely(ret))
...@@ -936,7 +931,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, ...@@ -936,7 +931,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
if (ret) if (ret)
trace_and_count(c, trans_restart_mark_replicas, trans, trace_ip); trace_and_count(c, trans_restart_mark_replicas, trans, trace_ip);
break; break;
case -BCH_ERR_btree_insert_need_journal_res: case -BCH_ERR_journal_res_get_blocked:
bch2_trans_unlock(trans); bch2_trans_unlock(trans);
if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) && if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
......
...@@ -84,6 +84,11 @@ ...@@ -84,6 +84,11 @@
x(EROFS, erofs_journal_err) \ x(EROFS, erofs_journal_err) \
x(EROFS, erofs_sb_err) \ x(EROFS, erofs_sb_err) \
x(EROFS, insufficient_devices) \ x(EROFS, insufficient_devices) \
x(0, operation_blocked) \
x(BCH_ERR_operation_blocked, btree_cache_cannibalize_lock_blocked) \
x(BCH_ERR_operation_blocked, journal_res_get_blocked) \
x(BCH_ERR_operation_blocked, journal_preres_get_blocked) \
x(BCH_ERR_operation_blocked, bucket_alloc_blocked) \
x(BCH_ERR_invalid, invalid_sb) \ x(BCH_ERR_invalid, invalid_sb) \
x(BCH_ERR_invalid_sb, invalid_sb_magic) \ x(BCH_ERR_invalid_sb, invalid_sb_magic) \
x(BCH_ERR_invalid_sb, invalid_sb_version) \ x(BCH_ERR_invalid_sb, invalid_sb_version) \
......
...@@ -1264,7 +1264,7 @@ static void __bch2_write(struct bch_write_op *op) ...@@ -1264,7 +1264,7 @@ static void __bch2_write(struct bch_write_op *op)
BCH_WRITE_ONLY_SPECIFIED_DEVS)) BCH_WRITE_ONLY_SPECIFIED_DEVS))
? NULL : &op->cl, &wp)); ? NULL : &op->cl, &wp));
if (unlikely(ret)) { if (unlikely(ret)) {
if (ret == -EAGAIN) if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
break; break;
goto err; goto err;
......
...@@ -198,12 +198,6 @@ static bool journal_entry_close(struct journal *j) ...@@ -198,12 +198,6 @@ static bool journal_entry_close(struct journal *j)
/* /*
* should _only_ called from journal_res_get() - when we actually want a * should _only_ called from journal_res_get() - when we actually want a
* journal reservation - journal entry is open means journal is dirty: * journal reservation - journal entry is open means journal is dirty:
*
* returns:
* 0: success
* -ENOSPC: journal currently full, must invoke reclaim
* -EAGAIN: journal blocked, must wait
* -EROFS: insufficient rw devices or journal error
*/ */
static int journal_entry_open(struct journal *j) static int journal_entry_open(struct journal *j)
{ {
...@@ -455,7 +449,9 @@ static int __journal_res_get(struct journal *j, struct journal_res *res, ...@@ -455,7 +449,9 @@ static int __journal_res_get(struct journal *j, struct journal_res *res,
} }
} }
return ret == JOURNAL_ERR_insufficient_devices ? -EROFS : -EAGAIN; return ret == JOURNAL_ERR_insufficient_devices
? -EROFS
: -BCH_ERR_journal_res_get_blocked;
} }
/* /*
...@@ -474,7 +470,8 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res, ...@@ -474,7 +470,8 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
int ret; int ret;
closure_wait_event(&j->async_wait, closure_wait_event(&j->async_wait,
(ret = __journal_res_get(j, res, flags)) != -EAGAIN || (ret = __journal_res_get(j, res, flags)) !=
-BCH_ERR_journal_res_get_blocked||
(flags & JOURNAL_RES_GET_NONBLOCK)); (flags & JOURNAL_RES_GET_NONBLOCK));
return ret; return ret;
} }
...@@ -792,12 +789,9 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, ...@@ -792,12 +789,9 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
} else { } else {
ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none,
false, cl); false, cl);
if (IS_ERR(ob[nr_got])) { ret = PTR_ERR_OR_ZERO(ob[nr_got]);
ret = cl if (ret)
? -EAGAIN
: -BCH_ERR_ENOSPC_bucket_alloc;
break; break;
}
bu[nr_got] = ob[nr_got]->bucket; bu[nr_got] = ob[nr_got]->bucket;
} }
...@@ -907,7 +901,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, ...@@ -907,7 +901,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
closure_init_stack(&cl); closure_init_stack(&cl);
while (ja->nr != nr && (ret == 0 || ret == -EAGAIN)) { while (ja->nr != nr && (ret == 0 || ret == -BCH_ERR_bucket_alloc_blocked)) {
struct disk_reservation disk_res = { 0, 0 }; struct disk_reservation disk_res = { 0, 0 };
closure_sync(&cl); closure_sync(&cl);
......
...@@ -460,7 +460,7 @@ static inline int bch2_journal_preres_get(struct journal *j, ...@@ -460,7 +460,7 @@ static inline int bch2_journal_preres_get(struct journal *j,
return 0; return 0;
if (flags & JOURNAL_RES_GET_NONBLOCK) if (flags & JOURNAL_RES_GET_NONBLOCK)
return -EAGAIN; return -BCH_ERR_journal_preres_get_blocked;
return __bch2_journal_preres_get(j, res, new_u64s, flags); return __bch2_journal_preres_get(j, res, new_u64s, flags);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment