Commit 1bb3c2a9 authored by Kent Overstreet

bcachefs: New error message helpers

Add two new helpers for printing error messages with __func__ and
bch2_err_str():
 - bch_err_fn
 - bch_err_msg

Also kill the old error strings in the recovery path, which were causing
us to incorrectly report memory allocation failures - they're not needed
anymore.
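
For illustration, a hypothetical caller (the names bch2_frob_widgets() and
frob_widgets() are invented for this example, not part of the patch) would
use the new helpers roughly like this:

    static int bch2_frob_widgets(struct bch_fs *c)
    {
            /* hypothetical operation returning a bcachefs error code */
            int ret = frob_widgets(c);

            /*
             * expands to:
             * bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret))
             */
            if (ret)
                    bch_err_fn(c, ret);

            return ret;
    }

bch_err_msg() takes an extra message fragment for context:
bch_err_msg(c, ret, "frobbing widgets") expands to
bch_err(c, "%s(): error frobbing widgets %s", __func__, bch2_err_str(ret)),
so callers no longer spell out __func__ and bch2_err_str() by hand.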
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent a83e108f
@@ -577,7 +577,7 @@ int bch2_alloc_read(struct bch_fs *c)
bch2_trans_exit(&trans);
if (ret)
bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -684,8 +684,7 @@ int bch2_bucket_gens_init(struct bch_fs *c)
bch2_trans_exit(&trans);
if (ret)
bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -730,7 +729,7 @@ int bch2_bucket_gens_read(struct bch_fs *c)
bch2_trans_exit(&trans);
if (ret)
bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1521,7 +1520,9 @@ int bch2_check_alloc_info(struct bch_fs *c)
bch2_check_bucket_gens_key(&trans, &iter, k));
err:
bch2_trans_exit(&trans);
return ret < 0 ? ret : 0;
if (ret)
bch_err_fn(c, ret);
return ret;
}
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
@@ -1599,20 +1600,18 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
bch2_check_alloc_to_lru_ref(&trans, &iter));
bch2_trans_exit(&trans);
return ret < 0 ? ret : 0;
ret = bch2_trans_run(c,
for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
bch2_check_alloc_to_lru_ref(&trans, &iter)));
if (ret)
bch_err_fn(c, ret);
return ret;
}
static int bch2_discard_one_bucket(struct btree_trans *trans,
@@ -2024,6 +2023,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
ret = bch2_dev_freespace_init(c, ca, &last_updated);
if (ret) {
percpu_ref_put(&ca->ref);
bch_err_fn(c, ret);
return ret;
}
}
@@ -2032,11 +2032,10 @@ int bch2_fs_freespace_init(struct bch_fs *c)
mutex_lock(&c->sb_lock);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
bch_verbose(c, "done initializing freespace");
}
return ret;
return 0;
}
/* Bucket IO clocks: */
@@ -404,12 +404,16 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
return bch2_trans_run(c,
ret = bch2_trans_run(c,
for_each_btree_key_commit(&trans, iter,
BTREE_ID_backpointers, POS_MIN, 0, k,
NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
bch2_check_btree_backpointer(&trans, &iter, k)));
if (ret)
bch_err_fn(c, ret);
return ret;
}
struct bpos_level {
@@ -769,6 +773,8 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
}
bch2_trans_exit(&trans);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -872,5 +878,7 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
}
bch2_trans_exit(&trans);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -291,6 +291,11 @@ do { \
#define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
printk_ratelimited(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
#define bch_err_fn(_c, _ret) \
bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret))
#define bch_err_msg(_c, _ret, _msg) \
bch_err(_c, "%s(): error " _msg " %s", __func__, bch2_err_str(_ret))
#define bch_verbose(c, fmt, ...) \
do { \
if ((c)->opts.verbose) \
@@ -404,8 +404,7 @@ static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct
}
if (ret) {
bch_err(c, "%s: error getting btree node: %s",
__func__, bch2_err_str(ret));
bch_err_msg(c, ret, "getting btree node");
break;
}
@@ -473,8 +472,7 @@ static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct
ret = PTR_ERR_OR_ZERO(cur);
if (ret) {
bch_err(c, "%s: error getting btree node: %s",
__func__, bch2_err_str(ret));
bch_err_msg(c, ret, "getting btree node");
goto err;
}
@@ -687,7 +685,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
if (!new) {
bch_err(c, "%s: error allocating new key", __func__);
bch_err_msg(c, ret, "allocating new key");
ret = -BCH_ERR_ENOMEM_gc_repair_key;
goto err;
}
@@ -814,7 +812,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
fsck_err:
err:
if (ret)
bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -919,11 +917,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
false, &k, true);
if (ret) {
bch_err(c, "%s: error from bch2_gc_mark_key: %s",
__func__, bch2_err_str(ret));
if (ret)
goto fsck_err;
}
if (b->c.level) {
bch2_bkey_buf_reassemble(&cur, c, k);
@@ -981,8 +976,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
continue;
}
} else if (ret) {
bch_err(c, "%s: error getting btree node: %s",
__func__, bch2_err_str(ret));
bch_err_msg(c, ret, "getting btree node");
break;
}
@@ -1049,7 +1043,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
six_unlock_read(&b->c.lock);
if (ret < 0)
bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
printbuf_exit(&buf);
return ret;
}
@@ -1079,7 +1073,7 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
: bch2_gc_btree(&trans, ids[i], initial, metadata_only);
if (ret < 0)
bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
bch2_trans_exit(&trans);
return ret;
@@ -1277,7 +1271,7 @@ static int bch2_gc_done(struct bch_fs *c,
if (ca)
percpu_ref_put(&ca->ref);
if (ret)
bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
percpu_up_write(&c->mark_lock);
printbuf_exit(&buf);
@@ -1883,6 +1877,9 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
* allocator thread - issue wakeup in case they blocked on gc_lock:
*/
closure_wake_up(&c->freelist_wait);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -1988,7 +1988,10 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
return bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
if (ret)
bch_err_fn(c, ret);
return ret;
}
/* Disk reservations: */
@@ -798,7 +798,7 @@ static void ec_stripe_delete_work(struct work_struct *work)
ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
ec_stripe_delete(&trans, idx));
if (ret) {
bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
break;
}
}
@@ -1845,7 +1845,7 @@ int bch2_stripes_read(struct bch_fs *c)
bch2_trans_exit(&trans);
if (ret)
bch_err(c, "error reading stripes: %i", ret);
bch_err_fn(c, ret);
return ret;
}
@@ -303,7 +303,7 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_exit(trans, &iter);
err:
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -983,7 +983,7 @@ static int check_inode(struct btree_trans *trans,
err:
fsck_err:
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1009,7 +1009,7 @@ static int check_inodes(struct bch_fs *c, bool full)
bch2_trans_exit(&trans);
snapshots_seen_exit(&s);
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1129,7 +1129,7 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
}
fsck_err:
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
if (!ret && trans_was_restarted(trans, restart_count))
ret = -BCH_ERR_transaction_restart_nested;
return ret;
@@ -1353,7 +1353,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1395,7 +1395,7 @@ static int check_extents(struct bch_fs *c)
snapshots_seen_exit(&s);
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1434,7 +1434,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
}
fsck_err:
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
if (!ret && trans_was_restarted(trans, restart_count))
ret = -BCH_ERR_transaction_restart_nested;
return ret;
@@ -1555,7 +1555,7 @@ static int check_dirent_target(struct btree_trans *trans,
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1725,7 +1725,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1764,7 +1764,7 @@ static int check_dirents(struct bch_fs *c)
inode_walker_exit(&target);
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1801,7 +1801,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
fsck_err:
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1833,7 +1833,7 @@ static int check_xattrs(struct bch_fs *c)
bch2_trans_exit(&trans);
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1896,12 +1896,18 @@ static int check_root_trans(struct btree_trans *trans)
noinline_for_stack
static int check_root(struct bch_fs *c)
{
int ret;
bch_verbose(c, "checking root directory");
return bch2_trans_do(c, NULL, NULL,
ret = bch2_trans_do(c, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
check_root_trans(&trans));
if (ret)
bch_err_fn(c, ret);
return ret;
}
struct pathbuf_entry {
@@ -2038,7 +2044,7 @@ static int check_path(struct btree_trans *trans,
}
fsck_err:
if (ret)
bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -2081,10 +2087,11 @@ static int check_directory_structure(struct bch_fs *c)
break;
}
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
darray_exit(&path);
bch2_trans_exit(&trans);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -2364,6 +2371,8 @@ static int check_nlinks(struct bch_fs *c)
kvfree(links.d);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -2397,7 +2406,6 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
noinline_for_stack
static int fix_reflink_p(struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
@@ -2407,15 +2415,16 @@ static int fix_reflink_p(struct bch_fs *c)
bch_verbose(c, "fixing reflink_p keys");
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
ret = for_each_btree_key_commit(&trans, iter,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
fix_reflink_p_key(&trans, &iter, k));
ret = bch2_trans_run(c,
for_each_btree_key_commit(&trans, iter,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
fix_reflink_p_key(&trans, &iter, k)));
bch2_trans_exit(&trans);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -978,7 +978,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
}
if (ret)
bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
unlock:
up_write(&c->state_lock);
return ret;
@@ -987,9 +987,12 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
int bch2_dev_journal_alloc(struct bch_dev *ca)
{
unsigned nr;
int ret;
if (dynamic_fault("bcachefs:add:journal_alloc"))
return -BCH_ERR_ENOMEM_set_nr_journal_buckets;
if (dynamic_fault("bcachefs:add:journal_alloc")) {
ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
goto err;
}
/* 1/128th of the device by default: */
nr = ca->mi.nbuckets >> 7;
@@ -1003,7 +1006,11 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
if (ret)
bch_err_fn(ca, ret);
return ret;
}
/* startup/shutdown: */
@@ -160,20 +160,18 @@ static int bch2_check_lru_key(struct btree_trans *trans,
int bch2_check_lrus(struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bpos last_flushed_pos = POS_MIN;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
ret = for_each_btree_key_commit(&trans, iter,
BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
bch2_check_lru_key(&trans, &iter, k, &last_flushed_pos));
bch2_trans_exit(&trans);
ret = bch2_trans_run(c,
for_each_btree_key_commit(&trans, iter,
BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
bch2_check_lru_key(&trans, &iter, k, &last_flushed_pos)));
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -690,7 +690,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
bch2_trans_iter_exit(trans, &iter);
if (ret) {
bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "looking up alloc key");
goto err;
}
@@ -701,7 +701,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
ret = bch2_btree_write_buffer_flush(trans);
if (ret) {
bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "flushing btree write buffer");
goto err;
}
@@ -904,7 +904,7 @@ static int bch2_move_btree(struct bch_fs *c,
bch2_trans_exit(&trans);
if (ret)
bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
bch2_btree_interior_updates_flush(c);
@@ -1029,6 +1029,8 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
mutex_unlock(&c->sb_lock);
}
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -621,10 +621,11 @@ int bch2_fs_quota_read(struct bch_fs *c)
for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
bch2_fs_quota_read_inode(&trans, &iter, k));
if (ret)
bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
bch2_trans_exit(&trans);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -623,7 +623,7 @@ int bch2_fs_check_snapshots(struct bch_fs *c)
NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_snapshot(&trans, &iter, k)));
if (ret)
bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -702,8 +702,7 @@ int bch2_fs_check_subvols(struct bch_fs *c)
NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_subvol(&trans, &iter, k)));
if (ret)
bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -724,7 +723,7 @@ int bch2_fs_snapshots_start(struct bch_fs *c)
bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
bch2_snapshot_set_equiv(&trans, k)));
if (ret)
bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -1123,6 +1122,8 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
err:
darray_exit(&deleted);
bch2_trans_exit(&trans);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@@ -47,7 +47,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, &k.k_i, 0));
if (ret) {
bch_err(c, "%s(): update error in: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "update error");
goto err;
}
@@ -56,7 +56,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
bch_err(c, "%s(): delete error (first): %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "delete error (first)");
goto err;
}
@@ -65,7 +65,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
bch_err(c, "%s(): delete error (second): %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "delete error (second)");
goto err;
}
err:
@@ -93,7 +93,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, &k.k_i, 0));
if (ret) {
bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "update error");
goto err;
}
@@ -104,7 +104,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
bch_err(c, "%s(): delete error: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "delete error");
goto err;
}
err:
@@ -137,7 +137,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "insert error");
goto err;
}
}
@@ -153,7 +153,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
0;
}));
if (ret) {
bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
@@ -168,7 +168,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
0;
}));
if (ret) {
bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating backwards");
goto err;
}
@@ -204,7 +204,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "insert error");
goto err;
}
}
@@ -221,7 +221,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
0;
}));
if (ret) {
bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
@@ -237,7 +237,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
0;
}));
if (ret) {
bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating backwards");
goto err;
}
@@ -272,7 +272,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "insert error");
goto err;
}
}
@@ -289,7 +289,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
0;
}));
if (ret) {
bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
@@ -312,7 +312,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
0;
}));
if (ret < 0) {
bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating forwards by slots");
goto err;
}
ret = 0;
@@ -346,7 +346,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "insert error");
goto err;
}
}
@@ -364,7 +364,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
0;
}));
if (ret) {
bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating forwards");
goto err;
}
@@ -387,7 +387,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
0;
}));
if (ret) {
bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "error iterating forwards by slots");
goto err;
}
ret = 0;
@@ -461,7 +461,7 @@ static int insert_test_extent(struct bch_fs *c,
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret)
bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
bch_err_fn(c, ret);
return ret;
}
@@ -560,7 +560,7 @@ static int test_snapshots(struct bch_fs *c, u64 nr)
ret = test_snapshot_filter(c, snapids[0], snapids[1]);
if (ret) {
bch_err(c, "%s(): err from test_snapshot_filter: %s", __func__, bch2_err_str(ret));
bch_err_msg(c, ret, "from test_snapshot_filter");
return ret;
}
@@ -674,7 +674,7 @@ static int rand_mixed_trans(struct btree_trans *trans,
k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(trans->c, "%s(): lookup error: %s", __func__, bch2_err_str(ret));
bch_err_msg(trans->c, ret, "lookup error");
if (ret)
return ret;