Commit 2158fe46 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: bch2_trans_inconsistent()

Add a new error macro that, in addition to doing an emergency shutdown,
also dumps the transaction's pending updates - when a transaction update
discovers or is causing a fs inconsistency, it's helpful to see what
updates it was doing.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 0576ba9a
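
For orientation before the diff: bch2_trans_inconsistent_on() is meant to be used directly in an if, so a trigger can test an invariant and, on failure, get the error message, the emergency shutdown and a dump of the transaction's pending updates in one call, then bail out with -EIO (bch2_mark_alloc() below does exactly that). The following is a minimal standalone sketch of the same statement-expression pattern, compilable with GCC/Clang; txn, dump_pending_updates() and txn_inconsistent_on() are hypothetical stand-ins, not the bcachefs API:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

/* Hypothetical stand-ins for the transaction and for bch2_dump_trans_updates(). */
struct txn_update { const char *btree, *old_k, *new_k; };
struct txn { const struct txn_update *updates; unsigned nr; };

static void dump_pending_updates(const struct txn *t)
{
	unsigned i;

	for (i = 0; i < t->nr; i++)
		fprintf(stderr, "update: btree %s\n  old %s\n  new %s\n",
			t->updates[i].btree, t->updates[i].old_k, t->updates[i].new_k);
}

/*
 * Same shape as bch2_trans_inconsistent_on(): evaluate the condition and, on
 * failure, log the message and dump the pending updates, then hand the
 * condition back so the caller can return an error.
 */
#define txn_inconsistent_on(cond, t, ...)				\
({									\
	bool _ret = !!(cond);						\
									\
	if (_ret) {							\
		fprintf(stderr, __VA_ARGS__);				\
		fputc('\n', stderr);					\
		dump_pending_updates(t);				\
	}								\
	_ret;								\
})

/* Caller pattern, mirroring the bch2_mark_alloc() change below. */
static int mark_bucket(const struct txn *t, unsigned bucket, unsigned nbuckets)
{
	if (txn_inconsistent_on(bucket >= nbuckets, t,
				"alloc key outside range of device's buckets (%u >= %u)",
				bucket, nbuckets))
		return -EIO;
	return 0;
}

int main(void)
{
	struct txn_update u = { "alloc", "gen 1", "gen 2" };
	struct txn t = { &u, 1 };

	/* expected to fail the check, print the update, and return -EIO */
	return mark_bucket(&t, 1000, 512) == -EIO ? 0 : 1;
}
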
@@ -1794,19 +1794,44 @@ void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool inte
}
noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
void bch2_dump_trans_updates(struct btree_trans *trans)
{
struct btree_path *path;
struct btree_insert_entry *i;
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
bch_err(trans->c, "transaction updates:");
trans_for_each_update(trans, i) {
struct bkey_s_c old = { &i->old_k, i->old_v };
printbuf_reset(&buf1);
printbuf_reset(&buf2);
bch2_bkey_val_to_text(&buf1, trans->c, old);
bch2_bkey_val_to_text(&buf2, trans->c, bkey_i_to_s_c(i->k));
printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
bch2_btree_ids[i->btree_id],
(void *) i->ip_allocated,
buf1.buf, buf2.buf);
}
printbuf_exit(&buf2);
printbuf_exit(&buf1);
}
noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
struct btree_path *path;
struct printbuf buf = PRINTBUF;
unsigned idx;
btree_trans_sort_paths(trans);
trans_for_each_path_inorder(trans, path, idx) {
printbuf_reset(&buf1);
printbuf_reset(&buf);
bch2_bpos_to_text(&buf1, path->pos);
bch2_bpos_to_text(&buf, path->pos);
printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
path->idx, path->ref, path->intent_ref,
@@ -1814,7 +1839,7 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans)
path->preserve ? " P" : "",
bch2_btree_ids[path->btree_id],
path->level,
buf1.buf,
buf.buf,
path->nodes_locked,
#ifdef CONFIG_BCACHEFS_DEBUG
(void *) path->ip_allocated
@@ -1824,23 +1849,9 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans)
);
}
trans_for_each_update(trans, i) {
struct bkey u;
struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
printbuf_exit(&buf);
printbuf_reset(&buf1);
printbuf_reset(&buf2);
bch2_bkey_val_to_text(&buf1, trans->c, old);
bch2_bkey_val_to_text(&buf2, trans->c, bkey_i_to_s_c(i->k));
printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
bch2_btree_ids[i->btree_id],
(void *) i->ip_allocated,
buf1.buf, buf2.buf);
}
printbuf_exit(&buf2);
printbuf_exit(&buf1);
bch2_dump_trans_updates(trans);
}
static struct btree_path *btree_path_alloc(struct btree_trans *trans,
......
@@ -425,6 +425,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
/* new multiple iterator interface: */
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);
void __bch2_trans_init(struct btree_trans *, struct bch_fs *,
unsigned, size_t, const char *);
......
@@ -510,11 +510,16 @@ static int bch2_mark_alloc(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct bkey_alloc_unpacked old_u = bch2_alloc_unpack(old);
struct bkey_alloc_unpacked new_u = bch2_alloc_unpack(new);
struct bch_dev *ca;
struct bch_dev *ca = bch_dev_bkey_exists(c, new_u.dev);
struct bucket *g;
struct bucket_mark old_m, m;
int ret = 0;
if (bch2_trans_inconsistent_on(new_u.bucket < ca->mi.first_bucket ||
new_u.bucket >= ca->mi.nbuckets, trans,
"alloc key outside range of device's buckets"))
return -EIO;
/*
* alloc btree is read in by bch2_alloc_read, not gc:
*/
@@ -554,11 +559,6 @@ static int bch2_mark_alloc(struct btree_trans *trans,
}
}
ca = bch_dev_bkey_exists(c, new_u.dev);
if (new_u.bucket >= ca->mi.nbuckets)
return 0;
percpu_down_read(&c->mark_lock);
if (!gc && new_u.gen != old_u.gen)
*bucket_gen(ca, new_u.bucket) = new_u.gen;
@@ -1466,7 +1466,6 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_i_stripe *s;
@@ -1482,16 +1481,15 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
goto err;
if (k.k->type != KEY_TYPE_stripe) {
bch2_fs_inconsistent(c,
bch2_trans_inconsistent(trans,
"pointer to nonexistent stripe %llu",
(u64) p.ec.idx);
bch2_inconsistent_error(c);
ret = -EIO;
goto err;
}
if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
bch2_fs_inconsistent(c,
bch2_trans_inconsistent(trans,
"stripe pointer doesn't match stripe %llu",
(u64) p.ec.idx);
ret = -EIO;
@@ -1605,8 +1603,8 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
goto err;
if (!deleting) {
if (bch2_fs_inconsistent_on(u.stripe ||
u.stripe_redundancy, c,
if (bch2_trans_inconsistent_on(u.stripe ||
u.stripe_redundancy, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
iter.pos.inode, iter.pos.offset, u.gen,
bch2_data_types[u.data_type],
@@ -1616,7 +1614,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
goto err;
}
if (bch2_fs_inconsistent_on(data_type && u.dirty_sectors, c,
if (bch2_trans_inconsistent_on(data_type && u.dirty_sectors, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
iter.pos.inode, iter.pos.offset, u.gen,
bch2_data_types[u.data_type],
@@ -1629,8 +1627,8 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
u.stripe = s.k->p.offset;
u.stripe_redundancy = s.v->nr_redundant;
} else {
if (bch2_fs_inconsistent_on(u.stripe != s.k->p.offset ||
u.stripe_redundancy != s.v->nr_redundant, c,
if (bch2_trans_inconsistent_on(u.stripe != s.k->p.offset ||
u.stripe_redundancy != s.v->nr_redundant, trans,
"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
iter.pos.inode, iter.pos.offset, u.gen,
s.k->p.offset, u.stripe)) {
@@ -1791,7 +1789,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
refcount = bkey_refcount(n);
if (!refcount) {
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c,
bch2_trans_inconsistent(trans,
"nonexistent indirect extent at %llu while marking\n %s",
*idx, buf.buf);
ret = -EIO;
@@ -1800,7 +1798,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c,
bch2_trans_inconsistent(trans,
"indirect extent refcount underflow at %llu while marking\n %s",
*idx, buf.buf);
ret = -EIO;
......
@@ -66,6 +66,26 @@ do { \
_ret; \
})
/*
* When a transaction update discovers or is causing a fs inconsistency, it's
* helpful to also dump the pending updates:
*/
#define bch2_trans_inconsistent(trans, ...) \
({ \
bch_err(trans->c, __VA_ARGS__); \
bch2_inconsistent_error(trans->c); \
bch2_dump_trans_updates(trans); \
})
#define bch2_trans_inconsistent_on(cond, trans, ...) \
({ \
bool _ret = unlikely(!!(cond)); \
\
if (_ret) \
bch2_trans_inconsistent(trans, __VA_ARGS__); \
_ret; \
})
/*
* Fsck errors: inconsistency errors we detect at mount time, and should ideally
* be able to repair:
......
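
A closing note on the converted call sites: the stripe and reflink triggers above all share one error-path shape - format the offending key into a printbuf, report the inconsistency (which now also shuts the filesystem down and dumps the pending updates), set -EIO, and jump to a common cleanup label. A compact standalone sketch of that shape follows; keybuf, report_trans_inconsistent() and mark_indirect_extent() are hypothetical names, not the real bcachefs helpers:

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-ins for printbuf and for the bch2_trans_inconsistent() call. */
struct keybuf { char s[128]; };

static void report_trans_inconsistent(const char *msg, const char *key)
{
	fprintf(stderr, "inconsistency: %s\n  %s\n", msg, key);
	/* the real macro would also call bch2_inconsistent_error() and dump the pending updates */
}

static int mark_indirect_extent(unsigned long long idx, long refcount)
{
	struct keybuf buf;
	int ret = 0;

	snprintf(buf.s, sizeof(buf.s), "reflink_p idx %llu refcount %ld", idx, refcount);

	if (refcount < 0) {
		/* refcount underflow: report the inconsistency and fail the update */
		report_trans_inconsistent("indirect extent refcount underflow while marking", buf.s);
		ret = -EIO;
		goto err;
	}
err:
	/* shared cleanup: in bcachefs this is where the iterator and printbuf are released */
	return ret;
}

int main(void)
{
	return mark_indirect_extent(42, -1) == -EIO ? 0 : 1;
}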