Commit fa8e94fa authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Heap allocate printbufs

This patch changes printbufs to dynamically allocate and reallocate a
buffer as needed. Stack usage has become a bit of a problem, and a major
cause of that has been statically sized string buffers on the stack.

The most involved part of this refactoring is that printbufs must now be
exited with printbuf_exit().
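
In rough outline, the new calling convention looks like this (a sketch
using helpers that appear elsewhere in this patch, not a complete
function):

  struct printbuf buf = PRINTBUF;

  bch2_bkey_val_to_text(&buf, c, k);    /* appends, growing buf.buf as needed */
  pr_err("%s", buf.buf);                /* the formatted string lives in buf.buf */

  printbuf_reset(&buf);                 /* reuse the same allocation */
  bch2_bpos_to_text(&buf, pos);
  pr_err("%s", buf.buf);

  printbuf_exit(&buf);                  /* new requirement: frees the heap buffer */
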
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 2be7b16e
...@@ -57,11 +57,12 @@ static void bch2_bkey_pack_verify(const struct bkey_packed *packed, ...@@ -57,11 +57,12 @@ static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
tmp = __bch2_bkey_unpack_key(format, packed); tmp = __bch2_bkey_unpack_key(format, packed);
if (memcmp(&tmp, unpacked, sizeof(struct bkey))) { if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
char buf1[160], buf2[160]; struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
char buf3[160], buf4[160]; char buf3[160], buf4[160];
bch2_bkey_to_text(&PBUF(buf1), unpacked); bch2_bkey_to_text(&buf1, unpacked);
bch2_bkey_to_text(&PBUF(buf2), &tmp); bch2_bkey_to_text(&buf2, &tmp);
bch2_to_binary(buf3, (void *) unpacked, 80); bch2_to_binary(buf3, (void *) unpacked, 80);
bch2_to_binary(buf4, high_word(format, packed), 80); bch2_to_binary(buf4, high_word(format, packed), 80);
...@@ -72,7 +73,7 @@ static void bch2_bkey_pack_verify(const struct bkey_packed *packed, ...@@ -72,7 +73,7 @@ static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
format->bits_per_field[2], format->bits_per_field[2],
format->bits_per_field[3], format->bits_per_field[3],
format->bits_per_field[4], format->bits_per_field[4],
buf1, buf2, buf3, buf4); buf1.buf, buf2.buf, buf3, buf4);
} }
} }
......
...@@ -58,7 +58,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b, ...@@ -58,7 +58,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
struct bkey_packed *_k, *_n; struct bkey_packed *_k, *_n;
struct bkey uk, n; struct bkey uk, n;
struct bkey_s_c k; struct bkey_s_c k;
char buf[200]; struct printbuf buf = PRINTBUF;
if (!i->u64s) if (!i->u64s)
return; return;
...@@ -69,12 +69,14 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b, ...@@ -69,12 +69,14 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
_n = bkey_next(_k); _n = bkey_next(_k);
k = bkey_disassemble(b, _k, &uk); k = bkey_disassemble(b, _k, &uk);
printbuf_reset(&buf);
if (c) if (c)
bch2_bkey_val_to_text(&PBUF(buf), c, k); bch2_bkey_val_to_text(&buf, c, k);
else else
bch2_bkey_to_text(&PBUF(buf), k.k); bch2_bkey_to_text(&buf, k.k);
printk(KERN_ERR "block %u key %5zu: %s\n", set, printk(KERN_ERR "block %u key %5zu: %s\n", set,
_k->_data - i->_data, buf); _k->_data - i->_data, buf.buf);
if (_n == vstruct_last(i)) if (_n == vstruct_last(i))
continue; continue;
...@@ -90,6 +92,8 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b, ...@@ -90,6 +92,8 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
!bpos_cmp(n.p, k.k->p)) !bpos_cmp(n.p, k.k->p))
printk(KERN_ERR "Duplicate keys\n"); printk(KERN_ERR "Duplicate keys\n");
} }
printbuf_exit(&buf);
} }
void bch2_dump_btree_node(struct bch_fs *c, struct btree *b) void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
...@@ -106,6 +110,7 @@ void bch2_dump_btree_node_iter(struct btree *b, ...@@ -106,6 +110,7 @@ void bch2_dump_btree_node_iter(struct btree *b,
struct btree_node_iter *iter) struct btree_node_iter *iter)
{ {
struct btree_node_iter_set *set; struct btree_node_iter_set *set;
struct printbuf buf = PRINTBUF;
printk(KERN_ERR "btree node iter with %u/%u sets:\n", printk(KERN_ERR "btree node iter with %u/%u sets:\n",
__btree_node_iter_used(iter), b->nsets); __btree_node_iter_used(iter), b->nsets);
...@@ -114,12 +119,14 @@ void bch2_dump_btree_node_iter(struct btree *b, ...@@ -114,12 +119,14 @@ void bch2_dump_btree_node_iter(struct btree *b,
struct bkey_packed *k = __btree_node_offset_to_key(b, set->k); struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
struct bset_tree *t = bch2_bkey_to_bset(b, k); struct bset_tree *t = bch2_bkey_to_bset(b, k);
struct bkey uk = bkey_unpack_key(b, k); struct bkey uk = bkey_unpack_key(b, k);
char buf[100];
bch2_bkey_to_text(&PBUF(buf), &uk); printbuf_reset(&buf);
bch2_bkey_to_text(&buf, &uk);
printk(KERN_ERR "set %zu key %u: %s\n", printk(KERN_ERR "set %zu key %u: %s\n",
t - b->set, set->k, buf); t - b->set, set->k, buf.buf);
} }
printbuf_exit(&buf);
} }
#ifdef CONFIG_BCACHEFS_DEBUG #ifdef CONFIG_BCACHEFS_DEBUG
...@@ -155,13 +162,14 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter, ...@@ -155,13 +162,14 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
struct btree_node_iter_set *set; struct btree_node_iter_set *set;
struct bkey ku = bkey_unpack_key(b, k); struct bkey ku = bkey_unpack_key(b, k);
struct bkey nu = bkey_unpack_key(b, n); struct bkey nu = bkey_unpack_key(b, n);
char buf1[80], buf2[80]; struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
bch2_dump_btree_node(NULL, b); bch2_dump_btree_node(NULL, b);
bch2_bkey_to_text(&PBUF(buf1), &ku); bch2_bkey_to_text(&buf1, &ku);
bch2_bkey_to_text(&PBUF(buf2), &nu); bch2_bkey_to_text(&buf2, &nu);
printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n", printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
buf1, buf2); buf1.buf, buf2.buf);
printk(KERN_ERR "iter was:"); printk(KERN_ERR "iter was:");
btree_node_iter_for_each(_iter, set) { btree_node_iter_for_each(_iter, set) {
...@@ -226,6 +234,8 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, ...@@ -226,6 +234,8 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
struct bset_tree *t = bch2_bkey_to_bset(b, where); struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where); struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
struct bkey_packed *next = (void *) (where->_data + clobber_u64s); struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
#if 0 #if 0
BUG_ON(prev && BUG_ON(prev &&
bkey_iter_cmp(b, prev, insert) > 0); bkey_iter_cmp(b, prev, insert) > 0);
...@@ -234,17 +244,15 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, ...@@ -234,17 +244,15 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
bkey_iter_cmp(b, prev, insert) > 0) { bkey_iter_cmp(b, prev, insert) > 0) {
struct bkey k1 = bkey_unpack_key(b, prev); struct bkey k1 = bkey_unpack_key(b, prev);
struct bkey k2 = bkey_unpack_key(b, insert); struct bkey k2 = bkey_unpack_key(b, insert);
char buf1[100];
char buf2[100];
bch2_dump_btree_node(NULL, b); bch2_dump_btree_node(NULL, b);
bch2_bkey_to_text(&PBUF(buf1), &k1); bch2_bkey_to_text(&buf1, &k1);
bch2_bkey_to_text(&PBUF(buf2), &k2); bch2_bkey_to_text(&buf2, &k2);
panic("prev > insert:\n" panic("prev > insert:\n"
"prev key %s\n" "prev key %s\n"
"insert key %s\n", "insert key %s\n",
buf1, buf2); buf1.buf, buf2.buf);
} }
#endif #endif
#if 0 #if 0
...@@ -255,17 +263,15 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, ...@@ -255,17 +263,15 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
bkey_iter_cmp(b, insert, next) > 0) { bkey_iter_cmp(b, insert, next) > 0) {
struct bkey k1 = bkey_unpack_key(b, insert); struct bkey k1 = bkey_unpack_key(b, insert);
struct bkey k2 = bkey_unpack_key(b, next); struct bkey k2 = bkey_unpack_key(b, next);
char buf1[100];
char buf2[100];
bch2_dump_btree_node(NULL, b); bch2_dump_btree_node(NULL, b);
bch2_bkey_to_text(&PBUF(buf1), &k1); bch2_bkey_to_text(&buf1, &k1);
bch2_bkey_to_text(&PBUF(buf2), &k2); bch2_bkey_to_text(&buf2, &k2);
panic("insert > next:\n" panic("insert > next:\n"
"insert key %s\n" "insert key %s\n"
"next key %s\n", "next key %s\n",
buf1, buf2); buf1.buf, buf2.buf);
} }
#endif #endif
} }
...@@ -1555,9 +1561,6 @@ void bch2_bfloat_to_text(struct printbuf *out, struct btree *b, ...@@ -1555,9 +1561,6 @@ void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
struct bkey uk; struct bkey uk;
unsigned j, inorder; unsigned j, inorder;
if (out->pos != out->end)
*out->pos = '\0';
if (!bset_has_ro_aux_tree(t)) if (!bset_has_ro_aux_tree(t))
return; return;
......
...@@ -742,14 +742,16 @@ static int lock_node_check_fn(struct six_lock *lock, void *p) ...@@ -742,14 +742,16 @@ static int lock_node_check_fn(struct six_lock *lock, void *p)
static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
{ {
char buf1[200], buf2[100], buf3[100]; struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
struct printbuf buf3 = PRINTBUF;
if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
return; return;
bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&b->key)); bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&b->key));
bch2_bpos_to_text(&PBUF(buf2), b->data->min_key); bch2_bpos_to_text(&buf2, b->data->min_key);
bch2_bpos_to_text(&PBUF(buf3), b->data->max_key); bch2_bpos_to_text(&buf3, b->data->max_key);
bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n" bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n"
"btree %s level %u\n" "btree %s level %u\n"
...@@ -757,10 +759,14 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) ...@@ -757,10 +759,14 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
"header: btree %s level %llu\n" "header: btree %s level %llu\n"
"min %s max %s\n", "min %s max %s\n",
bch2_btree_ids[b->c.btree_id], b->c.level, bch2_btree_ids[b->c.btree_id], b->c.level,
buf1, buf1.buf,
bch2_btree_ids[BTREE_NODE_ID(b->data)], bch2_btree_ids[BTREE_NODE_ID(b->data)],
BTREE_NODE_LEVEL(b->data), BTREE_NODE_LEVEL(b->data),
buf2, buf3); buf2.buf, buf3.buf);
printbuf_exit(&buf3);
printbuf_exit(&buf2);
printbuf_exit(&buf1);
} }
static inline void btree_check_header(struct bch_fs *c, struct btree *b) static inline void btree_check_header(struct bch_fs *c, struct btree *b)
......
...@@ -574,7 +574,9 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans, ...@@ -574,7 +574,9 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
struct btree_node_iter tmp; struct btree_node_iter tmp;
bool locked; bool locked;
struct bkey_packed *p, *k; struct bkey_packed *p, *k;
char buf1[100], buf2[100], buf3[100]; struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
struct printbuf buf3 = PRINTBUF;
const char *msg; const char *msg;
if (!bch2_debug_check_iterators) if (!bch2_debug_check_iterators)
...@@ -622,26 +624,27 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans, ...@@ -622,26 +624,27 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
btree_node_unlock(path, level); btree_node_unlock(path, level);
return; return;
err: err:
strcpy(buf2, "(none)"); bch2_bpos_to_text(&buf1, path->pos);
strcpy(buf3, "(none)");
bch2_bpos_to_text(&PBUF(buf1), path->pos);
if (p) { if (p) {
struct bkey uk = bkey_unpack_key(l->b, p); struct bkey uk = bkey_unpack_key(l->b, p);
bch2_bkey_to_text(&PBUF(buf2), &uk); bch2_bkey_to_text(&buf2, &uk);
} else {
pr_buf(&buf2, "(none)");
} }
if (k) { if (k) {
struct bkey uk = bkey_unpack_key(l->b, k); struct bkey uk = bkey_unpack_key(l->b, k);
bch2_bkey_to_text(&PBUF(buf3), &uk); bch2_bkey_to_text(&buf3, &uk);
} else {
pr_buf(&buf3, "(none)");
} }
panic("path should be %s key at level %u:\n" panic("path should be %s key at level %u:\n"
"path pos %s\n" "path pos %s\n"
"prev key %s\n" "prev key %s\n"
"cur key %s\n", "cur key %s\n",
msg, level, buf1, buf2, buf3); msg, level, buf1.buf, buf2.buf, buf3.buf);
} }
static void bch2_btree_path_verify(struct btree_trans *trans, static void bch2_btree_path_verify(struct btree_trans *trans,
...@@ -739,16 +742,16 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k ...@@ -739,16 +742,16 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
if (!bkey_cmp(prev.k->p, k.k->p) && if (!bkey_cmp(prev.k->p, k.k->p) &&
bch2_snapshot_is_ancestor(trans->c, iter->snapshot, bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
prev.k->p.snapshot) > 0) { prev.k->p.snapshot) > 0) {
char buf1[100], buf2[200]; struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
bch2_bkey_to_text(&PBUF(buf1), k.k); bch2_bkey_to_text(&buf1, k.k);
bch2_bkey_to_text(&PBUF(buf2), prev.k); bch2_bkey_to_text(&buf2, prev.k);
panic("iter snap %u\n" panic("iter snap %u\n"
"k %s\n" "k %s\n"
"prev %s\n", "prev %s\n",
iter->snapshot, iter->snapshot,
buf1, buf2); buf1.buf, buf2.buf);
} }
out: out:
bch2_trans_iter_exit(trans, &copy); bch2_trans_iter_exit(trans, &copy);
...@@ -760,7 +763,7 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, ...@@ -760,7 +763,7 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
{ {
struct btree_path *path; struct btree_path *path;
unsigned idx; unsigned idx;
char buf[100]; struct printbuf buf = PRINTBUF;
trans_for_each_path_inorder(trans, path, idx) { trans_for_each_path_inorder(trans, path, idx) {
int cmp = cmp_int(path->btree_id, id) ?: int cmp = cmp_int(path->btree_id, id) ?:
...@@ -786,9 +789,10 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, ...@@ -786,9 +789,10 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
} }
bch2_dump_trans_paths_updates(trans); bch2_dump_trans_paths_updates(trans);
bch2_bpos_to_text(&buf, pos);
panic("not locked: %s %s%s\n", panic("not locked: %s %s%s\n",
bch2_btree_ids[id], bch2_btree_ids[id], buf.buf,
(bch2_bpos_to_text(&PBUF(buf), pos), buf),
key_cache ? " cached" : ""); key_cache ? " cached" : "");
} }
...@@ -1071,23 +1075,23 @@ static void btree_path_verify_new_node(struct btree_trans *trans, ...@@ -1071,23 +1075,23 @@ static void btree_path_verify_new_node(struct btree_trans *trans,
if (!k || if (!k ||
bkey_deleted(k) || bkey_deleted(k) ||
bkey_cmp_left_packed(l->b, k, &b->key.k.p)) { bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
char buf1[100]; struct printbuf buf1 = PRINTBUF;
char buf2[100]; struct printbuf buf2 = PRINTBUF;
char buf3[100]; struct printbuf buf3 = PRINTBUF;
char buf4[100]; struct printbuf buf4 = PRINTBUF;
struct bkey uk = bkey_unpack_key(b, k); struct bkey uk = bkey_unpack_key(b, k);
bch2_dump_btree_node(c, l->b); bch2_dump_btree_node(c, l->b);
bch2_bpos_to_text(&PBUF(buf1), path->pos); bch2_bpos_to_text(&buf1, path->pos);
bch2_bkey_to_text(&PBUF(buf2), &uk); bch2_bkey_to_text(&buf2, &uk);
bch2_bpos_to_text(&PBUF(buf3), b->data->min_key); bch2_bpos_to_text(&buf3, b->data->min_key);
bch2_bpos_to_text(&PBUF(buf3), b->data->max_key); bch2_bpos_to_text(&buf3, b->data->max_key);
panic("parent iter doesn't point to new node:\n" panic("parent iter doesn't point to new node:\n"
"iter pos %s %s\n" "iter pos %s %s\n"
"iter key %s\n" "iter key %s\n"
"new node %s-%s\n", "new node %s-%s\n",
bch2_btree_ids[path->btree_id], buf1, bch2_btree_ids[path->btree_id],
buf2, buf3, buf4); buf1.buf, buf2.buf, buf3.buf, buf4.buf);
} }
if (!parent_locked) if (!parent_locked)
...@@ -1783,18 +1787,22 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans) ...@@ -1783,18 +1787,22 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{ {
struct btree_path *path; struct btree_path *path;
struct btree_insert_entry *i; struct btree_insert_entry *i;
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
unsigned idx; unsigned idx;
char buf1[300], buf2[300];
btree_trans_sort_paths(trans); btree_trans_sort_paths(trans);
trans_for_each_path_inorder(trans, path, idx) trans_for_each_path_inorder(trans, path, idx) {
printbuf_reset(&buf1);
bch2_bpos_to_text(&buf1, path->pos);
printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n", printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n",
path->idx, path->ref, path->intent_ref, path->idx, path->ref, path->intent_ref,
path->should_be_locked ? " S" : "", path->should_be_locked ? " S" : "",
path->preserve ? " P" : "", path->preserve ? " P" : "",
bch2_btree_ids[path->btree_id], bch2_btree_ids[path->btree_id],
(bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1), buf1.buf,
path->nodes_locked, path->nodes_locked,
#ifdef CONFIG_BCACHEFS_DEBUG #ifdef CONFIG_BCACHEFS_DEBUG
(void *) path->ip_allocated (void *) path->ip_allocated
...@@ -1802,17 +1810,25 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans) ...@@ -1802,17 +1810,25 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans)
NULL NULL
#endif #endif
); );
}
trans_for_each_update(trans, i) { trans_for_each_update(trans, i) {
struct bkey u; struct bkey u;
struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u); struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
printbuf_reset(&buf1);
printbuf_reset(&buf2);
bch2_bkey_val_to_text(&buf1, trans->c, old);
bch2_bkey_val_to_text(&buf2, trans->c, bkey_i_to_s_c(i->k));
printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s", printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
bch2_btree_ids[i->btree_id], bch2_btree_ids[i->btree_id],
(void *) i->ip_allocated, (void *) i->ip_allocated,
(bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1), buf1.buf, buf2.buf);
(bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2));
} }
printbuf_exit(&buf2);
printbuf_exit(&buf1);
} }
static struct btree_path *btree_path_alloc(struct btree_trans *trans, static struct btree_path *btree_path_alloc(struct btree_trans *trans,
......
...@@ -41,7 +41,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) ...@@ -41,7 +41,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
struct bkey_s_c k; struct bkey_s_c k;
struct bkey_s_c_btree_ptr_v2 bp; struct bkey_s_c_btree_ptr_v2 bp;
struct bkey unpacked; struct bkey unpacked;
char buf1[100], buf2[100]; struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
BUG_ON(!b->c.level); BUG_ON(!b->c.level);
...@@ -58,9 +58,9 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) ...@@ -58,9 +58,9 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
if (bpos_cmp(next_node, bp.v->min_key)) { if (bpos_cmp(next_node, bp.v->min_key)) {
bch2_dump_btree_node(c, b); bch2_dump_btree_node(c, b);
panic("expected next min_key %s got %s\n", bch2_bpos_to_text(&buf1, next_node);
(bch2_bpos_to_text(&PBUF(buf1), next_node), buf1), bch2_bpos_to_text(&buf2, bp.v->min_key);
(bch2_bpos_to_text(&PBUF(buf2), bp.v->min_key), buf2)); panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf);
} }
bch2_btree_node_iter_advance(&iter, b); bch2_btree_node_iter_advance(&iter, b);
...@@ -68,9 +68,9 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) ...@@ -68,9 +68,9 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
if (bch2_btree_node_iter_end(&iter)) { if (bch2_btree_node_iter_end(&iter)) {
if (bpos_cmp(k.k->p, b->key.k.p)) { if (bpos_cmp(k.k->p, b->key.k.p)) {
bch2_dump_btree_node(c, b); bch2_dump_btree_node(c, b);
panic("expected end %s got %s\n", bch2_bpos_to_text(&buf1, b->key.k.p);
(bch2_bpos_to_text(&PBUF(buf1), b->key.k.p), buf1), bch2_bpos_to_text(&buf2, k.k->p);
(bch2_bpos_to_text(&PBUF(buf2), k.k->p), buf2)); panic("expected end %s got %s\n", buf1.buf, buf2.buf);
} }
break; break;
} }
...@@ -1151,10 +1151,11 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, ...@@ -1151,10 +1151,11 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?: invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?:
bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert)); bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert));
if (invalid) { if (invalid) {
char buf[160]; struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert)); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
bch2_fs_inconsistent(c, "inserting invalid bkey %s: %s", buf, invalid); bch2_fs_inconsistent(c, "inserting invalid bkey %s: %s", buf.buf, invalid);
printbuf_exit(&buf);
dump_stack(); dump_stack();
} }
...@@ -1636,15 +1637,17 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, ...@@ -1636,15 +1637,17 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
} }
if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) { if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
char buf1[100], buf2[100]; struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
bch2_bpos_to_text(&PBUF(buf1), prev->data->max_key); bch2_bpos_to_text(&buf1, prev->data->max_key);
bch2_bpos_to_text(&PBUF(buf2), next->data->min_key); bch2_bpos_to_text(&buf2, next->data->min_key);
bch_err(c, bch_err(c,
"btree topology error in btree merge:\n" "btree topology error in btree merge:\n"
" prev ends at %s\n" " prev ends at %s\n"
" next starts at %s", " next starts at %s",
buf1, buf2); buf1.buf, buf2.buf);
printbuf_exit(&buf1);
printbuf_exit(&buf2);
bch2_topology_error(c); bch2_topology_error(c);
ret = -EIO; ret = -EIO;
goto err; goto err;
......
...@@ -831,11 +831,12 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, ...@@ -831,11 +831,12 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
const char *invalid = bch2_bkey_invalid(c, const char *invalid = bch2_bkey_invalid(c,
bkey_i_to_s_c(i->k), i->bkey_type); bkey_i_to_s_c(i->k), i->bkey_type);
if (invalid) { if (invalid) {
char buf[200]; struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k)); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
bch2_fs_fatal_error(c, "invalid bkey %s on insert from %s -> %ps: %s\n", bch2_fs_fatal_error(c, "invalid bkey %s on insert from %s -> %ps: %s\n",
buf, trans->fn, (void *) i->ip_allocated, invalid); buf.buf, trans->fn, (void *) i->ip_allocated, invalid);
printbuf_exit(&buf);
return -EINVAL; return -EINVAL;
} }
btree_insert_entry_checks(trans, i); btree_insert_entry_checks(trans, i);
......
...@@ -376,22 +376,23 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k, ...@@ -376,22 +376,23 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
{ {
struct bch_fs_usage __percpu *fs_usage; struct bch_fs_usage __percpu *fs_usage;
int idx, ret = 0; int idx, ret = 0;
char buf[200]; struct printbuf buf = PRINTBUF;
percpu_down_read(&c->mark_lock); percpu_down_read(&c->mark_lock);
buf.atomic++;
idx = bch2_replicas_entry_idx(c, r); idx = bch2_replicas_entry_idx(c, r);
if (idx < 0 && if (idx < 0 &&
(test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) || (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
fsck_err(c, "no replicas entry\n" fsck_err(c, "no replicas entry\n"
" while marking %s", " while marking %s",
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))) { (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
ret = bch2_mark_replicas(c, r); ret = bch2_mark_replicas(c, r);
if (ret)
return ret;
percpu_down_read(&c->mark_lock); percpu_down_read(&c->mark_lock);
if (ret)
goto err;
idx = bch2_replicas_entry_idx(c, r); idx = bch2_replicas_entry_idx(c, r);
} }
if (idx < 0) { if (idx < 0) {
...@@ -407,6 +408,7 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k, ...@@ -407,6 +408,7 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
err: err:
fsck_err: fsck_err:
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
printbuf_exit(&buf);
return ret; return ret;
} }
...@@ -678,7 +680,8 @@ static int check_bucket_ref(struct bch_fs *c, ...@@ -678,7 +680,8 @@ static int check_bucket_ref(struct bch_fs *c,
u16 bucket_sectors = !ptr->cached u16 bucket_sectors = !ptr->cached
? dirty_sectors ? dirty_sectors
: cached_sectors; : cached_sectors;
char buf[200]; struct printbuf buf = PRINTBUF;
int ret = 0;
if (gen_after(ptr->gen, b_gen)) { if (gen_after(ptr->gen, b_gen)) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
...@@ -687,8 +690,9 @@ static int check_bucket_ref(struct bch_fs *c, ...@@ -687,8 +690,9 @@ static int check_bucket_ref(struct bch_fs *c,
ptr->dev, bucket_nr, b_gen, ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type ?: ptr_data_type], bch2_data_types[bucket_data_type ?: ptr_data_type],
ptr->gen, ptr->gen,
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)); (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
return -EIO; ret = -EIO;
goto err;
} }
if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) { if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
...@@ -698,8 +702,10 @@ static int check_bucket_ref(struct bch_fs *c, ...@@ -698,8 +702,10 @@ static int check_bucket_ref(struct bch_fs *c,
ptr->dev, bucket_nr, b_gen, ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type ?: ptr_data_type], bch2_data_types[bucket_data_type ?: ptr_data_type],
ptr->gen, ptr->gen,
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)); (printbuf_reset(&buf),
return -EIO; bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EIO;
goto err;
} }
if (b_gen != ptr->gen && !ptr->cached) { if (b_gen != ptr->gen && !ptr->cached) {
...@@ -710,12 +716,16 @@ static int check_bucket_ref(struct bch_fs *c, ...@@ -710,12 +716,16 @@ static int check_bucket_ref(struct bch_fs *c,
*bucket_gen(ca, bucket_nr), *bucket_gen(ca, bucket_nr),
bch2_data_types[bucket_data_type ?: ptr_data_type], bch2_data_types[bucket_data_type ?: ptr_data_type],
ptr->gen, ptr->gen,
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)); (printbuf_reset(&buf),
return -EIO; bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EIO;
goto err;
} }
if (b_gen != ptr->gen) if (b_gen != ptr->gen) {
return 1; ret = 1;
goto err;
}
if (bucket_data_type && ptr_data_type && if (bucket_data_type && ptr_data_type &&
bucket_data_type != ptr_data_type) { bucket_data_type != ptr_data_type) {
...@@ -725,8 +735,10 @@ static int check_bucket_ref(struct bch_fs *c, ...@@ -725,8 +735,10 @@ static int check_bucket_ref(struct bch_fs *c,
ptr->dev, bucket_nr, b_gen, ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type], bch2_data_types[bucket_data_type],
bch2_data_types[ptr_data_type], bch2_data_types[ptr_data_type],
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)); (printbuf_reset(&buf),
return -EIO; bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EIO;
goto err;
} }
if ((unsigned) (bucket_sectors + sectors) > U16_MAX) { if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
...@@ -736,11 +748,14 @@ static int check_bucket_ref(struct bch_fs *c, ...@@ -736,11 +748,14 @@ static int check_bucket_ref(struct bch_fs *c,
ptr->dev, bucket_nr, b_gen, ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type ?: ptr_data_type], bch2_data_types[bucket_data_type ?: ptr_data_type],
bucket_sectors, sectors, bucket_sectors, sectors,
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)); (printbuf_reset(&buf),
return -EIO; bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EIO;
goto err;
} }
err:
return 0; printbuf_exit(&buf);
return ret;
} }
static int mark_stripe_bucket(struct btree_trans *trans, static int mark_stripe_bucket(struct btree_trans *trans,
...@@ -759,7 +774,7 @@ static int mark_stripe_bucket(struct btree_trans *trans, ...@@ -759,7 +774,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g; struct bucket *g;
struct bucket_mark new, old; struct bucket_mark new, old;
char buf[200]; struct printbuf buf = PRINTBUF;
int ret = 0; int ret = 0;
BUG_ON(!(flags & BTREE_TRIGGER_GC)); BUG_ON(!(flags & BTREE_TRIGGER_GC));
...@@ -767,6 +782,7 @@ static int mark_stripe_bucket(struct btree_trans *trans, ...@@ -767,6 +782,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
/* * XXX doesn't handle deletion */ /* * XXX doesn't handle deletion */
percpu_down_read(&c->mark_lock); percpu_down_read(&c->mark_lock);
buf.atomic++;
g = PTR_GC_BUCKET(ca, ptr); g = PTR_GC_BUCKET(ca, ptr);
if (g->mark.dirty_sectors || if (g->mark.dirty_sectors ||
...@@ -774,7 +790,7 @@ static int mark_stripe_bucket(struct btree_trans *trans, ...@@ -774,7 +790,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
bch2_fs_inconsistent(c, bch2_fs_inconsistent(c,
"bucket %u:%zu gen %u: multiple stripes using same bucket\n%s", "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen, ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)); (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
} }
...@@ -799,8 +815,8 @@ static int mark_stripe_bucket(struct btree_trans *trans, ...@@ -799,8 +815,8 @@ static int mark_stripe_bucket(struct btree_trans *trans,
bch2_dev_usage_update(c, ca, old, new, journal_seq, true); bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
err: err:
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
printbuf_exit(&buf);
return 0; return ret;
} }
static int __mark_pointer(struct btree_trans *trans, static int __mark_pointer(struct btree_trans *trans,
...@@ -987,10 +1003,11 @@ static int bch2_mark_extent(struct btree_trans *trans, ...@@ -987,10 +1003,11 @@ static int bch2_mark_extent(struct btree_trans *trans,
if (r.e.nr_devs) { if (r.e.nr_devs) {
ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true); ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
if (ret) { if (ret) {
char buf[200]; struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf), c, k); bch2_bkey_val_to_text(&buf, c, k);
bch2_fs_fatal_error(c, "no replicas entry for %s", buf); bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
printbuf_exit(&buf);
return ret; return ret;
} }
} }
...@@ -1019,13 +1036,16 @@ static int bch2_mark_stripe(struct btree_trans *trans, ...@@ -1019,13 +1036,16 @@ static int bch2_mark_stripe(struct btree_trans *trans,
struct stripe *m = genradix_ptr(&c->stripes, idx); struct stripe *m = genradix_ptr(&c->stripes, idx);
if (!m || (old_s && !m->alive)) { if (!m || (old_s && !m->alive)) {
char buf1[200], buf2[200]; struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf1), c, old); bch2_bkey_val_to_text(&buf1, c, old);
bch2_bkey_val_to_text(&PBUF(buf2), c, new); bch2_bkey_val_to_text(&buf2, c, new);
bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n" bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
"old %s\n" "old %s\n"
"new %s", idx, buf1, buf2); "new %s", idx, buf1.buf, buf2.buf);
printbuf_exit(&buf2);
printbuf_exit(&buf1);
bch2_inconsistent_error(c); bch2_inconsistent_error(c);
return -1; return -1;
} }
...@@ -1090,10 +1110,11 @@ static int bch2_mark_stripe(struct btree_trans *trans, ...@@ -1090,10 +1110,11 @@ static int bch2_mark_stripe(struct btree_trans *trans,
((s64) m->sectors * m->nr_redundant), ((s64) m->sectors * m->nr_redundant),
journal_seq, gc); journal_seq, gc);
if (ret) { if (ret) {
char buf[200]; struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf), c, new); bch2_bkey_val_to_text(&buf, c, new);
bch2_fs_fatal_error(c, "no replicas entry for %s", buf); bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
printbuf_exit(&buf);
return ret; return ret;
} }
} }
...@@ -1174,7 +1195,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans, ...@@ -1174,7 +1195,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1; int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
u64 next_idx = end; u64 next_idx = end;
s64 ret = 0; s64 ret = 0;
char buf[200]; struct printbuf buf = PRINTBUF;
if (r_idx >= c->reflink_gc_nr) if (r_idx >= c->reflink_gc_nr)
goto not_found; goto not_found;
...@@ -1193,7 +1214,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans, ...@@ -1193,7 +1214,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
if (fsck_err(c, "pointer to missing indirect extent\n" if (fsck_err(c, "pointer to missing indirect extent\n"
" %s\n" " %s\n"
" missing range %llu-%llu", " missing range %llu-%llu",
(bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c), buf), (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
*idx, next_idx)) { *idx, next_idx)) {
struct bkey_i_error new; struct bkey_i_error new;
...@@ -1207,6 +1228,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans, ...@@ -1207,6 +1228,7 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
*idx = next_idx; *idx = next_idx;
fsck_err: fsck_err:
printbuf_exit(&buf);
return ret; return ret;
} }
...@@ -1289,7 +1311,7 @@ void fs_usage_apply_warn(struct btree_trans *trans, ...@@ -1289,7 +1311,7 @@ void fs_usage_apply_warn(struct btree_trans *trans,
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_insert_entry *i; struct btree_insert_entry *i;
char buf[200]; struct printbuf buf = PRINTBUF;
bch_err(c, "disk usage increased %lli more than %u sectors reserved", bch_err(c, "disk usage increased %lli more than %u sectors reserved",
should_not_have_added, disk_res_sectors); should_not_have_added, disk_res_sectors);
...@@ -1298,13 +1320,17 @@ void fs_usage_apply_warn(struct btree_trans *trans, ...@@ -1298,13 +1320,17 @@ void fs_usage_apply_warn(struct btree_trans *trans,
struct bkey_s_c old = { &i->old_k, i->old_v }; struct bkey_s_c old = { &i->old_k, i->old_v };
pr_err("while inserting"); pr_err("while inserting");
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k)); printbuf_reset(&buf);
pr_err(" %s", buf); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
pr_err(" %s", buf.buf);
pr_err("overlapping with"); pr_err("overlapping with");
bch2_bkey_val_to_text(&PBUF(buf), c, old); printbuf_reset(&buf);
pr_err(" %s", buf); bch2_bkey_val_to_text(&buf, c, old);
pr_err(" %s", buf.buf);
} }
__WARN(); __WARN();
printbuf_exit(&buf);
} }
int bch2_trans_fs_usage_apply(struct btree_trans *trans, int bch2_trans_fs_usage_apply(struct btree_trans *trans,
...@@ -1744,7 +1770,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans, ...@@ -1744,7 +1770,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
struct bkey_i *n; struct bkey_i *n;
__le64 *refcount; __le64 *refcount;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1; int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
char buf[200]; struct printbuf buf = PRINTBUF;
int ret; int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx), bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
...@@ -1764,19 +1790,19 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans, ...@@ -1764,19 +1790,19 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
refcount = bkey_refcount(n); refcount = bkey_refcount(n);
if (!refcount) { if (!refcount) {
bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c); bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c, bch2_fs_inconsistent(c,
"nonexistent indirect extent at %llu while marking\n %s", "nonexistent indirect extent at %llu while marking\n %s",
*idx, buf); *idx, buf.buf);
ret = -EIO; ret = -EIO;
goto err; goto err;
} }
if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) { if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c); bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c, bch2_fs_inconsistent(c,
"indirect extent refcount underflow at %llu while marking\n %s", "indirect extent refcount underflow at %llu while marking\n %s",
*idx, buf); *idx, buf.buf);
ret = -EIO; ret = -EIO;
goto err; goto err;
} }
...@@ -1811,6 +1837,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans, ...@@ -1811,6 +1837,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
*idx = k.k->p.offset; *idx = k.k->p.offset;
err: err:
bch2_trans_iter_exit(trans, &iter); bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
return ret; return ret;
} }
......
...@@ -157,6 +157,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock) ...@@ -157,6 +157,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
unsigned long now; unsigned long now;
unsigned i; unsigned i;
out->atomic++;
spin_lock(&clock->timer_lock); spin_lock(&clock->timer_lock);
now = atomic64_read(&clock->now); now = atomic64_read(&clock->now);
...@@ -165,6 +166,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock) ...@@ -165,6 +166,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
clock->timers.data[i]->fn, clock->timers.data[i]->fn,
clock->timers.data[i]->expire - now); clock->timers.data[i]->expire - now);
spin_unlock(&clock->timer_lock); spin_unlock(&clock->timer_lock);
--out->atomic;
} }
void bch2_io_clock_exit(struct io_clock *clock) void bch2_io_clock_exit(struct io_clock *clock)
......
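
Where a printbuf is filled while a spinlock is held (as in the clock and
journal hunks), the code now brackets that section with out->atomic++ and
--out->atomic. A minimal sketch of the pattern, on the assumption that a
nonzero atomic count tells the printbuf it may not sleep when growing its
buffer (foo and foo_to_text are made-up names for illustration):

  void foo_to_text(struct printbuf *out, struct foo *f)
  {
          out->atomic++;                          /* no sleeping allocations past here */
          spin_lock(&f->lock);

          pr_buf(out, "count %u\n", f->count);    /* may grow out->buf atomically */

          spin_unlock(&f->lock);
          --out->atomic;
  }
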
...@@ -169,10 +169,11 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) ...@@ -169,10 +169,11 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
failed |= bch2_btree_verify_replica(c, b, p); failed |= bch2_btree_verify_replica(c, b, p);
if (failed) { if (failed) {
char buf[200]; struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
bch2_fs_fatal_error(c, "btree node verify failed for : %s\n", buf); bch2_fs_fatal_error(c, "btree node verify failed for : %s\n", buf.buf);
printbuf_exit(&buf);
} }
out: out:
mutex_unlock(&c->verify_lock); mutex_unlock(&c->verify_lock);
...@@ -188,8 +189,7 @@ struct dump_iter { ...@@ -188,8 +189,7 @@ struct dump_iter {
struct bch_fs *c; struct bch_fs *c;
enum btree_id id; enum btree_id id;
char buf[1 << 12]; struct printbuf buf;
size_t bytes; /* what's currently in buf */
char __user *ubuf; /* destination user buffer */ char __user *ubuf; /* destination user buffer */
size_t size; /* size of requested read */ size_t size; /* size of requested read */
...@@ -198,9 +198,9 @@ struct dump_iter { ...@@ -198,9 +198,9 @@ struct dump_iter {
static int flush_buf(struct dump_iter *i) static int flush_buf(struct dump_iter *i)
{ {
if (i->bytes) { if (i->buf.pos) {
size_t bytes = min(i->bytes, i->size); size_t bytes = min_t(size_t, i->buf.pos, i->size);
int err = copy_to_user(i->ubuf, i->buf, bytes); int err = copy_to_user(i->ubuf, i->buf.buf, bytes);
if (err) if (err)
return err; return err;
...@@ -208,8 +208,8 @@ static int flush_buf(struct dump_iter *i) ...@@ -208,8 +208,8 @@ static int flush_buf(struct dump_iter *i)
i->ret += bytes; i->ret += bytes;
i->ubuf += bytes; i->ubuf += bytes;
i->size -= bytes; i->size -= bytes;
i->bytes -= bytes; i->buf.pos -= bytes;
memmove(i->buf, i->buf + bytes, i->bytes); memmove(i->buf.buf, i->buf.buf + bytes, i->buf.pos);
} }
return 0; return 0;
...@@ -228,13 +228,17 @@ static int bch2_dump_open(struct inode *inode, struct file *file) ...@@ -228,13 +228,17 @@ static int bch2_dump_open(struct inode *inode, struct file *file)
i->from = POS_MIN; i->from = POS_MIN;
i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]); i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]);
i->id = bd->id; i->id = bd->id;
i->buf = PRINTBUF;
return 0; return 0;
} }
static int bch2_dump_release(struct inode *inode, struct file *file) static int bch2_dump_release(struct inode *inode, struct file *file)
{ {
kfree(file->private_data); struct dump_iter *i = file->private_data;
printbuf_exit(&i->buf);
kfree(i);
return 0; return 0;
} }
...@@ -266,11 +270,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, ...@@ -266,11 +270,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
k = bch2_btree_iter_peek(&iter); k = bch2_btree_iter_peek(&iter);
while (k.k && !(err = bkey_err(k))) { while (k.k && !(err = bkey_err(k))) {
bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k); bch2_bkey_val_to_text(&i->buf, i->c, k);
i->bytes = strlen(i->buf); pr_char(&i->buf, '\n');
BUG_ON(i->bytes >= sizeof(i->buf));
i->buf[i->bytes] = '\n';
i->bytes++;
k = bch2_btree_iter_next(&iter); k = bch2_btree_iter_next(&iter);
i->from = iter.pos; i->from = iter.pos;
...@@ -319,8 +320,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, ...@@ -319,8 +320,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
bch2_trans_init(&trans, i->c, 0, 0); bch2_trans_init(&trans, i->c, 0, 0);
for_each_btree_node(&trans, iter, i->id, i->from, 0, b, err) { for_each_btree_node(&trans, iter, i->id, i->from, 0, b, err) {
bch2_btree_node_to_text(&PBUF(i->buf), i->c, b); bch2_btree_node_to_text(&i->buf, i->c, b);
i->bytes = strlen(i->buf);
err = flush_buf(i); err = flush_buf(i);
if (err) if (err)
break; break;
...@@ -384,16 +384,14 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, ...@@ -384,16 +384,14 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
bch2_btree_node_iter_peek(&l->iter, l->b); bch2_btree_node_iter_peek(&l->iter, l->b);
if (l->b != prev_node) { if (l->b != prev_node) {
bch2_btree_node_to_text(&PBUF(i->buf), i->c, l->b); bch2_btree_node_to_text(&i->buf, i->c, l->b);
i->bytes = strlen(i->buf);
err = flush_buf(i); err = flush_buf(i);
if (err) if (err)
break; break;
} }
prev_node = l->b; prev_node = l->b;
bch2_bfloat_to_text(&PBUF(i->buf), l->b, _k); bch2_bfloat_to_text(&i->buf, l->b, _k);
i->bytes = strlen(i->buf);
err = flush_buf(i); err = flush_buf(i);
if (err) if (err)
break; break;
......
...@@ -286,14 +286,15 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf) ...@@ -286,14 +286,15 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
struct bch_csum got = ec_block_checksum(buf, i, offset); struct bch_csum got = ec_block_checksum(buf, i, offset);
if (bch2_crc_cmp(want, got)) { if (bch2_crc_cmp(want, got)) {
char buf2[200]; struct printbuf buf2 = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&buf->key.k_i)); bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key.k_i));
bch_err_ratelimited(c, bch_err_ratelimited(c,
"stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s", "stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
(void *) _RET_IP_, i, j, v->csum_type, (void *) _RET_IP_, i, j, v->csum_type,
want.lo, got.lo, buf2); want.lo, got.lo, buf2.buf);
printbuf_exit(&buf2);
clear_bit(i, buf->valid); clear_bit(i, buf->valid);
break; break;
} }
......
...@@ -1676,7 +1676,8 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root) ...@@ -1676,7 +1676,8 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{ {
struct bch_fs *c = root->d_sb->s_fs_info; struct bch_fs *c = root->d_sb->s_fs_info;
enum bch_opt_id i; enum bch_opt_id i;
char buf[512]; struct printbuf buf = PRINTBUF;
int ret = 0;
for (i = 0; i < bch2_opts_nr; i++) { for (i = 0; i < bch2_opts_nr; i++) {
const struct bch_option *opt = &bch2_opt_table[i]; const struct bch_option *opt = &bch2_opt_table[i];
...@@ -1688,13 +1689,17 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root) ...@@ -1688,13 +1689,17 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root)
if (v == bch2_opt_get_by_id(&bch2_opts_default, i)) if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
continue; continue;
bch2_opt_to_text(&PBUF(buf), c, opt, v, printbuf_reset(&buf);
bch2_opt_to_text(&buf, c, opt, v,
OPT_SHOW_MOUNT_STYLE); OPT_SHOW_MOUNT_STYLE);
seq_putc(seq, ','); seq_putc(seq, ',');
seq_puts(seq, buf); seq_puts(seq, buf.buf);
} }
return 0; if (buf.allocation_failure)
ret = -ENOMEM;
printbuf_exit(&buf);
return ret;
} }
static void bch2_put_super(struct super_block *sb) static void bch2_put_super(struct super_block *sb)
......
...@@ -2057,11 +2057,11 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, ...@@ -2057,11 +2057,11 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev); struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
struct btree_iter iter; struct btree_iter iter;
char buf[200]; struct printbuf buf = PRINTBUF;
int ret; int ret;
bch2_bkey_val_to_text(&PBUF(buf), c, k); bch2_bkey_val_to_text(&buf, c, k);
bch2_fs_inconsistent(c, "Attempting to read from stale dirty pointer: %s", buf); bch2_fs_inconsistent(c, "Attempting to read from stale dirty pointer: %s", buf.buf);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
POS(ptr.dev, PTR_BUCKET_NR(ca, &ptr)), POS(ptr.dev, PTR_BUCKET_NR(ca, &ptr)),
...@@ -2069,12 +2069,14 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, ...@@ -2069,12 +2069,14 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (ret) if (ret)
return; goto out;
bch2_bkey_val_to_text(&PBUF(buf), c, k); bch2_bkey_val_to_text(&buf, c, k);
bch_err(c, "%s", buf); bch_err(c, "%s", buf.buf);
bch_err(c, "memory gen: %u", *bucket_gen(ca, iter.pos.offset)); bch_err(c, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
bch2_trans_iter_exit(trans, &iter); bch2_trans_iter_exit(trans, &iter);
out:
printbuf_exit(&buf);
} }
int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
......
...@@ -414,18 +414,18 @@ static int __journal_res_get(struct journal *j, struct journal_res *res, ...@@ -414,18 +414,18 @@ static int __journal_res_get(struct journal *j, struct journal_res *res,
!can_discard && !can_discard &&
j->reservations.idx == j->reservations.unwritten_idx && j->reservations.idx == j->reservations.unwritten_idx &&
(flags & JOURNAL_RES_GET_RESERVED)) { (flags & JOURNAL_RES_GET_RESERVED)) {
char *journal_debug_buf = kmalloc(4096, GFP_ATOMIC); struct printbuf buf = PRINTBUF;
bch_err(c, "Journal stuck! Hava a pre-reservation but journal full"); bch_err(c, "Journal stuck! Hava a pre-reservation but journal full");
if (journal_debug_buf) {
bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
bch_err(c, "%s", journal_debug_buf);
bch2_journal_pins_to_text(&_PBUF(journal_debug_buf, 4096), j); bch2_journal_debug_to_text(&buf, j);
bch_err(c, "Journal pins:\n%s", journal_debug_buf); bch_err(c, "%s", buf.buf);
kfree(journal_debug_buf);
} printbuf_reset(&buf);
bch2_journal_pins_to_text(&buf, j);
bch_err(c, "Journal pins:\n%s", buf.buf);
printbuf_exit(&buf);
bch2_fatal_error(c); bch2_fatal_error(c);
dump_stack(); dump_stack();
} }
...@@ -1186,6 +1186,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) ...@@ -1186,6 +1186,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
unsigned long now = jiffies; unsigned long now = jiffies;
unsigned i; unsigned i;
out->atomic++;
rcu_read_lock(); rcu_read_lock();
s = READ_ONCE(j->reservations); s = READ_ONCE(j->reservations);
...@@ -1270,6 +1272,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) ...@@ -1270,6 +1272,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
} }
rcu_read_unlock(); rcu_read_unlock();
--out->atomic;
} }
void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
...@@ -1286,6 +1290,8 @@ void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j) ...@@ -1286,6 +1290,8 @@ void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
u64 i; u64 i;
spin_lock(&j->lock); spin_lock(&j->lock);
out->atomic++;
fifo_for_each_entry_ptr(pin_list, &j->pin, i) { fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
pr_buf(out, "%llu: count %u\n", pr_buf(out, "%llu: count %u\n",
i, atomic_read(&pin_list->count)); i, atomic_read(&pin_list->count));
...@@ -1305,5 +1311,7 @@ void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j) ...@@ -1305,5 +1311,7 @@ void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
pr_buf(out, "\t%px %ps\n", pr_buf(out, "\t%px %ps\n",
pin, pin->flush); pin, pin->flush);
} }
--out->atomic;
spin_unlock(&j->lock); spin_unlock(&j->lock);
} }
...@@ -251,14 +251,15 @@ static int journal_validate_key(struct bch_fs *c, const char *where, ...@@ -251,14 +251,15 @@ static int journal_validate_key(struct bch_fs *c, const char *where,
invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k), invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
__btree_node_type(level, btree_id)); __btree_node_type(level, btree_id));
if (invalid) { if (invalid) {
char buf[160]; struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k)); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s", mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s",
type, where, type, where,
(u64 *) k - entry->_data, (u64 *) k - entry->_data,
le16_to_cpu(entry->u64s), le16_to_cpu(entry->u64s),
invalid, buf); invalid, buf.buf);
printbuf_exit(&buf);
le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k)); memmove(k, bkey_next(k), next - (void *) bkey_next(k));
...@@ -995,6 +996,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -995,6 +996,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
struct journal_replay *i, *t; struct journal_replay *i, *t;
struct bch_dev *ca; struct bch_dev *ca;
unsigned iter; unsigned iter;
struct printbuf buf = PRINTBUF;
size_t keys = 0, entries = 0; size_t keys = 0, entries = 0;
bool degraded = false; bool degraded = false;
u64 seq, last_seq = 0; u64 seq, last_seq = 0;
...@@ -1053,7 +1055,8 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -1053,7 +1055,8 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
if (!last_seq) { if (!last_seq) {
fsck_err(c, "journal read done, but no entries found after dropping non-flushes"); fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
return -1; ret = -1;
goto err;
} }
/* Drop blacklisted entries and entries older than last_seq: */ /* Drop blacklisted entries and entries older than last_seq: */
...@@ -1085,7 +1088,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -1085,7 +1088,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
while (seq < le64_to_cpu(i->j.seq)) { while (seq < le64_to_cpu(i->j.seq)) {
u64 missing_start, missing_end; u64 missing_start, missing_end;
char buf1[200], buf2[200]; struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
while (seq < le64_to_cpu(i->j.seq) && while (seq < le64_to_cpu(i->j.seq) &&
bch2_journal_seq_is_blacklisted(c, seq, false)) bch2_journal_seq_is_blacklisted(c, seq, false))
...@@ -1101,14 +1104,13 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -1101,14 +1104,13 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
seq++; seq++;
if (i->list.prev != list) { if (i->list.prev != list) {
struct printbuf out = PBUF(buf1);
struct journal_replay *p = list_prev_entry(i, list); struct journal_replay *p = list_prev_entry(i, list);
bch2_journal_ptrs_to_text(&out, c, p); bch2_journal_ptrs_to_text(&buf1, c, p);
pr_buf(&out, " size %zu", vstruct_sectors(&p->j, c->block_bits)); pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
} else } else
sprintf(buf1, "(none)"); pr_buf(&buf1, "(none)");
bch2_journal_ptrs_to_text(&PBUF(buf2), c, i); bch2_journal_ptrs_to_text(&buf2, c, i);
missing_end = seq - 1; missing_end = seq - 1;
fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n" fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
...@@ -1116,7 +1118,10 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -1116,7 +1118,10 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
" next at %s", " next at %s",
missing_start, missing_end, missing_start, missing_end,
last_seq, *blacklist_seq - 1, last_seq, *blacklist_seq - 1,
buf1, buf2); buf1.buf, buf2.buf);
printbuf_exit(&buf1);
printbuf_exit(&buf2);
} }
seq++; seq++;
...@@ -1130,14 +1135,13 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -1130,14 +1135,13 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
.e.nr_required = 1, .e.nr_required = 1,
}; };
unsigned ptr; unsigned ptr;
char buf[80];
if (i->ignore) if (i->ignore)
continue; continue;
ret = jset_validate_entries(c, &i->j, READ); ret = jset_validate_entries(c, &i->j, READ);
if (ret) if (ret)
goto fsck_err; goto err;
for (ptr = 0; ptr < i->nr_ptrs; ptr++) for (ptr = 0; ptr < i->nr_ptrs; ptr++)
replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev; replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;
...@@ -1149,15 +1153,17 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -1149,15 +1153,17 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
* the devices - this is wrong: * the devices - this is wrong:
*/ */
printbuf_reset(&buf);
bch2_replicas_entry_to_text(&buf, &replicas.e);
if (!degraded && if (!degraded &&
(test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) || (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c, fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
"superblock not marked as containing replicas %s", "superblock not marked as containing replicas %s",
(bch2_replicas_entry_to_text(&PBUF(buf), buf.buf))) {
&replicas.e), buf)))) {
ret = bch2_mark_replicas(c, &replicas.e); ret = bch2_mark_replicas(c, &replicas.e);
if (ret) if (ret)
return ret; goto err;
} }
for_each_jset_key(k, _n, entry, &i->j) for_each_jset_key(k, _n, entry, &i->j)
...@@ -1171,7 +1177,9 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list, ...@@ -1171,7 +1177,9 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
if (*start_seq != *blacklist_seq) if (*start_seq != *blacklist_seq)
bch_info(c, "dropped unflushed entries %llu-%llu", bch_info(c, "dropped unflushed entries %llu-%llu",
*blacklist_seq, *start_seq - 1); *blacklist_seq, *start_seq - 1);
err:
fsck_err: fsck_err:
printbuf_exit(&buf);
return ret; return ret;
} }
...@@ -1481,7 +1489,7 @@ void bch2_journal_write(struct closure *cl) ...@@ -1481,7 +1489,7 @@ void bch2_journal_write(struct closure *cl)
struct jset_entry *start, *end; struct jset_entry *start, *end;
struct jset *jset; struct jset *jset;
struct bio *bio; struct bio *bio;
char *journal_debug_buf = NULL; struct printbuf journal_debug_buf = PRINTBUF;
bool validate_before_checksum = false; bool validate_before_checksum = false;
unsigned i, sectors, bytes, u64s, nr_rw_members = 0; unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
int ret; int ret;
...@@ -1586,11 +1594,8 @@ void bch2_journal_write(struct closure *cl) ...@@ -1586,11 +1594,8 @@ void bch2_journal_write(struct closure *cl)
goto retry_alloc; goto retry_alloc;
} }
if (ret) { if (ret)
journal_debug_buf = kmalloc(4096, GFP_ATOMIC); __bch2_journal_debug_to_text(&journal_debug_buf, j);
if (journal_debug_buf)
__bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
}
/* /*
* write is allocated, no longer need to account for it in * write is allocated, no longer need to account for it in
...@@ -1607,8 +1612,8 @@ void bch2_journal_write(struct closure *cl) ...@@ -1607,8 +1612,8 @@ void bch2_journal_write(struct closure *cl)
if (ret) { if (ret) {
bch_err(c, "Unable to allocate journal write:\n%s", bch_err(c, "Unable to allocate journal write:\n%s",
journal_debug_buf); journal_debug_buf.buf);
kfree(journal_debug_buf); printbuf_exit(&journal_debug_buf);
bch2_fatal_error(c); bch2_fatal_error(c);
continue_at(cl, journal_write_done, c->io_complete_wq); continue_at(cl, journal_write_done, c->io_complete_wq);
return; return;
......
...@@ -216,14 +216,11 @@ void bch2_journal_space_available(struct journal *j) ...@@ -216,14 +216,11 @@ void bch2_journal_space_available(struct journal *j)
if (!clean_ondisk && if (!clean_ondisk &&
j->reservations.idx == j->reservations.idx ==
j->reservations.unwritten_idx) { j->reservations.unwritten_idx) {
char *buf = kmalloc(4096, GFP_ATOMIC); struct printbuf buf = PRINTBUF;
bch_err(c, "journal stuck"); __bch2_journal_debug_to_text(&buf, j);
if (buf) { bch_err(c, "journal stuck\n%s", buf.buf);
__bch2_journal_debug_to_text(&_PBUF(buf, 4096), j); printbuf_exit(&buf);
pr_err("\n%s", buf);
kfree(buf);
}
bch2_fatal_error(c); bch2_fatal_error(c);
ret = cur_entry_journal_stuck; ret = cur_entry_journal_stuck;
......
...@@ -257,35 +257,47 @@ void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c) ...@@ -257,35 +257,47 @@ void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{ {
struct bch_fs_rebalance *r = &c->rebalance; struct bch_fs_rebalance *r = &c->rebalance;
struct rebalance_work w = rebalance_work(c); struct rebalance_work w = rebalance_work(c);
char h1[21], h2[21];
bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9); out->tabstops[0] = 20;
bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
pr_buf(out, "fullest_dev (%i):\t%s/%s\n",
w.dev_most_full_idx, h1, h2);
bch2_hprint(&PBUF(h1), w.total_work << 9); pr_buf(out, "fullest_dev (%i):", w.dev_most_full_idx);
bch2_hprint(&PBUF(h2), c->capacity << 9); pr_tab(out);
pr_buf(out, "total work:\t\t%s/%s\n", h1, h2);
pr_buf(out, "rate:\t\t\t%u\n", r->pd.rate.rate); bch2_hprint(out, w.dev_most_full_work << 9);
pr_buf(out, "/");
bch2_hprint(out, w.dev_most_full_capacity << 9);
pr_newline(out);
pr_buf(out, "total work:");
pr_tab(out);
bch2_hprint(out, w.total_work << 9);
pr_buf(out, "/");
bch2_hprint(out, c->capacity << 9);
pr_newline(out);
pr_buf(out, "rate:");
pr_tab(out);
pr_buf(out, "%u", r->pd.rate.rate);
pr_newline(out);
switch (r->state) { switch (r->state) {
case REBALANCE_WAITING: case REBALANCE_WAITING:
pr_buf(out, "waiting\n"); pr_buf(out, "waiting");
break; break;
case REBALANCE_THROTTLED: case REBALANCE_THROTTLED:
bch2_hprint(&PBUF(h1), pr_buf(out, "throttled for %lu sec or ",
(r->throttled_until_cputime - jiffies) / HZ);
bch2_hprint(out,
(r->throttled_until_iotime - (r->throttled_until_iotime -
atomic64_read(&c->io_clock[WRITE].now)) << 9); atomic64_read(&c->io_clock[WRITE].now)) << 9);
pr_buf(out, "throttled for %lu sec or %s io\n", pr_buf(out, " io");
(r->throttled_until_cputime - jiffies) / HZ,
h1);
break; break;
case REBALANCE_RUNNING: case REBALANCE_RUNNING:
pr_buf(out, "running\n"); pr_buf(out, "running");
break; break;
} }
pr_newline(out);
} }
void bch2_rebalance_stop(struct bch_fs *c) void bch2_rebalance_stop(struct bch_fs *c)
......
...@@ -760,6 +760,8 @@ static int verify_superblock_clean(struct bch_fs *c, ...@@ -760,6 +760,8 @@ static int verify_superblock_clean(struct bch_fs *c,
{ {
unsigned i; unsigned i;
struct bch_sb_field_clean *clean = *cleanp; struct bch_sb_field_clean *clean = *cleanp;
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
int ret = 0; int ret = 0;
if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c, if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
...@@ -772,7 +774,6 @@ static int verify_superblock_clean(struct bch_fs *c, ...@@ -772,7 +774,6 @@ static int verify_superblock_clean(struct bch_fs *c,
} }
for (i = 0; i < BTREE_ID_NR; i++) { for (i = 0; i < BTREE_ID_NR; i++) {
char buf1[200], buf2[200];
struct bkey_i *k1, *k2; struct bkey_i *k1, *k2;
unsigned l1 = 0, l2 = 0; unsigned l1 = 0, l2 = 0;
...@@ -782,6 +783,19 @@ static int verify_superblock_clean(struct bch_fs *c, ...@@ -782,6 +783,19 @@ static int verify_superblock_clean(struct bch_fs *c,
if (!k1 && !k2) if (!k1 && !k2)
continue; continue;
printbuf_reset(&buf1);
printbuf_reset(&buf2);
if (k1)
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
else
pr_buf(&buf1, "(none)");
if (k2)
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
else
pr_buf(&buf2, "(none)");
mustfix_fsck_err_on(!k1 || !k2 || mustfix_fsck_err_on(!k1 || !k2 ||
IS_ERR(k1) || IS_ERR(k1) ||
IS_ERR(k2) || IS_ERR(k2) ||
...@@ -791,10 +805,12 @@ static int verify_superblock_clean(struct bch_fs *c, ...@@ -791,10 +805,12 @@ static int verify_superblock_clean(struct bch_fs *c,
"superblock btree root %u doesn't match journal after clean shutdown\n" "superblock btree root %u doesn't match journal after clean shutdown\n"
"sb: l=%u %s\n" "sb: l=%u %s\n"
"journal: l=%u %s\n", i, "journal: l=%u %s\n", i,
l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1), l1, buf1.buf,
l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2)); l2, buf2.buf);
} }
fsck_err: fsck_err:
printbuf_exit(&buf2);
printbuf_exit(&buf1);
return ret; return ret;
} }
......
...@@ -997,11 +997,12 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs, ...@@ -997,11 +997,12 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
if (dflags & ~flags) { if (dflags & ~flags) {
if (print) { if (print) {
char buf[100]; struct printbuf buf = PRINTBUF;
bch2_replicas_entry_to_text(&PBUF(buf), e); bch2_replicas_entry_to_text(&buf, e);
bch_err(c, "insufficient devices online (%u) for replicas entry %s", bch_err(c, "insufficient devices online (%u) for replicas entry %s",
nr_online, buf); nr_online, buf.buf);
printbuf_exit(&buf);
} }
ret = false; ret = false;
break; break;
......
...@@ -572,16 +572,10 @@ int bch2_read_super(const char *path, struct bch_opts *opts, ...@@ -572,16 +572,10 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
{ {
u64 offset = opt_get(*opts, sb); u64 offset = opt_get(*opts, sb);
struct bch_sb_layout layout; struct bch_sb_layout layout;
char *_err; struct printbuf err = PRINTBUF;
struct printbuf err;
__le64 *i; __le64 *i;
int ret; int ret;
_err = kmalloc(4096, GFP_KERNEL);
if (!_err)
return -ENOMEM;
err = _PBUF(_err, 4096);
pr_verbose_init(*opts, ""); pr_verbose_init(*opts, "");
memset(sb, 0, sizeof(*sb)); memset(sb, 0, sizeof(*sb));
...@@ -633,8 +627,8 @@ int bch2_read_super(const char *path, struct bch_opts *opts, ...@@ -633,8 +627,8 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
goto err; goto err;
printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s", printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s",
path, _err); path, err.buf);
err = _PBUF(_err, 4096); printbuf_reset(&err);
/* /*
* Error reading primary superblock - read location of backup * Error reading primary superblock - read location of backup
...@@ -689,16 +683,16 @@ int bch2_read_super(const char *path, struct bch_opts *opts, ...@@ -689,16 +683,16 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
ret = bch2_sb_validate(sb, &err); ret = bch2_sb_validate(sb, &err);
if (ret) { if (ret) {
printk(KERN_ERR "bcachefs (%s): error validating superblock: %s", printk(KERN_ERR "bcachefs (%s): error validating superblock: %s",
path, _err); path, err.buf);
goto err_no_print; goto err_no_print;
} }
out: out:
pr_verbose_init(*opts, "ret %i", ret); pr_verbose_init(*opts, "ret %i", ret);
kfree(_err); printbuf_exit(&err);
return ret; return ret;
err: err:
printk(KERN_ERR "bcachefs (%s): error reading superblock: %s", printk(KERN_ERR "bcachefs (%s): error reading superblock: %s",
path, _err); path, err.buf);
err_no_print: err_no_print:
bch2_free_super(sb); bch2_free_super(sb);
goto out; goto out;
...@@ -768,6 +762,7 @@ int bch2_write_super(struct bch_fs *c) ...@@ -768,6 +762,7 @@ int bch2_write_super(struct bch_fs *c)
{ {
struct closure *cl = &c->sb_write; struct closure *cl = &c->sb_write;
struct bch_dev *ca; struct bch_dev *ca;
struct printbuf err = PRINTBUF;
unsigned i, sb = 0, nr_wrote; unsigned i, sb = 0, nr_wrote;
struct bch_devs_mask sb_written; struct bch_devs_mask sb_written;
bool wrote, can_mount_without_written, can_mount_with_written; bool wrote, can_mount_without_written, can_mount_with_written;
...@@ -795,18 +790,11 @@ int bch2_write_super(struct bch_fs *c) ...@@ -795,18 +790,11 @@ int bch2_write_super(struct bch_fs *c)
bch2_sb_from_fs(c, ca); bch2_sb_from_fs(c, ca);
for_each_online_member(ca, c, i) { for_each_online_member(ca, c, i) {
struct printbuf buf = { NULL, NULL }; printbuf_reset(&err);
ret = bch2_sb_validate(&ca->disk_sb, &buf); ret = bch2_sb_validate(&ca->disk_sb, &err);
if (ret) { if (ret) {
char *_buf = kmalloc(4096, GFP_NOFS); bch2_fs_inconsistent(c, "sb invalid before write: %s", err.buf);
if (_buf) {
buf = _PBUF(_buf, 4096);
bch2_sb_validate(&ca->disk_sb, &buf);
}
bch2_fs_inconsistent(c, "sb invalid before write: %s", _buf);
kfree(_buf);
percpu_ref_put(&ca->io_ref); percpu_ref_put(&ca->io_ref);
goto out; goto out;
} }
...@@ -897,6 +885,7 @@ int bch2_write_super(struct bch_fs *c) ...@@ -897,6 +885,7 @@ int bch2_write_super(struct bch_fs *c)
out: out:
/* Make new options visible after they're persistent: */ /* Make new options visible after they're persistent: */
bch2_sb_update(c); bch2_sb_update(c);
printbuf_exit(&err);
return ret; return ret;
} }
......
...@@ -870,12 +870,9 @@ noinline_for_stack ...@@ -870,12 +870,9 @@ noinline_for_stack
static void print_mount_opts(struct bch_fs *c) static void print_mount_opts(struct bch_fs *c)
{ {
enum bch_opt_id i; enum bch_opt_id i;
char buf[512]; struct printbuf p = PRINTBUF;
struct printbuf p = PBUF(buf);
bool first = true; bool first = true;
strcpy(buf, "(null)");
if (c->opts.read_only) { if (c->opts.read_only) {
pr_buf(&p, "ro"); pr_buf(&p, "ro");
first = false; first = false;
...@@ -897,7 +894,11 @@ static void print_mount_opts(struct bch_fs *c) ...@@ -897,7 +894,11 @@ static void print_mount_opts(struct bch_fs *c)
bch2_opt_to_text(&p, c, opt, v, OPT_SHOW_MOUNT_STYLE); bch2_opt_to_text(&p, c, opt, v, OPT_SHOW_MOUNT_STYLE);
} }
bch_info(c, "mounted with opts: %s", buf); if (!p.pos)
pr_buf(&p, "(null)");
bch_info(c, "mounted with opts: %s", p.buf);
printbuf_exit(&p);
} }
int bch2_fs_start(struct bch_fs *c) int bch2_fs_start(struct bch_fs *c)
...@@ -1561,11 +1562,11 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) ...@@ -1561,11 +1562,11 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
data = bch2_dev_has_data(c, ca); data = bch2_dev_has_data(c, ca);
if (data) { if (data) {
char data_has_str[100]; struct printbuf data_has = PRINTBUF;
bch2_flags_to_text(&PBUF(data_has_str), bch2_flags_to_text(&data_has, bch2_data_types, data);
bch2_data_types, data); bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
bch_err(ca, "Remove failed, still has data (%s)", data_has_str); printbuf_exit(&data_has);
ret = -EBUSY; ret = -EBUSY;
goto err; goto err;
} }
...@@ -1614,16 +1615,9 @@ int bch2_dev_add(struct bch_fs *c, const char *path) ...@@ -1614,16 +1615,9 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
struct bch_sb_field_members *mi; struct bch_sb_field_members *mi;
struct bch_member dev_mi; struct bch_member dev_mi;
unsigned dev_idx, nr_devices, u64s; unsigned dev_idx, nr_devices, u64s;
char *_errbuf; struct printbuf errbuf = PRINTBUF;
struct printbuf errbuf;
int ret; int ret;
_errbuf = kmalloc(4096, GFP_KERNEL);
if (!_errbuf)
return -ENOMEM;
errbuf = _PBUF(_errbuf, 4096);
ret = bch2_read_super(path, &opts, &sb); ret = bch2_read_super(path, &opts, &sb);
if (ret) { if (ret) {
bch_err(c, "device add error: error reading super: %i", ret); bch_err(c, "device add error: error reading super: %i", ret);
...@@ -1741,7 +1735,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path) ...@@ -1741,7 +1735,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
if (ca) if (ca)
bch2_dev_free(ca); bch2_dev_free(ca);
bch2_free_super(&sb); bch2_free_super(&sb);
kfree(_errbuf); printbuf_exit(&errbuf);
return ret; return ret;
err_late: err_late:
up_write(&c->state_lock); up_write(&c->state_lock);
...@@ -1906,8 +1900,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, ...@@ -1906,8 +1900,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
struct bch_sb_field_members *mi; struct bch_sb_field_members *mi;
unsigned i, best_sb = 0; unsigned i, best_sb = 0;
const char *err; const char *err;
char *_errbuf = NULL; struct printbuf errbuf = PRINTBUF;
struct printbuf errbuf;
int ret = 0; int ret = 0;
if (!try_module_get(THIS_MODULE)) if (!try_module_get(THIS_MODULE))
...@@ -1920,14 +1913,6 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, ...@@ -1920,14 +1913,6 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
goto err; goto err;
} }
_errbuf = kmalloc(4096, GFP_KERNEL);
if (!_errbuf) {
ret = -ENOMEM;
goto err;
}
errbuf = _PBUF(_errbuf, 4096);
sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL); sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
if (!sb) { if (!sb) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -1991,7 +1976,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, ...@@ -1991,7 +1976,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
} }
out: out:
kfree(sb); kfree(sb);
kfree(_errbuf); printbuf_exit(&errbuf);
module_put(THIS_MODULE); module_put(THIS_MODULE);
pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c)); pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c));
return c; return c;
......
...@@ -46,8 +46,28 @@ struct sysfs_ops type ## _sysfs_ops = { \ ...@@ -46,8 +46,28 @@ struct sysfs_ops type ## _sysfs_ops = { \
} }
#define SHOW(fn) \ #define SHOW(fn) \
static ssize_t fn ## _to_text(struct printbuf *, \
struct kobject *, struct attribute *);\
\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\ static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
char *buf) \ char *buf) \
{ \
struct printbuf out = PRINTBUF; \
ssize_t ret = fn ## _to_text(&out, kobj, attr); \
\
if (!ret && out.allocation_failure) \
ret = -ENOMEM; \
\
if (!ret) { \
ret = min_t(size_t, out.pos, PAGE_SIZE - 1); \
memcpy(buf, out.buf, ret); \
} \
printbuf_exit(&out); \
return ret; \
} \
\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
struct attribute *attr)
#define STORE(fn) \ #define STORE(fn) \
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\ static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
...@@ -64,22 +84,19 @@ static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\ ...@@ -64,22 +84,19 @@ static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
#define sysfs_printf(file, fmt, ...) \ #define sysfs_printf(file, fmt, ...) \
do { \ do { \
if (attr == &sysfs_ ## file) \ if (attr == &sysfs_ ## file) \
return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\ pr_buf(out, fmt "\n", __VA_ARGS__); \
} while (0) } while (0)
#define sysfs_print(file, var) \ #define sysfs_print(file, var) \
do { \ do { \
if (attr == &sysfs_ ## file) \ if (attr == &sysfs_ ## file) \
return snprint(buf, PAGE_SIZE, var); \ snprint(out, var); \
} while (0) } while (0)
#define sysfs_hprint(file, val) \ #define sysfs_hprint(file, val) \
do { \ do { \
if (attr == &sysfs_ ## file) { \ if (attr == &sysfs_ ## file) \
bch2_hprint(&out, val); \ bch2_hprint(out, val); \
pr_buf(&out, "\n"); \
return out.pos - buf; \
} \
} while (0) } while (0)
#define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var)) #define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var))
...@@ -348,7 +365,6 @@ static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c) ...@@ -348,7 +365,6 @@ static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
SHOW(bch2_fs) SHOW(bch2_fs)
{ {
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
struct printbuf out = _PBUF(buf, PAGE_SIZE);
sysfs_print(minor, c->minor); sysfs_print(minor, c->minor);
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b); sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
...@@ -365,10 +381,8 @@ SHOW(bch2_fs) ...@@ -365,10 +381,8 @@ SHOW(bch2_fs)
sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic); sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
if (attr == &sysfs_gc_gens_pos) { if (attr == &sysfs_gc_gens_pos)
bch2_gc_gens_pos_to_text(&out, c); bch2_gc_gens_pos_to_text(out, c);
return out.pos - buf;
}
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
...@@ -378,83 +392,54 @@ SHOW(bch2_fs) ...@@ -378,83 +392,54 @@ SHOW(bch2_fs)
max(0LL, c->copygc_wait - max(0LL, c->copygc_wait -
atomic64_read(&c->io_clock[WRITE].now)) << 9); atomic64_read(&c->io_clock[WRITE].now)) << 9);
if (attr == &sysfs_rebalance_work) { if (attr == &sysfs_rebalance_work)
bch2_rebalance_work_to_text(&out, c); bch2_rebalance_work_to_text(out, c);
return out.pos - buf;
}
sysfs_print(promote_whole_extents, c->promote_whole_extents); sysfs_print(promote_whole_extents, c->promote_whole_extents);
/* Debugging: */ /* Debugging: */
if (attr == &sysfs_journal_debug) { if (attr == &sysfs_journal_debug)
bch2_journal_debug_to_text(&out, &c->journal); bch2_journal_debug_to_text(out, &c->journal);
return out.pos - buf;
}
if (attr == &sysfs_journal_pins) { if (attr == &sysfs_journal_pins)
bch2_journal_pins_to_text(&out, &c->journal); bch2_journal_pins_to_text(out, &c->journal);
return out.pos - buf;
}
if (attr == &sysfs_btree_updates) { if (attr == &sysfs_btree_updates)
bch2_btree_updates_to_text(&out, c); bch2_btree_updates_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_dirty_btree_nodes) { if (attr == &sysfs_dirty_btree_nodes)
bch2_dirty_btree_nodes_to_text(&out, c); bch2_dirty_btree_nodes_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_btree_cache) { if (attr == &sysfs_btree_cache)
bch2_btree_cache_to_text(&out, c); bch2_btree_cache_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_btree_key_cache) { if (attr == &sysfs_btree_key_cache)
bch2_btree_key_cache_to_text(&out, &c->btree_key_cache); bch2_btree_key_cache_to_text(out, &c->btree_key_cache);
return out.pos - buf;
}
if (attr == &sysfs_btree_transactions) { if (attr == &sysfs_btree_transactions)
bch2_btree_trans_to_text(&out, c); bch2_btree_trans_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_stripes_heap) { if (attr == &sysfs_stripes_heap)
bch2_stripes_heap_to_text(&out, c); bch2_stripes_heap_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_open_buckets) { if (attr == &sysfs_open_buckets)
bch2_open_buckets_to_text(&out, c); bch2_open_buckets_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_compression_stats) { if (attr == &sysfs_compression_stats)
bch2_compression_stats_to_text(&out, c); bch2_compression_stats_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_new_stripes) { if (attr == &sysfs_new_stripes)
bch2_new_stripes_to_text(&out, c); bch2_new_stripes_to_text(out, c);
return out.pos - buf;
}
if (attr == &sysfs_io_timers_read) { if (attr == &sysfs_io_timers_read)
bch2_io_timers_to_text(&out, &c->io_clock[READ]); bch2_io_timers_to_text(out, &c->io_clock[READ]);
return out.pos - buf;
}
if (attr == &sysfs_io_timers_write) {
bch2_io_timers_to_text(&out, &c->io_clock[WRITE]);
return out.pos - buf;
}
if (attr == &sysfs_data_jobs) { if (attr == &sysfs_io_timers_write)
data_progress_to_text(&out, c); bch2_io_timers_to_text(out, &c->io_clock[WRITE]);
return out.pos - buf;
} if (attr == &sysfs_data_jobs)
data_progress_to_text(out, c);
return 0; return 0;
} }
...@@ -567,7 +552,7 @@ struct attribute *bch2_fs_files[] = { ...@@ -567,7 +552,7 @@ struct attribute *bch2_fs_files[] = {
SHOW(bch2_fs_internal) SHOW(bch2_fs_internal)
{ {
struct bch_fs *c = container_of(kobj, struct bch_fs, internal); struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
return bch2_fs_show(&c->kobj, attr, buf); return bch2_fs_to_text(out, &c->kobj, attr);
} }
STORE(bch2_fs_internal) STORE(bch2_fs_internal)
...@@ -617,16 +602,15 @@ struct attribute *bch2_fs_internal_files[] = { ...@@ -617,16 +602,15 @@ struct attribute *bch2_fs_internal_files[] = {
SHOW(bch2_fs_opts_dir) SHOW(bch2_fs_opts_dir)
{ {
struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir); struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
const struct bch_option *opt = container_of(attr, struct bch_option, attr); const struct bch_option *opt = container_of(attr, struct bch_option, attr);
int id = opt - bch2_opt_table; int id = opt - bch2_opt_table;
u64 v = bch2_opt_get_by_id(&c->opts, id); u64 v = bch2_opt_get_by_id(&c->opts, id);
bch2_opt_to_text(&out, c, opt, v, OPT_SHOW_FULL_LIST); bch2_opt_to_text(out, c, opt, v, OPT_SHOW_FULL_LIST);
pr_buf(&out, "\n"); pr_char(out, '\n');
return out.pos - buf; return 0;
} }
STORE(bch2_fs_opts_dir) STORE(bch2_fs_opts_dir)
...@@ -690,13 +674,10 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj) ...@@ -690,13 +674,10 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj)
SHOW(bch2_fs_time_stats) SHOW(bch2_fs_time_stats)
{ {
struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats); struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
struct printbuf out = _PBUF(buf, PAGE_SIZE);
#define x(name) \ #define x(name) \
if (attr == &sysfs_time_stat_##name) { \ if (attr == &sysfs_time_stat_##name) \
bch2_time_stats_to_text(&out, &c->times[BCH_TIME_##name]);\ bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
return out.pos - buf; \
}
BCH_TIME_STATS() BCH_TIME_STATS()
#undef x #undef x
...@@ -812,7 +793,6 @@ SHOW(bch2_dev) ...@@ -812,7 +793,6 @@ SHOW(bch2_dev)
{ {
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
struct bch_fs *c = ca->fs; struct bch_fs *c = ca->fs;
struct printbuf out = _PBUF(buf, PAGE_SIZE);
sysfs_printf(uuid, "%pU\n", ca->uuid.b); sysfs_printf(uuid, "%pU\n", ca->uuid.b);
...@@ -825,58 +805,47 @@ SHOW(bch2_dev) ...@@ -825,58 +805,47 @@ SHOW(bch2_dev)
if (attr == &sysfs_label) { if (attr == &sysfs_label) {
if (ca->mi.group) { if (ca->mi.group) {
mutex_lock(&c->sb_lock); mutex_lock(&c->sb_lock);
bch2_disk_path_to_text(&out, c->disk_sb.sb, bch2_disk_path_to_text(out, c->disk_sb.sb,
ca->mi.group - 1); ca->mi.group - 1);
mutex_unlock(&c->sb_lock); mutex_unlock(&c->sb_lock);
} }
pr_buf(&out, "\n"); pr_char(out, '\n');
return out.pos - buf;
} }
if (attr == &sysfs_has_data) { if (attr == &sysfs_has_data) {
bch2_flags_to_text(&out, bch2_data_types, bch2_flags_to_text(out, bch2_data_types,
bch2_dev_has_data(c, ca)); bch2_dev_has_data(c, ca));
pr_buf(&out, "\n"); pr_char(out, '\n');
return out.pos - buf;
} }
if (attr == &sysfs_state_rw) { if (attr == &sysfs_state_rw) {
bch2_string_opt_to_text(&out, bch2_member_states, bch2_string_opt_to_text(out, bch2_member_states,
ca->mi.state); ca->mi.state);
pr_buf(&out, "\n"); pr_char(out, '\n');
return out.pos - buf;
} }
if (attr == &sysfs_iodone) { if (attr == &sysfs_iodone)
dev_iodone_to_text(&out, ca); dev_iodone_to_text(out, ca);
return out.pos - buf;
}
sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ])); sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE])); sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));
if (attr == &sysfs_io_latency_stats_read) { if (attr == &sysfs_io_latency_stats_read)
bch2_time_stats_to_text(&out, &ca->io_latency[READ]); bch2_time_stats_to_text(out, &ca->io_latency[READ]);
return out.pos - buf;
} if (attr == &sysfs_io_latency_stats_write)
if (attr == &sysfs_io_latency_stats_write) { bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);
bch2_time_stats_to_text(&out, &ca->io_latency[WRITE]);
return out.pos - buf;
}
sysfs_printf(congested, "%u%%", sysfs_printf(congested, "%u%%",
clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX) clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
* 100 / CONGESTED_MAX); * 100 / CONGESTED_MAX);
if (attr == &sysfs_reserve_stats) { if (attr == &sysfs_reserve_stats)
reserve_stats_to_text(&out, ca); reserve_stats_to_text(out, ca);
return out.pos - buf;
} if (attr == &sysfs_alloc_debug)
if (attr == &sysfs_alloc_debug) { dev_alloc_debug_to_text(out, ca);
dev_alloc_debug_to_text(&out, ca);
return out.pos - buf;
}
return 0; return 0;
} }
......
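With the SHOW() rework above, sysfs show callbacks no longer scnprintf() into the page buffer and return out.pos - buf; each fn##_to_text() just fills the printbuf it is handed, and the generated fn##_show() wrapper copies at most PAGE_SIZE - 1 bytes out and turns an allocation failure into -ENOMEM. A sketch of what a show hook looks like under the new macro; example_obj, sysfs_frobnicated, sysfs_status and example_status_to_text() are hypothetical names, not part of this patch:

SHOW(example_obj)
{
	struct example_obj *obj = container_of(kobj, struct example_obj, kobj);

	/* sysfs_printf()/sysfs_print() now write into *out instead of returning */
	sysfs_printf(frobnicated, "%u", obj->frobnicated);

	if (attr == &sysfs_status)
		example_status_to_text(out, obj);

	return 0;
}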
...@@ -871,7 +871,9 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname, ...@@ -871,7 +871,9 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
u64 nr, unsigned nr_threads) u64 nr, unsigned nr_threads)
{ {
struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads }; struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
char name_buf[20], nr_buf[20], per_sec_buf[20]; char name_buf[20];
struct printbuf nr_buf = PRINTBUF;
struct printbuf per_sec_buf = PRINTBUF;
unsigned i; unsigned i;
u64 time; u64 time;
...@@ -932,13 +934,15 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname, ...@@ -932,13 +934,15 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
time = j.finish - j.start; time = j.finish - j.start;
scnprintf(name_buf, sizeof(name_buf), "%s:", testname); scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
bch2_hprint(&PBUF(nr_buf), nr); bch2_hprint(&nr_buf, nr);
bch2_hprint(&PBUF(per_sec_buf), div64_u64(nr * NSEC_PER_SEC, time)); bch2_hprint(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n", printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
name_buf, nr_buf, nr_threads, name_buf, nr_buf.buf, nr_threads,
div_u64(time, NSEC_PER_SEC), div_u64(time, NSEC_PER_SEC),
div_u64(time * nr_threads, nr), div_u64(time * nr_threads, nr),
per_sec_buf); per_sec_buf.buf);
printbuf_exit(&per_sec_buf);
printbuf_exit(&nr_buf);
return j.ret; return j.ret;
} }
......
...@@ -99,6 +99,38 @@ STRTO_H(strtoll, long long) ...@@ -99,6 +99,38 @@ STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long) STRTO_H(strtoull, unsigned long long)
STRTO_H(strtou64, u64) STRTO_H(strtou64, u64)
static int bch2_printbuf_realloc(struct printbuf *out, unsigned extra)
{
unsigned new_size = roundup_pow_of_two(out->size + extra);
char *buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_ATOMIC);
if (!buf) {
out->allocation_failure = true;
return -ENOMEM;
}
out->buf = buf;
out->size = new_size;
return 0;
}
void bch2_pr_buf(struct printbuf *out, const char *fmt, ...)
{
va_list args;
int len;
do {
va_start(args, fmt);
len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args);
va_end(args);
} while (len + 1 >= printbuf_remaining(out) &&
!bch2_printbuf_realloc(out, len + 1));
len = min_t(size_t, len,
printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
out->pos += len;
}
void bch2_hprint(struct printbuf *buf, s64 v) void bch2_hprint(struct printbuf *buf, s64 v)
{ {
int u, t = 0; int u, t = 0;
...@@ -151,9 +183,6 @@ void bch2_flags_to_text(struct printbuf *out, ...@@ -151,9 +183,6 @@ void bch2_flags_to_text(struct printbuf *out,
unsigned bit, nr = 0; unsigned bit, nr = 0;
bool first = true; bool first = true;
if (out->pos != out->end)
*out->pos = '\0';
while (list[nr]) while (list[nr])
nr++; nr++;
......
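The bch2_pr_buf() added above formats into whatever space is currently left and, if the output would have been truncated, grows the buffer via bch2_printbuf_realloc() (krealloc to the next power of two) and retries; when the allocation fails it only sets ->allocation_failure, so printing degrades instead of crashing. Since a nonzero ->atomic makes the realloc use GFP_ATOMIC, something like the following should be usable from contexts that cannot sleep; this is a sketch, and treating ->atomic as a nestable count is an assumption about intended use:

static void example_atomic_report(u64 seq)
{
	struct printbuf buf = PRINTBUF;

	buf.atomic++;			/* allocations fall back to GFP_ATOMIC */
	pr_buf(&buf, "journal seq %llu", seq);
	buf.atomic--;

	if (!buf.allocation_failure)
		printk(KERN_INFO "%s\n", buf.buf);

	printbuf_exit(&buf);
}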
...@@ -242,19 +242,39 @@ enum printbuf_units { ...@@ -242,19 +242,39 @@ enum printbuf_units {
}; };
struct printbuf { struct printbuf {
char *pos; char *buf;
char *end; unsigned size;
char *last_newline; unsigned pos;
char *last_field; unsigned last_newline;
unsigned last_field;
unsigned indent; unsigned indent;
enum printbuf_units units; enum printbuf_units units:8;
unsigned tabstop; u8 atomic;
unsigned tabstops[4]; bool allocation_failure:1;
u8 tabstop;
u8 tabstops[4];
}; };
#define PRINTBUF ((struct printbuf) { NULL })
static inline void printbuf_exit(struct printbuf *buf)
{
kfree(buf->buf);
buf->buf = ERR_PTR(-EINTR); /* poison value */
}
static inline void printbuf_reset(struct printbuf *buf)
{
buf->pos = 0;
buf->last_newline = 0;
buf->last_field = 0;
buf->indent = 0;
buf->tabstop = 0;
}
static inline size_t printbuf_remaining(struct printbuf *buf) static inline size_t printbuf_remaining(struct printbuf *buf)
{ {
return buf->end - buf->pos; return buf->size - buf->pos;
} }
static inline size_t printbuf_linelen(struct printbuf *buf) static inline size_t printbuf_linelen(struct printbuf *buf)
...@@ -262,29 +282,13 @@ static inline size_t printbuf_linelen(struct printbuf *buf) ...@@ -262,29 +282,13 @@ static inline size_t printbuf_linelen(struct printbuf *buf)
return buf->pos - buf->last_newline; return buf->pos - buf->last_newline;
} }
#define _PBUF(_buf, _len) \ void bch2_pr_buf(struct printbuf *out, const char *fmt, ...);
((struct printbuf) { \
.pos = _buf, \
.end = _buf + _len, \
.last_newline = _buf, \
.last_field = _buf, \
})
#define PBUF(_buf) _PBUF(_buf, sizeof(_buf)) #define pr_buf(_out, ...) bch2_pr_buf(_out, __VA_ARGS__)
#define pr_buf(_out, ...) \
do { \
(_out)->pos += scnprintf((_out)->pos, printbuf_remaining(_out), \
__VA_ARGS__); \
} while (0)
static inline void pr_char(struct printbuf *out, char c) static inline void pr_char(struct printbuf *out, char c)
{ {
if (printbuf_remaining(out) > 1) { bch2_pr_buf(out, "%c", c);
*out->pos = c;
out->pos++;
}
} }
static inline void pr_indent_push(struct printbuf *buf, unsigned spaces) static inline void pr_indent_push(struct printbuf *buf, unsigned spaces)
...@@ -298,7 +302,7 @@ static inline void pr_indent_pop(struct printbuf *buf, unsigned spaces) ...@@ -298,7 +302,7 @@ static inline void pr_indent_pop(struct printbuf *buf, unsigned spaces)
{ {
if (buf->last_newline + buf->indent == buf->pos) { if (buf->last_newline + buf->indent == buf->pos) {
buf->pos -= spaces; buf->pos -= spaces;
buf->pos = '\0'; buf->buf[buf->pos] = '\0';
} }
buf->indent -= spaces; buf->indent -= spaces;
} }
...@@ -341,12 +345,12 @@ static inline void pr_tab_rjust(struct printbuf *buf) ...@@ -341,12 +345,12 @@ static inline void pr_tab_rjust(struct printbuf *buf)
BUG_ON(buf->tabstop > ARRAY_SIZE(buf->tabstops)); BUG_ON(buf->tabstop > ARRAY_SIZE(buf->tabstops));
if (shift > 0) { if (shift > 0) {
memmove(buf->last_field + shift, memmove(buf->buf + buf->last_field + shift,
buf->last_field, buf->buf + buf->last_field,
move); move);
memset(buf->last_field, ' ', shift); memset(buf->buf + buf->last_field, ' ', shift);
buf->pos += shift; buf->pos += shift;
*buf->pos = 0; buf->buf[buf->pos] = 0;
} }
buf->last_field = buf->pos; buf->last_field = buf->pos;
...@@ -460,8 +464,8 @@ static inline int bch2_strtoul_h(const char *cp, long *res) ...@@ -460,8 +464,8 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
_r; \ _r; \
}) })
#define snprint(buf, size, var) \ #define snprint(out, var) \
snprintf(buf, size, \ pr_buf(out, \
type_is(var, int) ? "%i\n" \ type_is(var, int) ? "%i\n" \
: type_is(var, unsigned) ? "%u\n" \ : type_is(var, unsigned) ? "%u\n" \
: type_is(var, long) ? "%li\n" \ : type_is(var, long) ? "%li\n" \
...@@ -605,10 +609,8 @@ do { \ ...@@ -605,10 +609,8 @@ do { \
sysfs_print(name##_rate_d_term, (var)->d_term); \ sysfs_print(name##_rate_d_term, (var)->d_term); \
sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \ sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \
\ \
if (attr == &sysfs_##name##_rate_debug) { \ if (attr == &sysfs_##name##_rate_debug) \
bch2_pd_controller_debug_to_text(&out, var); \ bch2_pd_controller_debug_to_text(out, var); \
return out.pos - buf; \
} \
} while (0) } while (0)
#define sysfs_pd_controller_store(name, var) \ #define sysfs_pd_controller_store(name, var) \
......
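Taken together, the util.h changes above define the conversion recipe used throughout this patch: replace a fixed stack buffer plus PBUF()/_PBUF() with a struct printbuf initialized to PRINTBUF, print into it with pr_buf()/pr_char() or the *_to_text() helpers, read the result from .buf, and release it with printbuf_exit(). A before/after sketch with hypothetical report_key_old()/report_key_new() helpers:

/* old style: fixed-size buffer on the stack */
static void report_key_old(struct bch_fs *c, struct bkey_s_c k)
{
	char buf[200];

	bch2_bkey_val_to_text(&PBUF(buf), c, k);
	pr_err("%s", buf);
}

/* new style: heap printbuf, grows on demand, must be exited */
static void report_key_new(struct bch_fs *c, struct bkey_s_c k)
{
	struct printbuf buf = PRINTBUF;

	bch2_bkey_val_to_text(&buf, c, k);
	pr_err("%s", buf.buf);
	printbuf_exit(&buf);
}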
...@@ -426,9 +426,8 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler, ...@@ -426,9 +426,8 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
bch2_inode_opts_to_opts(bch2_inode_opts_get(&inode->ei_inode)); bch2_inode_opts_to_opts(bch2_inode_opts_get(&inode->ei_inode));
const struct bch_option *opt; const struct bch_option *opt;
int id, inode_opt_id; int id, inode_opt_id;
char buf[512]; struct printbuf out = PRINTBUF;
struct printbuf out = PBUF(buf); int ret;
unsigned val_len;
u64 v; u64 v;
id = bch2_opt_lookup(name); id = bch2_opt_lookup(name);
...@@ -451,14 +450,19 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler, ...@@ -451,14 +450,19 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
v = bch2_opt_get_by_id(&opts, id); v = bch2_opt_get_by_id(&opts, id);
bch2_opt_to_text(&out, c, opt, v, 0); bch2_opt_to_text(&out, c, opt, v, 0);
val_len = out.pos - buf; ret = out.pos;
if (buffer && val_len > size) if (out.allocation_failure) {
return -ERANGE; ret = -ENOMEM;
} else if (buffer) {
if (out.pos > size)
ret = -ERANGE;
else
memcpy(buffer, out.buf, out.pos);
}
if (buffer) printbuf_exit(&out);
memcpy(buffer, buf, val_len); return ret;
return val_len;
} }
static int bch2_xattr_bcachefs_get(const struct xattr_handler *handler, static int bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
......