Commit 64f2a880 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Fix bch2_extent_can_insert() not being called

It's supposed to check whether we're splitting a compressed extent and,
if so, get a bigger disk reservation - hence this fixes a "disk usage
increased by x without a reservation" bug.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c61b7e21
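
In short, the commit moves the bch2_extent_can_insert() call out of
btree_key_can_insert() (where the extents path evidently no longer reached
it) and into extent_update_to_keys(), so the check runs before an extent
update is converted into regular key updates. A minimal sketch of the
reordered call flow, condensed from the diff below (everything but the
relevant calls is elided):

	/* After this commit: the check for splitting a compressed extent,
	 * and the bigger disk reservation it may require, happen up front
	 * in extent_update_to_keys(), before the extent update is turned
	 * into regular key updates: */
	static int extent_update_to_keys(struct btree_trans *trans,
					 struct btree_iter *orig_iter,
					 struct bkey_i *insert)
	{
		int ret;

		ret = bch2_extent_can_insert(trans, orig_iter, insert);
		if (ret)
			return ret;
		/* ... convert to and run the non-extent update path ... */
		return 0;
	}

	/* btree_key_can_insert() now only checks that the insert fits in
	 * the btree node; u64s is passed by value since nothing adjusts
	 * it anymore. */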
@@ -568,6 +568,16 @@ static inline bool btree_node_is_extents(struct btree *b)
 	return btree_node_type_is_extents(btree_node_type(b));
 }
 
+static inline enum btree_node_type btree_iter_key_type(struct btree_iter *iter)
+{
+	return __btree_node_type(iter->level, iter->btree_id);
+}
+
+static inline bool btree_iter_is_extents(struct btree_iter *iter)
+{
+	return btree_node_type_is_extents(btree_iter_key_type(iter));
+}
+
 #define BTREE_NODE_TYPE_HAS_TRIGGERS		\
 	((1U << BKEY_TYPE_EXTENTS)|		\
 	 (1U << BKEY_TYPE_ALLOC)|		\
@@ -265,11 +265,10 @@ static enum btree_insert_ret
 btree_key_can_insert(struct btree_trans *trans,
 		     struct btree_iter *iter,
 		     struct bkey_i *insert,
-		     unsigned *u64s)
+		     unsigned u64s)
 {
 	struct bch_fs *c = trans->c;
 	struct btree *b = iter_l(iter)->b;
-	enum btree_insert_ret ret;
 
 	if (unlikely(btree_node_fake(b)))
 		return BTREE_INSERT_BTREE_NODE_FULL;
@@ -281,13 +280,7 @@ btree_key_can_insert(struct btree_trans *trans,
 	if (unlikely(btree_node_old_extent_overwrite(b)))
 		return BTREE_INSERT_BTREE_NODE_FULL;
 
-	ret = !(iter->flags & BTREE_ITER_IS_EXTENTS)
-		? BTREE_INSERT_OK
-		: bch2_extent_can_insert(trans, iter, insert);
-	if (ret)
-		return ret;
-
-	if (*u64s > bch_btree_keys_u64s_remaining(c, b))
+	if (unlikely(u64s > bch_btree_keys_u64s_remaining(c, b)))
 		return BTREE_INSERT_BTREE_NODE_FULL;
 
 	return BTREE_INSERT_OK;
@@ -297,7 +290,7 @@ static enum btree_insert_ret
 btree_key_can_insert_cached(struct btree_trans *trans,
 			    struct btree_iter *iter,
 			    struct bkey_i *insert,
-			    unsigned *u64s)
+			    unsigned u64s)
 {
 	struct bkey_cached *ck = (void *) iter->l[0].b;
 	unsigned new_u64s;
@@ -305,10 +298,10 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 
 	BUG_ON(iter->level);
 
-	if (*u64s <= ck->u64s)
+	if (u64s <= ck->u64s)
 		return BTREE_INSERT_OK;
 
-	new_u64s = roundup_pow_of_two(*u64s);
+	new_u64s = roundup_pow_of_two(u64s);
 	new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
 	if (!new_k)
 		return -ENOMEM;
@@ -414,8 +407,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 		u64s += i->k->k.u64s;
 		ret = btree_iter_type(i->iter) != BTREE_ITER_CACHED
-			? btree_key_can_insert(trans, i->iter, i->k, &u64s)
-			: btree_key_can_insert_cached(trans, i->iter, i->k, &u64s);
+			? btree_key_can_insert(trans, i->iter, i->k, u64s)
+			: btree_key_can_insert_cached(trans, i->iter, i->k, u64s);
 
 		if (ret) {
 			*stopped_at = i;
 			return ret;
@@ -733,6 +726,11 @@ static int extent_update_to_keys(struct btree_trans *trans,
 				 struct bkey_i *insert)
 {
 	struct btree_iter *iter;
+	int ret;
+
+	ret = bch2_extent_can_insert(trans, orig_iter, insert);
+	if (ret)
+		return ret;
 
 	if (bkey_deleted(&insert->k))
 		return 0;
@@ -1368,8 +1368,8 @@ int bch2_mark_update(struct btree_trans *trans,
 		     unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct btree *b = iter->l[0].b;
-	struct btree_node_iter node_iter = iter->l[0].iter;
+	struct btree *b = iter_l(iter)->b;
+	struct btree_node_iter node_iter = iter_l(iter)->iter;
 	struct bkey_packed *_k;
 	int ret = 0;
@@ -1431,32 +1431,38 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans,
 			disk_res_sectors);
 
 	trans_for_each_update(trans, i) {
-		struct btree_iter *iter = i->iter;
-		struct btree *b = iter->l[0].b;
-		struct btree_node_iter node_iter = iter->l[0].iter;
-		struct bkey_packed *_k;
-
 		pr_err("while inserting");
 		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
 		pr_err("%s", buf);
 		pr_err("overlapping with");
 
-		node_iter = iter->l[0].iter;
-		while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
-			struct bkey unpacked;
-			struct bkey_s_c k;
+		if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
+			struct btree *b = iter_l(i->iter)->b;
+			struct btree_node_iter node_iter = iter_l(i->iter)->iter;
+			struct bkey_packed *_k;
 
-			k = bkey_disassemble(b, _k, &unpacked);
+			while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
+				struct bkey unpacked;
+				struct bkey_s_c k;
 
-			if (btree_node_is_extents(b)
-			    ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
-			    : bkey_cmp(i->k->k.p, k.k->p))
-				break;
+				pr_info("_k %px format %u", _k, _k->format);
+				k = bkey_disassemble(b, _k, &unpacked);
 
-			bch2_bkey_val_to_text(&PBUF(buf), c, k);
-			pr_err("%s", buf);
+				if (btree_node_is_extents(b)
+				    ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
+				    : bkey_cmp(i->k->k.p, k.k->p))
+					break;
 
-			bch2_btree_node_iter_advance(&node_iter, b);
+				bch2_bkey_val_to_text(&PBUF(buf), c, k);
+				pr_err("%s", buf);
+
+				bch2_btree_node_iter_advance(&node_iter, b);
+			}
+		} else {
+			struct bkey_cached *ck = (void *) i->iter->l[0].b;
+
+			bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
+			pr_err("%s", buf);
 		}
 	}
 }
@@ -1808,8 +1814,8 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 			   struct bkey_i *insert,
 			   unsigned flags)
 {
-	struct btree *b = iter->l[0].b;
-	struct btree_node_iter node_iter = iter->l[0].iter;
+	struct btree *b = iter_l(iter)->b;
+	struct btree_node_iter node_iter = iter_l(iter)->iter;
 	struct bkey_packed *_k;
 	int ret;