Commit 2e63e180 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Stash a copy of key being overwritten in btree_insert_entry

We currently need to call bch2_btree_path_peek_slot() multiple times in
the transaction commit path, and some of those calls also need to be
updated to check keys from journal replay. Let's consolidate this and
stash the key being overwritten in btree_insert_entry.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent ce91abd6
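
Rough shape of the change, as a sketch assembled from the hunks below (not itself part of the patch): previously each consumer in the commit path re-derived the key being overwritten with its own peek, whereas now the lookup happens once when the update is queued and the result is stashed in the entry.

        /* before: each commit-path consumer did its own lookup */
        struct bkey u;
        struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);

        /* after: bch2_trans_update_by_path() peeks once and stashes the result */
        i->old_v = bch2_btree_path_peek_slot(path, &i->old_k).v;
        i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;
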
@@ -1987,6 +1987,7 @@ inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct
 		if (unlikely(!ck->valid))
 			goto hole;
 
+		*u = ck->k->k;
 		k = bkey_i_to_s_c(ck->k);
 	}
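
Callers of bch2_btree_path_peek_slot() pass a stack bkey that the function may fill, as in the code removed from do_bch2_trans_commit() further down; the added *u = ck->k->k makes the cached-key path fill that copy too, which the new stash in bch2_trans_update_by_path() (passing &i->old_k as u) relies on. A minimal sketch of that calling pattern, using only what appears in this commit:

        struct bkey u;
        struct bkey_s_c k = bch2_btree_path_peek_slot(path, &u);
        int ret = bkey_err(k);
        if (unlikely(ret))
                return ret;
        /* with the line added above, u is now also valid for cached btrees */
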
@@ -339,12 +339,20 @@ struct btree_insert_entry {
 	unsigned		flags;
 	u8			bkey_type;
 	enum btree_id		btree_id:8;
-	u8			level;
+	u8			level:4;
 	bool			cached:1;
 	bool			insert_trigger_run:1;
 	bool			overwrite_trigger_run:1;
+	/*
+	 * @old_k may be a key from the journal; @old_btree_u64s always refers
+	 * to the size of the key being overwritten in the btree:
+	 */
+	u8			old_btree_u64s;
 	struct bkey_i		*k;
 	struct btree_path	*path;
+	/* key being overwritten: */
+	struct bkey		old_k;
+	const struct bch_val	*old_v;
 	unsigned long		ip_allocated;
 };
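
Taken together, old_k and old_v let a consumer reconstitute the old key as an ordinary bkey_s_c without touching the btree path again, mirroring the fs_usage_apply_warn() hunk at the end. A sketch, assuming an entry i inside trans_for_each_update(); the deleted-key guard is illustrative:

        /* the stashed copy reconstitutes a normal bkey_s_c */
        struct bkey_s_c old = { &i->old_k, i->old_v };

        if (!bkey_deleted(old.k))
                bch2_bkey_val_to_text(&PBUF(buf), c, old);
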
@@ -653,7 +653,6 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i;
-	struct bkey_s_c old;
 	int ret, u64s_delta = 0;
 
 	trans_for_each_update(trans, i) {
@@ -671,22 +670,11 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 	}
 
 	trans_for_each_update(trans, i) {
-		struct bkey u;
-
-		/*
-		 * peek_slot() doesn't yet work on iterators that point to
-		 * interior nodes:
-		 */
-		if (i->cached || i->level)
+		if (i->cached)
 			continue;
 
-		old = bch2_btree_path_peek_slot(i->path, &u);
-		ret = bkey_err(old);
-		if (unlikely(ret))
-			return ret;
-
 		u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
-		u64s_delta -= !bkey_deleted(old.k) ? old.k->u64s : 0;
+		u64s_delta -= i->old_btree_u64s;
 
 		if (!same_leaf_as_next(trans, i)) {
 			if (u64s_delta <= 0) {
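
A quick numeric illustration of the accounting this hunk preserves (the sizes are made up):

        /*
         * new key i->k:           8 u64s  ->  u64s_delta += 8
         * key already in btree:   5 u64s  ->  u64s_delta -= 5   (i->old_btree_u64s)
         *                                     u64s_delta  = +3  -> the leaf grows
         *
         * a whiteout (bkey_deleted(&i->k->k)) contributes 0, so deleting that
         * same 5-u64 key gives u64s_delta = -5 -> the leaf shrinks
         */
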
@@ -1432,11 +1420,19 @@ int __must_check bch2_trans_update_by_path(struct btree_trans *trans, struct btr
 		}
 
 		bch2_path_put(trans, i->path, true);
-		*i = n;
-	} else
+		i->flags	= n.flags;
+		i->cached	= n.cached;
+		i->k		= n.k;
+		i->path		= n.path;
+		i->ip_allocated	= n.ip_allocated;
+	} else {
 		array_insert_item(trans->updates, trans->nr_updates,
 				  i - trans->updates, n);
 
+		i->old_v = bch2_btree_path_peek_slot(path, &i->old_k).v;
+		i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;
+	}
+
 	__btree_path_get(n.path, true);
 
 	return 0;
 }
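
Why *i = n becomes a field-by-field copy: when a transaction updates the same key twice, the second update merges into the existing entry, and copying only flags/cached/k/path/ip_allocated keeps the old_k/old_v/old_btree_u64s stashed by the first call, so they continue to describe the key in the btree rather than the earlier in-transaction update. A sketch of that scenario (k1, k2 and the zero flags are illustrative):

        bch2_trans_update(trans, iter, &k1, 0); /* peeks the btree, stashes old_k/old_v    */
        bch2_trans_update(trans, iter, &k2, 0); /* reuses the entry: k is replaced, the    */
                                                /* stashed old_* fields are left untouched */
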
@@ -1322,25 +1322,14 @@ void fs_usage_apply_warn(struct btree_trans *trans,
 		should_not_have_added, disk_res_sectors);
 
 	trans_for_each_update(trans, i) {
+		struct bkey_s_c old = { &i->old_k, i->old_v };
+
 		pr_err("while inserting");
 		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
-		pr_err("%s", buf);
+		pr_err(" %s", buf);
 		pr_err("overlapping with");
-
-		if (!i->cached) {
-			struct bkey u;
-			struct bkey_s_c k = bch2_btree_path_peek_slot(i->path, &u);
-
-			bch2_bkey_val_to_text(&PBUF(buf), c, k);
-			pr_err("%s", buf);
-		} else {
-			struct bkey_cached *ck = (void *) i->path->l[0].b;
-
-			if (ck->valid) {
-				bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
-				pr_err("%s", buf);
-			}
-		}
+		bch2_bkey_val_to_text(&PBUF(buf), c, old);
+		pr_err(" %s", buf);
 	}
 
 	__WARN();
 }