Commit eabb10dc authored by Brian Foster, committed by Kent Overstreet

bcachefs: support btree updates of prejournaled keys

Introduce support for prejournaled key updates. This allows a
transaction to commit an update for a key that already exists (and
is pinned) in the journal. This is required for btree write buffer
updates as the current scheme of journaling both on write buffer
insertion and write buffer (slow path) flush is unsafe in certain
crash recovery scenarios.

Create a small trans update wrapper that passes the seq at which the
key already resides in the journal into the btree_insert_entry. From
there, trans commit passes the seq into the btree insert path, where
it is used to manage the journal pin for the associated btree leaf.

Note that this patch only introduces the underlying mechanism and
otherwise includes no functional changes.
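For illustration, a hedged sketch of how a caller might use the new
wrapper once the write buffer flush path is converted over (the helper
below is hypothetical and not part of this patch):

	/*
	 * Sketch: update a key that was journaled earlier at journal_seq.
	 * bch2_trans_update_seq() sets BTREE_UPDATE_NOJOURNAL so commit
	 * does not journal the key a second time, and BTREE_UPDATE_PREJOURNAL
	 * so the leaf insert pins journal_seq rather than the commit's own
	 * journal reservation.
	 */
	static int flush_prejournaled_key(struct btree_trans *trans,
					  struct btree_iter *iter,
					  struct bkey_i *k, u64 journal_seq)
	{
		return bch2_trans_update_seq(trans, journal_seq, iter, k, 0) ?:
		       bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
	}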
Signed-off-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 78623ee0
@@ -98,6 +98,7 @@ static inline int bch2_mark_key(struct btree_trans *trans,
 enum btree_update_flags {
 	__BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
 	__BTREE_UPDATE_NOJOURNAL,
+	__BTREE_UPDATE_PREJOURNAL,
 	__BTREE_UPDATE_KEY_CACHE_RECLAIM,
 
 	__BTREE_TRIGGER_NORUN,		/* Don't run triggers at all */
@@ -112,6 +113,7 @@ enum btree_update_flags {
 #define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
 #define BTREE_UPDATE_NOJOURNAL		(1U << __BTREE_UPDATE_NOJOURNAL)
+#define BTREE_UPDATE_PREJOURNAL		(1U << __BTREE_UPDATE_PREJOURNAL)
 #define BTREE_UPDATE_KEY_CACHE_RECLAIM	(1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
 
 #define BTREE_TRIGGER_NORUN		(1U << __BTREE_TRIGGER_NORUN)
@@ -380,6 +380,7 @@ struct btree_insert_entry {
 	u8			old_btree_u64s;
 	struct bkey_i		*k;
 	struct btree_path	*path;
+	u64			seq;
 	/* key being overwritten: */
 	struct bkey		old_k;
 	const struct bch_val	*old_v;
@@ -111,6 +111,8 @@ int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
 int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
 				   struct bkey_i *, enum btree_update_flags);
+int __must_check bch2_trans_update_seq(struct btree_trans *, u64, struct btree_iter *,
+				       struct bkey_i *, enum btree_update_flags);
 int __must_check bch2_trans_update_buffered(struct btree_trans *,
 					    enum btree_id, struct bkey_i *);
@@ -747,9 +747,14 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
 	trans_for_each_update(trans, i) {
 		i->k->k.needs_whiteout = false;
 
-		if (!i->cached)
-			bch2_btree_insert_key_leaf(trans, i->path, i->k, trans->journal_res.seq);
-		else if (!i->key_cache_already_flushed)
+		if (!i->cached) {
+			u64 seq = trans->journal_res.seq;
+
+			if (i->flags & BTREE_UPDATE_PREJOURNAL)
+				seq = i->seq;
+
+			bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
+		} else if (!i->key_cache_already_flushed)
 			bch2_btree_insert_key_cached(trans, flags, i);
 		else {
 			bch2_btree_key_cache_drop(trans, i->path);
@@ -1571,12 +1576,21 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 {
 	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i, n;
+	u64 seq = 0;
 	int cmp;
 
 	EBUG_ON(!path->should_be_locked);
 	EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
 	EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
+	/*
+	 * The transaction journal res hasn't been allocated at this point.
+	 * That occurs at commit time. Reuse the seq field to pass in the seq
+	 * of a prejournaled key.
+	 */
+	if (flags & BTREE_UPDATE_PREJOURNAL)
+		seq = trans->journal_res.seq;
+
 	n = (struct btree_insert_entry) {
 		.flags		= flags,
 		.bkey_type	= __btree_node_type(path->level, path->btree_id),
@@ -1585,6 +1599,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 		.cached		= path->cached,
 		.path		= path,
 		.k		= k,
+		.seq		= seq,
 		.ip_allocated	= ip,
 	};
@@ -1612,6 +1627,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 		i->cached	= n.cached;
 		i->k		= n.k;
 		i->path		= n.path;
+		i->seq		= n.seq;
 		i->ip_allocated	= n.ip_allocated;
 	} else {
 		array_insert_item(trans->updates, trans->nr_updates,
@@ -1709,6 +1725,18 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 	return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
 }
 
+/*
+ * Add a transaction update for a key that has already been journaled.
+ */
+int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
+				       struct btree_iter *iter, struct bkey_i *k,
+				       enum btree_update_flags flags)
+{
+	trans->journal_res.seq = seq;
+	return bch2_trans_update(trans, iter, k, flags|BTREE_UPDATE_NOJOURNAL|
+						 BTREE_UPDATE_PREJOURNAL);
+}
+
 int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
 					    enum btree_id btree,
 					    struct bkey_i *k)
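Not shown in this diff is the consumer side: bch2_btree_insert_key_leaf()
takes the seq it is handed and pins that journal entry against the dirty
btree node, so a prejournaled key keeps its original journal entry pinned
until the leaf is written out. Roughly, as a simplified sketch (assumes
the bch2_btree_add_journal_pin() helper from the surrounding tree; the
wrapper name here is hypothetical):

	/*
	 * Sketch: pin journal_seq on the leaf. For BTREE_UPDATE_PREJOURNAL
	 * updates this is the seq where the key already resides, not the
	 * current commit's journal reservation.
	 */
	static void pin_leaf_journal_seq(struct bch_fs *c, struct btree *b,
					 u64 journal_seq)
	{
		bch2_btree_add_journal_pin(c, b, journal_seq);
	}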