Commit 6e738539 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Improve key marking interface

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 572ad769
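
The commit message is terse, so here is a short reading of the interface change as it appears in the header hunk near the end of this diff: the boolean "inserting" argument of bch2_mark_key(), bch2_mark_key_locked() and bch2_trans_mark_key() is dropped, and callers now describe what they are doing with BCH_BUCKET_MARK_* flags (INSERT, OVERWRITE, BUCKET_INVALIDATE) combined with the existing GC/ALLOC_READ/NOATOMIC flags. Before and after, copied from that hunk:

    /* before: a bool selected insert vs. overwrite marking */
    int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
                      bool, s64, struct bch_fs_usage *,
                      u64, unsigned);

    /* after: the bool is gone, the last argument carries BCH_BUCKET_MARK_* flags */
    int bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64,
                      struct bch_fs_usage *, u64, unsigned);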
@@ -232,9 +232,9 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
     bch2_trans_init(&trans, c, 0, 0);
     for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
-        bch2_mark_key(c, k, true, 0, NULL, 0,
-            BCH_BUCKET_MARK_NOATOMIC|
-            BCH_BUCKET_MARK_ALLOC_READ);
+        bch2_mark_key(c, k, 0, NULL, 0,
+            BCH_BUCKET_MARK_ALLOC_READ|
+            BCH_BUCKET_MARK_NOATOMIC);
     ret = bch2_trans_exit(&trans) ?: ret;
     if (ret) {
@@ -244,10 +244,9 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
     for_each_journal_key(*journal_keys, j)
         if (j->btree_id == BTREE_ID_ALLOC)
-            bch2_mark_key(c, bkey_i_to_s_c(j->k),
-                true, 0, NULL, 0,
-                BCH_BUCKET_MARK_NOATOMIC|
-                BCH_BUCKET_MARK_ALLOC_READ);
+            bch2_mark_key(c, bkey_i_to_s_c(j->k), 0, NULL, 0,
+                BCH_BUCKET_MARK_ALLOC_READ|
+                BCH_BUCKET_MARK_NOATOMIC);
     percpu_down_write(&c->mark_lock);
     bch2_dev_usage_from_buckets(c);
@@ -953,6 +952,7 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
             BTREE_INSERT_NOFAIL|
             BTREE_INSERT_USE_RESERVE|
             BTREE_INSERT_USE_ALLOC_RESERVE|
+            BTREE_INSERT_BUCKET_INVALIDATE|
             flags);
     if (ret == -EINTR)
         goto retry;
@@ -173,7 +173,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
         *max_stale = max(*max_stale, ptr_stale(ca, ptr));
     }
-    bch2_mark_key(c, k, true, k.k->size, NULL, 0, flags);
+    bch2_mark_key(c, k, k.k->size, NULL, 0, flags);
 fsck_err:
     return ret;
 }
@@ -420,8 +420,7 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
     for_each_pending_btree_node_free(c, as, d)
         if (d->index_update_done)
-            bch2_mark_key(c, bkey_i_to_s_c(&d->key),
-                true, 0, NULL, 0,
+            bch2_mark_key(c, bkey_i_to_s_c(&d->key), 0, NULL, 0,
                 BCH_BUCKET_MARK_GC);
     mutex_unlock(&c->btree_interior_update_lock);
@@ -48,6 +48,7 @@ enum {
     __BTREE_INSERT_NOMARK,
     __BTREE_INSERT_MARK_INMEM,
     __BTREE_INSERT_NO_CLEAR_REPLICAS,
+    __BTREE_INSERT_BUCKET_INVALIDATE,
     __BTREE_INSERT_NOWAIT,
     __BTREE_INSERT_GC_LOCK_HELD,
     __BCH_HASH_SET_MUST_CREATE,
@@ -94,6 +95,8 @@ enum {
 #define BTREE_INSERT_NO_CLEAR_REPLICAS  (1 << __BTREE_INSERT_NO_CLEAR_REPLICAS)
+#define BTREE_INSERT_BUCKET_INVALIDATE  (1 << __BTREE_INSERT_BUCKET_INVALIDATE)
 /* Don't block on allocation failure (for new btree nodes: */
 #define BTREE_INSERT_NOWAIT             (1 << __BTREE_INSERT_NOWAIT)
 #define BTREE_INSERT_GC_LOCK_HELD       (1 << __BTREE_INSERT_GC_LOCK_HELD)
@@ -194,7 +194,9 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
            : gc_pos_btree_root(as->btree_id)) >= 0 &&
         gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
         bch2_mark_key_locked(c, bkey_i_to_s_c(&d->key),
-            false, 0, NULL, 0, BCH_BUCKET_MARK_GC);
+            0, NULL, 0,
+            BCH_BUCKET_MARK_OVERWRITE|
+            BCH_BUCKET_MARK_GC);
 }
 static void __btree_node_free(struct bch_fs *c, struct btree *b)
@@ -264,13 +266,13 @@ static void bch2_btree_node_free_ondisk(struct bch_fs *c,
 {
     BUG_ON(!pending->index_update_done);
-    bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
-        false, 0,
-        NULL, 0, 0);
+    bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
+        BCH_BUCKET_MARK_OVERWRITE);
     if (gc_visited(c, gc_phase(GC_PHASE_PENDING_DELETE)))
-        bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
-            false, 0, NULL, 0, BCH_BUCKET_MARK_GC);
+        bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
+            BCH_BUCKET_MARK_OVERWRITE|
+            BCH_BUCKET_MARK_GC);
 }
 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
@@ -1075,10 +1077,12 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
     fs_usage = bch2_fs_usage_scratch_get(c);
     bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-        true, 0, &fs_usage->u, 0, 0);
+        0, &fs_usage->u, 0,
+        BCH_BUCKET_MARK_INSERT);
     if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
         bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-            true, 0, NULL, 0,
+            0, NULL, 0,
+            BCH_BUCKET_MARK_INSERT|
             BCH_BUCKET_MARK_GC);
     if (old && !btree_node_fake(old))
@@ -1171,11 +1175,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
     fs_usage = bch2_fs_usage_scratch_get(c);
     bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-        true, 0, &fs_usage->u, 0, 0);
+        0, &fs_usage->u, 0,
+        BCH_BUCKET_MARK_INSERT);
     if (gc_visited(c, gc_pos_btree_node(b)))
         bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-            true, 0, NULL, 0, BCH_BUCKET_MARK_GC);
+            0, NULL, 0,
+            BCH_BUCKET_MARK_INSERT|
+            BCH_BUCKET_MARK_GC);
     while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
         bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
@@ -1996,10 +2003,12 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
     fs_usage = bch2_fs_usage_scratch_get(c);
     bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-        true, 0, &fs_usage->u, 0, 0);
+        0, &fs_usage->u, 0,
+        BCH_BUCKET_MARK_INSERT);
     if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
         bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-            true, 0, NULL, 0,
+            0, NULL, 0,
+            BCH_BUCKET_MARK_INSERT|
             BCH_BUCKET_MARK_GC);
     bch2_btree_node_free_index(as, NULL,
@@ -542,6 +542,9 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
     struct bch_fs *c = trans->c;
     struct bch_fs_usage_online *fs_usage = NULL;
     struct btree_insert_entry *i;
+    unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
+        ? BCH_BUCKET_MARK_BUCKET_INVALIDATE
+        : 0;
     int ret;
     trans_for_each_update_iter(trans, i)
@@ -618,7 +621,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
     trans_for_each_update_iter(trans, i)
         if (update_has_triggers(trans, i) &&
             !update_triggers_transactional(trans, i))
-            bch2_mark_update(trans, i, &fs_usage->u, 0);
+            bch2_mark_update(trans, i, &fs_usage->u, mark_flags);
     if (fs_usage && trans->fs_usage_deltas)
         bch2_replicas_delta_list_apply(c, &fs_usage->u,
@@ -632,6 +635,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
     trans_for_each_update_iter(trans, i)
         if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
             bch2_mark_update(trans, i, NULL,
+                mark_flags|
                 BCH_BUCKET_MARK_GC);
     trans_for_each_update(trans, i)
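
The new BTREE_INSERT_BUCKET_INVALIDATE transaction flag exists only to be translated into BCH_BUCKET_MARK_BUCKET_INVALIDATE when keys are marked at commit time, as the hunks above show. Below is a standalone sketch of that translation, compilable as an ordinary userspace program; the BTREE_INSERT_* bit position is a stand-in value, since its exact bit is not spelled out in this diff, while the two mark-flag values match the header hunk further down:

    #include <stdio.h>

    #define BTREE_INSERT_BUCKET_INVALIDATE          (1 << 6)  /* stand-in bit */
    #define BCH_BUCKET_MARK_BUCKET_INVALIDATE       (1 << 2)
    #define BCH_BUCKET_MARK_GC                      (1 << 3)

    /* mirrors the mark_flags computation added to do_btree_insert_at() */
    static unsigned mark_flags_from_trans_flags(unsigned trans_flags)
    {
        return trans_flags & BTREE_INSERT_BUCKET_INVALIDATE
            ? BCH_BUCKET_MARK_BUCKET_INVALIDATE
            : 0;
    }

    int main(void)
    {
        unsigned mark_flags =
            mark_flags_from_trans_flags(BTREE_INSERT_BUCKET_INVALIDATE);

        /* the gc pass gets the same flags with BCH_BUCKET_MARK_GC or'd in */
        printf("mark flags %#x, gc mark flags %#x\n",
               mark_flags, mark_flags | BCH_BUCKET_MARK_GC);
        return 0;
    }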
@@ -249,16 +249,17 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
         size_t, enum bch_data_type, unsigned,
         struct gc_pos, unsigned);
-#define BCH_BUCKET_MARK_GC                  (1 << 0)
-#define BCH_BUCKET_MARK_NOATOMIC            (1 << 1)
-#define BCH_BUCKET_MARK_ALLOC_READ          (1 << 2)
-int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
-        bool, s64, struct bch_fs_usage *,
-        u64, unsigned);
-int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
-        bool, s64, struct bch_fs_usage *,
-        u64, unsigned);
+#define BCH_BUCKET_MARK_INSERT              (1 << 0)
+#define BCH_BUCKET_MARK_OVERWRITE           (1 << 1)
+#define BCH_BUCKET_MARK_BUCKET_INVALIDATE   (1 << 2)
+#define BCH_BUCKET_MARK_GC                  (1 << 3)
+#define BCH_BUCKET_MARK_ALLOC_READ          (1 << 4)
+#define BCH_BUCKET_MARK_NOATOMIC            (1 << 5)
+int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, s64,
+        struct bch_fs_usage *, u64, unsigned);
+int bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64,
+        struct bch_fs_usage *, u64, unsigned);
 int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage_online *,
         struct disk_reservation *, unsigned);
@@ -271,7 +272,7 @@ int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
 void bch2_replicas_delta_list_apply(struct bch_fs *,
         struct bch_fs_usage *,
         struct replicas_delta_list *);
-int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, bool, s64);
+int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, s64, unsigned);
 int bch2_trans_mark_update(struct btree_trans *,
         struct btree_insert_entry *);
 void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage_online *);
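
At the call sites updated earlier in this diff, every caller that passed "true" for the old boolean now passes BCH_BUCKET_MARK_INSERT, and every caller that passed "false" now passes BCH_BUCKET_MARK_OVERWRITE. A hypothetical compatibility wrapper, not part of the commit, makes that mapping explicit; it is only a sketch and assumes the declarations from the hunk above are in scope (kernel context, where bool is available):

    /* hypothetical helper, for illustration only: old-style call mapped to new flags */
    static inline int bch2_mark_key_compat(struct bch_fs *c, struct bkey_s_c k,
                                           bool inserting, s64 sectors,
                                           struct bch_fs_usage *fs_usage,
                                           u64 journal_seq, unsigned flags)
    {
        return bch2_mark_key(c, k, sectors, fs_usage, journal_seq,
                             flags | (inserting
                                      ? BCH_BUCKET_MARK_INSERT
                                      : BCH_BUCKET_MARK_OVERWRITE));
    }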
@@ -611,17 +611,21 @@ void bch2_stripes_heap_update(struct bch_fs *c,
     ec_stripes_heap *h = &c->ec_stripes_heap;
     size_t i;
-    heap_verify_backpointer(c, idx);
+    if (m->alive) {
+        heap_verify_backpointer(c, idx);
-    h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
+        h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
-    i = m->heap_idx;
-    heap_sift_up(h, i, ec_stripes_heap_cmp,
-        ec_stripes_heap_set_backpointer);
-    heap_sift_down(h, i, ec_stripes_heap_cmp,
-        ec_stripes_heap_set_backpointer);
+        i = m->heap_idx;
+        heap_sift_up(h, i, ec_stripes_heap_cmp,
+            ec_stripes_heap_set_backpointer);
+        heap_sift_down(h, i, ec_stripes_heap_cmp,
+            ec_stripes_heap_set_backpointer);
-    heap_verify_backpointer(c, idx);
+        heap_verify_backpointer(c, idx);
+    } else {
+        bch2_stripes_heap_insert(c, m, idx);
+    }
     if (stripe_idx_to_delete(c) >= 0)
         schedule_work(&c->ec_stripe_delete_work);
@@ -1274,7 +1278,9 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
     bch2_trans_init(&trans, c, 0, 0);
     for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k, ret)
-        bch2_mark_key(c, k, true, 0, NULL, 0, 0);
+        bch2_mark_key(c, k, 0, NULL, 0,
+            BCH_BUCKET_MARK_ALLOC_READ|
+            BCH_BUCKET_MARK_NOATOMIC);
     ret = bch2_trans_exit(&trans) ?: ret;
     if (ret) {
@@ -1285,7 +1291,9 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
     for_each_journal_key(*journal_keys, i)
         if (i->btree_id == BTREE_ID_EC)
             bch2_mark_key(c, bkey_i_to_s_c(i->k),
-                true, 0, NULL, 0, 0);
+                0, NULL, 0,
+                BCH_BUCKET_MARK_ALLOC_READ|
+                BCH_BUCKET_MARK_NOATOMIC);
     return 0;
 }
@@ -258,8 +258,9 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
     } while (bkey_cmp(iter->pos, k->k.p) < 0);
     if (split_compressed) {
-        ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k), false,
-                -((s64) k->k.size)) ?:
+        ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k),
+                -((s64) k->k.size),
+                BCH_BUCKET_MARK_OVERWRITE) ?:
             bch2_trans_commit(&trans, &disk_res, NULL,
                 BTREE_INSERT_ATOMIC|
                 BTREE_INSERT_NOFAIL|