Commit 9afc6652 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Kill bch2_invalidate_bucket()

This patch is working towards eventually getting rid of the in memory
struct bucket, and relying only on the btree representation.

Since bch2_invalidate_bucket() was only used for incrementing gens, not
invalidating cached data, no other counters were being changed as a side
effect - meaning it's safe for the allocator code to increment the
bucket gen directly.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 72eab8da
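As a rough illustration of "incrementing the bucket gen directly": below is a minimal sketch with a hypothetical packed-mark layout, not bcachefs code — the real allocator does this through the bucket_cmpxchg() helper visible in the diff further down.

/* Minimal sketch, NOT bcachefs code: a packed mark word whose low byte
 * holds the bucket gen, bumped with a cmpxchg retry loop much like
 * bucket_cmpxchg(g, m, m.gen++) in the diff below. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct bucket {
        _Atomic uint64_t mark;  /* hypothetical packing: bits 0-7 = gen */
};

static uint8_t bucket_gen_bump(struct bucket *g)
{
        uint64_t old = atomic_load(&g->mark), new;

        do {
                /* increment only the gen field; leave other counters alone */
                new = (old & ~0xffULL) | ((old + 1) & 0xff);
        } while (!atomic_compare_exchange_weak(&g->mark, &old, new));

        return (uint8_t) new;
}

int main(void)
{
        struct bucket g = { 0x00ab00fe };               /* gen = 0xfe */

        printf("new gen: %u\n", bucket_gen_bump(&g));   /* prints 255 */
        return 0;
}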
fs/bcachefs/alloc_background.c
@@ -896,34 +896,32 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
 
         /* first, put on free_inc and mark as owned by allocator: */
         percpu_down_read(&c->mark_lock);
-        spin_lock(&c->freelist_lock);
-
-        verify_not_on_freelist(c, ca, b);
-
-        BUG_ON(!fifo_push(&ca->free_inc, b));
-
         g = bucket(ca, b);
         m = READ_ONCE(g->mark);
 
-        invalidating_cached_data = m.cached_sectors != 0;
+        BUG_ON(m.data_type || m.dirty_sectors);
+
+        bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
+
+        spin_lock(&c->freelist_lock);
+        verify_not_on_freelist(c, ca, b);
+        BUG_ON(!fifo_push(&ca->free_inc, b));
+        spin_unlock(&c->freelist_lock);
+
+        /*
+         * If we're not invalidating cached data, we only increment the bucket
+         * gen in memory here, the incremented gen will be updated in the btree
+         * by bch2_trans_mark_pointer():
+         */
+        if (!m.cached_sectors &&
+            !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
+                bucket_cmpxchg(g, m, m.gen++);
+                percpu_up_read(&c->mark_lock);
+                goto out;
+        }
 
-        if (!invalidating_cached_data)
-                bch2_invalidate_bucket(c, ca, b, &m);
-        else
-                bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
-
-        spin_unlock(&c->freelist_lock);
         percpu_up_read(&c->mark_lock);
 
-        if (!invalidating_cached_data)
-                goto out;
-
         /*
          * If the read-only path is trying to shut down, we can't be generating
          * new btree updates:
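The new path above takes the in-memory shortcut only when the bucket holds no cached data and no update is still waiting on a journal write. As a simplified sketch of what a check like bucket_needs_journal_commit() computes — a wraparound-safe comparison of truncated journal sequence numbers (the types and field layout here are illustrative, not the real packed struct bucket_mark):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified mark: bcachefs packs these fields into a
 * single word updated by cmpxchg. */
struct mark {
        bool     journal_seq_valid;
        uint16_t journal_seq;   /* low bits of the journal seq; wraps */
};

/* Does this bucket still have an update waiting on a journal write?
 * Signed 16-bit subtraction handles sequence-number wraparound. */
static bool bucket_needs_journal_commit(struct mark m, uint16_t last_seq_ondisk)
{
        return m.journal_seq_valid &&
               (int16_t)(m.journal_seq - last_seq_ondisk) > 0;
}

int main(void)
{
        struct mark m = { .journal_seq_valid = true, .journal_seq = 10 };

        printf("%d\n", bucket_needs_journal_commit(m, 8));  /* 1: not yet on disk */
        printf("%d\n", bucket_needs_journal_commit(m, 12)); /* 0: already committed */
        return 0;
}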
fs/bcachefs/buckets.c
@@ -644,46 +644,6 @@ int bch2_replicas_delta_list_apply(struct bch_fs *c,
         ret;                                                    \
 })
 
-static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
-                                    size_t b, struct bucket_mark *ret,
-                                    bool gc)
-{
-        struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
-        struct bucket *g = __bucket(ca, b, gc);
-        struct bucket_mark old, new;
-
-        old = bucket_cmpxchg(g, new, ({
-                BUG_ON(!is_available_bucket(new));
-
-                new.owned_by_allocator  = true;
-                new.data_type           = 0;
-                new.cached_sectors      = 0;
-                new.dirty_sectors       = 0;
-                new.gen++;
-        }));
-
-        bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
-
-        if (old.cached_sectors)
-                update_cached_sectors(c, fs_usage, ca->dev_idx,
-                                      -((s64) old.cached_sectors));
-
-        if (!gc)
-                *ret = old;
-        return 0;
-}
-
-void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
-                            size_t b, struct bucket_mark *old)
-{
-        do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
-                   ca, b, old);
-
-        if (!old->owned_by_allocator && old->cached_sectors)
-                trace_invalidate(ca, bucket_to_sector(ca, b),
-                                 old->cached_sectors);
-}
-
 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                                     size_t b, bool owned_by_allocator,
                                     bool gc)
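The removed helper above is built on bucket_cmpxchg(), which evaluates a caller-supplied update — in the removed code, a whole ({ ... }) block — against a copy of the mark and publishes it with compare-and-swap, retrying on concurrent modification. A self-contained sketch of that idiom using GCC/Clang statement expressions (illustrative types only; the real struct bucket_mark is a packed union over a u64):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in types, not the real bcachefs definitions. */
struct bucket_mark { uint64_t v; };
struct bucket { _Atomic uint64_t mark; };

/* Sketch of the bucket_cmpxchg() idiom: copy the mark into 'new', run
 * the caller's expression on it, then try to publish with CAS; on a
 * concurrent update, _old is refreshed and the expression reruns.
 * Returns the pre-update mark. Requires GCC/Clang statement exprs. */
#define bucket_cmpxchg(g, new, expr)                            \
({                                                              \
        uint64_t _old = atomic_load(&(g)->mark);                \
        do {                                                    \
                (new).v = _old;                                 \
                expr;                                           \
        } while (!atomic_compare_exchange_weak(&(g)->mark,      \
                                               &_old, (new).v));\
        (struct bucket_mark){ _old };                           \
})

int main(void)
{
        struct bucket g = { 41 };
        struct bucket_mark new;
        struct bucket_mark old = bucket_cmpxchg(&g, new, new.v++);

        printf("old %llu new %llu\n",
               (unsigned long long) old.v,
               (unsigned long long) atomic_load(&g.mark));      /* old 41 new 42 */
        return 0;
}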
fs/bcachefs/buckets.h
@@ -236,8 +236,6 @@ bch2_fs_usage_read_short(struct bch_fs *);
 void bch2_bucket_seq_cleanup(struct bch_fs *);
 void bch2_fs_usage_initialize(struct bch_fs *);
 
-void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
-                            size_t, struct bucket_mark *);
 void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
                             size_t, bool, struct gc_pos, unsigned);
 void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,