Commit 36e916e1 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Caller now responsible for calling mark_key for gc

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 7b512638
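
This commit changes how keys are marked for the in-progress mark-and-sweep GC: bch2_mark_key() and bch2_mark_key_locked() lose their struct gc_pos parameter and no longer decide internally whether the GC's accounting needs updating too. Each caller is now responsible for doing the ordinary mark, then checking gc_visited() and repeating the mark with BCH_BUCKET_MARK_GC if GC has already walked past the position being updated. A minimal before/after sketch of the caller-side pattern, assembled from the bch2_btree_set_root_inmem() hunk below (the surrounding context is illustrative, not verbatim source):

/*
 * Before: a gc pos was passed in, and the mark path decided internally
 * whether the in-progress GC also had to account this key:
 */
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
		     true, 0,
		     gc_pos_btree_root(b->btree_id),
		     fs_usage, 0, 0);

/*
 * After: the caller marks normally, then checks gc_visited(); if GC has
 * already visited this position, it repeats the mark with
 * BCH_BUCKET_MARK_GC so the GC-time accounting stays consistent:
 */
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
		     true, 0, fs_usage, 0, 0);
if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
	bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
			     true, 0, NULL, 0,
			     BCH_BUCKET_MARK_GC);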
@@ -117,7 +117,6 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 {
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const struct bch_extent_ptr *ptr;
-	struct gc_pos pos = { 0 };
 	unsigned flags =
 		BCH_BUCKET_MARK_GC|
 		(initial ? BCH_BUCKET_MARK_NOATOMIC : 0);
@@ -174,7 +173,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 			*max_stale = max(*max_stale, ptr_stale(ca, ptr));
 		}
 
-	bch2_mark_key(c, k, true, k.k->size, pos, NULL, 0, flags);
+	bch2_mark_key(c, k, true, k.k->size, NULL, 0, flags);
 fsck_err:
 	return ret;
 }
@@ -395,7 +394,6 @@ static void bch2_mark_superblocks(struct bch_fs *c)
 
 /* Also see bch2_pending_btree_node_free_insert_done() */
 static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 {
-	struct gc_pos pos = { 0 };
 	struct btree_update *as;
 	struct pending_btree_node_free *d;
@@ -405,8 +403,7 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 	for_each_pending_btree_node_free(c, as, d)
 		if (d->index_update_done)
 			bch2_mark_key(c, bkey_i_to_s_c(&d->key),
-				      true, 0,
-				      pos, NULL, 0,
+				      true, 0, NULL, 0,
 				      BCH_BUCKET_MARK_GC);
 
 	mutex_unlock(&c->btree_interior_update_lock);
@@ -162,7 +162,6 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
 {
 	struct bch_fs *c = as->c;
 	struct pending_btree_node_free *d;
-	struct gc_pos pos = { 0 };
 
 	for (d = as->pending; d < as->pending + as->nr_pending; d++)
 		if (!bkey_cmp(k.k->p, d->key.k.p) &&
@@ -190,18 +189,12 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
 	 * to cancel out one of mark and sweep's markings if necessary:
 	 */
 
-	/*
-	 * bch2_mark_key() compares the current gc pos to the pos we're
-	 * moving this reference from, hence one comparison here:
-	 */
 	if (gc_pos_cmp(c->gc_pos, b
		       ? gc_pos_btree_node(b)
		       : gc_pos_btree_root(as->btree_id)) >= 0 &&
 	    gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
-		bch2_mark_key_locked(c,
-			     bkey_i_to_s_c(&d->key),
-			     false, 0, pos,
-			     NULL, 0, BCH_BUCKET_MARK_GC);
+		bch2_mark_key_locked(c, bkey_i_to_s_c(&d->key),
+				     false, 0, NULL, 0, BCH_BUCKET_MARK_GC);
 }
 
 static void __btree_node_free(struct bch_fs *c, struct btree *b)
@@ -273,8 +266,11 @@ static void bch2_btree_node_free_ondisk(struct bch_fs *c,
 	bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
 		      false, 0,
-		      gc_phase(GC_PHASE_PENDING_DELETE),
 		      NULL, 0, 0);
+
+	if (gc_visited(c, gc_phase(GC_PHASE_PENDING_DELETE)))
+		bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
+			      false, 0, NULL, 0, BCH_BUCKET_MARK_GC);
 }
 
 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
@@ -1079,9 +1075,11 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	fs_usage = bch2_fs_usage_scratch_get(c);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-			     true, 0,
-			     gc_pos_btree_root(b->btree_id),
-			     fs_usage, 0, 0);
+			     true, 0, fs_usage, 0, 0);
+	if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
+		bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
+				     true, 0, NULL, 0,
+				     BCH_BUCKET_MARK_GC);
 
 	if (old && !btree_node_fake(old))
 		bch2_btree_node_free_index(as, NULL,
@@ -1173,8 +1171,11 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 	fs_usage = bch2_fs_usage_scratch_get(c);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-			     true, 0,
-			     gc_pos_btree_node(b), fs_usage, 0, 0);
+			     true, 0, fs_usage, 0, 0);
+
+	if (gc_visited(c, gc_pos_btree_node(b)))
+		bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
+				     true, 0, NULL, 0, BCH_BUCKET_MARK_GC);
 
 	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
 	       bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
@@ -1994,9 +1995,12 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 		fs_usage = bch2_fs_usage_scratch_get(c);
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-				     true, 0,
-				     gc_pos_btree_root(b->btree_id),
-				     fs_usage, 0, 0);
+				     true, 0, fs_usage, 0, 0);
+
+		if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
+			bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
+					     true, 0, NULL, 0,
+					     BCH_BUCKET_MARK_GC);
 
 		bch2_btree_node_free_index(as, NULL,
 					   bkey_i_to_s_c(&b->key),
 					   fs_usage);
@@ -3,6 +3,7 @@
 #include "bcachefs.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
+#include "btree_gc.h"
 #include "btree_io.h"
 #include "btree_iter.h"
 #include "btree_locking.h"
@@ -602,10 +603,17 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	}
 
 	trans_for_each_update_iter(trans, i)
-		bch2_mark_update(trans, i, fs_usage);
+		bch2_mark_update(trans, i, fs_usage, 0);
 	if (fs_usage)
 		bch2_trans_fs_usage_apply(trans, fs_usage);
 
+	if (unlikely(c->gc_pos.phase)) {
+		trans_for_each_update_iter(trans, i)
+			if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
+				bch2_mark_update(trans, i, NULL,
+						 BCH_BUCKET_MARK_GC);
+	}
+
 	trans_for_each_update(trans, i)
 		do_btree_insert_one(trans, i);
 out:
@@ -940,12 +940,13 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
 	return 0;
 }
 
-static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
-			   bool inserting, s64 sectors,
-			   struct bch_fs_usage *fs_usage,
-			   unsigned journal_seq, unsigned flags,
-			   bool gc)
+int bch2_mark_key_locked(struct bch_fs *c,
+		   struct bkey_s_c k,
+		   bool inserting, s64 sectors,
+		   struct bch_fs_usage *fs_usage,
+		   u64 journal_seq, unsigned flags)
 {
+	bool gc = flags & BCH_BUCKET_MARK_GC;
 	int ret = 0;
 
 	preempt_disable();
@@ -997,21 +998,8 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 	return ret;
 }
 
-int bch2_mark_key_locked(struct bch_fs *c,
-		   struct bkey_s_c k,
-		   bool inserting, s64 sectors,
-		   struct gc_pos pos,
-		   struct bch_fs_usage *fs_usage,
-		   u64 journal_seq, unsigned flags)
-{
-	return do_mark_fn(__bch2_mark_key, c, pos, flags,
-			  k, inserting, sectors, fs_usage,
-			  journal_seq, flags);
-}
-
 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 		  bool inserting, s64 sectors,
-		  struct gc_pos pos,
 		  struct bch_fs_usage *fs_usage,
 		  u64 journal_seq, unsigned flags)
 {
@@ -1019,7 +1007,7 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 	percpu_down_read(&c->mark_lock);
 	ret = bch2_mark_key_locked(c, k, inserting, sectors,
-				   pos, fs_usage, journal_seq, flags);
+				   fs_usage, journal_seq, flags);
 	percpu_up_read(&c->mark_lock);
 
 	return ret;
@@ -1027,13 +1015,13 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 void bch2_mark_update(struct btree_trans *trans,
 		      struct btree_insert_entry *insert,
-		      struct bch_fs_usage *fs_usage)
+		      struct bch_fs_usage *fs_usage,
+		      unsigned flags)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter *iter = insert->iter;
 	struct btree *b = iter->l[0].b;
 	struct btree_node_iter node_iter = iter->l[0].iter;
-	struct gc_pos pos = gc_pos_btree_node(b);
 	struct bkey_packed *_k;
 
 	if (!btree_node_type_needs_gc(iter->btree_id))
 		return;
@@ -1043,7 +1031,7 @@ void bch2_mark_update(struct btree_trans *trans,
 	bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
 		bpos_min(insert->k->k.p, b->key.k.p).offset -
 		bkey_start_offset(&insert->k->k),
-		pos, fs_usage, trans->journal_res.seq, 0);
+		fs_usage, trans->journal_res.seq, flags);
 
 	while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
 						      KEY_TYPE_discard))) {
@@ -1076,7 +1064,8 @@ void bch2_mark_update(struct btree_trans *trans,
 			BUG_ON(sectors <= 0);
 
 			bch2_mark_key_locked(c, k, true, sectors,
-				pos, fs_usage, trans->journal_res.seq, 0);
+				fs_usage, trans->journal_res.seq,
+				flags);
 
 			sectors = bkey_start_offset(&insert->k->k) -
 				k.k->p.offset;
@@ -1087,7 +1076,7 @@ void bch2_mark_update(struct btree_trans *trans,
 		}
 
 		bch2_mark_key_locked(c, k, false, sectors,
-			pos, fs_usage, trans->journal_res.seq, 0);
+			fs_usage, trans->journal_res.seq, flags);
 
 		bch2_btree_node_iter_advance(&node_iter, b);
 	}
@@ -246,16 +246,16 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 #define BCH_BUCKET_MARK_NOATOMIC	(1 << 1)
 
 int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
-			 bool, s64, struct gc_pos,
-			 struct bch_fs_usage *, u64, unsigned);
+			 bool, s64, struct bch_fs_usage *,
+			 u64, unsigned);
 int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
-		  bool, s64, struct gc_pos,
-		  struct bch_fs_usage *, u64, unsigned);
+		  bool, s64, struct bch_fs_usage *,
+		  u64, unsigned);
 int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
			struct disk_reservation *);
 void bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
-		      struct bch_fs_usage *);
+		      struct bch_fs_usage *, unsigned);
 void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);
 
 /* disk reservations: */
@@ -1232,10 +1232,7 @@ int bch2_stripes_write(struct bch_fs *c, bool *wrote)
 static void bch2_stripe_read_key(struct bch_fs *c, struct bkey_s_c k)
 {
-	struct gc_pos pos = { 0 };
-
-	bch2_mark_key(c, k, true, 0, pos, NULL, 0, 0);
+	bch2_mark_key(c, k, true, 0, NULL, 0, 0);
 }
 
 int bch2_stripes_read(struct bch_fs *c, struct list_head *journal_replay_list)
@@ -876,8 +876,9 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
 	 * but - there are other correctness issues if btree gc were to run
 	 * before journal replay finishes
 	 */
+	BUG_ON(c->gc_pos.phase);
+
 	bch2_mark_key(c, bkey_i_to_s_c(k), false, -((s64) k->k.size),
-		      gc_pos_btree_node(iter->l[0].b),
 		      NULL, 0, 0);
 
 	bch2_trans_exit(&trans);