Commit b2be7c8b authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: kill bucket mark sector count saturation

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c6923995
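
This commit removes the sector-count saturation machinery: the per-device saturated_count, the GC_MAX_SECTORS_USED clamp, the saturated_add() macro, and the tracepoints and gc wakeups tied to them. Bucket sector counts are now updated with checked_add(), which does the addition at full unsigned width, writes the result back into the narrower counter field, and BUG()s if the write-back truncated it; overflow or underflow traps instead of silently clamping. A minimal userspace sketch of the new semantics, assuming a 16-bit counter field and using assert() in place of BUG_ON():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the checked_add() introduced below; assert() stands in for BUG_ON() */
#define checked_add(a, b)					\
do {								\
	unsigned _res = (unsigned) (a) + (b);			\
	(a) = _res;						\
	assert((a) == _res);					\
} while (0)

int main(void)
{
	/* dirty_sectors modelled as a 16-bit field (an assumption for the demo) */
	uint16_t dirty_sectors = 0;

	checked_add(dirty_sectors, 4096);	/* fits: no complaint */
	printf("dirty_sectors = %u\n", dirty_sectors);

	checked_add(dirty_sectors, 65535);	/* truncates the 16-bit field: assert fires */
	printf("not reached\n");
	return 0;
}

The first call fits in 16 bits; the second would wrap the counter, so the assertion trips where the old code would have clamped at GC_MAX_SECTORS_USED and left the rest to gc.
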
@@ -413,7 +413,6 @@ struct bch_dev {
/* last calculated minimum prio */
u16 max_last_bucket_io[2];
atomic_long_t saturated_count;
size_t inc_gen_needs_gc;
size_t inc_gen_really_needs_gc;
u64 allocator_journal_seq_flush;
......
@@ -570,9 +570,6 @@ void bch2_gc(struct bch_fs *c)
bch2_mark_pending_btree_node_frees(c);
bch2_mark_allocator_buckets(c);
for_each_member_device(ca, c, i)
atomic_long_set(&ca->saturated_count, 0);
/* Indicates that gc is no longer in progress: */
gc_pos_set(c, gc_phase(GC_PHASE_DONE));
c->gc_count++;
......
@@ -454,17 +454,11 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
c->gc_pos.phase == GC_PHASE_DONE);
}
#define saturated_add(ca, dst, src, max) \
#define checked_add(a, b) \
do { \
BUG_ON((int) (dst) + (src) < 0); \
if ((dst) == (max)) \
; \
else if ((dst) + (src) <= (max)) \
dst += (src); \
else { \
dst = (max); \
trace_sectors_saturated(ca); \
} \
unsigned _res = (unsigned) (a) + (b); \
(a) = _res; \
BUG_ON((a) != _res); \
} while (0)
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -489,9 +483,9 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
g = bucket(ca, b);
old = bucket_data_cmpxchg(c, ca, g, new, ({
saturated_add(ca, new.dirty_sectors, sectors,
GC_MAX_SECTORS_USED);
new.data_type = type;
new.data_type = type;
checked_add(new.dirty_sectors, sectors);
new.dirty_sectors += sectors;
}));
rcu_read_unlock();
@@ -525,7 +519,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
u64 journal_seq, unsigned flags)
{
struct bucket_mark old, new;
unsigned saturated;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_BUCKET(ca, ptr);
enum bch_data_type data_type = type == S_META
@@ -560,7 +553,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
v = atomic64_read(&g->_mark.v);
do {
new.v.counter = old.v.counter = v;
saturated = 0;
/*
* Check this after reading bucket mark to guard against
@@ -574,17 +566,10 @@ static void bch2_mark_pointer(struct bch_fs *c,
return;
}
if (!ptr->cached &&
new.dirty_sectors == GC_MAX_SECTORS_USED &&
sectors < 0)
saturated = -sectors;
if (ptr->cached)
saturated_add(ca, new.cached_sectors, sectors,
GC_MAX_SECTORS_USED);
if (!ptr->cached)
checked_add(new.dirty_sectors, sectors);
else
saturated_add(ca, new.dirty_sectors, sectors,
GC_MAX_SECTORS_USED);
checked_add(new.cached_sectors, sectors);
if (!new.dirty_sectors &&
!new.cached_sectors) {
@@ -610,16 +595,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
bucket_became_unavailable(c, old, new));
if (saturated &&
atomic_long_add_return(saturated,
&ca->saturated_count) >=
bucket_to_sector(ca, ca->free_inc.size)) {
if (c->gc_thread) {
trace_gc_sectors_saturated(c);
wake_up_process(c->gc_thread);
}
}
}
void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
......
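
For context on the bch2_mark_pointer() hunks above: the bucket mark is a packed 64-bit value updated locklessly by re-reading it, building a modified copy, and publishing it with a compare-and-exchange, retrying if another updater got there first (the closing cmpxchg is outside the lines shown). A simplified model of that pattern with C11 atomics, using hypothetical field widths and a plain union rather than the real struct bucket_mark:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define checked_add(a, b)					\
do {								\
	unsigned _res = (unsigned) (a) + (b);			\
	(a) = _res;						\
	assert((a) == _res);	/* BUG_ON() in the kernel */	\
} while (0)

/* Hypothetical stand-in for struct bucket_mark: counters packed in one 64-bit word */
union mark {
	struct {
		uint16_t	dirty_sectors;
		uint16_t	cached_sectors;
		uint32_t	other;
	};
	uint64_t		v;
};

/* Re-read, modify a private copy, publish with compare-exchange, retry on races */
static void mark_add_sectors(_Atomic uint64_t *mark, int sectors, bool cached)
{
	union mark new;
	uint64_t v = atomic_load(mark);

	do {
		new.v = v;

		if (!cached)
			checked_add(new.dirty_sectors, sectors);
		else
			checked_add(new.cached_sectors, sectors);
	} while (!atomic_compare_exchange_weak(mark, &v, new.v));
}

int main(void)
{
	_Atomic uint64_t mark = 0;

	mark_add_sectors(&mark, 128, false);	/* dirty pointer */
	mark_add_sectors(&mark, 8, true);	/* cached pointer */
	return 0;
}

The real bch2_mark_pointer() does more around this loop (gc position checks, journal sequence handling); the sketch only shows the retry shape and where checked_add() now slots in.
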
@@ -115,11 +115,6 @@ static inline u8 ptr_stale(struct bch_dev *ca,
/* bucket gc marks */
/* The dirty and cached sector counts saturate. If this occurs,
* reference counting alone will not free the bucket, and a btree
* GC must be performed. */
#define GC_MAX_SECTORS_USED ((1U << 15) - 1)
static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
return mark.dirty_sectors + mark.cached_sectors;
......
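
The comment removed above described the behaviour this commit kills: once dirty_sectors or cached_sectors hit GC_MAX_SECTORS_USED the counter stuck there, so reference counting alone could no longer free the bucket and a btree GC had to rebuild the counts. A standalone sketch of the old clamping, with the ca argument dropped and trace_sectors_saturated() replaced by a printf() for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GC_MAX_SECTORS_USED	((1U << 15) - 1)

/* Old behaviour: clamp at max and emit a tracepoint (modelled here as printf) */
#define saturated_add(dst, src, max)				\
do {								\
	assert((int) (dst) + (src) >= 0);			\
	if ((dst) == (max))					\
		;						\
	else if ((dst) + (src) <= (max))			\
		dst += (src);					\
	else {							\
		dst = (max);					\
		printf("sectors saturated\n");			\
	}							\
} while (0)

int main(void)
{
	uint16_t dirty_sectors = GC_MAX_SECTORS_USED - 10;

	saturated_add(dirty_sectors, 100, GC_MAX_SECTORS_USED);
	/* Counter is now pinned at the max, so later decrements can't
	 * bring it back to an accurate value; only gc could fix it. */
	printf("dirty_sectors = %u\n", dirty_sectors);
	return 0;
}

With this commit that clamp is gone: overflow is treated as a bug rather than a condition gc has to clean up, which is why the saturation tracepoints and gc wakeups below are also removed.
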
@@ -44,21 +44,6 @@ DECLARE_EVENT_CLASS(bkey,
__entry->offset, __entry->size)
);
DECLARE_EVENT_CLASS(bch_dev,
TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca),
TP_STRUCT__entry(
__array(char, uuid, 16 )
),
TP_fast_assign(
memcpy(__entry->uuid, ca->uuid.b, 16);
),
TP_printk("%pU", __entry->uuid)
);
DECLARE_EVENT_CLASS(bch_fs,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c),
@@ -361,16 +346,6 @@ DEFINE_EVENT(bch_fs, gc_coalesce_end,
TP_ARGS(c)
);
DEFINE_EVENT(bch_dev, sectors_saturated,
TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
DEFINE_EVENT(bch_fs, gc_sectors_saturated,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
......