Commit d44c2f9e authored by Tang Junhui, committed by Jens Axboe

bcache: update bucket_in_use in real time

bucket_in_use is updated in the gc thread, which is triggered by
invalidating or writing sectors_to_gc dirty data, so it is refreshed only
at long intervals. Therefore, when we compare it against the threshold,
the value is often stale, which leads to inaccurate judgments and often
results in bucket depletion.

We sent a patch before that updated bucket_in_use periodically in the gc
thread, but Coly thought that would lead to high latency. In this patch,
we add avail_nbuckets to record the count of available buckets, and we
recalculate bucket_in_use in real time whenever a bucket is allocated or
freed.

[edited by ML: eliminated some whitespace errors]
Signed-off-by: Tang Junhui <tang.junhui@zte.com.cn>
Signed-off-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3b304d24
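A minimal, self-contained userspace sketch of the bookkeeping described above (struct cache_set_sketch, bucket_alloc, bucket_free and the example numbers are illustrative stand-ins, not kernel identifiers; the actual kernel changes are in the diff that follows):

/*
 * Sketch of the idea: adjust avail_nbuckets on every allocation/free and
 * recompute the in_use percentage immediately, instead of waiting for the
 * next gc pass to recount available buckets.
 */
#include <stddef.h>
#include <stdio.h>

struct cache_set_sketch {
	size_t nbuckets;	/* total buckets in the cache set */
	size_t avail_nbuckets;	/* buckets currently available */
	unsigned int in_use;	/* percentage of buckets in use */
};

static void update_bucket_in_use(struct cache_set_sketch *c)
{
	c->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

static void bucket_alloc(struct cache_set_sketch *c)
{
	if (c->avail_nbuckets > 0) {
		c->avail_nbuckets--;
		update_bucket_in_use(c);
	}
}

static void bucket_free(struct cache_set_sketch *c)
{
	if (c->avail_nbuckets < c->nbuckets) {
		c->avail_nbuckets++;
		update_bucket_in_use(c);
	}
}

int main(void)
{
	struct cache_set_sketch c = { .nbuckets = 1000, .avail_nbuckets = 1000 };
	int i;

	for (i = 0; i < 250; i++)
		bucket_alloc(&c);
	printf("in_use = %u%%\n", c.in_use);	/* 250 of 1000 used -> 25 */

	bucket_free(&c);
	printf("in_use = %u%%\n", c.in_use);	/* 249 of 1000 used -> 24 (rounded down) */

	return 0;
}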
@@ -442,6 +442,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 		b->prio = INITIAL_PRIO;
 	}
 
+	if (ca->set->avail_nbuckets > 0) {
+		ca->set->avail_nbuckets--;
+		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+	}
+
 	return r;
 }
 
@@ -449,6 +454,11 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
 {
 	SET_GC_MARK(b, 0);
 	SET_GC_SECTORS_USED(b, 0);
+
+	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
+		ca->set->avail_nbuckets++;
+		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+	}
 }
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
...
@@ -581,6 +581,7 @@ struct cache_set {
 	uint8_t			need_gc;
 	struct gc_stat		gc_stats;
 	size_t			nbuckets;
+	size_t			avail_nbuckets;
 
 	struct task_struct	*gc_thread;
 	/* Where in the btree gc currently is */
...
@@ -1240,6 +1240,11 @@ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
 	__bch_btree_mark_key(c, level, k);
 }
 
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
+{
+	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
+}
+
 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 {
 	uint8_t stale = 0;
@@ -1651,9 +1656,8 @@ static void btree_gc_start(struct cache_set *c)
 	mutex_unlock(&c->bucket_lock);
 }
 
-static size_t bch_btree_gc_finish(struct cache_set *c)
+static void bch_btree_gc_finish(struct cache_set *c)
 {
-	size_t available = 0;
 	struct bucket *b;
 	struct cache *ca;
 	unsigned i;
@@ -1690,6 +1694,7 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
 	}
 	rcu_read_unlock();
 
+	c->avail_nbuckets = 0;
 	for_each_cache(ca, c, i) {
 		uint64_t *i;
 
@@ -1711,18 +1716,16 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
 			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
 
 			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-				available++;
+				c->avail_nbuckets++;
 		}
 	}
 
 	mutex_unlock(&c->bucket_lock);
-	return available;
 }
 
 static void bch_btree_gc(struct cache_set *c)
 {
 	int ret;
-	unsigned long available;
 	struct gc_stat stats;
 	struct closure writes;
 	struct btree_op op;
@@ -1745,14 +1748,14 @@ static void bch_btree_gc(struct cache_set *c)
 		pr_warn("gc failed!");
 	} while (ret);
 
-	available = bch_btree_gc_finish(c);
+	bch_btree_gc_finish(c);
 	wake_up_allocators(c);
 
 	bch_time_stats_update(&c->btree_gc_time, start_time);
 
 	stats.key_bytes *= sizeof(uint64_t);
 	stats.data	<<= 9;
-	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
+	bch_update_bucket_in_use(c, &stats);
 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
 
 	trace_bcache_gc_end(c);
...
@@ -305,5 +305,5 @@ void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
 struct keybuf_key *bch_keybuf_next(struct keybuf *);
 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
 					  struct bkey *, keybuf_pred_fn *);
-
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
 #endif