Commit 4fe6a816 authored by Kent Overstreet

bcache: Add a real GC_MARK_RECLAIMABLE

This means the garbage collection code can better check for data and metadata
pointers to the same buckets.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent c13f3af9
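The key semantic shift: GC_MARK value 0 no longer means "reclaimable" but "not yet marked during this GC pass", and garbage collection now sets GC_MARK_RECLAIMABLE explicitly on clean data buckets. A minimal user-space sketch of the resulting state check (the constants mirror the patched header; mark_allows_invalidate() and the main() harness are illustrative, not kernel code, and omit the pin and generation checks that can_invalidate_bucket() also performs):

	#include <stdbool.h>
	#include <stdio.h>

	#define GC_MARK_RECLAIMABLE	1	/* was 0 before this patch */
	#define GC_MARK_DIRTY		2	/* was 1 */
	#define GC_MARK_METADATA	3	/* was 2 */
	/* 0 now means "no pointer to this bucket seen yet in this GC pass" */

	/* Mirrors the mark test in the patched can_invalidate_bucket() */
	static bool mark_allows_invalidate(unsigned gc_mark)
	{
		return !gc_mark || gc_mark == GC_MARK_RECLAIMABLE;
	}

	int main(void)
	{
		unsigned mark = 0;	/* btree_gc_start() now resets marks to 0 */

		/* GC explicitly marks clean data buckets reclaimable, so a
		 * later metadata pointer to the same bucket is a detectable
		 * conflict rather than an indistinguishable zero. */
		mark = GC_MARK_RECLAIMABLE;
		printf("invalidate ok: %d\n", mark_allows_invalidate(mark));	/* 1 */

		mark = GC_MARK_METADATA;
		printf("invalidate ok: %d\n", mark_allows_invalidate(mark));	/* 0 */
		return 0;
	}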
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -155,7 +155,8 @@ bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
 
 static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
 {
-	return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
+	return (!GC_MARK(b) ||
+		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
 		!atomic_read(&b->pin) &&
 		can_inc_bucket_gen(b);
 }
@@ -475,7 +476,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		struct bucket *b = PTR_BUCKET(c, k, i);
 
-		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MARK(b, 0);
 		SET_GC_SECTORS_USED(b, 0);
 		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
 	}
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -207,9 +207,9 @@ struct bucket {
  */
 
 BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
-#define GC_MARK_RECLAIMABLE	0
-#define GC_MARK_DIRTY		1
-#define GC_MARK_METADATA	2
+#define GC_MARK_RECLAIMABLE	1
+#define GC_MARK_DIRTY		2
+#define GC_MARK_METADATA	3
 #define GC_SECTORS_USED_SIZE	13
 #define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
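For reference, these BITMASK lines pack GC_MARK into bits 0-1 of the bucket's gc_mark word and GC_SECTORS_USED into the 13 bits above it. A stand-alone approximation of the accessors the macro generates (the helper names and the 16-bit field width here are assumptions for illustration only):

	#include <stdint.h>

	/* Approximates BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2) */
	static inline unsigned get_gc_mark(uint16_t gc_mark)
	{
		return gc_mark & 0x3;			/* bits 0-1 */
	}

	static inline uint16_t set_gc_mark(uint16_t gc_mark, unsigned v)
	{
		return (uint16_t)((gc_mark & ~0x3u) | (v & 0x3));
	}

	/* Approximates BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13) */
	static inline unsigned get_gc_sectors_used(uint16_t gc_mark)
	{
		return (gc_mark >> 2) & 0x1fff;		/* 13 bits above the mark */
	}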
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1160,6 +1160,8 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
 		SET_GC_MARK(g, GC_MARK_METADATA);
 	else if (KEY_DIRTY(k))
 		SET_GC_MARK(g, GC_MARK_DIRTY);
+	else if (!GC_MARK(g))
+		SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
 
 	/* guard against overflow */
 	SET_GC_SECTORS_USED(g, min_t(unsigned,
@@ -1559,7 +1561,7 @@ static void btree_gc_start(struct cache_set *c)
 		for_each_bucket(b, ca) {
 			b->gc_gen = b->gen;
 			if (!atomic_read(&b->pin)) {
-				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+				SET_GC_MARK(b, 0);
 				SET_GC_SECTORS_USED(b, 0);
 			}
 		}
@@ -1622,14 +1624,18 @@ size_t bch_btree_gc_finish(struct cache_set *c)
 			b->last_gc	= b->gc_gen;
 			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
 
-			if (!atomic_read(&b->pin) &&
-			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
+			if (atomic_read(&b->pin))
+				continue;
+
+			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+
+			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
 				available++;
-				if (!GC_SECTORS_USED(b))
-					bch_bucket_add_unused(ca, b);
-			}
+
+			if (!GC_MARK(b))
+				bch_bucket_add_unused(ca, b);
 		}
 	}
 
 	mutex_unlock(&c->bucket_lock);
 	return available;
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -499,9 +499,9 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
 	if (mutex_trylock(&b->c->bucket_lock)) {
 		if (b->c->gc_mark_valid &&
-		    ((GC_MARK(g) != GC_MARK_DIRTY &&
-		      KEY_DIRTY(k)) ||
-		     GC_MARK(g) == GC_MARK_METADATA))
+		    (!GC_MARK(g) ||
+		     GC_MARK(g) == GC_MARK_METADATA ||
+		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
 			goto err;
 
 		if (g->prio == BTREE_PRIO)