Commit 142cbdff authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Change copygc to consider bucket fragmentation

When devices have different-sized buckets, ranking buckets by fragmentation (the fraction of the bucket that is used) is more correct than ranking them by raw sectors used.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 1421bea3
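
An illustrative sketch of the idea (not part of the commit; the frag() helper and the numbers are made up): with raw sectors-used, a nearly empty bucket on a device with large buckets can rank as "fuller" than a half-full bucket on a device with small buckets. Scaling the used fraction into a 15-bit fixed-point value, as the diff below does, makes buckets from differently sized devices directly comparable:

/* Hypothetical stand-alone version of the fixed-point calculation
 * added to bch2_copygc() below; illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint16_t frag(uint32_t sectors_used, uint32_t bucket_size)
{
	/* fraction of the bucket in use, scaled to 0..32768 (1U << 15);
	 * since sectors_used <= bucket_size the result fits in a u16
	 * (the 64-bit cast just guards the intermediate product here) */
	return (uint16_t)((uint64_t)sectors_used * (1U << 15) / bucket_size);
}

int main(void)
{
	/* 256 of 512 sectors used vs 300 of 2048 sectors used */
	printf("raw sectors:   %u vs %u\n", 256u, 300u);  /* 300 looks "fuller"...      */
	printf("fragmentation: %u vs %u\n",               /* ...but is only ~15% vs 50% */
	       frag(256, 512), frag(300, 2048));
	return 0;
}
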
@@ -123,6 +123,7 @@ struct disk_reservation {
 struct copygc_heap_entry {
 	u8			dev;
 	u8			gen;
+	u16			fragmentation;
 	u32			sectors;
 	u64			offset;
 };
@@ -44,13 +44,6 @@
 #define COPYGC_BUCKETS_PER_ITER(ca)					\
 	((ca)->free[RESERVE_MOVINGGC].size / 2)
 
-static inline int sectors_used_cmp(copygc_heap *heap,
-				   struct copygc_heap_entry l,
-				   struct copygc_heap_entry r)
-{
-	return cmp_int(l.sectors, r.sectors);
-}
-
 static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
 {
 	const struct copygc_heap_entry *l = _l;
@@ -123,6 +116,13 @@ static bool have_copygc_reserve(struct bch_dev *ca)
 	return ret;
 }
 
+static inline int fragmentation_cmp(copygc_heap *heap,
+				    struct copygc_heap_entry l,
+				    struct copygc_heap_entry r)
+{
+	return cmp_int(l.fragmentation, r.fragmentation);
+}
+
 static int bch2_copygc(struct bch_fs *c)
 {
 	copygc_heap *h = &c->copygc_heap;
@@ -180,10 +180,12 @@ static int bch2_copygc(struct bch_fs *c)
 		e = (struct copygc_heap_entry) {
 			.dev		= dev_idx,
 			.gen		= m.gen,
+			.fragmentation	= bucket_sectors_used(m) * (1U << 15)
+				/ ca->mi.bucket_size,
 			.sectors	= bucket_sectors_used(m),
 			.offset		= bucket_to_sector(ca, b),
 		};
-		heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
+		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
 	}
 	up_read(&ca->bucket_lock);
 }
@@ -197,7 +199,7 @@ static int bch2_copygc(struct bch_fs *c)
 		sectors_to_move += i->sectors;
 
 	while (sectors_to_move > sectors_reserved) {
-		BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
+		BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
 		sectors_to_move -= e.sectors;
 	}
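
For reference, a minimal user-space sketch of the ordering the new comparator produces, assuming the usual ((l > r) - (l < r)) definition of cmp_int(); the struct and comparator below are simplified stand-ins, not the bcachefs ones:

#include <assert.h>
#include <stdint.h>

#define cmp_int(l, r)	(((l) > (r)) - ((l) < (r)))

struct entry {
	uint16_t	fragmentation;	/* 15-bit fixed-point "fraction used" */
};

static inline int fragmentation_cmp(struct entry l, struct entry r)
{
	return cmp_int(l.fragmentation, r.fragmentation);
}

int main(void)
{
	struct entry emptier = { .fragmentation =  4800 };	/* ~15% full */
	struct entry fuller  = { .fragmentation = 16384 };	/*  50% full */

	/* negative / zero / positive, memcmp()-style; the -fragmentation_cmp
	 * spelling in the diff negates this result inside the heap macros,
	 * inverting the ordering */
	assert(fragmentation_cmp(emptier, fuller) < 0);
	assert(fragmentation_cmp(fuller, emptier) > 0);
	assert(fragmentation_cmp(emptier, emptier) == 0);
	return 0;
}
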