Commit bf0fdb4d authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Don't erasure code cached ptrs

It doesn't make much sense to be erasure coding cached pointers: we
should be erasure coding one of the dirty pointers in an extent. This
patch makes sure we pass BCH_WRITE_CACHED when we expect the new
pointer to be a cached pointer, and tweaks the write path to not
allocate from a stripe when BCH_WRITE_CACHED is set.

It also fixes an assertion we were hitting in the ec path: when adding
the stripe pointer to an extent and deleting the other pointers, the
pointer to the stripe could end up not existing, because dropping all
dirty pointers from an extent turns it into a KEY_TYPE_error key.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 990d42d1
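
Before the diff, a minimal sketch of the write-path rule the patch
introduces. This is an illustration, not tree code: want_stripe_alloc()
and the flag value are hypothetical; in the patch itself the condition
is passed inline to bch2_alloc_sectors_start().

#include <stdbool.h>

/* Hypothetical flag value, for illustration only */
#define BCH_WRITE_CACHED	(1U << 7)

/*
 * Hypothetical helper mirroring the condition passed to
 * bch2_alloc_sectors_start() in the io.c hunk below: only allocate
 * from a stripe when erasure coding is enabled and the write is not
 * creating a cached pointer.
 */
static bool want_stripe_alloc(bool opt_erasure_code, unsigned write_flags)
{
	return opt_erasure_code && !(write_flags & BCH_WRITE_CACHED);
}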
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -143,8 +143,8 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
 }
 
 /* returns blocknr in stripe that we matched: */
-static int bkey_matches_stripe(struct bch_stripe *s,
-			       struct bkey_s_c k)
+static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
+						struct bkey_s_c k, unsigned *block)
 {
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const struct bch_extent_ptr *ptr;
@@ -153,10 +153,12 @@ static int bkey_matches_stripe(struct bch_stripe *s,
 	bkey_for_each_ptr(ptrs, ptr)
 		for (i = 0; i < nr_data; i++)
 			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
-						      le16_to_cpu(s->sectors)))
-				return i;
+						      le16_to_cpu(s->sectors))) {
+				*block = i;
+				return ptr;
+			}
 
-	return -1;
+	return NULL;
 }
 
 static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
@@ -834,6 +836,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
 	       (k = bch2_btree_iter_peek(&iter)).k &&
 	       !(ret = bkey_err(k)) &&
 	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
+		const struct bch_extent_ptr *ptr_c;
 		struct bch_extent_ptr *ptr, *ec_ptr = NULL;
 
 		if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
@@ -841,8 +844,12 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
 			continue;
 		}
 
-		block = bkey_matches_stripe(&s->key.v, k);
-		if (block < 0) {
+		ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
+		/*
+		 * It doesn't generally make sense to erasure code cached ptrs:
+		 * XXX: should we be incrementing a counter?
+		 */
+		if (!ptr_c || ptr_c->cached) {
 			bch2_btree_iter_advance(&iter);
 			continue;
 		}
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1179,7 +1179,7 @@ static void __bch2_write(struct closure *cl)
 	 */
 	wp = bch2_alloc_sectors_start(c,
 		op->target,
-		op->opts.erasure_code,
+		op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
 		op->write_point,
 		&op->devs_have,
 		op->nr_replicas,
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -394,10 +394,14 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
 	unsigned compressed_sectors = 0;
 
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-		if (p.ptr.dev == data_opts.rewrite_dev &&
-		    !p.ptr.cached &&
-		    crc_is_compressed(p.crc))
-			compressed_sectors += p.crc.compressed_size;
+		if (p.ptr.dev == data_opts.rewrite_dev) {
+			if (p.ptr.cached)
+				m->op.flags |= BCH_WRITE_CACHED;
+
+			if (!p.ptr.cached &&
+			    crc_is_compressed(p.crc))
+				compressed_sectors += p.crc.compressed_size;
+		}
 
 	if (compressed_sectors) {
 		ret = bch2_disk_reservation_add(c, &m->op.res,
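
A note on the bkey_matches_stripe() change above: it used to return a
block number or -1, and now returns the matching extent pointer (or
NULL) with the block reported through an out-parameter, so the caller
can also test ptr->cached. A condensed toy model of that contract,
using simplified stand-in types rather than the real bcachefs
structures:

#include <stddef.h>
#include <stdbool.h>

/* Simplified stand-in for struct bch_extent_ptr */
struct toy_extent_ptr {
	unsigned	dev;
	bool		cached;
};

/*
 * Toy version of the new contract: return the matching pointer (or
 * NULL) and report the stripe block it matched via *block. Matching is
 * reduced to a device-number compare purely for illustration.
 */
static const struct toy_extent_ptr *
toy_matches_stripe(const unsigned *stripe_devs, unsigned nr_data,
		   const struct toy_extent_ptr *ptr, unsigned *block)
{
	for (unsigned i = 0; i < nr_data; i++)
		if (stripe_devs[i] == ptr->dev) {
			*block = i;
			return ptr;
		}
	return NULL;
}

Returning the pointer is what lets ec_stripe_update_ptrs() skip extents
whose only match is a cached copy (!ptr_c || ptr_c->cached), the check
added in the ec.c hunk.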