Commit ab05de4c authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Track incompressible data

This fixes the background_compression option: without some way of marking
data as incompressible, rebalance will keep rewriting incompressible
data over and over.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 182084e3
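
For context, a minimal standalone sketch of the mechanism this commit adds, with names taken from the diff below (an illustration, not the in-tree code):

	/* A new compression "type" that is a marker, not a codec: data that
	 * failed to compress is tagged so background compression knows not
	 * to keep retrying it. */
	enum bch_compression_type {
		BCH_COMPRESSION_TYPE_none		= 0,
		BCH_COMPRESSION_TYPE_lz4_old		= 1,
		BCH_COMPRESSION_TYPE_gzip		= 2,
		BCH_COMPRESSION_TYPE_lz4		= 3,
		BCH_COMPRESSION_TYPE_zstd		= 4,
		BCH_COMPRESSION_TYPE_incompressible	= 5,
	};

	/* "Compressed" now means actually compressed: both none and
	 * incompressible describe uncompressed payloads on disk. */
	static inline bool crc_is_compressed_sketch(unsigned compression_type)
	{
		return compression_type != BCH_COMPRESSION_TYPE_none &&
		       compression_type != BCH_COMPRESSION_TYPE_incompressible;
	}
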
@@ -1298,7 +1298,8 @@ LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3], 0, 16);
 	x(reflink,			6)	\
 	x(new_siphash,			7)	\
 	x(inline_data,			8)	\
-	x(new_extent_overwrite,		9)
+	x(new_extent_overwrite,		9)	\
+	x(incompressible,		10)
 
 enum bch_sb_feature {
 #define x(f, n) BCH_FEATURE_##f,
@@ -1378,11 +1379,12 @@ enum bch_csum_opts {
 };
 
 #define BCH_COMPRESSION_TYPES()		\
 	x(none,		0)		\
 	x(lz4_old,	1)		\
 	x(gzip,		2)		\
 	x(lz4,		3)		\
-	x(zstd,		4)
+	x(zstd,		4)		\
+	x(incompressible, 5)
 
 enum bch_compression_type {
 #define x(t, n) BCH_COMPRESSION_TYPE_##t,
...
@@ -326,7 +326,7 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
 	BUG_ON(len_a + len_b > bio_sectors(bio));
 	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
-	BUG_ON(crc_old.compression_type);
+	BUG_ON(crc_is_compressed(crc_old));
 	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
 	       bch2_csum_type_is_encryption(new_csum_type));
@@ -355,6 +355,7 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
 		if (i->crc)
 			*i->crc = (struct bch_extent_crc_unpacked) {
 				.csum_type		= i->csum_type,
+				.compression_type	= crc_old.compression_type,
 				.compressed_size	= i->len,
 				.uncompressed_size	= i->len,
 				.offset			= 0,
...
@@ -155,13 +155,16 @@ static inline struct nonce null_nonce(void)
 static inline struct nonce extent_nonce(struct bversion version,
 					struct bch_extent_crc_unpacked crc)
 {
-	unsigned size = crc.compression_type ? crc.uncompressed_size : 0;
+	unsigned compression_type = crc_is_compressed(crc)
+		? crc.compression_type
+		: 0;
+	unsigned size = compression_type ? crc.uncompressed_size : 0;
 	struct nonce nonce = (struct nonce) {{
 		[0] = cpu_to_le32(size << 22),
 		[1] = cpu_to_le32(version.lo),
 		[2] = cpu_to_le32(version.lo >> 32),
 		[3] = cpu_to_le32(version.hi|
-			(crc.compression_type << 24))^BCH_NONCE_EXTENT,
+			(compression_type << 24))^BCH_NONCE_EXTENT,
 	}};
 
 	return nonce_add(nonce, crc.nonce << 9);
...
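
One subtlety in the extent_nonce() hunk above: compression_type is folded into the last nonce word, and the new code maps incompressible to 0 there, same as none. A condensed sketch of that mapping (my reading of the hunk, not a separate in-tree helper); the apparent intent, stated here as an assumption, is that re-tagging existing uncompressed data as incompressible must not change the nonce its contents were encrypted under:

	/* The compression_type value that reaches the nonce: incompressible
	 * is deliberately indistinguishable from none here (assumption:
	 * that is the point of this hunk). */
	static inline unsigned nonce_compression_type_sketch(unsigned type)
	{
		return type == BCH_COMPRESSION_TYPE_incompressible ? 0 : type;
	}
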
@@ -434,7 +434,7 @@ static unsigned __bio_compress(struct bch_fs *c,
 	bio_unmap_or_unbounce(c, dst_data);
 	return compression_type;
 err:
-	compression_type = 0;
+	compression_type = BCH_COMPRESSION_TYPE_incompressible;
 	goto out;
 }
...
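
The __bio_compress() change above is where the marker originates: a failed compression attempt used to return 0 (none), leaving the extent indistinguishable from one that was never compressed. A toy sketch of the decision it now encodes (hypothetical helper, not in-tree code):

	/* If compression didn't shrink the data, record that it was tried
	 * and lost instead of pretending it never happened. */
	static unsigned compress_result_sketch(size_t dst_len, size_t src_len,
					       unsigned requested_type)
	{
		return dst_len < src_len
			? requested_type			/* e.g. zstd */
			: BCH_COMPRESSION_TYPE_incompressible;	/* was: 0 */
	}
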
@@ -336,7 +336,7 @@ enum merge_result bch2_extent_merge(struct bch_fs *c,
 		if (!bch2_checksum_mergeable(crc_l.csum_type))
 			return BCH_MERGE_NOMERGE;
 
-		if (crc_l.compression_type)
+		if (crc_is_compressed(crc_l))
 			return BCH_MERGE_NOMERGE;
 
 		if (crc_l.csum_type &&
@@ -447,7 +447,7 @@ static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
 				  struct bch_extent_crc_unpacked n)
 {
-	return !u.compression_type &&
+	return !crc_is_compressed(u) &&
 		u.csum_type &&
 		u.uncompressed_size > u.live_size &&
 		bch2_csum_type_is_encryption(u.csum_type) ==
@@ -491,7 +491,7 @@ bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
 	/* Find a checksum entry that covers only live data: */
 	if (!n.csum_type) {
 		bkey_for_each_crc(&k->k, ptrs, u, i)
-			if (!u.compression_type &&
+			if (!crc_is_compressed(u) &&
 			    u.csum_type &&
 			    u.live_size == u.uncompressed_size) {
 				n = u;
@@ -500,7 +500,7 @@ bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
 		return false;
 	}
 found:
-	BUG_ON(n.compression_type);
+	BUG_ON(crc_is_compressed(n));
 	BUG_ON(n.offset);
 	BUG_ON(n.live_size != k->k.size);
@@ -609,8 +609,7 @@ unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
 		struct extent_ptr_decoded p;
 
 		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-			ret += !p.ptr.cached &&
-				p.crc.compression_type == BCH_COMPRESSION_TYPE_none;
+			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
 	}
 
 	return ret;
@@ -624,13 +623,24 @@ unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
 	unsigned ret = 0;
 
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-		if (!p.ptr.cached &&
-		    p.crc.compression_type != BCH_COMPRESSION_TYPE_none)
+		if (!p.ptr.cached && crc_is_compressed(p.crc))
 			ret += p.crc.compressed_size;
 
 	return ret;
 }
 
+bool bch2_bkey_is_incompressible(struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const union bch_extent_entry *entry;
+	struct bch_extent_crc_unpacked crc;
+
+	bkey_for_each_crc(k.k, ptrs, crc, entry)
+		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
+			return true;
+
+	return false;
+}
+
 bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
 				unsigned nr_replicas)
 {
...
@@ -175,6 +175,12 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
 #undef common_fields
 }
 
+static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
+{
+	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
+		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
+}
+
 /* bkey_ptrs: generically over any key type that has ptrs */
 
 struct bkey_ptrs_c {
@@ -483,6 +489,7 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
+bool bch2_bkey_is_incompressible(struct bkey_s_c);
 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
 
 bool bch2_check_range_allocated(struct bch_fs *, struct bpos, u64, unsigned);
 unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
...
@@ -562,9 +562,14 @@ static void __bch2_write_index(struct bch_write_op *op)
 	 * particularly want to plumb io_opts all the way through the btree
 	 * update stack right now
 	 */
-	for_each_keylist_key(keys, k)
+	for_each_keylist_key(keys, k) {
 		bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
 
+		if (bch2_bkey_is_incompressible(bkey_i_to_s_c(k)))
+			bch2_check_set_feature(op->c, BCH_FEATURE_incompressible);
+	}
+
 	if (!bch2_keylist_empty(keys)) {
 		u64 sectors_start = keylist_sectors(keys);
 		int ret = op->index_update_fn(op);
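
Note the bch2_check_set_feature() call added above: incompressible is a compression_type value older kernels don't understand, so the write path sets BCH_FEATURE_incompressible in the superblock the first time such an extent is committed. A rough sketch of what a check-and-set helper like this does (an assumption about its shape; the real helper lives elsewhere in bcachefs):

	/* Sketch: flip a superblock feature bit once, on first use.
	 * Assumed behaviour: take the superblock lock and persist only if
	 * the bit wasn't already set. */
	static void check_set_feature_sketch(struct bch_fs *c, unsigned feat)
	{
		if (!(c->sb.features & (1ULL << feat))) {
			mutex_lock(&c->sb_lock);
			c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << feat);
			bch2_write_super(c);
			mutex_unlock(&c->sb_lock);
		}
	}
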
@@ -786,8 +791,9 @@ static enum prep_encoded_ret {
 	/* Can we just write the entire extent as is? */
 	if (op->crc.uncompressed_size == op->crc.live_size &&
 	    op->crc.compressed_size <= wp->sectors_free &&
-	    op->crc.compression_type == op->compression_type) {
-		if (!op->crc.compression_type &&
+	    (op->crc.compression_type == op->compression_type ||
+	     op->incompressible)) {
+		if (!crc_is_compressed(op->crc) &&
 		    op->csum_type != op->crc.csum_type &&
 		    bch2_write_rechecksum(c, op, op->csum_type))
 			return PREP_ENCODED_CHECKSUM_ERR;
@@ -799,7 +805,7 @@ static enum prep_encoded_ret {
 	 * If the data is compressed and we couldn't write the entire extent as
 	 * is, we have to decompress it:
 	 */
-	if (op->crc.compression_type) {
+	if (crc_is_compressed(op->crc)) {
 		struct bch_csum csum;
 
 		if (bch2_write_decrypt(op))
@@ -910,11 +916,13 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
 		       bch2_csum_type_is_encryption(op->crc.csum_type));
 		BUG_ON(op->compression_type && !bounce);
 
-		crc.compression_type = op->compression_type
-			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
-					    op->compression_type)
+		crc.compression_type = op->incompressible
+			? BCH_COMPRESSION_TYPE_incompressible
+			: op->compression_type
+			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
+					    op->compression_type)
 			: 0;
-		if (!crc.compression_type) {
+		if (!crc_is_compressed(crc)) {
 			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
 			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
@@ -943,7 +951,7 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
 		}
 
 		if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
-		    !crc.compression_type &&
+		    !crc_is_compressed(crc) &&
 		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
 		    bch2_csum_type_is_encryption(op->csum_type)) {
 			/*
@@ -1340,6 +1348,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
 static struct promote_op *__promote_alloc(struct bch_fs *c,
 					  enum btree_id btree_id,
+					  struct bkey_s_c k,
 					  struct bpos pos,
 					  struct extent_ptr_decoded *pick,
 					  struct bch_io_opts opts,
@@ -1396,8 +1405,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 			(struct data_opts) {
 				.target = opts.promote_target
 			},
-			btree_id,
-			bkey_s_c_null);
+			btree_id, k);
 	BUG_ON(ret);
 
 	return op;
@@ -1439,7 +1447,7 @@ static struct promote_op *promote_alloc(struct bch_fs *c,
 				  k.k->type == KEY_TYPE_reflink_v
 				  ? BTREE_ID_REFLINK
 				  : BTREE_ID_EXTENTS,
-				  pos, pick, opts, sectors, rbio);
+				  k, pos, pick, opts, sectors, rbio);
 	if (!promote)
 		return NULL;
@@ -1703,7 +1711,7 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
 	u64 data_offset = rbio->pos.offset - rbio->pick.crc.offset;
 	int ret;
 
-	if (rbio->pick.crc.compression_type)
+	if (crc_is_compressed(rbio->pick.crc))
 		return;
 
 	bkey_on_stack_init(&new);
@@ -1788,7 +1796,7 @@ static void __bch2_read_endio(struct work_struct *work)
 	crc.offset     += rbio->offset_into_extent;
 	crc.live_size	= bvec_iter_sectors(rbio->bvec_iter);
 
-	if (crc.compression_type != BCH_COMPRESSION_TYPE_none) {
+	if (crc_is_compressed(crc)) {
 		bch2_encrypt_bio(c, crc.csum_type, nonce, src);
 		if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
 			goto decompression_err;
@@ -1885,7 +1893,7 @@ static void bch2_read_endio(struct bio *bio)
 	}
 
 	if (rbio->narrow_crcs ||
-	    rbio->pick.crc.compression_type ||
+	    crc_is_compressed(rbio->pick.crc) ||
 	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
 		context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
 	else if (rbio->pick.crc.csum_type)
@@ -1996,7 +2004,7 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
 
-	if (pick.crc.compression_type != BCH_COMPRESSION_TYPE_none ||
+	if (crc_is_compressed(pick.crc) ||
 	    (pick.crc.csum_type != BCH_CSUM_NONE &&
 	     (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
 	      (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
@@ -2011,7 +2019,7 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 			 &rbio, &bounce, &read_full);
 
 	if (!read_full) {
-		EBUG_ON(pick.crc.compression_type);
+		EBUG_ON(crc_is_compressed(pick.crc));
 		EBUG_ON(pick.crc.csum_type &&
 			(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
 			 bvec_iter_sectors(iter) != pick.crc.live_size ||
...
@@ -107,6 +107,7 @@ struct bch_write_op {
 	unsigned		nr_replicas:4;
 	unsigned		nr_replicas_required:4;
 	unsigned		alloc_reserve:4;
+	unsigned		incompressible:1;
 
 	struct bch_devs_list	devs_have;
 	u16			target;
...
@@ -214,6 +214,9 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
 			   enum btree_id btree_id,
 			   struct bkey_s_c k)
 {
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
 	int ret;
 
 	m->btree_id	= btree_id;
@@ -222,9 +225,14 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
 	m->nr_ptrs_reserved = 0;
 
 	bch2_write_op_init(&m->op, c, io_opts);
-	m->op.compression_type =
-		bch2_compression_opt_to_type[io_opts.background_compression ?:
-					     io_opts.compression];
+
+	if (!bch2_bkey_is_incompressible(k))
+		m->op.compression_type =
+			bch2_compression_opt_to_type[io_opts.background_compression ?:
+						     io_opts.compression];
+	else
+		m->op.incompressible = true;
+
 	m->op.target	= data_opts.target,
 	m->op.write_point	= wp;
@@ -264,14 +272,11 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
 		break;
 	}
 	case DATA_REWRITE: {
-		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-		const union bch_extent_entry *entry;
-		struct extent_ptr_decoded p;
 		unsigned compressed_sectors = 0;
 
 		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 			if (!p.ptr.cached &&
-			    p.crc.compression_type != BCH_COMPRESSION_TYPE_none &&
+			    crc_is_compressed(p.crc) &&
 			    bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
 				compressed_sectors += p.crc.compressed_size;
...
@@ -30,7 +30,8 @@ static int __bch2_rebalance_pred(struct bch_fs *c,
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
 
-	if (io_opts->background_compression)
+	if (io_opts->background_compression &&
+	    !bch2_bkey_is_incompressible(k))
 		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 			if (!p.ptr.cached &&
 			    p.crc.compression_type !=
...
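
This rebalance hunk is the one the commit message describes: without the marker, an extent whose compression_type never matched the background_compression target was re-queued on every pass, rewriting the same data over and over. Condensed, the fixed predicate reads (a simplified sketch of the hunk above, not a separate in-tree function):

	/* Only consider a key for background recompression if it hasn't
	 * already been proven incompressible. */
	static bool wants_bg_compression_sketch(struct bch_io_opts *opts,
						struct bkey_s_c k)
	{
		return opts->background_compression &&
		       !bch2_bkey_is_incompressible(k);
	}
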
@@ -276,7 +276,7 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
 		struct extent_ptr_decoded p;
 
 		extent_for_each_ptr_decode(e, p, entry) {
-			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_none) {
+			if (!crc_is_compressed(p.crc)) {
 				nr_uncompressed_extents++;
 				uncompressed_sectors += e.k->size;
 			} else {
...