Commit 5663a415 authored by Kent Overstreet's avatar Kent Overstreet Committed by Kent Overstreet

bcachefs: refactor bch_fs_usage

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 73e6ab95
......@@ -721,7 +721,7 @@ static struct write_point *__writepoint_find(struct hlist_head *head,
/*
 * Heuristic used when deciding whether to close a write point: returns
 * true when the sectors potentially stranded in open write points
 * (write_points_nr buckets of up to bucket_size_max sectors each)
 * exceed 1/factor of the filesystem's free space.
 */
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded = c->write_points_nr * c->bucket_size_max;
	/* bch2_fs_sectors_free() reads the cheap usage summary */
	u64 free = bch2_fs_sectors_free(c);

	return stranded * factor > free;
}
......
......@@ -503,6 +503,10 @@ enum bch_fs_state {
BCH_FS_RW,
};
/*
 * Per-cpu filesystem state (struct bch_fs::pcpu).
 * NOTE(review): sectors_available here presumably caches a per-cpu share
 * of the global atomic64_t sectors_available counter in struct bch_fs, to
 * avoid contention on reservations — confirm against the marking code.
 */
struct bch_fs_pcpu {
u64 sectors_available;
};
struct bch_fs {
struct closure cl;
......@@ -615,6 +619,8 @@ struct bch_fs {
atomic64_t sectors_available;
struct bch_fs_pcpu __percpu *pcpu;
struct bch_fs_usage __percpu *usage[2];
struct percpu_rw_semaphore mark_lock;
......
This diff is collapsed.
......@@ -164,6 +164,20 @@ static inline bool bucket_unused(struct bucket_mark mark)
!bucket_sectors_used(mark);
}
static inline bool is_available_bucket(struct bucket_mark mark)
{
return (!mark.owned_by_allocator &&
!mark.dirty_sectors &&
!mark.stripe);
}
/*
 * Returns true if the bucket's last-modified journal sequence number has
 * not yet reached disk, i.e. a journal flush is still needed before the
 * bucket may be reused.  The s16 casts make the sequence comparison
 * robust against wraparound of the truncated counters.
 */
static inline bool bucket_needs_journal_commit(struct bucket_mark m,
u16 last_seq_ondisk)
{
	s16 delta = (s16) m.journal_seq - (s16) last_seq_ondisk;

	return m.journal_seq_valid && delta > 0;
}
/* Device usage: */
struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *, bool);
......@@ -207,31 +221,21 @@ static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *, bool);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *, struct gc_pos);
u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
static inline u64 bch2_fs_sectors_free(struct bch_fs *c,
struct bch_fs_usage stats)
{
return c->capacity - bch2_fs_sectors_used(c, stats);
}
struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);
static inline bool is_available_bucket(struct bucket_mark mark)
/*
 * Free space in sectors, derived from the compact usage summary.
 * (Diff residue removed: the deleted pre-refactor copies of
 * is_available_bucket() and bucket_needs_journal_commit() were
 * interleaved here; they now live earlier in this header.)
 */
static inline u64 bch2_fs_sectors_free(struct bch_fs *c)
{
	struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);

	return usage.capacity - usage.used;
}
/* key/bucket marking: */
void bch2_bucket_seq_cleanup(struct bch_fs *);
void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
......@@ -252,6 +256,10 @@ int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
bool, s64, struct gc_pos,
struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *, struct gc_pos);
/* disk reservations: */
void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);
......
......@@ -73,9 +73,13 @@ struct bch_fs_usage {
u64 nr_inodes;
/* fields starting here aren't touched by gc: */
u64 online_reserved;
u64 available_cache;
};
/*
 * Compact summary of filesystem usage, returned by
 * bch2_fs_usage_read_short(): total capacity, sectors used, and inode
 * count — the fields needed by statfs and free-space checks, without
 * reading the full struct bch_fs_usage.
 */
struct bch_fs_usage_short {
u64 capacity;
u64 used;
u64 nr_inodes;
};
/*
......
......@@ -306,7 +306,7 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
.p.btree_id = ctx->stats.iter.btree_id,
.p.pos = ctx->stats.iter.pos,
.p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
.p.sectors_total = bch2_fs_sectors_used(c, bch2_fs_usage_read(c)),
.p.sectors_total = bch2_fs_usage_read_short(c).used,
};
if (len < sizeof(e))
......
......@@ -1423,16 +1423,14 @@ static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct bch_fs *c = sb->s_fs_info;
struct bch_fs_usage usage = bch2_fs_usage_read(c);
u64 hidden_metadata = usage.buckets[BCH_DATA_SB] +
usage.buckets[BCH_DATA_JOURNAL];
struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
unsigned shift = sb->s_blocksize_bits - 9;
u64 fsid;
buf->f_type = BCACHEFS_STATFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (c->capacity - hidden_metadata) >> shift;
buf->f_bfree = (c->capacity - bch2_fs_sectors_used(c, usage)) >> shift;
buf->f_blocks = usage.capacity >> shift;
buf->f_bfree = (usage.capacity - usage.used) >> shift;
buf->f_bavail = buf->f_bfree;
buf->f_files = usage.nr_inodes;
buf->f_ffree = U64_MAX;
......
......@@ -376,6 +376,7 @@ static void bch2_fs_free(struct bch_fs *c)
bch2_fs_compress_exit(c);
percpu_free_rwsem(&c->mark_lock);
free_percpu(c->usage[0]);
free_percpu(c->pcpu);
mempool_exit(&c->btree_iters_pool);
mempool_exit(&c->btree_bounce_pool);
bioset_exit(&c->btree_bio);
......@@ -612,6 +613,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
!(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment