Commit 73e6ab95 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Switch replicas to mark_lock

Prep work for upcoming disk accounting changes
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 56e0e7c7
@@ -528,8 +528,8 @@ struct bch_fs {
 	struct bch_dev __rcu	*devs[BCH_SB_MEMBERS_MAX];

-	struct bch_replicas_cpu __rcu *replicas;
-	struct bch_replicas_cpu __rcu *replicas_gc;
+	struct bch_replicas_cpu	replicas;
+	struct bch_replicas_cpu	replicas_gc;
 	struct mutex		replicas_gc_lock;

 	struct bch_disk_groups_cpu __rcu *disk_groups;
...
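This field change is the core of the commit: c->replicas and c->replicas_gc stop being RCU-managed pointers and become plain structs whose contents are protected by c->mark_lock, the percpu rwsem initialized in the bch2_fs_alloc hunk below. A minimal sketch of how the reader-side access pattern shifts (helper functions are hypothetical, not code from this commit):

/* Before: readers dereferenced an RCU-protected pointer. */
static unsigned replicas_nr_rcu(struct bch_fs *c)
{
	unsigned nr;

	rcu_read_lock();
	nr = rcu_dereference(c->replicas)->nr;
	rcu_read_unlock();
	return nr;
}

/* After: readers hold mark_lock (a percpu rwsem) for shared access. */
static unsigned replicas_nr_marked(struct bch_fs *c)
{
	unsigned nr;

	percpu_down_read(&c->mark_lock);
	nr = c->replicas.nr;
	percpu_up_read(&c->mark_lock);
	return nr;
}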
@@ -131,8 +131,7 @@ int bch2_fs_recovery(struct bch_fs *c)
 	int ret;

 	mutex_lock(&c->sb_lock);
-	if (!rcu_dereference_protected(c->replicas,
-				       lockdep_is_held(&c->sb_lock))->nr) {
+	if (!c->replicas.entries) {
 		bch_info(c, "building replicas info");
 		set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
 	}
...
[diff collapsed, not shown]
@@ -2,10 +2,9 @@
 #define _BCACHEFS_REPLICAS_TYPES_H

 struct bch_replicas_cpu {
-	struct rcu_head		rcu;
 	unsigned		nr;
 	unsigned		entry_size;
-	struct bch_replicas_entry entries[];
+	struct bch_replicas_entry *entries;
 };

 #endif /* _BCACHEFS_REPLICAS_TYPES_H */
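With struct rcu_head and the flexible array member gone, entries is a separately allocated buffer, which is why bch2_fs_free() below frees c->replicas.entries and c->replicas_gc.entries individually. Each struct bch_replicas_entry is variable-length, so the table is still indexed by byte offset rather than by array element. A sketch of the allocation and indexing this layout implies (helper names hypothetical):

/* Entries are variable-size: step through the buffer in entry_size
 * byte strides rather than indexing a true C array. */
static struct bch_replicas_entry *
replicas_entry_idx(struct bch_replicas_cpu *r, unsigned i)
{
	return (void *) r->entries + i * r->entry_size;
}

/* The table is now an ordinary kcalloc'd buffer, freed with plain
 * kfree() -- no kfree_rcu() grace period needed once readers take
 * mark_lock instead of rcu_read_lock(). */
static int replicas_table_alloc(struct bch_replicas_cpu *r,
				unsigned nr, unsigned entry_size)
{
	r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!r->entries)
		return -ENOMEM;

	r->nr		= nr;
	r->entry_size	= entry_size;
	return 0;
}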
@@ -383,7 +383,8 @@ static void bch2_fs_free(struct bch_fs *c)
 	mempool_exit(&c->btree_reserve_pool);
 	mempool_exit(&c->fill_iter);
 	percpu_ref_exit(&c->writes);
-	kfree(rcu_dereference_protected(c->replicas, 1));
+	kfree(c->replicas.entries);
+	kfree(c->replicas_gc.entries);
 	kfree(rcu_dereference_protected(c->disk_groups, 1));

 	if (c->copygc_wq)
@@ -565,6 +566,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	bch2_fs_btree_cache_init_early(&c->btree_cache);

+	if (percpu_init_rwsem(&c->mark_lock))
+		goto err;
+
 	mutex_lock(&c->sb_lock);

 	if (bch2_sb_to_fs(c, sb)) {
@@ -608,7 +612,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 			offsetof(struct btree_write_bio, wbio.bio)),
 			BIOSET_NEED_BVECS) ||
 	    !(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
-	    percpu_init_rwsem(&c->mark_lock) ||
 	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
 					btree_bytes(c)) ||
 	    mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
...
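percpu_init_rwsem() moves out of the big || allocation chain because mark_lock must exist before the mutex_lock(&c->sb_lock) / bch2_sb_to_fs() sequence just below it, which now populates c->replicas under the lock. On the writer side, an update can then swap the table in place under the rwsem held for write, where the old scheme published a new struct with rcu_assign_pointer() and deferred the free to an RCU grace period. A minimal sketch of that writer pattern (hypothetical helper, not code from this commit):

/* Swap in a freshly built replicas table. With all readers taking
 * mark_lock for read, the old entries buffer can be freed immediately
 * after the write section -- no RCU grace period required. */
static void replicas_table_swap(struct bch_fs *c,
				struct bch_replicas_cpu *new_r)
{
	struct bch_replicas_cpu old;

	percpu_down_write(&c->mark_lock);
	old = c->replicas;
	c->replicas = *new_r;
	percpu_up_write(&c->mark_lock);

	kfree(old.entries);
}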