Commit f295298b authored by Kent Overstreet

bcachefs: New helpers for device refcounts

This will be used in the next patch for adding some new debug mode
asserts.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e98786ea
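
Every call site below changes the same way: percpu_ref_get(&ca->ref) becomes bch2_dev_get(ca), and percpu_ref_put(&ca->ref) becomes bch2_dev_put(ca). Because bch2_dev_put() accepts a NULL pointer (see the sb-members.h hunk), call sites that guarded the put can drop their check. A minimal before/after sketch of the conversion, condensed from the hunks below:

	/* before: open-coded percpu refcounting, caller checks for NULL */
	if (ca)
		percpu_ref_put(&ca->ref);

	/* after: the NULL check lives inside the helper */
	bch2_dev_put(ca);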
@@ -1656,10 +1656,9 @@ static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_st
 	    bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
 		bch2_journal_flush_async(&c->journal, NULL);
 
-	if (s->ca)
-		percpu_ref_put(&s->ca->ref);
+	bch2_dev_put(s->ca);
 	if (ca)
-		percpu_ref_get(&ca->ref);
+		bch2_dev_get(ca);
 	s->ca = ca;
 	s->need_journal_commit_this_dev = 0;
 }
@@ -2014,7 +2013,7 @@ static void bch2_do_invalidates_work(struct work_struct *work)
 				invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
 
 		if (ret < 0) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			break;
 		}
 	}
@@ -2151,7 +2150,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
 		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
 		if (ret) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			bch_err_fn(c, ret);
 			return ret;
 		}
@@ -733,21 +733,21 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 		rcu_read_lock();
 		ca = rcu_dereference(c->devs[dev]);
 		if (ca)
-			percpu_ref_get(&ca->ref);
+			bch2_dev_get(ca);
 		rcu_read_unlock();
 
 		if (!ca)
 			continue;
 
 		if (!ca->mi.durability && *have_cache) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			continue;
 		}
 
 		ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
 		if (!IS_ERR(ob))
 			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
-		percpu_ref_put(&ca->ref);
+		bch2_dev_put(ca);
 
 		if (IS_ERR(ob)) {
 			ret = PTR_ERR(ob);
@@ -815,10 +815,8 @@ static int bch2_gc_done(struct bch_fs *c)
 #undef copy_stripe_field
 #undef copy_field
 fsck_err:
-	if (ca)
-		percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 	bch_err_fn(c, ret);
 	percpu_up_write(&c->mark_lock);
 	printbuf_exit(&buf);
 	return ret;
@@ -841,7 +839,7 @@ static int bch2_gc_start(struct bch_fs *c)
 		ca->usage_gc = alloc_percpu(struct bch_dev_usage);
 		if (!ca->usage_gc) {
 			bch_err(c, "error allocating ca->usage_gc");
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			return -BCH_ERR_ENOMEM_gc_start;
 		}
@@ -969,7 +967,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c)
 				NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
 			bch2_alloc_write_key(trans, &iter, k)));
 		if (ret) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			break;
 		}
 	}
@@ -985,7 +983,7 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
 				ca->mi.nbuckets * sizeof(struct bucket),
 				GFP_KERNEL|__GFP_ZERO);
 		if (!buckets) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			bch_err(c, "error allocating ca->buckets[gc]");
 			return -BCH_ERR_ENOMEM_gc_alloc_start;
 		}
@@ -1330,7 +1328,7 @@ int bch2_gc_gens(struct bch_fs *c)
 		ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
 		if (!ca->oldest_gen) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			ret = -BCH_ERR_ENOMEM_gc_gens;
 			goto err;
 		}
@@ -1438,7 +1438,7 @@ int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
 	for_each_online_member(c, ca) {
 		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
 		if (ret) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			return ret;
 		}
 	}
@@ -1536,7 +1536,7 @@ int bch2_buckets_nouse_alloc(struct bch_fs *c)
 					    sizeof(unsigned long),
 					    GFP_KERNEL|__GFP_ZERO);
 		if (!ca->buckets_nouse) {
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			return -BCH_ERR_ENOMEM_buckets_nouse;
 		}
 	}
@@ -35,7 +35,7 @@ static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
 		rcu_read_lock();
 		ca = rcu_dereference(c->devs[dev]);
 		if (ca)
-			percpu_ref_get(&ca->ref);
+			bch2_dev_get(ca);
 		rcu_read_unlock();
 
 		if (!ca)
@@ -391,7 +391,7 @@ static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
 		return PTR_ERR(ca);
 
 	ret = bch2_dev_offline(c, ca, arg.flags);
-	percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 	return ret;
 }
@@ -420,7 +420,7 @@ static long bch2_ioctl_disk_set_state(struct bch_fs *c,
 	if (ret)
 		bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
 
-	percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 	return ret;
 }
@@ -615,7 +615,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
 		arg.d[i].fragmented	= src.d[i].fragmented;
 	}
 
-	percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 	return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
 }
@@ -667,7 +667,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
 			goto err;
 	}
 err:
-	percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 	return ret;
 }
@@ -689,11 +689,9 @@ static long bch2_ioctl_read_super(struct bch_fs *c,
 	if (arg.flags & BCH_READ_DEV) {
 		ca = bch2_device_lookup(c, arg.dev, arg.flags);
-		if (IS_ERR(ca)) {
-			ret = PTR_ERR(ca);
-			goto err;
-		}
+		ret = PTR_ERR_OR_ZERO(ca);
+		if (ret)
+			goto err_unlock;
 
 		sb = ca->disk_sb.sb;
 	} else {
@@ -708,8 +706,8 @@ static long bch2_ioctl_read_super(struct bch_fs *c,
 	ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
 				   vstruct_bytes(sb));
 err:
-	if (!IS_ERR_OR_NULL(ca))
-		percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
+err_unlock:
 	mutex_unlock(&c->sb_lock);
 	return ret;
 }
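
bch2_ioctl_read_super() also has its error paths restructured: PTR_ERR_OR_ZERO() yields PTR_ERR(ca) when the lookup returned an error pointer and 0 otherwise, and the new err_unlock label lets that failure path bypass bch2_dev_put(), which tolerates NULL but not an ERR_PTR. The resulting flow, abridged from the two hunks above:

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	ret = PTR_ERR_OR_ZERO(ca);	/* PTR_ERR(ca) on error, else 0 */
	if (ret)
		goto err_unlock;	/* ca is an ERR_PTR: skip the put */
	...
err:
	bch2_dev_put(ca);		/* ca is NULL or a valid reference here */
err_unlock:
	mutex_unlock(&c->sb_lock);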
@@ -753,7 +751,7 @@ static long bch2_ioctl_disk_resize(struct bch_fs *c,
 	ret = bch2_dev_resize(c, ca, arg.nbuckets);
 
-	percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 	return ret;
 }
@@ -779,7 +777,7 @@ static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
 	ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);
 
-	percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 	return ret;
 }
@@ -360,7 +360,7 @@ void bch2_data_update_exit(struct data_update *update)
 		if (c->opts.nocow_enabled)
 			bch2_bucket_nocow_unlock(&c->nocow_locks,
 						 PTR_BUCKET_POS(c, ptr), 0);
-		percpu_ref_put(&bch2_dev_bkey_exists(c, ptr->dev)->ref);
+		bch2_dev_put(bch2_dev_bkey_exists(c, ptr->dev));
 	}
 
 	bch2_bkey_buf_exit(&update->k, c);
@@ -541,7 +541,7 @@ int bch2_data_update_init(struct btree_trans *trans,
 	m->op.watermark	= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
 
 	bkey_for_each_ptr(ptrs, ptr)
-		percpu_ref_get(&bch2_dev_bkey_exists(c, ptr->dev)->ref);
+		bch2_dev_get(bch2_dev_bkey_exists(c, ptr->dev));
 
 	unsigned durability_have = 0, durability_removing = 0;
@@ -653,7 +653,7 @@ int bch2_data_update_init(struct btree_trans *trans,
 		if ((1U << i) & ptrs_locked)
 			bch2_bucket_nocow_unlock(&c->nocow_locks,
 						 PTR_BUCKET_POS(c, &p.ptr), 0);
-		percpu_ref_put(&bch2_dev_bkey_exists(c, p.ptr.dev)->ref);
+		bch2_dev_put(bch2_dev_bkey_exists(c, p.ptr.dev));
 		i++;
 	}
@@ -523,7 +523,7 @@ int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res,
 	ca = bch2_dev_lookup(c, val);
 	if (!IS_ERR(ca)) {
 		*res = dev_to_target(ca->dev_idx);
-		percpu_ref_put(&ca->ref);
+		bch2_dev_put(ca);
 		return 0;
 	}
@@ -105,14 +105,28 @@ static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *
 	for (struct bch_dev *_ca = NULL;				\
 	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)
 
-static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
+static inline void bch2_dev_get(struct bch_dev *ca)
+{
+	percpu_ref_get(&ca->ref);
+}
+
+static inline void __bch2_dev_put(struct bch_dev *ca)
+{
+	percpu_ref_put(&ca->ref);
+}
+
+static inline void bch2_dev_put(struct bch_dev *ca)
 {
-	rcu_read_lock();
 	if (ca)
-		percpu_ref_put(&ca->ref);
+		__bch2_dev_put(ca);
+}
+
+static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
+{
+	rcu_read_lock();
+	bch2_dev_put(ca);
 	if ((ca = __bch2_next_dev(c, ca, NULL)))
-		percpu_ref_get(&ca->ref);
+		bch2_dev_get(ca);
 	rcu_read_unlock();
 
 	return ca;
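
For illustration, here is how the helpers compose with device iteration. bch2_get_next_dev() drops the previous device's reference and takes the next one under rcu_read_lock(), so exiting the loop early leaves exactly one reference held, which the caller must drop; that is the pattern behind the error-path puts in the hunks above. The loop body below is hypothetical:

	for (struct bch_dev *ca = NULL; (ca = bch2_get_next_dev(c, ca));) {
		int ret = do_per_device_work(c, ca);	/* hypothetical helper */
		if (ret) {
			bch2_dev_put(ca);	/* drop the ref the iterator left us */
			break;
		}
	}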
@@ -710,7 +710,7 @@ static int bch2_fs_online(struct bch_fs *c)
 		ret = bch2_dev_sysfs_online(c, ca);
 		if (ret) {
 			bch_err(c, "error creating sysfs objects");
-			percpu_ref_put(&ca->ref);
+			bch2_dev_put(ca);
 			goto err;
 		}
 	}
@@ -1613,7 +1613,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
 	 * We consume a reference to ca->ref, regardless of whether we succeed
 	 * or fail:
 	 */
-	percpu_ref_put(&ca->ref);
+	bch2_dev_put(ca);
 
 	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
 		bch_err(ca, "Cannot remove without losing data");
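
The comment kept in that last hunk states the ownership rule that makes the put-on-entry correct: bch2_dev_remove() consumes the caller's reference whether it succeeds or fails. Caller-side, the convention looks roughly like this (a sketch; the surrounding ioctl plumbing is condensed and illustrative):

	struct bch_dev *ca = bch2_device_lookup(c, arg.dev, arg.flags);	/* takes a ref */
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	return bch2_dev_remove(c, ca, arg.flags);	/* ref consumed, success or failure */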