Commit 9166b41d authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: s/usage_lock/mark_lock

better describes what it's for, and we're going to call a new lock
usage_lock
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 76640280
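
The rename below is mechanical, but the lock itself is worth a gloss: c->mark_lock is a percpu_rw_semaphore, so the bucket-marking paths take a cheap per-cpu read side while device resize and GC reconciliation take the write side, which drains every reader. A minimal sketch of that pattern, not from this patch (the my_ names are hypothetical):

#include <linux/percpu-rwsem.h>

struct my_fs {
	struct percpu_rw_semaphore mark_lock;	/* was: usage_lock */
};

/* Hot path: marking takes the per-cpu read side, which is nearly free. */
static void my_mark_one(struct my_fs *c)
{
	percpu_down_read(&c->mark_lock);
	/* ... update bucket marks / usage counters ... */
	percpu_up_read(&c->mark_lock);
}

/* Cold path: resize/gc takes the write side, excluding all markers. */
static void my_resize(struct my_fs *c)
{
	percpu_down_write(&c->mark_lock);
	/* ... swap bucket arrays, reconcile usage ... */
	percpu_up_write(&c->mark_lock);
}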
@@ -218,9 +218,9 @@ static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
 	if (a.k->p.offset >= ca->mi.nbuckets)
 		return;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	__alloc_read_key(bucket(ca, a.k->p.offset), a.v);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
@@ -288,12 +288,12 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 	a->k.p = POS(ca->dev_idx, b);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	g = bucket(ca, b);
 	m = bucket_cmpxchg(g, m, m.dirty = false);
 	__alloc_write_key(a, g, m);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	bch2_btree_iter_cond_resched(iter);
@@ -804,7 +804,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
 {
 	struct bucket_mark m;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	spin_lock(&c->freelist_lock);
 	bch2_invalidate_bucket(c, ca, bucket, &m);
@@ -817,7 +817,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
 	bucket_io_clock_reset(c, ca, bucket, READ);
 	bucket_io_clock_reset(c, ca, bucket, WRITE);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	if (m.journal_seq_valid) {
 		u64 journal_seq = atomic64_read(&c->journal.seq);
@@ -1345,7 +1345,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 		struct bucket_mark m;
 		down_read(&ca->bucket_lock);
-		percpu_down_read(&c->usage_lock);
+		percpu_down_read(&c->mark_lock);
 		buckets = bucket_array(ca);
@@ -1369,7 +1369,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 			if (fifo_full(&ca->free[RESERVE_BTREE]))
 				break;
 		}
-		percpu_up_read(&c->usage_lock);
+		percpu_up_read(&c->mark_lock);
 		up_read(&ca->bucket_lock);
 	}
...
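
The m = bucket_cmpxchg(g, m, m.dirty = false) call above is a lockless read-modify-write over the bucket's packed mark word; the mark_lock read side only pins the bucket array against resize, it does not serialize the update itself. A hedged sketch of that cmpxchg-loop idiom, with simplified types and hypothetical my_ names (not the real bcachefs macro):

#include <linux/atomic.h>

struct my_mark { u64 v; };		/* packed bucket state (assumed) */
struct my_bucket { atomic64_t mark; };

/* Evaluate expr on a copy of the mark, then try to publish it with
 * cmpxchg; retry if another CPU changed the mark meanwhile. Yields
 * the mark that was replaced.
 */
#define my_bucket_cmpxchg(g, new, expr)					\
({									\
	u64 _v = atomic64_read(&(g)->mark);				\
	struct my_mark _old;						\
	do {								\
		_old.v = (new).v = _v;	/* start from current value */	\
		expr;			/* mutate the copy */		\
	} while ((_v = atomic64_cmpxchg(&(g)->mark,			\
					_old.v, (new).v)) != _old.v);	\
	_old;								\
})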
@@ -101,7 +101,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 		return;
 	}
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	spin_lock(&ob->lock);
 	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
@@ -109,7 +109,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 	ob->valid = false;
 	spin_unlock(&ob->lock);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	spin_lock(&c->freelist_lock);
 	ob->freelist = c->open_buckets_freelist;
@@ -441,7 +441,7 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 	open_bucket_for_each(c, &h->blocks, ob, i)
 		__clear_bit(ob->ptr.dev, devs.d);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	rcu_read_lock();
 	if (h->parity.nr < h->redundancy) {
@@ -477,12 +477,12 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 	}
 	rcu_read_unlock();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	return bch2_ec_stripe_new_alloc(c, h);
 err:
 	rcu_read_unlock();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	return -1;
 }
@@ -638,7 +638,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
 	if (*nr_effective >= nr_replicas)
 		return 0;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	rcu_read_lock();
retry_blocking:
@@ -655,7 +655,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
 	}
 	rcu_read_unlock();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	return ret;
 }
...
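
In the allocation hunks above, rcu_read_lock() consistently nests inside the mark_lock read side, and the error path unwinds in the same reverse order as the success path. A compact sketch of that shape, reusing the hypothetical my_fs from the earlier sketch:

#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>

struct my_fs { struct percpu_rw_semaphore mark_lock; };

static bool my_pick_buckets(struct my_fs *c)
{
	return true;	/* stand-in for the real bucket selection */
}

static int my_stripe_alloc(struct my_fs *c)
{
	percpu_down_read(&c->mark_lock);	/* outer: pins bucket arrays */
	rcu_read_lock();			/* inner: device member list */

	if (!my_pick_buckets(c))
		goto err;

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);
	return 0;
err:
	rcu_read_unlock();			/* unwind in reverse order */
	percpu_up_read(&c->mark_lock);
	return -1;
}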
@@ -390,7 +390,7 @@ struct bch_dev {
 	/*
 	 * Buckets:
-	 * Per-bucket arrays are protected by c->usage_lock, bucket_lock and
+	 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
 	 * gc_lock, for device resize - holding any is sufficient for access:
 	 * Or rcu_read_lock(), but only for ptr_stale():
 	 */
@@ -617,7 +617,7 @@ struct bch_fs {
 	struct bch_fs_usage __percpu *usage[2];
-	struct percpu_rw_semaphore	usage_lock;
+	struct percpu_rw_semaphore	mark_lock;
 	/*
 	 * When we invalidate buckets, we use both the priority and the amount
...
@@ -351,7 +351,7 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 	 */
 	if (c) {
 		lockdep_assert_held(&c->sb_lock);
-		percpu_down_read(&c->usage_lock);
+		percpu_down_read(&c->mark_lock);
 	} else {
 		preempt_disable();
 	}
@@ -376,7 +376,7 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 	}
 	if (c) {
-		percpu_up_read(&c->usage_lock);
+		percpu_up_read(&c->mark_lock);
 	} else {
 		preempt_enable();
 	}
@@ -422,7 +422,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
 	size_t i, j, iter;
 	unsigned ci;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	spin_lock(&c->freelist_lock);
 	gc_pos_set(c, gc_pos_alloc(c, NULL));
@@ -458,7 +458,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
 		spin_unlock(&ob->lock);
 	}
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 static void bch2_gc_free(struct bch_fs *c)
@@ -578,7 +578,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 #define copy_fs_field(_f, _msg, ...)					\
 	copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
-	percpu_down_write(&c->usage_lock);
+	percpu_down_write(&c->mark_lock);
 	if (initial) {
 		bch2_gc_done_nocheck(c);
@@ -698,7 +698,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 		preempt_enable();
 	}
out:
-	percpu_up_write(&c->usage_lock);
+	percpu_up_write(&c->mark_lock);
 #undef copy_fs_field
 #undef copy_dev_field
@@ -743,7 +743,7 @@ static int bch2_gc_start(struct bch_fs *c)
 		}
 	}
-	percpu_down_write(&c->usage_lock);
+	percpu_down_write(&c->mark_lock);
 	for_each_member_device(ca, c, i) {
 		struct bucket_array *dst = __bucket_array(ca, 1);
@@ -757,7 +757,7 @@ static int bch2_gc_start(struct bch_fs *c)
 			dst->b[b]._mark.gen = src->b[b].mark.gen;
 	};
-	percpu_up_write(&c->usage_lock);
+	percpu_up_write(&c->mark_lock);
 	return bch2_ec_mem_alloc(c, true);
 }
...
@@ -1062,7 +1062,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	__bch2_btree_set_root_inmem(c, b);
 	mutex_lock(&c->btree_interior_update_lock);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
 			     true, 0,
@@ -1076,7 +1076,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
 			    gc_pos_btree_root(b->btree_id));
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	mutex_unlock(&c->btree_interior_update_lock);
 }
@@ -1155,7 +1155,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 	BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
 	mutex_lock(&c->btree_interior_update_lock);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
 			     true, 0,
@@ -1177,7 +1177,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 	bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
 			    gc_pos_btree_node(b));
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	mutex_unlock(&c->btree_interior_update_lock);
 	bch2_btree_bset_insert_key(iter, b, node_iter, insert);
@@ -1965,7 +1965,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 	bch2_btree_node_lock_write(b, iter);
 	mutex_lock(&c->btree_interior_update_lock);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
 			     true, 0,
@@ -1977,7 +1977,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 	bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
 			    gc_pos_btree_root(b->btree_id));
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	mutex_unlock(&c->btree_interior_update_lock);
 	if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
...
@@ -323,7 +323,7 @@ void bch2_fs_usage_apply(struct bch_fs *c,
 	s64 added = sum.data + sum.reserved;
 	s64 should_not_have_added;
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 	/*
 	 * Not allowed to reduce sectors_available except by getting a
@@ -364,7 +364,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 {
 	struct bch_dev_usage *dev_usage;
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 	bch2_fs_inconsistent_on(old.data_type && new.data_type &&
 				old.data_type != new.data_type, c,
@@ -413,14 +413,14 @@ void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
 	struct bucket_array *buckets;
 	struct bucket *g;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	fs_usage = this_cpu_ptr(c->usage[0]);
 	buckets = bucket_array(ca);
 	for_each_bucket(g, buckets)
 		if (g->mark.data_type)
 			bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 #define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr)	\
@@ -455,7 +455,7 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
 			    size_t b, struct bucket_mark *old)
 {
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 	__bch2_invalidate_bucket(c, ca, b, old, false);
@@ -484,7 +484,7 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
 			    size_t b, bool owned_by_allocator,
 			    struct gc_pos pos, unsigned flags)
 {
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 	if (!(flags & BCH_BUCKET_MARK_GC))
 		__bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
@@ -531,7 +531,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 	preempt_disable();
 	if (likely(c)) {
-		percpu_rwsem_assert_held(&c->usage_lock);
+		percpu_rwsem_assert_held(&c->mark_lock);
 		if (!(flags & BCH_BUCKET_MARK_GC))
 			__bch2_mark_metadata_bucket(c, ca, b, type, sectors,
@@ -924,10 +924,10 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 {
 	int ret;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	ret = bch2_mark_key_locked(c, k, inserting, sectors,
 				   pos, stats, journal_seq, flags);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	return ret;
 }
@@ -946,7 +946,7 @@ void bch2_mark_update(struct btree_insert *trans,
 	if (!btree_node_type_needs_gc(iter->btree_id))
 		return;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
@@ -1003,7 +1003,7 @@ void bch2_mark_update(struct btree_insert *trans,
 	bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 /* Disk reservations: */
@@ -1020,12 +1020,12 @@ static u64 bch2_recalc_sectors_available(struct bch_fs *c)
 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
 {
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	this_cpu_sub(c->usage[0]->online_reserved,
 		     res->sectors);
 	bch2_fs_stats_verify(c);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	res->sectors = 0;
 }
@@ -1040,7 +1040,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	s64 sectors_available;
 	int ret;
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	preempt_disable();
 	stats = this_cpu_ptr(c->usage[0]);
@@ -1054,7 +1054,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 		if (get < sectors) {
 			preempt_enable();
-			percpu_up_read(&c->usage_lock);
+			percpu_up_read(&c->mark_lock);
 			goto recalculate;
 		}
 	} while ((v = atomic64_cmpxchg(&c->sectors_available,
@@ -1070,7 +1070,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	bch2_disk_reservations_verify(c, flags);
 	bch2_fs_stats_verify(c);
 	preempt_enable();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	return 0;
 recalculate:
@@ -1091,7 +1091,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 			return -EINTR;
 	}
-	percpu_down_write(&c->usage_lock);
+	percpu_down_write(&c->mark_lock);
 	sectors_available = bch2_recalc_sectors_available(c);
 	if (sectors <= sectors_available ||
@@ -1109,7 +1109,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	}
 	bch2_fs_stats_verify(c);
-	percpu_up_write(&c->usage_lock);
+	percpu_up_write(&c->mark_lock);
 	if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
 		up_read(&c->gc_lock);
@@ -1185,7 +1185,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	if (resize) {
 		down_write(&c->gc_lock);
 		down_write(&ca->bucket_lock);
-		percpu_down_write(&c->usage_lock);
+		percpu_down_write(&c->mark_lock);
 	}
 	old_buckets = bucket_array(ca);
@@ -1215,7 +1215,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	swap(ca->buckets_written, buckets_written);
 	if (resize)
-		percpu_up_write(&c->usage_lock);
+		percpu_up_write(&c->mark_lock);
 	spin_lock(&c->freelist_lock);
 	for (i = 0; i < RESERVE_NR; i++) {
...
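
The bch2_disk_reservation_add() hunks show why this lock is a percpu rwsem rather than a plain one: the common case carves sectors off a cached atomic counter with a cmpxchg loop under the read side, and only when that cache runs dry does the slow path take the write side, drain every concurrent marker, and recount. A simplified sketch of that fast/slow split, omitting the real code's per-cpu usage counters; the my_ names are hypothetical:

#include <linux/atomic.h>
#include <linux/percpu-rwsem.h>
#include <linux/errno.h>

struct my_fs {
	struct percpu_rw_semaphore mark_lock;
	atomic64_t sectors_available;	/* cached free-space estimate */
};

static s64 my_recalc_sectors_available(struct my_fs *c)
{
	return 0;	/* stand-in; the real code sums per-cpu usage */
}

static int my_reservation_add(struct my_fs *c, u64 sectors)
{
	u64 old, prev;
	s64 avail;

	/* Fast path: read side + cmpxchg on the shared cached count. */
	percpu_down_read(&c->mark_lock);
	old = atomic64_read(&c->sectors_available);
	do {
		if (old < sectors) {
			percpu_up_read(&c->mark_lock);
			goto recalculate;	/* cached count exhausted */
		}
		prev = old;
		old = atomic64_cmpxchg(&c->sectors_available,
				       old, old - sectors);
	} while (old != prev);			/* retry on races */
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	/* Slow path: write side drains all markers, then recount. */
	percpu_down_write(&c->mark_lock);
	avail = my_recalc_sectors_available(c);
	if (avail >= (s64) sectors) {
		atomic64_set(&c->sectors_available, avail - sectors);
		percpu_up_write(&c->mark_lock);
		return 0;
	}
	percpu_up_write(&c->mark_lock);
	return -ENOSPC;
}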
@@ -34,7 +34,7 @@ static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
 {
 	return rcu_dereference_check(ca->buckets[gc],
 				     !ca->fs ||
-				     percpu_rwsem_is_held(&ca->fs->usage_lock) ||
+				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
 				     lockdep_is_held(&ca->fs->gc_lock) ||
 				     lockdep_is_held(&ca->bucket_lock));
 }
...
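
This buckets.h hunk encodes the access rule from the bch_dev comment earlier: the bucket array may be accessed under any one of mark_lock, gc_lock or bucket_lock (or under RCU, for ptr_stale()), and rcu_dereference_check() makes lockdep enforce exactly that "any of these suffices" rule. A sketch with simplified, hypothetical my_ types:

#include <linux/percpu-rwsem.h>
#include <linux/rwsem.h>
#include <linux/rcupdate.h>
#include <linux/lockdep.h>

struct bucket_array;			/* opaque here */

struct my_fs {
	struct percpu_rw_semaphore mark_lock;
	struct rw_semaphore	   gc_lock;
};

struct my_dev {
	struct my_fs		*fs;
	struct rw_semaphore	bucket_lock;
	struct bucket_array __rcu *buckets;
};

static inline struct bucket_array *my_bucket_array(struct my_dev *ca)
{
	/* With CONFIG_PROVE_RCU, complain unless the caller is in an
	 * RCU read-side section or holds one of the locks that pin
	 * the array across a device resize.
	 */
	return rcu_dereference_check(ca->buckets,
			percpu_rwsem_is_held(&ca->fs->mark_lock) ||
			lockdep_is_held(&ca->fs->gc_lock) ||
			lockdep_is_held(&ca->bucket_lock));
}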
@@ -1740,9 +1740,9 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
 		bio_inc_remaining(&orig->bio);
...
@@ -754,7 +754,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 	}
 	if (c) {
-		percpu_down_read(&c->usage_lock);
+		percpu_down_read(&c->mark_lock);
 		spin_lock(&c->journal.lock);
 	} else {
 		preempt_disable();
@@ -782,7 +782,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 	if (c) {
 		spin_unlock(&c->journal.lock);
-		percpu_up_read(&c->usage_lock);
+		percpu_up_read(&c->mark_lock);
 	} else {
 		preempt_enable();
 	}
...
@@ -374,7 +374,7 @@ static void bch2_fs_free(struct bch_fs *c)
 	bch2_io_clock_exit(&c->io_clock[WRITE]);
 	bch2_io_clock_exit(&c->io_clock[READ]);
 	bch2_fs_compress_exit(c);
-	percpu_free_rwsem(&c->usage_lock);
+	percpu_free_rwsem(&c->mark_lock);
 	free_percpu(c->usage[0]);
 	mempool_exit(&c->btree_iters_pool);
 	mempool_exit(&c->btree_bounce_pool);
@@ -608,7 +608,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 			offsetof(struct btree_write_bio, wbio.bio)),
 			BIOSET_NEED_BVECS) ||
 	    !(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
-	    percpu_init_rwsem(&c->usage_lock) ||
+	    percpu_init_rwsem(&c->mark_lock) ||
 	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
 					btree_bytes(c)) ||
 	    mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
...