Commit 9166b41d authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: s/usage_lock/mark_lock

better describes what it's for, and we're going to call a new lock
usage_lock.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 76640280
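For context: the lock being renamed is a percpu_rw_semaphore, so the hot paths that mark buckets and update usage counters take it for read (cheap, per-CPU), while the slow paths that swap out the per-bucket arrays or reconcile usage (GC, device resize) take it for write. Below is a minimal sketch of that pattern using the generic kernel percpu-rwsem API, not bcachefs code; mark_bucket() and resize_buckets() are hypothetical names for illustration:

#include <linux/percpu-rwsem.h>

/* Hypothetical stand-in for c->mark_lock. */
static DEFINE_STATIC_PERCPU_RWSEM(mark_lock);

/* Read side: many CPUs may mark buckets concurrently. */
static void mark_bucket(void)
{
	percpu_down_read(&mark_lock);
	/* ... update bucket marks / per-device usage here ... */
	percpu_up_read(&mark_lock);
}

/* Write side: exclude all markers, e.g. while replacing bucket arrays. */
static void resize_buckets(void)
{
	percpu_down_write(&mark_lock);
	/* ... resize/replace the per-bucket arrays here ... */
	percpu_up_write(&mark_lock);
}

The commit only renames the semaphore (freeing the usage_lock name for a new lock); the locking pattern itself is unchanged, as the diff below shows.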
@@ -218,9 +218,9 @@ static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
 	if (a.k->p.offset >= ca->mi.nbuckets)
 		return;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	__alloc_read_key(bucket(ca, a.k->p.offset), a.v);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 
 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
@@ -288,12 +288,12 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 	a->k.p = POS(ca->dev_idx, b);
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	g = bucket(ca, b);
 	m = bucket_cmpxchg(g, m, m.dirty = false);
 
 	__alloc_write_key(a, g, m);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	bch2_btree_iter_cond_resched(iter);
@@ -804,7 +804,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
 {
 	struct bucket_mark m;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	spin_lock(&c->freelist_lock);
 
 	bch2_invalidate_bucket(c, ca, bucket, &m);
@@ -817,7 +817,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
 	bucket_io_clock_reset(c, ca, bucket, READ);
 	bucket_io_clock_reset(c, ca, bucket, WRITE);
 
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	if (m.journal_seq_valid) {
 		u64 journal_seq = atomic64_read(&c->journal.seq);
@@ -1345,7 +1345,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 		struct bucket_mark m;
 
 		down_read(&ca->bucket_lock);
-		percpu_down_read(&c->usage_lock);
+		percpu_down_read(&c->mark_lock);
 
 		buckets = bucket_array(ca);
@@ -1369,7 +1369,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 			if (fifo_full(&ca->free[RESERVE_BTREE]))
 				break;
 		}
-		percpu_up_read(&c->usage_lock);
+		percpu_up_read(&c->mark_lock);
 		up_read(&ca->bucket_lock);
 	}
@@ -101,7 +101,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 		return;
 	}
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	spin_lock(&ob->lock);
 
 	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
@@ -109,7 +109,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 	ob->valid = false;
 
 	spin_unlock(&ob->lock);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	spin_lock(&c->freelist_lock);
 	ob->freelist = c->open_buckets_freelist;
@@ -441,7 +441,7 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 	open_bucket_for_each(c, &h->blocks, ob, i)
 		__clear_bit(ob->ptr.dev, devs.d);
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	rcu_read_lock();
 
 	if (h->parity.nr < h->redundancy) {
@@ -477,12 +477,12 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 	}
 
 	rcu_read_unlock();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	return bch2_ec_stripe_new_alloc(c, h);
 err:
 	rcu_read_unlock();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	return -1;
 }
@@ -638,7 +638,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
 	if (*nr_effective >= nr_replicas)
 		return 0;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	rcu_read_lock();
 
retry_blocking:
@@ -655,7 +655,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
 	}
 
 	rcu_read_unlock();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	return ret;
 }
@@ -390,7 +390,7 @@ struct bch_dev {
 	/*
 	 * Buckets:
-	 * Per-bucket arrays are protected by c->usage_lock, bucket_lock and
+	 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
 	 * gc_lock, for device resize - holding any is sufficient for access:
 	 * Or rcu_read_lock(), but only for ptr_stale():
 	 */
@@ -617,7 +617,7 @@ struct bch_fs {
 	struct bch_fs_usage __percpu *usage[2];
 
-	struct percpu_rw_semaphore	usage_lock;
+	struct percpu_rw_semaphore	mark_lock;
 
 	/*
 	 * When we invalidate buckets, we use both the priority and the amount
@@ -351,7 +351,7 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 	 */
 	if (c) {
 		lockdep_assert_held(&c->sb_lock);
-		percpu_down_read(&c->usage_lock);
+		percpu_down_read(&c->mark_lock);
 	} else {
 		preempt_disable();
 	}
@@ -376,7 +376,7 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 	}
 
 	if (c) {
-		percpu_up_read(&c->usage_lock);
+		percpu_up_read(&c->mark_lock);
 	} else {
 		preempt_enable();
 	}
@@ -422,7 +422,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
 	size_t i, j, iter;
 	unsigned ci;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 
 	spin_lock(&c->freelist_lock);
 	gc_pos_set(c, gc_pos_alloc(c, NULL));
@@ -458,7 +458,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
 		spin_unlock(&ob->lock);
 	}
 
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 
 static void bch2_gc_free(struct bch_fs *c)
@@ -578,7 +578,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 #define copy_fs_field(_f, _msg, ...) \
 	copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
 
-	percpu_down_write(&c->usage_lock);
+	percpu_down_write(&c->mark_lock);
 
 	if (initial) {
 		bch2_gc_done_nocheck(c);
@@ -698,7 +698,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 		preempt_enable();
 	}
out:
-	percpu_up_write(&c->usage_lock);
+	percpu_up_write(&c->mark_lock);
 
 #undef copy_fs_field
 #undef copy_dev_field
@@ -743,7 +743,7 @@ static int bch2_gc_start(struct bch_fs *c)
 		}
 	}
 
-	percpu_down_write(&c->usage_lock);
+	percpu_down_write(&c->mark_lock);
 
 	for_each_member_device(ca, c, i) {
 		struct bucket_array *dst = __bucket_array(ca, 1);
@@ -757,7 +757,7 @@ static int bch2_gc_start(struct bch_fs *c)
 			dst->b[b]._mark.gen = src->b[b].mark.gen;
 	};
 
-	percpu_up_write(&c->usage_lock);
+	percpu_up_write(&c->mark_lock);
 
 	return bch2_ec_mem_alloc(c, true);
 }
@@ -1062,7 +1062,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	__bch2_btree_set_root_inmem(c, b);
 
 	mutex_lock(&c->btree_interior_update_lock);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
 			     true, 0,
@@ -1076,7 +1076,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
 			    gc_pos_btree_root(b->btree_id));
 
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	mutex_unlock(&c->btree_interior_update_lock);
 }
@@ -1155,7 +1155,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 	BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
 
 	mutex_lock(&c->btree_interior_update_lock);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 
 	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
 			     true, 0,
@@ -1177,7 +1177,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 	bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
 			    gc_pos_btree_node(b));
 
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	mutex_unlock(&c->btree_interior_update_lock);
 
 	bch2_btree_bset_insert_key(iter, b, node_iter, insert);
@@ -1965,7 +1965,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 	bch2_btree_node_lock_write(b, iter);
 
 	mutex_lock(&c->btree_interior_update_lock);
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
 			     true, 0,
@@ -1977,7 +1977,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 	bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
 			    gc_pos_btree_root(b->btree_id));
 
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 	mutex_unlock(&c->btree_interior_update_lock);
 
 	if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
@@ -323,7 +323,7 @@ void bch2_fs_usage_apply(struct bch_fs *c,
 	s64 added = sum.data + sum.reserved;
 	s64 should_not_have_added;
 
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 
 	/*
 	 * Not allowed to reduce sectors_available except by getting a
@@ -364,7 +364,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 {
 	struct bch_dev_usage *dev_usage;
 
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 
 	bch2_fs_inconsistent_on(old.data_type && new.data_type &&
 				old.data_type != new.data_type, c,
@@ -413,14 +413,14 @@ void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
 	struct bucket_array *buckets;
 	struct bucket *g;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	fs_usage = this_cpu_ptr(c->usage[0]);
 	buckets = bucket_array(ca);
 
 	for_each_bucket(g, buckets)
 		if (g->mark.data_type)
 			bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 
 #define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \
@@ -455,7 +455,7 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, struct bucket_mark *old)
 {
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 
 	__bch2_invalidate_bucket(c, ca, b, old, false);
@@ -484,7 +484,7 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, bool owned_by_allocator,
			    struct gc_pos pos, unsigned flags)
 {
-	percpu_rwsem_assert_held(&c->usage_lock);
+	percpu_rwsem_assert_held(&c->mark_lock);
 
 	if (!(flags & BCH_BUCKET_MARK_GC))
 		__bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
@@ -531,7 +531,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 	preempt_disable();
 
 	if (likely(c)) {
-		percpu_rwsem_assert_held(&c->usage_lock);
+		percpu_rwsem_assert_held(&c->mark_lock);
 
 		if (!(flags & BCH_BUCKET_MARK_GC))
 			__bch2_mark_metadata_bucket(c, ca, b, type, sectors,
@@ -924,10 +924,10 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 {
 	int ret;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	ret = bch2_mark_key_locked(c, k, inserting, sectors,
				   pos, stats, journal_seq, flags);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	return ret;
 }
@@ -946,7 +946,7 @@ void bch2_mark_update(struct btree_insert *trans,
 	if (!btree_node_type_needs_gc(iter->btree_id))
 		return;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 
 	if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
@@ -1003,7 +1003,7 @@ void bch2_mark_update(struct btree_insert *trans,
 	bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
 
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 }
 
 /* Disk reservations: */
@@ -1020,12 +1020,12 @@ static u64 bch2_recalc_sectors_available(struct bch_fs *c)
 
 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
 {
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	this_cpu_sub(c->usage[0]->online_reserved,
		     res->sectors);
 
 	bch2_fs_stats_verify(c);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	res->sectors = 0;
 }
@@ -1040,7 +1040,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	s64 sectors_available;
 	int ret;
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	preempt_disable();
 	stats = this_cpu_ptr(c->usage[0]);
@@ -1054,7 +1054,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 		if (get < sectors) {
 			preempt_enable();
-			percpu_up_read(&c->usage_lock);
+			percpu_up_read(&c->mark_lock);
 			goto recalculate;
 		}
 	} while ((v = atomic64_cmpxchg(&c->sectors_available,
@@ -1070,7 +1070,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	bch2_disk_reservations_verify(c, flags);
 	bch2_fs_stats_verify(c);
 	preempt_enable();
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	return 0;
 
recalculate:
@@ -1091,7 +1091,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 		return -EINTR;
 	}
 
-	percpu_down_write(&c->usage_lock);
+	percpu_down_write(&c->mark_lock);
 	sectors_available = bch2_recalc_sectors_available(c);
 
 	if (sectors <= sectors_available ||
@@ -1109,7 +1109,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	}
 
 	bch2_fs_stats_verify(c);
-	percpu_up_write(&c->usage_lock);
+	percpu_up_write(&c->mark_lock);
 
 	if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
 		up_read(&c->gc_lock);
@@ -1185,7 +1185,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	if (resize) {
 		down_write(&c->gc_lock);
 		down_write(&ca->bucket_lock);
-		percpu_down_write(&c->usage_lock);
+		percpu_down_write(&c->mark_lock);
 	}
 
 	old_buckets = bucket_array(ca);
@@ -1215,7 +1215,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	swap(ca->buckets_written, buckets_written);
 
 	if (resize)
-		percpu_up_write(&c->usage_lock);
+		percpu_up_write(&c->mark_lock);
 
 	spin_lock(&c->freelist_lock);
 	for (i = 0; i < RESERVE_NR; i++) {
@@ -34,7 +34,7 @@ static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
 {
 	return rcu_dereference_check(ca->buckets[gc],
				     !ca->fs ||
-				     percpu_rwsem_is_held(&ca->fs->usage_lock) ||
+				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
 }
@@ -1740,9 +1740,9 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
 
-	percpu_down_read(&c->usage_lock);
+	percpu_down_read(&c->mark_lock);
 	bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
-	percpu_up_read(&c->usage_lock);
+	percpu_up_read(&c->mark_lock);
 
 	if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
 		bio_inc_remaining(&orig->bio);
@@ -754,7 +754,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 	}
 
 	if (c) {
-		percpu_down_read(&c->usage_lock);
+		percpu_down_read(&c->mark_lock);
 		spin_lock(&c->journal.lock);
 	} else {
 		preempt_disable();
@@ -782,7 +782,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 	if (c) {
 		spin_unlock(&c->journal.lock);
-		percpu_up_read(&c->usage_lock);
+		percpu_up_read(&c->mark_lock);
 	} else {
 		preempt_enable();
 	}
@@ -374,7 +374,7 @@ static void bch2_fs_free(struct bch_fs *c)
 	bch2_io_clock_exit(&c->io_clock[WRITE]);
 	bch2_io_clock_exit(&c->io_clock[READ]);
 	bch2_fs_compress_exit(c);
-	percpu_free_rwsem(&c->usage_lock);
+	percpu_free_rwsem(&c->mark_lock);
 	free_percpu(c->usage[0]);
 	mempool_exit(&c->btree_iters_pool);
 	mempool_exit(&c->btree_bounce_pool);
@@ -608,7 +608,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
			offsetof(struct btree_write_bio, wbio.bio)),
			BIOSET_NEED_BVECS) ||
	    !(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
-	    percpu_init_rwsem(&c->usage_lock) ||
+	    percpu_init_rwsem(&c->mark_lock) ||
	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
					btree_bytes(c)) ||
	    mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,