Commit 0d2234a7 authored by Kent Overstreet

six locks: Kill six_lock_pcpu_(alloc|free)

six_lock_pcpu_alloc() is an unsafe interface: it's not safe to allocate
or free the percpu reader count on an existing lock that's in use; the
only safe time to allocate percpu readers is when the lock is first
being initialized.

This patch adds a flags parameter to six_lock_init(), and instead of
six_lock_pcpu_free() we now expose six_lock_exit(), which does the same
thing but is less likely to be misused.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 01bf56a9
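
To make the change concrete, here is a minimal before/after sketch of a call site. The struct and helper names below are hypothetical, added for illustration; only the six-lock calls come from this patch:

#include "six.h"

struct my_obj {
	struct six_lock		lock;
};

static void my_obj_init(struct my_obj *o, bool pcpu_readers)
{
	/*
	 * Old interface: init first, then bolt on the percpu reader
	 * count, which is racy once the lock is visible to others:
	 *
	 *	six_lock_init(&o->lock);
	 *	if (pcpu_readers)
	 *		six_lock_pcpu_alloc(&o->lock);
	 */

	/* New interface: percpu readers are requested once, at init time: */
	six_lock_init(&o->lock, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
}

static void my_obj_exit(struct my_obj *o)
{
	/* Replaces six_lock_pcpu_free(); frees lock->readers, if allocated: */
	six_lock_exit(&o->lock);
}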
@@ -121,7 +121,6 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 		return NULL;
 
 	bkey_btree_ptr_init(&b->key);
-	bch2_btree_lock_init(&b->c);
 	INIT_LIST_HEAD(&b->list);
 	INIT_LIST_HEAD(&b->write_blocked);
 	b->byte_order = ilog2(btree_bytes(c));
@@ -142,6 +141,8 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 		return NULL;
 	}
 
+	bch2_btree_lock_init(&b->c, 0);
+
 	bc->used++;
 	list_add(&b->list, &bc->freeable);
 	return b;
@@ -435,7 +436,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	while (!list_empty(&bc->freed_nonpcpu)) {
 		b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
 		list_del(&b->list);
-		six_lock_pcpu_free(&b->c.lock);
+		six_lock_exit(&b->c.lock);
 		kfree(b);
 	}
@@ -595,8 +596,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
 		mutex_lock(&bc->lock);
 	}
 
-	if (pcpu_read_locks)
-		six_lock_pcpu_alloc(&b->c.lock);
+	bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
 
 	BUG_ON(!six_trylock_intent(&b->c.lock));
 	BUG_ON(!six_trylock_write(&b->c.lock));
......
@@ -282,9 +282,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
 		return NULL;
 init:
 	INIT_LIST_HEAD(&ck->list);
-	bch2_btree_lock_init(&ck->c);
-	if (pcpu_readers)
-		six_lock_pcpu_alloc(&ck->c.lock);
+	bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
 	ck->c.cached = true;
 	BUG_ON(!six_trylock_intent(&ck->c.lock));
@@ -340,9 +338,6 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
 		}
 
 		mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
-	} else {
-		if (path->btree_id == BTREE_ID_subvolumes)
-			six_lock_pcpu_alloc(&ck->c.lock);
 	}
 
 	ck->c.level = 0;
@@ -871,7 +866,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
 			break;
 
 		list_del(&ck->list);
-		six_lock_pcpu_free(&ck->c.lock);
+		six_lock_exit(&ck->c.lock);
 		kmem_cache_free(bch2_key_cache, ck);
 		atomic_long_dec(&bc->nr_freed);
 		scanned++;
@@ -887,7 +882,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
 			break;
 
 		list_del(&ck->list);
-		six_lock_pcpu_free(&ck->c.lock);
+		six_lock_exit(&ck->c.lock);
 		kmem_cache_free(bch2_key_cache, ck);
 		atomic_long_dec(&bc->nr_freed);
 		scanned++;
@@ -1012,7 +1007,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 		list_del(&ck->list);
 		kfree(ck->k);
-		six_lock_pcpu_free(&ck->c.lock);
+		six_lock_exit(&ck->c.lock);
 		kmem_cache_free(bch2_key_cache, ck);
 	}
......
@@ -6,9 +6,10 @@
 
 static struct lock_class_key bch2_btree_node_lock_key;
 
-void bch2_btree_lock_init(struct btree_bkey_cached_common *b)
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+			  enum six_lock_init_flags flags)
 {
-	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key);
+	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
 	lockdep_set_novalidate_class(&b->lock);
 }
......
@@ -13,7 +13,7 @@
 #include "btree_iter.h"
 #include "six.h"
 
-void bch2_btree_lock_init(struct btree_bkey_cached_common *);
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
 
 #ifdef CONFIG_LOCKDEP
 void bch2_assert_btree_nodes_not_locked(void);
......
@@ -814,25 +814,6 @@ void six_lock_wakeup_all(struct six_lock *lock)
 }
 EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
 
-void six_lock_pcpu_free(struct six_lock *lock)
-{
-	BUG_ON(lock->readers && pcpu_read_count(lock));
-	BUG_ON(lock->state.read_lock);
-
-	free_percpu(lock->readers);
-	lock->readers = NULL;
-}
-EXPORT_SYMBOL_GPL(six_lock_pcpu_free);
-
-void six_lock_pcpu_alloc(struct six_lock *lock)
-{
-#ifdef __KERNEL__
-	if (!lock->readers)
-		lock->readers = alloc_percpu(unsigned);
-#endif
-}
-EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);
-
 /*
  * Returns lock held counts, for both read and intent
  */
@@ -860,3 +841,37 @@ void six_lock_readers_add(struct six_lock *lock, int nr)
 		atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
 }
 EXPORT_SYMBOL_GPL(six_lock_readers_add);
+
+void six_lock_exit(struct six_lock *lock)
+{
+	WARN_ON(lock->readers && pcpu_read_count(lock));
+	WARN_ON(lock->state.read_lock);
+
+	free_percpu(lock->readers);
+	lock->readers = NULL;
+}
+EXPORT_SYMBOL_GPL(six_lock_exit);
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+		     struct lock_class_key *key, enum six_lock_init_flags flags)
+{
+	atomic64_set(&lock->state.counter, 0);
+	raw_spin_lock_init(&lock->wait_lock);
+	INIT_LIST_HEAD(&lock->wait_list);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+
+	if (flags & SIX_LOCK_INIT_PCPU) {
+		/*
+		 * We don't return an error here on memory allocation failure
+		 * since percpu is an optimization, and locks will work with the
+		 * same semantics in non-percpu mode: callers can check for
+		 * failure if they wish by checking lock->readers, but generally
+		 * will not want to treat it as an error.
+		 */
+		lock->readers = alloc_percpu(unsigned);
+	}
+}
+EXPORT_SYMBOL_GPL(__six_lock_init);
@@ -132,24 +132,20 @@ struct six_lock_waiter {
 
 typedef int (*six_lock_should_sleep_fn)(struct six_lock *lock, void *);
 
-static __always_inline void __six_lock_init(struct six_lock *lock,
-					    const char *name,
-					    struct lock_class_key *key)
-{
-	atomic64_set(&lock->state.counter, 0);
-	raw_spin_lock_init(&lock->wait_lock);
-	INIT_LIST_HEAD(&lock->wait_list);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-}
+void six_lock_exit(struct six_lock *lock);
 
-#define six_lock_init(lock)						\
+enum six_lock_init_flags {
+	SIX_LOCK_INIT_PCPU	= 1U << 0,
+};
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+		     struct lock_class_key *key, enum six_lock_init_flags flags);
+
+#define six_lock_init(lock, flags)					\
 do {									\
 	static struct lock_class_key __key;				\
 									\
-	__six_lock_init((lock), #lock, &__key);				\
+	__six_lock_init((lock), #lock, &__key, flags);			\
 } while (0)
 
 #define __SIX_LOCK(type)						\
@@ -248,9 +244,6 @@ void six_lock_increment(struct six_lock *, enum six_lock_type);
 
 void six_lock_wakeup_all(struct six_lock *);
 
-void six_lock_pcpu_free(struct six_lock *);
-void six_lock_pcpu_alloc(struct six_lock *);
-
 struct six_lock_count {
 	unsigned n[3];
 };
......