Commit 8e569669 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Reorganize btree_locking.[ch]

Tidy things up a bit before doing more work in this file.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent cd5afabe
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -8,10 +8,12 @@ struct lock_class_key bch2_btree_node_lock_key;
 
 /* Btree node locking: */
 
-void bch2_btree_node_unlock_write(struct btree_trans *trans,
-				  struct btree_path *path, struct btree *b)
+static inline void six_lock_readers_add(struct six_lock *lock, int nr)
 {
-	bch2_btree_node_unlock_write_inlined(trans, path, b);
+	if (!lock->readers)
+		atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
+	else
+		this_cpu_add(*lock->readers, nr);
 }
 
 struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
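A note on the helper this first hunk introduces: six_lock_readers_add() hides where a six lock's reader count lives — packed into the lock's state word, or in per-CPU counters once lock->readers is set. A rough single-threaded userspace model of that split (the toy_* names and NCPU are invented for illustration; this is not the six-lock code):

#include <stdatomic.h>
#include <stdio.h>

#define NCPU 4				/* stand-in for per-CPU storage */

struct toy_six_lock {
	atomic_long state;		/* embedded reader count when !readers */
	long *readers;			/* optional distributed counters */
};

static void toy_readers_add(struct toy_six_lock *lock, int cpu, int nr)
{
	if (!lock->readers)
		atomic_fetch_add(&lock->state, nr);	/* central counter */
	else
		lock->readers[cpu] += nr;		/* cheap local add */
}

static long toy_readers_count(struct toy_six_lock *lock)
{
	long n = atomic_load(&lock->state);

	if (lock->readers)
		for (int cpu = 0; cpu < NCPU; cpu++)
			n += lock->readers[cpu];	/* sum on demand */
	return n;
}

int main(void)
{
	long percpu[NCPU] = { 0 };
	struct toy_six_lock central = { 0 };
	struct toy_six_lock distributed = { .readers = percpu };

	toy_readers_add(&central, 0, 2);
	toy_readers_add(&distributed, 0, 1);
	toy_readers_add(&distributed, 3, 1);

	printf("central: %ld, distributed: %ld\n",
	       toy_readers_count(&central),
	       toy_readers_count(&distributed));
	return 0;
}

The kernel helper does the same with atomic64_add() on the state word versus this_cpu_add() on the per-CPU counters.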
@@ -34,14 +36,16 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
 	return ret;
 }
 
-static inline void six_lock_readers_add(struct six_lock *lock, int nr)
+/* unlock */
+
+void bch2_btree_node_unlock_write(struct btree_trans *trans,
+				  struct btree_path *path, struct btree *b)
 {
-	if (!lock->readers)
-		atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
-	else
-		this_cpu_add(*lock->readers, nr);
+	bch2_btree_node_unlock_write_inlined(trans, path, b);
 }
 
+/* lock */
+
 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
 {
 	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).read;
@@ -57,118 +61,6 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
 	six_lock_readers_add(&b->c.lock, readers);
 }
 
-bool __bch2_btree_node_relock(struct btree_trans *trans,
-			      struct btree_path *path, unsigned level)
-{
-	struct btree *b = btree_path_node(path, level);
-	int want = __btree_lock_want(path, level);
-
-	if (!is_btree_node(path, level))
-		goto fail;
-
-	if (race_fault())
-		goto fail;
-
-	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
-	    (btree_node_lock_seq_matches(path, b, level) &&
-	     btree_node_lock_increment(trans, b, level, want))) {
-		mark_btree_node_locked(trans, path, level, want);
-		return true;
-	}
-fail:
-	if (b != ERR_PTR(-BCH_ERR_no_btree_node_cached) &&
-	    b != ERR_PTR(-BCH_ERR_no_btree_node_init))
-		trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
-	return false;
-}
-
-bool bch2_btree_node_upgrade(struct btree_trans *trans,
-			     struct btree_path *path, unsigned level)
-{
-	struct btree *b = path->l[level].b;
-
-	if (!is_btree_node(path, level))
-		return false;
-
-	switch (btree_lock_want(path, level)) {
-	case BTREE_NODE_UNLOCKED:
-		BUG_ON(btree_node_locked(path, level));
-		return true;
-	case BTREE_NODE_READ_LOCKED:
-		BUG_ON(btree_node_intent_locked(path, level));
-		return bch2_btree_node_relock(trans, path, level);
-	case BTREE_NODE_INTENT_LOCKED:
-		break;
-	}
-
-	if (btree_node_intent_locked(path, level))
-		return true;
-
-	if (race_fault())
-		return false;
-
-	if (btree_node_locked(path, level)
-	    ? six_lock_tryupgrade(&b->c.lock)
-	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
-		goto success;
-
-	if (btree_node_lock_seq_matches(path, b, level) &&
-	    btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
-		btree_node_unlock(trans, path, level);
-		goto success;
-	}
-
-	trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
-	return false;
-success:
-	mark_btree_node_intent_locked(trans, path, level);
-	return true;
-}
-
-static inline bool btree_path_get_locks(struct btree_trans *trans,
-					struct btree_path *path,
-					bool upgrade)
-{
-	unsigned l = path->level;
-	int fail_idx = -1;
-
-	do {
-		if (!btree_path_node(path, l))
-			break;
-
-		if (!(upgrade
-		      ? bch2_btree_node_upgrade(trans, path, l)
-		      : bch2_btree_node_relock(trans, path, l)))
-			fail_idx = l;
-
-		l++;
-	} while (l < path->locks_want);
-
-	/*
-	 * When we fail to get a lock, we have to ensure that any child nodes
-	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
-	 * the node that we failed to relock:
-	 */
-	if (fail_idx >= 0) {
-		__bch2_btree_path_unlock(trans, path);
-		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-
-		do {
-			path->l[fail_idx].b = upgrade
-				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
-				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
-			--fail_idx;
-		} while (fail_idx >= 0);
-	}
-
-	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
-		path->uptodate = BTREE_ITER_UPTODATE;
-
-	bch2_trans_verify_locks(trans);
-
-	return path->uptodate < BTREE_ITER_NEED_RELOCK;
-}
-
 /* Slowpath: */
 int __bch2_btree_node_lock(struct btree_trans *trans,
 			   struct btree_path *path,
@@ -250,34 +142,121 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 }
 
-/* Btree iterator locking: */
+/* relock */
 
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-void bch2_btree_path_verify_locks(struct btree_path *path)
+static inline bool btree_path_get_locks(struct btree_trans *trans,
+					struct btree_path *path,
+					bool upgrade)
 {
-	unsigned l;
+	unsigned l = path->level;
+	int fail_idx = -1;
 
-	if (!path->nodes_locked) {
-		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
-		       btree_path_node(path, path->level));
-		return;
+	do {
+		if (!btree_path_node(path, l))
+			break;
+
+		if (!(upgrade
+		      ? bch2_btree_node_upgrade(trans, path, l)
+		      : bch2_btree_node_relock(trans, path, l)))
+			fail_idx = l;
+
+		l++;
+	} while (l < path->locks_want);
+
+	/*
+	 * When we fail to get a lock, we have to ensure that any child nodes
+	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
+	 * the node that we failed to relock:
+	 */
+	if (fail_idx >= 0) {
+		__bch2_btree_path_unlock(trans, path);
+		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+
+		do {
+			path->l[fail_idx].b = upgrade
+				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
+				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
+			--fail_idx;
+		} while (fail_idx >= 0);
 	}
 
-	for (l = 0; btree_path_node(path, l); l++)
-		BUG_ON(btree_lock_want(path, l) !=
-		       btree_node_locked_type(path, l));
+	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
+		path->uptodate = BTREE_ITER_UPTODATE;
+
+	bch2_trans_verify_locks(trans);
+
+	return path->uptodate < BTREE_ITER_NEED_RELOCK;
 }
 
-void bch2_trans_verify_locks(struct btree_trans *trans)
+bool __bch2_btree_node_relock(struct btree_trans *trans,
+			      struct btree_path *path, unsigned level)
 {
-	struct btree_path *path;
+	struct btree *b = btree_path_node(path, level);
+	int want = __btree_lock_want(path, level);
 
-	trans_for_each_path(trans, path)
-		bch2_btree_path_verify_locks(path);
+	if (!is_btree_node(path, level))
+		goto fail;
+
+	if (race_fault())
+		goto fail;
+
+	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
+	    (btree_node_lock_seq_matches(path, b, level) &&
+	     btree_node_lock_increment(trans, b, level, want))) {
+		mark_btree_node_locked(trans, path, level, want);
+		return true;
+	}
+fail:
+	if (b != ERR_PTR(-BCH_ERR_no_btree_node_cached) &&
+	    b != ERR_PTR(-BCH_ERR_no_btree_node_init))
+		trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
+	return false;
 }
 
-#endif
+/* upgrade */
+
+bool bch2_btree_node_upgrade(struct btree_trans *trans,
+			     struct btree_path *path, unsigned level)
+{
+	struct btree *b = path->l[level].b;
+
+	if (!is_btree_node(path, level))
+		return false;
+
+	switch (btree_lock_want(path, level)) {
+	case BTREE_NODE_UNLOCKED:
+		BUG_ON(btree_node_locked(path, level));
+		return true;
+	case BTREE_NODE_READ_LOCKED:
+		BUG_ON(btree_node_intent_locked(path, level));
+		return bch2_btree_node_relock(trans, path, level);
+	case BTREE_NODE_INTENT_LOCKED:
+		break;
+	}
+
+	if (btree_node_intent_locked(path, level))
+		return true;
+
+	if (race_fault())
+		return false;
+
+	if (btree_node_locked(path, level)
+	    ? six_lock_tryupgrade(&b->c.lock)
+	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
+		goto success;
+
+	if (btree_node_lock_seq_matches(path, b, level) &&
+	    btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
+		btree_node_unlock(trans, path, level);
+		goto success;
+	}
+
+	trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
+	return false;
+success:
+	mark_btree_node_intent_locked(trans, path, level);
+	return true;
+}
 
 /* Btree path locking: */
@@ -406,6 +385,8 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 	bch2_btree_path_verify_locks(path);
 }
 
+/* Btree transaction locking: */
+
 void bch2_trans_downgrade(struct btree_trans *trans)
 {
 	struct btree_path *path;
@@ -414,8 +395,6 @@ void bch2_trans_downgrade(struct btree_trans *trans)
 		bch2_btree_path_downgrade(trans, path);
 }
 
-/* Btree transaction locking: */
-
 int bch2_trans_relock(struct btree_trans *trans)
 {
 	struct btree_path *path;
@@ -440,3 +419,32 @@ void bch2_trans_unlock(struct btree_trans *trans)
 	trans_for_each_path(trans, path)
 		__bch2_btree_path_unlock(trans, path);
 }
+
+/* Debug */
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+void bch2_btree_path_verify_locks(struct btree_path *path)
+{
+	unsigned l;
+
+	if (!path->nodes_locked) {
+		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
+		       btree_path_node(path, path->level));
+		return;
+	}
+
+	for (l = 0; btree_path_node(path, l); l++)
+		BUG_ON(btree_lock_want(path, l) !=
+		       btree_node_locked_type(path, l));
+}
+
+void bch2_trans_verify_locks(struct btree_trans *trans)
+{
+	struct btree_path *path;
+
+	trans_for_each_path(trans, path)
+		bch2_btree_path_verify_locks(path);
+}
+
+#endif
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -20,6 +20,13 @@ static inline bool is_btree_node(struct btree_path *path, unsigned l)
 	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
 }
 
+static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
+{
+	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
+		? &trans->c->btree_transaction_stats[trans->fn_idx]
+		: NULL;
+}
+
 /* matches six lock types */
 enum btree_node_locked_type {
 	BTREE_NODE_UNLOCKED = -1,
@@ -114,13 +121,6 @@ btree_lock_want(struct btree_path *path, int level)
 	return BTREE_NODE_UNLOCKED;
 }
 
-static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
-{
-	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
-		? &trans->c->btree_transaction_stats[trans->fn_idx]
-		: NULL;
-}
-
 static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
 					      struct btree_path *path, unsigned level)
 {
@@ -134,6 +134,22 @@ static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
 #endif
 }
 
+static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
+{
+	switch (type) {
+	case SIX_LOCK_read:
+		return BCH_TIME_btree_lock_contended_read;
+	case SIX_LOCK_intent:
+		return BCH_TIME_btree_lock_contended_intent;
+	case SIX_LOCK_write:
+		return BCH_TIME_btree_lock_contended_write;
+	default:
+		BUG();
+	}
+}
+
+/* unlock: */
+
 static inline void btree_node_unlock(struct btree_trans *trans,
 				     struct btree_path *path, unsigned level)
 {
@@ -157,20 +173,30 @@ static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
 		btree_node_unlock(trans, path, __ffs(path->nodes_locked));
 }
 
-static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
+/*
+ * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
+ * succeed:
+ */
+static inline void
+bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
+				     struct btree *b)
 {
-	switch (type) {
-	case SIX_LOCK_read:
-		return BCH_TIME_btree_lock_contended_read;
-	case SIX_LOCK_intent:
-		return BCH_TIME_btree_lock_contended_intent;
-	case SIX_LOCK_write:
-		return BCH_TIME_btree_lock_contended_write;
-	default:
-		BUG();
-	}
+	struct btree_path *linked;
+
+	EBUG_ON(path->l[b->c.level].b != b);
+	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+
+	trans_for_each_path_with_node(trans, b, linked)
+		linked->l[b->c.level].lock_seq += 2;
+
+	six_unlock_write(&b->c.lock);
 }
 
+void bch2_btree_node_unlock_write(struct btree_trans *,
+				  struct btree_path *, struct btree *);
+
+/* lock: */
+
 static inline int btree_node_lock_type(struct btree_trans *trans,
 				       struct btree_path *path,
 				       struct btree *b,
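The comment carried along with bch2_btree_node_unlock_write_inlined() in the hunk above is the crux of relocking: a six lock's sequence number advances on every write lock and write unlock, each path records the sequence at which it held the node, and relock succeeds only while the sequence still matches. Since this transaction's own write cycle advances the sequence by two, bumping each linked path's saved lock_seq by 2 keeps its own paths relockable. A minimal single-threaded sketch of the idea (toy_* names invented; not the six-lock implementation):

#include <stdbool.h>
#include <stdio.h>

struct toy_lock {
	unsigned seq;		/* odd while write-locked, even when free */
};

struct toy_path {
	unsigned saved_seq;	/* sequence at which we last held the node */
};

static void toy_write_lock(struct toy_lock *l)   { l->seq++; }
static void toy_write_unlock(struct toy_lock *l) { l->seq++; }

/* Relock succeeds only if no write cycle happened since we let go. */
static bool toy_relock(struct toy_path *p, struct toy_lock *l)
{
	return p->saved_seq == l->seq;
}

int main(void)
{
	struct toy_lock node = { 0 };
	struct toy_path ours = { node.seq };
	struct toy_path theirs = { node.seq };

	/* This transaction write-locks and unlocks the node... */
	toy_write_lock(&node);
	toy_write_unlock(&node);

	/*
	 * ...and, as in bch2_btree_node_unlock_write_inlined(), bumps its
	 * own paths' saved sequence by 2 so they still match:
	 */
	ours.saved_seq += 2;

	printf("our path relocks: %d\n", toy_relock(&ours, &node));	/* 1 */
	printf("their path relocks: %d\n", toy_relock(&theirs, &node));	/* 0 */
	return 0;
}

Any other transaction's saved sequence is now stale, so its relock fails and it re-traverses.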
@@ -253,41 +279,6 @@ static inline int btree_node_lock(struct btree_trans *trans,
 	return ret;
 }
 
-bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
-
-static inline bool bch2_btree_node_relock(struct btree_trans *trans,
-					  struct btree_path *path, unsigned level)
-{
-	EBUG_ON(btree_node_locked(path, level) &&
-		btree_node_locked_type(path, level) !=
-		__btree_lock_want(path, level));
-
-	return likely(btree_node_locked(path, level)) ||
-		__bch2_btree_node_relock(trans, path, level);
-}
-
-/*
- * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
- * succeed:
- */
-static inline void
-bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
-				     struct btree *b)
-{
-	struct btree_path *linked;
-
-	EBUG_ON(path->l[b->c.level].b != b);
-	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
-
-	trans_for_each_path_with_node(trans, b, linked)
-		linked->l[b->c.level].lock_seq += 2;
-
-	six_unlock_write(&b->c.lock);
-}
-
-void bch2_btree_node_unlock_write(struct btree_trans *,
-				  struct btree_path *, struct btree *);
-
 void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
 
 static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
@@ -302,6 +293,36 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
 		__bch2_btree_node_lock_write(trans, b);
 }
 
+/* relock: */
+
+bool bch2_btree_path_relock_norestart(struct btree_trans *,
+				      struct btree_path *, unsigned long);
+int __bch2_btree_path_relock(struct btree_trans *,
+			     struct btree_path *, unsigned long);
+
+static inline int bch2_btree_path_relock(struct btree_trans *trans,
+					 struct btree_path *path, unsigned long trace_ip)
+{
+	return btree_node_locked(path, path->level)
+		? 0
+		: __bch2_btree_path_relock(trans, path, trace_ip);
+}
+
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
+
+static inline bool bch2_btree_node_relock(struct btree_trans *trans,
+					  struct btree_path *path, unsigned level)
+{
+	EBUG_ON(btree_node_locked(path, level) &&
+		btree_node_locked_type(path, level) !=
+		__btree_lock_want(path, level));
+
+	return likely(btree_node_locked(path, level)) ||
+		__bch2_btree_node_relock(trans, path, level);
+}
+
+/* upgrade */
+
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
 					    struct btree_path *, unsigned);
 bool __bch2_btree_path_upgrade(struct btree_trans *,
@@ -318,6 +339,8 @@ static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
 		: path->uptodate == BTREE_ITER_UPTODATE;
 }
 
+/* misc: */
+
 static inline void btree_path_set_should_be_locked(struct btree_path *path)
 {
 	EBUG_ON(!btree_node_locked(path, path->level));
@@ -341,23 +364,11 @@ static inline void btree_path_set_level_up(struct btree_trans *trans,
 	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
 }
 
+/* debug */
+
 struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
 				struct btree_path *, struct btree *, unsigned);
 
-bool bch2_btree_path_relock_norestart(struct btree_trans *,
-				      struct btree_path *, unsigned long);
-int __bch2_btree_path_relock(struct btree_trans *,
-			     struct btree_path *, unsigned long);
-
-static inline int bch2_btree_path_relock(struct btree_trans *trans,
-					 struct btree_path *path, unsigned long trace_ip)
-{
-	return btree_node_locked(path, path->level)
-		? 0
-		: __bch2_btree_path_relock(trans, path, trace_ip);
-}
-
-int bch2_btree_path_relock(struct btree_trans *, struct btree_path *, unsigned long);
-
 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_btree_path_verify_locks(struct btree_path *);
...
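One more detail worth spelling out: __bch2_btree_node_lock_write(), whose tail is visible in the third hunk of btree_locking.c above, counts the read locks the transaction itself already holds on the node, and the six_lock_readers_add(&b->c.lock, readers) call visible there restores them after the write lock is taken — the write lock would otherwise wait forever on our own readers. A single-threaded toy of that drop-then-restore trick (toy_* names invented; presumably the function subtracts the count first, but the captured diff truncates the body):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_lock {
	atomic_int readers;		/* read-lock holders */
	bool write_locked;
};

static void toy_readers_add(struct toy_lock *lock, int nr)
{
	atomic_fetch_add(&lock->readers, nr);
}

/* Write locking requires zero readers (single-threaded demo, no spinning). */
static bool toy_trywrite(struct toy_lock *lock)
{
	if (atomic_load(&lock->readers) || lock->write_locked)
		return false;
	lock->write_locked = true;
	return true;
}

int main(void)
{
	struct toy_lock node = { 0 };
	int our_readers = 2;		/* read locks this transaction holds */

	toy_readers_add(&node, our_readers);

	/* Locking for write now would block on our own read locks: */
	printf("with our readers counted: %d\n", toy_trywrite(&node));	/* 0 */

	/* So drop our own count, take the write lock, then restore it: */
	toy_readers_add(&node, -our_readers);
	printf("after dropping them: %d\n", toy_trywrite(&node));	/* 1 */
	toy_readers_add(&node, our_readers);

	return 0;
}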