Commit 90b05e4e authored by Imre Deak, committed by Greg Kroah-Hartman

locking/lockdep: Fix OOO unlock when hlocks need merging

[ Upstream commit 8c8889d8 ]

The sequence

	static DEFINE_WW_CLASS(test_ww_class);

	struct ww_acquire_ctx ww_ctx;
	struct ww_mutex ww_lock_a;
	struct ww_mutex ww_lock_b;
	struct mutex lock_c;
	struct mutex lock_d;

	ww_acquire_init(&ww_ctx, &test_ww_class);

	ww_mutex_init(&ww_lock_a, &test_ww_class);
	ww_mutex_init(&ww_lock_b, &test_ww_class);

	mutex_init(&lock_c);

	ww_mutex_lock(&ww_lock_a, &ww_ctx);

	mutex_lock(&lock_c);

	ww_mutex_lock(&ww_lock_b, &ww_ctx);

	mutex_unlock(&lock_c);		(*)

	ww_mutex_unlock(&ww_lock_b);
	ww_mutex_unlock(&ww_lock_a);

	ww_acquire_fini(&ww_ctx);

triggers the following WARN in __lock_release() when doing the unlock at *:

	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1);

The problem is that the WARN check doesn't take into account the merging
of ww_lock_a and ww_lock_b, which results in curr->lockdep_depth
decreasing by 2 instead of only 1.

Note that the following sequence doesn't trigger the WARN, since there
won't be any hlock merging.

	ww_acquire_init(&ww_ctx, &test_ww_class);

	ww_mutex_init(&ww_lock_a, &test_ww_class);
	ww_mutex_init(&ww_lock_b, &test_ww_class);

	mutex_init(&lock_c);
	mutex_init(&lock_d);

	ww_mutex_lock(&ww_lock_a, &ww_ctx);

	mutex_lock(&lock_c);
	mutex_lock(&lock_d);

	ww_mutex_lock(&ww_lock_b, &ww_ctx);

	mutex_unlock(&lock_d);

	ww_mutex_unlock(&ww_lock_b);
	ww_mutex_unlock(&ww_lock_a);

	mutex_unlock(&lock_c);

	ww_acquire_fini(&ww_ctx);

In general both of the above two sequences are valid and shouldn't
trigger any lockdep warning.

Fix this by taking the decrement due to the hlock merging into account
during lock release and hlock class re-setting. Merging can't happen
during lock downgrading since there won't be a new possibility to merge
hlocks in that case, so add a WARN if merging still happens then.
Signed-off-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: ville.syrjala@linux.intel.com
Link: https://lkml.kernel.org/r/20190524201509.9199-1-imre.deak@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent a2b46e78
...@@ -3623,7 +3623,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, ...@@ -3623,7 +3623,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->references = 2; hlock->references = 2;
} }
return 1; return 2;
} }
} }
...@@ -3829,22 +3829,33 @@ static struct held_lock *find_held_lock(struct task_struct *curr, ...@@ -3829,22 +3829,33 @@ static struct held_lock *find_held_lock(struct task_struct *curr,
} }
static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
int idx) int idx, unsigned int *merged)
{ {
struct held_lock *hlock; struct held_lock *hlock;
int first_idx = idx;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0; return 0;
for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
if (!__lock_acquire(hlock->instance, switch (__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock_class(hlock)->subclass,
hlock->trylock, hlock->trylock,
hlock->read, hlock->check, hlock->read, hlock->check,
hlock->hardirqs_off, hlock->hardirqs_off,
hlock->nest_lock, hlock->acquire_ip, hlock->nest_lock, hlock->acquire_ip,
hlock->references, hlock->pin_count)) hlock->references, hlock->pin_count)) {
case 0:
return 1; return 1;
case 1:
break;
case 2:
*merged += (idx == first_idx);
break;
default:
WARN_ON(1);
return 0;
}
} }
return 0; return 0;
} }
...@@ -3855,9 +3866,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name, ...@@ -3855,9 +3866,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
unsigned long ip) unsigned long ip)
{ {
struct task_struct *curr = current; struct task_struct *curr = current;
unsigned int depth, merged = 0;
struct held_lock *hlock; struct held_lock *hlock;
struct lock_class *class; struct lock_class *class;
unsigned int depth;
int i; int i;
if (unlikely(!debug_locks)) if (unlikely(!debug_locks))
...@@ -3882,14 +3893,14 @@ __lock_set_class(struct lockdep_map *lock, const char *name, ...@@ -3882,14 +3893,14 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
curr->lockdep_depth = i; curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key; curr->curr_chain_key = hlock->prev_chain_key;
if (reacquire_held_locks(curr, depth, i)) if (reacquire_held_locks(curr, depth, i, &merged))
return 0; return 0;
/* /*
* I took it apart and put it back together again, except now I have * I took it apart and put it back together again, except now I have
* these 'spare' parts.. where shall I put them. * these 'spare' parts.. where shall I put them.
*/ */
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
return 0; return 0;
return 1; return 1;
} }
...@@ -3897,8 +3908,8 @@ __lock_set_class(struct lockdep_map *lock, const char *name, ...@@ -3897,8 +3908,8 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{ {
struct task_struct *curr = current; struct task_struct *curr = current;
unsigned int depth, merged = 0;
struct held_lock *hlock; struct held_lock *hlock;
unsigned int depth;
int i; int i;
if (unlikely(!debug_locks)) if (unlikely(!debug_locks))
...@@ -3923,7 +3934,11 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) ...@@ -3923,7 +3934,11 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
hlock->read = 1; hlock->read = 1;
hlock->acquire_ip = ip; hlock->acquire_ip = ip;
if (reacquire_held_locks(curr, depth, i)) if (reacquire_held_locks(curr, depth, i, &merged))
return 0;
/* Merging can't happen with unchanged classes.. */
if (DEBUG_LOCKS_WARN_ON(merged))
return 0; return 0;
/* /*
...@@ -3932,6 +3947,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) ...@@ -3932,6 +3947,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
*/ */
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
return 0; return 0;
return 1; return 1;
} }
...@@ -3946,8 +3962,8 @@ static int ...@@ -3946,8 +3962,8 @@ static int
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip) __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{ {
struct task_struct *curr = current; struct task_struct *curr = current;
unsigned int depth, merged = 1;
struct held_lock *hlock; struct held_lock *hlock;
unsigned int depth;
int i; int i;
if (unlikely(!debug_locks)) if (unlikely(!debug_locks))
...@@ -4002,14 +4018,15 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) ...@@ -4002,14 +4018,15 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
if (i == depth-1) if (i == depth-1)
return 1; return 1;
if (reacquire_held_locks(curr, depth, i + 1)) if (reacquire_held_locks(curr, depth, i + 1, &merged))
return 0; return 0;
/* /*
* We had N bottles of beer on the wall, we drank one, but now * We had N bottles of beer on the wall, we drank one, but now
* there's not N-1 bottles of beer left on the wall... * there's not N-1 bottles of beer left on the wall...
* Pouring two of the bottles together is acceptable.
*/ */
DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1); DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
/* /*
* Since reacquire_held_locks() would have called check_chain_key() * Since reacquire_held_locks() would have called check_chain_key()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment