Commit 617fe4fa authored by Linus Torvalds

Merge tag 'locking-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Two changes in this cycle:

   - a micro-optimization in static_key_slow_inc_cpuslocked()

   - fix futex death-notification wakeup bug"

* tag 'locking-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Resend potentially swallowed owner death notification
  jump_label: Use atomic_try_cmpxchg() in static_key_slow_inc_cpuslocked()
parents 2f60f830 90d75889
...@@ -638,6 +638,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, ...@@ -638,6 +638,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
bool pi, bool pending_op) bool pi, bool pending_op)
{ {
u32 uval, nval, mval; u32 uval, nval, mval;
pid_t owner;
int err; int err;
/* Futex address must be 32bit aligned */ /* Futex address must be 32bit aligned */
...@@ -659,6 +660,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, ...@@ -659,6 +660,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
* 2. A woken up waiter is killed before it can acquire the * 2. A woken up waiter is killed before it can acquire the
* futex in user space. * futex in user space.
* *
* In the second case, the wake up notification could be generated
* by the unlock path in user space after setting the futex value
* to zero or by the kernel after setting the OWNER_DIED bit below.
*
* In both cases the TID validation below prevents a wakeup of * In both cases the TID validation below prevents a wakeup of
* potential waiters which can cause these waiters to block * potential waiters which can cause these waiters to block
* forever. * forever.
...@@ -667,24 +672,27 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, ...@@ -667,24 +672,27 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
* *
* 1) task->robust_list->list_op_pending != NULL * 1) task->robust_list->list_op_pending != NULL
* @pending_op == true * @pending_op == true
* 2) User space futex value == 0 * 2) The owner part of user space futex value == 0
* 3) Regular futex: @pi == false * 3) Regular futex: @pi == false
* *
* If these conditions are met, it is safe to attempt waking up a * If these conditions are met, it is safe to attempt waking up a
* potential waiter without touching the user space futex value and * potential waiter without touching the user space futex value and
* trying to set the OWNER_DIED bit. The user space futex value is * trying to set the OWNER_DIED bit. If the futex value is zero,
* uncontended and the rest of the user space mutex state is * the rest of the user space mutex state is consistent, so a woken
* consistent, so a woken waiter will just take over the * waiter will just take over the uncontended futex. Setting the
* uncontended futex. Setting the OWNER_DIED bit would create * OWNER_DIED bit would create inconsistent state and malfunction
* inconsistent state and malfunction of the user space owner died * of the user space owner died handling. Otherwise, the OWNER_DIED
* handling. * bit is already set, and the woken waiter is expected to deal with
* this.
*/ */
if (pending_op && !pi && !uval) { owner = uval & FUTEX_TID_MASK;
if (pending_op && !pi && !owner) {
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
return 0; return 0;
} }
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) if (owner != task_pid_vnr(curr))
return 0; return 0;
/* /*
......
...@@ -115,8 +115,6 @@ EXPORT_SYMBOL_GPL(static_key_count); ...@@ -115,8 +115,6 @@ EXPORT_SYMBOL_GPL(static_key_count);
void static_key_slow_inc_cpuslocked(struct static_key *key) void static_key_slow_inc_cpuslocked(struct static_key *key)
{ {
int v, v1;
STATIC_KEY_CHECK_USE(key); STATIC_KEY_CHECK_USE(key);
lockdep_assert_cpus_held(); lockdep_assert_cpus_held();
...@@ -132,11 +130,9 @@ void static_key_slow_inc_cpuslocked(struct static_key *key) ...@@ -132,11 +130,9 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
* so it counts as "enabled" in jump_label_update(). Note that * so it counts as "enabled" in jump_label_update(). Note that
* atomic_inc_unless_negative() checks >= 0, so roll our own. * atomic_inc_unless_negative() checks >= 0, so roll our own.
*/ */
for (v = atomic_read(&key->enabled); v > 0; v = v1) { for (int v = atomic_read(&key->enabled); v > 0; )
v1 = atomic_cmpxchg(&key->enabled, v, v + 1); if (likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)))
if (likely(v1 == v))
return; return;
}
jump_label_lock(); jump_label_lock();
if (atomic_read(&key->enabled) == 0) { if (atomic_read(&key->enabled) == 0) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment