Commit 12f4bd86 authored by Paolo Abeni's avatar Paolo Abeni Committed by Jakub Kicinski

net: add annotation for sock_{lock,unlock}_fast

The static checker is fooled by the non-static locking scheme
implemented by the mentioned helpers.
Let's make its life easier by adding some unconditional annotation
so that the helpers are now interpreted as a plain spinlock by
sparse.

v1 -> v2:
 - add __releases() annotation to unlock_sock_fast()
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/6ed7ae627d8271fb7f20e0a9c6750fbba1ac2635.1605634911.git.pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c3bc2adb
...@@ -1595,7 +1595,8 @@ void release_sock(struct sock *sk); ...@@ -1595,7 +1595,8 @@ void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING) SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
bool lock_sock_fast(struct sock *sk); bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
/** /**
* unlock_sock_fast - complement of lock_sock_fast * unlock_sock_fast - complement of lock_sock_fast
* @sk: socket * @sk: socket
...@@ -1605,11 +1606,14 @@ bool lock_sock_fast(struct sock *sk); ...@@ -1605,11 +1606,14 @@ bool lock_sock_fast(struct sock *sk);
* If slow mode is on, we call regular release_sock() * If slow mode is on, we call regular release_sock()
*/ */
static inline void unlock_sock_fast(struct sock *sk, bool slow) static inline void unlock_sock_fast(struct sock *sk, bool slow)
__releases(&sk->sk_lock.slock)
{ {
if (slow) if (slow) {
release_sock(sk); release_sock(sk);
else __release(&sk->sk_lock.slock);
} else {
spin_unlock_bh(&sk->sk_lock.slock); spin_unlock_bh(&sk->sk_lock.slock);
}
} }
/* Used by processes to "lock" a socket state, so that /* Used by processes to "lock" a socket state, so that
......
...@@ -3078,7 +3078,7 @@ EXPORT_SYMBOL(release_sock); ...@@ -3078,7 +3078,7 @@ EXPORT_SYMBOL(release_sock);
* *
* sk_lock.slock unlocked, owned = 1, BH enabled * sk_lock.slock unlocked, owned = 1, BH enabled
*/ */
bool lock_sock_fast(struct sock *sk) bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
{ {
might_sleep(); might_sleep();
spin_lock_bh(&sk->sk_lock.slock); spin_lock_bh(&sk->sk_lock.slock);
...@@ -3096,6 +3096,7 @@ bool lock_sock_fast(struct sock *sk) ...@@ -3096,6 +3096,7 @@ bool lock_sock_fast(struct sock *sk)
* The sk_lock has mutex_lock() semantics here: * The sk_lock has mutex_lock() semantics here:
*/ */
mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
__acquire(&sk->sk_lock.slock);
local_bh_enable(); local_bh_enable();
return true; return true;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment