Commit 24426654 authored by Martin KaFai Lau, committed by Alexei Starovoitov

bpf: net: Avoid sk_setsockopt() taking sk lock when called from bpf

Most of the code in bpf_setsockopt(SOL_SOCKET) is duplicated from
sk_setsockopt().  The number of supported optnames keeps increasing,
and so does the duplicated code.

One issue with reusing sk_setsockopt() is that the bpf prog
has already acquired the sk lock.  This patch adds
has_current_bpf_ctx() to tell whether sk_setsockopt() is called from
a bpf prog.  A bpf prog calling bpf_setsockopt() runs either
in_task() or in_serving_softirq(), and in both cases
current->bpf_ctx is initialized.  Thus, has_current_bpf_ctx() only
needs to test !!current->bpf_ctx.
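
For context, here is an illustrative sockops program (not part of
this patch; the prog name and chosen optname are made-up examples)
that reaches the bpf_setsockopt() -> sk_setsockopt() path.  The
kernel runs such a prog with the sk lock already held and
current->bpf_ctx set, which is exactly what has_current_bpf_ctx()
detects:

  #include <sys/socket.h>
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("sockops")
  int set_keepalive(struct bpf_sock_ops *skops)
  {
  	int one = 1;

  	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
  		/* The sk lock is already held here, so the
  		 * sk_setsockopt() underneath must not call
  		 * lock_sock() again.
  		 */
  		bpf_setsockopt(skops, SOL_SOCKET, SO_KEEPALIVE,
  			       &one, sizeof(one));
  	return 1;
  }

  char _license[] SEC("license") = "GPL";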

This patch also adds sockopt_{lock,release}_sock() helpers
for sk_setsockopt() to use.  These helpers test
has_current_bpf_ctx() before acquiring/releasing the lock.  They
are EXPORT_SYMBOL'ed so that the ipv6 module can use them in a
later patch.
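
As a rough sketch of the intended pattern (the function below is
hypothetical; the actual ipv6 conversion comes in the later patch):

  /* Hypothetical module-side setsockopt handler using the exported
   * helpers; both lock calls become no-ops when invoked from bpf.
   */
  static int do_example_setsockopt(struct sock *sk, int optname,
  				   sockptr_t optval, unsigned int optlen)
  {
  	int ret = 0;

  	sockopt_lock_sock(sk);		/* lock_sock(sk) unless bpf */

  	/* ... update sk state for optname ... */

  	sockopt_release_sock(sk);	/* release_sock(sk) unless bpf */
  	return ret;
  }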

Note the change in sock_setbindtodevice(): sockopt_lock_sock()
is now done in sock_setbindtodevice() itself instead of taking
the lock inside sock_bindtoindex(..., lock_sk = true), so that
the lock can be skipped when the caller is a bpf prog.
Reviewed-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/r/20220817061717.4175589-1-kafai@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 4d748f99
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1966,6 +1966,15 @@ static inline bool unprivileged_ebpf_enabled(void)
 	return !sysctl_unprivileged_bpf_disabled;
 }
 
+/* Not all bpf prog type has the bpf_ctx.
+ * For the bpf prog type that has initialized the bpf_ctx,
+ * this function can be used to decide if a kernel function
+ * is called by a bpf program.
+ */
+static inline bool has_current_bpf_ctx(void)
+{
+	return !!current->bpf_ctx;
+}
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -2175,6 +2184,10 @@ static inline bool unprivileged_ebpf_enabled(void)
 	return false;
 }
 
+static inline bool has_current_bpf_ctx(void)
+{
+	return false;
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1749,6 +1749,9 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
 	}
 }
 
+void sockopt_lock_sock(struct sock *sk);
+void sockopt_release_sock(struct sock *sk);
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -703,7 +703,9 @@ static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
 			goto out;
 	}
 
-	return sock_bindtoindex(sk, index, true);
+	sockopt_lock_sock(sk);
+	ret = sock_bindtoindex_locked(sk, index);
+	sockopt_release_sock(sk);
 out:
 #endif
 
@@ -1036,6 +1038,28 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
 	return 0;
 }
 
+void sockopt_lock_sock(struct sock *sk)
+{
+	/* When current->bpf_ctx is set, the setsockopt is called from
+	 * a bpf prog.  bpf has ensured the sk lock has been
+	 * acquired before calling setsockopt().
+	 */
+	if (has_current_bpf_ctx())
+		return;
+
+	lock_sock(sk);
+}
+EXPORT_SYMBOL(sockopt_lock_sock);
+
+void sockopt_release_sock(struct sock *sk)
+{
+	if (has_current_bpf_ctx())
+		return;
+
+	release_sock(sk);
+}
+EXPORT_SYMBOL(sockopt_release_sock);
+
 /*
  *	This is meant for all protocols to use and covers goings on
  *	at the socket level. Everything here is generic.
@@ -1067,7 +1091,7 @@ static int sk_setsockopt(struct sock *sk, int level, int optname,
 
 	valbool = val ? 1 : 0;
 
-	lock_sock(sk);
+	sockopt_lock_sock(sk);
 
 	switch (optname) {
 	case SO_DEBUG:
@@ -1496,7 +1520,7 @@ static int sk_setsockopt(struct sock *sk, int level, int optname,
 		ret = -ENOPROTOOPT;
 		break;
 	}
-	release_sock(sk);
+	sockopt_release_sock(sk);
 	return ret;
 }