Commit 3b04cba7 authored by Jakub Kicinski

Merge branch 'mptcp-properly-clean-up-unaccepted-subflows'

Mat Martineau says:

====================
mptcp: Properly clean up unaccepted subflows

Patch 1 factors out part of the mptcp_close() function for use by a caller
that already owns the socket lock. This is a prerequisite for patch 2; a
simplified sketch of the pattern follows the commit metadata below.

Patch 2 is the fix that fully cleans up the unaccepted subflow sockets.
====================

Link: https://lore.kernel.org/r/20220927193158.195729-1-mathew.j.martineau@linux.intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3e1308a7 30e51b92
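The series relies on the kernel's common locking convention of splitting a function into a double-underscore core that assumes the caller already holds the lock, plus a thin wrapper that takes the lock itself. The user-space sketch below illustrates only that pattern; the names (struct obj, __obj_close(), obj_close(), obj_cancel_work(), obj_clean_queued_entry()) are hypothetical and are not part of the MPTCP code, and a pthread mutex stands in for the socket lock.

/*
 * Illustrative sketch only, not kernel code: a locked "__close" core that
 * reports deferred work, a locking wrapper, and a caller that manages the
 * lock itself and therefore uses the double-underscore variant directly.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	bool has_pending_work;
};

/* Core teardown; the caller must already hold obj->lock. */
static bool __obj_close(struct obj *o)
{
	bool do_cancel_work = o->has_pending_work;

	/* tear down state under the lock ... */
	o->has_pending_work = false;

	/* report deferred work so the caller can finish it after unlocking */
	return do_cancel_work;
}

/* Must run without obj->lock held. */
static void obj_cancel_work(struct obj *o)
{
	printf("cancelling deferred work for %p\n", (void *)o);
}

/* Public close: takes the lock itself, then finishes deferred work unlocked. */
static void obj_close(struct obj *o)
{
	bool do_cancel_work;

	pthread_mutex_lock(&o->lock);
	do_cancel_work = __obj_close(o);
	pthread_mutex_unlock(&o->lock);

	if (do_cancel_work)
		obj_cancel_work(o);
}

/*
 * A caller that already does per-entry bookkeeping under the lock cannot
 * call obj_close() (it would try to take the lock a second time), so it
 * calls __obj_close() directly and defers the cancel until after unlock.
 */
static void obj_clean_queued_entry(struct obj *o)
{
	bool do_cancel_work;

	pthread_mutex_lock(&o->lock);
	/* ... per-entry bookkeeping under the lock ... */
	do_cancel_work = __obj_close(o);
	pthread_mutex_unlock(&o->lock);

	if (do_cancel_work)
		obj_cancel_work(o);
}

int main(void)
{
	struct obj a = { .lock = PTHREAD_MUTEX_INITIALIZER, .has_pending_work = true };
	struct obj b = { .lock = PTHREAD_MUTEX_INITIALIZER, .has_pending_work = false };

	obj_close(&a);
	obj_clean_queued_entry(&b);
	return 0;
}

In the series itself, __mptcp_close() is the locked core, mptcp_close() is the locking wrapper, and mptcp_subflow_queue_clean() (patch 2) already holds the fast socket lock while walking the queue, so it calls __mptcp_close() directly and runs mptcp_cancel_work() only after unlock_sock_fast().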
@@ -2662,7 +2662,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
 		dfrag_clear(sk, dfrag);
 }
 
-static void mptcp_cancel_work(struct sock *sk)
+void mptcp_cancel_work(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
@@ -2802,13 +2802,12 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	sock_put(sk);
 }
 
-static void mptcp_close(struct sock *sk, long timeout)
+bool __mptcp_close(struct sock *sk, long timeout)
 {
 	struct mptcp_subflow_context *subflow;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	bool do_cancel_work = false;
 
-	lock_sock(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
@@ -2850,6 +2849,17 @@ static void mptcp_close(struct sock *sk, long timeout)
 	} else {
 		mptcp_reset_timeout(msk, 0);
 	}
+
+	return do_cancel_work;
+}
+
+static void mptcp_close(struct sock *sk, long timeout)
+{
+	bool do_cancel_work;
+
+	lock_sock(sk);
+
+	do_cancel_work = __mptcp_close(sk, timeout);
 	release_sock(sk);
 	if (do_cancel_work)
 		mptcp_cancel_work(sk);
...
@@ -612,6 +612,8 @@ void mptcp_subflow_reset(struct sock *ssk);
 void mptcp_subflow_queue_clean(struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+bool __mptcp_close(struct sock *sk, long timeout);
+void mptcp_cancel_work(struct sock *sk);
 
 bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
 			   const struct mptcp_addr_info *b, bool use_port);
...
...@@ -602,30 +602,6 @@ static bool subflow_hmac_valid(const struct request_sock *req, ...@@ -602,30 +602,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN); return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
} }
static void mptcp_sock_destruct(struct sock *sk)
{
/* if new mptcp socket isn't accepted, it is free'd
* from the tcp listener sockets request queue, linked
* from req->sk. The tcp socket is released.
* This calls the ULP release function which will
* also remove the mptcp socket, via
* sock_put(ctx->conn).
*
* Problem is that the mptcp socket will be in
* ESTABLISHED state and will not have the SOCK_DEAD flag.
* Both result in warnings from inet_sock_destruct.
*/
if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
sk->sk_state = TCP_CLOSE;
WARN_ON_ONCE(sk->sk_socket);
sock_orphan(sk);
}
/* We don't need to clear msk->subflow, as it's still NULL at this point */
mptcp_destroy_common(mptcp_sk(sk), 0);
inet_sock_destruct(sk);
}
static void mptcp_force_close(struct sock *sk) static void mptcp_force_close(struct sock *sk)
{ {
/* the msk is not yet exposed to user-space */ /* the msk is not yet exposed to user-space */
...@@ -768,7 +744,6 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, ...@@ -768,7 +744,6 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
/* new mpc subflow takes ownership of the newly /* new mpc subflow takes ownership of the newly
* created mptcp socket * created mptcp socket
*/ */
new_msk->sk_destruct = mptcp_sock_destruct;
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq; mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1); mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
mptcp_token_accept(subflow_req, mptcp_sk(new_msk)); mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
...@@ -1763,13 +1738,19 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk) ...@@ -1763,13 +1738,19 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
for (msk = head; msk; msk = next) { for (msk = head; msk; msk = next) {
struct sock *sk = (struct sock *)msk; struct sock *sk = (struct sock *)msk;
bool slow; bool slow, do_cancel_work;
sock_hold(sk);
slow = lock_sock_fast_nested(sk); slow = lock_sock_fast_nested(sk);
next = msk->dl_next; next = msk->dl_next;
msk->first = NULL; msk->first = NULL;
msk->dl_next = NULL; msk->dl_next = NULL;
do_cancel_work = __mptcp_close(sk, 0);
unlock_sock_fast(sk, slow); unlock_sock_fast(sk, slow);
if (do_cancel_work)
mptcp_cancel_work(sk);
sock_put(sk);
} }
/* we are still under the listener msk socket lock */ /* we are still under the listener msk socket lock */
......