Commit b6a66e52 authored by Matthieu Baerts (NGI0), committed by Paolo Abeni

mptcp: sched: check both directions for backup

The 'mptcp_subflow_context' structure has two items related to the
backup flags:

 - 'backup': the subflow has been marked as backup by the other peer

 - 'request_bkup': the backup flag has been set by the host
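
For illustration, a minimal sketch of how the two flags can be combined into a
single "treat as backup" decision (the helper name is hypothetical; only the
two fields listed above come from 'mptcp_subflow_context'):

/* Hypothetical helper: a subflow is treated as backup if either side
 * marked it as such -- 'backup' set because the peer flagged the path,
 * 'request_bkup' set because the local host requested it.
 */
static inline bool mptcp_subflow_is_backup(const struct mptcp_subflow_context *subflow)
{
	return subflow->backup || subflow->request_bkup;
}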

Before this patch, the scheduler was only looking at the 'backup' flag.
That can make sense in some cases, but it is not what we want for
general use: either the path-manager was setting both flags when
sending an MP_PRIO, or the receiver was duplicating the 'backup' flag
in the subflow request.

Note that the use of these two flags in the path-manager is going to be
fixed in the following commits, but this change is needed here to avoid
modifying the behaviour.

Fixes: f296234c ("mptcp: Add handling of incoming MP_JOIN requests")
Cc: stable@vger.kernel.org
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 039564d2
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
 		struct sock *ssk;
 
 		__entry->active = mptcp_subflow_active(subflow);
-		__entry->backup = subflow->backup;
+		__entry->backup = subflow->backup || subflow->request_bkup;
 
 		if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
 			__entry->free = sk_stream_memory_free(subflow->tcp_sock);

@@ -1422,13 +1422,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	}
 
 	mptcp_for_each_subflow(msk, subflow) {
+		bool backup = subflow->backup || subflow->request_bkup;
+
 		trace_mptcp_subflow_get_send(subflow);
 		ssk = mptcp_subflow_tcp_sock(subflow);
 		if (!mptcp_subflow_active(subflow))
 			continue;
 
 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
-		nr_active += !subflow->backup;
+		nr_active += !backup;
 		pace = subflow->avg_pacing_rate;
 		if (unlikely(!pace)) {
 			/* init pacing rate from socket */
@@ -1439,9 +1441,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 		}
 
 		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
-		if (linger_time < send_info[subflow->backup].linger_time) {
-			send_info[subflow->backup].ssk = ssk;
-			send_info[subflow->backup].linger_time = linger_time;
+		if (linger_time < send_info[backup].linger_time) {
+			send_info[backup].ssk = ssk;
+			send_info[backup].linger_time = linger_time;
 		}
 	}
 	__mptcp_set_timeout(sk, tout);
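
As the mptcp_subflow_get_send() hunks show, 'send_info[]' is indexed by the
combined boolean: slot 0 tracks the non-backup subflow with the lowest linger
time and slot 1 the best backup one, while 'nr_active' only counts non-backup
subflows. With the combined flag, a subflow the local host asked to turn into
a backup via 'request_bkup' is now scheduled like one the peer marked with
'backup', which matches the intent described in the commit message.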