Commit f1f26512 authored by Paolo Abeni, committed by Jakub Kicinski

mptcp: use plain bool instead of custom binary enum

The 'data_avail' subflow field is already used as plain boolean,
drop the custom binary enum type and switch to bool.

No functional change intended.
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <martineau@kernel.org>
Link: https://lore.kernel.org/r/20231023-send-net-next-20231023-2-v1-3-9dc60939d371@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent bf0e9610
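For readers outside the MPTCP tree, here is a minimal, self-contained sketch of the pattern the diff below applies: a two-value enum that only ever encodes "no data" / "data available" is replaced by a plain bool, with the enum constants becoming true/false at every WRITE_ONCE() call site while boolean-context reads stay unchanged. All names in the sketch are illustrative, not the kernel's, and the WRITE_ONCE/READ_ONCE macros are simplified stand-ins for the kernel macros.

/* Illustrative sketch only; demo_* names are hypothetical and the
 * WRITE_ONCE/READ_ONCE definitions are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

/* Before: a custom binary enum used only as a flag. */
enum demo_data_avail {
	DEMO_NODATA,
	DEMO_DATA_AVAIL,
};

struct demo_subflow_old {
	enum demo_data_avail data_avail;
};

/* After: the same state expressed directly as a bool. */
struct demo_subflow_new {
	bool data_avail;
};

int main(void)
{
	struct demo_subflow_old old_sf = { .data_avail = DEMO_NODATA };
	struct demo_subflow_new new_sf = { .data_avail = false };

	/* Writers change in lockstep: enum constants become true/false. */
	WRITE_ONCE(old_sf.data_avail, DEMO_DATA_AVAIL);
	WRITE_ONCE(new_sf.data_avail, true);

	/* Readers already treat the field as a boolean, so they are untouched. */
	if (READ_ONCE(old_sf.data_avail))
		printf("old: data available\n");
	if (READ_ONCE(new_sf.data_avail))
		printf("new: data available\n");
	return 0;
}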
@@ -434,11 +434,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
 	return (struct mptcp_subflow_request_sock *)rsk;
 }
 
-enum mptcp_data_avail {
-	MPTCP_SUBFLOW_NODATA,
-	MPTCP_SUBFLOW_DATA_AVAIL,
-};
-
 struct mptcp_delegated_action {
 	struct napi_struct napi;
 	struct list_head head;
@@ -494,7 +489,7 @@ struct mptcp_subflow_context {
 		valid_csum_seen : 1,	/* at least one csum validated */
 		is_mptfo : 1,		/* subflow is doing TFO */
 		__unused : 9;
-	enum mptcp_data_avail data_avail;
+	bool	data_avail;
 	bool	scheduled;
 	u32	remote_nonce;
 	u64	thmac;
@@ -1237,7 +1237,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	struct sk_buff *skb;
 
 	if (!skb_peek(&ssk->sk_receive_queue))
-		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+		WRITE_ONCE(subflow->data_avail, false);
 	if (subflow->data_avail)
 		return true;
@@ -1271,7 +1271,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 			continue;
 		}
 
-		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+		WRITE_ONCE(subflow->data_avail, true);
 		break;
 	}
 	return true;
@@ -1293,7 +1293,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 			goto reset;
 		}
 		mptcp_subflow_fail(msk, ssk);
-		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+		WRITE_ONCE(subflow->data_avail, true);
 		return true;
 	}
@@ -1310,7 +1310,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 		while ((skb = skb_peek(&ssk->sk_receive_queue)))
 			sk_eat_skb(ssk, skb);
 		tcp_send_active_reset(ssk, GFP_ATOMIC);
-		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+		WRITE_ONCE(subflow->data_avail, false);
 		return false;
 	}
@@ -1322,7 +1322,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	subflow->map_seq = READ_ONCE(msk->ack_seq);
 	subflow->map_data_len = skb->len;
 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
-	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+	WRITE_ONCE(subflow->data_avail, true);
 	return true;
 }
@@ -1334,7 +1334,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
 	if (subflow->map_valid &&
 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
 		subflow->map_valid = 0;
-		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+		WRITE_ONCE(subflow->data_avail, false);
 		pr_debug("Done with mapping: seq=%u data_len=%u",
 			 subflow->map_subflow_seq,