Commit 9724343e authored by Paolo Abeni

Merge branch 'mptcp-allow-privileged-operations-from-user-ns-cleanup'

Matthieu Baerts says:

====================
mptcp: allow privileged operations from user ns & cleanup

This series allows privileged Netlink operations from user namespaces. When a
non-root user configures MPTCP endpoints, the memory allocation is now accounted
to this user. See patches 4 and 5.

Apart from that, there are some cleanups:

 - Patch 1 adds a macro to improve code readability

 - Patch 2 regroups similar checks all together

 - Patch 3 uses an explicit boolean instead of a counter to do one more check
====================
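
For context on patches 4 and 5: the generic netlink operations below switch from GENL_ADMIN_PERM, which requires CAP_NET_ADMIN in the initial user namespace, to GENL_UNS_ADMIN_PERM, which accepts CAP_NET_ADMIN in the user namespace that owns the socket's network namespace. A minimal sketch of that distinction, paraphrasing the check generic netlink performs before invoking an operation's .doit handler (the helper name is hypothetical and the details are simplified; this is not the upstream code):

/* Hypothetical helper, roughly what genetlink checks before .doit runs. */
static int example_check_perm(u8 flags, struct sk_buff *skb, struct net *net)
{
	/* GENL_ADMIN_PERM: CAP_NET_ADMIN in the initial user namespace only */
	if ((flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* GENL_UNS_ADMIN_PERM: CAP_NET_ADMIN in the user namespace owning
	 * this network namespace is enough, e.g. a non-root user driving
	 * MPTCP endpoints inside its own user/net namespace pair.
	 */
	if ((flags & GENL_UNS_ADMIN_PERM) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	return 0;
}

Because such a caller may be otherwise unprivileged, the endpoint allocation is likewise switched to GFP_KERNEL_ACCOUNT so the memory is charged to the caller's memory cgroup.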

Link: https://lore.kernel.org/r/20220906205545.1623193-1-matthieu.baerts@tessares.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 006534ec 3eb9a6b6
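
The readability cleanup of patch 1 is visible in the hunks below: a _safe companion to the existing mptcp_for_each_subflow() helper hides the conn_list head at every call site that may remove subflows while iterating. Illustrative usage only (the surrounding function is not shown; msk, sk and flags come from its context):

	struct mptcp_subflow_context *subflow, *tmp;

	/* safe variant: entries may be unlinked while walking the list */
	mptcp_for_each_subflow_safe(msk, subflow, tmp)
		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);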
@@ -796,7 +796,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
 		u8 rm_id = rm_list->ids[i];
 		bool removed = false;
 
-		list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+		mptcp_for_each_subflow_safe(msk, subflow, tmp) {
 			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 			int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
 			u8 id = subflow->local_id;
@@ -1327,7 +1327,7 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
 		return -EINVAL;
 	}
 
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
 	if (!entry) {
 		GENL_SET_ERR_MSG(info, "can't allocate addr");
 		return -ENOMEM;
@@ -2218,17 +2218,17 @@ static const struct genl_small_ops mptcp_pm_ops[] = {
 	{
 		.cmd = MPTCP_PM_CMD_ADD_ADDR,
 		.doit = mptcp_nl_cmd_add_addr,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_DEL_ADDR,
 		.doit = mptcp_nl_cmd_del_addr,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_FLUSH_ADDRS,
 		.doit = mptcp_nl_cmd_flush_addrs,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_GET_ADDR,
@@ -2238,7 +2238,7 @@ static const struct genl_small_ops mptcp_pm_ops[] = {
 	{
 		.cmd = MPTCP_PM_CMD_SET_LIMITS,
 		.doit = mptcp_nl_cmd_set_limits,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_GET_LIMITS,
@@ -2247,27 +2247,27 @@ static const struct genl_small_ops mptcp_pm_ops[] = {
 	{
 		.cmd = MPTCP_PM_CMD_SET_FLAGS,
 		.doit = mptcp_nl_cmd_set_flags,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_ANNOUNCE,
 		.doit = mptcp_nl_cmd_announce,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_REMOVE,
 		.doit = mptcp_nl_cmd_remove,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_SUBFLOW_CREATE,
 		.doit = mptcp_nl_cmd_sf_create,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_SUBFLOW_DESTROY,
 		.doit = mptcp_nl_cmd_sf_destroy,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 };
...
@@ -1538,8 +1538,9 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 	struct mptcp_sendmsg_info info = {
 		.flags = flags,
 	};
+	bool do_check_data_fin = false;
 	struct mptcp_data_frag *dfrag;
-	int len, copied = 0;
+	int len;
 
 	while ((dfrag = mptcp_send_head(sk))) {
 		info.sent = dfrag->already_sent;
@@ -1574,8 +1575,8 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 				goto out;
 			}
 
+			do_check_data_fin = true;
 			info.sent += ret;
-			copied += ret;
 			len -= ret;
 
 			mptcp_update_post_push(msk, dfrag, ret);
@@ -1591,7 +1592,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 	/* ensure the rtx timer is running */
 	if (!mptcp_timer_pending(sk))
 		mptcp_reset_timer(sk);
-	if (copied)
+	if (do_check_data_fin)
 		__mptcp_check_send_data_fin(sk);
 }
@@ -2357,7 +2358,7 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
 	might_sleep();
 
-	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
 		if (inet_sk_state_load(ssk) != TCP_CLOSE)
@@ -2400,7 +2401,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 	mptcp_token_destroy(msk);
 
-	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
 		bool slow;
@@ -3047,7 +3048,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
 	__mptcp_clear_xmit(sk);
 
 	/* join list will be eventually flushed (with rst) at sock lock release time */
-	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node)
+	mptcp_for_each_subflow_safe(msk, subflow, tmp)
 		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
 
 	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
...
@@ -314,6 +314,8 @@ struct mptcp_sock {
 #define mptcp_for_each_subflow(__msk, __subflow) \
 	list_for_each_entry(__subflow, &((__msk)->conn_list), node)
+#define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp) \
+	list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node)
 
 static inline void msk_owned_by_me(const struct mptcp_sock *msk)
 {
...
@@ -706,6 +706,7 @@ do_transfer()
 		addr_nr_ns1=${addr_nr_ns1:10}
 	fi
 
+	local flags="subflow"
 	if [[ "${addr_nr_ns2}" = "fastclose_"* ]]; then
 		# disconnect
 		extra_args="$extra_args -I ${addr_nr_ns2:10}"
@@ -713,6 +714,9 @@ do_transfer()
 	elif [[ "${addr_nr_ns2}" = "userspace_"* ]]; then
 		userspace_pm=1
 		addr_nr_ns2=${addr_nr_ns2:10}
+	elif [[ "${addr_nr_ns2}" = "fullmesh_"* ]]; then
+		flags="${flags},fullmesh"
+		addr_nr_ns2=${addr_nr_ns2:9}
 	fi
 
 	if [ $userspace_pm -eq 1 ]; then
@@ -832,12 +836,6 @@ do_transfer()
 		fi
 	fi
 
-	local flags="subflow"
-	if [[ "${addr_nr_ns2}" = "fullmesh_"* ]]; then
-		flags="${flags},fullmesh"
-		addr_nr_ns2=${addr_nr_ns2:9}
-	fi
-
 	# if newly added endpoints must be deleted, give the background msk
 	# some time to created them
 	[ $addr_nr_ns1 -gt 0 ] && [ $addr_nr_ns2 -lt 0 ] && sleep 1
...