Commit 282c0c79 authored by Maxim Mikityanskiy, committed by Daniel Borkmann

net/mlx5e: Allow XSK frames smaller than a page

Relax the requirements on the XSK frame size to allow it to be smaller than
a page and even not a power of two. The current implementation can work in
this mode, both with Striding RQ and without it.

The code that checks `mtu + headroom <= XSK frame size` is modified
accordingly. Any frame size between 2048 and PAGE_SIZE is accepted.
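
For illustration only, the relaxed check roughly amounts to the following
sketch (the helper name and standalone form are hypothetical; the real check
lives in mlx5e_validate_xsk_param in the diff below):

/* Simplified sketch of the relaxed XSK frame size validation. */
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048	/* mirrors XDP_UMEM_MIN_CHUNK_SIZE */

static bool xsk_chunk_size_ok(u32 chunk_size, u32 hw_mtu, u32 headroom)
{
	/* Any frame size between 2048 and PAGE_SIZE is accepted... */
	if (chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE || chunk_size > PAGE_SIZE)
		return false;

	/* ...as long as headroom + MTU still fits in a single frame. */
	return headroom + hw_mtu <= chunk_size;
}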

Functions that previously worked only with pages now work with XSK frames,
even if their size is different from PAGE_SIZE.

With XSK queues, regardless of the frame size, Striding RQ uses the
stride size of PAGE_SIZE, and UMR MTTs are posted using starting
addresses of frames, but PAGE_SIZE as page size. MTU guarantees that no
packet data will overlap with other frames. UMR MTT size is made equal
to the stride size of the RQ, because UMEM frames may come in random
order, and we need to handle them one by one. PAGE_SIZE is just a power of
two that is bigger than any allowed XSK frame size, and it doesn't require
additional changes to the code.
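
As an illustration of the mapping described above (a hypothetical sketch, not
the driver's actual UMR-posting code), each stride gets one MTT entry that
points at the start of an XSK frame, while the stride and MTT page size stay
at PAGE_SIZE:

/* Hypothetical sketch: one MTT entry per stride, programmed with the DMA
 * address of an XSK frame. Frames are taken one by one because they may
 * arrive in any order, so adjacent frames are never merged into real pages.
 */
static void sketch_fill_umr_mtts(__be64 *mtt, const u64 *frame_dma_addr,
				 int nstrides)
{
	int i;

	for (i = 0; i < nstrides; i++)
		mtt[i] = cpu_to_be64(frame_dma_addr[i]);
}

Since PAGE_SIZE is not smaller than any accepted frame size and the MTU check
guarantees that a packet fits in its frame, packet data never spills into a
neighbouring frame.
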
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent beb3e4b2
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -25,18 +25,33 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
 	return headroom;
 }
 
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
-				struct mlx5e_xsk_param *xsk)
+u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
+			     struct mlx5e_xsk_param *xsk)
 {
 	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
-	u32 frag_sz = linear_rq_headroom + hw_mtu;
+
+	return linear_rq_headroom + hw_mtu;
+}
+
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
+				struct mlx5e_xsk_param *xsk)
+{
+	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
 
 	/* AF_XDP doesn't build SKBs in place. */
 	if (!xsk)
 		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
 
-	/* XDP in mlx5e doesn't support multiple packets per page. */
+	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
+	 * special case. It can run with frames smaller than a page, as it
+	 * doesn't allocate pages dynamically. However, here we pretend that
+	 * fragments are page-sized: it allows to treat XSK frames like pages
+	 * by redirecting alloc and free operations to XSK rings and by using
+	 * the fact there are no multiple packets per "page" (which is a frame).
+	 * The latter is important, because frames may come in a random order,
+	 * and we will have trouble assemblying a real page of multiple frames.
+	 */
 	if (mlx5e_rx_is_xdp(params, xsk))
 		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -76,6 +76,8 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
 				 struct mlx5e_xsk_param *xsk);
+u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
+			     struct mlx5e_xsk_param *xsk);
 u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
 				struct mlx5e_xsk_param *xsk);
 u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -105,7 +105,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	/* head_offset is not used in this function, because di->xsk.data and
 	 * di->addr point directly to the necessary place. Furthermore, in the
-	 * current implementation, one page = one packet = one frame, so
+	 * current implementation, UMR pages are mapped to XSK frames, so
 	 * head_offset should always be 0.
 	 */
 	WARN_ON_ONCE(head_offset);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -4,18 +4,23 @@
 #include "setup.h"
 #include "en/params.h"
 
+/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
+ * change unexpectedly, and mlx5e has a minimum valid stride size for striding
+ * RQ, keep this check in the driver.
+ */
+#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
+
 bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 			      struct mlx5e_xsk_param *xsk,
 			      struct mlx5_core_dev *mdev)
 {
-	/* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current
-	 * mlx5e XDP implementation doesn't support multiple packets per page.
-	 */
-	if (xsk->chunk_size != PAGE_SIZE)
+	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
+	if (xsk->chunk_size > PAGE_SIZE ||
+	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
 		return false;
 
 	/* Current MTU and XSK headroom don't allow packets to fit the frames. */
-	if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size)
+	if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
 		return false;
 
 	/* frag_sz is different for regular and XSK RQs, so ensure that linear