Commit f52ac702 authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: RX, Add XDP multi-buffer support in Striding RQ

Here we add support for multi-buffer XDP handling in Striding RQ, which
is our default out-of-the-box RQ type. Before this series, loading a
multi-buffer XDP program would fail unless you first switched to the
legacy RQ (by unsetting the rx_striding_rq priv-flag).

To overcome the lack of headroom and tailroom between the strides, we
allocate a side page to be used for the descriptor (xdp_buff / skb) and
the linear part. When an XDP program is attached, we structure the
xdp_buff so that it contains no data in the linear part, and the whole
packet resides in the fragments.
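
For illustration, a minimal sketch of this layout built on the generic
XDP helpers; the function and parameter names below are made up for the
example, while the driver does the equivalent through its
mlx5e_fill_mxbuf() helper and its own frag handling:

#include <linux/bpf.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

/* Illustrative only: lay out an xdp_buff whose linear part is an empty
 * area on a side page, while the packet bytes are attached as frags.
 * The caller guarantees nr_strides <= MAX_SKB_FRAGS.
 */
static void sketch_build_mbuf_xdp(struct xdp_buff *xdp,
				  struct xdp_rxq_info *rxq,
				  struct page *side_page,
				  struct page **stride_pages,
				  u32 stride_len, int nr_strides)
{
	struct skb_shared_info *sinfo;
	int i;

	/* Linear area on the side page: headroom only, zero data bytes. */
	xdp_init_buff(xdp, PAGE_SIZE, rxq);
	xdp_prepare_buff(xdp, page_address(side_page),
			 XDP_PACKET_HEADROOM, 0, true);

	/* Every stride of the packet becomes a fragment. */
	sinfo = xdp_get_shared_info_from_buff(xdp);
	sinfo->nr_frags = 0;
	sinfo->xdp_frags_size = 0;
	for (i = 0; i < nr_strides; i++) {
		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];

		__skb_frag_set_page(frag, stride_pages[i]);
		skb_frag_off_set(frag, 0);
		skb_frag_size_set(frag, stride_len);
		sinfo->xdp_frags_size += stride_len;
	}
	xdp_buff_set_frags_flag(xdp);
}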

In case of XDP_PASS, where an SKB still needs to be created, we copy up
to 256 bytes to its linear part, to match the current behavior and to
satisfy functions that expect to find the packet headers in the SKB
linear part (such as eth_type_trans()).
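
For illustration, a minimal sketch of that XDP_PASS flow built on the
generic helpers; the function name and parameters below are made up, the
256-byte bound stands in for MLX5E_RX_MAX_HEAD, and the driver's actual
path goes through mlx5e_build_linear_skb() and __pskb_pull_tail():

#include <linux/minmax.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

/* Illustrative only: after XDP_PASS, wrap the side page in an skb whose
 * linear part starts out empty, restore the fragment state, then pull
 * the first bytes out of the frags so header parsing (e.g.
 * eth_type_trans()) finds them at skb->data.
 */
static struct sk_buff *sketch_xdp_pass_to_skb(struct xdp_buff *xdp,
					      u32 frame_sz, u8 nr_frags,
					      u32 frags_size, u32 frags_truesize)
{
	u32 headlen = min_t(u32, 256, frags_size);
	struct sk_buff *skb;

	/* Linear skb over the side page: same headroom as the xdp_buff,
	 * zero bytes of linear data at this point.
	 */
	skb = build_skb(xdp->data_hard_start, frame_sz);
	if (unlikely(!skb))
		return NULL;
	skb_reserve(skb, xdp->data - xdp->data_hard_start);

	/* build_skb() resets the shared info; re-attach the fragments the
	 * xdp_buff was carrying (the frag array itself is preserved).
	 */
	if (xdp_buff_has_frags(xdp))
		xdp_update_skb_shared_info(skb, nr_frags, frags_size,
					   frags_truesize,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	/* Copy up to 256 bytes of headers from the frags into the linear part. */
	if (headlen && !__pskb_pull_tail(skb, headlen)) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}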

Performance testing:

Packet rate test, 64 bytes, 32 channels, MTU 9000 bytes.
CPU: Intel(R) Xeon(R) Platinum 8380 CPU @ 2.30GHz.
NIC: ConnectX-6 Dx, at 100 Gbps.

+----------+-----------------+-------------------+---------+
| Test     | Legacy RQ [pps] | Striding RQ [pps] | Speedup |
+----------+-----------------+-------------------+---------+
| XDP_DROP |     101,615,544 |       117,191,020 | +15%    |
+----------+-----------------+-------------------+---------+
| XDP_TX   |      95,608,169 |       117,043,422 | +22%    |
+----------+-----------------+-------------------+---------+
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2cb0e27d
@@ -587,6 +587,7 @@ union mlx5e_alloc_units {
 struct mlx5e_mpw_info {
 	u16 consumed_strides;
 	DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
+	struct mlx5e_frag_page linear_page;
 	union mlx5e_alloc_units alloc_units;
 };
@@ -323,6 +323,20 @@ static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
 	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
 }
 
+bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+					  struct mlx5e_params *params,
+					  struct mlx5e_xsk_param *xsk)
+{
+	u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+	u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+
+	return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
+					     log_wqe_num_of_strides,
+					     page_shift, umr_mode);
+}
+
 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 				  struct mlx5e_params *params,
 				  struct mlx5e_xsk_param *xsk)
@@ -405,6 +419,10 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
 		return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
 
+	/* XDP in mlx5e doesn't support multiple packets per page. */
+	if (params->xdp_prog)
+		return PAGE_SHIFT;
+
 	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
 }
@@ -575,9 +593,6 @@ int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
 		return -EOPNOTSUPP;
 
-	if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
-		return -EINVAL;
-
 	return 0;
 }
@@ -153,6 +153,9 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+					  struct mlx5e_params *params,
+					  struct mlx5e_xsk_param *xsk);
 
 static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
 					   struct mlx5e_params *params)
@@ -803,6 +803,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		pool_size = rq->mpwqe.pages_per_wqe <<
 			mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
 
+		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
+			pool_size *= 2; /* additional page per packet for the linear part */
+
 		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
 		rq->mpwqe.num_strides =
 			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
@@ -4060,10 +4063,9 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
 	val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
 	      NETDEV_XDP_ACT_XSK_ZEROCOPY |
+	      NETDEV_XDP_ACT_RX_SG |
 	      NETDEV_XDP_ACT_NDO_XMIT |
 	      NETDEV_XDP_ACT_NDO_XMIT_SG;
-	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
-		val |= NETDEV_XDP_ACT_RX_SG;
 	xdp_set_features_flag(netdev, val);
 }
@@ -4261,23 +4263,20 @@ static bool mlx5e_params_validate_xdp(struct net_device *netdev,
 		mlx5e_rx_is_linear_skb(mdev, params, NULL) :
 		mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL);
 
-	/* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
-	 * supported with the new parameters: if PAGE_SIZE is bigger than
-	 * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, striding RQ can't be used, even though
-	 * the MTU is small enough for the linear mode, because XDP uses strides
-	 * of PAGE_SIZE on regular RQs.
-	 */
-	if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
-		netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
-			    params->sw_mtu,
-			    mlx5e_xdp_max_mtu(params, NULL));
-		return false;
-	}
-	if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
-		netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
-			    params->sw_mtu,
-			    mlx5e_xdp_max_mtu(params, NULL));
-		return false;
+	if (!is_linear) {
+		if (!params->xdp_prog->aux->xdp_has_frags) {
+			netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
+				    params->sw_mtu,
+				    mlx5e_xdp_max_mtu(params, NULL));
+			return false;
+		}
+		if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+		    !mlx5e_verify_params_rx_mpwqe_strides(mdev, params, NULL)) {
+			netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
+				    params->sw_mtu,
+				    mlx5e_xdp_max_mtu(params, NULL));
+			return false;
+		}
 	}
 
 	return true;
@@ -1982,36 +1982,51 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	struct skb_shared_info *sinfo;
 	struct mlx5e_xdp_buff mxbuf;
 	unsigned int truesize = 0;
+	struct bpf_prog *prog;
 	struct sk_buff *skb;
 	u32 linear_frame_sz;
 	u16 linear_data_len;
-	dma_addr_t addr;
 	u16 linear_hr;
 	void *va;
 
-	skb = napi_alloc_skb(rq->cq.napi,
-			     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
-	if (unlikely(!skb)) {
-		rq->stats->buff_alloc_err++;
-		return NULL;
-	}
-
-	va = skb->head;
-	net_prefetchw(skb->data);
+	prog = rcu_dereference(rq->xdp_prog);
 
-	frag_offset += headlen;
-	byte_cnt -= headlen;
-	linear_hr = skb_headroom(skb);
-	linear_data_len = headlen;
-	linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
-	if (unlikely(frag_offset >= PAGE_SIZE)) {
-		frag_page++;
-		frag_offset -= PAGE_SIZE;
+	if (prog) {
+		/* area for bpf_xdp_[store|load]_bytes */
+		net_prefetchw(page_address(frag_page->page) + frag_offset);
+		if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+			rq->stats->buff_alloc_err++;
+			return NULL;
+		}
+		va = page_address(wi->linear_page.page);
+		net_prefetchw(va); /* xdp_frame data area */
+		linear_hr = XDP_PACKET_HEADROOM;
+		linear_data_len = 0;
+		linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
+	} else {
+		skb = napi_alloc_skb(rq->cq.napi,
+				     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
+		if (unlikely(!skb)) {
+			rq->stats->buff_alloc_err++;
+			return NULL;
+		}
+		skb_mark_for_recycle(skb);
+		va = skb->head;
+		net_prefetchw(va); /* xdp_frame data area */
+		net_prefetchw(skb->data);
+
+		frag_offset += headlen;
+		byte_cnt -= headlen;
+		linear_hr = skb_headroom(skb);
+		linear_data_len = headlen;
+		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
+		if (unlikely(frag_offset >= PAGE_SIZE)) {
+			frag_page++;
+			frag_offset -= PAGE_SIZE;
+		}
 	}
 
-	skb_mark_for_recycle(skb);
 	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+	net_prefetch(mxbuf.xdp.data);
 
 	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
@@ -2030,25 +2045,71 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 		frag_offset = 0;
 		frag_page++;
 	}
 
-	if (xdp_buff_has_frags(&mxbuf.xdp)) {
-		struct mlx5e_frag_page *pagep;
-
-		xdp_update_skb_shared_info(skb, sinfo->nr_frags,
-					   sinfo->xdp_frags_size, truesize,
-					   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+	if (prog) {
+		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+				int i;
+
+				for (i = 0; i < sinfo->nr_frags; i++)
+					/* non-atomic */
+					__set_bit(page_idx + i, wi->skip_release_bitmap);
+				return NULL;
+			}
+			mlx5e_page_release_fragmented(rq, &wi->linear_page);
+			return NULL; /* page/packet was consumed by XDP */
+		}
+
+		skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
+					     linear_frame_sz,
+					     mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
+					     mxbuf.xdp.data - mxbuf.xdp.data_meta);
+		if (unlikely(!skb)) {
+			mlx5e_page_release_fragmented(rq, &wi->linear_page);
+			return NULL;
+		}
 
-		pagep = frag_page - sinfo->nr_frags;
-		do
-			pagep->frags++;
-		while (++pagep < frag_page);
-	}
-	/* copy header */
-	addr = page_pool_get_dma_addr(head_page->page);
-	mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
-			      head_offset, head_offset, headlen);
-	/* skb linear part was allocated with headlen and aligned to long */
-	skb->tail += headlen;
-	skb->len += headlen;
+		skb_mark_for_recycle(skb);
+		wi->linear_page.frags++;
+		mlx5e_page_release_fragmented(rq, &wi->linear_page);
+
+		if (xdp_buff_has_frags(&mxbuf.xdp)) {
+			struct mlx5e_frag_page *pagep;
+
+			/* sinfo->nr_frags is reset by build_skb, calculate again. */
+			xdp_update_skb_shared_info(skb, frag_page - head_page,
+						   sinfo->xdp_frags_size, truesize,
+						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+			pagep = head_page;
+			do
+				pagep->frags++;
+			while (++pagep < frag_page);
+		}
+		__pskb_pull_tail(skb, headlen);
+	} else {
+		dma_addr_t addr;
+
+		if (xdp_buff_has_frags(&mxbuf.xdp)) {
+			struct mlx5e_frag_page *pagep;
+
+			xdp_update_skb_shared_info(skb, sinfo->nr_frags,
+						   sinfo->xdp_frags_size, truesize,
+						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+			pagep = frag_page - sinfo->nr_frags;
+			do
+				pagep->frags++;
+			while (++pagep < frag_page);
+		}
+		/* copy header */
+		addr = page_pool_get_dma_addr(head_page->page);
+		mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
+				      head_offset, head_offset, headlen);
+		/* skb linear part was allocated with headlen and aligned to long */
+		skb->tail += headlen;
+		skb->len += headlen;
+	}
 
 	return skb;
 }