Commit a752b2ed authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: xsk: Support XDP metadata on XSK RQs

Add support for XDP metadata on XSK RQs for cross-program
communication. The driver no longer calls xdp_set_data_meta_invalid;
on XDP_PASS, the metadata is copied to the newly allocated SKB.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent ddb7afee
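
Background (a sketch, not part of this commit): the metadata area that the driver now preserves is populated on the producer side by an XDP program via bpf_xdp_adjust_meta(). The program name and the u32 per-packet mark below are hypothetical, used only to illustrate what ends up in front of xdp->data on XDP_PASS.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_store_meta(struct xdp_md *ctx)
{
	__u32 *meta;

	/* Reserve 4 bytes of metadata in front of the packet;
	 * data_meta moves down by sizeof(*meta) on success.
	 */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	/* Bounds check against data, required by the verifier. */
	if ((void *)(meta + 1) > (void *)(long)ctx->data)
		return XDP_PASS;

	*meta = 0x1234;	/* hypothetical per-packet mark */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
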
@@ -158,18 +158,24 @@ int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 	return wqe_bulk;
 }
 
-static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
-					       u32 cqe_bcnt)
+static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
 {
+	u32 totallen = xdp->data_end - xdp->data_meta;
+	u32 metalen = xdp->data - xdp->data_meta;
 	struct sk_buff *skb;
 
-	skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
+	skb = napi_alloc_skb(rq->cq.napi, totallen);
 	if (unlikely(!skb)) {
 		rq->stats->buff_alloc_err++;
 		return NULL;
 	}
 
-	skb_put_data(skb, data, cqe_bcnt);
+	skb_put_data(skb, xdp->data_meta, totallen);
+
+	if (metalen) {
+		skb_metadata_set(skb, metalen);
+		__skb_pull(skb, metalen);
+	}
 
 	return skb;
 }
@@ -197,7 +203,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	WARN_ON_ONCE(head_offset);
 
 	xsk_buff_set_size(xdp, cqe_bcnt);
-	xdp_set_data_meta_invalid(xdp);
 	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
 	net_prefetch(xdp->data);
 
@@ -226,7 +231,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
 	 * frame. On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+	return mlx5e_xsk_construct_skb(rq, xdp);
 }
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -244,7 +249,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	WARN_ON_ONCE(wi->offset);
 
 	xsk_buff_set_size(xdp, cqe_bcnt);
-	xdp_set_data_meta_invalid(xdp);
 	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
 	net_prefetch(xdp->data);
 
@@ -256,5 +260,5 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	 * will be handled by mlx5e_free_rx_wqe.
 	 * On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+	return mlx5e_xsk_construct_skb(rq, xdp);
 }
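
Because skb_metadata_set() is now called on the copied SKB, a program later in the stack (for example at the TC ingress hook) can read the same bytes through __sk_buff::data_meta. A minimal consumer sketch, assuming the hypothetical u32 mark from the producer example above and using skb->mark only as an illustration:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int tc_read_meta(struct __sk_buff *skb)
{
	__u32 *meta = (void *)(long)skb->data_meta;
	void *data = (void *)(long)skb->data;

	/* No (or too little) metadata attached to this skb. */
	if ((void *)(meta + 1) > data)
		return TC_ACT_OK;

	/* Example use: propagate the XDP-provided mark into skb->mark. */
	skb->mark = *meta;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";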