Commit 1feeab80 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: XDP, Add array for WQE info descriptors

Each xdp_wqe_info instance describes the number of data segments
and WQEBBs of its WQE.
This is useful for a downstream patch that adds support for the
Multi-Packet TX WQE feature.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent fea28dd6
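
Note: the core change is bookkeeping. Once a single TX WQE may carry several
packets, the completion path can no longer assume one FIFO entry and one WQEBB
per completed WQE; it must look up, per WQE, how many descriptors to pop and
how far to advance the consumer counter. A minimal standalone sketch of that
idea (the names wqe_info, complete_one_wqe, and the ring size are illustrative,
not the driver's API):

    #include <stdint.h>

    struct wqe_info {
    	uint8_t num_wqebbs;	/* WQE basic blocks occupied in the ring */
    	uint8_t num_ds;		/* data segments, i.e. FIFO entries to pop */
    };

    #define WQ_SZ 64		/* illustrative power-of-two ring size */

    /* Consume one completed WQE: release one descriptor per data segment,
     * then advance the consumer counter by the WQE's WQEBB count.
     */
    static uint16_t complete_one_wqe(const struct wqe_info *infos, uint16_t cc)
    {
    	const struct wqe_info *wi = &infos[cc & (WQ_SZ - 1)];
    	int i;

    	for (i = 0; i < wi->num_ds; i++)
    		/* pop one entry from the descriptor FIFO and unmap/recycle it */;

    	return cc + wi->num_wqebbs;
    }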
@@ -411,6 +411,11 @@ struct mlx5e_xdp_info_fifo {
 	u32 mask;
 };
 
+struct mlx5e_xdp_wqe_info {
+	u8 num_wqebbs;
+	u8 num_ds;
+};
+
 struct mlx5e_xdpsq {
 	/* data path */
@@ -430,6 +435,7 @@ struct mlx5e_xdpsq {
 	struct mlx5_wq_cyc         wq;
 	struct mlx5e_xdpsq_stats  *stats;
 	struct {
+		struct mlx5e_xdp_wqe_info *wqe_info;
 		struct mlx5e_xdp_info_fifo xdpi_fifo;
 	} db;
 	void __iomem              *uar_map;
...
@@ -199,19 +199,27 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 			      get_cqe_opcode(cqe));
 
 		do {
-			struct mlx5e_xdp_info xdpi =
-				mlx5e_xdpi_fifo_pop(xdpi_fifo);
+			struct mlx5e_xdp_wqe_info *wi;
+			u16 ci, j;
 
 			last_wqe = (sqcc == wqe_counter);
-			sqcc++;
-
-			if (is_redirect) {
-				xdp_return_frame(xdpi.xdpf);
-				dma_unmap_single(sq->pdev, xdpi.dma_addr,
-						 xdpi.xdpf->len, DMA_TO_DEVICE);
-			} else {
-				/* Recycle RX page */
-				mlx5e_page_release(rq, &xdpi.di, true);
+			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+			wi = &sq->db.wqe_info[ci];
+
+			sqcc += wi->num_wqebbs;
+
+			for (j = 0; j < wi->num_ds; j++) {
+				struct mlx5e_xdp_info xdpi =
+					mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+				if (is_redirect) {
+					xdp_return_frame(xdpi.xdpf);
+					dma_unmap_single(sq->pdev, xdpi.dma_addr,
+							 xdpi.xdpf->len, DMA_TO_DEVICE);
+				} else {
+					/* Recycle RX page */
+					mlx5e_page_release(rq, &xdpi.di, true);
+				}
 			}
 		} while (!last_wqe);
 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
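
Note: sqcc is a free-running counter, not a ring index; mlx5_wq_cyc_ctr2ix()
maps it onto a slot of the cyclic WQ. Since the WQ size is a power of two,
the mapping is effectively a mask, roughly (a sketch, not the driver's
implementation):

    #include <stdint.h>

    /* Roughly what the counter-to-index conversion does: wrap a free-running
     * 16-bit counter onto a power-of-two-sized ring.
     */
    static inline uint16_t ctr2ix(uint16_t ctr, uint16_t wq_size)
    {
    	return ctr & (uint16_t)(wq_size - 1);
    }

Advancing sqcc by wi->num_wqebbs rather than by 1 is what lets a single
completion cover a WQE that spans several WQEBBs.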
@@ -233,18 +241,26 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
 	bool is_redirect = !rq;
 
 	while (sq->cc != sq->pc) {
-		struct mlx5e_xdp_info xdpi =
-			mlx5e_xdpi_fifo_pop(xdpi_fifo);
+		struct mlx5e_xdp_wqe_info *wi;
+		u16 ci, i;
 
-		sq->cc++;
+		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+		wi = &sq->db.wqe_info[ci];
 
-		if (is_redirect) {
-			xdp_return_frame(xdpi.xdpf);
-			dma_unmap_single(sq->pdev, xdpi.dma_addr,
-					 xdpi.xdpf->len, DMA_TO_DEVICE);
-		} else {
-			/* Recycle RX page */
-			mlx5e_page_release(rq, &xdpi.di, false);
+		sq->cc += wi->num_wqebbs;
+
+		for (i = 0; i < wi->num_ds; i++) {
+			struct mlx5e_xdp_info xdpi =
+				mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+			if (is_redirect) {
+				xdp_return_frame(xdpi.xdpf);
+				dma_unmap_single(sq->pdev, xdpi.dma_addr,
+						 xdpi.xdpf->len, DMA_TO_DEVICE);
+			} else {
+				/* Recycle RX page */
+				mlx5e_page_release(rq, &xdpi.di, false);
+			}
 		}
 	}
 }
...
@@ -993,6 +993,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
 	kvfree(sq->db.xdpi_fifo.xi);
+	kvfree(sq->db.wqe_info);
 }
 
 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
@@ -1015,8 +1016,14 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
+	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
 	int err;
 
+	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
+					GFP_KERNEL, numa);
+	if (!sq->db.wqe_info)
+		return -ENOMEM;
+
 	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
 	if (err) {
 		mlx5e_free_xdpsq_db(sq);
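
Note: the array holds one mlx5e_xdp_wqe_info per slot of the cyclic WQ,
allocated zeroed on the channel's NUMA node. The unwind goes through the
single db-free helper; since kvfree(NULL) is a no-op, calling it with a
partially initialized db is safe. A condensed, self-contained sketch of the
pattern, with calloc/free standing in for kvzalloc_node/kvfree (illustrative
names, not driver code):

    #include <stdlib.h>

    struct wqe_info { unsigned char num_wqebbs, num_ds; };

    struct xdpsq_db {
    	struct wqe_info *wqe_info;
    	void **fifo;
    };

    static void db_free(struct xdpsq_db *db)
    {
    	free(db->fifo);		/* free(NULL) is a no-op, like kvfree(NULL) */
    	free(db->wqe_info);
    }

    /* Allocate per-slot metadata first, then the FIFO; on FIFO failure,
     * unwind everything through the single free helper.
     */
    static int db_alloc(struct xdpsq_db *db, size_t wq_sz, size_t fifo_sz)
    {
    	db->wqe_info = calloc(wq_sz, sizeof(*db->wqe_info));
    	if (!db->wqe_info)
    		return -1;

    	db->fifo = calloc(fifo_sz, sizeof(*db->fifo));
    	if (!db->fifo) {
    		db_free(db);	/* releases wqe_info as well */
    		return -1;
    	}
    	return 0;
    }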
@@ -1606,6 +1613,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 	/* Pre initialize fixed WQE fields */
 	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+		struct mlx5e_xdp_wqe_info *wi  = &sq->db.wqe_info[i];
 		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
 		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
@@ -1616,6 +1624,9 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
 		dseg->lkey = sq->mkey_be;
+
+		wi->num_wqebbs = 1;
+		wi->num_ds     = 1;
 	}
 
 	return 0;
...
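
Note: with this patch every WQE still carries exactly one packet, so both
counters are pre-initialized to 1 and the completion math is unchanged. One
would expect the downstream Multi-Packet TX WQE patch to overwrite these with
the WQE's real footprint when it closes a multi-packet session; a hypothetical
sketch of that step (not code from this patch; it assumes mlx5's 64-byte WQEBB
holding four 16-byte segments, and one data segment/FIFO entry per packet):

    struct wqe_info { unsigned char num_wqebbs, num_ds; };

    /* Hypothetical: after building one multi-packet TX WQE out of ds_cnt
     * 16-byte segments carrying n_pkts packets, record its footprint so
     * the completion path knows how many FIFO entries to pop and how far
     * to advance the consumer counter.
     */
    static void record_wqe_footprint(struct wqe_info *wi,
    				 unsigned int ds_cnt, unsigned int n_pkts)
    {
    	wi->num_ds     = (unsigned char)n_pkts;
    	wi->num_wqebbs = (unsigned char)((ds_cnt + 3) / 4); /* whole WQEBBs */
    }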