Commit 18187fb2 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: Code movements in RX UMR WQE post

Gather the whole process of posting a RX UMR WQE into one function,
in preparation for a downstream patch that inlines
the WQE data.
No functional change here.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 73281b78
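For orientation before reading the diff: the patch merges the UMR allocation helper and the UMR post helper into a single function. Below is a condensed, compilable sketch of that restructuring; every name in it (map_umr_pages, ring_doorbell, post_umr_wqe_helper, and the _before/_after variants) is a hypothetical stand-in, not the driver's code. The real before/after follows in the diff.

#include <stdio.h>

static int  map_umr_pages(void) { puts("map pages, build UMR WQE"); return 0; }
static void ring_doorbell(void) { puts("notify hw"); }

/* Before: the allocation path stitched two helpers together. */
static void post_umr_wqe_helper(void)
{
        puts("reserve SQ slots, copy WQE, set UMR opcode");
        ring_doorbell();
}

static int alloc_rx_mpwqe_before(void)
{
        if (map_umr_pages())
                return -1;
        post_umr_wqe_helper();
        return 0;
}

/* After: the posting steps are open-coded at the tail of the same
 * function, so a later patch can build the WQE data in place. */
static int alloc_rx_mpwqe_after(void)
{
        if (map_umr_pages())
                return -1;
        puts("reserve SQ slots, copy WQE, set UMR opcode");
        ring_doorbell();
        return 0;
}

int main(void)
{
        return alloc_rx_mpwqe_before() || alloc_rx_mpwqe_after();
}

Both variants print the same sequence, which is the point of the commit message: code moves, no functional change.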
@@ -347,39 +347,44 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
 	}
 }
 
-static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 {
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	struct mlx5e_icosq *sq = &rq->channel->icosq;
-	struct mlx5_wq_cyc *wq = &sq->wq;
-	struct mlx5e_umr_wqe *wqe;
-	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
-	u16 pi;
-
-	/* fill sq edge with nops to avoid wqe wrap around */
-	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
-		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+	int i;
+
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
+		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+		mlx5e_page_release(rq, dma_info, true);
 	}
+}
 
-	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
-	wqe->ctrl.opmod_idx_opcode =
-		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
-			    MLX5_OPCODE_UMR);
+static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
-	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
-	sq->pc += num_wqebbs;
-	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+	rq->mpwqe.umr_in_progress = false;
+
+	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+
+	/* ensure wqes are visible to device before updating doorbell record */
+	dma_wmb();
+
+	mlx5_wq_ll_update_db_record(wq);
 }
 
-static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
-				    u16 ix)
+static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
 	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
 	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+	struct mlx5e_icosq *sq = &rq->channel->icosq;
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	struct mlx5e_umr_wqe *wqe;
+	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
 	int err;
+	u16 pi;
 	int i;
 
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
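The num_wqebbs declaration that moves into mlx5e_alloc_rx_mpwqe() above rounds the UMR WQE size up to whole WQE basic blocks (MLX5_SEND_WQE_BB is 64 bytes in the mlx5 driver). A quick self-contained check of that arithmetic; the sizes below are made up for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SEND_WQE_BB 64                  /* bytes per WQE basic block */

int main(void)
{
        /* hypothetical WQE sizes; e.g. a 96-byte WQE needs 2 WQEBBs */
        unsigned int sizes[] = { 48, 64, 96, 192 };
        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("sizeof(wqe) = %3u -> %u WQEBB(s)\n",
                       sizes[i], DIV_ROUND_UP(sizes[i], SEND_WQE_BB));
        return 0;
}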
@@ -393,6 +398,24 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
 	memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE);
 	wi->consumed_strides = 0;
 
+	rq->mpwqe.umr_in_progress = true;
+
+	/* fill sq edge with nops to avoid wqe wrap around */
+	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+	}
+
+	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+	memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
+	wqe->ctrl.opmod_idx_opcode =
+		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+			    MLX5_OPCODE_UMR);
+
+	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+	sq->pc += num_wqebbs;
+	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+
 	return 0;
 
 err_unmap:
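The "fill sq edge with nops" loop moved in above is the standard trick for multi-slot WQEs on a power-of-two ring: if the producer index is past the last slot at which a full WQE can start, pad with NOPs until the index wraps to zero. A minimal user-space sketch of the same arithmetic; the ring size, slot count, and edge value here are invented, and only the names pc, pi, sz_m1, and edge mirror the driver's:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8                             /* must be a power of two */
#define WQE_SLOTS 3                             /* slots one UMR WQE occupies */

int main(void)
{
        uint16_t sz_m1 = RING_SIZE - 1;         /* index mask, as in wq->sz_m1 */
        uint16_t edge  = RING_SIZE - WQE_SLOTS; /* last valid start slot */
        uint16_t pc    = 6;                     /* producer counter, near the edge */
        uint16_t pi;

        /* fill ring edge with nops to avoid wqe wrap around */
        while ((pi = (pc & sz_m1)) > edge) {
                printf("slot %u: NOP\n", pi);
                pc++;                           /* as mlx5e_post_nop() advances sq->pc */
        }
        printf("slot %u: UMR WQE occupying %d contiguous slots\n", pi, WQE_SLOTS);
        pc += WQE_SLOTS;
        return 0;
}

With pc = 6, slots 6 and 7 receive NOPs and the WQE lands at slot 0, so its three slots never straddle the end of the ring.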
@@ -401,51 +424,11 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
 		page_ref_sub(dma_info->page, pg_strides);
 		mlx5e_page_release(rq, dma_info, true);
 	}
+	rq->stats.buff_alloc_err++;
 
 	return err;
 }
 
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
-{
-	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
-	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
-	int i;
-
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
-		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
-		mlx5e_page_release(rq, dma_info, true);
-	}
-}
-
-static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
-{
-	struct mlx5_wq_ll *wq = &rq->wq;
-	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-
-	rq->mpwqe.umr_in_progress = false;
-
-	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-
-	/* ensure wqes are visible to device before updating doorbell record */
-	dma_wmb();
-
-	mlx5_wq_ll_update_db_record(wq);
-}
-
-static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-{
-	int err;
-
-	err = mlx5e_alloc_rx_umr_mpwqe(rq, ix);
-	if (unlikely(err)) {
-		rq->stats.buff_alloc_err++;
-		return err;
-	}
-
-	rq->mpwqe.umr_in_progress = true;
-	mlx5e_post_umr_wqe(rq, ix);
-
-	return 0;
-}
-
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
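The mlx5e_post_rx_mpwqe() removed in the truncated diff above (it is kept verbatim on the new side, higher in the file) shows the usual descriptor publish pattern: write the WQE, issue dma_wmb(), then update the doorbell record, so the device can never observe the doorbell before the descriptor contents. As a loose user-space analogue of that one-way ordering, a C11 release store gives the same guarantee for CPU observers; this illustrates the idea only, not DMA semantics:

#include <stdatomic.h>
#include <stdio.h>

static int wqe_data;                    /* stands in for the WQE contents */
static atomic_uint db_record;           /* stands in for the doorbell record */

static void post_wqe(int data, unsigned int head)
{
        wqe_data = data;                /* 1. fill the descriptor */
        /* 2. release store: the descriptor write above cannot be reordered
         *    past this, mirroring dma_wmb() before the doorbell update */
        atomic_store_explicit(&db_record, head, memory_order_release);
}

int main(void)
{
        post_wqe(42, 1);
        printf("db_record=%u wqe_data=%d\n",
               (unsigned int)atomic_load(&db_record), wqe_data);
        return 0;
}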