Commit 132857d9 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Use non-XSK page allocator in SHAMPO

The SHAMPO flow is not compatible with XSK, so it can call the page pool
allocator directly to save a branch.

mlx5e_page_alloc is removed, as it's no longer used in any flow.
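
For context, the pool-only allocator that SHAMPO now calls directly has
roughly the shape sketched below. This is a minimal reconstruction for
illustration, not the verbatim kernel source; it assumes the standard
page_pool_dev_alloc_pages() helper from the page_pool API:

static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
	/* Allocate straight from the RQ's page pool; unlike the removed
	 * mlx5e_page_alloc(), there is no rq->xsk_pool branch, because
	 * SHAMPO never runs with an XSK pool attached.
	 */
	au->page = page_pool_dev_alloc_pages(rq->page_pool);
	if (unlikely(!au->page))
		return -ENOMEM;

	return 0;
}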
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent cf544517
@@ -293,16 +293,6 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
 	return 0;
 }
 
-static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
-{
-	if (rq->xsk_pool) {
-		au->xsk = xsk_buff_alloc(rq->xsk_pool);
-		return likely(au->xsk) ? 0 : -ENOMEM;
-	} else {
-		return mlx5e_page_alloc_pool(rq, au);
-	}
-}
-
 void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
 {
 	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
@@ -562,7 +552,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
 		if (!(header_offset & (PAGE_SIZE - 1))) {
 			union mlx5e_alloc_unit au;
 
-			err = mlx5e_page_alloc(rq, &au);
+			err = mlx5e_page_alloc_pool(rq, &au);
 			if (unlikely(err))
 				goto err_unmap;
 			page = dma_info->page = au.page;