Commit 0b7cfa40 authored by Aya Levin, committed by Saeed Mahameed

net/mlx5e: Fix page DMA map/unmap attributes

The driver initiates DMA syncs itself, hence it may skip the CPU sync. Add
DMA_ATTR_SKIP_CPU_SYNC as an input attribute to both dma_map_page and
dma_unmap_page to avoid a redundant sync with the CPU.
When the device is forced to work with SWIOTLB, the extra sync can cause
data corruption: the driver unmaps the whole page while the hardware used
only part of the bounce buffer, so the sync on unmap overwrites the entire
page with a bounce buffer that only partially contains real data.

Fixes: bc77b240 ("net/mlx5e: Add fragmented memory support for RX multi packet WQE")
Fixes: db05815b ("net/mlx5e: Add XSK zero-copy support")
Signed-off-by: Aya Levin <ayal@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 36595d8a
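
For illustration, a minimal sketch of the map/sync/unmap pattern this fix relies on, not the driver's actual code: the names rx_map_page, rx_cpu_owns_frag, rx_unmap_page, dev, frag_offset and frag_len are hypothetical. The page is mapped and unmapped with DMA_ATTR_SKIP_CPU_SYNC, and only the fragment the hardware actually wrote is synced for the CPU, which is why the implicit full-page sync on unmap is redundant and, under SWIOTLB, harmful.

#include <linux/dma-mapping.h>

/*
 * Illustrative only: map an RX page with DMA_ATTR_SKIP_CPU_SYNC, sync just
 * the used fragment for the CPU, and unmap with the same attribute so the
 * DMA core (including SWIOTLB) never copies the whole bounce page back.
 */
static dma_addr_t rx_map_page(struct device *dev, struct page *page)
{
	return dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
}

static void rx_cpu_owns_frag(struct device *dev, dma_addr_t addr,
			     unsigned int frag_offset, unsigned int frag_len)
{
	/* Driver-initiated sync of only the bytes the HW actually wrote. */
	dma_sync_single_range_for_cpu(dev, addr, frag_offset, frag_len,
				      DMA_FROM_DEVICE);
}

static void rx_unmap_page(struct device *dev, dma_addr_t addr)
{
	/* Skip the CPU sync on unmap too; the partial syncs above suffice. */
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}

With this pattern, a SWIOTLB bounce buffer is copied back only for the range that was explicitly synced, so the rest of the page is never overwritten by stale bounce-buffer contents.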
@@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
 {
 	struct device *dev = mlx5_core_dma_dev(priv->mdev);
 
-	return xsk_pool_dma_map(pool, dev, 0);
+	return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
 				 struct xsk_buff_pool *pool)
 {
-	return xsk_pool_dma_unmap(pool, 0);
+	return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
...
@@ -278,8 +278,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
 	if (unlikely(!dma_info->page))
 		return -ENOMEM;
 
-	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
-				      PAGE_SIZE, rq->buff.map_dir);
+	dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
+					    rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
 		page_pool_recycle_direct(rq->page_pool, dma_info->page);
 		dma_info->page = NULL;
@@ -300,7 +300,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
 
 void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
 {
-	dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
+	dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+			     DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
...