Commit 578fcfd2 authored by Somnath Kotur, committed by Jakub Kicinski

bnxt_en: Let the page pool manage the DMA mapping

Use the page pool's ability to maintain DMA mappings for us.
This avoids re-mapping of the recycled pages.

Link: https://lore.kernel.org/netdev/20230728231829.235716-4-michael.chan@broadcom.com/
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20230817231911.165035-3-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 86b05508
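The change below relies on two page pool features: PP_FLAG_DMA_MAP makes the pool map each page once at allocation time and remember the address, and PP_FLAG_DMA_SYNC_DEV makes the pool sync recycled pages for the device, so the per-buffer dma_map_page_attrs()/dma_unmap_page_attrs() calls disappear from the driver. A minimal sketch of that API, not bnxt code; the function names, pool size, and DMA direction here are illustrative:

#include <net/page_pool.h>	/* moved to <net/page_pool/helpers.h> on newer kernels */

static struct page_pool *rx_pool_create(struct device *dev)
{
	struct page_pool_params pp = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = 1024,		/* illustrative: size to the RX ring */
		.nid       = NUMA_NO_NODE,
		.dev       = dev,
		.dma_dir   = DMA_FROM_DEVICE,
		.max_len   = PAGE_SIZE,		/* upper bound for the device sync */
	};

	/* Returns ERR_PTR() on failure. */
	return page_pool_create(&pp);
}

/* Refill path: the page comes back already mapped; just read the address. */
static dma_addr_t rx_page_dma(struct page_pool *pool, struct page **pagep)
{
	*pagep = page_pool_dev_alloc_pages(pool);
	return *pagep ? page_pool_get_dma_addr(*pagep) : 0;
}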
@@ -761,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 unsigned int *offset,
 					 gfp_t gfp)
 {
-	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -774,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
-				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(dev, *mapping)) {
-		page_pool_recycle_direct(rxr->page_pool, page);
-		return NULL;
-	}
+	*mapping = page_pool_get_dma_addr(page) + *offset;
 	return page;
 }
@@ -998,8 +992,8 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
@@ -1032,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1149,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 			return 0;
 		}
 
-		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-				     bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
+		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+					bp->rx_dir);
 
 		total_frag_len += frag_len;
 		prod = NEXT_RX_AGG(prod);
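The three hunks above all make the same substitution: because the pool now owns the mapping for the page's whole lifetime, the RX completion path must not unmap, only sync the buffer back to the CPU before parsing it. A hedged sketch of that pattern, with illustrative names, not bnxt code:

#include <linux/dma-mapping.h>
#include <linux/mm.h>	/* page_address() */

/* RX completion: make the DMA'd bytes visible to the CPU without
 * tearing down the mapping; the pool unmaps only on final release.
 */
static void *rx_begin_cpu_access(struct device *dev, struct page *page,
				 dma_addr_t dma, unsigned int len)
{
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	return page_address(page);	/* now safe to read and build the skb from */
}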
@@ -2947,10 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
-			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping,
-					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
 			dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -2971,9 +2960,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
 		rx_agg_buf->page = NULL;
 		__clear_bit(i, rxr->rx_agg_bmap);
 
@@ -3205,7 +3191,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.nid = dev_to_node(&bp->pdev->dev);
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
-	pp.dma_dir = DMA_BIDIRECTIONAL;
+	pp.dma_dir = bp->rx_dir;
+	pp.max_len = PAGE_SIZE;
+	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
 		pp.flags |= PP_FLAG_PAGE_FRAG;
 
...
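With pp.flags set to PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV and pp.dma_dir matched to bp->rx_dir, recycling a page through page_pool_recycle_direct() also covers the sync back to the device (bounded by pp.max_len), which is why the two bnxt_free_one_rx_ring_skbs() hunks can drop their unmap calls outright. A sketch of the teardown side under the same assumptions, with illustrative function names:

/* Returning a buffer needs no unmap; the pool re-syncs the page
 * for the device before handing it out again.
 */
static void rx_page_free(struct page_pool *pool, struct page *page)
{
	page_pool_recycle_direct(pool, page);
}

/* Final teardown: page_pool_destroy() unmaps cached pages and defers
 * full release until any in-flight pages have been returned.
 */
static void rx_pool_teardown(struct page_pool *pool)
{
	page_pool_destroy(pool);
}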