Commit c519fe9a authored by Shannon Nelson, committed by David S. Miller

bnxt: add dma mapping attributes

On the SPARC platform we need to use the DMA_ATTR_WEAK_ORDERING attribute
in our Rx path dma mapping in order to get the expected performance out
of the receive path.  Adding it to the Tx path has little effect, so
that's not a part of this patch.
Signed-off-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Tushar Dave <tushar.n.dave@oracle.com>
Reviewed-by: Tom Saeger <tom.saeger@oracle.com>
Acked-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9efa27bf
...@@ -582,7 +582,8 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, ...@@ -582,7 +582,8 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
if (!page) if (!page)
return NULL; return NULL;
*mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir); *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(dev, *mapping)) { if (dma_mapping_error(dev, *mapping)) {
__free_page(page); __free_page(page);
return NULL; return NULL;
...@@ -601,8 +602,9 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, ...@@ -601,8 +602,9 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
if (!data) if (!data)
return NULL; return NULL;
*mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset, *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
bp->rx_buf_use_size, bp->rx_dir); bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) { if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data); kfree(data);
...@@ -705,8 +707,9 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, ...@@ -705,8 +707,9 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return -ENOMEM; return -ENOMEM;
} }
mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE, mapping = dma_map_page_attrs(&pdev->dev, page, offset,
PCI_DMA_FROMDEVICE); BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, mapping)) { if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page); __free_page(page);
return -EIO; return -EIO;
...@@ -799,7 +802,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, ...@@ -799,7 +802,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL; return NULL;
} }
dma_addr -= bp->rx_dma_offset; dma_addr -= bp->rx_dma_offset;
dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir); dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload)) if (unlikely(!payload))
payload = eth_get_headlen(data_ptr, len); payload = eth_get_headlen(data_ptr, len);
...@@ -841,8 +845,8 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, ...@@ -841,8 +845,8 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
} }
skb = build_skb(data, 0); skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
bp->rx_dir); bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) { if (!skb) {
kfree(data); kfree(data);
return NULL; return NULL;
...@@ -909,8 +913,9 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, ...@@ -909,8 +913,9 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
return NULL; return NULL;
} }
dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE,
DMA_ATTR_WEAK_ORDERING);
skb->data_len += frag_len; skb->data_len += frag_len;
skb->len += frag_len; skb->len += frag_len;
...@@ -1329,8 +1334,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, ...@@ -1329,8 +1334,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->mapping = new_mapping; tpa_info->mapping = new_mapping;
skb = build_skb(data, 0); skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_dir); bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (!skb) { if (!skb) {
kfree(data); kfree(data);
...@@ -1971,9 +1977,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) ...@@ -1971,9 +1977,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!data) if (!data)
continue; continue;
dma_unmap_single(&pdev->dev, tpa_info->mapping, dma_unmap_single_attrs(&pdev->dev,
bp->rx_buf_use_size, tpa_info->mapping,
bp->rx_dir); bp->rx_buf_use_size,
bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
tpa_info->data = NULL; tpa_info->data = NULL;
...@@ -1993,13 +2001,15 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) ...@@ -1993,13 +2001,15 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (BNXT_RX_PAGE_MODE(bp)) { if (BNXT_RX_PAGE_MODE(bp)) {
mapping -= bp->rx_dma_offset; mapping -= bp->rx_dma_offset;
dma_unmap_page(&pdev->dev, mapping, dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir); PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
__free_page(data); __free_page(data);
} else { } else {
dma_unmap_single(&pdev->dev, mapping, dma_unmap_single_attrs(&pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_buf_use_size,
bp->rx_dir); bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
kfree(data); kfree(data);
} }
} }
...@@ -2012,8 +2022,10 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) ...@@ -2012,8 +2022,10 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!page) if (!page)
continue; continue;
dma_unmap_page(&pdev->dev, rx_agg_buf->mapping, dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE,
DMA_ATTR_WEAK_ORDERING);
rx_agg_buf->page = NULL; rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap); __clear_bit(j, rxr->rx_agg_bmap);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment