Commit cfbfd86b authored by Thomas Lendacky, committed by David S. Miller

amd-xgbe: Fix DMA API debug warning

When running a kernel configured with CONFIG_DMA_API_DEBUG=y a warning
is issued:
  DMA-API: device driver tries to sync DMA memory it has not allocated

This warning is the result of mapping the full range of the Rx buffer
pages allocated and then performing a dma_sync_single_for_cpu against
a calculated DMA address. The proper thing to do is to use the
dma_sync_single_range_for_cpu with a base DMA address and an offset.
Reported-by: Kim Phillips <kim.phillips@arm.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Kim Phillips <kim.phillips@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 95ec655b
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
get_page(pa->pages); get_page(pa->pages);
bd->pa = *pa; bd->pa = *pa;
bd->dma = pa->pages_dma + pa->pages_offset; bd->dma_base = pa->pages_dma;
bd->dma_off = pa->pages_offset;
bd->dma_len = len; bd->dma_len = len;
pa->pages_offset += len; pa->pages_offset += len;
......
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
unsigned int rx_usecs = pdata->rx_usecs; unsigned int rx_usecs = pdata->rx_usecs;
unsigned int rx_frames = pdata->rx_frames; unsigned int rx_frames = pdata->rx_frames;
unsigned int inte; unsigned int inte;
dma_addr_t hdr_dma, buf_dma;
if (!rx_usecs && !rx_frames) { if (!rx_usecs && !rx_frames) {
/* No coalescing, interrupt for every descriptor */ /* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
* Set buffer 2 (hi) address to buffer dma address (hi) and * Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE * set control bits OWN and INTE
*/ */
rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
......
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
/* Start with the header buffer which may contain just the header /* Start with the header buffer which may contain just the header
* or the header plus data * or the header plus data
*/ */
dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma, dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); rdata->rx.hdr.dma_off,
rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
packet = page_address(rdata->rx.hdr.pa.pages) + packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset; rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
len -= copy_len; len -= copy_len;
if (len) { if (len) {
/* Add the remaining data as a frag */ /* Add the remaining data as a frag */
dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma, dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_len, DMA_FROM_DEVICE); rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages, rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!skb) if (!skb)
error = 1; error = 1;
} else if (rdesc_len) { } else if (rdesc_len) {
dma_sync_single_for_cpu(pdata->dev, dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma, rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len, rdata->rx.buf.dma_len,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
......
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
struct xgbe_page_alloc pa; struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap; struct xgbe_page_alloc pa_unmap;
dma_addr_t dma; dma_addr_t dma_base;
unsigned long dma_off;
unsigned int dma_len; unsigned int dma_len;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.