Commit 3a68f19d authored by Ben Hutchings

sfc: Properly sync RX DMA buffer when it is not the last in the page

We may currently allocate two RX DMA buffers to a page, and only unmap
the page when the second is completed.  We do not sync the first RX
buffer to be completed; this can result in packet loss or corruption
if the last RX buffer completed in a NAPI poll is the first in a page
and is not DMA-coherent.  (In the middle of a NAPI poll, we will
handle the following RX completion and unmap the page *before* looking
at the content of the first buffer.)
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent eb970ff0
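For context, the pattern the fix introduces can be illustrated in isolation. The following is a minimal sketch, not part of this commit: it assumes a page carrying two RX buffers, where only the buffer that finishes the page tears down the DMA mapping with dma_unmap_page(), and any earlier buffer is made visible to the CPU with dma_sync_single_for_cpu() over just the bytes the NIC actually wrote. The helper name and parameters (example_unmap_or_sync, last_in_page, used_len) are illustrative, not driver API.

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Illustrative only: mirrors the unmap-vs-sync decision in the diff below.
 * "last_in_page" stands in for the driver's per-page accounting of how many
 * RX buffers still reference the mapped page.
 */
static void example_unmap_or_sync(struct device *dev, dma_addr_t page_dma,
				  size_t page_len, dma_addr_t buf_dma,
				  unsigned int used_len, bool last_in_page)
{
	if (last_in_page) {
		/* No other buffer still uses this page: drop the mapping,
		 * which also hands ownership of the data back to the CPU.
		 */
		dma_unmap_page(dev, page_dma, page_len, DMA_FROM_DEVICE);
	} else if (used_len) {
		/* The page stays mapped for the other buffer, so sync only
		 * the bytes received into this buffer; without this, a
		 * non-DMA-coherent platform may read stale data.
		 */
		dma_sync_single_for_cpu(dev, buf_dma, used_len,
					DMA_FROM_DEVICE);
	}
}

In the diff below, efx_rx_packet() passes the received length as used_len so only the completed packet is synced, while efx_fini_rx_buffer() passes 0 because the buffer contents are about to be freed and never read.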
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       DMA_FROM_DEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.