Commit 1c6d3d1d authored by Ben Hutchings's avatar Ben Hutchings Committed by Greg Kroah-Hartman

sfc: Fix memory leak when discarding scattered packets

[ Upstream commit 734d4e15 ]

Commit 2768935a ('sfc: reuse pages to avoid DMA mapping/unmapping
costs') did not fully take account of DMA scattering which was
introduced immediately before.  If a received packet is invalid and
must be discarded, we only drop a reference to the first buffer's
page, but we need to drop a reference for each buffer the packet
used.

I think this bug was missed partly because efx_recycle_rx_buffers()
was not renamed and so no longer does what its name says.  It does not
change the state of buffers, but only prepares the underlying pages
for recycling.  Rename it accordingly.
Signed-off-by: default avatarBen Hutchings <bhutchings@solarflare.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c3a54912
...@@ -282,7 +282,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, ...@@ -282,7 +282,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
} }
/* Recycle the pages that are used by buffers that have just been received. */ /* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_buffers(struct efx_channel *channel, static void efx_recycle_rx_pages(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, struct efx_rx_buffer *rx_buf,
unsigned int n_frags) unsigned int n_frags)
{ {
...@@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel, ...@@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel,
} while (--n_frags); } while (--n_frags);
} }
/* Discard an invalid received packet, dropping one page reference per
 * buffer it occupied.  Fixes a leak where only the first buffer's page
 * reference was dropped for DMA-scattered packets (see commit message:
 * "we need to drop a reference for each buffer the packet used").
 */
static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	/* Prepare the underlying pages for recycling first; per the commit
	 * message this helper only touches page state, not buffer state.
	 */
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Free every buffer the scattered packet used, not just the first.
	 * do-while: assumes n_frags >= 1 (a received packet always has at
	 * least one fragment) -- NOTE(review): confirm against callers.
	 */
	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}
/** /**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly * efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue * @rx_queue: RX descriptor queue
...@@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, ...@@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/ */
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
efx_rx_flush_packet(channel); efx_rx_flush_packet(channel);
put_page(rx_buf->page); efx_discard_rx_packet(channel, rx_buf, n_frags);
efx_recycle_rx_buffers(channel, rx_buf, n_frags);
return; return;
} }
...@@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, ...@@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
} }
/* All fragments have been DMA-synced, so recycle buffers and pages. */ /* All fragments have been DMA-synced, so recycle pages. */
rx_buf = efx_rx_buffer(rx_queue, index); rx_buf = efx_rx_buffer(rx_queue, index);
efx_recycle_rx_buffers(channel, rx_buf, n_frags); efx_recycle_rx_pages(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be /* Pipeline receives so that we give time for packet headers to be
* prefetched into cache. * prefetched into cache.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment