Commit daf37b55 authored by Jon Cooper's avatar Jon Cooper Committed by David S. Miller

sfc: PIO: Restrict to 64-bit arch and use 64-bit writes.

Fixes: ee45fd92
("sfc: Use TX PIO for sufficiently small packets")

The linux net driver uses memcpy_toio() in order to copy into
the PIO buffers.
Even on a 64-bit machine this causes 32-bit accesses to a write-
combined memory region.
There are hardware limitations that mean that only 64-bit
naturally aligned accesses are safe in all cases.
Because the region is write-combined, two 32-bit accesses may be
coalesced into a single 64-bit access that is not 64-bit aligned.
Solution was to open-code the memory copy routines using pointers
and to only enable PIO for x86_64 machines.

Not tested on platforms other than x86_64 because this patch
disables the PIO feature on other platforms.
Compile-tested on x86 to ensure that it works.

The WARN_ON_ONCE() code in the previous version of this patch
has been moved into the internal sfc debug driver as the
assertion was unnecessary in the upstream kernel code.

This bug fix applies to v3.13 and v3.14 stable branches.
Signed-off-by: Shradha Shah <sshah@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5882a07c
...@@ -66,10 +66,17 @@ ...@@ -66,10 +66,17 @@
#define EFX_USE_QWORD_IO 1 #define EFX_USE_QWORD_IO 1
#endif #endif
/* Hardware issue requires that only 64-bit naturally aligned writes
 * are seen by hardware. It's not strictly necessary to restrict to
* x86_64 arch, but done for safety since unusual write combining behaviour
* can break PIO.
*/
#ifdef CONFIG_X86_64
/* PIO is a win only if write-combining is possible */ /* PIO is a win only if write-combining is possible */
#ifdef ARCH_HAS_IOREMAP_WC #ifdef ARCH_HAS_IOREMAP_WC
#define EFX_USE_PIO 1 #define EFX_USE_PIO 1
#endif #endif
#endif
#ifdef EFX_USE_QWORD_IO #ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value, static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
......
...@@ -189,6 +189,18 @@ struct efx_short_copy_buffer { ...@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
u8 buf[L1_CACHE_BYTES]; u8 buf[L1_CACHE_BYTES];
}; };
/* Copy in explicit 64-bit writes.
 *
 * Replacement for memcpy_toio() on the PIO buffers: the hardware only
 * tolerates naturally aligned 64-bit accesses, and on a write-combined
 * mapping two 32-bit writes may be coalesced into one misaligned
 * 64-bit access (see commit message above). Each loop iteration issues
 * exactly one aligned 64-bit MMIO write via writeq().
 *
 * NOTE(review): any tail of len % 8 bytes is silently not copied, and
 * dest/src are assumed 64-bit aligned -- callers pass cache-line-sized
 * blocks, but confirm before reusing this helper elsewhere.
 */
static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
{
	u64 *src64 = src;		/* source: ordinary kernel memory */
	u64 __iomem *dest64 = dest;	/* destination: WC-mapped PIO buffer */
	size_t l64 = len / 8;		/* whole 64-bit words to transfer */
	size_t i;
	for (i = 0; i < l64; i++)
		writeq(src64[i], &dest64[i]);
}
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
* Advances piobuf pointer. Leaves additional data in the copy buffer. * Advances piobuf pointer. Leaves additional data in the copy buffer.
*/ */
...@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, ...@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
{ {
int block_len = len & ~(sizeof(copy_buf->buf) - 1); int block_len = len & ~(sizeof(copy_buf->buf) - 1);
memcpy_toio(*piobuf, data, block_len); efx_memcpy_64(*piobuf, data, block_len);
*piobuf += block_len; *piobuf += block_len;
len -= block_len; len -= block_len;
...@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, ...@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
if (copy_buf->used < sizeof(copy_buf->buf)) if (copy_buf->used < sizeof(copy_buf->buf))
return; return;
memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
*piobuf += sizeof(copy_buf->buf); *piobuf += sizeof(copy_buf->buf);
data += copy_to_buf; data += copy_to_buf;
len -= copy_to_buf; len -= copy_to_buf;
...@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, ...@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
{ {
/* if there's anything in it, write the whole buffer, including junk */ /* if there's anything in it, write the whole buffer, including junk */
if (copy_buf->used) if (copy_buf->used)
memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
} }
/* Traverse skb structure and copy fragments in to PIO buffer. /* Traverse skb structure and copy fragments in to PIO buffer.
...@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) ...@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
*/ */
BUILD_BUG_ON(L1_CACHE_BYTES > BUILD_BUG_ON(L1_CACHE_BYTES >
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
memcpy_toio(tx_queue->piobuf, skb->data, efx_memcpy_64(tx_queue->piobuf, skb->data,
ALIGN(skb->len, L1_CACHE_BYTES)); ALIGN(skb->len, L1_CACHE_BYTES));
} }
EFX_POPULATE_QWORD_5(buffer->option, EFX_POPULATE_QWORD_5(buffer->option,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment