Commit 26c5f03b authored by Feng Tang, committed by David S. Miller

net: alx: use custom skb allocator

This patch follows Eric Dumazet's commit 7b701764 for the Atheros
atl1c driver to fix exactly the same bug in the alx driver: the
network link is lost 1-5 minutes after the device is brought up.

My Lenovo Y580 laptop with an Atheros AR8161 ethernet device hit the
same problem with kernel 4.4, and it is cured by Jarod Wilson's
commit c406700c for the alx driver, which was merged in 4.5. But
there are still some alx devices that don't work well even with
Jarod's patch, while this patch makes them work fine. More details at
	https://bugzilla.kernel.org/show_bug.cgi?id=70761

Debugging shows the issue is very likely related to the RX DMA
address, specifically one ending in 0x...f80: if the RX buffer gets
an address ending in 0x...f80 several times, an RX overflow error
occurs and the device stops working.

For kernel 4.5.0 with Jarod's patch, which works fine with my
AR8161/Lenovo Y580, if I change the
	__netdev_alloc_skb
		--> __alloc_page_frag()
path so that the allocated buffer gets an address ending in 0x...f80,
the same error happens. If I force it to 0x...f40 or 0x...fc0 instead,
everything is still fine. So I tend to believe that addresses ending
in 0x...f80 cause the silicon to behave abnormally.
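
(Illustration only, not part of this patch: the suspect pattern can be
spotted with a hypothetical helper like the one below, applied to the
address returned by dma_map_single() in alx_refill_rx_ring(); the 0xfff
mask just extracts the low 12 bits of the mapped RX buffer address.)

	static bool alx_rx_dma_addr_is_suspect(dma_addr_t dma)
	{
		/* failures were observed when the mapped RX buffer
		 * address ended in 0x...f80
		 */
		return (dma & 0xfff) == 0xf80;
	}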

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=70761
Cc: Eric Dumazet <edumazet@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Jarod Wilson <jarod@redhat.com>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Tested-by: Ole Lukoie <olelukoie@mail.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f6988cb6
@@ -96,6 +96,10 @@ struct alx_priv {
 	unsigned int rx_ringsz;
 	unsigned int rxbuf_size;
 
+	struct page *rx_page;
+	unsigned int rx_page_offset;
+	unsigned int rx_frag_size;
+
 	struct napi_struct napi;
 	struct alx_tx_queue txq;
 	struct alx_rx_queue rxq;
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
 	}
 }
 
+static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	struct page *page;
+
+	if (alx->rx_frag_size > PAGE_SIZE)
+		return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+
+	page = alx->rx_page;
+	if (!page) {
+		alx->rx_page = page = alloc_page(gfp);
+		if (unlikely(!page))
+			return NULL;
+		alx->rx_page_offset = 0;
+	}
+
+	skb = build_skb(page_address(page) + alx->rx_page_offset,
+			alx->rx_frag_size);
+	if (likely(skb)) {
+		alx->rx_page_offset += alx->rx_frag_size;
+		if (alx->rx_page_offset >= PAGE_SIZE)
+			alx->rx_page = NULL;
+		else
+			get_page(page);
+	}
+	return skb;
+}
+
+
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
 	struct alx_rx_queue *rxq = &alx->rxq;
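
(To make the new allocator's behaviour concrete, here is a short
walk-through under assumed typical sizes -- PAGE_SIZE 4096 and an
rx_frag_size of 2048 as computed below for a 1500 MTU; both depend on
the kernel configuration.)

	/*
	 * Illustration only, not part of the patch:
	 *
	 *   1st call: rx_page == NULL -> alloc_page(), build_skb() at page
	 *             offset 0x000, rx_page_offset becomes 2048, get_page()
	 *             keeps the page alive for the next caller
	 *   2nd call: build_skb() at page offset 0x800, rx_page_offset
	 *             becomes 4096, rx_page is set to NULL so the next call
	 *             starts a fresh page
	 *
	 * Buffer starts are therefore always multiples of rx_frag_size
	 * within a page, so the low 12 bits of the RX buffer address can
	 * never be the problematic 0x...f80 pattern.
	 */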
@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 	while (!cur_buf->skb && next != rxq->read_idx) {
 		struct alx_rfd *rfd = &rxq->rfd[cur];
 
-		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+		skb = alx_alloc_skb(alx, gfp);
 		if (!skb)
 			break;
 		dma = dma_map_single(&alx->hw.pdev->dev,
@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
 	}
 
+
 	return count;
 }
 
@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
 	kfree(alx->txq.bufs);
 	kfree(alx->rxq.bufs);
 
+	if (alx->rx_page) {
+		put_page(alx->rx_page);
+		alx->rx_page = NULL;
+	}
+
 	dma_free_coherent(&alx->hw.pdev->dev,
 			  alx->descmem.size,
 			  alx->descmem.virt,
@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
 				  alx->dev->name, alx);
 		if (!err)
 			goto out;
+
 		/* fall back to legacy interrupt */
 		pci_disable_msi(alx->hw.pdev);
 	}
@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
 	struct pci_dev *pdev = alx->hw.pdev;
 	struct alx_hw *hw = &alx->hw;
 	int err;
+	unsigned int head_size;
 
 	err = alx_identify_hw(alx);
 	if (err) {
@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
 
 	hw->smb_timer = 400;
 	hw->mtu = alx->dev->mtu;
+
 	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
+
 	alx->tx_ringsz = 256;
 	alx->rx_ringsz = 512;
 	hw->imt = 200;
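
(For reference, with the default 1500 MTU this works out roughly as
follows on a typical x86_64 build with 64-byte cache lines; NET_SKB_PAD
and sizeof(struct skb_shared_info) vary with the kernel configuration,
so the exact numbers are illustrative.)

	rxbuf_size   = ALX_MAX_FRAME_LEN(1500)                        ~ 1528
	head_size    = SKB_DATA_ALIGN(1528 + 64 /* NET_SKB_PAD */)    ~ 1600
	             + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) ~  320
	rx_frag_size = roundup_pow_of_two(~1920)                      = 2048

i.e. two RX buffers per 4 KiB page, which is what the walk-through after
alx_alloc_skb() above assumes.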
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 {
 	struct alx_priv *alx = netdev_priv(netdev);
 	int max_frame = ALX_MAX_FRAME_LEN(mtu);
+	unsigned int head_size;
 
 	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
 	    (max_frame > ALX_MAX_FRAME_SIZE))
@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 	netdev->mtu = mtu;
 	alx->hw.mtu = mtu;
 	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
 	netdev_update_features(netdev);
 	if (netif_running(netdev))
 		alx_reinit(alx);