Commit 13c822f6 authored by John Crispin's avatar John Crispin Committed by David S. Miller

net: mediatek: fix stop and wakeup of queue

The driver supports 2 MACs. Both run on the same DMA ring. If we go
above/below the TX ring's threshold value, we always need to wake/stop
the queue of both devices. Not doing so can cause TX stalls and packet
drops on one of the devices.
Signed-off-by: John Crispin <blogic@openwrt.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 13439eec
...@@ -684,6 +684,28 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb) ...@@ -684,6 +684,28 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
return nfrags; return nfrags;
} }
/* Wake the TX queue of every registered MAC. Both MACs share one DMA
 * ring, so ring-threshold events must be propagated to all of them.
 */
static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netif_wake_queue(eth->netdev[i]);
}
/* Stop the TX queue of every registered MAC. Both MACs share one DMA
 * ring, so a full ring must stop transmission on all of them.
 */
static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netif_stop_queue(eth->netdev[i]);
}
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct mtk_mac *mac = netdev_priv(dev); struct mtk_mac *mac = netdev_priv(dev);
...@@ -695,7 +717,7 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -695,7 +717,7 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_num = mtk_cal_txd_req(skb); tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_stop_queue(dev); mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev, netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n"); "Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
...@@ -720,10 +742,10 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -720,10 +742,10 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop; goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) { if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
netif_stop_queue(dev); mtk_stop_queue(eth);
if (unlikely(atomic_read(&ring->free_count) > if (unlikely(atomic_read(&ring->free_count) >
ring->thresh)) ring->thresh))
netif_wake_queue(dev); mtk_wake_queue(eth);
} }
return NETDEV_TX_OK; return NETDEV_TX_OK;
...@@ -897,13 +919,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) ...@@ -897,13 +919,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
if (!total) if (!total)
return 0; return 0;
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i] ||
unlikely(!netif_queue_stopped(eth->netdev[i])))
continue;
if (atomic_read(&ring->free_count) > ring->thresh) if (atomic_read(&ring->free_count) > ring->thresh)
netif_wake_queue(eth->netdev[i]); mtk_wake_queue(eth);
}
return total; return total;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment