Commit 92bf2008 authored by Tino Reichardt, committed by David S. Miller

net: via-rhine: add BQL support

Add Byte Queue Limits (BQL) support to via-rhine driver.

[edumazet] tweaked patch and changed TX_RING_SIZE from 16 to 64
Signed-off-by: Tino Reichardt <milky-kernel@mcmilk.de>
Tested-by: Jamie Gloudon <jamie.gloudon@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 49e64dcd
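For reference, BQL in a driver comes down to three helpers from <linux/netdevice.h>, and the diff below wires exactly this pattern into via-rhine. A minimal sketch of that pattern follows; the example_* functions and their bodies are hypothetical placeholders, only the netdev_*_queue() calls are the real API:

#include <linux/netdevice.h>

/* Reset BQL accounting whenever the TX ring is (re)initialized,
 * e.g. at open or after a chip reset. */
static void example_ring_init(struct net_device *dev)
{
	/* ... set up TX descriptors ... */
	netdev_reset_queue(dev);
}

/* Transmit path: report every byte handed to the hardware. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map skb and fill a TX descriptor ... */
	netdev_sent_queue(dev, skb->len);
	/* ... tell the NIC to start transmitting ... */
	return NETDEV_TX_OK;
}

/* Completion path: report packets/bytes the hardware has finished,
 * batched once per cleanup pass. */
static void example_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... for each completed descriptor:
	 *         bytes_compl += skb->len;
	 *         pkts_compl++;            ... */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

The dev-level helpers operate on TX queue 0, which suits a single-queue NIC like the Rhine; multi-queue drivers use the per-queue netdev_tx_*_queue() variants on each struct netdev_queue instead.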
@@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32;
 /* Operational parameters that are set at compile time. */
 
 /* Keep the ring sizes a power of two for compile efficiency.
-   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
-   Making the Tx ring too large decreases the effectiveness of channel
-   bonding and packet priority.
-   There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE	16
-#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
+ * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ * Making the Tx ring too large decreases the effectiveness of channel
+ * bonding and packet priority.
+ * With BQL support, we can increase TX ring safely.
+ * There are no ill effects from too-large receive rings.
+ */
+#define TX_RING_SIZE	64
+#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
 #define RX_RING_SIZE	64
 
 /* Operational parameters that usually are not changed. */
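As the comment in the hunk above notes, keeping the ring size a power of two lets the compiler turn the ring-index modulo into a bit mask. A tiny illustration (the values are arbitrary, only the equivalence matters):

/* For unsigned operands and a power-of-two size, these two are equivalent: */
unsigned int dirty_tx = 67;
unsigned int a = dirty_tx % 64;		/* what the driver writes */
unsigned int b = dirty_tx & (64 - 1);	/* what the compiler may emit; both equal 3 */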
@@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev)
 	}
 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
+	netdev_reset_queue(dev);
 }
 
 static void free_tbufs(struct net_device* dev)
@@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	else
 		rp->tx_ring[entry].tx_status = 0;
 
+	netdev_sent_queue(dev, skb->len);
 	/* lock eth irq */
 	wmb();
 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
@@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	struct device *hwdev = dev->dev.parent;
 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+	struct sk_buff *skb;
 
 	/* find and cleanup dirty tx descriptors */
 	while (rp->dirty_tx != rp->cur_tx) {
@@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev)
 			  entry, txstatus);
 		if (txstatus & DescOwn)
 			break;
+		skb = rp->tx_skbuff[entry];
 		if (txstatus & 0x8000) {
 			netif_dbg(rp, tx_done, dev,
 				  "Transmit error, Tx status %08x\n", txstatus);
@@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev)
 				   (txstatus >> 3) & 0xF, txstatus & 0xF);
 
 			u64_stats_update_begin(&rp->tx_stats.syncp);
-			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+			rp->tx_stats.bytes += skb->len;
 			rp->tx_stats.packets++;
 			u64_stats_update_end(&rp->tx_stats.syncp);
 		}
@@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev)
 		if (rp->tx_skbuff_dma[entry]) {
 			dma_unmap_single(hwdev,
 					 rp->tx_skbuff_dma[entry],
-					 rp->tx_skbuff[entry]->len,
+					 skb->len,
 					 DMA_TO_DEVICE);
 		}
-		dev_consume_skb_any(rp->tx_skbuff[entry]);
+		bytes_compl += skb->len;
+		pkts_compl++;
+		dev_consume_skb_any(skb);
 		rp->tx_skbuff[entry] = NULL;
 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
 	}
+
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
 		netif_wake_queue(dev);
 }
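Once these hooks are in place, the BQL state for the device should be visible under /sys/class/net/<iface>/queues/tx-0/byte_queue_limits/ (limit, limit_min, limit_max, inflight), which is a convenient way to confirm that BQL is actually engaged after this change.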