Commit ee9e4956 authored by Ezequiel Garcia, committed by David S. Miller

net: mv643xx_eth: Limit the TSO segments and adjust stop/wake thresholds

Currently, small MSS values may require more TSO descriptors than the
default queue size provides. This commit prevents that situation by
fixing the maximum number of supported TSO segments to 100 and by
enforcing a minimum Tx queue size. The minimum Tx queue size is set
so that at least two worst-case skbs can be accommodated.

In addition, the queue stop and wake threshold values are adjusted
accordingly: the queue is stopped when there is room left for only one
worst-case skb, and woken when the number of used descriptors drops to
half the stop threshold.

Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dd11680d
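
For concreteness, here is the arithmetic behind these limits as a small
standalone C sketch. It is not driver code, and the MAX_SKB_FRAGS value of
17 is an assumption (its usual value on 4 KiB-page kernels of this era);
the ring size shown is the minimum the patch enforces.

    #include <stdio.h>

    /* Values from the patch. MAX_SKB_FRAGS is an assumption here: 17 is
     * its usual value on 4 KiB-page kernels of this era. */
    #define MAX_SKB_FRAGS         17
    #define MV643XX_MAX_TSO_SEGS  100
    #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

    int main(void)
    {
            /* Smallest ring the patch allows: two worst-case skbs. */
            int tx_ring_size = MV643XX_MAX_SKB_DESCS * 2;                  /* 434 */
            /* Stop when only one worst-case skb still fits... */
            int tx_stop_threshold = tx_ring_size - MV643XX_MAX_SKB_DESCS;  /* 217 */
            /* ...and wake once the used count drops to half of that. */
            int tx_wake_threshold = tx_stop_threshold / 2;                 /* 108 */

            printf("worst-case skb: %d descriptors\n", MV643XX_MAX_SKB_DESCS);
            printf("min ring %d: stop at %d used, wake at %d used\n",
                   tx_ring_size, tx_stop_threshold, tx_wake_threshold);
            return 0;
    }

Run, this reports a worst-case skb of 217 descriptors, a minimum ring of
434 entries, a stop threshold of 217 used descriptors, and a wake
threshold of 108.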
drivers/net/ethernet/marvell/mv643xx_eth.c

@@ -185,6 +185,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 
 #define TSO_HEADER_SIZE		128
 
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
 /*
  * RX/TX descriptors.
  */
@@ -348,6 +352,9 @@ struct tx_queue {
 	int tx_curr_desc;
 	int tx_used_desc;
 
+	int tx_stop_threshold;
+	int tx_wake_threshold;
+
 	char *tso_hdrs;
 	dma_addr_t tso_hdrs_dma;
 
@@ -497,7 +504,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
 
 	if (netif_tx_queue_stopped(nq)) {
 		__netif_tx_lock(nq, smp_processor_id());
-		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+		if (txq->tx_desc_count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
 		__netif_tx_unlock(nq);
 	}
@@ -897,7 +904,8 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 	}
 }
 
-static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+			  struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -910,11 +918,15 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 	cmd_sts = 0;
 	l4i_chk = 0;
 
+	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+		if (net_ratelimit())
+			netdev_err(dev, "tx queue full?!\n");
+		return -EBUSY;
+	}
+
 	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
-	if (ret) {
-		dev_kfree_skb_any(skb);
+	if (ret)
 		return ret;
-	}
 
 	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 
 	tx_index = txq->tx_curr_desc++;
@@ -972,28 +984,17 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
-		if (net_ratelimit())
-			netdev_err(dev, "tx queue full?!\n");
-		txq->tx_dropped++;
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
 	length = skb->len;
 
 	if (skb_is_gso(skb))
 		ret = txq_submit_tso(txq, skb, dev);
 	else
-		ret = txq_submit_skb(txq, skb);
+		ret = txq_submit_skb(txq, skb, dev);
 	if (!ret) {
-		int entries_left;
-
 		txq->tx_bytes += length;
 		txq->tx_packets++;
 
-		entries_left = txq->tx_ring_size - txq->tx_desc_count;
-		if (entries_left < MAX_SKB_FRAGS + 1)
+		if (txq->tx_desc_count >= txq->tx_stop_threshold)
 			netif_tx_stop_queue(nq);
 	} else if (ret == -EBUSY) {
 		return NETDEV_TX_BUSY;
@@ -1617,7 +1618,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
 		return -EINVAL;
 
 	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
-	mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
+	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+				   MV643XX_MAX_SKB_DESCS * 2, 4096);
+	if (mp->tx_ring_size != er->tx_pending)
+		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+			    mp->tx_ring_size, er->tx_pending);
 
 	if (netif_running(dev)) {
 		mv643xx_eth_stop(dev);
@@ -1993,6 +1998,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 
 	txq->tx_ring_size = mp->tx_ring_size;
 
+	/* A queue must always have room for at least one skb.
+	 * Therefore, stop the queue when the free entries reaches
+	 * the maximum number of descriptors per skb.
+	 */
+	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
 	txq->tx_desc_count = 0;
 	txq->tx_curr_desc = 0;
 	txq->tx_used_desc = 0;
@@ -2852,6 +2864,7 @@ static void set_params(struct mv643xx_eth_private *mp,
 		       struct mv643xx_eth_platform_data *pd)
 {
 	struct net_device *dev = mp->dev;
+	unsigned int tx_ring_size;
 
 	if (is_valid_ether_addr(pd->mac_addr))
 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2866,9 +2879,16 @@ static void set_params(struct mv643xx_eth_private *mp,
 
 	mp->rxq_count = pd->rx_queue_count ? : 1;
 
-	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 	if (pd->tx_queue_size)
-		mp->tx_ring_size = pd->tx_queue_size;
+		tx_ring_size = pd->tx_queue_size;
+
+	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+				   MV643XX_MAX_SKB_DESCS * 2, 4096);
+	if (mp->tx_ring_size != tx_ring_size)
+		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+			    mp->tx_ring_size, tx_ring_size);
 
 	mp->tx_desc_sram_addr = pd->tx_sram_addr;
 	mp->tx_desc_sram_size = pd->tx_sram_size;
@@ -3095,6 +3115,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
 	dev->hw_features = dev->features;
 	dev->priv_flags |= IFF_UNICAST_FLT;
+	dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
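
The gap between the two thresholds acts as hysteresis: if the queue were
woken as soon as a single worst-case skb fit again, it could be stopped
and woken on nearly every reclaimed descriptor. The toy simulation below
illustrates the pattern; it is a sketch under the same MAX_SKB_FRAGS == 17
assumption, with made-up producer/consumer rates, not driver code.

    #include <stdio.h>

    /* Hypothetical standalone model of the stop/wake hysteresis above.
     * "used" stands in for txq->tx_desc_count, and the thresholds follow
     * txq_init() with the minimum ring of 434 entries (two worst-case
     * skbs of 217 descriptors, assuming MAX_SKB_FRAGS == 17). */
    int main(void)
    {
            const int worst_case = 217;
            const int ring = 2 * worst_case;        /* 434 */
            const int stop = ring - worst_case;     /* 217: one skb left */
            const int wake = stop / 2;              /* 108 */
            int used = 0, stopped = 0;

            for (int step = 0; step < 700; step++) {
                    if (!stopped)
                            used += 3;      /* xmit consumes descriptors */
                    if (step % 2)
                            used -= 1;      /* reclaim runs slower */
                    if (!stopped && used >= stop) {
                            stopped = 1;
                            printf("step %3d: used=%3d -> stop queue\n",
                                   step, used);
                    } else if (stopped && used <= wake) {
                            stopped = 0;
                            printf("step %3d: used=%3d -> wake queue\n",
                                   step, used);
                    }
            }
            return 0;
    }

With these rates the queue stops and wakes only a handful of times over
700 steps, rather than toggling on every reclaim.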