Commit 6f19e12f authored by David S. Miller

igb: flush when in xmit_more mode and under descriptor pressure

Mirror the changes made to ixgbe in commit 2367a173
("ixgbe: flush when in xmit_more mode and under descriptor pressure")
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2367a173
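In short, descriptor accounting moves into igb_tx_map(): before deciding whether to write the tail register, the driver stops the subqueue if a worst-case next frame would not fit, and it then flushes either when the stack is done batching (xmit_more is clear) or when the queue had to be stopped under descriptor pressure, so a stopped queue is never left holding unflushed descriptors. A minimal sketch of that decision, using only helpers that appear in the patch below (the wrapper name and its parameters are illustrative, not code from the patch):

	/* Sketch only: the post-patch tail-write decision in igb_tx_map().
	 * 'i' is the next_to_use index that was just advanced on the ring.
	 */
	static void igb_tx_flush_sketch(struct igb_ring *tx_ring,
					struct sk_buff *skb, u16 i)
	{
		/* Stop the subqueue now if the next send might not fit;
		 * this may leave the queue marked as stopped.
		 */
		igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

		/* Flush to hardware if the queue was stopped under descriptor
		 * pressure, or if the stack has no further frames pending
		 * behind this one (xmit_more not set).
		 */
		if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
			writel(i, tx_ring->tail);
	}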
@@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	struct net_device *netdev = tx_ring->netdev;
+
+	netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (igb_desc_unused(tx_ring) < size)
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
+	return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	if (igb_desc_unused(tx_ring) >= size)
+		return 0;
+	return __igb_maybe_stop_tx(tx_ring, size);
+}
+
 static void igb_tx_map(struct igb_ring *tx_ring,
 		       struct igb_tx_buffer *first,
 		       const u8 hdr_len)
@@ -4915,7 +4950,10 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 
 	tx_ring->next_to_use = i;
 
-	if (!skb->xmit_more) {
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
 
 		/* we need this if more than one processor can write to our tail
@@ -4942,41 +4980,6 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 	tx_ring->next_to_use = i;
 }
 
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
-	struct net_device *netdev = tx_ring->netdev;
-
-	netif_stop_subqueue(netdev, tx_ring->queue_index);
-
-	/* Herbert's original patch had:
-	 *  smp_mb__after_netif_stop_queue();
-	 * but since that doesn't exist yet, just open code it.
-	 */
-	smp_mb();
-
-	/* We need to check again in a case another CPU has just
-	 * made room available.
-	 */
-	if (igb_desc_unused(tx_ring) < size)
-		return -EBUSY;
-
-	/* A reprieve! */
-	netif_wake_subqueue(netdev, tx_ring->queue_index);
-
-	u64_stats_update_begin(&tx_ring->tx_syncp2);
-	tx_ring->tx_stats.restart_queue2++;
-	u64_stats_update_end(&tx_ring->tx_syncp2);
-
-	return 0;
-}
-
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
-	if (igb_desc_unused(tx_ring) >= size)
-		return 0;
-	return __igb_maybe_stop_tx(tx_ring, size);
-}
-
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
@@ -5047,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 
 	igb_tx_map(tx_ring, first, hdr_len);
 
-	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop: