Commit 980f1404 authored by Lino Sanfilippo, committed by David S. Miller

net: ethernet: sxgbe: remove private tx queue lock

The driver uses a private lock to synchronize the xmit function and the
xmit completion handler, but since the NETIF_F_LLTX flag is not set, the
xmit function is also called with the xmit_lock held.

The completion handler, on the other hand, uses the reverse locking order:
it first takes the private lock and then (in case the tx queue had been
stopped) the xmit_lock, which can deadlock against a concurrent transmit.

Improve the locking by removing the private lock and using only the
xmit_lock for synchronization instead.
Signed-off-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c280b482
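
The patch below drops the driver-private tx_lock and relies on the per-queue lock that the networking core already takes around ndo_start_xmit() whenever NETIF_F_LLTX is not set. As a rough, condensed sketch of the resulting completion-handler pattern (not the exact driver code: the local-variable setup and the descriptor reclaim loop are abbreviated, the driver-private types and helpers come from the sxgbe driver itself, and the field names used for the setup are condensed/assumed):

#include <linux/netdevice.h>

/*
 * Sketch only: serialize the completion handler against sxgbe_xmit() by
 * taking the same per-queue lock the core holds around ndo_start_xmit().
 */
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;	/* field names condensed/assumed */
	unsigned int tx_rsize = priv->dma_tx_size;		/* ring size, as used by sxgbe_tx_avail() */
	struct netdev_queue *dev_txq;

	dev_txq = netdev_get_tx_queue(priv->dev, tqueue->queue_no);

	/* Same lock the core takes before calling the driver's xmit routine. */
	__netif_tx_lock(dev_txq, smp_processor_id());

	/* ... reclaim completed descriptors, advancing tqueue->dirty_tx ... */

	/* Wake the queue under the same lock; no second lock is needed. */
	if (netif_tx_queue_stopped(dev_txq) &&
	    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))
		netif_tx_wake_queue(dev_txq);

	__netif_tx_unlock(dev_txq);
}

Because the core already holds this per-queue lock when it calls the driver's xmit routine, the transmit path itself needs no extra locking, which is why the sxgbe_xmit() hunks below only delete code.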
@@ -384,7 +384,6 @@ struct sxgbe_tx_queue {
 	dma_addr_t *tx_skbuff_dma;
 	struct sk_buff **tx_skbuff;
 	struct timer_list txtimer;
-	spinlock_t tx_lock;	/* lock for tx queues */
 	unsigned int cur_tx;
 	unsigned int dirty_tx;
 	u32 tx_count_frames;
@@ -426,9 +426,6 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
 	tx_ring->dirty_tx = 0;
 	tx_ring->cur_tx = 0;
 
-	/* initialise TX queue lock */
-	spin_lock_init(&tx_ring->tx_lock);
-
 	return 0;
 
 dmamem_err:
@@ -743,7 +740,7 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
 
 	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
 
-	spin_lock(&tqueue->tx_lock);
+	__netif_tx_lock(dev_txq, smp_processor_id());
 
 	priv->xstats.tx_clean++;
 	while (tqueue->dirty_tx != tqueue->cur_tx) {
@@ -781,18 +778,13 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
 
 	/* wake up queue */
 	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
-		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
-		netif_tx_lock(priv->dev);
-		if (netif_tx_queue_stopped(dev_txq) &&
-		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
-			if (netif_msg_tx_done(priv))
-				pr_debug("%s: restart transmit\n", __func__);
-			netif_tx_wake_queue(dev_txq);
-		}
-		netif_tx_unlock(priv->dev);
+	    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
+		if (netif_msg_tx_done(priv))
+			pr_debug("%s: restart transmit\n", __func__);
+		netif_tx_wake_queue(dev_txq);
 	}
 
-	spin_unlock(&tqueue->tx_lock);
+	__netif_tx_unlock(dev_txq);
 }
 
 /**
@@ -1304,9 +1296,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 		      tqueue->hwts_tx_en)))
 		ctxt_desc_req = 1;
 
-	/* get the spinlock */
-	spin_lock(&tqueue->tx_lock);
-
 	if (priv->tx_path_in_lpi_mode)
 		sxgbe_disable_eee_mode(priv);
 
@@ -1316,8 +1305,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
 				   __func__, txq_index);
 		}
-		/* release the spin lock in case of BUSY */
-		spin_unlock(&tqueue->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1436,8 +1423,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
 
-	spin_unlock(&tqueue->tx_lock);
-
 	return NETDEV_TX_OK;
 }
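
For reference, the per-queue helpers used above behave roughly like this. This is a simplified sketch of include/linux/netdevice.h from that era, not the verbatim kernel source: taking __netif_tx_lock() in the completion handler contends on the same spinlock the core acquires before invoking the driver's xmit routine, which is what makes the private tx_lock redundant.

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;	/* record owner for recursion checks */
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}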