Commit d96febed authored by Ong Boon Leong, committed by David S. Miller

net: stmmac: arrange Tx tail pointer update to stmmac_flush_tx_descriptors

This patch moves the TX tail pointer update into a new function,
stmmac_flush_tx_descriptors(), so that it can be reused in stmmac_xmit(),
stmmac_tso_xmit() and the upcoming XDP implementation.

Changes in v2:
 - Fix for warning: unused variable ‘desc_size’
   https://patchwork.hopto.org/static/nipa/457321/12170149/build_32bit/stderr

Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d08d32d1
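For context (this is not part of the patch), a minimal sketch of the call pattern both transmit paths are expected to end with after this change. The caller name below is hypothetical and the usual stmmac driver headers are assumed; stmmac_flush_tx_descriptors(), stmmac_tx_timer_arm(), netdev_priv() and skb_get_queue_mapping() are the real symbols used in the diff that follows.

/* Hypothetical transmit-path tail: stmmac_xmit() and stmmac_tso_xmit()
 * both end this way once the tail pointer update is factored out.
 */
static netdev_tx_t example_xmit_tail(struct sk_buff *skb, struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 queue = skb_get_queue_mapping(skb);

        /* ... descriptor setup and ownership handover elided ... */

        /* One helper call now issues the wmb() and programs the Tx tail pointer */
        stmmac_flush_tx_descriptors(priv, queue);
        stmmac_tx_timer_arm(priv, queue);

        return NETDEV_TX_OK;
}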
@@ -3518,6 +3518,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
 	}
 }
 
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
+{
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	int desc_size;
+
+	if (likely(priv->extend_desc))
+		desc_size = sizeof(struct dma_extended_desc);
+	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+		desc_size = sizeof(struct dma_edesc);
+	else
+		desc_size = sizeof(struct dma_desc);
+
+	/* The own bit must be the latest setting done when prepare the
+	 * descriptor and then barrier is needed to make sure that
+	 * all is coherent before granting the DMA engine.
+	 */
+	wmb();
+
+	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
+	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+}
+
 /**
  * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
  * @skb : the socket buffer
@@ -3549,10 +3571,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dma_desc *desc, *first, *mss_desc = NULL;
 	struct stmmac_priv *priv = netdev_priv(dev);
-	int desc_size, tmp_pay_len = 0, first_tx;
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	u32 queue = skb_get_queue_mapping(skb);
 	unsigned int first_entry, tx_packets;
+	int tmp_pay_len = 0, first_tx;
 	struct stmmac_tx_queue *tx_q;
 	bool has_vlan, set_ic;
 	u8 proto_hdr_len, hdr;
@@ -3750,12 +3772,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		stmmac_set_tx_owner(priv, mss_desc);
 	}
 
-	/* The own bit must be the latest setting done when prepare the
-	 * descriptor and then barrier is needed to make sure that
-	 * all is coherent before granting the DMA engine.
-	 */
-	wmb();
-
 	if (netif_msg_pktdata(priv)) {
 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
@@ -3766,13 +3782,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
-	if (tx_q->tbs & STMMAC_TBS_AVAIL)
-		desc_size = sizeof(struct dma_edesc);
-	else
-		desc_size = sizeof(struct dma_desc);
-
-	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
-	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+	stmmac_flush_tx_descriptors(priv, queue);
 	stmmac_tx_timer_arm(priv, queue);
 
 	return NETDEV_TX_OK;
@@ -3802,10 +3812,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int gso = skb_shinfo(skb)->gso_type;
 	struct dma_edesc *tbs_desc = NULL;
-	int entry, desc_size, first_tx;
 	struct dma_desc *desc, *first;
 	struct stmmac_tx_queue *tx_q;
 	bool has_vlan, set_ic;
+	int entry, first_tx;
 	dma_addr_t des;
 
 	tx_q = &priv->tx_queue[queue];
@@ -4007,25 +4017,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	stmmac_set_tx_owner(priv, first);
 
-	/* The own bit must be the latest setting done when prepare the
-	 * descriptor and then barrier is needed to make sure that
-	 * all is coherent before granting the DMA engine.
-	 */
-	wmb();
-
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
 
-	if (likely(priv->extend_desc))
-		desc_size = sizeof(struct dma_extended_desc);
-	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
-		desc_size = sizeof(struct dma_edesc);
-	else
-		desc_size = sizeof(struct dma_desc);
-
-	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
-	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+	stmmac_flush_tx_descriptors(priv, queue);
 	stmmac_tx_timer_arm(priv, queue);
 
 	return NETDEV_TX_OK;
...