Commit a1f2d60a authored by David S. Miller

Merge branch 'r8169-xmit_more'

Heiner Kallweit says:

====================
r8169: make use of xmit_more and __netdev_sent_queue

This series adds helper __netdev_sent_queue to the core and makes use
of it in the r8169 driver.

Heiner Kallweit (2):
  net: core: add __netdev_sent_queue as variant of __netdev_tx_sent_queue
  r8169: make use of xmit_more and __netdev_sent_queue

v2:
- fix minor style issue
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 358be656 2e6eedb4
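
For context, the usage pattern this series enables looks roughly like the sketch below: a single-queue driver stops its queue first when the ring is nearly full, then lets the new helper decide whether the expensive doorbell write can be deferred. This is a sketch only; all my_* names are hypothetical placeholders, not part of this commit.

#include <linux/netdevice.h>

/* Sketch of .ndo_start_xmit for a hypothetical single-queue driver */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	bool stop_queue;

	my_map_and_fill_descriptors(priv, skb);	/* hypothetical */

	/* Stop the queue before the BQL call so that __netdev_sent_queue
	 * sees the stopped state and forces the doorbell even when
	 * skb->xmit_more is set.
	 */
	stop_queue = !my_tx_slots_avail(priv);	/* hypothetical */
	if (unlikely(stop_queue))
		netif_stop_queue(dev);

	/* True means "ring the doorbell now": either no further packet
	 * is pending, or the queue just stopped.
	 */
	if (__netdev_sent_queue(dev, skb->len, skb->xmit_more))
		my_write_tx_doorbell(priv);	/* hypothetical MMIO write */

	return NETDEV_TX_OK;
}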
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
@@ -6069,6 +6069,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	struct device *d = tp_to_dev(tp);
 	dma_addr_t mapping;
 	u32 opts[2], len;
+	bool stop_queue;
 	int frags;
 
 	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -6110,8 +6111,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	txd->opts2 = cpu_to_le32(opts[1]);
 
-	netdev_sent_queue(dev, skb->len);
-
 	skb_tx_timestamp(skb);
 
 	/* Force memory writes to complete before releasing descriptor */
@@ -6124,16 +6123,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	tp->cur_tx += frags + 1;
 
-	RTL_W8(tp, TxPoll, NPQ);
+	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+	if (unlikely(stop_queue))
+		netif_stop_queue(dev);
 
-	mmiowb();
+	if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) {
+		RTL_W8(tp, TxPoll, NPQ);
+		mmiowb();
+	}
 
-	if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
-		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
-		 * not miss a ring update when it notices a stopped queue.
-		 */
-		smp_wmb();
-		netif_stop_queue(dev);
+	if (unlikely(stop_queue)) {
 		/* Sync with rtl_tx:
 		 * - publish queue status and cur_tx ring index (write barrier)
 		 * - refresh dirty_tx ring index (read barrier).
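
The subtle point in the r8169 hunk above is the ordering: netif_stop_queue() now runs before the BQL accounting. That is safe because the per-queue helper wrapped by the new __netdev_sent_queue (see the netdevice.h hunk below) reports whether the doorbell must ring, behaving roughly like this sketch (a paraphrase of the net-next helper, not part of this diff):

static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
#ifdef CONFIG_BQL
		dql_queued(&dev_queue->dql, bytes);
#endif
		/* Defer the doorbell only while the queue keeps running. */
		return netif_tx_queue_stopped(dev_queue);
	}
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;	/* last packet of a batch: always ring */
}

With that contract, a queue that stops can never strand its final descriptor: the helper returns true for it even when xmit_more is set, so the TxPoll write still happens.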
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
@@ -3226,6 +3226,14 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
 }
 
+static inline bool __netdev_sent_queue(struct net_device *dev,
+				       unsigned int bytes,
+				       bool xmit_more)
+{
+	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
+				      xmit_more);
+}
+
 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 					     unsigned int pkts, unsigned int bytes)
 {
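
The new wrapper mirrors netdev_sent_queue(): a convenience for single-queue devices such as RTL8169 that always operates on TX queue 0. A multi-queue driver would call the per-queue variant directly, along these lines (sketch; my_ring_doorbell() is a hypothetical placeholder):

	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (__netdev_tx_sent_queue(txq, skb->len, skb->xmit_more))
		my_ring_doorbell(priv, skb_get_queue_mapping(skb));	/* hypothetical */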