Commit 10c51b56 authored by Daniel Borkmann and committed by David S. Miller

net: add skb_get_tx_queue() helper

Replace occurrences of skb_get_queue_mapping() and follow-up
netdev_get_tx_queue() with an actual helper function.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a3bf5c42
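
For readers converting their own call sites, a minimal before/after sketch of the pattern this commit replaces (the dev and skb variables stand for a hypothetical caller's net_device and sk_buff; this snippet is illustrative and not part of the commit):

	/* before: open-coded two-step queue lookup */
	u16 queue_map = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_map);

	/* after: the new helper resolves the skb's queue mapping internally */
	struct netdev_queue *txq = skb_get_tx_queue(dev, skb);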
include/linux/netdevice.h
@@ -1747,6 +1747,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 	return &dev->_tx[index];
 }
 
+static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
+						    const struct sk_buff *skb)
+{
+	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+}
+
 static inline void netdev_for_each_tx_queue(struct net_device *dev,
 					    void (*f)(struct net_device *,
 						      struct netdev_queue *,
net/core/netpoll.c
@@ -115,7 +115,7 @@ static void queue_process(struct work_struct *work)
 			continue;
 		}
 
-		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+		txq = skb_get_tx_queue(dev, skb);
 
 		local_irq_save(flags);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
net/core/pktgen.c
@@ -3286,7 +3286,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = pkt_dev->odev;
 	struct netdev_queue *txq;
-	u16 queue_map;
 	int ret;
 
 	/* If device is offline, then don't send */
@@ -3324,8 +3323,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->delay && pkt_dev->last_ok)
 		spin(pkt_dev, pkt_dev->next_tx);
 
-	queue_map = skb_get_queue_mapping(pkt_dev->skb);
-	txq = netdev_get_tx_queue(odev, queue_map);
+	txq = skb_get_tx_queue(odev, pkt_dev->skb);
 
 	local_bh_disable();
...@@ -243,7 +243,6 @@ static int packet_direct_xmit(struct sk_buff *skb) ...@@ -243,7 +243,6 @@ static int packet_direct_xmit(struct sk_buff *skb)
netdev_features_t features; netdev_features_t features;
struct netdev_queue *txq; struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY; int ret = NETDEV_TX_BUSY;
u16 queue_map;
if (unlikely(!netif_running(dev) || if (unlikely(!netif_running(dev) ||
!netif_carrier_ok(dev))) !netif_carrier_ok(dev)))
...@@ -254,8 +253,7 @@ static int packet_direct_xmit(struct sk_buff *skb) ...@@ -254,8 +253,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
__skb_linearize(skb)) __skb_linearize(skb))
goto drop; goto drop;
queue_map = skb_get_queue_mapping(skb); txq = skb_get_tx_queue(dev, skb);
txq = netdev_get_tx_queue(dev, queue_map);
local_bh_disable(); local_bh_disable();
......
net/sched/sch_generic.c
@@ -63,7 +63,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 	if (unlikely(skb)) {
 		/* check the reason of requeuing without tx lock first */
-		txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
+		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
@@ -183,10 +183,12 @@ static inline int qdisc_restart(struct Qdisc *q)
 	skb = dequeue_skb(q);
 	if (unlikely(!skb))
 		return 0;
+
 	WARN_ON_ONCE(skb_dst_is_noref(skb));
+
 	root_lock = qdisc_lock(q);
 	dev = qdisc_dev(q);
-	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+	txq = skb_get_tx_queue(dev, skb);
 
 	return sch_direct_xmit(skb, q, dev, txq, root_lock);
 }