Commit 4bd97d51 authored by Paolo Abeni, committed by David S. Miller

net: dev: rename queue selection helpers.

With the following patches, we are going to use __netdev_pick_tx() in
many modules. Rename it to netdev_pick_tx(), to make it clear it is
a public API.

Also rename the existing netdev_pick_tx() to netdev_core_pick_tx(),
to avoid name clashes.
Suggested-by: Eric Dumazet <edumazet@google.com>
Suggested-by: David Miller <davem@davemloft.net>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0b963ef2
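For context, here is a minimal sketch of the driver-facing side of this API; foo_select_queue is hypothetical and not part of this commit. A driver's .ndo_select_queue receives the core helper through the select_queue_fallback_t argument, which after this patch points at netdev_pick_tx() (formerly __netdev_pick_tx()):

#include <linux/netdevice.h>

/* Hypothetical driver callback (foo_* names are illustrative only).
 * After this patch, the core passes netdev_pick_tx() as @fallback.
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* Driver-specific queue steering would go here; with no special
	 * policy, defer to the core hash-based selection.
	 */
	return fallback(dev, skb, sb_dev);
}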
include/linux/netdevice.h
@@ -2152,9 +2152,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 						&qdisc_xmit_lock_key);	\
 }

-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb,
-				    struct net_device *sb_dev);
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+					 struct sk_buff *skb,
+					 struct net_device *sb_dev);

 /* returns the headroom that the master device needs to take in account
  * when forwarding to this dev
net/core/dev.c
@@ -3704,8 +3704,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(dev_pick_tx_cpu_id);

-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev)
+static u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+			  struct net_device *sb_dev)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
@@ -3730,9 +3730,9 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 	return queue_index;
 }

-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb,
-				    struct net_device *sb_dev)
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+					 struct sk_buff *skb,
+					 struct net_device *sb_dev)
 {
 	int queue_index = 0;

@@ -3748,9 +3748,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,

 		if (ops->ndo_select_queue)
 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
-							    __netdev_pick_tx);
+							    netdev_pick_tx);
 		else
-			queue_index = __netdev_pick_tx(dev, skb, sb_dev);
+			queue_index = netdev_pick_tx(dev, skb, sb_dev);

 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
@@ -3824,7 +3824,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 	else
 		skb_dst_force(skb);

-	txq = netdev_pick_tx(dev, skb, sb_dev);
+	txq = netdev_core_pick_tx(dev, skb, sb_dev);
 	q = rcu_dereference_bh(txq->qdisc);

 	trace_net_dev_queue(skb);
@@ -4429,7 +4429,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
 	bool free_skb = true;
 	int cpu, rc;

-	txq = netdev_pick_tx(dev, skb, NULL);
+	txq = netdev_core_pick_tx(dev, skb, NULL);
 	cpu = smp_processor_id();
 	HARD_TX_LOCK(dev, txq, cpu);
 	if (!netif_xmit_stopped(txq)) {
net/core/netpoll.c
@@ -323,7 +323,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;

-		txq = netdev_pick_tx(dev, skb, NULL);
+		txq = netdev_core_pick_tx(dev, skb, NULL);

 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
net/xfrm/xfrm_device.c
@@ -247,7 +247,7 @@ void xfrm_dev_resume(struct sk_buff *skb)
 	unsigned long flags;

 	rcu_read_lock();
-	txq = netdev_pick_tx(dev, skb, NULL);
+	txq = netdev_core_pick_tx(dev, skb, NULL);

 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_xmit_frozen_or_stopped(txq))
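As a rough sketch of where this series is heading, here is how a module might use the helper once the announced follow-up patches make netdev_pick_tx() callable outside the core. This assumes the eventual non-static signature u16 netdev_pick_tx(dev, skb, sb_dev); the foo_* names and the clamping detail are illustrative, not code from this commit:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical module helper: pick the same tx queue the core
 * fallback would choose, assuming netdev_pick_tx() is exposed by
 * the follow-up patches.
 */
static struct netdev_queue *foo_pick_txq(struct net_device *dev,
					 struct sk_buff *skb)
{
	u16 index = netdev_pick_tx(dev, skb, NULL);

	/* Clamp the index to the active queue range, mirroring what
	 * the core does via netdev_cap_txqueue() (static in dev.c).
	 */
	if (unlikely(index >= dev->real_num_tx_queues))
		index %= dev->real_num_tx_queues;

	return netdev_get_tx_queue(dev, index);
}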