Commit 7bbde83b authored by John Fastabend's avatar John Fastabend Committed by David S. Miller

net: sched: drop qdisc_reset from dev_graft_qdisc

In qdisc_graft_qdisc a "new" qdisc is attached and the 'qdisc_destroy'
operation is called on the old qdisc. The destroy operation will wait
an RCU grace period and call qdisc_rcu_free(), at which point
gso_cpu_skb is freed along with all stats, so there is no need to zero
stats and gso_cpu_skb from the graft operation itself.

Further, after dropping the qdisc locks we cannot continue to call
qdisc_reset before waiting an RCU grace period so that the qdisc is
detached from all CPUs. By removing the qdisc_reset() here we get
the correct property of waiting an RCU grace period and letting the
qdisc_destroy operation clean up the qdisc correctly.

Note: a refcnt greater than 1 would cause the destroy operation to
be aborted; however, if this ever happened the reference to the qdisc
would be lost and we would have a memory leak.
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a53851e2
...@@ -819,10 +819,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, ...@@ -819,10 +819,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
root_lock = qdisc_lock(oqdisc); root_lock = qdisc_lock(oqdisc);
spin_lock_bh(root_lock); spin_lock_bh(root_lock);
/* Prune old scheduler */
if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
qdisc_reset(oqdisc);
/* ... and graft new one */ /* ... and graft new one */
if (qdisc == NULL) if (qdisc == NULL)
qdisc = &noop_qdisc; qdisc = &noop_qdisc;
...@@ -977,6 +973,16 @@ static bool some_qdisc_is_busy(struct net_device *dev) ...@@ -977,6 +973,16 @@ static bool some_qdisc_is_busy(struct net_device *dev)
return false; return false;
} }
/*
 * Reset the sleeping qdisc attached to a tx queue, discarding any stale
 * skb lists and statistics it still holds. Used as a per-queue callback
 * for netdev_for_each_tx_queue(); @dev and @none are unused and exist
 * only to satisfy the callback signature.
 */
static void dev_qdisc_reset(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *none)
{
	/* qdisc_sleeping may be NULL for a queue with no qdisc attached */
	if (dev_queue->qdisc_sleeping)
		qdisc_reset(dev_queue->qdisc_sleeping);
}
/** /**
* dev_deactivate_many - deactivate transmissions on several devices * dev_deactivate_many - deactivate transmissions on several devices
* @head: list of devices to deactivate * @head: list of devices to deactivate
...@@ -987,7 +993,6 @@ static bool some_qdisc_is_busy(struct net_device *dev) ...@@ -987,7 +993,6 @@ static bool some_qdisc_is_busy(struct net_device *dev)
void dev_deactivate_many(struct list_head *head) void dev_deactivate_many(struct list_head *head)
{ {
struct net_device *dev; struct net_device *dev;
bool sync_needed = false;
list_for_each_entry(dev, head, close_list) { list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue, netdev_for_each_tx_queue(dev, dev_deactivate_queue,
...@@ -997,20 +1002,25 @@ void dev_deactivate_many(struct list_head *head) ...@@ -997,20 +1002,25 @@ void dev_deactivate_many(struct list_head *head)
&noop_qdisc); &noop_qdisc);
dev_watchdog_down(dev); dev_watchdog_down(dev);
sync_needed |= !dev->dismantle;
} }
/* Wait for outstanding qdisc-less dev_queue_xmit calls. /* Wait for outstanding qdisc-less dev_queue_xmit calls.
* This is avoided if all devices are in dismantle phase : * This is avoided if all devices are in dismantle phase :
* Caller will call synchronize_net() for us * Caller will call synchronize_net() for us
*/ */
if (sync_needed) synchronize_net();
synchronize_net();
/* Wait for outstanding qdisc_run calls. */ /* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev)) while (some_qdisc_is_busy(dev))
yield(); yield();
/* The new qdisc is assigned at this point so we can safely
* unwind stale skb lists and qdisc statistics
*/
netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
if (dev_ingress_queue(dev))
dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
}
} }
void dev_deactivate(struct net_device *dev) void dev_deactivate(struct net_device *dev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment