Commit a57d8c21 authored by Vladimir Oltean's avatar Vladimir Oltean Committed by Jakub Kicinski

net: dsa: flush switchdev workqueue before tearing down CPU/DSA ports

Sometimes when unbinding the mv88e6xxx driver on Turris MOX, these error
messages appear:

mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete be:79:b4:9e:9e:96 vid 1 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete be:79:b4:9e:9e:96 vid 0 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete d8:58:d7:00:ca:6d vid 100 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete d8:58:d7:00:ca:6d vid 1 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete d8:58:d7:00:ca:6d vid 0 from fdb: -2

(and similarly for other ports)

What happens is that DSA has a policy "even if there are bugs, let's at
least not leak memory" and dsa_port_teardown() clears the dp->fdbs and
dp->mdbs lists, which are supposed to be empty.

But after deleting that cleanup code, the warnings go away.

=> the FDB and MDB lists (used for refcounting on shared ports, aka CPU
and DSA ports) will eventually be empty, but are not empty by the time
we tear down those ports. Aka we are deleting them too soon.

The addresses that DSA complains about are host-trapped addresses: the
local addresses of the ports, and the MAC address of the bridge device.

The problem is that offloading those entries happens from a deferred
work item scheduled by the SWITCHDEV_FDB_DEL_TO_DEVICE handler, and this
races with the teardown of the CPU and DSA ports where the refcounting
is kept.

In fact, not only does it race, but fundamentally speaking, if we iterate
through the port list linearly, we might end up tearing down the shared
ports even before we delete a DSA user port which has a bridge upper.

So as it turns out, we need to first tear down the user ports (and the
unused ones, for no better place of doing that), then the shared ports
(the CPU and DSA ports). In between, we need to ensure that all work
items scheduled by our switchdev handlers (which only run for user
ports, hence the reason why we tear them down first) have finished.

Fixes: 161ca59d ("net: dsa: reference count the MDB entries at the cross-chip notifier level")
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Link: https://lore.kernel.org/r/20210914134726.2305133-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 301de697
...@@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp) ...@@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
return dp->type == DSA_PORT_TYPE_USER; return dp->type == DSA_PORT_TYPE_USER;
} }
/* Return true if @dp is an unused port (type DSA_PORT_TYPE_UNUSED),
 * i.e. neither a user, CPU, nor DSA link port.
 */
static inline bool dsa_port_is_unused(struct dsa_port *dp)
{
return dp->type == DSA_PORT_TYPE_UNUSED;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{ {
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
......
...@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work) ...@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
return queue_work(dsa_owq, work); return queue_work(dsa_owq, work);
} }
/* Wait for all work items queued on the DSA ordered workqueue (dsa_owq,
 * the queue dsa_schedule_work() submits to) to finish executing.
 */
void dsa_flush_workqueue(void)
{
flush_workqueue(dsa_owq);
}
int dsa_devlink_param_get(struct devlink *dl, u32 id, int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx) struct devlink_param_gset_ctx *ctx)
{ {
......
...@@ -897,6 +897,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds) ...@@ -897,6 +897,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
ds->setup = false; ds->setup = false;
} }
/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
/* Non-shared ports first: user ports, plus unused ones (torn down here
 * for lack of a better place).
 */
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
dsa_port_teardown(dp);
/* Drain deferred switchdev work (e.g. FDB/MDB deletion offload) while the
 * shared ports holding the refcounts still exist.
 */
dsa_flush_workqueue();
/* Now it is safe to tear down the shared (CPU and DSA link) ports. */
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
dsa_port_teardown(dp);
}
/* Tear down every switch in the tree. Callers invoke this after
 * dsa_tree_teardown_ports(), so all ports are already gone by this point.
 */
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
dsa_switch_teardown(dp->ds);
}
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{ {
struct dsa_port *dp; struct dsa_port *dp;
...@@ -923,26 +950,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) ...@@ -923,26 +950,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
return 0; return 0;
teardown: teardown:
list_for_each_entry(dp, &dst->ports, list) dsa_tree_teardown_ports(dst);
dsa_port_teardown(dp);
list_for_each_entry(dp, &dst->ports, list) dsa_tree_teardown_switches(dst);
dsa_switch_teardown(dp->ds);
return err; return err;
} }
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
dsa_port_teardown(dp);
list_for_each_entry(dp, &dst->ports, list)
dsa_switch_teardown(dp->ds);
}
static int dsa_tree_setup_master(struct dsa_switch_tree *dst) static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{ {
struct dsa_port *dp; struct dsa_port *dp;
...@@ -1052,6 +1066,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst) ...@@ -1052,6 +1066,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_master(dst); dsa_tree_teardown_master(dst);
dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst); dsa_tree_teardown_switches(dst);
dsa_tree_teardown_cpu_ports(dst); dsa_tree_teardown_cpu_ports(dst);
......
...@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops); ...@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf); const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work); bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops); const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops) static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment