Commit 52412f55 authored by Paolo Abeni

Merge branch 'dsa-changes-for-multiple-cpu-ports-part-3'

Vladimir Oltean says:

====================
DSA changes for multiple CPU ports (part 3)

Those who have been following part 1:
https://patchwork.kernel.org/project/netdevbpf/cover/20220511095020.562461-1-vladimir.oltean@nxp.com/
and part 2:
https://patchwork.kernel.org/project/netdevbpf/cover/20220521213743.2735445-1-vladimir.oltean@nxp.com/
will know that I am trying to enable the second internal port pair from
the NXP LS1028A Felix switch for DSA-tagged traffic via "ocelot-8021q".
This series represents part 3 of that effort.

Covered here are some preparations in DSA for handling multiple DSA
masters:
- when changing the tagging protocol via sysfs
- when the masters go down
as well as preparation for monitoring the upper devices of a DSA master
(to support DSA masters under a LAG).

There are also 2 small preparations for the ocelot driver, for the case
where multiple tag_8021q CPU ports are used in a LAG. Both those changes
have to do with PGID forwarding domains.

Compared to v1, the patches were trimmed down to just another
preparation stage, and the UAPI changes were pushed further out to part 4.
https://patchwork.kernel.org/project/netdevbpf/cover/20220523104256.3556016-1-olteanv@gmail.com/

Compared to v2, I exported a symbol I had forgotten to export
(ocelot_port_teardown_dsa_8021q_cpu), to avoid a build breakage when the
felix and seville drivers are built as modules.
====================

Link: https://lore.kernel.org/r/20220819174820.3585002-1-vladimir.oltean@nxp.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 139b5fbd 291ac151
@@ -445,6 +445,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
 	if (err)
 		return err;
 
+	dsa_switch_for_each_cpu_port(dp, ds)
+		ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
 	dsa_switch_for_each_user_port(dp, ds)
 		ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
 						 dp->cpu_dp->index);
@@ -493,6 +496,9 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
 	dsa_switch_for_each_user_port(dp, ds)
 		ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
 
+	dsa_switch_for_each_cpu_port(dp, ds)
+		ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
 	dsa_tag_8021q_unregister(ds);
 }
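
The ordering above is deliberate: setup brings every tag_8021q CPU port up before any user port gets assigned to one, and teardown mirrors it exactly, unassigning all user ports before the CPU ports are torn down. A minimal userspace sketch of that call ordering (the port layout is invented for illustration, this is not kernel code):

/* Model of the felix setup/teardown ordering: CPU ports first on setup,
 * last on teardown. Compile with any C99 compiler.
 */
#include <stdio.h>

enum role { ROLE_USER, ROLE_CPU };

struct port { int index; enum role role; int cpu_index; };

static const struct port ports[] = {
	{ 0, ROLE_USER, 4 }, { 1, ROLE_USER, 4 },
	{ 2, ROLE_USER, 5 }, { 3, ROLE_USER, 5 },
	{ 4, ROLE_CPU, -1 }, { 5, ROLE_CPU, -1 },
};

#define NUM_PORTS ((int)(sizeof(ports) / sizeof(ports[0])))

int main(void)
{
	int i;

	/* setup: bring up CPU ports, then point user ports at them */
	for (i = 0; i < NUM_PORTS; i++)
		if (ports[i].role == ROLE_CPU)
			printf("setup_dsa_8021q_cpu(%d)\n", ports[i].index);
	for (i = 0; i < NUM_PORTS; i++)
		if (ports[i].role == ROLE_USER)
			printf("assign_dsa_8021q_cpu(%d -> %d)\n",
			       ports[i].index, ports[i].cpu_index);

	/* teardown: exact mirror image */
	for (i = 0; i < NUM_PORTS; i++)
		if (ports[i].role == ROLE_USER)
			printf("unassign_dsa_8021q_cpu(%d)\n", ports[i].index);
	for (i = 0; i < NUM_PORTS; i++)
		if (ports[i].role == ROLE_CPU)
			printf("teardown_dsa_8021q_cpu(%d)\n", ports[i].index);

	return 0;
}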
@@ -2064,6 +2064,16 @@ static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
 	return __ffs(bond_mask);
 }
 
+/* Returns the mask of user ports assigned to this DSA tag_8021q CPU port.
+ * Note that when CPU ports are in a LAG, the user ports are assigned to the
+ * 'primary' CPU port, the one whose physical port number gives the logical
+ * port number of the LAG.
+ *
+ * We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG
+ * (to which no user port is assigned), but it appears that forwarding from
+ * this secondary CPU port looks at the PGID_SRC associated with the logical
+ * port ID that it's assigned to, which *is* configured properly.
+ */
 static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
 					       struct ocelot_port *cpu)
 {
@@ -2080,9 +2090,15 @@ static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
 			mask |= BIT(port);
 	}
 
+	if (cpu->bond)
+		mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond);
+
 	return mask;
 }
 
+/* Returns the DSA tag_8021q CPU port that the given port is assigned to,
+ * or the bit mask of CPU ports if said CPU port is in a LAG.
+ */
 u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
 {
 	struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2091,6 +2107,9 @@ u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
 	if (!cpu_port)
 		return 0;
 
+	if (cpu_port->bond)
+		return ocelot_get_bond_mask(ocelot, cpu_port->bond);
+
 	return BIT(cpu_port->index);
 }
 EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
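
For illustration, a small userspace model of the two mask helpers above. The port layout (user ports 0-2, CPU ports 4-5 bonded in a LAG) is invented and this is not kernel code; it only shows how the bond mask interacts with the two return values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bond_mask = (1u << 4) | (1u << 5); /* CPU ports in the LAG */
	uint32_t assigned = (1u << 0) | (1u << 1) | (1u << 2);

	/* ocelot_dsa_8021q_cpu_assigned_ports(): strip the CPU ports of the
	 * LAG out of the forwarding domain, mirroring 'mask &= ~bond_mask'.
	 */
	assigned &= ~bond_mask;

	/* ocelot_port_assigned_dsa_8021q_cpu_mask(): a lone CPU port yields
	 * BIT(index), a bonded CPU port yields the whole bond mask.
	 */
	uint32_t cpu_mask_lone = 1u << 4;
	uint32_t cpu_mask_lag = bond_mask;

	printf("assigned=0x%x lone=0x%x lag=0x%x\n",
	       assigned, cpu_mask_lone, cpu_mask_lag);
	return 0;
}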
@@ -2214,61 +2233,61 @@ static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
 	ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
 }
 
-void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
-				      int cpu)
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
 {
 	struct ocelot_port *cpu_port = ocelot->ports[cpu];
 	u16 vid;
 
 	mutex_lock(&ocelot->fwd_domain_lock);
 
-	ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
-
-	if (!cpu_port->is_dsa_8021q_cpu) {
-		cpu_port->is_dsa_8021q_cpu = true;
+	cpu_port->is_dsa_8021q_cpu = true;
 
-		for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
-			ocelot_vlan_member_add(ocelot, cpu, vid, true);
+	for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+		ocelot_vlan_member_add(ocelot, cpu, vid, true);
 
-		ocelot_update_pgid_cpu(ocelot);
-	}
-
-	ocelot_apply_bridge_fwd_mask(ocelot, true);
+	ocelot_update_pgid_cpu(ocelot);
 
 	mutex_unlock(&ocelot->fwd_domain_lock);
 }
-EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
+EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu);
 
-void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
 {
-	struct ocelot_port *cpu_port = ocelot->ports[port]->dsa_8021q_cpu;
-	bool keep = false;
+	struct ocelot_port *cpu_port = ocelot->ports[cpu];
 	u16 vid;
-	int p;
 
 	mutex_lock(&ocelot->fwd_domain_lock);
 
-	ocelot->ports[port]->dsa_8021q_cpu = NULL;
+	cpu_port->is_dsa_8021q_cpu = false;
 
-	for (p = 0; p < ocelot->num_phys_ports; p++) {
-		if (!ocelot->ports[p])
-			continue;
+	for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+		ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
 
-		if (ocelot->ports[p]->dsa_8021q_cpu == cpu_port) {
-			keep = true;
-			break;
-		}
-	}
+	ocelot_update_pgid_cpu(ocelot);
 
-	if (!keep) {
-		cpu_port->is_dsa_8021q_cpu = false;
+	mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu);
 
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
+				      int cpu)
+{
+	struct ocelot_port *cpu_port = ocelot->ports[cpu];
 
-		for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
-			ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
+	mutex_lock(&ocelot->fwd_domain_lock);
 
-		ocelot_update_pgid_cpu(ocelot);
-	}
+	ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
 	ocelot_apply_bridge_fwd_mask(ocelot, true);
 
 	mutex_unlock(&ocelot->fwd_domain_lock);
 }
+EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
+
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+	mutex_lock(&ocelot->fwd_domain_lock);
+
+	ocelot->ports[port]->dsa_8021q_cpu = NULL;
+	ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+	mutex_unlock(&ocelot->fwd_domain_lock);
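
All four helpers follow one pattern: take fwd_domain_lock, mutate the tag_8021q CPU state, recompute the derived state (VLAN membership, PGID_CPU, bridge forwarding mask), unlock. A stripped-down userspace sketch of that pattern, with a pthread mutex standing in for the kernel mutex and a printf standing in for the register update (names and port count invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t fwd_domain_lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_dsa_8021q_cpu[8];

/* stand-in for ocelot_update_pgid_cpu(): recompute PGID_CPU from flags */
static void update_pgid_cpu(void)
{
	printf("recomputing PGID_CPU\n");
}

static void setup_dsa_8021q_cpu(int cpu)
{
	pthread_mutex_lock(&fwd_domain_lock);
	is_dsa_8021q_cpu[cpu] = true;	/* the state change... */
	update_pgid_cpu();		/* ...and its derived state, atomically */
	pthread_mutex_unlock(&fwd_domain_lock);
}

static void teardown_dsa_8021q_cpu(int cpu)
{
	pthread_mutex_lock(&fwd_domain_lock);
	is_dsa_8021q_cpu[cpu] = false;
	update_pgid_cpu();
	pthread_mutex_unlock(&fwd_domain_lock);
}

int main(void)
{
	setup_dsa_8021q_cpu(4);
	teardown_dsa_8021q_cpu(4);
	return 0;
}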
@@ -559,6 +559,10 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
 	list_for_each_entry((_dp), &(_dst)->ports, list) \
 		if (dsa_port_is_user((_dp)))
 
+#define dsa_tree_for_each_cpu_port(_dp, _dst) \
+	list_for_each_entry((_dp), &(_dst)->ports, list) \
+		if (dsa_port_is_cpu((_dp)))
+
 #define dsa_switch_for_each_port(_dp, _ds) \
 	list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
 		if ((_dp)->ds == (_ds))
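
dsa_tree_for_each_cpu_port reuses the usual kernel trick of chaining an if onto a for statement, so the macro still accepts a single statement or block as its loop body. A standalone userspace demonstration of the same trick, iterating an invented array-based port table (the kernel macro walks a list_head instead):

#include <stdio.h>

struct port { int index; int is_cpu; };

#define for_each_cpu_port(_p, _tab, _n)				\
	for ((_p) = (_tab); (_p) < (_tab) + (_n); (_p)++)	\
		if ((_p)->is_cpu)

int main(void)
{
	struct port ports[] = { { 0, 0 }, { 1, 0 }, { 4, 1 }, { 5, 1 } };
	struct port *p;

	for_each_cpu_port(p, ports, 4)
		printf("CPU port %d\n", p->index);

	return 0;
}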
@@ -1024,6 +1024,8 @@ void ocelot_deinit(struct ocelot *ocelot);
 void ocelot_init_port(struct ocelot *ocelot, int port);
 void ocelot_deinit_port(struct ocelot *ocelot, int port);
 
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
 void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
 void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port);
 u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
@@ -568,26 +568,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 	    !is_valid_ether_addr(dev->dev_addr))
 		return -EINVAL;
 
-	/* Also don't allow bridging of net devices that are DSA masters, since
-	 * the bridge layer rx_handler prevents the DSA fake ethertype handler
-	 * to be invoked, so we don't get the chance to strip off and parse the
-	 * DSA switch tag protocol header (the bridge layer just returns
-	 * RX_HANDLER_CONSUMED, stopping RX processing for these frames).
-	 * The only case where that would not be an issue is when bridging can
-	 * already be offloaded, such as when the DSA master is itself a DSA
-	 * or plain switchdev port, and is bridged only with other ports from
-	 * the same hardware device.
-	 */
-	if (netdev_uses_dsa(dev)) {
-		list_for_each_entry(p, &br->port_list, list) {
-			if (!netdev_port_same_parent_id(dev, p->dev)) {
-				NL_SET_ERR_MSG(extack,
-					       "Cannot do software bridging with a DSA master");
-				return -EINVAL;
-			}
-		}
-	}
-
 	/* No bridging of bridges */
 	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
 		NL_SET_ERR_MSG(extack,
@@ -1060,26 +1060,24 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 
 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 {
-	struct dsa_port *dp;
+	struct dsa_port *cpu_dp;
 	int err = 0;
 
 	rtnl_lock();
 
-	list_for_each_entry(dp, &dst->ports, list) {
-		if (dsa_port_is_cpu(dp)) {
-			struct net_device *master = dp->master;
-			bool admin_up = (master->flags & IFF_UP) &&
-					!qdisc_tx_is_noop(master);
+	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
+		struct net_device *master = cpu_dp->master;
+		bool admin_up = (master->flags & IFF_UP) &&
+				!qdisc_tx_is_noop(master);
 
-			err = dsa_master_setup(master, dp);
-			if (err)
-				break;
+		err = dsa_master_setup(master, cpu_dp);
+		if (err)
+			break;
 
-			/* Replay master state event */
-			dsa_tree_master_admin_state_change(dst, master, admin_up);
-			dsa_tree_master_oper_state_change(dst, master,
-							  netif_oper_up(master));
-		}
+		/* Replay master state event */
+		dsa_tree_master_admin_state_change(dst, master, admin_up);
+		dsa_tree_master_oper_state_change(dst, master,
+						  netif_oper_up(master));
 	}
 
 	rtnl_unlock();
@@ -1089,22 +1087,20 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 
 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
 {
-	struct dsa_port *dp;
+	struct dsa_port *cpu_dp;
 
 	rtnl_lock();
 
-	list_for_each_entry(dp, &dst->ports, list) {
-		if (dsa_port_is_cpu(dp)) {
-			struct net_device *master = dp->master;
+	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
+		struct net_device *master = cpu_dp->master;
 
-			/* Synthesizing an "admin down" state is sufficient for
-			 * the switches to get a notification if the master is
-			 * currently up and running.
-			 */
-			dsa_tree_master_admin_state_change(dst, master, false);
+		/* Synthesizing an "admin down" state is sufficient for
+		 * the switches to get a notification if the master is
+		 * currently up and running.
+		 */
+		dsa_tree_master_admin_state_change(dst, master, false);
 
-			dsa_master_teardown(master);
-		}
+		dsa_master_teardown(master);
 	}
 
 	rtnl_unlock();
@@ -1252,7 +1248,6 @@ static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
  * they would have formed disjoint trees (different "dsa,member" values).
  */
 int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
-			      struct net_device *master,
 			      const struct dsa_device_ops *tag_ops,
 			      const struct dsa_device_ops *old_tag_ops)
 {
@@ -1268,14 +1263,11 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
 	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
 	 * restriction, there needs to be another mutex which serializes this.
 	 */
-	if (master->flags & IFF_UP)
-		goto out_unlock;
-
 	list_for_each_entry(dp, &dst->ports, list) {
-		if (!dsa_port_is_user(dp))
-			continue;
+		if (dsa_port_is_cpu(dp) && (dp->master->flags & IFF_UP))
+			goto out_unlock;
 
-		if (dp->slave->flags & IFF_UP)
+		if (dsa_port_is_user(dp) && (dp->slave->flags & IFF_UP))
 			goto out_unlock;
 	}
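
With the master argument gone, the guard is expressed purely in terms of ports: the tagging protocol may only change while every CPU port's master and every user port is administratively down. A userspace model of that check (port names and flags invented, IFF_UP reduced to a plain bit):

#include <stdio.h>

#define IFF_UP 0x1

struct port { const char *name; int is_cpu; int is_user; unsigned int flags; };

static int may_change_tag_proto(const struct port *ports, int n)
{
	for (int i = 0; i < n; i++) {
		if (ports[i].is_cpu && (ports[i].flags & IFF_UP))
			return 0;	/* a DSA master is still up */
		if (ports[i].is_user && (ports[i].flags & IFF_UP))
			return 0;	/* a user port is still up */
	}
	return 1;
}

int main(void)
{
	const struct port ports[] = {
		{ "eno2", 1, 0, IFF_UP },	/* CPU port, master still up */
		{ "swp0", 0, 1, 0 },
	};

	printf("change allowed: %d\n", may_change_tag_proto(ports, 2));
	return 0;
}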
@@ -545,7 +545,6 @@ struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
 int dsa_broadcast(unsigned long e, void *v);
 int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
-			      struct net_device *master,
 			      const struct dsa_device_ops *tag_ops,
 			      const struct dsa_device_ops *old_tag_ops);
 void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
@@ -307,7 +307,7 @@ static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
 		 */
 		goto out;
 
-	err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, dev, new_tag_ops,
+	err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
 					old_tag_ops);
 	if (err) {
 		/* On failure the old tagger is restored, so we don't need the
@@ -2476,6 +2476,9 @@ static int dsa_slave_changeupper(struct net_device *dev,
 	struct netlink_ext_ack *extack;
 	int err = NOTIFY_DONE;
 
+	if (!dsa_slave_dev_check(dev))
+		return err;
+
 	extack = netdev_notifier_info_to_extack(&info->info);
 
 	if (netif_is_bridge_master(info->upper_dev)) {
@@ -2531,6 +2534,9 @@ static int dsa_slave_prechangeupper(struct net_device *dev,
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 
+	if (!dsa_slave_dev_check(dev))
+		return NOTIFY_DONE;
+
 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
 		dsa_port_pre_bridge_leave(dp, info->upper_dev);
 	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
@@ -2551,6 +2557,9 @@ dsa_slave_lag_changeupper(struct net_device *dev,
 	int err = NOTIFY_DONE;
 	struct dsa_port *dp;
 
+	if (!netif_is_lag_master(dev))
+		return err;
+
 	netdev_for_each_lower_dev(dev, lower, iter) {
 		if (!dsa_slave_dev_check(lower))
 			continue;
@@ -2580,6 +2589,9 @@ dsa_slave_lag_prechangeupper(struct net_device *dev,
 	int err = NOTIFY_DONE;
 	struct dsa_port *dp;
 
+	if (!netif_is_lag_master(dev))
+		return err;
+
 	netdev_for_each_lower_dev(dev, lower, iter) {
 		if (!dsa_slave_dev_check(lower))
 			continue;
@@ -2687,6 +2699,75 @@ dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
 	return NOTIFY_DONE;
 }
 
+static int
+dsa_master_prechangeupper_sanity_check(struct net_device *master,
+				       struct netdev_notifier_changeupper_info *info)
+{
+	struct netlink_ext_ack *extack;
+
+	if (!netdev_uses_dsa(master))
+		return NOTIFY_DONE;
+
+	if (!info->linking)
+		return NOTIFY_DONE;
+
+	/* Allow DSA switch uppers */
+	if (dsa_slave_dev_check(info->upper_dev))
+		return NOTIFY_DONE;
+
+	/* Allow bridge uppers of DSA masters, subject to further
+	 * restrictions in dsa_bridge_prechangelower_sanity_check()
+	 */
+	if (netif_is_bridge_master(info->upper_dev))
+		return NOTIFY_DONE;
+
+	extack = netdev_notifier_info_to_extack(&info->info);
+
+	NL_SET_ERR_MSG_MOD(extack,
+			   "DSA master cannot join unknown upper interfaces");
+	return notifier_from_errno(-EBUSY);
+}
+
+/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
+ * prevents the DSA fake ethertype handler to be invoked, so we don't get the
+ * chance to strip off and parse the DSA switch tag protocol header (the bridge
+ * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
+ * frames).
+ * The only case where that would not be an issue is when bridging can already
+ * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
+ * port, and is bridged only with other ports from the same hardware device.
+ */
+static int
+dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
+				       struct netdev_notifier_changeupper_info *info)
+{
+	struct net_device *br = info->upper_dev;
+	struct netlink_ext_ack *extack;
+	struct net_device *lower;
+	struct list_head *iter;
+
+	if (!netif_is_bridge_master(br))
+		return NOTIFY_DONE;
+
+	if (!info->linking)
+		return NOTIFY_DONE;
+
+	extack = netdev_notifier_info_to_extack(&info->info);
+
+	netdev_for_each_lower_dev(br, lower, iter) {
+		if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
+			continue;
+
+		if (!netdev_port_same_parent_id(lower, new_lower)) {
+			NL_SET_ERR_MSG(extack,
+				       "Cannot do software bridging with a DSA master");
+			return notifier_from_errno(-EINVAL);
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
 static int dsa_slave_netdevice_event(struct notifier_block *nb,
 				     unsigned long event, void *ptr)
 {
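
A userspace model of the rule enforced by dsa_bridge_prechangelower_sanity_check(): as soon as a DSA master is involved on either side of the comparison, all bridge ports must report the same parent (physical switch) ID, otherwise software bridging would be needed and the operation is refused. Device names and parent IDs here are invented, and string comparison stands in for netdev_port_same_parent_id():

#include <stdio.h>
#include <string.h>

struct dev { const char *name; int uses_dsa; const char *parent_id; };

static int bridge_may_add(const struct dev *lowers, int n,
			  const struct dev *new_lower)
{
	for (int i = 0; i < n; i++) {
		if (!new_lower->uses_dsa && !lowers[i].uses_dsa)
			continue;	/* no DSA master in this pair */
		if (strcmp(lowers[i].parent_id, new_lower->parent_id))
			return -1;	/* "Cannot do software bridging..." */
	}
	return 0;
}

int main(void)
{
	const struct dev lowers[] = { { "swp0", 0, "felix0" } };
	const struct dev eno2 = { "eno2", 1, "enetc0" };

	/* different parent IDs -> refused, just like the kernel check */
	printf("%d\n", bridge_may_add(lowers, 1, &eno2));
	return 0;
}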
@@ -2698,25 +2779,40 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 		int err;
 
 		err = dsa_slave_prechangeupper_sanity_check(dev, info);
-		if (err != NOTIFY_DONE)
+		if (notifier_to_errno(err))
 			return err;
 
-		if (dsa_slave_dev_check(dev))
-			return dsa_slave_prechangeupper(dev, ptr);
+		err = dsa_master_prechangeupper_sanity_check(dev, info);
+		if (notifier_to_errno(err))
+			return err;
 
-		if (netif_is_lag_master(dev))
-			return dsa_slave_lag_prechangeupper(dev, ptr);
+		err = dsa_bridge_prechangelower_sanity_check(dev, info);
+		if (notifier_to_errno(err))
+			return err;
+
+		err = dsa_slave_prechangeupper(dev, ptr);
+		if (notifier_to_errno(err))
+			return err;
+
+		err = dsa_slave_lag_prechangeupper(dev, ptr);
+		if (notifier_to_errno(err))
+			return err;
 
 		break;
 	}
-	case NETDEV_CHANGEUPPER:
-		if (dsa_slave_dev_check(dev))
-			return dsa_slave_changeupper(dev, ptr);
+	case NETDEV_CHANGEUPPER: {
+		int err;
 
-		if (netif_is_lag_master(dev))
-			return dsa_slave_lag_changeupper(dev, ptr);
+		err = dsa_slave_changeupper(dev, ptr);
+		if (notifier_to_errno(err))
+			return err;
 
+		err = dsa_slave_lag_changeupper(dev, ptr);
+		if (notifier_to_errno(err))
+			return err;
+
 		break;
+	}
 	case NETDEV_CHANGELOWERSTATE: {
 		struct netdev_notifier_changelowerstate_info *info = ptr;
 		struct dsa_port *dp;
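
The switch from 'err != NOTIFY_DONE' to notifier_to_errno() works because notifier return values can carry a negative errno: notifier_from_errno() folds it into the return code together with NOTIFY_STOP_MASK, and notifier_to_errno() recovers it, while NOTIFY_DONE and NOTIFY_OK both decode back to 0, so a handler that simply had nothing to do no longer aborts the chain. A standalone round-trip check, with the encoding copied from include/linux/notifier.h:

#include <assert.h>
#include <stdio.h>

#define NOTIFY_DONE		0x0000
#define NOTIFY_OK		0x0001
#define NOTIFY_STOP_MASK	0x8000

static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	/* an error survives the round trip and flags the chain to stop */
	assert(notifier_to_errno(notifier_from_errno(-16 /* -EBUSY */)) == -16);
	/* "nothing to do" results decode to success instead of stopping */
	assert(notifier_to_errno(NOTIFY_DONE) == 0);
	assert(notifier_to_errno(NOTIFY_OK) == 0);
	printf("ok\n");
	return 0;
}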
@@ -2777,6 +2873,9 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 			if (!dsa_port_is_user(dp))
 				continue;
 
+			if (dp->cpu_dp != cpu_dp)
+				continue;
+
 			list_add(&dp->slave->close_list, &close_list);
 		}