Commit b455dbd9 authored by David S. Miller

Merge branch 'lan966x-lag-support'

Horatiu Vultur says:

====================
net: lan966x: Add lag support

Add lag support for lan966x.
The first 4 patches don't change the current behaviour, they just
prepare for lag support, while the rest add the lag support itself.

v3->v4:
- the aggregation configuration is global for all bonds, so make sure
  that multiple configurations can't be enabled at the same time
- return the error sooner from lan966x_foreign_bridging_check; don't
  continue the search once an error has already been seen
- flush the fdb workqueue when a port leaves a bridge or lag.

v2->v3:
- return the error code from 'switchdev_bridge_port_offload()'
- fix lan966x_foreign_dev_check(), as it was missing lag support
- remove lan966x_lag_mac_add_entry and lan966x_mac_del_entry as
  they are not needed
- fix race conditions when accessing port->bond
- move FDB entries when a new port joins the lag if it has a lower
  chip port number
v1->v2:
- fix the LAG PGIDs when ports go down; this way the last patch of
  the series is not needed anymore.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e46c5b8e e09ce977
@@ -4,6 +4,7 @@ config LAN966X_SWITCH
depends on HAS_IOMEM
depends on OF
depends on NET_SWITCHDEV
depends on BRIDGE || BRIDGE=n
select PHYLINK
select PACKING
help
......
@@ -8,4 +8,4 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
lan966x_ptp.o lan966x_fdma.o
lan966x_ptp.o lan966x_fdma.o lan966x_lag.o
@@ -8,6 +8,7 @@ struct lan966x_fdb_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
struct net_device *orig_dev;
struct lan966x *lan966x;
unsigned long event;
};
@@ -127,75 +128,119 @@ void lan966x_fdb_deinit(struct lan966x *lan966x)
lan966x_fdb_purge_entries(lan966x);
}
static void lan966x_fdb_event_work(struct work_struct *work)
void lan966x_fdb_flush_workqueue(struct lan966x *lan966x)
{
flush_workqueue(lan966x->fdb_work);
}
static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work)
{
struct lan966x_fdb_event_work *fdb_work =
container_of(work, struct lan966x_fdb_event_work, work);
struct switchdev_notifier_fdb_info *fdb_info;
struct net_device *dev = fdb_work->dev;
struct lan966x_port *port;
struct lan966x *lan966x;
int ret;
fdb_info = &fdb_work->fdb_info;
lan966x = fdb_work->lan966x;
port = netdev_priv(fdb_work->orig_dev);
fdb_info = &fdb_work->fdb_info;
if (lan966x_netdevice_check(dev)) {
port = netdev_priv(dev);
switch (fdb_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (!fdb_info->added_by_user)
break;
lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
fdb_info->vid);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
if (!fdb_info->added_by_user)
break;
lan966x_mac_del_entry(lan966x, fdb_info->addr,
fdb_info->vid);
break;
}
}
static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work)
{
struct switchdev_notifier_fdb_info *fdb_info;
struct lan966x *lan966x;
int ret;
switch (fdb_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (!fdb_info->added_by_user)
break;
lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
fdb_info->vid);
lan966x = fdb_work->lan966x;
fdb_info = &fdb_work->fdb_info;
/* In case the bridge is called */
switch (fdb_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
/* If there is no front port in this vlan, there is no
* point to copy the frame to CPU because it would be
* just dropped at later point. So add it only if
* there is a port but it is required to store the fdb
* entry for later point when a port actually gets in
* the vlan.
*/
lan966x_fdb_add_entry(lan966x, fdb_info);
if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
fdb_info->vid))
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
if (!fdb_info->added_by_user)
break;
lan966x_mac_del_entry(lan966x, fdb_info->addr,
fdb_info->vid);
lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
fdb_info->vid);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
ret = lan966x_fdb_del_entry(lan966x, fdb_info);
if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
fdb_info->vid))
break;
}
} else {
if (!netif_is_bridge_master(dev))
goto out;
/* In case the bridge is called */
switch (fdb_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
/* If there is no front port in this vlan, there is no
* point to copy the frame to CPU because it would be
* just dropped at later point. So add it only if
* there is a port but it is required to store the fdb
* entry for later point when a port actually gets in
* the vlan.
*/
lan966x_fdb_add_entry(lan966x, fdb_info);
if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
fdb_info->vid))
break;
lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
fdb_info->vid);
if (ret)
lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
fdb_info->vid);
break;
}
}
static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work)
{
struct switchdev_notifier_fdb_info *fdb_info;
struct lan966x_port *port;
struct lan966x *lan966x;
if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev))
return;
lan966x = fdb_work->lan966x;
port = netdev_priv(fdb_work->dev);
fdb_info = &fdb_work->fdb_info;
switch (fdb_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (!fdb_info->added_by_user)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
ret = lan966x_fdb_del_entry(lan966x, fdb_info);
if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
fdb_info->vid))
break;
if (ret)
lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
fdb_info->vid);
lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
fdb_info->vid);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
if (!fdb_info->added_by_user)
break;
}
lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid);
break;
}
}
static void lan966x_fdb_event_work(struct work_struct *work)
{
struct lan966x_fdb_event_work *fdb_work =
container_of(work, struct lan966x_fdb_event_work, work);
if (lan966x_netdevice_check(fdb_work->orig_dev))
lan966x_fdb_port_event_work(fdb_work);
else if (netif_is_bridge_master(fdb_work->orig_dev))
lan966x_fdb_bridge_event_work(fdb_work);
else if (netif_is_lag_master(fdb_work->orig_dev))
lan966x_fdb_lag_event_work(fdb_work);
out:
kfree(fdb_work->fdb_info.addr);
kfree(fdb_work);
dev_put(dev);
}
int lan966x_handle_fdb(struct net_device *dev,
@@ -221,7 +266,8 @@ int lan966x_handle_fdb(struct net_device *dev,
if (!fdb_work)
return -ENOMEM;
fdb_work->dev = orig_dev;
fdb_work->dev = dev;
fdb_work->orig_dev = orig_dev;
fdb_work->lan966x = lan966x;
fdb_work->event = event;
INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
@@ -231,7 +277,6 @@ int lan966x_handle_fdb(struct net_device *dev,
goto err_addr_alloc;
ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
dev_hold(orig_dev);
queue_work(lan966x->fdb_work, &fdb_work->work);
break;
......
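For context on the fdb_work changes above: switchdev FDB notifications can be raised in atomic context, so the driver defers them to lan966x->fdb_work and now records both the event device and orig_dev, letting lan966x_fdb_event_work() dispatch to the port, bridge or LAG handler. The new lan966x_fdb_flush_workqueue() exists so that a port leaving a bridge or LAG can drain already-queued items before it is unoffloaded. A minimal sketch of that pairing, assuming only the fields shown above (illustrative only, not part of the patch):

/* Queue side: runs from the atomic switchdev notifier. */
static int example_queue_fdb_work(struct lan966x *lan966x,
				  struct net_device *dev,
				  struct net_device *orig_dev,
				  unsigned long event)
{
	struct lan966x_fdb_event_work *fdb_work;

	fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
	if (!fdb_work)
		return -ENOMEM;

	fdb_work->dev = dev;		/* lan966x port (or LAG lower) */
	fdb_work->orig_dev = orig_dev;	/* port, bridge or bond device */
	fdb_work->lan966x = lan966x;
	fdb_work->event = event;
	INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
	/* fdb_info->addr copy and dev_hold() elided for brevity */

	queue_work(lan966x->fdb_work, &fdb_work->work);
	return 0;
}

/* Leave side: drain pending items before unoffloading the port, e.g. */
/*	lan966x_fdb_flush_workqueue(port->lan966x);			*/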
// SPDX-License-Identifier: GPL-2.0+
#include <linux/if_bridge.h>
#include "lan966x_main.h"
static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
{
u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
int p, lag, i;
/* Reset destination and aggregation PGIDS */
for (p = 0; p < lan966x->num_phys_ports; ++p)
lan_wr(ANA_PGID_PGID_SET(BIT(p)),
lan966x, ANA_PGID(p));
for (p = PGID_AGGR; p < PGID_SRC; ++p)
lan_wr(ANA_PGID_PGID_SET(visited),
lan966x, ANA_PGID(p));
/* The visited ports bitmask holds the list of ports offloading any
* bonding interface. Initially we mark all these ports as unvisited,
* then every time we visit a port in this bitmask, we know that it is
* the lowest numbered port, i.e. the one whose logical ID == physical
* port ID == LAG ID. So we mark as visited all further ports in the
* bitmask that are offloading the same bonding interface. This way,
* we set up the aggregation PGIDs only once per bonding interface.
*/
for (p = 0; p < lan966x->num_phys_ports; ++p) {
struct lan966x_port *port = lan966x->ports[p];
if (!port || !port->bond)
continue;
visited &= ~BIT(p);
}
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
struct net_device *bond = lan966x->ports[lag]->bond;
int num_active_ports = 0;
unsigned long bond_mask;
u8 aggr_idx[16];
if (!bond || (visited & BIT(lag)))
continue;
bond_mask = lan966x_lag_get_mask(lan966x, bond);
for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
struct lan966x_port *port = lan966x->ports[p];
lan_wr(ANA_PGID_PGID_SET(bond_mask),
lan966x, ANA_PGID(p));
if (port->lag_tx_active)
aggr_idx[num_active_ports++] = p;
}
for (i = PGID_AGGR; i < PGID_SRC; ++i) {
u32 ac;
ac = lan_rd(lan966x, ANA_PGID(i));
ac &= ~bond_mask;
/* Don't do division by zero if there was no active
* port. Just make all aggregation codes zero.
*/
if (num_active_ports)
ac |= BIT(aggr_idx[i % num_active_ports]);
lan_wr(ANA_PGID_PGID_SET(ac),
lan966x, ANA_PGID(i));
}
/* Mark all ports in the same LAG as visited to avoid applying
* the same config again.
*/
for (p = lag; p < lan966x->num_phys_ports; p++) {
struct lan966x_port *port = lan966x->ports[p];
if (!port)
continue;
if (port->bond == bond)
visited |= BIT(p);
}
}
}
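As a worked example of the aggregation arithmetic above (all port numbers hypothetical, not taken from the patch): with chip ports 2 and 5 in one bond and both tx-active, every member's destination PGID becomes the bond mask 0x24, and consecutive aggregation PGIDs alternate between BIT(2) and BIT(5), so roughly half of the hashed flows egress each member. A standalone sketch of the computation, modelling the PGID_AGGR..PGID_SRC range as 16 entries:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical bond of chip ports 2 and 5, both tx-active. */
	uint8_t aggr_idx[] = { 2, 5 };
	int num_active_ports = 2;
	uint32_t bond_mask = (1u << 2) | (1u << 5);	/* 0x24 */
	int i;

	printf("destination PGID for each member: 0x%02x\n", bond_mask);
	for (i = 0; i < 16; i++)	/* aggregation PGIDs */
		printf("aggr code %2d -> egress port %d\n",
		       i, aggr_idx[i % num_active_ports]);
	return 0;
}

If only one member were tx-active, every aggregation code would point at that port, and with no active member the codes are simply cleared, matching the division-by-zero comment in the function. lan966x_lag_set_port_ids() below then gives both members the logical port ID 2.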
static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
{
struct lan966x_port *port;
u32 bond_mask;
u32 lag_id;
int p;
for (p = 0; p < lan966x->num_phys_ports; ++p) {
port = lan966x->ports[p];
if (!port)
continue;
lag_id = port->chip_port;
bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
if (bond_mask)
lag_id = __ffs(bond_mask);
lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
ANA_PORT_CFG_PORTID_VAL,
lan966x, ANA_PORT_CFG(port->chip_port));
}
}
static void lan966x_lag_update_ids(struct lan966x *lan966x)
{
lan966x_lag_set_port_ids(lan966x);
lan966x_update_fwd_mask(lan966x);
lan966x_lag_set_aggr_pgids(lan966x);
}
int lan966x_lag_port_join(struct lan966x_port *port,
struct net_device *brport_dev,
struct net_device *bond,
struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = port->lan966x;
struct net_device *dev = port->dev;
u32 lag_id = -1;
u32 bond_mask;
int err;
bond_mask = lan966x_lag_get_mask(lan966x, bond);
if (bond_mask)
lag_id = __ffs(bond_mask);
port->bond = bond;
lan966x_lag_update_ids(lan966x);
err = switchdev_bridge_port_offload(brport_dev, dev, port,
&lan966x_switchdev_nb,
&lan966x_switchdev_blocking_nb,
false, extack);
if (err)
goto out;
lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));
if (lan966x_lag_first_port(port->bond, port->dev) &&
lag_id != -1)
lan966x_mac_lag_replace_port_entry(lan966x,
lan966x->ports[lag_id],
port);
return 0;
out:
port->bond = NULL;
lan966x_lag_update_ids(lan966x);
return err;
}
void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
{
struct lan966x *lan966x = port->lan966x;
u32 bond_mask;
u32 lag_id;
if (lan966x_lag_first_port(port->bond, port->dev)) {
bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
bond_mask &= ~BIT(port->chip_port);
if (bond_mask) {
lag_id = __ffs(bond_mask);
lan966x_mac_lag_replace_port_entry(lan966x, port,
lan966x->ports[lag_id]);
} else {
lan966x_mac_lag_remove_port_entry(lan966x, port);
}
}
port->bond = NULL;
lan966x_lag_update_ids(lan966x);
lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
}
static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
enum netdev_lag_hash hash_type)
{
int p;
for (p = 0; p < lan966x->num_phys_ports; ++p) {
struct lan966x_port *port = lan966x->ports[p];
if (!port || !port->bond)
continue;
if (port->hash_type != hash_type)
return false;
}
return true;
}
int lan966x_lag_port_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
struct netdev_lag_upper_info *lui;
struct netlink_ext_ack *extack;
extack = netdev_notifier_info_to_extack(&info->info);
lui = info->upper_info;
if (!lui) {
port->hash_type = NETDEV_LAG_HASH_NONE;
return NOTIFY_DONE;
}
if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack,
"LAG device using unsupported Tx type");
return -EINVAL;
}
if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
NL_SET_ERR_MSG_MOD(extack,
"LAG devices can have only the same hash_type");
return -EINVAL;
}
switch (lui->hash_type) {
case NETDEV_LAG_HASH_L2:
lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
lan966x, ANA_AGGR_CFG);
break;
case NETDEV_LAG_HASH_L34:
lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
lan966x, ANA_AGGR_CFG);
break;
case NETDEV_LAG_HASH_L23:
lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
lan966x, ANA_AGGR_CFG);
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"LAG device using unsupported hash type");
return -EINVAL;
}
port->hash_type = lui->hash_type;
return NOTIFY_OK;
}
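The hash-type handling above is what the v3->v4 note in the cover letter refers to: ANA_AGGR_CFG is a single global register, so every offloaded bond must use the same xmit hash policy, and the check runs at PRECHANGEUPPER time so a conflicting bond is rejected before any port joins it. The hash type itself is supplied by the LAG driver through struct netdev_lag_upper_info; as a hypothetical example, a balance-xor bond with xmit_hash_policy layer3+4 would pass something like:

	struct netdev_lag_upper_info lui = {
		.tx_type   = NETDEV_LAG_TX_TYPE_HASH,
		.hash_type = NETDEV_LAG_HASH_L34,
	};

which the switch statement above maps to IPv4/IPv6 TCP/UDP and IPv4 SIP/DIP hashing in ANA_AGGR_CFG.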
int lan966x_lag_port_changelowerstate(struct net_device *dev,
struct netdev_notifier_changelowerstate_info *info)
{
struct netdev_lag_lower_state_info *lag = info->lower_state_info;
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
bool is_active;
if (!port->bond)
return NOTIFY_DONE;
is_active = lag->link_up && lag->tx_enabled;
if (port->lag_tx_active == is_active)
return NOTIFY_DONE;
port->lag_tx_active = is_active;
lan966x_lag_set_aggr_pgids(lan966x);
return NOTIFY_OK;
}
int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port;
struct net_device *lower;
struct list_head *iter;
int err;
netdev_for_each_lower_dev(dev, lower, iter) {
if (!lan966x_netdevice_check(lower))
continue;
port = netdev_priv(lower);
if (port->bond != dev)
continue;
err = lan966x_port_prechangeupper(lower, dev, info);
if (err)
return err;
}
return NOTIFY_DONE;
}
int lan966x_lag_netdev_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port;
struct net_device *lower;
struct list_head *iter;
int err;
netdev_for_each_lower_dev(dev, lower, iter) {
if (!lan966x_netdevice_check(lower))
continue;
port = netdev_priv(lower);
if (port->bond != dev)
continue;
err = lan966x_port_changeupper(lower, dev, info);
if (err)
return err;
}
return NOTIFY_DONE;
}
bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
unsigned long bond_mask;
if (port->bond != lag)
return false;
bond_mask = lan966x_lag_get_mask(lan966x, lag);
if (bond_mask && port->chip_port == __ffs(bond_mask))
return true;
return false;
}
u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
{
struct lan966x_port *port;
u32 mask = 0;
int p;
if (!bond)
return mask;
for (p = 0; p < lan966x->num_phys_ports; p++) {
port = lan966x->ports[p];
if (!port)
continue;
if (port->bond == bond)
mask |= BIT(p);
}
return mask;
}
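A short illustration of the two helpers above (hypothetical port numbers): the member with the lowest chip port is the bond's "first port"; it owns the bond's locked MAC entries, and its chip port doubles as the LAG ID used elsewhere in this file.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int bond_mask = (1u << 2) | (1u << 5);	/* ports 2 and 5 */

	printf("first port: %d\n", ffs(bond_mask) - 1);	/* 2 */

	bond_mask |= 1u << 1;				/* port 1 joins */
	printf("first port: %d\n", ffs(bond_mask) - 1);	/* now 1 */
	return 0;
}

When the first port changes like this, the join/leave paths earlier in this file migrate the locked MAC entries with lan966x_mac_lag_replace_port_entry().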
@@ -22,6 +22,7 @@ struct lan966x_mac_entry {
u16 vid;
u16 port_index;
int row;
bool lag;
};
struct lan966x_mac_raw_entry {
@@ -69,15 +70,14 @@ static void lan966x_mac_select(struct lan966x *lan966x,
lan_wr(mach, lan966x, ANA_MACHDATA);
}
static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
bool cpu_copy,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type)
static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid,
bool cpu_copy,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type)
{
int ret;
lockdep_assert_held(&lan966x->mac_lock);
spin_lock(&lan966x->mac_lock);
lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */
@@ -89,7 +89,19 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
lan966x, ANA_MACACCESS);
ret = lan966x_mac_wait_for_completion(lan966x);
return lan966x_mac_wait_for_completion(lan966x);
}
static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
bool cpu_copy,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type)
{
int ret;
spin_lock(&lan966x->mac_lock);
ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type);
spin_unlock(&lan966x->mac_lock);
return ret;
@@ -119,6 +131,16 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
}
static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type)
{
WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type);
}
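The *_locked variants introduced here exist because the new LAG helpers in this file walk lan966x->mac_entries while already holding mac_lock, so they must learn/forget without re-taking the lock (hence the lockdep_assert_held() added in __lan966x_mac_learn_locked()). A minimal sketch of that caller pattern, mirroring lan966x_mac_lag_replace_port_entry() further down (illustrative only, not part of the patch):

static void example_locked_caller(struct lan966x *lan966x,
				  struct lan966x_mac_entry *mac_entry,
				  struct lan966x_port *dst)
{
	spin_lock(&lan966x->mac_lock);
	/* list_for_each_entry() over lan966x->mac_entries elided */
	lan966x_mac_forget_locked(lan966x, mac_entry->mac, mac_entry->vid,
				  ENTRYTYPE_LOCKED);
	lan966x_mac_learn_locked(lan966x, dst->chip_port, mac_entry->mac,
				 mac_entry->vid, ENTRYTYPE_LOCKED);
	spin_unlock(&lan966x->mac_lock);
}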
static int lan966x_mac_forget_locked(struct lan966x *lan966x,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
@@ -178,8 +200,9 @@ void lan966x_mac_init(struct lan966x *lan966x)
INIT_LIST_HEAD(&lan966x->mac_entries);
}
static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
u16 vid, u16 port_index)
static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port,
const unsigned char *mac,
u16 vid)
{
struct lan966x_mac_entry *mac_entry;
@@ -189,8 +212,9 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma
memcpy(mac_entry->mac, mac, ETH_ALEN);
mac_entry->vid = vid;
mac_entry->port_index = port_index;
mac_entry->port_index = port->chip_port;
mac_entry->row = LAN966X_MAC_INVALID_ROW;
mac_entry->lag = port->bond ? true : false;
return mac_entry;
}
@@ -269,7 +293,7 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
goto mac_learn;
}
mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
mac_entry = lan966x_mac_alloc_entry(port, addr, vid);
if (!mac_entry) {
spin_unlock(&lan966x->mac_lock);
return -ENOMEM;
@@ -278,7 +302,8 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid,
port->bond ?: port->dev);
mac_learn:
lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
@@ -309,6 +334,50 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
return 0;
}
void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
struct lan966x_port *src,
struct lan966x_port *dst)
{
struct lan966x_mac_entry *mac_entry;
spin_lock(&lan966x->mac_lock);
list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
if (mac_entry->port_index == src->chip_port &&
mac_entry->lag) {
lan966x_mac_forget_locked(lan966x, mac_entry->mac,
mac_entry->vid,
ENTRYTYPE_LOCKED);
lan966x_mac_learn_locked(lan966x, dst->chip_port,
mac_entry->mac, mac_entry->vid,
ENTRYTYPE_LOCKED);
mac_entry->port_index = dst->chip_port;
}
}
spin_unlock(&lan966x->mac_lock);
}
void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
struct lan966x_port *src)
{
struct lan966x_mac_entry *mac_entry, *tmp;
spin_lock(&lan966x->mac_lock);
list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
list) {
if (mac_entry->port_index == src->chip_port &&
mac_entry->lag) {
lan966x_mac_forget_locked(lan966x, mac_entry->mac,
mac_entry->vid,
ENTRYTYPE_LOCKED);
list_del(&mac_entry->list);
kfree(mac_entry);
}
}
spin_unlock(&lan966x->mac_lock);
}
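Tying the two helpers above back to the LAG join/leave paths in lan966x_lag.c: the bond's locked MAC entries always live on the first (lowest-numbered) member, so they are migrated whenever that member changes and dropped when the last member leaves. A hypothetical sequence (illustrative only, not part of the patch):

static void example_lag_fdb_migration(struct lan966x *lan966x)
{
	/* Bond of chip ports 2 and 5: port 2 is the first port, so the
	 * bond's locked MAC entries sit on chip port 2. */

	/* Port 1 joins; it has a lower chip port number, becomes the
	 * new first port and inherits the entries: */
	lan966x_mac_lag_replace_port_entry(lan966x, lan966x->ports[2],
					   lan966x->ports[1]);

	/* Port 1 later leaves; the entries fall back to the next
	 * lowest remaining member, port 2: */
	lan966x_mac_lag_replace_port_entry(lan966x, lan966x->ports[1],
					   lan966x->ports[2]);

	/* When the last remaining member leaves, the entries are
	 * removed instead: */
	lan966x_mac_lag_remove_port_entry(lan966x, lan966x->ports[2]);
}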
void lan966x_mac_purge_entries(struct lan966x *lan966x)
{
struct lan966x_mac_entry *mac_entry, *tmp;
@@ -354,6 +423,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
struct lan966x_mac_entry *mac_entry, *tmp;
unsigned char mac[ETH_ALEN] __aligned(2);
struct list_head mac_deleted_entries;
struct lan966x_port *port;
u32 dest_idx;
u32 column;
u16 vid;
@@ -406,9 +476,10 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
/* Notify the bridge that the entry doesn't exist
* anymore in the HW
*/
port = lan966x->ports[mac_entry->port_index];
lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
mac_entry->mac, mac_entry->vid,
lan966x->ports[mac_entry->port_index]->dev);
port->bond ?: port->dev);
list_del(&mac_entry->list);
kfree(mac_entry);
}
@@ -440,7 +511,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
continue;
}
mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
port = lan966x->ports[dest_idx];
mac_entry = lan966x_mac_alloc_entry(port, mac, vid);
if (!mac_entry) {
spin_unlock(&lan966x->mac_lock);
return;
@@ -451,7 +523,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
spin_unlock(&lan966x->mac_lock);
lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
mac, vid, lan966x->ports[dest_idx]->dev);
mac, vid, port->bond ?: port->dev);
}
}
......
@@ -292,11 +292,17 @@ struct lan966x_port {
u8 ptp_cmd;
u16 ts_id;
struct sk_buff_head tx_skbs;
struct net_device *bond;
bool lag_tx_active;
enum netdev_lag_hash hash_type;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
extern const struct ethtool_ops lan966x_ethtool_ops;
extern struct notifier_block lan966x_switchdev_nb __read_mostly;
extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
bool lan966x_netdevice_check(const struct net_device *dev);
@@ -345,6 +351,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x,
struct lan966x_port *port,
const unsigned char *addr,
u16 vid);
void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
struct lan966x_port *src,
struct lan966x_port *dst);
void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
struct lan966x_port *src);
void lan966x_mac_purge_entries(struct lan966x *lan966x);
irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
@@ -369,6 +380,7 @@ void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
int lan966x_fdb_init(struct lan966x *lan966x);
void lan966x_fdb_deinit(struct lan966x *lan966x);
void lan966x_fdb_flush_workqueue(struct lan966x *lan966x);
int lan966x_handle_fdb(struct net_device *dev,
struct net_device *orig_dev,
unsigned long event, const void *ctx,
@@ -406,6 +418,33 @@ int lan966x_fdma_init(struct lan966x *lan966x);
void lan966x_fdma_deinit(struct lan966x *lan966x);
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
int lan966x_lag_port_join(struct lan966x_port *port,
struct net_device *brport_dev,
struct net_device *bond,
struct netlink_ext_ack *extack);
void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond);
int lan966x_lag_port_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info);
int lan966x_lag_port_changelowerstate(struct net_device *dev,
struct netdev_notifier_changelowerstate_info *info);
int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info);
int lan966x_lag_netdev_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info);
bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev);
u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond);
int lan966x_port_changeupper(struct net_device *dev,
struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info);
int lan966x_port_prechangeupper(struct net_device *dev,
struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info);
void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state);
void lan966x_port_ageing_set(struct lan966x_port *port,
unsigned long ageing_clock_t);
void lan966x_update_fwd_mask(struct lan966x *lan966x);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
int gbase, int ginst,
......
@@ -363,6 +363,51 @@ enum lan966x_target {
#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
/* ANA:COMMON:AGGR_CFG */
#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4)
#define ANA_AGGR_CFG_AC_RND_ENA BIT(6)
#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\
FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x)
#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\
FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x)
#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5)
#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\
FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x)
#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\
FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x)
#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4)
#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\
FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x)
#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\
FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x)
#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3)
#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\
FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\
FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2)
#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\
FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\
FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1)
#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\
FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\
FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0)
#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\
FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\
FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
......
@@ -6,8 +6,6 @@
#include "lan966x_main.h"
static struct notifier_block lan966x_netdevice_nb __read_mostly;
static struct notifier_block lan966x_switchdev_nb __read_mostly;
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
u32 pgid_ip)
@@ -132,7 +130,7 @@ static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
return 0;
}
static void lan966x_update_fwd_mask(struct lan966x *lan966x)
void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
int i;
@@ -140,9 +138,14 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
struct lan966x_port *port = lan966x->ports[i];
unsigned long mask = 0;
if (port && lan966x->bridge_fwd_mask & BIT(i))
if (port && lan966x->bridge_fwd_mask & BIT(i)) {
mask = lan966x->bridge_fwd_mask & ~BIT(i);
if (port->bond)
mask &= ~lan966x_lag_get_mask(lan966x,
port->bond);
}
mask |= BIT(CPU_PORT);
lan_wr(ANA_PGID_PGID_SET(mask),
@@ -150,7 +153,7 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
}
}
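With the change above, a port that is part of a bond is also removed from the source masks of the other members, so a frame received on one member of an offloaded bond is never forwarded out another member of the same bond. A standalone illustration with hypothetical numbers (four bridged ports 0-3, ports 1 and 2 bonded, CPU port modelled as bit 8):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bridge_fwd_mask = 0xf;			/* ports 0-3 bridged */
	uint32_t bond_mask = (1u << 1) | (1u << 2);	/* ports 1 and 2 bonded */
	uint32_t cpu_bit = 1u << 8;
	int i;

	for (i = 0; i < 4; i++) {
		uint32_t mask = bridge_fwd_mask & ~(1u << i);

		if (i == 1 || i == 2)	/* member of the bond */
			mask &= ~bond_mask;

		mask |= cpu_bit;
		printf("port %d source mask: 0x%03x\n", i, mask);
	}
	/* Ports 1 and 2 both end up with 0x109 (ports 0, 3 and the
	 * CPU): they no longer forward to each other. */
	return 0;
}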
static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
struct lan966x *lan966x = port->lan966x;
bool learn_ena = false;
@@ -171,8 +174,8 @@ static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
lan966x_update_fwd_mask(lan966x);
}
static void lan966x_port_ageing_set(struct lan966x_port *port,
unsigned long ageing_clock_t)
void lan966x_port_ageing_set(struct lan966x_port *port,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
@@ -241,6 +244,7 @@ static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
}
static int lan966x_port_bridge_join(struct lan966x_port *port,
struct net_device *brport_dev,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
@@ -258,7 +262,7 @@ static int lan966x_port_bridge_join(struct lan966x_port *port,
}
}
err = switchdev_bridge_port_offload(dev, dev, port,
err = switchdev_bridge_port_offload(brport_dev, dev, port,
&lan966x_switchdev_nb,
&lan966x_switchdev_blocking_nb,
false, extack);
@@ -295,8 +299,9 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_apply(port);
}
static int lan966x_port_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
int lan966x_port_changeupper(struct net_device *dev,
struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
@@ -306,44 +311,68 @@ static int lan966x_port_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
err = lan966x_port_bridge_join(port, info->upper_dev,
err = lan966x_port_bridge_join(port, brport_dev,
info->upper_dev,
extack);
else
lan966x_port_bridge_leave(port, info->upper_dev);
}
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking)
err = lan966x_lag_port_join(port, info->upper_dev,
info->upper_dev,
extack);
else
lan966x_lag_port_leave(port, info->upper_dev);
}
return err;
}
static int lan966x_port_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
int lan966x_port_prechangeupper(struct net_device *dev,
struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
int err = NOTIFY_DONE;
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
switchdev_bridge_port_unoffload(port->dev, port,
NULL, NULL);
if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
lan966x_fdb_flush_workqueue(port->lan966x);
}
return NOTIFY_DONE;
if (netif_is_lag_master(info->upper_dev)) {
err = lan966x_lag_port_prechangeupper(dev, info);
if (err || info->linking)
return err;
switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
lan966x_fdb_flush_workqueue(port->lan966x);
}
return err;
}
static int lan966x_foreign_bridging_check(struct net_device *bridge,
static int lan966x_foreign_bridging_check(struct net_device *upper,
bool *has_foreign,
bool *seen_lan966x,
struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = NULL;
bool has_foreign = false;
struct net_device *dev;
struct list_head *iter;
if (!netif_is_bridge_master(bridge))
if (!netif_is_bridge_master(upper) &&
!netif_is_lag_master(upper))
return 0;
netdev_for_each_lower_dev(bridge, dev, iter) {
netdev_for_each_lower_dev(upper, dev, iter) {
if (lan966x_netdevice_check(dev)) {
struct lan966x_port *port = netdev_priv(dev);
if (lan966x) {
/* Bridge already has at least one port of a
/* Upper already has at least one port of a
* lan966x switch inside it, check that it's
* the same instance of the driver.
*/
@@ -354,15 +383,24 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
}
} else {
/* This is the first lan966x port inside this
* bridge
* upper device
*/
lan966x = port->lan966x;
*seen_lan966x = true;
}
} else if (netif_is_lag_master(dev)) {
/* Allow to have bond interfaces that have only lan966x
* devices
*/
if (lan966x_foreign_bridging_check(dev, has_foreign,
seen_lan966x,
extack))
return -EINVAL;
} else {
has_foreign = true;
*has_foreign = true;
}
if (lan966x && has_foreign) {
if (*seen_lan966x && *has_foreign) {
NL_SET_ERR_MSG_MOD(extack,
"Bridging lan966x ports with foreign interfaces disallowed");
return -EINVAL;
@@ -375,7 +413,12 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
static int lan966x_bridge_check(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
bool has_foreign = false;
bool seen_lan966x = false;
return lan966x_foreign_bridging_check(info->upper_dev,
&has_foreign,
&seen_lan966x,
info->info.extack);
}
@@ -386,21 +429,44 @@ static int lan966x_netdevice_port_event(struct net_device *dev,
int err = 0;
if (!lan966x_netdevice_check(dev)) {
if (event == NETDEV_CHANGEUPPER)
return lan966x_bridge_check(dev, ptr);
switch (event) {
case NETDEV_CHANGEUPPER:
case NETDEV_PRECHANGEUPPER:
err = lan966x_bridge_check(dev, ptr);
if (err)
return err;
if (netif_is_lag_master(dev)) {
if (event == NETDEV_CHANGEUPPER)
err = lan966x_lag_netdev_changeupper(dev,
ptr);
else
err = lan966x_lag_netdev_prechangeupper(dev,
ptr);
return err;
}
break;
default:
return 0;
}
return 0;
}
switch (event) {
case NETDEV_PRECHANGEUPPER:
err = lan966x_port_prechangeupper(dev, ptr);
err = lan966x_port_prechangeupper(dev, dev, ptr);
break;
case NETDEV_CHANGEUPPER:
err = lan966x_bridge_check(dev, ptr);
if (err)
return err;
err = lan966x_port_changeupper(dev, ptr);
err = lan966x_port_changeupper(dev, dev, ptr);
break;
case NETDEV_CHANGELOWERSTATE:
err = lan966x_lag_port_changelowerstate(dev, ptr);
break;
}
@@ -418,19 +484,23 @@ static int lan966x_netdevice_event(struct notifier_block *nb,
return notifier_from_errno(ret);
}
/* We don't offload uppers such as LAG as bridge ports, so every device except
* the bridge itself is foreign.
*/
static bool lan966x_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
int i;
if (netif_is_bridge_master(foreign_dev))
if (lan966x->bridge == foreign_dev)
return false;
if (netif_is_lag_master(foreign_dev))
for (i = 0; i < lan966x->num_phys_ports; ++i)
if (lan966x->ports[i] &&
lan966x->ports[i]->bond == foreign_dev)
return false;
return true;
}
@@ -571,11 +641,11 @@ static struct notifier_block lan966x_netdevice_nb __read_mostly = {
.notifier_call = lan966x_netdevice_event,
};
static struct notifier_block lan966x_switchdev_nb __read_mostly = {
struct notifier_block lan966x_switchdev_nb __read_mostly = {
.notifier_call = lan966x_switchdev_event,
};
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
.notifier_call = lan966x_switchdev_blocking_event,
};
......