Commit c5b8b34c authored by David S. Miller

Merge branch 'bonding-team-offload'

Jiri Pirko says:

====================
bonding/team offload + mlxsw implementation

This patchset introduces the infrastructure needed for link aggregation
offload - for both team and bonding. It also implements the offload
in the mlxsw driver.

Particularly, this patchset introduces the possibility for an upper driver
(bond/team/bridge/..) to pass type-specific info down to notifier listeners.
The info is passed along with the NETDEV_CHANGEUPPER/NETDEV_PRECHANGEUPPER
notifiers. Listeners (drivers of the netdevs being enslaved) can react
accordingly.
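
For illustration only, a minimal sketch of how such a listener could consume
the new info. The function name, the way it would be registered and the
HASH-only policy are made up for the example and are not part of this
patchset:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_netdevice_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netdev_lag_upper_info *lag_upper_info;

	/* A real driver would first check that the netdev in question
	 * (netdev_notifier_info_to_dev(ptr)) is one of its own ports.
	 */
	if (event != NETDEV_PRECHANGEUPPER)
		return NOTIFY_DONE;
	if (!info->linking || !netif_is_lag_master(info->upper_dev))
		return NOTIFY_DONE;
	lag_upper_info = info->upper_info;
	if (lag_upper_info &&
	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		/* refuse enslavement under a TX policy we cannot offload */
		return notifier_from_errno(-EOPNOTSUPP);
	return NOTIFY_DONE;
}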

The other extension is for run-time use. This patchset introduces a
new netdev notifier type - NETDEV_CHANGELOWERSTATE. Along with this
notification, the upper driver (bond/team/bridge/..) can pass
information about a lower device change, particularly the link-up and
TX-enabled states. Listeners (drivers of the netdevs being enslaved)
can react accordingly.
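
Again purely as an illustration (the function name is made up; a real driver
would program its hardware LAG membership at the marked spot), a listener for
the new notifier could look like this:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_changelowerstate_event(struct notifier_block *unused,
				      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changelowerstate_info *info = ptr;
	struct netdev_lag_lower_state_info *lag_lower_info;

	if (event != NETDEV_CHANGELOWERSTATE)
		return NOTIFY_DONE;
	if (!netif_is_lag_port(dev))
		return NOTIFY_DONE;
	lag_lower_info = info->lower_state_info;
	/* Distribute traffic on this port only while the upper reports it
	 * as both link-up and TX-enabled, otherwise stop using it.
	 */
	if (lag_lower_info->link_up && lag_lower_info->tx_enabled)
		netdev_dbg(dev, "lower state: up and TX-enabled\n");
	else
		netdev_dbg(dev, "lower state: down or TX-disabled\n");
	return NOTIFY_DONE;
}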

The last part of the patchset is the implementation of LAG offload in mlxsw,
using both of the previously introduced infrastructure extensions.

Note that the bond-specific (and ugly) NETDEV_BONDING_INFO used by mlx4
can be removed and mlx4 can use the extensions this patchset adds.
I plan to convert it and get rid of NETDEV_BONDING_INFO in
a follow-up patchset.

v2->v3:
- one small fix in patch 1
v1->v2:
- added patches 1 and 2 per Andy's request
- a couple of more or less cosmetic changes described in a couple of other patches
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3b195843 74581206
...@@ -103,6 +103,7 @@ Netdevice notifier events which can be failed are: ...@@ -103,6 +103,7 @@ Netdevice notifier events which can be failed are:
* NETDEV_POST_INIT * NETDEV_POST_INIT
* NETDEV_PRECHANGEMTU * NETDEV_PRECHANGEMTU
* NETDEV_PRECHANGEUPPER * NETDEV_PRECHANGEUPPER
* NETDEV_CHANGEUPPER
Example: Inject netdevice mtu change error (-22 == -EINVAL) Example: Inject netdevice mtu change error (-22 == -EINVAL)
......
...@@ -93,7 +93,8 @@ enum ad_link_speed_type { ...@@ -93,7 +93,8 @@ enum ad_link_speed_type {
AD_LINK_SPEED_10000MBPS, AD_LINK_SPEED_10000MBPS,
AD_LINK_SPEED_20000MBPS, AD_LINK_SPEED_20000MBPS,
AD_LINK_SPEED_40000MBPS, AD_LINK_SPEED_40000MBPS,
AD_LINK_SPEED_56000MBPS AD_LINK_SPEED_56000MBPS,
AD_LINK_SPEED_100000MBPS,
}; };
/* compare MAC addresses */ /* compare MAC addresses */
...@@ -258,6 +259,7 @@ static inline int __check_agg_selection_timer(struct port *port) ...@@ -258,6 +259,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_20000MBPS * %AD_LINK_SPEED_20000MBPS
* %AD_LINK_SPEED_40000MBPS * %AD_LINK_SPEED_40000MBPS
* %AD_LINK_SPEED_56000MBPS * %AD_LINK_SPEED_56000MBPS
* %AD_LINK_SPEED_100000MBPS
*/ */
static u16 __get_link_speed(struct port *port) static u16 __get_link_speed(struct port *port)
{ {
...@@ -305,6 +307,10 @@ static u16 __get_link_speed(struct port *port) ...@@ -305,6 +307,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_56000MBPS; speed = AD_LINK_SPEED_56000MBPS;
break; break;
case SPEED_100000:
speed = AD_LINK_SPEED_100000MBPS;
break;
default: default:
/* unknown speed value from ethtool. shouldn't happen */ /* unknown speed value from ethtool. shouldn't happen */
speed = 0; speed = 0;
...@@ -681,6 +687,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) ...@@ -681,6 +687,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_56000MBPS: case AD_LINK_SPEED_56000MBPS:
bandwidth = aggregator->num_of_ports * 56000; bandwidth = aggregator->num_of_ports * 56000;
break; break;
case AD_LINK_SPEED_100000MBPS:
bandwidth = aggregator->num_of_ports * 100000;
break;
default: default:
bandwidth = 0; /* to silence the compiler */ bandwidth = 0; /* to silence the compiler */
} }
......
...@@ -830,7 +830,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) ...@@ -830,7 +830,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
} }
new_active->delay = 0; new_active->delay = 0;
bond_set_slave_link_state(new_active, BOND_LINK_UP); bond_set_slave_link_state(new_active, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
if (BOND_MODE(bond) == BOND_MODE_8023AD) if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP); bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
...@@ -1198,26 +1199,43 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) ...@@ -1198,26 +1199,43 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
return ret; return ret;
} }
static int bond_master_upper_dev_link(struct net_device *bond_dev, static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
struct net_device *slave_dev,
struct slave *slave)
{ {
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
case BOND_MODE_ACTIVEBACKUP:
return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
case BOND_MODE_BROADCAST:
return NETDEV_LAG_TX_TYPE_BROADCAST;
case BOND_MODE_XOR:
case BOND_MODE_8023AD:
return NETDEV_LAG_TX_TYPE_HASH;
default:
return NETDEV_LAG_TX_TYPE_UNKNOWN;
}
}
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave)
{
struct netdev_lag_upper_info lag_upper_info;
int err; int err;
err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave); lag_upper_info.tx_type = bond_lag_tx_type(bond);
err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
&lag_upper_info);
if (err) if (err)
return err; return err;
slave_dev->flags |= IFF_SLAVE; slave->dev->flags |= IFF_SLAVE;
rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
return 0; return 0;
} }
static void bond_upper_dev_unlink(struct net_device *bond_dev, static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
struct net_device *slave_dev)
{ {
netdev_upper_dev_unlink(slave_dev, bond_dev); netdev_upper_dev_unlink(slave->dev, bond->dev);
slave_dev->flags &= ~IFF_SLAVE; slave->dev->flags &= ~IFF_SLAVE;
rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
} }
static struct slave *bond_alloc_slave(struct bonding *bond) static struct slave *bond_alloc_slave(struct bonding *bond)
...@@ -1299,6 +1317,16 @@ void bond_queue_slave_event(struct slave *slave) ...@@ -1299,6 +1317,16 @@ void bond_queue_slave_event(struct slave *slave)
queue_delayed_work(slave->bond->wq, &nnw->work, 0); queue_delayed_work(slave->bond->wq, &nnw->work, 0);
} }
void bond_lower_state_changed(struct slave *slave)
{
struct netdev_lag_lower_state_info info;
info.link_up = slave->link == BOND_LINK_UP ||
slave->link == BOND_LINK_FAIL;
info.tx_enabled = bond_is_active_slave(slave);
netdev_lower_state_changed(slave->dev, &info);
}
/* enslave device <slave> to bond device <master> */ /* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{ {
...@@ -1563,21 +1591,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) ...@@ -1563,21 +1591,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) { if (bond->params.updelay) {
bond_set_slave_link_state(new_slave, bond_set_slave_link_state(new_slave,
BOND_LINK_BACK); BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_NOW);
new_slave->delay = bond->params.updelay; new_slave->delay = bond->params.updelay;
} else { } else {
bond_set_slave_link_state(new_slave, bond_set_slave_link_state(new_slave,
BOND_LINK_UP); BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
} }
} else { } else {
bond_set_slave_link_state(new_slave, BOND_LINK_DOWN); bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_NOW);
} }
} else if (bond->params.arp_interval) { } else if (bond->params.arp_interval) {
bond_set_slave_link_state(new_slave, bond_set_slave_link_state(new_slave,
(netif_carrier_ok(slave_dev) ? (netif_carrier_ok(slave_dev) ?
BOND_LINK_UP : BOND_LINK_DOWN)); BOND_LINK_UP : BOND_LINK_DOWN),
BOND_SLAVE_NOTIFY_NOW);
} else { } else {
bond_set_slave_link_state(new_slave, BOND_LINK_UP); bond_set_slave_link_state(new_slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
} }
if (new_slave->link != BOND_LINK_DOWN) if (new_slave->link != BOND_LINK_DOWN)
...@@ -1662,7 +1695,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) ...@@ -1662,7 +1695,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_detach; goto err_detach;
} }
res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave); res = bond_master_upper_dev_link(bond, new_slave);
if (res) { if (res) {
netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res); netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
goto err_unregister; goto err_unregister;
...@@ -1698,7 +1731,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) ...@@ -1698,7 +1731,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* Undo stages on error */ /* Undo stages on error */
err_upper_unlink: err_upper_unlink:
bond_upper_dev_unlink(bond_dev, slave_dev); bond_upper_dev_unlink(bond, new_slave);
err_unregister: err_unregister:
netdev_rx_handler_unregister(slave_dev); netdev_rx_handler_unregister(slave_dev);
...@@ -1799,12 +1832,14 @@ static int __bond_release_one(struct net_device *bond_dev, ...@@ -1799,12 +1832,14 @@ static int __bond_release_one(struct net_device *bond_dev,
return -EINVAL; return -EINVAL;
} }
bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
bond_sysfs_slave_del(slave); bond_sysfs_slave_del(slave);
/* recompute stats just before removing the slave */ /* recompute stats just before removing the slave */
bond_get_stats(bond->dev, &bond->bond_stats); bond_get_stats(bond->dev, &bond->bond_stats);
bond_upper_dev_unlink(bond_dev, slave_dev); bond_upper_dev_unlink(bond, slave);
/* unregister rx_handler early so bond_handle_frame wouldn't be called /* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore. * for this slave anymore.
*/ */
...@@ -1996,7 +2031,8 @@ static int bond_miimon_inspect(struct bonding *bond) ...@@ -1996,7 +2031,8 @@ static int bond_miimon_inspect(struct bonding *bond)
if (link_state) if (link_state)
continue; continue;
bond_set_slave_link_state(slave, BOND_LINK_FAIL); bond_set_slave_link_state(slave, BOND_LINK_FAIL,
BOND_SLAVE_NOTIFY_LATER);
slave->delay = bond->params.downdelay; slave->delay = bond->params.downdelay;
if (slave->delay) { if (slave->delay) {
netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
...@@ -2011,7 +2047,8 @@ static int bond_miimon_inspect(struct bonding *bond) ...@@ -2011,7 +2047,8 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_FAIL: case BOND_LINK_FAIL:
if (link_state) { if (link_state) {
/* recovered before downdelay expired */ /* recovered before downdelay expired */
bond_set_slave_link_state(slave, BOND_LINK_UP); bond_set_slave_link_state(slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_LATER);
slave->last_link_up = jiffies; slave->last_link_up = jiffies;
netdev_info(bond->dev, "link status up again after %d ms for interface %s\n", netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
(bond->params.downdelay - slave->delay) * (bond->params.downdelay - slave->delay) *
...@@ -2033,7 +2070,8 @@ static int bond_miimon_inspect(struct bonding *bond) ...@@ -2033,7 +2070,8 @@ static int bond_miimon_inspect(struct bonding *bond)
if (!link_state) if (!link_state)
continue; continue;
bond_set_slave_link_state(slave, BOND_LINK_BACK); bond_set_slave_link_state(slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
slave->delay = bond->params.updelay; slave->delay = bond->params.updelay;
if (slave->delay) { if (slave->delay) {
...@@ -2047,7 +2085,8 @@ static int bond_miimon_inspect(struct bonding *bond) ...@@ -2047,7 +2085,8 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK: case BOND_LINK_BACK:
if (!link_state) { if (!link_state) {
bond_set_slave_link_state(slave, bond_set_slave_link_state(slave,
BOND_LINK_DOWN); BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_LATER);
netdev_info(bond->dev, "link status down again after %d ms for interface %s\n", netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
(bond->params.updelay - slave->delay) * (bond->params.updelay - slave->delay) *
bond->params.miimon, bond->params.miimon,
...@@ -2085,7 +2124,8 @@ static void bond_miimon_commit(struct bonding *bond) ...@@ -2085,7 +2124,8 @@ static void bond_miimon_commit(struct bonding *bond)
continue; continue;
case BOND_LINK_UP: case BOND_LINK_UP:
bond_set_slave_link_state(slave, BOND_LINK_UP); bond_set_slave_link_state(slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
slave->last_link_up = jiffies; slave->last_link_up = jiffies;
primary = rtnl_dereference(bond->primary_slave); primary = rtnl_dereference(bond->primary_slave);
...@@ -2125,7 +2165,8 @@ static void bond_miimon_commit(struct bonding *bond) ...@@ -2125,7 +2165,8 @@ static void bond_miimon_commit(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX) if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++; slave->link_failure_count++;
bond_set_slave_link_state(slave, BOND_LINK_DOWN); bond_set_slave_link_state(slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_NOW);
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
BOND_MODE(bond) == BOND_MODE_8023AD) BOND_MODE(bond) == BOND_MODE_8023AD)
...@@ -2708,7 +2749,8 @@ static void bond_ab_arp_commit(struct bonding *bond) ...@@ -2708,7 +2749,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
struct slave *current_arp_slave; struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave); current_arp_slave = rtnl_dereference(bond->current_arp_slave);
bond_set_slave_link_state(slave, BOND_LINK_UP); bond_set_slave_link_state(slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
if (current_arp_slave) { if (current_arp_slave) {
bond_set_slave_inactive_flags( bond_set_slave_inactive_flags(
current_arp_slave, current_arp_slave,
...@@ -2731,7 +2773,8 @@ static void bond_ab_arp_commit(struct bonding *bond) ...@@ -2731,7 +2773,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX) if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++; slave->link_failure_count++;
bond_set_slave_link_state(slave, BOND_LINK_DOWN); bond_set_slave_link_state(slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_NOW);
bond_set_slave_inactive_flags(slave, bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW); BOND_SLAVE_NOTIFY_NOW);
...@@ -2810,7 +2853,8 @@ static bool bond_ab_arp_probe(struct bonding *bond) ...@@ -2810,7 +2853,8 @@ static bool bond_ab_arp_probe(struct bonding *bond)
* up when it is actually down * up when it is actually down
*/ */
if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
bond_set_slave_link_state(slave, BOND_LINK_DOWN); bond_set_slave_link_state(slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_LATER);
if (slave->link_failure_count < UINT_MAX) if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++; slave->link_failure_count++;
...@@ -2830,7 +2874,8 @@ static bool bond_ab_arp_probe(struct bonding *bond) ...@@ -2830,7 +2874,8 @@ static bool bond_ab_arp_probe(struct bonding *bond)
if (!new_slave) if (!new_slave)
goto check_state; goto check_state;
bond_set_slave_link_state(new_slave, BOND_LINK_BACK); bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave); bond_arp_send_all(bond, new_slave);
new_slave->last_link_up = jiffies; new_slave->last_link_up = jiffies;
...@@ -2838,7 +2883,7 @@ static bool bond_ab_arp_probe(struct bonding *bond) ...@@ -2838,7 +2883,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
check_state: check_state:
bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->should_notify) { if (slave->should_notify || slave->should_notify_link) {
should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
break; break;
} }
...@@ -2893,8 +2938,10 @@ static void bond_activebackup_arp_mon(struct work_struct *work) ...@@ -2893,8 +2938,10 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
if (should_notify_peers) if (should_notify_peers)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev); bond->dev);
if (should_notify_rtnl) if (should_notify_rtnl) {
bond_slave_state_notify(bond); bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
}
rtnl_unlock(); rtnl_unlock();
} }
......
...@@ -105,6 +105,9 @@ struct mlxsw_core { ...@@ -105,6 +105,9 @@ struct mlxsw_core {
struct debugfs_blob_wrapper vsd_blob; struct debugfs_blob_wrapper vsd_blob;
struct debugfs_blob_wrapper psid_blob; struct debugfs_blob_wrapper psid_blob;
} dbg; } dbg;
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
struct mlxsw_hwmon *hwmon; struct mlxsw_hwmon *hwmon;
unsigned long driver_priv[0]; unsigned long driver_priv[0];
/* driver_priv has to be always the last item */ /* driver_priv has to be always the last item */
...@@ -815,6 +818,17 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, ...@@ -815,6 +818,17 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_alloc_stats; goto err_alloc_stats;
} }
if (mlxsw_driver->profile->used_max_lag &&
mlxsw_driver->profile->used_max_port_per_lag) {
alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
mlxsw_driver->profile->max_port_per_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
goto err_alloc_lag_mapping;
}
}
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
if (err) if (err)
goto err_bus_init; goto err_bus_init;
...@@ -847,6 +861,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, ...@@ -847,6 +861,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_emad_init: err_emad_init:
mlxsw_bus->fini(bus_priv); mlxsw_bus->fini(bus_priv);
err_bus_init: err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
free_percpu(mlxsw_core->pcpu_stats); free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats: err_alloc_stats:
kfree(mlxsw_core); kfree(mlxsw_core);
...@@ -865,6 +881,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) ...@@ -865,6 +881,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
mlxsw_hwmon_fini(mlxsw_core->hwmon); mlxsw_hwmon_fini(mlxsw_core->hwmon);
mlxsw_emad_fini(mlxsw_core); mlxsw_emad_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv); mlxsw_core->bus->fini(mlxsw_core->bus_priv);
kfree(mlxsw_core->lag.mapping);
free_percpu(mlxsw_core->pcpu_stats); free_percpu(mlxsw_core->pcpu_stats);
kfree(mlxsw_core); kfree(mlxsw_core);
mlxsw_core_driver_put(device_kind); mlxsw_core_driver_put(device_kind);
...@@ -1196,11 +1213,25 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, ...@@ -1196,11 +1213,25 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
struct mlxsw_rx_listener_item *rxl_item; struct mlxsw_rx_listener_item *rxl_item;
const struct mlxsw_rx_listener *rxl; const struct mlxsw_rx_listener *rxl;
struct mlxsw_core_pcpu_stats *pcpu_stats; struct mlxsw_core_pcpu_stats *pcpu_stats;
u8 local_port = rx_info->sys_port; u8 local_port;
bool found = false; bool found = false;
dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n", if (rx_info->is_lag) {
__func__, rx_info->sys_port, rx_info->trap_id); dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
__func__, rx_info->u.lag_id,
rx_info->trap_id);
/* Upper layer does not care if the skb came from LAG or not,
* so just get the local_port for the lag port and push it up.
*/
local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
rx_info->u.lag_id,
rx_info->lag_port_index);
} else {
local_port = rx_info->u.sys_port;
}
dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
__func__, local_port, rx_info->trap_id);
if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
(local_port >= MLXSW_PORT_MAX_PORTS)) (local_port >= MLXSW_PORT_MAX_PORTS))
...@@ -1244,6 +1275,48 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, ...@@ -1244,6 +1275,48 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
} }
EXPORT_SYMBOL(mlxsw_core_skb_receive); EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
port_index;
}
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index, u8 local_port)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port)
{
int i;
for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
if (mlxsw_core->lag.mapping[index] == local_port)
mlxsw_core->lag.mapping[index] = 0;
}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct, u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size, char *in_mbox, size_t in_mbox_size,
......
...@@ -112,13 +112,25 @@ int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, ...@@ -112,13 +112,25 @@ int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload); const struct mlxsw_reg_info *reg, char *payload);
struct mlxsw_rx_info { struct mlxsw_rx_info {
bool is_lag;
union {
u16 sys_port; u16 sys_port;
u16 lag_id;
} u;
u8 lag_port_index;
int trap_id; int trap_id;
}; };
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
struct mlxsw_rx_info *rx_info); struct mlxsw_rx_info *rx_info);
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index, u8 local_port);
u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
struct mlxsw_swid_config { struct mlxsw_swid_config {
......
...@@ -686,11 +686,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, ...@@ -686,11 +686,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (q->consumer_counter++ != consumer_counter_limit) if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
/* We do not support lag now */ if (mlxsw_pci_cqe_lag_get(cqe)) {
if (mlxsw_pci_cqe_lag_get(cqe)) rx_info.is_lag = true;
goto drop; rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
} else {
rx_info.is_lag = false;
rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
}
rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
byte_count = mlxsw_pci_cqe_byte_count_get(cqe); byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
...@@ -699,7 +703,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, ...@@ -699,7 +703,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
skb_put(skb, byte_count); skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
put_new_skb:
memset(wqe, 0, q->elem_size); memset(wqe, 0, q->elem_size);
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
if (err) if (err)
...@@ -708,10 +711,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, ...@@ -708,10 +711,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
q->producer_counter++; q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
return; return;
drop:
dev_kfree_skb_any(skb);
goto put_new_skb;
} }
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
......
...@@ -129,13 +129,15 @@ MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); ...@@ -129,13 +129,15 @@ MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
*/ */
MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
/* pci_cqe_system_port /* pci_cqe_system_port/lag_id
* When lag=0: System port on which the packet was received * When lag=0: System port on which the packet was received
* When lag=1: * When lag=1:
* bits [15:4] LAG ID on which the packet was received * bits [15:4] LAG ID on which the packet was received
* bits [3:0] sub_port on which the packet was received * bits [3:0] sub_port on which the packet was received
*/ */
MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
/* pci_cqe_wqe_counter /* pci_cqe_wqe_counter
* WQE count of the WQEs completed on the associated dqn * WQE count of the WQEs completed on the associated dqn
......
...@@ -46,9 +46,16 @@ ...@@ -46,9 +46,16 @@
#include "core.h" #include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID #define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16
struct mlxsw_sp_port; struct mlxsw_sp_port;
struct mlxsw_sp_upper {
struct net_device *dev;
unsigned int ref_count;
};
struct mlxsw_sp { struct mlxsw_sp {
unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
...@@ -63,12 +70,16 @@ struct mlxsw_sp { ...@@ -63,12 +70,16 @@ struct mlxsw_sp {
} fdb_notify; } fdb_notify;
#define MLXSW_SP_DEFAULT_AGEING_TIME 300 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time; u32 ageing_time;
struct { struct mlxsw_sp_upper master_bridge;
struct net_device *dev; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
unsigned int ref_count;
} master_bridge;
}; };
static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
return &mlxsw_sp->lags[lag_id];
}
struct mlxsw_sp_port_pcpu_stats { struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets; u64 rx_packets;
u64 rx_bytes; u64 rx_bytes;
...@@ -87,8 +98,10 @@ struct mlxsw_sp_port { ...@@ -87,8 +98,10 @@ struct mlxsw_sp_port {
u8 learning:1, u8 learning:1,
learning_sync:1, learning_sync:1,
uc_flood:1, uc_flood:1,
bridged:1; bridged:1,
lagged:1;
u16 pvid; u16 pvid;
u16 lag_id;
/* 802.1Q bridge VLANs */ /* 802.1Q bridge VLANs */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/* VLAN interfaces */ /* VLAN interfaces */
...@@ -96,6 +109,18 @@ struct mlxsw_sp_port { ...@@ -96,6 +109,18 @@ struct mlxsw_sp_port {
u16 nr_vfids; u16 nr_vfids;
}; };
static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u8 local_port;
local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
lag_id, port_index);
mlxsw_sp_port = mlxsw_sp->ports[local_port];
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
enum mlxsw_sp_flood_table { enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BM, MLXSW_SP_FLOOD_TABLE_BM,
......
...@@ -490,32 +490,56 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -490,32 +490,56 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
untagged_flag, pvid_flag); untagged_flag, pvid_flag);
} }
static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port, static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
const char *mac, u16 vid, bool adding, const char *mac, u16 vid, bool adding,
bool dynamic) bool dynamic)
{ {
enum mlxsw_reg_sfd_rec_policy policy; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
enum mlxsw_reg_sfd_op op;
char *sfd_pl; char *sfd_pl;
int err; int err;
if (!vid)
vid = mlxsw_sp_port->pvid;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
if (!sfd_pl) if (!sfd_pl)
return -ENOMEM; return -ENOMEM;
policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
MLXSW_REG_SFD_OP_WRITE_REMOVE;
mlxsw_reg_sfd_pack(sfd_pl, op, 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_port->local_port); mlxsw_sp_port->local_port);
err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd), err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
sfd_pl); kfree(sfd_pl);
return err;
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
const char *mac, u16 vid, bool adding,
bool dynamic)
{
char *sfd_pl;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
if (!sfd_pl)
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
lag_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl); kfree(sfd_pl);
return err; return err;
...@@ -526,11 +550,21 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -526,11 +550,21 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb, const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans) struct switchdev_trans *trans)
{ {
u16 vid = fdb->vid;
if (switchdev_trans_ph_prepare(trans)) if (switchdev_trans_ph_prepare(trans))
return 0; return 0;
return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, if (!vid)
true, false); vid = mlxsw_sp_port->pvid;
if (!mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
fdb->addr, vid, true, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->lag_id,
fdb->addr, vid, true, false);
} }
static int mlxsw_sp_port_obj_add(struct net_device *dev, static int mlxsw_sp_port_obj_add(struct net_device *dev,
...@@ -645,7 +679,14 @@ static int ...@@ -645,7 +679,14 @@ static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb) const struct switchdev_obj_port_fdb *fdb)
{ {
return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, if (!mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
fdb->addr, fdb->vid,
false, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->lag_id,
fdb->addr, fdb->vid,
false, false); false, false);
} }
...@@ -672,14 +713,30 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, ...@@ -672,14 +713,30 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
return err; return err;
} }
static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
int i;
for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
}
return NULL;
}
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_fdb *fdb, struct switchdev_obj_port_fdb *fdb,
switchdev_obj_dump_cb_t *cb) switchdev_obj_dump_cb_t *cb)
{ {
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *sfd_pl; char *sfd_pl;
char mac[ETH_ALEN]; char mac[ETH_ALEN];
u16 vid; u16 vid;
u8 local_port; u8 local_port;
u16 lag_id;
u8 num_rec; u8 num_rec;
int stored_err = 0; int stored_err = 0;
int i; int i;
...@@ -692,8 +749,7 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -692,8 +749,7 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
do { do {
mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT); mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
MLXSW_REG(sfd), sfd_pl);
if (err) if (err)
goto out; goto out;
...@@ -718,6 +774,20 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -718,6 +774,20 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
if (err) if (err)
stored_err = err; stored_err = err;
} }
break;
case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
mac, &vid, &lag_id);
if (mlxsw_sp_port ==
mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
fdb->vid = vid;
err = cb(&fdb->obj);
if (err)
stored_err = err;
}
break;
} }
} }
} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
...@@ -779,6 +849,21 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { ...@@ -779,6 +849,21 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
}; };
static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
bool adding, char *mac, u16 vid,
struct net_device *dev)
{
struct switchdev_notifier_fdb_info info;
unsigned long notifier_type;
if (learning && learning_sync) {
info.addr = mac;
info.vid = vid;
notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
call_switchdev_notifiers(notifier_type, dev, &info.info);
}
}
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index, char *sfn_pl, int rec_index,
bool adding) bool adding)
...@@ -796,7 +881,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, ...@@ -796,7 +881,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
return; return;
} }
err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid, err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, vid,
adding && mlxsw_sp_port->learning, true); adding && mlxsw_sp_port->learning, true);
if (err) { if (err) {
if (net_ratelimit()) if (net_ratelimit())
...@@ -804,16 +889,41 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, ...@@ -804,16 +889,41 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
return; return;
} }
if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) { mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
struct switchdev_notifier_fdb_info info; mlxsw_sp_port->learning_sync,
unsigned long notifier_type; adding, mac, vid, mlxsw_sp_port->dev);
}
info.addr = mac; static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
info.vid = vid; char *sfn_pl, int rec_index,
notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; bool adding)
call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev, {
&info.info); struct mlxsw_sp_port *mlxsw_sp_port;
char mac[ETH_ALEN];
u16 lag_id;
u16 vid;
int err;
mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &vid, &lag_id);
mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
if (!mlxsw_sp_port) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
return;
} }
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, vid,
adding && mlxsw_sp_port->learning,
true);
if (err) {
if (net_ratelimit())
netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
return;
}
mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
mlxsw_sp_port->learning_sync,
adding, mac, vid,
mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
} }
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
...@@ -828,6 +938,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, ...@@ -828,6 +938,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
rec_index, false); rec_index, false);
break; break;
case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
rec_index, true);
break;
case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
rec_index, false);
break;
} }
} }
......
...@@ -91,10 +91,24 @@ void team_modeop_port_change_dev_addr(struct team *team, ...@@ -91,10 +91,24 @@ void team_modeop_port_change_dev_addr(struct team *team,
} }
EXPORT_SYMBOL(team_modeop_port_change_dev_addr); EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
static void team_lower_state_changed(struct team_port *port)
{
struct netdev_lag_lower_state_info info;
info.link_up = port->linkup;
info.tx_enabled = team_port_enabled(port);
netdev_lower_state_changed(port->dev, &info);
}
static void team_refresh_port_linkup(struct team_port *port) static void team_refresh_port_linkup(struct team_port *port)
{ {
port->linkup = port->user.linkup_enabled ? port->user.linkup : bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
port->state.linkup; port->state.linkup;
if (port->linkup != new_linkup) {
port->linkup = new_linkup;
team_lower_state_changed(port);
}
} }
...@@ -932,6 +946,7 @@ static void team_port_enable(struct team *team, ...@@ -932,6 +946,7 @@ static void team_port_enable(struct team *team,
team->ops.port_enabled(team, port); team->ops.port_enabled(team, port);
team_notify_peers(team); team_notify_peers(team);
team_mcast_rejoin(team); team_mcast_rejoin(team);
team_lower_state_changed(port);
} }
static void __reconstruct_port_hlist(struct team *team, int rm_index) static void __reconstruct_port_hlist(struct team *team, int rm_index)
...@@ -963,6 +978,7 @@ static void team_port_disable(struct team *team, ...@@ -963,6 +978,7 @@ static void team_port_disable(struct team *team,
team_adjust_ops(team); team_adjust_ops(team);
team_notify_peers(team); team_notify_peers(team);
team_mcast_rejoin(team); team_mcast_rejoin(team);
team_lower_state_changed(port);
} }
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
...@@ -1078,23 +1094,24 @@ static void team_port_disable_netpoll(struct team_port *port) ...@@ -1078,23 +1094,24 @@ static void team_port_disable_netpoll(struct team_port *port)
} }
#endif #endif
static int team_upper_dev_link(struct net_device *dev, static int team_upper_dev_link(struct team *team, struct team_port *port)
struct net_device *port_dev)
{ {
struct netdev_lag_upper_info lag_upper_info;
int err; int err;
err = netdev_master_upper_dev_link(port_dev, dev); lag_upper_info.tx_type = team->mode->lag_tx_type;
err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
&lag_upper_info);
if (err) if (err)
return err; return err;
port_dev->priv_flags |= IFF_TEAM_PORT; port->dev->priv_flags |= IFF_TEAM_PORT;
return 0; return 0;
} }
static void team_upper_dev_unlink(struct net_device *dev, static void team_upper_dev_unlink(struct team *team, struct team_port *port)
struct net_device *port_dev)
{ {
netdev_upper_dev_unlink(port_dev, dev); netdev_upper_dev_unlink(port->dev, team->dev);
port_dev->priv_flags &= ~IFF_TEAM_PORT; port->dev->priv_flags &= ~IFF_TEAM_PORT;
} }
static void __team_port_change_port_added(struct team_port *port, bool linkup); static void __team_port_change_port_added(struct team_port *port, bool linkup);
...@@ -1194,7 +1211,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) ...@@ -1194,7 +1211,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_handler_register; goto err_handler_register;
} }
err = team_upper_dev_link(dev, port_dev); err = team_upper_dev_link(team, port);
if (err) { if (err) {
netdev_err(dev, "Device %s failed to set upper link\n", netdev_err(dev, "Device %s failed to set upper link\n",
portname); portname);
...@@ -1220,7 +1237,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) ...@@ -1220,7 +1237,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return 0; return 0;
err_option_port_add: err_option_port_add:
team_upper_dev_unlink(dev, port_dev); team_upper_dev_unlink(team, port);
err_set_upper_link: err_set_upper_link:
netdev_rx_handler_unregister(port_dev); netdev_rx_handler_unregister(port_dev);
...@@ -1264,7 +1281,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) ...@@ -1264,7 +1281,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
team_port_disable(team, port); team_port_disable(team, port);
list_del_rcu(&port->list); list_del_rcu(&port->list);
team_upper_dev_unlink(dev, port_dev); team_upper_dev_unlink(team, port);
netdev_rx_handler_unregister(port_dev); netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port); team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev); vlan_vids_del_by_dev(port_dev, dev);
...@@ -2054,6 +2071,7 @@ static void team_setup(struct net_device *dev) ...@@ -2054,6 +2071,7 @@ static void team_setup(struct net_device *dev)
dev->flags |= IFF_MULTICAST; dev->flags |= IFF_MULTICAST;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_NO_QUEUE;
dev->priv_flags |= IFF_TEAM;
/* /*
* Indicate we support unicast address filtering. That way core won't * Indicate we support unicast address filtering. That way core won't
...@@ -2420,9 +2438,13 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) ...@@ -2420,9 +2438,13 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
struct nlattr *nl_option; struct nlattr *nl_option;
LIST_HEAD(opt_inst_list); LIST_HEAD(opt_inst_list);
rtnl_lock();
team = team_nl_team_get(info); team = team_nl_team_get(info);
if (!team) if (!team) {
return -EINVAL; err = -EINVAL;
goto rtnl_unlock;
}
err = -EINVAL; err = -EINVAL;
if (!info->attrs[TEAM_ATTR_LIST_OPTION]) { if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
...@@ -2549,7 +2571,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) ...@@ -2549,7 +2571,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
team_put: team_put:
team_nl_team_put(team); team_nl_team_put(team);
rtnl_unlock:
rtnl_unlock();
return err; return err;
} }
......
...@@ -127,6 +127,7 @@ static const struct team_mode ab_mode = { ...@@ -127,6 +127,7 @@ static const struct team_mode ab_mode = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.priv_size = sizeof(struct ab_priv), .priv_size = sizeof(struct ab_priv),
.ops = &ab_mode_ops, .ops = &ab_mode_ops,
.lag_tx_type = NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
}; };
static int __init ab_init_module(void) static int __init ab_init_module(void)
......
...@@ -56,6 +56,7 @@ static const struct team_mode bc_mode = { ...@@ -56,6 +56,7 @@ static const struct team_mode bc_mode = {
.kind = "broadcast", .kind = "broadcast",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.ops = &bc_mode_ops, .ops = &bc_mode_ops,
.lag_tx_type = NETDEV_LAG_TX_TYPE_BROADCAST,
}; };
static int __init bc_init_module(void) static int __init bc_init_module(void)
......
...@@ -661,6 +661,7 @@ static const struct team_mode lb_mode = { ...@@ -661,6 +661,7 @@ static const struct team_mode lb_mode = {
.priv_size = sizeof(struct lb_priv), .priv_size = sizeof(struct lb_priv),
.port_priv_size = sizeof(struct lb_port_priv), .port_priv_size = sizeof(struct lb_port_priv),
.ops = &lb_mode_ops, .ops = &lb_mode_ops,
.lag_tx_type = NETDEV_LAG_TX_TYPE_HASH,
}; };
static int __init lb_init_module(void) static int __init lb_init_module(void)
......
...@@ -46,6 +46,7 @@ static const struct team_mode rnd_mode = { ...@@ -46,6 +46,7 @@ static const struct team_mode rnd_mode = {
.kind = "random", .kind = "random",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.ops = &rnd_mode_ops, .ops = &rnd_mode_ops,
.lag_tx_type = NETDEV_LAG_TX_TYPE_RANDOM,
}; };
static int __init rnd_init_module(void) static int __init rnd_init_module(void)
......
...@@ -58,6 +58,7 @@ static const struct team_mode rr_mode = { ...@@ -58,6 +58,7 @@ static const struct team_mode rr_mode = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.priv_size = sizeof(struct rr_priv), .priv_size = sizeof(struct rr_priv),
.ops = &rr_mode_ops, .ops = &rr_mode_ops,
.lag_tx_type = NETDEV_LAG_TX_TYPE_ROUNDROBIN,
}; };
static int __init rr_init_module(void) static int __init rr_init_module(void)
......
...@@ -624,7 +624,7 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) ...@@ -624,7 +624,7 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
goto out_fail; goto out_fail;
} }
ret = netdev_master_upper_dev_link(port_dev, dev); ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
if (ret < 0) if (ret < 0)
goto out_unregister; goto out_unregister;
......
...@@ -164,6 +164,7 @@ struct team_mode { ...@@ -164,6 +164,7 @@ struct team_mode {
size_t priv_size; size_t priv_size;
size_t port_priv_size; size_t port_priv_size;
const struct team_mode_ops *ops; const struct team_mode_ops *ops;
enum netdev_lag_tx_type lag_tx_type;
}; };
#define TEAM_PORT_HASHBITS 4 #define TEAM_PORT_HASHBITS 4
......
...@@ -1273,6 +1273,7 @@ struct net_device_ops { ...@@ -1273,6 +1273,7 @@ struct net_device_ops {
* @IFF_NO_QUEUE: device can run without qdisc attached * @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master * @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
* @IFF_TEAM: device is a team device
*/ */
enum netdev_priv_flags { enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0, IFF_802_1Q_VLAN = 1<<0,
...@@ -1299,6 +1300,7 @@ enum netdev_priv_flags { ...@@ -1299,6 +1300,7 @@ enum netdev_priv_flags {
IFF_NO_QUEUE = 1<<21, IFF_NO_QUEUE = 1<<21,
IFF_OPENVSWITCH = 1<<22, IFF_OPENVSWITCH = 1<<22,
IFF_L3MDEV_SLAVE = 1<<23, IFF_L3MDEV_SLAVE = 1<<23,
IFF_TEAM = 1<<24,
}; };
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
...@@ -1325,6 +1327,7 @@ enum netdev_priv_flags { ...@@ -1325,6 +1327,7 @@ enum netdev_priv_flags {
#define IFF_NO_QUEUE IFF_NO_QUEUE #define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH #define IFF_OPENVSWITCH IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
#define IFF_TEAM IFF_TEAM
/** /**
* struct net_device - The DEVICE structure. * struct net_device - The DEVICE structure.
...@@ -2107,6 +2110,24 @@ struct pcpu_sw_netstats { ...@@ -2107,6 +2110,24 @@ struct pcpu_sw_netstats {
#define netdev_alloc_pcpu_stats(type) \ #define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL); __netdev_alloc_pcpu_stats(type, GFP_KERNEL);
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
NETDEV_LAG_TX_TYPE_RANDOM,
NETDEV_LAG_TX_TYPE_BROADCAST,
NETDEV_LAG_TX_TYPE_ROUNDROBIN,
NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
NETDEV_LAG_TX_TYPE_HASH,
};
struct netdev_lag_upper_info {
enum netdev_lag_tx_type tx_type;
};
struct netdev_lag_lower_state_info {
u8 link_up : 1,
tx_enabled : 1;
};
#include <linux/notifier.h> #include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink /* netdevice notifier chain. Please remember to update the rtnetlink
...@@ -2142,6 +2163,7 @@ struct pcpu_sw_netstats { ...@@ -2142,6 +2163,7 @@ struct pcpu_sw_netstats {
#define NETDEV_CHANGEINFODATA 0x0018 #define NETDEV_CHANGEINFODATA 0x0018
#define NETDEV_BONDING_INFO 0x0019 #define NETDEV_BONDING_INFO 0x0019
#define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_PRECHANGEUPPER 0x001A
#define NETDEV_CHANGELOWERSTATE 0x001B
int register_netdevice_notifier(struct notifier_block *nb); int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb);
...@@ -2160,6 +2182,12 @@ struct netdev_notifier_changeupper_info { ...@@ -2160,6 +2182,12 @@ struct netdev_notifier_changeupper_info {
struct net_device *upper_dev; /* new upper dev */ struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */ bool master; /* is upper dev master */
bool linking; /* is the nofication for link or unlink */ bool linking; /* is the nofication for link or unlink */
void *upper_info; /* upper dev info */
};
struct netdev_notifier_changelowerstate_info {
struct netdev_notifier_info info; /* must be first */
void *lower_state_info; /* is lower dev state */
}; };
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
...@@ -3616,15 +3644,15 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev); ...@@ -3616,15 +3644,15 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev, int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev);
int netdev_master_upper_dev_link_private(struct net_device *dev,
struct net_device *upper_dev, struct net_device *upper_dev,
void *private); void *upper_priv, void *upper_info);
void netdev_upper_dev_unlink(struct net_device *dev, void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev); struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev, void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev); struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
/* RSS keys are 40 or 52 bytes long */ /* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52 #define NETDEV_RSS_KEY_LEN 52
...@@ -3889,6 +3917,26 @@ static inline bool netif_is_ovs_master(const struct net_device *dev) ...@@ -3889,6 +3917,26 @@ static inline bool netif_is_ovs_master(const struct net_device *dev)
return dev->priv_flags & IFF_OPENVSWITCH; return dev->priv_flags & IFF_OPENVSWITCH;
} }
static inline bool netif_is_team_master(struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
}
static inline bool netif_is_team_port(struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM_PORT;
}
static inline bool netif_is_lag_master(struct net_device *dev)
{
return netif_is_bond_master(dev) || netif_is_team_master(dev);
}
static inline bool netif_is_lag_port(struct net_device *dev)
{
return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev) static inline void netif_keep_dst(struct net_device *dev)
{ {
......
...@@ -165,7 +165,8 @@ struct slave { ...@@ -165,7 +165,8 @@ struct slave {
u8 backup:1, /* indicates backup slave. Value corresponds with u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1, /* indicates inactive slave */ inactive:1, /* indicates inactive slave */
should_notify:1; /* indicateds whether the state changed */ should_notify:1, /* indicates whether the state changed */
should_notify_link:1; /* indicates whether the link changed */
u8 duplex; u8 duplex;
u32 original_mtu; u32 original_mtu;
u32 link_failure_count; u32 link_failure_count;
...@@ -246,6 +247,7 @@ struct bonding { ...@@ -246,6 +247,7 @@ struct bonding {
((struct slave *) rtnl_dereference(dev->rx_handler_data)) ((struct slave *) rtnl_dereference(dev->rx_handler_data))
void bond_queue_slave_event(struct slave *slave); void bond_queue_slave_event(struct slave *slave);
void bond_lower_state_changed(struct slave *slave);
struct bond_vlan_tag { struct bond_vlan_tag {
__be16 vlan_proto; __be16 vlan_proto;
...@@ -327,6 +329,7 @@ static inline void bond_set_active_slave(struct slave *slave) ...@@ -327,6 +329,7 @@ static inline void bond_set_active_slave(struct slave *slave)
if (slave->backup) { if (slave->backup) {
slave->backup = 0; slave->backup = 0;
bond_queue_slave_event(slave); bond_queue_slave_event(slave);
bond_lower_state_changed(slave);
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
} }
} }
...@@ -336,6 +339,7 @@ static inline void bond_set_backup_slave(struct slave *slave) ...@@ -336,6 +339,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
if (!slave->backup) { if (!slave->backup) {
slave->backup = 1; slave->backup = 1;
bond_queue_slave_event(slave); bond_queue_slave_event(slave);
bond_lower_state_changed(slave);
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
} }
} }
...@@ -348,6 +352,7 @@ static inline void bond_set_slave_state(struct slave *slave, ...@@ -348,6 +352,7 @@ static inline void bond_set_slave_state(struct slave *slave,
slave->backup = slave_state; slave->backup = slave_state;
if (notify) { if (notify) {
bond_lower_state_changed(slave);
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
bond_queue_slave_event(slave); bond_queue_slave_event(slave);
slave->should_notify = 0; slave->should_notify = 0;
...@@ -379,6 +384,7 @@ static inline void bond_slave_state_notify(struct bonding *bond) ...@@ -379,6 +384,7 @@ static inline void bond_slave_state_notify(struct bonding *bond)
bond_for_each_slave(bond, tmp, iter) { bond_for_each_slave(bond, tmp, iter) {
if (tmp->should_notify) { if (tmp->should_notify) {
bond_lower_state_changed(tmp);
rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC); rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
tmp->should_notify = 0; tmp->should_notify = 0;
} }
...@@ -504,10 +510,37 @@ static inline bool bond_is_slave_inactive(struct slave *slave) ...@@ -504,10 +510,37 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive; return slave->inactive;
} }
static inline void bond_set_slave_link_state(struct slave *slave, int state) static inline void bond_set_slave_link_state(struct slave *slave, int state,
bool notify)
{ {
if (slave->link == state)
return;
slave->link = state; slave->link = state;
if (notify) {
bond_queue_slave_event(slave); bond_queue_slave_event(slave);
bond_lower_state_changed(slave);
slave->should_notify_link = 0;
} else {
if (slave->should_notify_link)
slave->should_notify_link = 0;
else
slave->should_notify_link = 1;
}
}
static inline void bond_slave_link_notify(struct bonding *bond)
{
struct list_head *iter;
struct slave *tmp;
bond_for_each_slave(bond, tmp, iter) {
if (tmp->should_notify_link) {
bond_queue_slave_event(tmp);
bond_lower_state_changed(tmp);
tmp->should_notify_link = 0;
}
}
} }
static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local) static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
......
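bond_set_slave_link_state() above now takes a notify flag: with notify == false the change is only recorded in should_notify_link, and a later bond_slave_link_notify() pass emits the queued events for all slaves whose link actually changed. A minimal sketch of that deferred pattern, assuming a hypothetical caller that already holds RTNL (bond_lower_state_changed() ends up in netdev_lower_state_changed(), which asserts it):

#include <net/bonding.h>

static void foo_refresh_links(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter) {
		int link = netif_carrier_ok(slave->dev) ?
			   BOND_LINK_UP : BOND_LINK_DOWN;

		/* notify == false: only mark should_notify_link */
		bond_set_slave_link_state(slave, link, false);
	}

	/* flush the marks: one queued slave event plus lower state
	 * notification per slave whose link state changed above
	 */
	bond_slave_link_notify(bond);
}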
...@@ -18,6 +18,7 @@ static struct notifier_err_inject netdev_notifier_err_inject = { ...@@ -18,6 +18,7 @@ static struct notifier_err_inject netdev_notifier_err_inject = {
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) }, { NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) }, { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) }, { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) },
{} {}
} }
}; };
......
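With NETDEV_CHANGEUPPER added to the error-injection table above, and __netdev_upper_dev_link() below now honouring the notifier return value, a listener can also veto an enslavement itself by returning an errno wrapped with notifier_from_errno(). A minimal sketch with hypothetical "foo_" names, not taken from this series:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_changeupper_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_CHANGEUPPER)
		return NOTIFY_DONE;

	/* Refuse uppers this (hypothetical) hardware cannot offload; the
	 * errno is propagated back to the caller that tried to link the
	 * two devices.
	 */
	if (info->linking && !netif_is_lag_master(info->upper_dev))
		return notifier_from_errno(-EOPNOTSUPP);

	return NOTIFY_DONE;
}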
...@@ -464,7 +464,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, ...@@ -464,7 +464,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface; hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface); bat_priv = netdev_priv(hard_iface->soft_iface);
ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface); ret = netdev_master_upper_dev_link(hard_iface->net_dev,
soft_iface, NULL, NULL);
if (ret) if (ret)
goto err_dev; goto err_dev;
......
...@@ -493,7 +493,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) ...@@ -493,7 +493,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
dev->priv_flags |= IFF_BRIDGE_PORT; dev->priv_flags |= IFF_BRIDGE_PORT;
err = netdev_master_upper_dev_link(dev, br->dev); err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL);
if (err) if (err)
goto err5; goto err5;
......
...@@ -5421,7 +5421,7 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, ...@@ -5421,7 +5421,7 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
static int __netdev_upper_dev_link(struct net_device *dev, static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master, struct net_device *upper_dev, bool master,
void *private) void *upper_priv, void *upper_info)
{ {
struct netdev_notifier_changeupper_info changeupper_info; struct netdev_notifier_changeupper_info changeupper_info;
struct netdev_adjacent *i, *j, *to_i, *to_j; struct netdev_adjacent *i, *j, *to_i, *to_j;
...@@ -5445,6 +5445,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, ...@@ -5445,6 +5445,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
changeupper_info.upper_dev = upper_dev; changeupper_info.upper_dev = upper_dev;
changeupper_info.master = master; changeupper_info.master = master;
changeupper_info.linking = true; changeupper_info.linking = true;
changeupper_info.upper_info = upper_info;
ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
&changeupper_info.info); &changeupper_info.info);
...@@ -5452,7 +5453,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, ...@@ -5452,7 +5453,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
if (ret) if (ret)
return ret; return ret;
ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private, ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
master); master);
if (ret) if (ret)
return ret; return ret;
...@@ -5490,8 +5491,12 @@ static int __netdev_upper_dev_link(struct net_device *dev, ...@@ -5490,8 +5491,12 @@ static int __netdev_upper_dev_link(struct net_device *dev,
goto rollback_lower_mesh; goto rollback_lower_mesh;
} }
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
&changeupper_info.info); &changeupper_info.info);
ret = notifier_to_errno(ret);
if (ret)
goto rollback_lower_mesh;
return 0; return 0;
rollback_lower_mesh: rollback_lower_mesh:
...@@ -5545,7 +5550,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, ...@@ -5545,7 +5550,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
int netdev_upper_dev_link(struct net_device *dev, int netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev) struct net_device *upper_dev)
{ {
return __netdev_upper_dev_link(dev, upper_dev, false, NULL); return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
} }
EXPORT_SYMBOL(netdev_upper_dev_link); EXPORT_SYMBOL(netdev_upper_dev_link);
...@@ -5553,6 +5558,8 @@ EXPORT_SYMBOL(netdev_upper_dev_link); ...@@ -5553,6 +5558,8 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
* netdev_master_upper_dev_link - Add a master link to the upper device * netdev_master_upper_dev_link - Add a master link to the upper device
* @dev: device * @dev: device
* @upper_dev: new upper device * @upper_dev: new upper device
* @upper_priv: upper device private
* @upper_info: upper info to be passed down via notifier
* *
* Adds a link to device which is upper to this one. In this case, only * Adds a link to device which is upper to this one. In this case, only
* one master upper device can be linked, although other non-master devices * one master upper device can be linked, although other non-master devices
...@@ -5561,19 +5568,13 @@ EXPORT_SYMBOL(netdev_upper_dev_link); ...@@ -5561,19 +5568,13 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
* counts are adjusted and the function returns zero. * counts are adjusted and the function returns zero.
*/ */
int netdev_master_upper_dev_link(struct net_device *dev, int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev)
{
return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
int netdev_master_upper_dev_link_private(struct net_device *dev,
struct net_device *upper_dev, struct net_device *upper_dev,
void *private) void *upper_priv, void *upper_info)
{ {
return __netdev_upper_dev_link(dev, upper_dev, true, private); return __netdev_upper_dev_link(dev, upper_dev, true,
upper_priv, upper_info);
} }
EXPORT_SYMBOL(netdev_master_upper_dev_link_private); EXPORT_SYMBOL(netdev_master_upper_dev_link);
/** /**
* netdev_upper_dev_unlink - Removes a link to upper device * netdev_upper_dev_unlink - Removes a link to upper device
...@@ -5755,6 +5756,26 @@ int dev_get_nest_level(struct net_device *dev, ...@@ -5755,6 +5756,26 @@ int dev_get_nest_level(struct net_device *dev,
} }
EXPORT_SYMBOL(dev_get_nest_level); EXPORT_SYMBOL(dev_get_nest_level);
/**
* netdev_lower_state_changed - Dispatch event about lower device state change
* @lower_dev: device
* @lower_state_info: state to dispatch
*
* Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
* The caller must hold the RTNL lock.
*/
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info)
{
struct netdev_notifier_changelowerstate_info changelowerstate_info;
ASSERT_RTNL();
changelowerstate_info.lower_state_info = lower_state_info;
call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
&changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);
static void dev_change_rx_flags(struct net_device *dev, int flags) static void dev_change_rx_flags(struct net_device *dev, int flags)
{ {
const struct net_device_ops *ops = dev->netdev_ops; const struct net_device_ops *ops = dev->netdev_ops;
......
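netdev_lower_state_changed() above is the run-time counterpart to the CHANGEUPPER extension: the LAG master calls it under RTNL with a type-specific blob, and enslaved drivers receive it via NETDEV_CHANGELOWERSTATE. A minimal sketch of both ends, assuming the struct netdev_lag_lower_state_info (link_up/tx_enabled bits) introduced elsewhere in this series; the "foo_" names are hypothetical:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>

/* upper (bond/team) side: dispatch the new state of one port */
static void foo_port_state_changed(struct net_device *port_dev,
				   bool link_up, bool tx_enabled)
{
	struct netdev_lag_lower_state_info info = {
		.link_up = link_up,
		.tx_enabled = tx_enabled,
	};

	ASSERT_RTNL();
	netdev_lower_state_changed(port_dev, &info);
}

/* lower (port driver) side: react to the notification */
static int foo_lowerstate_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changelowerstate_info *info = ptr;
	struct netdev_lag_lower_state_info *lag_info;

	if (event != NETDEV_CHANGELOWERSTATE || !netif_is_lag_port(dev))
		return NOTIFY_DONE;

	lag_info = info->lower_state_info;
	netdev_dbg(dev, "LAG lower state: link_up %d tx_enabled %d\n",
		   lag_info->link_up, lag_info->tx_enabled);

	return NOTIFY_OK;
}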
...@@ -105,7 +105,7 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name) ...@@ -105,7 +105,7 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name)
rtnl_lock(); rtnl_lock();
err = netdev_master_upper_dev_link(vport->dev, err = netdev_master_upper_dev_link(vport->dev,
get_dpdev(vport->dp)); get_dpdev(vport->dp), NULL, NULL);
if (err) if (err)
goto error_unlock; goto error_unlock;
......