Commit 71e8727e authored by David S. Miller

Merge branch 'mlxsw-vrf-offload-prep'

Jiri Pirko says:

====================
mlxsw: Preparations for VRF offload

Ido says:

This patchset aims to prepare the mlxsw driver for VRF offload. The
follow-up patchsets that introduce VRF support can be found here:
https://github.com/idosch/linux/tree/idosch-next

The first four patches are mainly concerned with the netdevice
notification block. There are no functional changes, only restructuring
to make VRF enslavement easier to integrate.

Patches 5-10 remove various assumptions throughout the code about a
single virtual router (VR) and also restructure the internal data
structures to more accurately represent the device's operation.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1015d743 b5d90e6d
@@ -4141,7 +4141,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
enum mlxsw_reg_ritr_if_type type,
- u16 rif, u16 mtu, const char *mac)
+ u16 rif, u16 vr_id, u16 mtu,
+ const char *mac)
{
bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
@@ -4153,6 +4154,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
+ mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
@@ -3352,7 +3352,7 @@ static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev
return mlxsw_sp_port;
}
- static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+ struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3391,546 +3391,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
unsigned long event)
{
switch (event) {
case NETDEV_UP:
if (!r)
return true;
r->ref_count++;
return false;
case NETDEV_DOWN:
if (r && --r->ref_count == 0)
return true;
/* It is possible we already removed the RIF ourselves
* if it was assigned to a netdev that is now a bridge
* or LAG slave.
*/
return false;
}
return false;
}
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
if (!mlxsw_sp->rifs[i])
return i;
return MLXSW_SP_INVALID_RIF;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
bool *p_lagged, u16 *p_system_port)
{
u8 local_port = mlxsw_sp_vport->local_port;
*p_lagged = mlxsw_sp_vport->lagged;
*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}
static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev, u16 rif,
bool create)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
bool lagged = mlxsw_sp_vport->lagged;
char ritr_pl[MLXSW_REG_RITR_LEN];
u16 system_port;
mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
l3_dev->mtu, l3_dev->dev_addr);
mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
struct mlxsw_sp_fid *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
f->leave = mlxsw_sp_vport_rif_sp_leave;
f->ref_count = 0;
f->dev = l3_dev;
f->fid = fid;
return f;
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
struct mlxsw_sp_rif *r;
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r)
return NULL;
INIT_LIST_HEAD(&r->nexthop_list);
INIT_LIST_HEAD(&r->neigh_list);
ether_addr_copy(r->addr, l3_dev->dev_addr);
r->mtu = l3_dev->mtu;
r->ref_count = 1;
r->dev = l3_dev;
r->rif = rif;
r->f = f;
return r;
}
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct mlxsw_sp_fid *f;
struct mlxsw_sp_rif *r;
u16 fid, rif;
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
if (err)
return ERR_PTR(err);
fid = mlxsw_sp_rif_sp_to_fid(rif);
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
if (err)
goto err_rif_fdb_op;
f = mlxsw_sp_rfid_alloc(fid, l3_dev);
if (!f) {
err = -ENOMEM;
goto err_rfid_alloc;
}
r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
if (!r) {
err = -ENOMEM;
goto err_rif_alloc;
}
f->r = r;
mlxsw_sp->rifs[rif] = r;
return r;
err_rif_alloc:
kfree(f);
err_rfid_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
return ERR_PTR(err);
}
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
struct mlxsw_sp_rif *r)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct net_device *l3_dev = r->dev;
struct mlxsw_sp_fid *f = r->f;
u16 fid = f->fid;
u16 rif = r->rif;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
kfree(r);
kfree(f);
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct mlxsw_sp_rif *r;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!r) {
r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
if (IS_ERR(r))
return PTR_ERR(r);
}
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
r->f->ref_count++;
netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
return 0;
}
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
if (--f->ref_count == 0)
mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
struct net_device *port_dev,
unsigned long event, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_vport))
return -EINVAL;
switch (event) {
case NETDEV_UP:
return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
case NETDEV_DOWN:
mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
break;
}
return 0;
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event)
{
if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
struct net_device *lag_dev,
unsigned long event, u16 vid)
{
struct net_device *port_dev;
struct list_head *iter;
int err;
netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
if (mlxsw_sp_port_dev_check(port_dev)) {
err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
event, vid);
if (err)
return err;
}
}
return 0;
}
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
unsigned long event)
{
if (netif_is_bridge_port(lag_dev))
return 0;
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev)
{
u16 fid;
if (is_vlan_dev(l3_dev))
fid = vlan_dev_vlan_id(l3_dev);
else if (mlxsw_sp->master_bridge.dev == l3_dev)
fid = 1;
else
return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
{
return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}
static u16 mlxsw_sp_flood_table_index_get(u16 fid)
{
return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
}
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
bool set)
{
enum mlxsw_flood_table_type table_type;
char *sftr_pl;
u16 index;
int err;
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
table_type = mlxsw_sp_flood_table_type_get(fid);
index = mlxsw_sp_flood_table_index_get(fid);
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
1, MLXSW_PORT_ROUTER_PORT, set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
kfree(sftr_pl);
return err;
}
static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
if (mlxsw_sp_fid_is_vfid(fid))
return MLXSW_REG_RITR_FID_IF;
else
return MLXSW_REG_RITR_VLAN_IF;
}
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
u16 fid, u16 rif,
bool create)
{
enum mlxsw_reg_ritr_if_type rif_type;
char ritr_pl[MLXSW_REG_RITR_LEN];
rif_type = mlxsw_sp_rif_type_get(fid);
mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
l3_dev->dev_addr);
mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
struct mlxsw_sp_fid *f)
{
struct mlxsw_sp_rif *r;
u16 rif;
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
if (err)
return err;
err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
if (err)
goto err_rif_bridge_op;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
if (err)
goto err_rif_fdb_op;
r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
if (!r) {
err = -ENOMEM;
goto err_rif_alloc;
}
f->r = r;
mlxsw_sp->rifs[rif] = r;
netdev_dbg(l3_dev, "RIF=%d created\n", rif);
return 0;
err_rif_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
return err;
}
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *r)
{
struct net_device *l3_dev = r->dev;
struct mlxsw_sp_fid *f = r->f;
u16 rif = r->rif;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
kfree(r);
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
struct net_device *br_dev,
unsigned long event)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
struct mlxsw_sp_fid *f;
/* FID can either be an actual FID if the L3 device is the
* VLAN-aware bridge or a VLAN device on top. Otherwise, the
* L3 device is a VLAN-unaware bridge and we get a vFID.
*/
f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
if (WARN_ON(!f))
return -EINVAL;
switch (event) {
case NETDEV_UP:
return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
case NETDEV_DOWN:
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
break;
}
return 0;
}
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
unsigned long event)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
vid);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid);
else if (netif_is_bridge_master(real_dev) &&
mlxsw_sp->master_bridge.dev == real_dev)
return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
event);
return 0;
}
static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *r;
int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
goto out;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(r, event))
goto out;
if (mlxsw_sp_port_dev_check(dev))
err = mlxsw_sp_inetaddr_port_event(dev, event);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_inetaddr_lag_event(dev, event);
else if (netif_is_bridge_master(dev))
err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
else if (is_vlan_dev(dev))
err = mlxsw_sp_inetaddr_vlan_event(dev, event);
out:
return notifier_from_errno(err);
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
const char *mac, int mtu)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
int err;
mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
if (err)
return err;
mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *r;
int err;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
return 0;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!r)
return 0;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
if (err)
return err;
err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
if (err)
goto err_rif_edit;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
if (err)
goto err_rif_fdb_op;
ether_addr_copy(r->addr, dev->dev_addr);
r->mtu = dev->mtu;
netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
return 0;
err_rif_fdb_op:
mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
return err;
}
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
u16 fid)
{
@@ -4221,7 +3681,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 lag_id)
+ struct net_device *lag_dev, u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
@@ -4239,6 +3699,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->lag_id = lag_id;
mlxsw_sp_vport->lagged = 1;
+ mlxsw_sp_vport->dev = lag_dev;
}
static void
@@ -4255,6 +3716,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
if (f)
f->leave(mlxsw_sp_vport);
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
mlxsw_sp_vport->lagged = 0;
}
@@ -4294,7 +3756,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
- mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+ mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
return 0;
@@ -4565,33 +4027,40 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
struct netdev_notifier_changeupper_info *info;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
- int err;
+ int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(br_dev);
if (!mlxsw_sp)
return 0;
- if (br_dev != mlxsw_sp->master_bridge.dev)
- return 0;
info = ptr;
switch (event) {
- case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!is_vlan_dev(upper_dev))
- break;
- if (info->linking) {
- err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
- upper_dev);
- if (err)
- return err;
+ return -EINVAL;
+ if (is_vlan_dev(upper_dev) &&
+ br_dev != mlxsw_sp->master_bridge.dev)
+ return -EINVAL;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (is_vlan_dev(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+ upper_dev);
+ else
+ mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
+ upper_dev);
} else {
- mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+ err = -EINVAL;
+ WARN_ON(1);
}
break;
}
- return 0;
+ return err;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
@@ -4811,6 +4280,8 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport)
+ return 0;
switch (event) {
case NETDEV_PRECHANGEUPPER:
@@ -4828,16 +4299,17 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (info->linking) {
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
- err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
- upper_dev);
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
+ upper_dev);
+ else
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
} else {
- if (!mlxsw_sp_vport)
- return 0;
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+ err = -EINVAL;
+ WARN_ON(1);
}
+ break;
}
return err;
@@ -58,7 +58,6 @@
#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
- #define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -92,6 +91,7 @@ static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
}
struct mlxsw_sp_port;
+ struct mlxsw_sp_rif;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -107,17 +107,6 @@ struct mlxsw_sp_fid {
u16 fid;
};
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
struct net_device *dev;
unsigned int ref_count;
struct mlxsw_sp_fid *f;
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif;
};
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -141,16 +130,6 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}
static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
{
return fid >= MLXSW_SP_RFID_BASE;
}
static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
return MLXSW_SP_RFID_BASE + rif;
}
struct mlxsw_sp_sb_pr {
enum mlxsw_reg_sbpr_mode mode;
u32 size;
@@ -207,11 +186,9 @@ struct mlxsw_sp_fib;
struct mlxsw_sp_vr {
u16 id; /* virtual router ID */
- bool used;
- enum mlxsw_sp_l3proto proto;
u32 tb_id; /* kernel fib table id */
- struct mlxsw_sp_lpm_tree *lpm_tree;
- struct mlxsw_sp_fib *fib;
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
};
enum mlxsw_sp_span_type {
@@ -386,6 +363,7 @@ struct mlxsw_sp_port {
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+ struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -497,19 +475,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
return NULL;
}
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BC,
@@ -570,8 +535,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
- void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -608,8 +571,11 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr);
- void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
+ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+ int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
@@ -50,6 +50,21 @@
#include "core.h"
#include "reg.h"
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
struct net_device *dev;
struct mlxsw_sp_fid *f;
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif;
u16 vr_id;
};
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -88,12 +103,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
static void
mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
{
memset(prefix_usage, 0, sizeof(*prefix_usage));
}
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
unsigned char prefix_len)
@@ -125,7 +134,7 @@ struct mlxsw_sp_fib_node {
struct list_head entry_list;
struct list_head list;
struct rhash_head ht_node;
- struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_fib_key key;
};
@@ -149,13 +158,17 @@ struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib {
struct rhashtable ht;
struct list_head node_list;
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
+ enum mlxsw_sp_l3proto proto;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
- static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_fib *fib;
int err;
@@ -167,6 +180,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
if (err)
goto err_rhashtable_init;
INIT_LIST_HEAD(&fib->node_list);
+ fib->proto = proto;
+ fib->vr = vr;
return fib;
err_rhashtable_init:
@@ -177,24 +192,21 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
WARN_ON(!list_empty(&fib->node_list));
+ WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
static struct mlxsw_sp_lpm_tree *
- mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
static struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->ref_count == 0) {
- if (one_reserved)
- one_reserved = false;
- else
- return lpm_tree;
- }
+ if (lpm_tree->ref_count == 0)
+ return lpm_tree;
}
return NULL;
}
@@ -248,12 +260,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
@@ -283,7 +295,7 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -297,7 +309,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
goto inc_ref_count;
}
lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
- proto, one_reserved);
+ proto);
if (IS_ERR(lpm_tree))
return lpm_tree;
@@ -325,6 +337,11 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
}
}
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
return !!vr->fib4;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_vr *vr;
@@ -332,31 +349,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (!vr->used)
+ if (!mlxsw_sp_vr_is_used(vr))
return vr;
}
return NULL;
}
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto,
- vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -369,8 +386,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
struct mlxsw_sp_vr *vr;
int i;
@@ -379,69 +395,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+ if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
}
return NULL;
}
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
return vr->fib4;
case MLXSW_SP_L3_PROTO_IPV6:
BUG_ON(1);
}
return NULL;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage;
- struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_vr *vr;
- int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr)
return ERR_PTR(-EBUSY);
- vr->fib = mlxsw_sp_fib_create();
- if (IS_ERR(vr->fib))
- return ERR_CAST(vr->fib);
- vr->proto = proto;
+ vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr->fib4))
+ return ERR_CAST(vr->fib4);
vr->tb_id = tb_id;
- mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- proto, true);
- if (IS_ERR(lpm_tree)) {
- err = PTR_ERR(lpm_tree);
- goto err_tree_get;
- }
- vr->lpm_tree = lpm_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
- if (err)
- goto err_tree_bind;
- vr->used = true;
return vr;
- err_tree_bind:
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
- err_tree_get:
- mlxsw_sp_fib_destroy(vr->fib);
- return ERR_PTR(err);
}
- static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
- mlxsw_sp_fib_destroy(vr->fib);
- vr->used = false;
+ mlxsw_sp_fib_destroy(vr->fib4);
+ vr->fib4 = NULL;
}
static int
- mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
- struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
struct mlxsw_sp_lpm_tree *new_tree;
int err;
@@ -449,7 +446,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
- vr->proto, false);
+ fib->proto);
if (IS_ERR(new_tree)) {
/* We failed to get a tree according to the required
* prefix usage. However, the current tree might be still good
@@ -463,8 +460,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
}
/* Prevent packet loss by overwriting existing binding */
- vr->lpm_tree = new_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+ fib->lpm_tree = new_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
if (err)
goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
@@ -472,53 +469,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
err_tree_bind:
- vr->lpm_tree = lpm_tree;
+ fib->lpm_tree = lpm_tree;
mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
return err;
}
- static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
struct mlxsw_sp_vr *vr;
- int err;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
- if (!vr) {
- vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
- if (IS_ERR(vr))
- return vr;
- } else {
- struct mlxsw_sp_prefix_usage req_prefix_usage;
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &vr->fib->prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- /* Need to replace LPM tree in case new prefix is required. */
- err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &req_prefix_usage);
- if (err)
- return ERR_PTR(err);
- }
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
return vr;
}
- static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+ static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
- /* Destroy virtual router entity in case the associated FIB is empty
- * and allow it to be used for other tables in future. Otherwise,
- * check if some prefix usage did not disappear and change tree if
- * that is the case. Note that in case new, smaller tree cannot be
- * allocated, the original one will be kept being used.
- */
- if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
- mlxsw_sp_vr_destroy(mlxsw_sp, vr);
- else
- mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &vr->fib->prefix_usage);
+ if (!vr->rif_count && list_empty(&vr->fib4->node_list))
+ mlxsw_sp_vr_destroy(vr);
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -1171,7 +1141,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr,
+ const struct mlxsw_sp_fib *fib,
u32 adj_index, u16 ecmp_size,
u32 new_adj_index,
u16 new_ecmp_size)
@@ -1179,8 +1149,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
char raleu_pl[MLXSW_REG_RALEU_LEN];
mlxsw_reg_raleu_pack(raleu_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
- adj_index, ecmp_size, new_adj_index,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->vr->id, adj_index, ecmp_size, new_adj_index,
new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1190,14 +1160,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
u32 old_adj_index, u16 old_ecmp_size)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_vr *vr = NULL;
+ struct mlxsw_sp_fib *fib = NULL;
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (vr == fib_entry->fib_node->vr)
+ if (fib == fib_entry->fib_node->fib)
continue;
- vr = fib_entry->fib_node->vr;
- err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+ fib = fib_entry->fib_node->fib;
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
old_adj_index,
old_ecmp_size,
nh_grp->adj_index,
@@ -1514,6 +1484,9 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ if (!dev)
+ return 0;
in_dev = __in_dev_get_rtnl(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
fib_nh->nh_flags & RTNH_F_LINKDOWN)
@@ -1699,7 +1672,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
fib_entry->offloaded = true;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_inc(fib_entry->nh_group->key.fi);
break;
@@ -1711,7 +1684,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_dec(fib_entry->nh_group->key.fi);
break;
@@ -1751,8 +1724,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -1772,8 +1745,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
@@ -1785,10 +1758,10 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralue_op op)
{
struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
enum mlxsw_reg_ralue_trap_action trap_action;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
u16 trap_id = 0;
u16 rif = 0;
@@ -1801,8 +1774,8 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1812,13 +1785,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1845,7 +1818,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
{
int err = -EINVAL;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
break;
@@ -1877,17 +1850,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
{
struct fib_info *fi = fen_info->fi;
- if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
+ switch (fen_info->type) {
+ case RTN_BROADCAST: /* fall through */
+ case RTN_LOCAL:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
- }
- if (fen_info->type != RTN_UNICAST)
- return -EINVAL;
- if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ case RTN_UNREACHABLE: /* fall through */
+ case RTN_BLACKHOLE: /* fall through */
+ case RTN_PROHIBIT:
+ /* Packets hitting these routes need to be trapped, but
+ * can do so with a lower priority than packets directed
+ * at the host, so use action type local instead of trap.
+ */
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- else
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return 0;
+ return 0;
+ case RTN_UNICAST:
+ if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ else
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct mlxsw_sp_fib_entry *
@@ -1996,7 +1981,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
}
static struct mlxsw_sp_fib_node *
- mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+ mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
{
struct mlxsw_sp_fib_node *fib_node;
@@ -2006,18 +1991,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
return NULL;
INIT_LIST_HEAD(&fib_node->entry_list);
- list_add(&fib_node->list, &vr->fib->node_list);
+ list_add(&fib_node->list, &fib->node_list);
memcpy(fib_node->key.addr, addr, addr_len);
fib_node->key.prefix_len = prefix_len;
- mlxsw_sp_fib_node_insert(vr->fib, fib_node);
- fib_node->vr = vr;
return fib_node;
}
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
- mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
list_del(&fib_node->list);
WARN_ON(!list_empty(&fib_node->entry_list));
kfree(fib_node);
@@ -2034,7 +2016,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
@@ -2043,32 +2025,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
struct mlxsw_sp_fib *fib)
{
struct mlxsw_sp_prefix_usage req_prefix_usage;
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
err = mlxsw_sp_fib_node_insert(fib, fib_node);
if (err)
return err;
fib_node->fib = fib;
mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
&req_prefix_usage);
if (err)
goto err_tree_check;
} else {
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
fib->proto);
if (IS_ERR(lpm_tree))
return PTR_ERR(lpm_tree);
fib->lpm_tree = lpm_tree;
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
if (err)
goto err_tree_bind;
}
mlxsw_sp_fib_node_prefix_inc(fib_node);
return 0;
err_tree_bind:
fib->lpm_tree = NULL;
mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_tree_check:
fib_node->fib = NULL;
mlxsw_sp_fib_node_remove(fib, fib_node);
return err;
}
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
struct mlxsw_sp_fib *fib = fib_node->fib;
mlxsw_sp_fib_node_prefix_dec(fib_node);
if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
fib->lpm_tree = NULL;
mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
} else {
mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
}
fib_node->fib = NULL;
mlxsw_sp_fib_node_remove(fib, fib_node);
}
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_node *fib_node;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
- MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
if (IS_ERR(vr))
return ERR_CAST(vr);
+ fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
- fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (fib_node)
return fib_node;
- fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (!fib_node) {
@@ -2076,22 +2124,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
goto err_fib_node_create;
}
+ err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
+ if (err)
+ goto err_fib_node_init;
return fib_node;
+ err_fib_node_init:
+ mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_vr *vr = fib_node->vr;
+ struct mlxsw_sp_vr *vr = fib_node->fib->vr;
if (!list_empty(&fib_node->entry_list))
return;
+ mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
}
static struct mlxsw_sp_fib_entry *
@@ -2236,8 +2291,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_fib4_node_entry_add;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
return 0;
err_fib4_node_entry_add:
@@ -2251,7 +2304,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
mlxsw_sp_fib4_node_list_remove(fib_entry);
}
...@@ -2340,9 +2392,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
char ralst_pl[MLXSW_REG_RALST_LEN];
-char raltb_pl[MLXSW_REG_RALTB_LEN];
-char ralue_pl[MLXSW_REG_RALUE_LEN];
-int err;
+int i, err;
mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
MLXSW_SP_LPM_TREE_MIN);
...@@ -2355,16 +2405,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
-mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
-MLXSW_SP_LPM_TREE_MIN);
-err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
-if (err)
-return err;
-mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
-MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
-mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
-return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+char raltb_pl[MLXSW_REG_RALTB_LEN];
+char ralue_pl[MLXSW_REG_RALUE_LEN];
+if (!mlxsw_sp_vr_is_used(vr))
+continue;
+mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+MLXSW_REG_RALXX_PROTOCOL_IPV4,
+MLXSW_SP_LPM_TREE_MIN);
+err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+raltb_pl);
+if (err)
+return err;
+mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
+0);
+mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+ralue_pl);
+if (err)
+return err;
+}
+return 0;
}
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
...@@ -2390,7 +2457,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
-switch (fib_node->vr->proto) {
+switch (fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
break;
...@@ -2400,26 +2467,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
+struct mlxsw_sp_vr *vr,
+enum mlxsw_sp_l3proto proto)
{
+struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
struct mlxsw_sp_fib_node *fib_node, *tmp;
-struct mlxsw_sp_vr *vr;
+list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
+bool do_break = &tmp->list == &fib->node_list;
+mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
+if (do_break)
+break;
+}
+}
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
-vr = &mlxsw_sp->router.vrs[i];
+struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
-if (!vr->used)
+if (!mlxsw_sp_vr_is_used(vr))
continue;
+mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
-list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
-list) {
-bool do_break = &tmp->list == &vr->fib->node_list;
-mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
-if (do_break)
-break;
-}
}
}
...@@ -2437,70 +2510,6 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
int err;
mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
if (WARN_ON_ONCE(err))
return err;
mlxsw_reg_ritr_enable_set(ritr_pl, false);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *r)
{
mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
GFP_KERNEL);
if (!mlxsw_sp->rifs)
return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
goto err_rgcr_fail;
return 0;
err_rgcr_fail:
kfree(mlxsw_sp->rifs);
return err;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->rifs[i]);
kfree(mlxsw_sp->rifs);
}
struct mlxsw_sp_fib_event_work {
struct work_struct work;
union {
...@@ -2594,16 +2603,666 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
-{
-struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
-/* Flush pending FIB notifications and then flush the device's
- * table before requesting another dump. The FIB notification
- * block is unregistered, so no need to take RTNL.
- */
-mlxsw_core_flush_owq();
-mlxsw_sp_router_fib_flush(mlxsw_sp);
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+const struct net_device *dev)
+{
+int i;
+for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+return mlxsw_sp->rifs[i];
+return NULL;
+}
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
int err;
mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
if (WARN_ON_ONCE(err))
return err;
mlxsw_reg_ritr_enable_set(ritr_pl, false);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *r)
{
mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
}
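/* Decide whether a RIF needs to be configured for the netdevice:
 * create one on the first NETDEV_UP when no RIF exists yet, and tear
 * it down on NETDEV_DOWN once the last IPv4 address is gone.
 */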
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
const struct in_device *in_dev,
unsigned long event)
{
switch (event) {
case NETDEV_UP:
if (!r)
return true;
return false;
case NETDEV_DOWN:
if (r && !in_dev->ifa_list)
return true;
/* It is possible we already removed the RIF ourselves
* if it was assigned to a netdev that is now a bridge
* or LAG slave.
*/
return false;
}
return false;
}
#define MLXSW_SP_INVALID_RIF 0xffff
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
if (!mlxsw_sp->rifs[i])
return i;
return MLXSW_SP_INVALID_RIF;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
bool *p_lagged, u16 *p_system_port)
{
u8 local_port = mlxsw_sp_vport->local_port;
*p_lagged = mlxsw_sp_vport->lagged;
*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}
static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
u16 vr_id, struct net_device *l3_dev,
u16 rif, bool create)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
bool lagged = mlxsw_sp_vport->lagged;
char ritr_pl[MLXSW_REG_RITR_LEN];
u16 system_port;
mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, vr_id,
l3_dev->mtu, l3_dev->dev_addr);
mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
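/* Each Sub-port RIF is backed by its own rFID, derived from the RIF
 * index at a fixed offset.
 */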
static u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
return MLXSW_SP_RFID_BASE + rif;
}
static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
struct mlxsw_sp_fid *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
f->leave = mlxsw_sp_vport_rif_sp_leave;
f->ref_count = 0;
f->dev = l3_dev;
f->fid = fid;
return f;
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, u16 vr_id, struct net_device *l3_dev,
struct mlxsw_sp_fid *f)
{
struct mlxsw_sp_rif *r;
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r)
return NULL;
INIT_LIST_HEAD(&r->nexthop_list);
INIT_LIST_HEAD(&r->neigh_list);
ether_addr_copy(r->addr, l3_dev->dev_addr);
r->mtu = l3_dev->mtu;
r->vr_id = vr_id;
r->dev = l3_dev;
r->rif = rif;
r->f = f;
return r;
}
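/* Create a Sub-port RIF for the vPort's L3 device: reserve a free RIF
 * index, bind it to the VR backing the MAIN table, program the RITR
 * register, point the device's MAC at the router via an FDB entry and
 * allocate the backing rFID and RIF structures.
 */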
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_fid *f;
struct mlxsw_sp_rif *r;
u16 fid, rif;
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
vr = mlxsw_sp_vr_get(mlxsw_sp, RT_TABLE_MAIN);
if (IS_ERR(vr))
return ERR_CAST(vr);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif,
true);
if (err)
goto err_vport_rif_sp_op;
fid = mlxsw_sp_rif_sp_to_fid(rif);
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
if (err)
goto err_rif_fdb_op;
f = mlxsw_sp_rfid_alloc(fid, l3_dev);
if (!f) {
err = -ENOMEM;
goto err_rfid_alloc;
}
r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
if (!r) {
err = -ENOMEM;
goto err_rif_alloc;
}
f->r = r;
mlxsw_sp->rifs[rif] = r;
vr->rif_count++;
return r;
err_rif_alloc:
kfree(f);
err_rfid_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);
err_vport_rif_sp_op:
mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
struct mlxsw_sp_rif *r)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
struct net_device *l3_dev = r->dev;
struct mlxsw_sp_fid *f = r->f;
u16 fid = f->fid;
u16 rif = r->rif;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
vr->rif_count--;
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
kfree(r);
kfree(f);
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);
mlxsw_sp_vr_put(vr);
}
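/* Join and leave are reference counted: the first vPort on the L3
 * device creates the RIF and the last one leaving destroys it.
 */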
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct mlxsw_sp_rif *r;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!r) {
r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
if (IS_ERR(r))
return PTR_ERR(r);
}
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
r->f->ref_count++;
netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
return 0;
}
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
if (--f->ref_count == 0)
mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
struct net_device *port_dev,
unsigned long event, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_vport))
return -EINVAL;
switch (event) {
case NETDEV_UP:
return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
case NETDEV_DOWN:
mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
break;
}
return 0;
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event)
{
if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
struct net_device *lag_dev,
unsigned long event, u16 vid)
{
struct net_device *port_dev;
struct list_head *iter;
int err;
netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
if (mlxsw_sp_port_dev_check(port_dev)) {
err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
event, vid);
if (err)
return err;
}
}
return 0;
}
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
unsigned long event)
{
if (netif_is_bridge_port(lag_dev))
return 0;
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev)
{
u16 fid;
if (is_vlan_dev(l3_dev))
fid = vlan_dev_vlan_id(l3_dev);
else if (mlxsw_sp->master_bridge.dev == l3_dev)
fid = 1;
else
return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
{
return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}
static u16 mlxsw_sp_flood_table_index_get(u16 fid)
{
return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
}
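/* Add or remove the router port in the FID's broadcast flood table. */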
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
bool set)
{
enum mlxsw_flood_table_type table_type;
char *sftr_pl;
u16 index;
int err;
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
table_type = mlxsw_sp_flood_table_type_get(fid);
index = mlxsw_sp_flood_table_index_get(fid);
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
1, MLXSW_PORT_ROUTER_PORT, set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
kfree(sftr_pl);
return err;
}
static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
if (mlxsw_sp_fid_is_vfid(fid))
return MLXSW_REG_RITR_FID_IF;
else
return MLXSW_REG_RITR_VLAN_IF;
}
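/* Bridge RIFs use a VLAN interface for actual FIDs and a FID interface
 * for vFIDs; pack the RITR register accordingly and bind the RIF to
 * the given VR.
 */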
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
struct net_device *l3_dev,
u16 fid, u16 rif,
bool create)
{
enum mlxsw_reg_ritr_if_type rif_type;
char ritr_pl[MLXSW_REG_RITR_LEN];
rif_type = mlxsw_sp_rif_type_get(fid);
mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
l3_dev->dev_addr);
mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
struct mlxsw_sp_fid *f)
{
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_rif *r;
u16 rif;
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
vr = mlxsw_sp_vr_get(mlxsw_sp, RT_TABLE_MAIN);
if (IS_ERR(vr))
return PTR_ERR(vr);
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
if (err)
goto err_port_flood_set;
err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif,
true);
if (err)
goto err_rif_bridge_op;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
if (err)
goto err_rif_fdb_op;
r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
if (!r) {
err = -ENOMEM;
goto err_rif_alloc;
}
f->r = r;
mlxsw_sp->rifs[rif] = r;
vr->rif_count++;
netdev_dbg(l3_dev, "RIF=%d created\n", rif);
return 0;
err_rif_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
mlxsw_sp_vr_put(vr);
return err;
}
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *r)
{
struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
struct net_device *l3_dev = r->dev;
struct mlxsw_sp_fid *f = r->f;
u16 rif = r->rif;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
vr->rif_count--;
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
kfree(r);
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);
mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
mlxsw_sp_vr_put(vr);
netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
struct net_device *br_dev,
unsigned long event)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
struct mlxsw_sp_fid *f;
/* FID can either be an actual FID if the L3 device is the
* VLAN-aware bridge or a VLAN device on top. Otherwise, the
* L3 device is a VLAN-unaware bridge and we get a vFID.
*/
f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
if (WARN_ON(!f))
return -EINVAL;
switch (event) {
case NETDEV_UP:
return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
case NETDEV_DOWN:
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
break;
}
return 0;
}
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
unsigned long event)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
vid);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid);
else if (netif_is_bridge_master(real_dev) &&
mlxsw_sp->master_bridge.dev == real_dev)
return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
event);
return 0;
}
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *r;
int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
goto out;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(r, ifa->ifa_dev, event))
goto out;
if (mlxsw_sp_port_dev_check(dev))
err = mlxsw_sp_inetaddr_port_event(dev, event);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_inetaddr_lag_event(dev, event);
else if (netif_is_bridge_master(dev))
err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
else if (is_vlan_dev(dev))
err = mlxsw_sp_inetaddr_vlan_event(dev, event);
out:
return notifier_from_errno(err);
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
const char *mac, int mtu)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
int err;
mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
if (err)
return err;
mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
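/* Sync an existing RIF with its netdevice after a MAC or MTU change:
 * replace the FDB entry for the old address, re-program the RITR
 * register and roll back if any step fails.
 */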
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *r;
int err;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
return 0;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!r)
return 0;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
if (err)
return err;
err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
if (err)
goto err_rif_edit;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
if (err)
goto err_rif_fdb_op;
ether_addr_copy(r->addr, dev->dev_addr);
r->mtu = dev->mtu;
netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
return 0;
err_rif_fdb_op:
mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
return err;
}
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
/* Flush pending FIB notifications and then flush the device's
* table before requesting another dump. The FIB notification
* block is unregistered, so no need to take RTNL.
*/
mlxsw_core_flush_owq();
mlxsw_sp_router_fib_flush(mlxsw_sp);
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
GFP_KERNEL);
if (!mlxsw_sp->rifs)
return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
goto err_rgcr_fail;
return 0;
err_rgcr_fail:
kfree(mlxsw_sp->rifs);
return err;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->rifs[i]);
kfree(mlxsw_sp->rifs);
}
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
...