Commit 798661c7 authored by David S. Miller

Merge branch 'mlxsw-unified-bridge-conversion-part-6'

Ido Schimmel says:

====================
mlxsw: Unified bridge conversion - part 6/6

This is the sixth and final part of the conversion of mlxsw to the
unified bridge model. It transitions to the driver the last bits of
functionality that were the firmware's responsibility in the legacy model.
The last patches flip the driver to the unified bridge model and clean
up code that was used to make the conversion easier to review.

Patchset overview:

Patch #1 sets the egress VID for known unicast packets. For multicast
packets, the egress VID is configured using the MPE table. See commit
8c2da081 ("mlxsw: spectrum_fid: Configure egress VID classification
for multicast").
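
As a rough sketch (not taken from the patches; the function name and the
FID, VID and port numbers are made up), a unicast FDB record with an
explicit egress VID can be written through the extended
mlxsw_reg_sfd_uc_pack() shown in the diff below:

static int example_sfd_uc_write(struct mlxsw_sp *mlxsw_sp, const char *mac)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	/* FID 5000, egress VID 10, local port 1 - illustrative values only. */
	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac, 5000, 10, MLXSW_REG_SFD_REC_ACTION_NOP, 1);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);

	kfree(sfd_pl);
	return err;
}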

Patch #2 configures the VNI to FID classification that is used during
decapsulation.
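
For illustration only (the function name, FID and VNI values are
invented), such a mapping could be programmed with the extended SVFA
packing helper from the diff below:

static int example_vni_to_fid_map(struct mlxsw_sp *mlxsw_sp)
{
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	/* Map VNI 4242 to FID 5000 for decapsulation; no ingress RIF yet. */
	mlxsw_reg_svfa_vni_pack(svfa_pl, true, 5000, 4242, false, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}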

Patch #3 configures the ingress router interface (RIF) in FID
classification records, so that when a packet reaches the router block,
its ingress RIF is known. Care is taken to configure this in all the
different flows (e.g., a RIF set on a FID, a {Port, VID} joining a FID
that already has a RIF, etc.).
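
A minimal sketch of one such flow (the function name and the numeric
values are invented, not driver code): when a {Port, VID} joins a FID
that already has a RIF, the mapping can carry the RIF index as the
ingress RIF:

static int example_port_vid_to_fid_map(struct mlxsw_sp *mlxsw_sp,
				       u16 local_port,
				       const struct mlxsw_sp_rif *rif)
{
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	/* {local_port, VID 10} -> FID 5000, with the FID's RIF as ingress RIF. */
	mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, true, 5000, 10,
				     true, mlxsw_sp_rif_index(rif));
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}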

Patch #4 configures the egress VID for routed packets. For such packets,
the egress VID is not set by the MPE table or by an FDB record at the
egress bridge, but instead by a dedicated table that maps {Egress RIF,
Egress port} to a VID.

Patch #5 removes VID configuration from RIF creation, as in the unified
bridge model the firmware no longer needs it.

Patch #6 sets the egress FID in the RIF configuration so that the
device knows which FID to use when bridging the packet after routing.
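
As a hedged illustration (the function name and all numeric values are
made up; the driver derives them from the RIF and its FID, see
mlxsw_sp_rif_subport_op() in the diff below), a sub-port RIF is now
packed with an egress FID and a zero VID:

static int example_rif_subport_efid(struct mlxsw_sp *mlxsw_sp, const char *mac)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];

	/* RIF 1 in VR 0, MTU 1500; bridge into FID 5000 after routing. */
	mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_SP_IF, 1, 0, 1500);
	mlxsw_reg_ritr_mac_pack(ritr_pl, mac);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, false, 1, 5000, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}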

Patches #7-#9 add a new 802.1Q FID family and associated VLAN RIFs. In
the unified bridge model, we no longer need to emulate 802.1Q FIDs using
802.1D FIDs, as a VNI can be associated with both.
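
Purely as a sketch (the function name is invented; it mirrors
mlxsw_sp_rif_vlan_op() added in the diff below), a VLAN RIF is
programmed with the VID of its 802.1Q FID and, on Spectrum-2 and later,
with the FID index as the egress FID:

static int example_rif_vlan_enable(struct mlxsw_sp_rif *rif, u16 efid)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);

	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, true, rif->rif_index, rif->vr_id,
				    rif->dev->mtu, rif->dev->dev_addr,
				    rif->mac_profile_id, vid, efid);
	return mlxsw_reg_write(rif->mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}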

Patches #10-#11 finally flip the driver to the unified bridge model.

Patches #12-#13 clean up code that was used to make the conversion
easier to review.

v2:
* Fix build failure [1] in patch #1.

[1] https://lore.kernel.org/netdev/20220630201709.6e66a1bb@kernel.org/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d0bf1fe6 88840d69
@@ -633,6 +633,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
/* cmd_mbox_config_set_ubridge
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
/* cmd_mbox_config_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
@@ -792,6 +798,13 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
/* cmd_mbox_config_profile_ubridge
* Unified Bridge
* 0 - non unified bridge
* 1 - unified bridge
*/
MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
* Valid for Spectrum only
@@ -295,6 +295,7 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
used_ubridge:1,
used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_mid;
@@ -314,6 +315,7 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
u8 ubridge;
u32 kvd_linear_size;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
@@ -1235,6 +1235,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
if (profile->used_ubridge) {
mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
profile->ubridge);
}
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
@@ -380,7 +380,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid_vid,
const char *mac, u16 fid_vid, u16 vid,
enum mlxsw_reg_sfd_rec_action action,
u16 local_port)
{
@@ -389,6 +389,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
@@ -454,6 +456,7 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
@@ -1655,40 +1658,43 @@ MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
static inline void __mlxsw_reg_svfa_pack(char *payload,
enum mlxsw_reg_svfa_mt mt, bool valid,
u16 fid)
u16 fid, bool irif_v, u16 irif)
{
MLXSW_REG_ZERO(svfa, payload);
mlxsw_reg_svfa_swid_set(payload, 0);
mlxsw_reg_svfa_mapping_table_set(payload, mt);
mlxsw_reg_svfa_v_set(payload, valid);
mlxsw_reg_svfa_fid_set(payload, fid);
mlxsw_reg_svfa_irif_v_set(payload, irif_v);
mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
}
static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
bool valid, u16 fid, u16 vid)
bool valid, u16 fid, u16 vid,
bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
__mlxsw_reg_svfa_pack(payload, mt, valid, fid);
__mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_local_port_set(payload, local_port);
mlxsw_reg_svfa_vid_set(payload, vid);
}
static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
u16 vid)
u16 vid, bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
__mlxsw_reg_svfa_pack(payload, mt, valid, fid);
__mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vid_set(payload, vid);
}
static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
u32 vni)
u32 vni, bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
__mlxsw_reg_svfa_pack(payload, mt, valid, fid);
__mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vni_set(payload, vni);
}
@@ -1963,7 +1969,8 @@ MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
u16 fid_offset, bool flood_rsp,
enum mlxsw_reg_bridge_type bridge_type)
enum mlxsw_reg_bridge_type bridge_type,
bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
@@ -1973,6 +1980,8 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_vv_set(payload, false);
mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
/* SPVMLR - Switch Port VLAN MAC Learning Register
@@ -7107,10 +7116,11 @@ static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
}
static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
u16 system_port, u16 vid)
u16 system_port, u16 efid, u16 vid)
{
mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
}
@@ -3161,7 +3161,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_ports_create;
}
mlxsw_sp->ubridge = false;
return 0;
err_ports_create:
@@ -3383,24 +3382,15 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_parsing_fini(mlxsw_sp);
}
/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
* 802.1Q FIDs
*/
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
VLAN_VID_MASK - 1)
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED,
.max_fid_flood_tables = 3,
.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
.used_ubridge = 1,
.ubridge = 1,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
@@ -3414,17 +3404,14 @@ static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
};
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED,
.max_fid_flood_tables = 3,
.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
.used_ubridge = 1,
.ubridge = 1,
.swid_config = {
{
.used_type = 1,
@@ -84,7 +84,7 @@ struct mlxsw_sp_upper {
enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_SUBPORT,
MLXSW_SP_RIF_TYPE_VLAN_EMU,
MLXSW_SP_RIF_TYPE_VLAN,
MLXSW_SP_RIF_TYPE_FID,
MLXSW_SP_RIF_TYPE_IPIP_LB, /* IP-in-IP loopback. */
MLXSW_SP_RIF_TYPE_MAX,
@@ -208,7 +208,6 @@ struct mlxsw_sp {
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
bool ubridge;
struct mlxsw_sp_pgt *pgt;
bool pgt_smpe_index_valid;
};
@@ -737,6 +736,7 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1285,7 +1285,8 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1476,7 +1477,6 @@ void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
u16 count);
int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
u16 smpe, u16 local_port, bool member);
u16 mlxsw_sp_pgt_index_to_mid(const struct mlxsw_sp *mlxsw_sp, u16 pgt_index);
int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
@@ -182,16 +182,6 @@ static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
}
#define MLXSW_SP_FID_PGT_FLOOD_ENTRIES 15354 /* Reserved for flooding. */
u16 mlxsw_sp_pgt_index_to_mid(const struct mlxsw_sp *mlxsw_sp, u16 pgt_index)
{
if (mlxsw_sp->ubridge)
return pgt_index;
return pgt_index - MLXSW_SP_FID_PGT_FLOOD_ENTRIES;
}
static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
bool member)
{
@@ -204,21 +194,16 @@ mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_pgt_entry *pgt_entry,
u16 local_port, bool member)
{
bool smpe_index_valid;
char *smid2_pl;
u16 smpe, mid;
int err;
smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
if (!smid2_pl)
return -ENOMEM;
smpe_index_valid = mlxsw_sp->ubridge ? mlxsw_sp->pgt->smpe_index_valid :
false;
smpe = mlxsw_sp->ubridge ? pgt_entry->smpe_index : 0;
mid = mlxsw_sp_pgt_index_to_mid(mlxsw_sp, pgt_entry->index);
mlxsw_reg_smid2_pack(smid2_pl, mid, 0, 0, smpe_index_valid, smpe);
mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
mlxsw_sp->pgt->smpe_index_valid,
pgt_entry->smpe_index);
mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
@@ -7730,7 +7730,7 @@ u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
/* We only return the VID for VLAN RIFs. Otherwise we return an
* invalid value (0).
*/
if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN_EMU)
if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
goto out;
vid = mlxsw_sp_fid_8021q_vid(rif->fid);
@@ -9316,17 +9316,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
rif_subport->vid);
efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9351,9 +9352,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
mlxsw_sp_fid_rif_set(rif->fid, rif);
err = mlxsw_sp_fid_rif_set(rif->fid, rif);
if (err)
goto err_fid_rif_set;
return 0;
err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
@@ -9365,7 +9372,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9442,9 +9449,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
mlxsw_sp_fid_rif_set(rif->fid, rif);
err = mlxsw_sp_fid_rif_set(rif->fid, rif);
if (err)
goto err_fid_rif_set;
return 0;
err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
@@ -9464,7 +9477,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9549,11 +9562,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN_EMU,
static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
bool enable)
{
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
rif->dev->mtu, rif->dev->dev_addr,
rif->mac_profile_id, vid, efid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
struct netlink_ext_ack *extack)
{
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
u8 mac_profile;
int err;
err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
&mac_profile, extack);
if (err)
return err;
rif->mac_profile_id = mac_profile;
err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
if (err)
goto err_rif_vlan_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
if (err)
goto err_fid_mc_flood_set;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), true);
if (err)
goto err_fid_bc_flood_set;
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
goto err_rif_fdb_op;
err = mlxsw_sp_fid_rif_set(rif->fid, rif);
if (err)
goto err_fid_rif_set;
return 0;
err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
err_rif_vlan_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
mlxsw_sp_fid_rif_unset(rif->fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_fid_configure,
.deconfigure = mlxsw_sp_rif_fid_deconfigure,
.configure = mlxsw_sp1_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
u16 efid = mlxsw_sp_fid_index(rif->fid);
return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp2_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
@@ -9628,7 +9749,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN_EMU] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
@@ -9816,7 +9937,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN_EMU] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
@@ -82,7 +82,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
@@ -1681,7 +1681,8 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
}
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
const char *mac, u16 fid, u16 vid,
bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
{
@@ -1694,7 +1695,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1709,18 +1711,18 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
bool dynamic)
const char *mac, u16 fid, u16 vid,
bool adding, bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_NOP,
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
adding, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
@@ -1782,7 +1784,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
bridge_port->system_port,
fdb_info->addr, fid_index,
fdb_info->addr, fid_index, vid,
adding, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
@@ -1796,7 +1798,6 @@ static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
bool adding)
{
char *sfd_pl;
u16 mid_idx;
u8 num_rec;
int err;
@@ -1804,11 +1805,10 @@ static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
if (!sfd_pl)
return -ENOMEM;
mid_idx = mlxsw_sp_pgt_index_to_mid(mlxsw_sp, mdb_entry->mid);
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
mid_idx);
mdb_entry->mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -2906,10 +2906,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
u16 local_port, vid, fid, evid = 0;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
u16 local_port;
u16 vid, fid;
bool do_notification = true;
int err;
@@ -2940,9 +2939,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
evid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");