Commit 39712e59 authored by David S. Miller

Merge branch 'mlxsw-fixes'

Jiri Pirko says:

====================
mlxsw fixes

Another batch of fixes from Ido.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 562a9f91 28a01d2d
@@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
	}
}

/* SPAFT - Switch Port Acceptable Frame Types
* ------------------------------------------
* The Switch Port Acceptable Frame Types register configures the frame
* admittance of the port.
*/
#define MLXSW_REG_SPAFT_ID 0x2010
#define MLXSW_REG_SPAFT_LEN 0x08
static const struct mlxsw_reg_info mlxsw_reg_spaft = {
	.id = MLXSW_REG_SPAFT_ID,
	.len = MLXSW_REG_SPAFT_LEN,
};

/* reg_spaft_local_port
* Local port number.
* Access: Index
*
* Note: CPU port is not supported (all tag types are allowed).
*/
MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
/* reg_spaft_sub_port
* Virtual port within the physical port.
* Should be set to 0 when virtual ports are not enabled on the port.
* Access: RW
*/
MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
/* reg_spaft_allow_untagged
* When set, untagged frames on the ingress are allowed (default).
* Access: RW
*/
MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
/* reg_spaft_allow_prio_tagged
* When set, priority tagged frames on the ingress are allowed (default).
* Access: RW
*/
MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
/* reg_spaft_allow_tagged
* When set, tagged frames on the ingress are allowed (default).
* Access: RW
*/
MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
					bool allow_untagged)
{
	MLXSW_REG_ZERO(spaft, payload);
	mlxsw_reg_spaft_local_port_set(payload, local_port);
	mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
	mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
	mlxsw_reg_spaft_allow_tagged_set(payload, true);
}
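/* A minimal usage sketch (illustration only, not part of this patch),
 * assuming the usual mlxsw register-write path (mlxsw_reg_write() and the
 * MLXSW_REG() macro). It packs SPAFT for a given local port and writes it
 * out; only untagged admittance is parameterized, since
 * mlxsw_reg_spaft_pack() unconditionally allows prio-tagged and tagged
 * frames. The function name below is hypothetical.
 */
static inline int example_spaft_write(struct mlxsw_core *core, u8 local_port,
				      bool allow_untagged)
{
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, local_port, allow_untagged);
	return mlxsw_reg_write(core, MLXSW_REG(spaft), spaft_pl);
}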
/* SFGC - Switch Flooding Group Configuration
 * ------------------------------------------
 * The following register controls the association of flooding tables and MIDs
@@ -3203,6 +3259,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
		return "SPVID";
	case MLXSW_REG_SPVM_ID:
		return "SPVM";
	case MLXSW_REG_SPAFT_ID:
		return "SPAFT";
	case MLXSW_REG_SFGC_ID:
		return "SFGC";
	case MLXSW_REG_SFTR_ID:
...
@@ -2123,6 +2123,8 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
...
@@ -254,5 +254,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
#endif
@@ -370,7 +370,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
@@ -379,6 +380,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
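/* Usage sketch (hypothetical caller, not part of this patch): with the
 * exported helper above, PVID 0 means "no PVID": instead of mapping
 * untagged traffic to a VLAN, the port drops it at ingress via SPAFT,
 * while VLAN-tagged (and prio-tagged) frames remain allowed. Any non-zero
 * PVID programs SPVID and re-allows untagged traffic when needed.
 */
static int example_pvid_toggle(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	/* Drop untagged traffic by removing the PVID. */
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
	if (err)
		return err;

	/* Restore the default PVID (VLAN 1), re-allowing untagged traffic. */
	return mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}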
static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -540,7 +588,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Changing activity bits only if HW operation succeded */
@@ -892,20 +945,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
		return err;
	}

	if (init)
		goto out;

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
...