Commit ee625938 authored by David S. Miller

Merge branch 'mlxsw-flooding-and-cosmetics'

Jiri Pirko says:

====================
mlxsw: driver update

This driver update mainly brings support for letting the user set up
flooding on a specified port via a bridge flag. There is also a fix in the
ageing time conversion. The rest is cosmetics.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 65bdc43d c7070fc4
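
With this series, unknown-unicast flooding becomes controllable per bridge
port from userspace. For example, with iproute2 (the port name swp1 below is
illustrative), `bridge link set dev swp1 flood off` tells the driver to stop
flooding unknown unicast to that port, and `bridge link set dev swp1 flood on`
restores the default. The netlink attribute behind this flag,
IFLA_BRPORT_UNICAST_FLOOD, is wired up to switchdev in the last two hunks
below.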
@@ -278,8 +278,8 @@ Flooding L2 domain
 For a given L2 VLAN domain, the switch device should flood multicast/broadcast
 and unknown unicast packets to all ports in domain, if allowed by port's
 current STP state. The switch driver, knowing which ports are within which
-vlan L2 domain, can program the switch device for flooding. The packet should
-also be sent to the port netdev for processing by the bridge driver. The
+vlan L2 domain, can program the switch device for flooding. The packet may
+be sent to the port netdev for processing by the bridge driver. The
 bridge should not reflood the packet to the same ports the device flooded,
 otherwise there will be duplicate packets on the wire.
@@ -298,6 +298,9 @@ packets up to the bridge driver for flooding. This is not ideal as the number
 of ports scale in the L2 domain as the device is much more efficient at
 flooding packets than software.
 
+If supported by the device, flood control can be offloaded to it, preventing
+certain netdevs from flooding unicast traffic for which there is no FDB entry.
+
 IGMP Snooping
 ^^^^^^^^^^^^^
@@ -287,7 +287,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
         mlxsw_emad_op_tlv_status_set(op_tlv, 0);
         mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
         mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
-        if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+        if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
                 mlxsw_emad_op_tlv_method_set(op_tlv,
                                              MLXSW_EMAD_OP_TLV_METHOD_QUERY);
         else
@@ -362,7 +362,7 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
         char *op_tlv;
 
         op_tlv = mlxsw_emad_op_tlv(skb);
-        return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+        return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
 }
 
 #define MLXSW_EMAD_TIMEOUT_MS 200
@@ -1662,8 +1662,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
                                                       CIR_OUT_PARAM_LO));
                         memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
                 }
-        } else if (!err && out_mbox)
+        } else if (!err && out_mbox) {
                 memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+        }
 
         mutex_unlock(&mlxsw_pci->cmd.lock);
@@ -348,8 +348,9 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
                      MLXSW_REG_SFD_REC_LEN, 0x0C, false);
 
 /* reg_sfd_uc_sub_port
- * LAG sub port.
- * Must be 0 if multichannel VEPA is not enabled.
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
  * Access: RW
  */
 MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
@@ -396,10 +397,9 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
         mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
 }
 
-static inline void
-mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
-                        char *mac, u16 *p_vid,
-                        u8 *p_local_port)
+static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+                                           char *mac, u16 *p_vid,
+                                           u8 *p_local_port)
 {
         mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
         *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
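
The sfd/sfn helpers in this header are generated by the
MLXSW_ITEM32_INDEXED()/MLXSW_ITEM_BUF_INDEXED() macros, which expand into
typed get/set accessors over a packed register payload. A hypothetical
consumer decoding the first unicast record of an SFD response would look
roughly like this (buffer setup and error handling elided; only the unpack
call is taken from this header):

        char mac[ETH_ALEN];
        u16 vid;
        u8 local_port;

        /* decode record 0 of a previously queried SFD payload */
        mlxsw_reg_sfd_uc_unpack(payload, 0, mac, &vid, &local_port);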
@@ -474,7 +474,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
 MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
                        MLXSW_REG_SFN_REC_LEN, 0x02);
 
-/* reg_sfd_mac_sub_port
+/* reg_sfn_mac_sub_port
  * VEPA channel on the local port.
  * 0 if multichannel VEPA is not enabled.
  * Access: RO
@@ -482,14 +482,14 @@ MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
 MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
                      MLXSW_REG_SFN_REC_LEN, 0x08, false);
 
-/* reg_sfd_mac_fid
+/* reg_sfn_mac_fid
  * Filtering identifier.
  * Access: RO
  */
 MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
                      MLXSW_REG_SFN_REC_LEN, 0x08, false);
 
-/* reg_sfd_mac_system_port
+/* reg_sfn_mac_system_port
  * Unique port identifier for the final destination of the packet.
  * Access: RO
  */
@@ -1227,6 +1227,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
         mlxsw_sp_port->local_port = local_port;
         mlxsw_sp_port->learning = 1;
         mlxsw_sp_port->learning_sync = 1;
+        mlxsw_sp_port->uc_flood = 1;
         mlxsw_sp_port->pvid = 1;
         mlxsw_sp_port->pcpu_stats =
@@ -1899,12 +1900,12 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
                                 if (err)
                                         netdev_err(dev, "Failed to join bridge\n");
                                 mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
-                                mlxsw_sp_port->bridged = true;
+                                mlxsw_sp_port->bridged = 1;
                         } else {
                                 err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
                                 if (err)
                                         netdev_err(dev, "Failed to leave bridge\n");
-                                mlxsw_sp_port->bridged = false;
+                                mlxsw_sp_port->bridged = 0;
                                 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
                         }
                 }
@@ -84,10 +84,11 @@ struct mlxsw_sp_port {
         struct mlxsw_sp *mlxsw_sp;
         u8 local_port;
         u8 stp_state;
-        u8 learning:1;
-        u8 learning_sync:1;
+        u8 learning:1,
+           learning_sync:1,
+           uc_flood:1,
+           bridged:1;
         u16 pvid;
-        bool bridged;
         /* 802.1Q bridge VLANs */
         unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
         /* VLAN interfaces */
@@ -66,7 +66,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                 attr->u.brport_flags =
                         (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
-                        (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0);
+                        (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+                        (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
                 break;
         default:
                 return -EOPNOTSUPP;
@@ -123,15 +124,89 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
         return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
 }
 
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     u16 fid_begin, u16 fid_end, bool set,
+                                     bool only_uc)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        u16 range = fid_end - fid_begin + 1;
+        char *sftr_pl;
+        int err;
+
+        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+        if (!sftr_pl)
+                return -ENOMEM;
+
+        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+                            mlxsw_sp_port->local_port, set);
+        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+        if (err)
+                goto buffer_out;
+
+        /* Flooding control allows one to decide whether a given port will
+         * flood unicast traffic for which there is no FDB entry.
+         */
+        if (only_uc)
+                goto buffer_out;
+
+        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
+                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+                            mlxsw_sp_port->local_port, set);
+        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+        kfree(sftr_pl);
+        return err;
+}
+
+static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                      bool set)
+{
+        struct net_device *dev = mlxsw_sp_port->dev;
+        u16 vid, last_visited_vid;
+        int err;
+
+        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
+                                                true);
+                if (err) {
+                        last_visited_vid = vid;
+                        goto err_port_flood_set;
+                }
+        }
+
+        return 0;
+
+err_port_flood_set:
+        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+                __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+        netdev_err(dev, "Failed to configure unicast flooding\n");
+        return err;
+}
+
 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            struct switchdev_trans *trans,
                                            unsigned long brport_flags)
 {
+        unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+        bool set;
+        int err;
+
         if (switchdev_trans_ph_prepare(trans))
                 return 0;
 
+        if ((uc_flood ^ brport_flags) & BR_FLOOD) {
+                set = mlxsw_sp_port->uc_flood ? false : true;
+                err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+                if (err)
+                        return err;
+        }
+
+        mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
         mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
         mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+
         return 0;
 }
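
Note how mlxsw_sp_port_attr_br_flags_set() only touches hardware when
BR_FLOOD actually flipped: XOR-ing the cached flag state against the
requested flags isolates the changed bit. The same pattern in isolation, as a
minimal sketch (the helper name is illustrative, not part of the driver):

        /* true iff `flag` differs between cached and requested flag sets */
        static bool brport_flag_changed(unsigned long cached,
                                        unsigned long requested,
                                        unsigned long flag)
        {
                return (cached ^ requested) & flag;
        }

Skipping the no-change case matters here because a flip walks every active
VLAN on the port and issues an SFTR register write per VID.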
@@ -150,9 +225,10 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
 
 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                             struct switchdev_trans *trans,
-                                            unsigned long ageing_jiffies)
+                                            unsigned long ageing_clock_t)
 {
         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
         u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
 
         if (switchdev_trans_ph_prepare(trans))
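
The ageing change is a unit fix: the value switchdev hands down originates
from userspace and is in clock_t ticks (USER_HZ based), not kernel jiffies. A
worked example, assuming USER_HZ=100 and HZ=250: a 300 second ageing time
arrives as 300 * 100 = 30000 ticks. The old code passed that straight to
jiffies_to_msecs(), yielding 30000 * 1000 / 250 = 120000 ms, so the hardware
was programmed with 120 s. Converting first, clock_t_to_jiffies(30000) =
30000 * 250 / 100 = 75000 jiffies, and jiffies_to_msecs(75000) / 1000 = 300 s,
as intended. On HZ=100 kernels the bug was invisible since HZ == USER_HZ.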
@@ -247,40 +323,6 @@ static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
 }
 
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     u16 fid, bool set, bool only_uc)
-{
-        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-        char *sftr_pl;
-        int err;
-
-        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
-        if (!sftr_pl)
-                return -ENOMEM;
-
-        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid,
-                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
-                            mlxsw_sp_port->local_port, set);
-        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-        if (err)
-                goto buffer_out;
-
-        /* Flooding control allows one to decide whether a given port will
-         * flood unicast traffic for which there is no FDB entry.
-         */
-        if (only_uc)
-                goto buffer_out;
-
-        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid,
-                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
-                            mlxsw_sp_port->local_port, set);
-        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
-buffer_out:
-        kfree(sftr_pl);
-        return err;
-}
-
 static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
                                   u16 vid_end)
 {
@@ -345,14 +387,13 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
                         netdev_err(dev, "Failed to map FID=%d", vid);
                         return err;
                 }
+        }
 
-                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, true,
-                                                false);
-                if (err) {
-                        netdev_err(dev, "Failed to set flooding for FID=%d",
-                                   vid);
-                        return err;
-                }
+        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+                                        true, false);
+        if (err) {
+                netdev_err(dev, "Failed to configure flooding\n");
+                return err;
         }
 
         for (vid = vid_begin; vid <= vid_end;
@@ -530,15 +571,14 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
         if (init)
                 goto out;
 
-        for (vid = vid_begin; vid <= vid_end; vid++) {
-                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, false,
-                                                false);
-                if (err) {
-                        netdev_err(dev, "Failed to clear flooding for FID=%d",
-                                   vid);
-                        return err;
-                }
+        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+                                        false, false);
+        if (err) {
+                netdev_err(dev, "Failed to clear flooding\n");
+                return err;
+        }
 
+        for (vid = vid_begin; vid <= vid_end; vid++) {
                 /* Remove FID mapping in case of Virtual mode */
                 err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
                 if (err) {
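
Both the add and del paths above now configure flooding once per VID range
rather than once per VID: SFTR takes a starting index plus a range, so a
single write per flood table covers fid_begin through fid_end, matching
range = fid_end - fid_begin + 1 in the new __mlxsw_sp_port_flood_set(). For
illustration, enabling flooding for VLANs 10-20 on a port becomes one call
(one UC-table plus one BM-table write) instead of eleven of each:

        /* illustrative call, not taken from the diff */
        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, 10, 20, true, false);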
@@ -692,7 +732,7 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
         return err;
 }
 
-const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
         .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
         .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
         .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
@@ -1147,7 +1147,7 @@ static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
         }
 
         status = mlxsw_reg_pude_oper_status_get(pude_pl);
-        if (MLXSW_PORT_OPER_STATUS_UP == status) {
+        if (status == MLXSW_PORT_OPER_STATUS_UP) {
                 netdev_info(mlxsw_sx_port->dev, "link up\n");
                 netif_carrier_on(mlxsw_sx_port->dev);
         } else {
@@ -746,7 +746,7 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                 .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
         };
         u16 mode = BRIDGE_MODE_UNDEF;
-        u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+        u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
         int err;
 
         err = switchdev_port_attr_get(dev, &attr);
@@ -817,6 +817,9 @@ static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
                         err = switchdev_port_br_setflag(dev, attr,
                                                         BR_LEARNING_SYNC);
                         break;
+                case IFLA_BRPORT_UNICAST_FLOOD:
+                        err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
+                        break;
                 default:
                         err = -EOPNOTSUPP;
                         break;
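To trace the whole path: `bridge link set dev swp1 flood off` (swp1
illustrative) sends IFLA_BRPORT_UNICAST_FLOOD in the protinfo netlink
attribute; switchdev_port_br_setlink_protinfo() above maps it to BR_FLOOD via
switchdev_port_br_setflag(), which issues a SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS
attr set. A kernel-side caller could do the equivalent directly; a rough
sketch against the switchdev API of this series (error handling trimmed, flag
combination illustrative):

        struct switchdev_attr attr = {
                .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
                /* keep learning, drop BR_FLOOD on this port */
                .u.brport_flags = BR_LEARNING | BR_LEARNING_SYNC,
        };
        int err = switchdev_port_attr_set(dev, &attr);

On mlxsw the set lands in mlxsw_sp_port_attr_br_flags_set(), which spots the
cleared BR_FLOOD bit and rewrites the unicast flood table for each of the
port's active VLANs.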