Commit ee625938 authored by David S. Miller

Merge branch 'mlxsw-flooding-and-cosmetics'

Jiri Pirko says:

====================
mlxsw: driver update

This driver update mainly brings support for the user to set up flooding
on a specified port, via a bridge flag. Also, there is a fix in the ageing
time conversion. The rest is just cosmetics.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 65bdc43d c7070fc4
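
The user-visible knob added here is the bridge port's flood flag, which
iproute2 exposes as "bridge link set dev <port> flood on|off". Inside the
kernel the flag travels in the switchdev bridge-flags attribute. A minimal
sketch of toggling it through that interface (illustrative only; the helper
name is made up, but the attribute and setter mirror the patches below):

    /* Sketch: disable unknown-unicast flooding on a switchdev port by
     * clearing BR_FLOOD from the bridge port flags attribute. The bridge
     * does the equivalent when userspace sends IFLA_BRPORT_UNICAST_FLOOD.
     */
    #include <linux/if_bridge.h>
    #include <net/switchdev.h>

    static int example_disable_uc_flood(struct net_device *dev)
    {
            struct switchdev_attr attr = {
                    .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
                    /* keep learning flags, drop BR_FLOOD */
                    .u.brport_flags = BR_LEARNING | BR_LEARNING_SYNC,
            };

            return switchdev_port_attr_set(dev, &attr);
    }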
@@ -278,8 +278,8 @@ Flooding L2 domain
 For a given L2 VLAN domain, the switch device should flood multicast/broadcast
 and unknown unicast packets to all ports in domain, if allowed by port's
 current STP state. The switch driver, knowing which ports are within which
-vlan L2 domain, can program the switch device for flooding. The packet should
-also be sent to the port netdev for processing by the bridge driver. The
+vlan L2 domain, can program the switch device for flooding. The packet may
+be sent to the port netdev for processing by the bridge driver. The
 bridge should not reflood the packet to the same ports the device flooded,
 otherwise there will be duplicate packets on the wire.
@@ -298,6 +298,9 @@ packets up to the bridge driver for flooding. This is not ideal as the number
 of ports scale in the L2 domain as the device is much more efficient at
 flooding packets that software.
 
+If supported by the device, flood control can be offloaded to it, preventing
+certain netdevs from flooding unicast traffic for which there is no FDB entry.
+
 IGMP Snooping
 ^^^^^^^^^^^^^
......
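
The flooding model described above boils down to a per-FID port bitmap: each
L2 domain keeps a set of ports to which unknown traffic is replicated. A toy
userspace model of that table (illustrative only; the real state lives in
hardware and is programmed via the SFTR register later in this commit):

    #include <stdio.h>

    #define MAX_FIDS  16

    /* bit i of flood_table[fid] set => port i floods traffic for fid */
    static unsigned char flood_table[MAX_FIDS];

    static void flood_set(int fid, int port, int set)
    {
            if (set)
                    flood_table[fid] |= 1u << port;
            else
                    flood_table[fid] &= ~(1u << port);
    }

    int main(void)
    {
            flood_set(1, 2, 1);     /* ports 2 and 3 join FID 1 */
            flood_set(1, 3, 1);
            flood_set(1, 3, 0);     /* user clears BR_FLOOD on port 3 */
            printf("FID 1 floods to bitmap 0x%02x\n", flood_table[1]);
            return 0;
    }

Printing "0x04": only port 2 still floods FID 1 after the flag is cleared.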
@@ -287,7 +287,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
         mlxsw_emad_op_tlv_status_set(op_tlv, 0);
         mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
         mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
-        if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+        if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
                 mlxsw_emad_op_tlv_method_set(op_tlv,
                                              MLXSW_EMAD_OP_TLV_METHOD_QUERY);
         else
@@ -362,7 +362,7 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
         char *op_tlv;
 
         op_tlv = mlxsw_emad_op_tlv(skb);
-        return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+        return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
 }
 
 #define MLXSW_EMAD_TIMEOUT_MS        200
......
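
Both core.c hunks are pure style: "Yoda conditions" (constant on the left) are
flipped to natural order. The constant-first form once guarded against a
mistyped assignment, but compilers now catch that on their own, as this
standalone snippet (not from the patch) illustrates:

    #include <stdio.h>

    enum reg_access { ACCESS_QUERY, ACCESS_WRITE };

    int main(void)
    {
            enum reg_access type = ACCESS_WRITE;

            /* Yoda style "ACCESS_QUERY == type" makes the typo
             * "ACCESS_QUERY = type" a compile error. The natural order
             * below relies on the compiler instead: gcc and clang warn
             * on "if (type = ACCESS_QUERY)" via -Wparentheses.
             */
            if (type == ACCESS_QUERY)
                    puts("query");
            else
                    puts("write");
            return 0;
    }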
@@ -1662,8 +1662,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
                                               CIR_OUT_PARAM_LO));
                         memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
                 }
-        } else if (!err && out_mbox)
+        } else if (!err && out_mbox) {
                 memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+        }
 
         mutex_unlock(&mlxsw_pci->cmd.lock);
......
@@ -348,8 +348,9 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
                      MLXSW_REG_SFD_REC_LEN, 0x0C, false);
 
 /* reg_sfd_uc_sub_port
- * LAG sub port.
- * Must be 0 if multichannel VEPA is not enabled.
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
  * Access: RW
  */
 MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
@@ -396,10 +397,9 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
         mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
 }
 
-static inline void
-mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
-                        char *mac, u16 *p_vid,
-                        u8 *p_local_port)
+static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+                                           char *mac, u16 *p_vid,
+                                           u8 *p_local_port)
 {
         mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
         *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
@@ -474,7 +474,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
 MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
                        MLXSW_REG_SFN_REC_LEN, 0x02);
 
-/* reg_sfd_mac_sub_port
+/* reg_sfn_mac_sub_port
  * VEPA channel on the local port.
  * 0 if multichannel VEPA is not enabled.
  * Access: RO
@@ -482,14 +482,14 @@ MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
 MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
                      MLXSW_REG_SFN_REC_LEN, 0x08, false);
 
-/* reg_sfd_mac_fid
+/* reg_sfn_mac_fid
  * Filtering identifier.
  * Access: RO
  */
 MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
                      MLXSW_REG_SFN_REC_LEN, 0x08, false);
 
-/* reg_sfd_mac_system_port
+/* reg_sfn_mac_system_port
  * Unique port identifier for the final destination of the packet.
  * Access: RO
  */
......
@@ -1227,6 +1227,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
         mlxsw_sp_port->local_port = local_port;
         mlxsw_sp_port->learning = 1;
         mlxsw_sp_port->learning_sync = 1;
+        mlxsw_sp_port->uc_flood = 1;
         mlxsw_sp_port->pvid = 1;
         mlxsw_sp_port->pcpu_stats =
@@ -1899,12 +1900,12 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
                         if (err)
                                 netdev_err(dev, "Failed to join bridge\n");
                         mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
-                        mlxsw_sp_port->bridged = true;
+                        mlxsw_sp_port->bridged = 1;
                 } else {
                         err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
                         if (err)
                                 netdev_err(dev, "Failed to leave bridge\n");
-                        mlxsw_sp_port->bridged = false;
+                        mlxsw_sp_port->bridged = 0;
                         mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
                 }
         }
......
@@ -84,10 +84,11 @@ struct mlxsw_sp_port {
         struct mlxsw_sp *mlxsw_sp;
         u8 local_port;
         u8 stp_state;
-        u8 learning:1;
-        u8 learning_sync:1;
+        u8 learning:1,
+           learning_sync:1,
+           uc_flood:1,
+           bridged:1;
         u16 pvid;
-        bool bridged;
         /* 802.1Q bridge VLANs */
         unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
         /* VLAN interfaces */
......
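
The spectrum.h change packs all four port flags into a single u8 bitfield
instead of two one-bit fields plus a standalone bool, making room for
uc_flood without growing the struct. A standalone sketch of the effect
(member names mirror the driver; exact sizes are ABI-dependent):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct old_layout {
            uint8_t stp_state;
            uint8_t learning:1;
            uint8_t learning_sync:1;
            uint16_t pvid;
            bool bridged;           /* a whole byte for one flag */
    };

    struct new_layout {
            uint8_t stp_state;
            uint8_t learning:1,     /* all flags share one byte */
                    learning_sync:1,
                    uc_flood:1,
                    bridged:1;
            uint16_t pvid;
    };

    int main(void)
    {
            printf("old: %zu bytes, new: %zu bytes\n",
                   sizeof(struct old_layout), sizeof(struct new_layout));
            return 0;
    }

On a typical LP64 target this prints "old: 6 bytes, new: 4 bytes".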
@@ -66,7 +66,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                 attr->u.brport_flags =
                         (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
-                        (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0);
+                        (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+                        (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
                 break;
         default:
                 return -EOPNOTSUPP;
@@ -123,15 +124,89 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
         return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
 }
 
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     u16 fid_begin, u16 fid_end, bool set,
+                                     bool only_uc)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        u16 range = fid_end - fid_begin + 1;
+        char *sftr_pl;
+        int err;
+
+        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+        if (!sftr_pl)
+                return -ENOMEM;
+
+        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+                            mlxsw_sp_port->local_port, set);
+        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+        if (err)
+                goto buffer_out;
+
+        /* Flooding control allows one to decide whether a given port will
+         * flood unicast traffic for which there is no FDB entry.
+         */
+        if (only_uc)
+                goto buffer_out;
+
+        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
+                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+                            mlxsw_sp_port->local_port, set);
+        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+        kfree(sftr_pl);
+        return err;
+}
+
+static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                      bool set)
+{
+        struct net_device *dev = mlxsw_sp_port->dev;
+        u16 vid, last_visited_vid;
+        int err;
+
+        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
+                                                true);
+                if (err) {
+                        last_visited_vid = vid;
+                        goto err_port_flood_set;
+                }
+        }
+
+        return 0;
+
+err_port_flood_set:
+        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+                __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+        netdev_err(dev, "Failed to configure unicast flooding\n");
+        return err;
+}
+
 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            struct switchdev_trans *trans,
                                            unsigned long brport_flags)
 {
+        unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+        bool set;
+        int err;
+
         if (switchdev_trans_ph_prepare(trans))
                 return 0;
 
+        if ((uc_flood ^ brport_flags) & BR_FLOOD) {
+                set = mlxsw_sp_port->uc_flood ? false : true;
+                err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+                if (err)
+                        return err;
+        }
+
+        mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
         mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
         mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+
         return 0;
 }
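
The XOR in mlxsw_sp_port_attr_br_flags_set is the usual changed-bits test:
hardware is only touched when BR_FLOOD actually flips. A small standalone
demonstration of the logic (BR_FLOOD's value here is illustrative; the real
definition lives in include/linux/if_bridge.h):

    #include <stdio.h>

    #define BR_FLOOD (1 << 6)   /* illustrative value */

    /* XOR of current and requested flags isolates the bits that changed */
    static int flood_changed(unsigned long cur_flags, unsigned long new_flags)
    {
            return !!((cur_flags ^ new_flags) & BR_FLOOD);
    }

    int main(void)
    {
            unsigned long cur = BR_FLOOD;   /* flooding currently on */

            printf("%d\n", flood_changed(cur, 0));          /* 1: program hw */
            printf("%d\n", flood_changed(cur, BR_FLOOD));   /* 0: skip */
            return 0;
    }

Note also the rollback in mlxsw_sp_port_uc_flood_set above: on failure it
re-walks the VLANs it already touched and restores the previous setting.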
@@ -150,9 +225,10 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
 
 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                             struct switchdev_trans *trans,
-                                            unsigned long ageing_jiffies)
+                                            unsigned long ageing_clock_t)
 {
         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
         u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
 
         if (switchdev_trans_ph_prepare(trans))
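
This is the ageing fix from the cover letter: switchdev hands the driver the
bridge ageing time as a clock_t (USER_HZ ticks), not jiffies, so reading it
as jiffies skews the value whenever HZ != USER_HZ. A worked userspace example
of the arithmetic (the HZ value is assumed for illustration):

    #include <stdio.h>

    #define USER_HZ 100     /* userspace tick rate, fixed ABI */
    #define HZ      1000    /* assumed kernel tick rate */

    int main(void)
    {
            /* the bridge default ageing of 300 s arrives as clock_t ticks */
            unsigned long ageing_clock_t = 300 * USER_HZ;       /* 30000 */

            /* old code read the value as jiffies: 30000 / HZ = 30 s */
            unsigned long wrong_secs = ageing_clock_t / HZ;

            /* fixed code scales clock_t to jiffies first, which is what
             * clock_t_to_jiffies() does when HZ is a multiple of USER_HZ
             */
            unsigned long ageing_jiffies = ageing_clock_t * (HZ / USER_HZ);
            unsigned long right_secs = ageing_jiffies / HZ;     /* 300 s */

            printf("as jiffies: %lu s, converted: %lu s\n",
                   wrong_secs, right_secs);
            return 0;
    }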
@@ -247,40 +323,6 @@ static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
 }
 
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     u16 fid, bool set, bool only_uc)
-{
-        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-        char *sftr_pl;
-        int err;
-
-        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
-        if (!sftr_pl)
-                return -ENOMEM;
-
-        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid,
-                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
-                            mlxsw_sp_port->local_port, set);
-        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-        if (err)
-                goto buffer_out;
-
-        /* Flooding control allows one to decide whether a given port will
-         * flood unicast traffic for which there is no FDB entry.
-         */
-        if (only_uc)
-                goto buffer_out;
-
-        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid,
-                            MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
-                            mlxsw_sp_port->local_port, set);
-        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
-buffer_out:
-        kfree(sftr_pl);
-        return err;
-}
-
 static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
                                   u16 vid_end)
 {
@@ -345,14 +387,13 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
                         netdev_err(dev, "Failed to map FID=%d", vid);
                         return err;
                 }
+        }
 
-                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, true,
-                                                false);
-                if (err) {
-                        netdev_err(dev, "Failed to set flooding for FID=%d",
-                                   vid);
-                        return err;
-                }
+        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+                                        true, false);
+        if (err) {
+                netdev_err(dev, "Failed to configure flooding\n");
+                return err;
         }
 
         for (vid = vid_begin; vid <= vid_end;
@@ -530,15 +571,14 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
         if (init)
                 goto out;
 
-        for (vid = vid_begin; vid <= vid_end; vid++) {
-                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, false,
-                                                false);
-                if (err) {
-                        netdev_err(dev, "Failed to clear flooding for FID=%d",
-                                   vid);
-                        return err;
-                }
+        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+                                        false, false);
+        if (err) {
+                netdev_err(dev, "Failed to clear flooding\n");
+                return err;
+        }
 
+        for (vid = vid_begin; vid <= vid_end; vid++) {
                 /* Remove FID mapping in case of Virtual mode */
                 err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
                 if (err) {
@@ -692,7 +732,7 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
         return err;
 }
 
-const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
         .switchdev_port_attr_get        = mlxsw_sp_port_attr_get,
         .switchdev_port_attr_set        = mlxsw_sp_port_attr_set,
         .switchdev_port_obj_add         = mlxsw_sp_port_obj_add,
......
@@ -1147,7 +1147,7 @@ static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
         }
 
         status = mlxsw_reg_pude_oper_status_get(pude_pl);
-        if (MLXSW_PORT_OPER_STATUS_UP == status) {
+        if (status == MLXSW_PORT_OPER_STATUS_UP) {
                 netdev_info(mlxsw_sx_port->dev, "link up\n");
                 netif_carrier_on(mlxsw_sx_port->dev);
         } else {
......
@@ -746,7 +746,7 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                 .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
         };
         u16 mode = BRIDGE_MODE_UNDEF;
-        u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+        u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
         int err;
 
         err = switchdev_port_attr_get(dev, &attr);
@@ -817,6 +817,9 @@ static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
                         err = switchdev_port_br_setflag(dev, attr,
                                                         BR_LEARNING_SYNC);
                         break;
+                case IFLA_BRPORT_UNICAST_FLOOD:
+                        err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
+                        break;
                 default:
                         err = -EOPNOTSUPP;
                         break;
......
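
With the two net/switchdev changes the flood flag round-trips: getlink now
reports BR_FLOOD in the answered mask and setlink accepts
IFLA_BRPORT_UNICAST_FLOOD. A tiny check of the mask logic (flag values mirror
include/linux/if_bridge.h but are reproduced here only for illustration):

    #include <stdio.h>

    #define BR_LEARNING      (1 << 5)   /* illustrative values */
    #define BR_FLOOD         (1 << 6)
    #define BR_LEARNING_SYNC (1 << 9)

    int main(void)
    {
            unsigned int old_mask = BR_LEARNING | BR_LEARNING_SYNC;
            unsigned int new_mask = old_mask | BR_FLOOD;

            /* before: flood state invisible to userspace; after: reported */
            printf("flood reported before=%d after=%d\n",
                   !!(old_mask & BR_FLOOD), !!(new_mask & BR_FLOOD));
            return 0;
    }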