Commit d7a39d39 authored by David S. Miller

Merge branch 'bridge-mdb-bulk-delete'

Ido Schimmel says:

====================
Add MDB bulk deletion support

This patchset adds MDB bulk deletion support, allowing user space to
request the deletion of matching entries instead of dumping the entire
MDB and issuing a separate deletion request for each matching entry.
Support is added in both the bridge and VXLAN drivers in a similar
fashion to the existing FDB bulk deletion support.

The parameters according to which bulk deletion can be performed are
similar to the FDB ones, namely: Destination port, VLAN ID, state (e.g.,
"permanent"), routing protocol, source / destination VNI, destination IP
and UDP port. Flushing based on flags (e.g., "offload", "fast_leave",
"added_by_star_ex", "blocked") is not currently supported, but can be
added in the future, if a use case arises.
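
For illustration, with the iproute2 changes referenced below, a bulk
deletion restricted by some of these parameters could look roughly as
follows (device and port names are placeholders; the keywords are the
ones exercised by the selftests in patches #8-#9):

# Flush all MDB entries on the bridge:
bridge mdb flush dev br0
# Flush only entries pointing to a specific bridge port:
bridge mdb flush dev br0 port swp1
# Flush only permanent (or only non-permanent) entries in VLAN 10:
bridge mdb flush dev br0 vid 10 permanent
bridge mdb flush dev br0 vid 10 nopermanent
# Flush only entries installed by a specific routing protocol:
bridge mdb flush dev br0 proto bgp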

Patch #1 adds a new uAPI attribute to allow specifying the state mask
according to which bulk deletion will be performed, if any.

Patch #2 adds a new policy according to which bulk deletion requests
(with 'NLM_F_BULK' flag set) will be parsed.

Patches #3-#4 add a new NDO for MDB bulk deletion and invoke it from the
rtnetlink code when a bulk deletion request is made.

Patches #5-#6 implement the MDB bulk deletion NDO in the bridge and
VXLAN drivers, respectively.
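
As a sketch of the VXLAN-specific parameters (hypothetical VXLAN device
and VNI values; the "dst", "dst_port", "vni" and "src_vni" keywords are
the same ones the bridge selftest below expects to be rejected, since
they only apply to the VXLAN driver):

# Flush only entries in source VNI 10010 whose remote is 198.51.100.1:
bridge mdb flush dev vxlan0 src_vni 10010 dst 198.51.100.1
# Flush only entries forwarded to destination VNI 10020 over UDP port 4789:
bridge mdb flush dev vxlan0 vni 10020 dst_port 4789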

Patch #7 allows user space to issue MDB bulk deletion requests by no
longer rejecting the 'NLM_F_BULK' flag when it is set in 'RTM_DELMDB'
requests.

Patches #8-#9 add selftests for both drivers, for both good and bad
flows.

iproute2 changes can be found here [1].

[1] https://github.com/idosch/iproute2/tree/submit/mdb_flush_v1
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b6895d0a c3e87a7f
@@ -3235,6 +3235,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_fdb_get = vxlan_fdb_get,
.ndo_mdb_add = vxlan_mdb_add,
.ndo_mdb_del = vxlan_mdb_del,
.ndo_mdb_del_bulk = vxlan_mdb_del_bulk,
.ndo_mdb_dump = vxlan_mdb_dump,
.ndo_mdb_get = vxlan_mdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
......
@@ -74,6 +74,14 @@ struct vxlan_mdb_config {
u8 rt_protocol;
};
struct vxlan_mdb_flush_desc {
union vxlan_addr remote_ip;
__be32 src_vni;
__be32 remote_vni;
__be16 remote_port;
u8 rt_protocol;
};
static const struct rhashtable_params vxlan_mdb_rht_params = {
.head_offset = offsetof(struct vxlan_mdb_entry, rhnode),
.key_offset = offsetof(struct vxlan_mdb_entry, key),
@@ -1306,6 +1314,145 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
return err;
}
static const struct nla_policy
vxlan_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
[MDBE_ATTR_DST] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
sizeof(struct in6_addr)),
[MDBE_ATTR_DST_PORT] = { .type = NLA_U16 },
[MDBE_ATTR_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
[MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
[MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
};
static int vxlan_mdb_flush_desc_init(struct vxlan_dev *vxlan,
struct vxlan_mdb_flush_desc *desc,
struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
int err;
if (entry->ifindex && entry->ifindex != vxlan->dev->ifindex) {
NL_SET_ERR_MSG_MOD(extack, "Invalid port net device");
return -EINVAL;
}
if (entry->vid) {
NL_SET_ERR_MSG_MOD(extack, "VID must not be specified");
return -EINVAL;
}
if (!tb[MDBA_SET_ENTRY_ATTRS])
return 0;
err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
tb[MDBA_SET_ENTRY_ATTRS],
vxlan_mdbe_attrs_del_bulk_pol, extack);
if (err)
return err;
if (mdbe_attrs[MDBE_ATTR_STATE_MASK]) {
u8 state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);
if ((state_mask & MDB_PERMANENT) && !(entry->state & MDB_PERMANENT)) {
NL_SET_ERR_MSG_MOD(extack, "Only permanent MDB entries are supported");
return -EINVAL;
}
}
if (mdbe_attrs[MDBE_ATTR_RTPROT])
desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);
if (mdbe_attrs[MDBE_ATTR_DST])
vxlan_nla_get_addr(&desc->remote_ip, mdbe_attrs[MDBE_ATTR_DST]);
if (mdbe_attrs[MDBE_ATTR_DST_PORT])
desc->remote_port =
cpu_to_be16(nla_get_u16(mdbe_attrs[MDBE_ATTR_DST_PORT]));
if (mdbe_attrs[MDBE_ATTR_VNI])
desc->remote_vni =
cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_VNI]));
if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
desc->src_vni =
cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));
return 0;
}
static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan,
struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_flush_desc *desc)
{
struct vxlan_mdb_remote *remote, *tmp;
list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list) {
struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
__be32 remote_vni;
if (desc->remote_ip.sa.sa_family &&
!vxlan_addr_equal(&desc->remote_ip, &rd->remote_ip))
continue;
/* Encapsulation is performed with source VNI if remote VNI
* is not set.
*/
remote_vni = rd->remote_vni ? : mdb_entry->key.vni;
if (desc->remote_vni && desc->remote_vni != remote_vni)
continue;
if (desc->remote_port && desc->remote_port != rd->remote_port)
continue;
if (desc->rt_protocol &&
desc->rt_protocol != remote->rt_protocol)
continue;
vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
}
}
static void vxlan_mdb_flush(struct vxlan_dev *vxlan,
const struct vxlan_mdb_flush_desc *desc)
{
struct vxlan_mdb_entry *mdb_entry;
struct hlist_node *tmp;
/* The removal of an entry cannot trigger the removal of another entry
* since entries are always added to the head of the list.
*/
hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) {
if (desc->src_vni && desc->src_vni != mdb_entry->key.vni)
continue;
vxlan_mdb_remotes_flush(vxlan, mdb_entry, desc);
/* Entry will only be removed if its remotes list is empty. */
vxlan_mdb_entry_put(vxlan, mdb_entry);
}
}
int vxlan_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_mdb_flush_desc desc = {};
int err;
ASSERT_RTNL();
err = vxlan_mdb_flush_desc_init(vxlan, &desc, tb, extack);
if (err)
return err;
vxlan_mdb_flush(vxlan, &desc);
return 0;
}
static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
@@ -1575,29 +1722,6 @@ static void vxlan_mdb_check_empty(void *ptr, void *arg)
WARN_ON_ONCE(1);
}
static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan,
struct vxlan_mdb_entry *mdb_entry)
{
struct vxlan_mdb_remote *remote, *tmp;
list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list)
vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
}
static void vxlan_mdb_entries_flush(struct vxlan_dev *vxlan)
{
struct vxlan_mdb_entry *mdb_entry;
struct hlist_node *tmp;
/* The removal of an entry cannot trigger the removal of another entry
* since entries are always added to the head of the list.
*/
hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) {
vxlan_mdb_remotes_flush(vxlan, mdb_entry);
vxlan_mdb_entry_put(vxlan, mdb_entry);
}
}
int vxlan_mdb_init(struct vxlan_dev *vxlan)
{
int err;
@@ -1613,7 +1737,9 @@ int vxlan_mdb_init(struct vxlan_dev *vxlan)
void vxlan_mdb_fini(struct vxlan_dev *vxlan)
{
vxlan_mdb_entries_flush(vxlan);
struct vxlan_mdb_flush_desc desc = {};
vxlan_mdb_flush(vxlan, &desc);
WARN_ON_ONCE(vxlan->cfg.flags & VXLAN_F_MDB);
rhashtable_free_and_destroy(&vxlan->mdb_tbl, vxlan_mdb_check_empty,
NULL);
......
@@ -235,6 +235,8 @@ int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack);
int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
int vxlan_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
u32 seq, struct netlink_ext_ack *extack);
struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
......
@@ -1329,6 +1329,9 @@ struct netdev_net_notifier {
* int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
* struct netlink_ext_ack *extack);
* Deletes the MDB entry from dev.
* int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
* struct netlink_ext_ack *extack);
* Bulk deletes MDB entries from dev.
* int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
* struct netlink_callback *cb);
* Dumps MDB entries from dev. The first argument (marker) in the netlink
@@ -1611,6 +1614,9 @@ struct net_device_ops {
int (*ndo_mdb_del)(struct net_device *dev,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
int (*ndo_mdb_del_bulk)(struct net_device *dev,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
int (*ndo_mdb_dump)(struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb);
......
@@ -757,6 +757,7 @@ enum {
MDBE_ATTR_VNI,
MDBE_ATTR_IFINDEX,
MDBE_ATTR_SRC_VNI,
MDBE_ATTR_STATE_MASK,
__MDBE_ATTR_MAX,
};
#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
......
@@ -471,6 +471,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fdb_get = br_fdb_get,
.ndo_mdb_add = br_mdb_add,
.ndo_mdb_del = br_mdb_del,
.ndo_mdb_del_bulk = br_mdb_del_bulk,
.ndo_mdb_dump = br_mdb_dump,
.ndo_mdb_get = br_mdb_get,
.ndo_bridge_getlink = br_getlink,
......
@@ -1412,6 +1412,139 @@ int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
return err;
}
struct br_mdb_flush_desc {
u32 port_ifindex;
u16 vid;
u8 rt_protocol;
u8 state;
u8 state_mask;
};
static const struct nla_policy br_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
[MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
};
static int br_mdb_flush_desc_init(struct br_mdb_flush_desc *desc,
struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
int err;
desc->port_ifindex = entry->ifindex;
desc->vid = entry->vid;
desc->state = entry->state;
if (!tb[MDBA_SET_ENTRY_ATTRS])
return 0;
err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
tb[MDBA_SET_ENTRY_ATTRS],
br_mdbe_attrs_del_bulk_pol, extack);
if (err)
return err;
if (mdbe_attrs[MDBE_ATTR_STATE_MASK])
desc->state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);
if (mdbe_attrs[MDBE_ATTR_RTPROT])
desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);
return 0;
}
static void br_mdb_flush_host(struct net_bridge *br,
struct net_bridge_mdb_entry *mp,
const struct br_mdb_flush_desc *desc)
{
u8 state;
if (desc->port_ifindex && desc->port_ifindex != br->dev->ifindex)
return;
if (desc->rt_protocol)
return;
state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
if (desc->state_mask && (state & desc->state_mask) != desc->state)
return;
br_multicast_host_leave(mp, true);
if (!mp->ports && netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
}
static void br_mdb_flush_pgs(struct net_bridge *br,
struct net_bridge_mdb_entry *mp,
const struct br_mdb_flush_desc *desc)
{
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p;
for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
u8 state;
if (desc->port_ifindex &&
desc->port_ifindex != p->key.port->dev->ifindex) {
pp = &p->next;
continue;
}
if (desc->rt_protocol && desc->rt_protocol != p->rt_protocol) {
pp = &p->next;
continue;
}
state = p->flags & MDB_PG_FLAGS_PERMANENT ? MDB_PERMANENT : 0;
if (desc->state_mask &&
(state & desc->state_mask) != desc->state) {
pp = &p->next;
continue;
}
br_multicast_del_pg(mp, p, pp);
}
}
static void br_mdb_flush(struct net_bridge *br,
const struct br_mdb_flush_desc *desc)
{
struct net_bridge_mdb_entry *mp;
spin_lock_bh(&br->multicast_lock);
/* Safe variant is not needed because entries are removed from the list
* upon group timer expiration or bridge deletion.
*/
hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
if (desc->vid && desc->vid != mp->addr.vid)
continue;
br_mdb_flush_host(br, mp, desc);
br_mdb_flush_pgs(br, mp, desc);
}
spin_unlock_bh(&br->multicast_lock);
}
int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct net_bridge *br = netdev_priv(dev);
struct br_mdb_flush_desc desc = {};
int err;
err = br_mdb_flush_desc_init(&desc, tb, extack);
if (err)
return err;
br_mdb_flush(br, &desc);
return 0;
}
static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
......
@@ -1022,6 +1022,8 @@ int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack);
int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
struct netlink_callback *cb);
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
@@ -1430,6 +1432,12 @@ static inline int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
return -EOPNOTSUPP;
}
static inline int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
static inline int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
struct netlink_callback *cb)
{
......
@@ -6410,17 +6410,64 @@ static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}
static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
struct br_mdb_entry *entry = nla_data(attr);
struct br_mdb_entry zero_entry = {};
if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
return -EINVAL;
}
if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
NL_SET_ERR_MSG(extack, "Unknown entry state");
return -EINVAL;
}
if (entry->flags) {
NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
return -EINVAL;
}
if (entry->vid >= VLAN_N_VID - 1) {
NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
return -EINVAL;
}
if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
NL_SET_ERR_MSG(extack, "Entry address cannot be set");
return -EINVAL;
}
return 0;
}
static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
rtnl_validate_mdb_entry_del_bulk,
sizeof(struct br_mdb_entry)),
[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};
static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
struct net *net = sock_net(skb->sk);
struct br_port_msg *bpm;
struct net_device *dev;
int err;
err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
MDBA_SET_ENTRY_MAX, mdba_policy, extack);
if (!del_bulk)
err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
MDBA_SET_ENTRY_MAX, mdba_policy,
extack);
else
err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
mdba_del_bulk_policy, extack);
if (err)
return err;
@@ -6441,6 +6488,14 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
if (del_bulk) {
if (!dev->netdev_ops->ndo_mdb_del_bulk) {
NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
return -EOPNOTSUPP;
}
return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
}
if (!dev->netdev_ops->ndo_mdb_del) {
NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
return -EOPNOTSUPP;
@@ -6686,5 +6741,6 @@ void __init rtnetlink_init(void)
rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
RTNL_FLAG_BULK_DEL_SUPPORTED);
}
@@ -803,11 +803,198 @@ cfg_test_dump()
cfg_test_dump_common "L2" l2_grps_get
}
# Check flush functionality with different parameters.
cfg_test_flush()
{
local num_entries
# Add entries with different attributes and check that they are all
# flushed when the flush command is given with no parameters.
# Different port.
bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10
bridge mdb add dev br0 port $swp2 grp 239.1.1.2 vid 10
# Different VLAN ID.
bridge mdb add dev br0 port $swp1 grp 239.1.1.3 vid 10
bridge mdb add dev br0 port $swp1 grp 239.1.1.4 vid 20
# Different routing protocol.
bridge mdb add dev br0 port $swp1 grp 239.1.1.5 vid 10 proto bgp
bridge mdb add dev br0 port $swp1 grp 239.1.1.6 vid 10 proto zebra
# Different state.
bridge mdb add dev br0 port $swp1 grp 239.1.1.7 vid 10 permanent
bridge mdb add dev br0 port $swp1 grp 239.1.1.8 vid 10 temp
bridge mdb flush dev br0
num_entries=$(bridge mdb show dev br0 | wc -l)
[[ $num_entries -eq 0 ]]
check_err $? 0 "Not all entries flushed after flush all"
# Check that when flushing by port only entries programmed with the
# specified port are flushed and the rest are not.
bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10
bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 10
bridge mdb add dev br0 port br0 grp 239.1.1.1 vid 10
bridge mdb flush dev br0 port $swp1
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
check_fail $? "Entry not flushed by specified port"
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
check_err $? "Entry flushed by wrong port"
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port br0"
check_err $? "Host entry flushed by wrong port"
bridge mdb flush dev br0 port br0
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port br0"
check_fail $? "Host entry not flushed by specified port"
bridge mdb flush dev br0
# Check that when flushing by VLAN ID only entries programmed with the
# specified VLAN ID are flushed and the rest are not.
bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10
bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 10
bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 20
bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 20
bridge mdb flush dev br0 vid 10
bridge mdb get dev br0 grp 239.1.1.1 vid 10 &> /dev/null
check_fail $? "Entry not flushed by specified VLAN ID"
bridge mdb get dev br0 grp 239.1.1.1 vid 20 &> /dev/null
check_err $? "Entry flushed by wrong VLAN ID"
bridge mdb flush dev br0
# Check that all permanent entries are flushed when "permanent" is
# specified and that temporary entries are not.
bridge mdb add dev br0 port $swp1 grp 239.1.1.1 permanent vid 10
bridge mdb add dev br0 port $swp2 grp 239.1.1.1 temp vid 10
bridge mdb flush dev br0 permanent
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
check_fail $? "Entry not flushed by \"permanent\" state"
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
check_err $? "Entry flushed by wrong state (\"permanent\")"
bridge mdb flush dev br0
# Check that all temporary entries are flushed when "nopermanent" is
# specified and that permanent entries are not.
bridge mdb add dev br0 port $swp1 grp 239.1.1.1 permanent vid 10
bridge mdb add dev br0 port $swp2 grp 239.1.1.1 temp vid 10
bridge mdb flush dev br0 nopermanent
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
check_err $? "Entry flushed by wrong state (\"nopermanent\")"
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
check_fail $? "Entry not flushed by \"nopermanent\" state"
bridge mdb flush dev br0
# Check that L2 host entries are not flushed when "nopermanent" is
# specified, but flushed when "permanent" is specified.
bridge mdb add dev br0 port br0 grp 01:02:03:04:05:06 permanent vid 10
bridge mdb flush dev br0 nopermanent
bridge mdb get dev br0 grp 01:02:03:04:05:06 vid 10 &> /dev/null
check_err $? "L2 host entry flushed by wrong state (\"nopermanent\")"
bridge mdb flush dev br0 permanent
bridge mdb get dev br0 grp 01:02:03:04:05:06 vid 10 &> /dev/null
check_fail $? "L2 host entry not flushed by \"permanent\" state"
bridge mdb flush dev br0
# Check that IPv4 host entries are not flushed when "permanent" is
# specified, but flushed when "nopermanent" is specified.
bridge mdb add dev br0 port br0 grp 239.1.1.1 temp vid 10
bridge mdb flush dev br0 permanent
bridge mdb get dev br0 grp 239.1.1.1 vid 10 &> /dev/null
check_err $? "IPv4 host entry flushed by wrong state (\"permanent\")"
bridge mdb flush dev br0 nopermanent
bridge mdb get dev br0 grp 239.1.1.1 vid 10 &> /dev/null
check_fail $? "IPv4 host entry not flushed by \"nopermanent\" state"
bridge mdb flush dev br0
# Check that IPv6 host entries are not flushed when "permanent" is
# specified, but flushed when "nopermanent" is specified.
bridge mdb add dev br0 port br0 grp ff0e::1 temp vid 10
bridge mdb flush dev br0 permanent
bridge mdb get dev br0 grp ff0e::1 vid 10 &> /dev/null
check_err $? "IPv6 host entry flushed by wrong state (\"permanent\")"
bridge mdb flush dev br0 nopermanent
bridge mdb get dev br0 grp ff0e::1 vid 10 &> /dev/null
check_fail $? "IPv6 host entry not flushed by \"nopermanent\" state"
bridge mdb flush dev br0
# Check that when flushing by routing protocol only entries programmed
# with the specified routing protocol are flushed and the rest are not.
bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10 proto bgp
bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 10 proto zebra
bridge mdb add dev br0 port br0 grp 239.1.1.1 vid 10
bridge mdb flush dev br0 proto bgp
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
check_fail $? "Entry not flushed by specified routing protocol"
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
check_err $? "Entry flushed by wrong routing protocol"
bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port br0"
check_err $? "Host entry flushed by wrong routing protocol"
bridge mdb flush dev br0
# Test that an error is returned when trying to flush using unsupported
# parameters.
bridge mdb flush dev br0 src_vni 10 &> /dev/null
check_fail $? "Managed to flush by source VNI"
bridge mdb flush dev br0 dst 198.51.100.1 &> /dev/null
check_fail $? "Managed to flush by destination IP"
bridge mdb flush dev br0 dst_port 4789 &> /dev/null
check_fail $? "Managed to flush by UDP destination port"
bridge mdb flush dev br0 vni 10 &> /dev/null
check_fail $? "Managed to flush by destination VNI"
log_test "Flush tests"
}
cfg_test()
{
cfg_test_host
cfg_test_port
cfg_test_dump
cfg_test_flush
}
__fwd_test_host_ip()
@@ -1166,8 +1353,8 @@ ctrl_test()
ctrl_mldv2_is_in_test
}
if ! bridge mdb help 2>&1 | grep -q "get"; then
echo "SKIP: iproute2 too old, missing bridge mdb get support"
if ! bridge mdb help 2>&1 | grep -q "flush"; then
echo "SKIP: iproute2 too old, missing bridge mdb flush support"
exit $ksft_skip
fi
......