Commit bc4c48e7 authored by David S. Miller

Merge branch 'mdb-get'

Ido Schimmel says:

====================
Add MDB get support

This patchset adds MDB get support, allowing user space to request a
single MDB entry to be retrieved instead of dumping the entire MDB.
Support is added in both the bridge and VXLAN drivers.

Patches #1-#6 are small preparations in both drivers.

Patches #7-#8 add the required uAPI attributes for the new functionality
and the MDB get net device operation (NDO), respectively.

Patches #9-#10 implement the MDB get NDO in both drivers.

Patch #11 registers a handler for RTM_GETMDB messages in rtnetlink core.
The handler derives the net device from the ifindex specified in the
ancillary header and invokes its MDB get NDO.

Patches #12-#13 add selftests by converting tests that use MDB dump with
grep to the new MDB get functionality.

iproute2 changes can be found here [1].

v2:
* Patch #7: Add a comment to describe attributes structure.
* Patch #9: Add a comment above spin_lock_bh().

[1] https://github.com/idosch/iproute2/tree/submit/mdb_get_v1
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents eff8313b 0514dd05
......@@ -3226,6 +3226,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_mdb_add = vxlan_mdb_add,
.ndo_mdb_del = vxlan_mdb_del,
.ndo_mdb_dump = vxlan_mdb_dump,
.ndo_mdb_get = vxlan_mdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
......
......@@ -370,12 +370,10 @@ static bool vxlan_mdb_is_valid_source(const struct nlattr *attr, __be16 proto,
return true;
}
static void vxlan_mdb_config_group_set(struct vxlan_mdb_config *cfg,
const struct br_mdb_entry *entry,
const struct nlattr *source_attr)
static void vxlan_mdb_group_set(struct vxlan_mdb_entry_key *group,
const struct br_mdb_entry *entry,
const struct nlattr *source_attr)
{
struct vxlan_mdb_entry_key *group = &cfg->group;
switch (entry->addr.proto) {
case htons(ETH_P_IP):
group->dst.sa.sa_family = AF_INET;
......@@ -503,7 +501,7 @@ static int vxlan_mdb_config_attrs_init(struct vxlan_mdb_config *cfg,
entry->addr.proto, extack))
return -EINVAL;
vxlan_mdb_config_group_set(cfg, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
vxlan_mdb_group_set(&cfg->group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
/* rtnetlink code only validates that IPv4 group address is
* multicast.
......@@ -927,23 +925,20 @@ vxlan_mdb_nlmsg_src_list_size(const struct vxlan_mdb_entry_key *group,
return nlmsg_size;
}
static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_remote *remote)
static size_t
vxlan_mdb_nlmsg_remote_size(const struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_remote *remote)
{
const struct vxlan_mdb_entry_key *group = &mdb_entry->key;
struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
size_t nlmsg_size;
nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
/* MDBA_MDB */
nla_total_size(0) +
/* MDBA_MDB_ENTRY */
nla_total_size(0) +
/* MDBA_MDB_ENTRY_INFO */
nla_total_size(sizeof(struct br_mdb_entry)) +
nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
/* MDBA_MDB_EATTR_TIMER */
nla_total_size(sizeof(u32));
/* MDBA_MDB_EATTR_SOURCE */
if (vxlan_mdb_is_sg(group))
nlmsg_size += nla_total_size(vxlan_addr_size(&group->dst));
......@@ -971,6 +966,19 @@ static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
return nlmsg_size;
}
/* Upper bound on the netlink message size needed to describe a single
 * remote of a VXLAN MDB entry: ancillary header, the two enclosing nests
 * and the per-remote payload.
 */
static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
				   const struct vxlan_mdb_entry *mdb_entry,
				   const struct vxlan_mdb_remote *remote)
{
	size_t size = NLMSG_ALIGN(sizeof(struct br_port_msg));

	size += nla_total_size(0);	/* MDBA_MDB */
	size += nla_total_size(0);	/* MDBA_MDB_ENTRY */
	/* Remote entry */
	size += vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry, remote);

	return size;
}
static int vxlan_mdb_nlmsg_fill(const struct vxlan_dev *vxlan,
struct sk_buff *skb,
const struct vxlan_mdb_entry *mdb_entry,
......@@ -1298,6 +1306,156 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
return err;
}
/* Policy for the MDBE_ATTR_* attributes accepted in an RTM_GETMDB request
 * towards a VXLAN device: an optional source address (for (S, G) lookups)
 * and an optional source VNI.
 */
static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	/* Length-checked for both IPv4 and IPv6 addresses */
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
};
/* Parse the attributes of an RTM_GETMDB request into a VXLAN MDB lookup key.
 *
 * The group address comes from the mandatory MDBA_GET_ENTRY attribute. The
 * optional MDBA_GET_ENTRY_ATTRS nest may narrow the key with a source
 * address (MDBE_ATTR_SOURCE) and a source VNI (MDBE_ATTR_SRC_VNI); without
 * the latter, the device's default remote VNI is used.
 *
 * Returns 0 on success or a negative errno, with extack set on failure.
 */
static int vxlan_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			       struct vxlan_mdb_entry_key *group,
			       struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	memset(group, 0, sizeof(*group));
	group->vni = vxlan->default_dst.remote_vni;

	/* Without the optional nest the key is just the group address plus
	 * the default VNI set above.
	 */
	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		vxlan_mdb_group_set(group, entry, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS],
			       vxlan_mdbe_attrs_get_pol, extack);
	if (err)
		return err;

	/* The policy only checks the length; the source address must also
	 * match the protocol family of the group address.
	 */
	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				       entry->addr.proto, extack))
		return -EINVAL;

	vxlan_mdb_group_set(group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);

	if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
		group->vni =
			cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));

	return 0;
}
/* Allocate an skb large enough for the full RTM_NEWMDB reply describing
 * @mdb_entry: the fixed header and nests, plus one remote-sized chunk per
 * remote on the entry's list.
 */
static struct sk_buff *
vxlan_mdb_get_reply_alloc(const struct vxlan_dev *vxlan,
			  const struct vxlan_mdb_entry *mdb_entry)
{
	struct vxlan_mdb_remote *remote;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);
	list_for_each_entry(remote, &mdb_entry->remotes, list)
		nlmsg_size += vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry,
							  remote);

	return nlmsg_new(nlmsg_size, GFP_KERNEL);
}
/* Fill @skb with an RTM_NEWMDB message describing @mdb_entry, emitting one
 * entry info per remote inside the MDBA_MDB / MDBA_MDB_ENTRY nests.
 *
 * Returns 0 on success or a negative errno (-EMSGSIZE if the message does
 * not fit); on failure the partially built message is discarded via
 * nlmsg_cancel().
 */
static int
vxlan_mdb_get_reply_fill(const struct vxlan_dev *vxlan,
			 struct sk_buff *skb,
			 const struct vxlan_mdb_entry *mdb_entry,
			 u32 portid, u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct vxlan_mdb_remote *remote;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = vxlan->dev->ifindex;

	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	/* One MDB entry info per remote of the entry. */
	list_for_each_entry(remote, &mdb_entry->remotes, list) {
		err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	/* Discard the partially constructed message. */
	nlmsg_cancel(skb, nlh);
	return err;
}
/* ndo_mdb_get implementation for VXLAN: look up the single MDB entry
 * described by an RTM_GETMDB request and unicast an RTM_NEWMDB reply to
 * the requesting socket. Called with RTNL held (ASSERT_RTNL()).
 *
 * Returns 0 on success or a negative errno, with extack set on failure.
 */
int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
		  u32 seq, struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_mdb_entry *mdb_entry;
	struct vxlan_mdb_entry_key group;
	struct sk_buff *skb;
	int err;

	ASSERT_RTNL();

	err = vxlan_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group);
	if (!mdb_entry) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		return -ENOENT;
	}

	/* Reply is sized up front so filling can only fail on a bug. */
	skb = vxlan_mdb_get_reply_alloc(vxlan, mdb_entry);
	if (!skb)
		return -ENOMEM;

	err = vxlan_mdb_get_reply_fill(vxlan, skb, mdb_entry, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	/* Reply goes only to the requesting socket. */
	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
	return err;
}
struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
struct sk_buff *skb,
__be32 src_vni)
......
......@@ -235,6 +235,8 @@ int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack);
int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
u32 seq, struct netlink_ext_ack *extack);
struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
struct sk_buff *skb,
__be32 src_vni);
......
......@@ -1609,6 +1609,10 @@ struct net_device_ops {
int (*ndo_mdb_dump)(struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb);
int (*ndo_mdb_get)(struct net_device *dev,
struct nlattr *tb[], u32 portid,
u32 seq,
struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags,
......
......@@ -723,6 +723,24 @@ enum {
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
/* [MDBA_GET_ENTRY] = {
* struct br_mdb_entry
* [MDBA_GET_ENTRY_ATTRS] = {
* [MDBE_ATTR_SOURCE]
* struct in_addr / struct in6_addr
* [MDBE_ATTR_SRC_VNI]
* u32
* }
* }
*/
enum {
MDBA_GET_ENTRY_UNSPEC,
MDBA_GET_ENTRY,
MDBA_GET_ENTRY_ATTRS,
__MDBA_GET_ENTRY_MAX,
};
#define MDBA_GET_ENTRY_MAX (__MDBA_GET_ENTRY_MAX - 1)
/* [MDBA_SET_ENTRY_ATTRS] = {
* [MDBE_ATTR_xxx]
* ...
......
......@@ -92,7 +92,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
mdst = br_mdb_get(brmctx, skb, vid);
mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst))
br_multicast_flood(mdst, skb, brmctx, false, true);
......@@ -472,6 +472,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_mdb_add = br_mdb_add,
.ndo_mdb_del = br_mdb_del,
.ndo_mdb_dump = br_mdb_dump,
.ndo_mdb_get = br_mdb_get,
.ndo_bridge_getlink = br_getlink,
.ndo_bridge_setlink = br_setlink,
.ndo_bridge_dellink = br_dellink,
......
......@@ -175,7 +175,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
switch (pkt_type) {
case BR_PKT_MULTICAST:
mdst = br_mdb_get(brmctx, skb, vid);
mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
if ((mdst && mdst->host_joined) ||
......
......@@ -323,9 +323,6 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_bridge_mdb_entry *mp;
struct nlattr *nest, *nest2;
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;
nest = nla_nest_start_noflag(skb, MDBA_MDB);
if (nest == NULL)
return -EMSGSIZE;
......@@ -453,13 +450,15 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
return -EMSGSIZE;
}
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg)
{
size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
nla_total_size(sizeof(struct br_mdb_entry)) +
nla_total_size(sizeof(u32));
struct net_bridge_group_src *ent;
size_t addr_size = 0;
size_t nlmsg_size, addr_size = 0;
/* MDBA_MDB_ENTRY_INFO */
nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
/* MDBA_MDB_EATTR_TIMER */
nla_total_size(sizeof(u32));
if (!pg)
goto out;
......@@ -507,6 +506,17 @@ static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
return nlmsg_size;
}
/* Total netlink message size needed to notify about one bridge port group
 * entry: ancillary header, the two enclosing nests and the per-port-group
 * payload.
 */
static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg)
{
	size_t size = NLMSG_ALIGN(sizeof(struct br_port_msg));

	size += nla_total_size(0);		/* MDBA_MDB */
	size += nla_total_size(0);		/* MDBA_MDB_ENTRY */
	size += rtnl_mdb_nlmsg_pg_size(pg);	/* Port group entry */

	return size;
}
void br_mdb_notify(struct net_device *dev,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
......@@ -1401,3 +1411,161 @@ int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
br_mdb_config_fini(&cfg);
return err;
}
/* Policy for the MDBE_ATTR_* attributes accepted in a bridge RTM_GETMDB
 * request; only an optional source address (for (S, G) lookups) is valid.
 */
static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	/* Length-checked for both IPv4 and IPv6 addresses */
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};
/* Parse the attributes of an RTM_GETMDB request into a bridge multicast
 * group key (@group), optionally narrowed by a source address from the
 * MDBA_GET_ENTRY_ATTRS nest.
 *
 * Returns 0 on success or a negative errno, with extack set on failure.
 */
static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			    struct br_ip *group, struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	/* Without the optional nest the key is the group address alone. */
	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		__mdb_entry_to_br_ip(entry, group, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
			       extack);
	if (err)
		return err;

	/* The policy only checks the length; the source address must also
	 * match the protocol family of the group address.
	 */
	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				 entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);

	return 0;
}
/* Allocate an skb large enough for the full RTM_NEWMDB reply describing
 * @mp: one port-group-sized chunk for the host-joined state (if any) and
 * one per port group. Allocated with GFP_ATOMIC because the caller holds
 * the bridge's multicast spinlock.
 */
static struct sk_buff *
br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
{
	struct net_bridge_port_group *pg;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);

	/* Host-joined state is reported like a port group entry without a
	 * port group, hence the NULL argument.
	 */
	if (mp->host_joined)
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br))
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);

	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
}
/* Fill @skb with an RTM_NEWMDB message describing all of @mp's entries:
 * the host-joined entry (if any) followed by one entry per port group.
 * Called with the bridge's multicast lock held by br_mdb_get().
 *
 * Returns 0 on success or -EMSGSIZE if the message does not fit, in which
 * case the partially built message is discarded via nlmsg_cancel().
 */
static int br_mdb_get_reply_fill(struct sk_buff *skb,
				 struct net_bridge_mdb_entry *mp, u32 portid,
				 u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct net_bridge_port_group *pg;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = mp->br->dev->ifindex;

	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	/* Host-joined state is filled as an entry with a NULL port group. */
	if (mp->host_joined) {
		err = __mdb_fill_info(skb, mp, NULL);
		if (err)
			goto cancel;
	}

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br)) {
		err = __mdb_fill_info(skb, mp, pg);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	/* Discard the partially constructed message. */
	nlmsg_cancel(skb, nlh);
	return err;
}
/* ndo_mdb_get implementation for the bridge: look up the single MDB entry
 * described by an RTM_GETMDB request and unicast an RTM_NEWMDB reply to
 * the requesting socket.
 *
 * Returns 0 on success or a negative errno, with extack set on failure.
 */
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct sk_buff *skb;
	struct br_ip group;
	int err;

	err = br_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	/* Hold the multicast lock to ensure that the MDB entry does not change
	 * between the time the reply size is determined and when the reply is
	 * filled in.
	 */
	spin_lock_bh(&br->multicast_lock);

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		err = -ENOENT;
		goto unlock;
	}

	skb = br_mdb_get_reply_alloc(mp);
	if (!skb) {
		err = -ENOMEM;
		goto unlock;
	}

	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	spin_unlock_bh(&br->multicast_lock);

	/* Reply goes only to the requesting socket. */
	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
......@@ -145,8 +145,9 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
}
#endif
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
struct sk_buff *skb, u16 vid)
struct net_bridge_mdb_entry *
br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb,
u16 vid)
{
struct net_bridge *br = brmctx->br;
struct br_ip ip;
......
......@@ -955,8 +955,9 @@ int br_multicast_rcv(struct net_bridge_mcast **brmctx,
struct net_bridge_mcast_port **pmctx,
struct net_bridge_vlan *vlan,
struct sk_buff *skb, u16 vid);
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
struct sk_buff *skb, u16 vid);
struct net_bridge_mdb_entry *
br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb,
u16 vid);
int br_multicast_add_port(struct net_bridge_port *port);
void br_multicast_del_port(struct net_bridge_port *port);
void br_multicast_enable_port(struct net_bridge_port *port);
......@@ -1021,6 +1022,8 @@ int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
struct netlink_callback *cb);
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
struct netlink_ext_ack *extack);
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
struct net_bridge_mdb_entry *mp, bool notify);
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify);
......@@ -1345,8 +1348,9 @@ static inline int br_multicast_rcv(struct net_bridge_mcast **brmctx,
return 0;
}
static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
struct sk_buff *skb, u16 vid)
static inline struct net_bridge_mdb_entry *
br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb,
u16 vid)
{
return NULL;
}
......@@ -1430,6 +1434,13 @@ static inline int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
return 0;
}
static inline int br_mdb_get(struct net_device *dev, struct nlattr *tb[],
u32 portid, u32 seq,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
static inline int br_mdb_hash_init(struct net_bridge *br)
{
return 0;
......
......@@ -6219,6 +6219,93 @@ static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
/* Validate the br_mdb_entry carried in MDBA_GET_ENTRY of an RTM_GETMDB
 * request. Only the address fields and an optional VLAN id may be set;
 * ifindex, state and flags are rejected for a get request.
 *
 * Returns 0 if the entry is acceptable, -EINVAL otherwise with extack set.
 */
static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
				       struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex) {
		NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
		return -EINVAL;
	}

	if (entry->state) {
		NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
		return -EINVAL;
	}

	if (entry->flags) {
		NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
		return -EINVAL;
	}

	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	/* A protocol of zero is accepted in addition to IPv4/IPv6 —
	 * presumably for non-IP group addresses; verify against the MDB
	 * entry semantics of the target driver.
	 */
	if (entry->addr.proto != htons(ETH_P_IP) &&
	    entry->addr.proto != htons(ETH_P_IPV6) &&
	    entry->addr.proto != 0) {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	return 0;
}
/* Policy for the top-level attributes of an RTM_GETMDB request. */
static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
	/* Fixed-size br_mdb_entry, further checked field-by-field */
	[MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_get,
						  sizeof(struct br_mdb_entry)),
	[MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};
/* RTM_GETMDB doit handler: resolve the target device from the ifindex in
 * the ancillary br_port_msg header and delegate the actual lookup to the
 * device's ndo_mdb_get operation.
 *
 * Returns 0 on success or a negative errno, with extack set on failure.
 */
static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
	struct net *net = sock_net(in_skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
			  MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	/* MDBA_GET_ENTRY is mandatory; MDBA_GET_ENTRY_ATTRS is optional. */
	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_get) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, extack);
}
static int rtnl_validate_mdb_entry(const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
......@@ -6595,7 +6682,7 @@ void __init rtnetlink_init(void)
0);
rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, rtnl_mdb_dump, 0);
rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, 0);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.