Commit f46d9632 authored by David S. Miller

Merge branch 'rtnl-pushdown-prep'

Florian Westphal says:

====================
rtnetlink: preparation patches for further rtnl lock pushdown/removal

These patches split the large rtnl_fill_ifinfo into smaller chunks
to better see which parts:

1. require rtnl
2. do not require it at all
3. rely on rtnl locking now but could be converted

Changes since v3:

I dropped the 'ifalias' patch, I have a change to decouple ifalias and
rtnl mutex, I will send it once this series has been merged.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 61f26d92 4c82a95e
...@@ -522,11 +522,15 @@ static size_t rtnl_link_get_af_size(const struct net_device *dev, ...@@ -522,11 +522,15 @@ static size_t rtnl_link_get_af_size(const struct net_device *dev,
static bool rtnl_have_link_slave_info(const struct net_device *dev) static bool rtnl_have_link_slave_info(const struct net_device *dev)
{ {
struct net_device *master_dev; struct net_device *master_dev;
bool ret = false;
master_dev = netdev_master_upper_dev_get((struct net_device *) dev); rcu_read_lock();
master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
if (master_dev && master_dev->rtnl_link_ops) if (master_dev && master_dev->rtnl_link_ops)
return true; ret = true;
return false; rcu_read_unlock();
return ret;
} }
static int rtnl_link_slave_info_fill(struct sk_buff *skb, static int rtnl_link_slave_info_fill(struct sk_buff *skb,
...@@ -1211,6 +1215,36 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, ...@@ -1211,6 +1215,36 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
return -EMSGSIZE; return -EMSGSIZE;
} }
/* Append IFLA_NUM_VF and the IFLA_VFINFO_LIST nest describing @dev's
 * SR-IOV virtual functions, but only when the caller requested VF data
 * via RTEXT_FILTER_VF and the device has a parent to query.
 *
 * Returns 0 on success (including the "nothing to emit" cases) and
 * -EMSGSIZE when the skb ran out of tail room.
 */
static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vf_list;
	int vf, total;

	if (!(ext_filter_mask & RTEXT_FILTER_VF) || !dev->dev.parent)
		return 0;

	total = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, total))
		return -EMSGSIZE;

	/* Without ndo_get_vf_config there is no per-VF state to report,
	 * so the count attribute alone is the whole story.
	 */
	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vf_list = nla_nest_start(skb, IFLA_VFINFO_LIST);
	if (!vf_list)
		return -EMSGSIZE;

	for (vf = 0; vf < total; vf++) {
		if (rtnl_fill_vfinfo(skb, dev, vf, vf_list))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vf_list);
	return 0;
}
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{ {
struct rtnl_link_ifmap map; struct rtnl_link_ifmap map;
...@@ -1307,6 +1341,48 @@ static u32 rtnl_get_event(unsigned long event) ...@@ -1307,6 +1341,48 @@ static u32 rtnl_get_event(unsigned long event)
return rtnl_event_type; return rtnl_event_type;
} }
/* Emit IFLA_MASTER (the master device's ifindex) when @dev has a
 * master upper device. Walks the upper-dev link under RCU, so this
 * does not rely on the rtnl mutex.
 *
 * Returns 0 when there is no master or the attribute was added,
 * or the nla_put_u32() error (-EMSGSIZE) on overflow.
 */
static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *master;
	int err = 0;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		err = nla_put_u32(skb, IFLA_MASTER, master->ifindex);
	rcu_read_unlock();

	return err;
}
/* Emit IFLA_LINK unless the device is its own lower link (the common
 * case for non-stacked devices), in which case the attribute is
 * omitted entirely.
 */
static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
{
	int iflink = dev_get_iflink(dev);

	return iflink == dev->ifindex ? 0 :
	       nla_put_u32(skb, IFLA_LINK, iflink);
}
static int rtnl_fill_link_netnsid(struct sk_buff *skb,
const struct net_device *dev)
{
if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
if (!net_eq(dev_net(dev), link_net)) {
int id = peernet2id_alloc(dev_net(dev), link_net);
if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
return -EMSGSIZE;
}
}
return 0;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change, int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask, unsigned int flags, u32 ext_filter_mask,
...@@ -1316,7 +1392,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, ...@@ -1316,7 +1392,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
struct nlattr *af_spec; struct nlattr *af_spec;
struct rtnl_af_ops *af_ops; struct rtnl_af_ops *af_ops;
struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
ASSERT_RTNL(); ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
...@@ -1345,10 +1420,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, ...@@ -1345,10 +1420,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
#ifdef CONFIG_RPS #ifdef CONFIG_RPS
nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif #endif
(dev->ifindex != dev_get_iflink(dev) && nla_put_iflink(skb, dev) ||
nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) || put_master_ifindex(skb, dev) ||
(upper_dev &&
nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
(dev->qdisc && (dev->qdisc &&
nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
...@@ -1385,26 +1458,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, ...@@ -1385,26 +1458,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
if (rtnl_fill_stats(skb, dev)) if (rtnl_fill_stats(skb, dev))
goto nla_put_failure; goto nla_put_failure;
if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && if (rtnl_fill_vf(skb, dev, ext_filter_mask))
nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
goto nla_put_failure;
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
ext_filter_mask & RTEXT_FILTER_VF) {
int i;
struct nlattr *vfinfo;
int num_vfs = dev_num_vf(dev->dev.parent);
vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
if (!vfinfo)
goto nla_put_failure; goto nla_put_failure;
for (i = 0; i < num_vfs; i++) {
if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
goto nla_put_failure;
}
nla_nest_end(skb, vfinfo);
}
if (rtnl_port_fill(skb, dev, ext_filter_mask)) if (rtnl_port_fill(skb, dev, ext_filter_mask))
goto nla_put_failure; goto nla_put_failure;
...@@ -1417,17 +1472,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, ...@@ -1417,17 +1472,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure; goto nla_put_failure;
} }
if (dev->rtnl_link_ops && if (rtnl_fill_link_netnsid(skb, dev))
dev->rtnl_link_ops->get_link_net) {
struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
if (!net_eq(dev_net(dev), link_net)) {
int id = peernet2id_alloc(dev_net(dev), link_net);
if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
goto nla_put_failure; goto nla_put_failure;
}
}
if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC))) if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
goto nla_put_failure; goto nla_put_failure;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment