Commit a8d0d841 authored by David S. Miller

Merge branch 'bond_stacked_vlans'

Vlad Yasevich says:

====================
Fix stacked vlan usage on top of bonds

The bonding device driver now supports q-in-q on top of bonds.  There
are a few issues here, though.

First, when arp monitoring is used, the bonding driver will not
correctly tag traffic if the arp source device is configured on top of
q-in-q.  It may also pick the wrong vlan id if the ordering of the
upper devices isn't as expected (there is no guarantee on ordering).

Second, alb/tlb may use what would be considered 'inner' vlans in its
learning announcements, since it simply announces all vlans configured
on top of the bond without regard for encapsulation/stacking.

This series fixes the above two issues.  It also depends on the
functionality introduced in
	http://patchwork.ozlabs.org/patch/349766/

Since v1:
  - Changed how patch 1 verifies the device path.  We no longer use the
    _all_upper version of the function; we find the path and, if it is
    found, collect the vlan information.
  - Use a constant to define the maximum vlan nesting level supported on
    top of bonding.  This can be changed if 2 turns out to be too low.
  - Include patch 2 in the series.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6bd64ac0 f60c3704
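
To illustrate the scheme the series moves to: bond_arp_send() now takes an
array of tags indexed by encapsulation level (0 = outermost) and pushes them
innermost-first, so the tag pushed last, the outer one, leads on the wire.
Below is a simplified userspace sketch of that ordering; struct vlan_tag and
push_tag() here are hypothetical stand-ins for the kernel primitives.

#include <stdio.h>
#include <string.h>

#define MAX_VLAN_ENCAP 2		/* mirrors BOND_MAX_VLAN_ENCAP */

struct vlan_tag {
	unsigned short proto;		/* e.g. 0x8100 (802.1Q) or 0x88a8 (802.1ad) */
	unsigned short id;		/* 0 means "no tag at this level" */
};

/* Stand-in for __vlan_put_tag()/vlan_put_tag(): each push prepends a
 * header, so the last tag pushed is the outermost on the wire.
 */
static void push_tag(struct vlan_tag t)
{
	printf("push proto 0x%04x vid %u\n", (unsigned)t.proto, (unsigned)t.id);
}

static void send_tagged(const struct vlan_tag tags[MAX_VLAN_ENCAP])
{
	int i;

	/* inner tags first, mirroring the loop in bond_arp_send() */
	for (i = MAX_VLAN_ENCAP - 1; i > 0; i--)
		if (tags[i].id)
			push_tag(tags[i]);
	if (tags[0].id)			/* outer tag goes on last */
		push_tag(tags[0]);
}

int main(void)
{
	struct vlan_tag tags[MAX_VLAN_ENCAP];

	memset(tags, 0, sizeof(tags));
	tags[0] = (struct vlan_tag){ 0x88a8, 100 };	/* outer (S-tag) */
	tags[1] = (struct vlan_tag){ 0x8100, 200 };	/* inner (C-tag) */
	send_tagged(tags);
	return 0;
}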
@@ -1045,7 +1045,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 	/* loop through vlans and send one packet for each */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (upper->priv_flags & IFF_802_1Q_VLAN)
+		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0)
 			alb_send_lp_vid(slave, mac_addr,
 					vlan_dev_vlan_id(upper));
 	}
......
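
To see what the new check in alb_send_learning_packets() filters, consider a
hypothetical q-in-q stack on the bond (device names are illustrative):

/* bond0.100      -> is_vlan_dev(), encap level 0: announced via
 *                   alb_send_lp_vid(), as before
 * bond0.100.200  -> is_vlan_dev(), encap level 1: skipped, since its
 *                   tag only ever appears inside a frame already
 *                   tagged with vid 100
 */

The old priv_flags test treated both devices the same, which is how learning
packets could go out carrying an inner vid with no outer tag.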
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 			  __be32 dest_ip, __be32 src_ip,
-			  struct bond_vlan_tag *inner,
-			  struct bond_vlan_tag *outer)
+			  struct bond_vlan_tag *tags)
 {
 	struct sk_buff *skb;
+	int i;
 
 	pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
 		 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 		net_err_ratelimited("ARP packet allocation failed\n");
 		return;
 	}
-	if (outer->vlan_id) {
-		if (inner->vlan_id) {
-			pr_debug("inner tag: proto %X vid %X\n",
-				 ntohs(inner->vlan_proto), inner->vlan_id);
-			skb = __vlan_put_tag(skb, inner->vlan_proto,
-					     inner->vlan_id);
-			if (!skb) {
-				net_err_ratelimited("failed to insert inner VLAN tag\n");
-				return;
-			}
-		}
+	/* Go through all the tags backwards and add them to the packet */
+	for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+		if (!tags[i].vlan_id)
+			continue;
+
+		pr_debug("inner tag: proto %X vid %X\n",
+			 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+		skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+				     tags[i].vlan_id);
+		if (!skb) {
+			net_err_ratelimited("failed to insert inner VLAN tag\n");
+			return;
+		}
+	}
 
-		pr_debug("outer reg: proto %X vid %X\n",
-			 ntohs(outer->vlan_proto), outer->vlan_id);
-		skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+	/* Set the outer tag */
+	if (tags[0].vlan_id) {
+		pr_debug("outer tag: proto %X vid %X\n",
+			 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+		skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
 		if (!skb) {
 			net_err_ratelimited("failed to insert outer VLAN tag\n");
 			return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 	arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+				    struct net_device *end_dev,
+				    struct bond_vlan_tag *tags)
+{
+	struct net_device *upper;
+	struct list_head *iter;
+	int idx;
+
+	if (start_dev == end_dev)
+		return true;
+
+	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+		if (bond_verify_device_path(upper, end_dev, tags)) {
+			if (is_vlan_dev(upper)) {
+				idx = vlan_get_encap_level(upper);
+				if (idx >= BOND_MAX_VLAN_ENCAP)
+					return false;
+				tags[idx].vlan_proto =
+					vlan_dev_vlan_proto(upper);
+				tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+			}
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-	struct net_device *upper, *vlan_upper;
-	struct list_head *iter, *vlan_iter;
 	struct rtable *rt;
-	struct bond_vlan_tag inner, outer;
+	struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
 	__be32 *targets = bond->params.arp_targets, addr;
 	int i;
+	bool ret;
 
 	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
 		pr_debug("basa: target %pI4\n", &targets[i]);
-		inner.vlan_proto = 0;
-		inner.vlan_id = 0;
-		outer.vlan_proto = 0;
-		outer.vlan_id = 0;
+		memset(tags, 0, sizeof(tags));
 
 		/* Find out through which dev should the packet go */
 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 			net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
 					     bond->dev->name,
 					     &targets[i]);
-			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+				      0, tags);
 			continue;
 		}
@@ -2201,51 +2237,11 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 			goto found;
 
 		rcu_read_lock();
-		/* first we search only for vlan devices. for every vlan
-		 * found we verify its upper dev list, searching for the
-		 * rt->dst.dev. If found we save the tag of the vlan and
-		 * proceed to send the packet.
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-						  vlan_iter) {
-			if (!is_vlan_dev(vlan_upper))
-				continue;
-
-			if (vlan_upper == rt->dst.dev) {
-				outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-				outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-				rcu_read_unlock();
-				goto found;
-			}
-
-			netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-							  iter) {
-				if (upper == rt->dst.dev) {
-					/* If the upper dev is a vlan dev too,
-					 * set the vlan tag to inner tag.
-					 */
-					if (is_vlan_dev(upper)) {
-						inner.vlan_proto = vlan_dev_vlan_proto(upper);
-						inner.vlan_id = vlan_dev_vlan_id(upper);
-					}
-					outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-					outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-					rcu_read_unlock();
-					goto found;
-				}
-			}
-		}
-
-		/* if the device we're looking for is not on top of any of
-		 * our upper vlans, then just search for any dev that
-		 * matches, and in case it's a vlan - save the id
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-			if (upper == rt->dst.dev) {
-				rcu_read_unlock();
-				goto found;
-			}
-		}
+		ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
 		rcu_read_unlock();
 
+		if (ret)
+			goto found;
+
 		/* Not our device - skip */
 		pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
@@ -2259,7 +2255,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
 		ip_rt_put(rt);
 		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-			      addr, &inner, &outer);
+			      addr, tags);
 	}
 }
......
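
For clarity, a worked example of what bond_verify_device_path() collects,
with hypothetical device names and assuming plain 802.1Q at both levels:

/* bond_verify_device_path(bond0, bond0.100.200, tags)
 *
 *   recursion: bond0 -> bond0.100 -> bond0.100.200 (== end_dev: true)
 *
 *   filled in on the way back out of the recursion:
 *     tags[1] = { .vlan_proto = htons(ETH_P_8021Q), .vlan_id = 200 }  inner
 *     tags[0] = { .vlan_proto = htons(ETH_P_8021Q), .vlan_id = 100 }  outer
 *
 *   a vlan nested deeper than BOND_MAX_VLAN_ENCAP (2) levels yields
 *   idx >= BOND_MAX_VLAN_ENCAP and the walk returns false
 */

Because each tag's slot comes from vlan_get_encap_level() rather than from
list ordering, this is what fixes the "wrong vlan id when the ordering of
upper devices isn't as expected" problem from the cover letter.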
@@ -36,6 +36,7 @@
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP	2
 #define BOND_MAX_ARP_TARGETS	16
 
 #define BOND_DEFAULT_MIIMON	100
......
@@ -484,4 +484,10 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
 	 */
 	skb->protocol = htons(ETH_P_802_2);
 }
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+	BUG_ON(!is_vlan_dev(dev));
+	return vlan_dev_priv(dev)->nest_level;
+}
 #endif /* !(_LINUX_IF_VLAN_H_) */
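
A note on the new helper: it must only be called on vlan devices (hence the
BUG_ON), so callers guard it with is_vlan_dev(), as bond_verify_device_path()
above does.  Assuming the nest_level accounting from the prerequisite patch,
the mapping looks like this (device names hypothetical):

/* bond0          -> not a vlan dev: calling the helper would BUG()
 * bond0.100      -> vlan_get_encap_level() == 0 (outermost tag)
 * bond0.100.200  -> vlan_get_encap_level() == 1 (inner tag)
 */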
@@ -3056,9 +3056,18 @@ extern int weight_p;
 extern int		bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+						 struct list_head **iter);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 						     struct list_head **iter);
 
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+	for (iter = &(dev)->adj_list.upper, \
+	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+	     updev; \
+	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
 	for (iter = &(dev)->all_adj_list.upper, \
......
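
A hedged usage sketch of the new first-level iterator (the function and its
caller are hypothetical; the RCU requirement comes from the comment above
the macro):

static void list_direct_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		pr_info("%s sits directly on top of %s\n",
			upper->name, dev->name);
	rcu_read_unlock();
}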
@@ -4541,6 +4541,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 }
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
+/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+						 struct list_head **iter)
+{
+	struct netdev_adjacent *upper;
+
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+	if (&upper->list == &dev->adj_list.upper)
+		return NULL;
+
+	*iter = &upper->list;
+
+	return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
 /**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
......
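
The practical difference between the two iterators, on a hypothetical stack:

/* bond0 <- bond0.100 <- bond0.100.200
 *
 * netdev_for_each_upper_dev_rcu(bond0, ...)      visits bond0.100 only
 * netdev_for_each_all_upper_dev_rcu(bond0, ...)  visits bond0.100 and
 *                                                bond0.100.200, flattened
 *
 * bond_verify_device_path() needs the first form: the flat walk loses
 * which vlan wraps which, which is exactly the ordering problem the
 * cover letter describes.
 */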