Commit 15077228 authored by Nikolay Aleksandrov, committed by David S. Miller

bonding: factor out slave id tx code and simplify xmit paths

I factored out the tx code that relies on a slave id into
bond_xmit_slave_id. It is made global because it can later also be used
in the 3ad mode xmit path. Obvious and unnecessary comments are removed.
Active-backup mode is simplified because bond_dev_queue_xmit always
consumes the skb. bond_xmit_xor becomes a one-liner thanks to
bond_xmit_slave_id.
bond_for_each_slave_from is not used in bond_xmit_slave_id because,
when RCU is used later, an important race condition can be avoided by
using the standard rculist routines.
Signed-off-by: Nikolay Aleksandrov <nikolay@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 78a646ce
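Before the diff itself, here is a small stand-alone C sketch of the selection order the new bond_xmit_slave_id helper follows: try the slave at slave_id first, then wrap around and take the first slave that can transmit, and free the frame if none can. This is illustration only, not kernel code; the fake_slave array, the can_tx flag and pick_slave() are invented for this sketch.

	/* Stand-alone illustration of the wrap-around slave selection. */
	#include <stdio.h>
	#include <stdbool.h>

	struct fake_slave {
		const char *name;
		bool can_tx;		/* stands in for slave_can_tx() */
	};

	/* Return the chosen index, or -1 when no slave can tx (the skb would be freed). */
	static int pick_slave(const struct fake_slave *slaves, int cnt, int slave_id)
	{
		int i;

		/* first pass: the slave with slave_id and everything after it */
		for (i = slave_id; i < cnt; i++)
			if (slaves[i].can_tx)
				return i;

		/* second pass: wrap around to the slaves before slave_id */
		for (i = 0; i < slave_id; i++)
			if (slaves[i].can_tx)
				return i;

		return -1;
	}

	int main(void)
	{
		struct fake_slave slaves[] = {
			{ "eth0", false },
			{ "eth1", false },
			{ "eth2", true },
		};
		int idx = pick_slave(slaves, 3, 1);	/* e.g. rr_tx_counter % slave_cnt == 1 */

		if (idx >= 0)
			printf("transmit via %s\n", slaves[idx].name);
		else
			printf("no usable slave, drop the frame\n");
		return 0;
	}

In the helper itself the two passes are open-coded with bond_for_each_slave and a decrementing index, as the first hunk below shows.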
@@ -3795,12 +3795,50 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 	return res;
 }
 
+/**
+ * bond_xmit_slave_id - transmit skb through slave with slave_id
+ * @bond: bonding device that is transmitting
+ * @skb: buffer to transmit
+ * @slave_id: slave id up to slave_cnt-1 through which to transmit
+ *
+ * This function tries to transmit through slave with slave_id but in case
+ * it fails, it tries to find the first available slave for transmission.
+ * The skb is consumed in all cases, thus the function is void.
+ */
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+{
+	struct slave *slave;
+	int i = slave_id;
+
+	/* Here we start from the slave with slave_id */
+	bond_for_each_slave(bond, slave) {
+		if (--i < 0) {
+			if (slave_can_tx(slave)) {
+				bond_dev_queue_xmit(bond, skb, slave->dev);
+				return;
+			}
+		}
+	}
+
+	/* Here we start from the first slave up to slave_id */
+	i = slave_id;
+	bond_for_each_slave(bond, slave) {
+		if (--i < 0)
+			break;
+		if (slave_can_tx(slave)) {
+			bond_dev_queue_xmit(bond, skb, slave->dev);
+			return;
+		}
+	}
+	/* no slave that can tx has been found */
+	kfree_skb(skb);
+}
+
 static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave, *start_at;
-	int i, slave_no, res = 1;
 	struct iphdr *iph = ip_hdr(skb);
+	struct slave *slave;
 
 	/*
 	 * Start with the curr_active_slave that joined the bond as the
@@ -3809,46 +3847,20 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
 	 * send the join/membership reports. The curr_active_slave found
 	 * will send all of this type of traffic.
 	 */
-	if ((iph->protocol == IPPROTO_IGMP) &&
-	    (skb->protocol == htons(ETH_P_IP))) {
+	if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
 		slave = bond->curr_active_slave;
-		if (!slave)
-			goto out;
+		if (slave && slave_can_tx(slave))
+			bond_dev_queue_xmit(bond, skb, slave->dev);
+		else
+			bond_xmit_slave_id(bond, skb, 0);
 	} else {
-		/*
-		 * Concurrent TX may collide on rr_tx_counter; we accept
-		 * that as being rare enough not to justify using an
-		 * atomic op here.
-		 */
-		slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
-
-		bond_for_each_slave(bond, slave) {
-			slave_no--;
-			if (slave_no < 0)
-				break;
-		}
-	}
-
-	start_at = slave;
-	bond_for_each_slave_from(bond, slave, i, start_at) {
-		if (IS_UP(slave->dev) &&
-		    (slave->link == BOND_LINK_UP) &&
-		    bond_is_active_slave(slave)) {
-			res = bond_dev_queue_xmit(bond, skb, slave->dev);
-			break;
-		}
-	}
-
-out:
-	if (res) {
-		/* no suitable interface, frame not sent */
-		kfree_skb(skb);
+		bond_xmit_slave_id(bond, skb,
+				   bond->rr_tx_counter++ % bond->slave_cnt);
 	}
 
 	return NETDEV_TX_OK;
 }
 
 /*
  * in active-backup mode, we know that bond->curr_active_slave is always valid if
  * the bond has a usable interface.
@@ -3857,14 +3869,11 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave;
-	int res = 1;
 
 	slave = bond->curr_active_slave;
 	if (slave)
-		res = bond_dev_queue_xmit(bond, skb, slave->dev);
-
-	if (res)
-		/* no suitable interface, frame not sent */
+		bond_dev_queue_xmit(bond, skb, slave->dev);
+	else
 		kfree_skb(skb);
 
 	return NETDEV_TX_OK;
@@ -3878,34 +3887,9 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave, *start_at;
-	int slave_no;
-	int i;
-	int res = 1;
-
-	slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
-
-	bond_for_each_slave(bond, slave) {
-		slave_no--;
-		if (slave_no < 0)
-			break;
-	}
-
-	start_at = slave;
-
-	bond_for_each_slave_from(bond, slave, i, start_at) {
-		if (IS_UP(slave->dev) &&
-		    (slave->link == BOND_LINK_UP) &&
-		    bond_is_active_slave(slave)) {
-			res = bond_dev_queue_xmit(bond, skb, slave->dev);
-			break;
-		}
-	}
-
-	if (res) {
-		/* no suitable interface, frame not sent */
-		kfree_skb(skb);
-	}
+	bond_xmit_slave_id(bond, skb,
+			   bond->xmit_hash_policy(skb, bond->slave_cnt));
 
 	return NETDEV_TX_OK;
 }
...
@@ -426,10 +426,20 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
 	return addr;
 }
 
+static inline bool slave_can_tx(struct slave *slave)
+{
+	if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
+	    bond_is_active_slave(slave))
+		return true;
+	else
+		return false;
+}
+
 struct bond_net;
 
 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
...
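A closing note on the round-robin path above: once bond_xmit_slave_id accepts a plain index, rotating across slaves reduces to the modulo arithmetic that was already present in the old code. A tiny stand-alone demonstration, with the counter, slave count and loop made up for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int rr_tx_counter = 0;	/* mirrors bond->rr_tx_counter */
		int slave_cnt = 3;		/* mirrors bond->slave_cnt */
		int pkt;

		/* each packet picks the next slave id: 0, 1, 2, 0, 1, 2 */
		for (pkt = 0; pkt < 6; pkt++)
			printf("packet %d -> slave id %u\n", pkt,
			       rr_tx_counter++ % slave_cnt);
		return 0;
	}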