Commit 18673533 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix regression in /proc/net/if_inet6: sometimes devices do not get
    listed.  From Eric Dumazet.

 2) Add IPSEC networking sub-section to MAINTAINERS.

 3) S390 networking fixes from Hendrik Brueckner and Stefan Raspl.

 4) Fix enslavement of devices that can't do VLAN properly, from Jiri
    Pirko.

 5) SCTP sack handling fix from Zijie Pan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  ipv6: addrconf: fix /proc/net/if_inet6
  bnx2x: fix handling mf storage modes
  qeth: fix deadlock between recovery and bonding driver
  smsgiucv: reestablish IUCV path after resume
  sctp: fix call to SCTP_CMD_PROCESS_SACK in sctp_cmd_interpreter()
  vlan: fix bond/team enslave of vlan challenged slave/port
  MAINTAINERS: Add explicit section for IPSEC networking.
Parents: ccbfddb7 9f0d3c27
@@ -5019,6 +5019,20 @@ F: net/ipv6/
 F:	include/net/ip*
 F:	arch/x86/net/*
 
+NETWORKING [IPSEC]
+M:	Steffen Klassert <steffen.klassert@secunet.com>
+M:	Herbert Xu <herbert@gondor.apana.org.au>
+M:	"David S. Miller" <davem@davemloft.net>
+L:	netdev@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+S:	Maintained
+F:	net/xfrm/
+F:	net/key/
+F:	net/ipv4/xfrm*
+F:	net/ipv6/xfrm*
+F:	include/uapi/linux/xfrm.h
+F:	include/net/xfrm.h
+
 NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK)
 M:	Paul Moore <paul@paul-moore.com>
 L:	netdev@vger.kernel.org
...
@@ -1519,7 +1519,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	/* no need to lock since we're protected by rtnl_lock */
 	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
 		pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
-		if (bond_vlan_used(bond)) {
+		if (vlan_uses_dev(bond_dev)) {
 			pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
 			       bond_dev->name, slave_dev->name, bond_dev->name);
 			return -EPERM;
...
@@ -2957,9 +2957,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_shinfo(skb)->nr_frags +
 			BDS_PER_TX_PKT +
 			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
-		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
-		netif_tx_stop_queue(txq);
-		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+		/* Handle special storage cases separately */
+		if (txdata->tx_ring_size != 0) {
+			BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+			bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+			netif_tx_stop_queue(txq);
+		}
+
 		return NETDEV_TX_BUSY;
 	}
...
@@ -126,7 +126,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
 		/* Check if this request is ok */
 		rc = o->validate(bp, o->owner, elem);
 		if (rc) {
-			BNX2X_ERR("Preamble failed: %d\n", rc);
+			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
 			goto free_and_exit;
 		}
 	}
...
@@ -1141,11 +1141,12 @@ static int qeth_l2_recover(void *ptr)
 		dev_info(&card->gdev->dev,
 			 "Device successfully recovered!\n");
 	else {
-		rtnl_lock();
-		dev_close(card->dev);
-		rtnl_unlock();
-		dev_warn(&card->gdev->dev, "The qeth device driver "
-			 "failed to recover an error on the device\n");
+		if (rtnl_trylock()) {
+			dev_close(card->dev);
+			rtnl_unlock();
+			dev_warn(&card->gdev->dev, "The qeth device driver "
+				 "failed to recover an error on the device\n");
+		}
 	}
 	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
 	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
...
@@ -3510,11 +3510,12 @@ static int qeth_l3_recover(void *ptr)
 		dev_info(&card->gdev->dev,
 			 "Device successfully recovered!\n");
 	else {
-		rtnl_lock();
-		dev_close(card->dev);
-		rtnl_unlock();
-		dev_warn(&card->gdev->dev, "The qeth device driver "
-			 "failed to recover an error on the device\n");
+		if (rtnl_trylock()) {
+			dev_close(card->dev);
+			rtnl_unlock();
+			dev_warn(&card->gdev->dev, "The qeth device driver "
+				 "failed to recover an error on the device\n");
+		}
 	}
 	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
 	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
...
@@ -157,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
 #ifdef CONFIG_PM_DEBUG
 	printk(KERN_WARNING "smsg_pm_restore_thaw\n");
 #endif
-	if (smsg_path && iucv_path_connected) {
+	if (smsg_path && !iucv_path_connected) {
 		memset(smsg_path, 0, sizeof(*smsg_path));
 		smsg_path->msglim = 255;
 		smsg_path->flags = 0;
...
@@ -366,6 +366,13 @@ EXPORT_SYMBOL(vlan_vids_del_by_dev);
 
 bool vlan_uses_dev(const struct net_device *dev)
 {
-	return rtnl_dereference(dev->vlan_info) ? true : false;
+	struct vlan_info *vlan_info;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info)
+		return false;
+	return vlan_info->grp.nr_vlan_devs ? true : false;
 }
 EXPORT_SYMBOL(vlan_uses_dev);
@@ -3064,14 +3064,15 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 		struct hlist_node *n;
 		hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
 					    addr_lst) {
+			if (!net_eq(dev_net(ifa->idev->dev), net))
+				continue;
 			/* sync with offset */
 			if (p < state->offset) {
 				p++;
 				continue;
 			}
 			state->offset++;
-			if (net_eq(dev_net(ifa->idev->dev), net))
-				return ifa;
+			return ifa;
 		}
 
 		/* prepare for next bucket */
@@ -3089,18 +3090,20 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 	struct hlist_node *n = &ifa->addr_lst;
 
 	hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+		if (!net_eq(dev_net(ifa->idev->dev), net))
+			continue;
 		state->offset++;
-		if (net_eq(dev_net(ifa->idev->dev), net))
-			return ifa;
+		return ifa;
 	}
 
 	while (++state->bucket < IN6_ADDR_HSIZE) {
 		state->offset = 0;
 		hlist_for_each_entry_rcu_bh(ifa, n,
 					    &inet6_addr_lst[state->bucket], addr_lst) {
+			if (!net_eq(dev_net(ifa->idev->dev), net))
+				continue;
 			state->offset++;
-			if (net_eq(dev_net(ifa->idev->dev), net))
-				return ifa;
+			return ifa;
 		}
 	}
...
@@ -1642,8 +1642,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 				asoc->outqueue.outstanding_bytes;
 			sackh.num_gap_ack_blocks = 0;
 			sackh.num_dup_tsns = 0;
+			chunk->subh.sack_hdr = &sackh;
 			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
-					SCTP_SACKH(&sackh));
+					SCTP_CHUNK(chunk));
 			break;
 		case SCTP_CMD_DISCARD_PACKET:
...