Commit 7e5530af authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) 8139cp leaks memory in error paths, from Francois Romieu.

 2) do_tcp_sendpages() cannot handle order > 0 pages, but they can
    certainly arrive there now, fix from Eric Dumazet.

 3) Race condition and sysfs fixes in bonding from Nikolay Aleksandrov.

 4) Remain-on-channel fix in mac80211 from Felix Liao.

 5) CCK rate calculation fix in iwlwifi, from Emmanuel Grumbach.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  8139cp: fix coherent mapping leak in error path.
  tcp: fix crashes in do_tcp_sendpages()
  bonding: fix race condition in bonding_store_slaves_active
  bonding: make arp_ip_target parameter checks consistent with sysfs
  bonding: fix miimon and arp_interval delayed work race conditions
  mac80211: fix remain-on-channel (non-)cancelling
  iwlwifi: fix the basic CCK rates calculation
Parents: 4ccc8045 892a925e
drivers/net/bonding/bond_main.c

@@ -3459,6 +3459,28 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 
 /*-------------------------- Device entry points ----------------------------*/
 
+static void bond_work_init_all(struct bonding *bond)
+{
+	INIT_DELAYED_WORK(&bond->mcast_work,
+			  bond_resend_igmp_join_requests_delayed);
+	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+		INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
+	else
+		INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+}
+
+static void bond_work_cancel_all(struct bonding *bond)
+{
+	cancel_delayed_work_sync(&bond->mii_work);
+	cancel_delayed_work_sync(&bond->arp_work);
+	cancel_delayed_work_sync(&bond->alb_work);
+	cancel_delayed_work_sync(&bond->ad_work);
+	cancel_delayed_work_sync(&bond->mcast_work);
+}
+
 static int bond_open(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -3481,41 +3503,27 @@ static int bond_open(struct net_device *bond_dev)
 	}
 	read_unlock(&bond->lock);
 
-	INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
+	bond_work_init_all(bond);
 
 	if (bond_is_lb(bond)) {
 		/* bond_alb_initialize must be called before the timer
 		 * is started.
 		 */
-		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
-			/* something went wrong - fail the open operation */
+		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
 			return -ENOMEM;
-		}
-
-		INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
 		queue_delayed_work(bond->wq, &bond->alb_work, 0);
 	}
 
-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+	if (bond->params.miimon)  /* link check interval, in milliseconds. */
 		queue_delayed_work(bond->wq, &bond->mii_work, 0);
-	}
 
 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-			INIT_DELAYED_WORK(&bond->arp_work,
-					  bond_activebackup_arp_mon);
-		else
-			INIT_DELAYED_WORK(&bond->arp_work,
-					  bond_loadbalance_arp_mon);
 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
 		if (bond->params.arp_validate)
 			bond->recv_probe = bond_arp_rcv;
 	}
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
-		INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
 		/* register to receive LACPDUs */
 		bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3530,34 +3538,10 @@ static int bond_close(struct net_device *bond_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 
 	write_lock_bh(&bond->lock);
 	bond->send_peer_notif = 0;
 	write_unlock_bh(&bond->lock);
 
-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		cancel_delayed_work_sync(&bond->mii_work);
-	}
-
-	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		cancel_delayed_work_sync(&bond->arp_work);
-	}
-
-	switch (bond->params.mode) {
-	case BOND_MODE_8023AD:
-		cancel_delayed_work_sync(&bond->ad_work);
-		break;
-	case BOND_MODE_TLB:
-	case BOND_MODE_ALB:
-		cancel_delayed_work_sync(&bond->alb_work);
-		break;
-	default:
-		break;
-	}
-
-	if (delayed_work_pending(&bond->mcast_work))
-		cancel_delayed_work_sync(&bond->mcast_work);
+	bond_work_cancel_all(bond);
 
 	if (bond_is_lb(bond)) {
 		/* Must be called only after all
 		 * slaves have been released
@@ -4436,26 +4420,6 @@ static void bond_setup(struct net_device *bond_dev)
 	bond_dev->features |= bond_dev->hw_features;
 }
 
-static void bond_work_cancel_all(struct bonding *bond)
-{
-	if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
-		cancel_delayed_work_sync(&bond->mii_work);
-
-	if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
-		cancel_delayed_work_sync(&bond->arp_work);
-
-	if (bond->params.mode == BOND_MODE_ALB &&
-	    delayed_work_pending(&bond->alb_work))
-		cancel_delayed_work_sync(&bond->alb_work);
-
-	if (bond->params.mode == BOND_MODE_8023AD &&
-	    delayed_work_pending(&bond->ad_work))
-		cancel_delayed_work_sync(&bond->ad_work);
-
-	if (delayed_work_pending(&bond->mcast_work))
-		cancel_delayed_work_sync(&bond->mcast_work);
-}
-
 /*
  * Destroy a bonding device.
  * Must be under rtnl_lock when this function is called.
@@ -4706,12 +4670,13 @@ static int bond_check_params(struct bond_params *params)
 	     arp_ip_count++) {
 		/* not complete check, but should be good enough to
 		   catch mistakes */
-		if (!isdigit(arp_ip_target[arp_ip_count][0])) {
+		__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
+		if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
+		    ip == 0 || ip == htonl(INADDR_BROADCAST)) {
			pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				   arp_ip_target[arp_ip_count]);
			arp_interval = 0;
		} else {
-			__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
			arp_target[arp_ip_count] = ip;
		}
	}
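The race fixed above: the old bond_close() decided which delayed works to cancel from bond->params, but those parameters can change through sysfs while the device is up, so a still-queued monitor work could outlive close and fire into torn-down state. Initializing every work once in bond_work_init_all() makes the unconditional cancels safe: cancel_delayed_work_sync() on a work that was initialized but never queued is simply a no-op. A minimal module-style sketch of that pattern (the names are stand-ins, not the bonding driver's):

/* init-all / cancel-all pattern: safe regardless of which works
 * were actually queued at runtime.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work mon_a, mon_b;	/* stand-ins for mii/arp work */

static void mon_fn(struct work_struct *work)
{
	pr_info("monitor tick\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&mon_a, mon_fn);	/* initialize every work up front */
	INIT_DELAYED_WORK(&mon_b, mon_fn);
	schedule_delayed_work(&mon_a, HZ);	/* only one is ever queued */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&mon_a);	/* cancels the queued work */
	cancel_delayed_work_sync(&mon_b);	/* never queued: harmless no-op */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");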
drivers/net/bonding/bond_sysfs.c

@@ -513,6 +513,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
 	if (sscanf(buf, "%d", &new_value) != 1) {
 		pr_err("%s: no arp_interval value specified.\n",
 		       bond->dev->name);
@@ -539,10 +541,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
 			bond->dev->name, bond->dev->name);
 		bond->params.miimon = 0;
-		if (delayed_work_pending(&bond->mii_work)) {
-			cancel_delayed_work(&bond->mii_work);
-			flush_workqueue(bond->wq);
-		}
 	}
 	if (!bond->params.arp_targets[0]) {
 		pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
@@ -554,19 +552,12 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		 * timer will get fired off when the open function
 		 * is called.
 		 */
-		if (!delayed_work_pending(&bond->arp_work)) {
-			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-				INIT_DELAYED_WORK(&bond->arp_work,
-						  bond_activebackup_arp_mon);
-			else
-				INIT_DELAYED_WORK(&bond->arp_work,
-						  bond_loadbalance_arp_mon);
-			queue_delayed_work(bond->wq, &bond->arp_work, 0);
-		}
+		cancel_delayed_work_sync(&bond->mii_work);
+		queue_delayed_work(bond->wq, &bond->arp_work, 0);
 	}
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
@@ -962,6 +953,8 @@ static ssize_t bonding_store_miimon(struct device *d,
 	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
 	if (sscanf(buf, "%d", &new_value) != 1) {
 		pr_err("%s: no miimon value specified.\n",
 		       bond->dev->name);
@@ -993,10 +986,6 @@ static ssize_t bonding_store_miimon(struct device *d,
 			bond->params.arp_validate =
 				BOND_ARP_VALIDATE_NONE;
 		}
-		if (delayed_work_pending(&bond->arp_work)) {
-			cancel_delayed_work(&bond->arp_work);
-			flush_workqueue(bond->wq);
-		}
 	}
 	if (bond->dev->flags & IFF_UP) {
@@ -1005,15 +994,12 @@ static ssize_t bonding_store_miimon(struct device *d,
 		 * timer will get fired off when the open function
 		 * is called.
 		 */
-		if (!delayed_work_pending(&bond->mii_work)) {
-			INIT_DELAYED_WORK(&bond->mii_work,
-					  bond_mii_monitor);
-			queue_delayed_work(bond->wq,
-					   &bond->mii_work, 0);
-		}
+		cancel_delayed_work_sync(&bond->arp_work);
+		queue_delayed_work(bond->wq, &bond->mii_work, 0);
 	}
 }
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
@@ -1582,6 +1568,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 		goto out;
 	}
 
+	read_lock(&bond->lock);
 	bond_for_each_slave(bond, slave, i) {
 		if (!bond_is_active_slave(slave)) {
 			if (new_value)
@@ -1590,6 +1577,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 				slave->inactive = 1;
 		}
 	}
+	read_unlock(&bond->lock);
 out:
 	return ret;
 }
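Both store handlers now take the RTNL before touching the monitor works, so a sysfs write can no longer race with bond_open()/bond_close(), and bonding_store_slaves_active() walks the slave list under bond->lock. The rtnl_trylock() plus restart_syscall() idiom is used rather than rtnl_lock() because a path that already holds the RTNL may be waiting on the sysfs file's removal; trying to take the lock outright from the store handler could deadlock, so the handler backs off and the write(2) is transparently restarted. A hedged skeleton of that idiom (the attribute and handler names are hypothetical):

#include <linux/device.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>

static ssize_t example_store(struct device *d,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	if (!rtnl_trylock())			/* someone else holds the RTNL */
		return restart_syscall();	/* retry the write later */

	/* mutate device state here, serialized against open/close */

	rtnl_unlock();
	return count;
}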
drivers/net/ethernet/realtek/8139cp.c

@@ -1060,17 +1060,22 @@ static int cp_init_rings (struct cp_private *cp)
 
 static int cp_alloc_rings (struct cp_private *cp)
 {
+	struct device *d = &cp->pdev->dev;
 	void *mem;
+	int rc;
 
-	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
-				 &cp->ring_dma, GFP_KERNEL);
+	mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	cp->rx_ring = mem;
 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
 
-	return cp_init_rings(cp);
+	rc = cp_init_rings(cp);
+	if (rc < 0)
+		dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+
+	return rc;
 }
 
 static void cp_clean_rings (struct cp_private *cp)
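The leak: cp_alloc_rings() used to return cp_init_rings()'s error code directly, leaving the coherent ring buffer allocated with no owner. The rule the fix applies is general: every failure path after dma_alloc_coherent() must undo it with dma_free_coherent() using the same device, size, and DMA handle. A sketch of that rule under stated assumptions (setup_rings() and init_rings() are hypothetical stand-ins, not the driver's functions):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int init_rings(void *ring);	/* hypothetical follow-up init */

static int setup_rings(struct device *dev, size_t bytes,
		       void **ring, dma_addr_t *ring_dma)
{
	int rc;

	*ring = dma_alloc_coherent(dev, bytes, ring_dma, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;

	rc = init_rings(*ring);
	if (rc < 0)	/* undo the allocation on every later failure path */
		dma_free_coherent(dev, bytes, *ring, *ring_dma);

	return rc;
}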
drivers/net/wireless/iwlwifi/dvm/rxon.c

@@ -1012,12 +1012,12 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
 	 * As a consequence, it's not as complicated as it sounds, just add
 	 * any lower rates to the ACK rate bitmap.
 	 */
-	if (IWL_RATE_11M_INDEX < lowest_present_ofdm)
-		ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
-	if (IWL_RATE_5M_INDEX < lowest_present_ofdm)
-		ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
-	if (IWL_RATE_2M_INDEX < lowest_present_ofdm)
-		ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_11M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_5M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_2M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
 	/* 1M already there or needed so always add */
 	cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
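The CCK half of the basic-rate calculation tested the CCK rate indices against lowest_present_ofdm and OR-ed the result into the OFDM bitmap, so the CCK ACK rates were never actually added; the fix makes the comparisons and the accumulator consistent. The intent: every CCK rate lower than the lowest rate present in the BSS must be usable as an ACK rate. An illustrative-only sketch with hypothetical rate indices (the real IWL_RATE_* masks live in the driver):

#include <stdio.h>

enum { RATE_1M, RATE_2M, RATE_5M, RATE_11M };	/* hypothetical indices */

static unsigned int cck_ack_rates(int lowest_present_cck)
{
	unsigned int cck = 0;

	/* add every CCK rate strictly below the lowest present one */
	if (RATE_11M < lowest_present_cck)
		cck |= 1u << RATE_11M;
	if (RATE_5M < lowest_present_cck)
		cck |= 1u << RATE_5M;
	if (RATE_2M < lowest_present_cck)
		cck |= 1u << RATE_2M;
	cck |= 1u << RATE_1M;	/* 1M already there or needed, always add */
	return cck;
}

int main(void)
{
	/* lowest present rate is 11M -> 1M, 2M and 5.5M become ACK rates */
	printf("0x%x\n", cck_ack_rates(RATE_11M));	/* prints 0x7 */
	return 0;
}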
net/ipv4/tcp.c

@@ -830,8 +830,8 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 	return mss_now;
 }
 
-static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
-				size_t psize, int flags)
+static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+				size_t size, int flags)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int mss_now, size_goal;
@@ -858,12 +858,9 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 		goto out_err;
 
-	while (psize > 0) {
+	while (size > 0) {
 		struct sk_buff *skb = tcp_write_queue_tail(sk);
-		struct page *page = pages[poffset / PAGE_SIZE];
 		int copy, i;
-		int offset = poffset % PAGE_SIZE;
-		int size = min_t(size_t, psize, PAGE_SIZE - offset);
 		bool can_coalesce;
 
 		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
@@ -912,8 +909,8 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
 		copied += copy;
-		poffset += copy;
-		if (!(psize -= copy))
+		offset += copy;
+		if (!(size -= copy))
 			goto out;
 
 		if (skb->len < size_goal || (flags & MSG_OOB))
@@ -960,7 +957,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 					flags);
 
 	lock_sock(sk);
-	res = do_tcp_sendpages(sk, &page, offset, size, flags);
+	res = do_tcp_sendpages(sk, page, offset, size, flags);
 	release_sock(sk);
 
 	return res;
 }
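The crash: do_tcp_sendpages() took a page array and indexed it as pages[poffset / PAGE_SIZE], clamping each chunk to PAGE_SIZE. But tcp_sendpage() passed &page, a one-element "array", so an offset or size reaching past the first PAGE_SIZE bytes of a compound (order > 0) page indexed beyond it. The fix threads the single page and offset straight through, since sendpage only ever supplies one page. A hypothetical plain-C illustration of the broken indexing (4 KiB "pages", stand-in types):

#include <stdio.h>

#define PAGE_SIZE 4096

struct page { int id; };

int main(void)
{
	struct page head = { 0 };	/* head page of an order-1 compound page */
	struct page *pages = &head;	/* the &page one-element "array" */
	int poffset = PAGE_SIZE + 512;	/* offset into the second subpage */

	/* old-style lookup: index 1 into a 1-entry array -> out of bounds */
	printf("old lookup index: %d into the 1-entry array at %p\n",
	       poffset / PAGE_SIZE, (void *)pages);

	/* new-style: keep the single page pointer, carry the offset as-is */
	struct page *page = &head;
	int offset = poffset;
	printf("page id %d, offset %d\n", page->id, offset);
	return 0;
}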
net/mac80211/offchannel.c

@@ -458,8 +458,6 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
 		list_move_tail(&roc->list, &tmp_list);
 		roc->abort = true;
 	}
-
-	ieee80211_start_next_roc(local);
 	mutex_unlock(&local->mtx);
 
 	list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
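ieee80211_roc_purge() drains every pending remain-on-channel item for the interface onto a private list and marks it aborted; the removed call kicked off the next ROC from inside that drain, before the purged items had actually been cancelled and cleaned up. Dropping it leaves scheduling to the normal completion path. A generic sketch of the drain pattern, with stand-in types rather than mac80211's:

#include <linux/list.h>
#include <linux/mutex.h>

struct roc_entry {
	struct list_head list;
	bool abort;
};

static void purge_pending(struct mutex *lock, struct list_head *pending)
{
	struct roc_entry *e, *tmp;
	LIST_HEAD(tmp_list);

	mutex_lock(lock);
	list_for_each_entry_safe(e, tmp, pending, list) {
		list_move_tail(&e->list, &tmp_list);	/* drain under the lock */
		e->abort = true;
	}
	/* no start_next() here -- the completion path schedules the next item */
	mutex_unlock(lock);

	list_for_each_entry_safe(e, tmp, &tmp_list, list) {
		list_del(&e->list);	/* tear each aborted item down outside the lock */
	}
}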