Commit 7686e3c1 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull more rdma fixes from Doug Ledford:
 "I think we are getting pretty close to done now.  There are four
  one-off fixes in this update:

   - fix ipoib multicast joins
   - fix mlx4 error handling
   - fix mlx5 size computation
   - fix a thinko in core code"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/mlx5: Fix RC transport send queue overhead computation
  IB/ipoib: fix for rare multicast join race condition
  IB/core: Fix reading capability mask of the port info class
  net/mlx4: fix some error handling in mlx4_multi_func_init()
parents 2f2e9f2d 75c1657e
...@@ -720,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev, ...@@ -720,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO, if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
&cpi, 40, sizeof(cpi)) >= 0) { &cpi, 40, sizeof(cpi)) >= 0) {
if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
/* We have extended counters */ /* We have extended counters */
return &pma_group_ext; return &pma_group_ext;
if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF) if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
/* But not the IETF ones */ /* But not the IETF ones */
return &pma_group_noietf; return &pma_group_noietf;
} }
......
...@@ -270,8 +270,10 @@ static int sq_overhead(enum ib_qp_type qp_type) ...@@ -270,8 +270,10 @@ static int sq_overhead(enum ib_qp_type qp_type)
/* fall through */ /* fall through */
case IB_QPT_RC: case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) + size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_atomic_seg) + max(sizeof(struct mlx5_wqe_atomic_seg) +
sizeof(struct mlx5_wqe_raddr_seg); sizeof(struct mlx5_wqe_raddr_seg),
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
sizeof(struct mlx5_mkey_seg));
break; break;
case IB_QPT_XRC_TGT: case IB_QPT_XRC_TGT:
...@@ -279,9 +281,9 @@ static int sq_overhead(enum ib_qp_type qp_type) ...@@ -279,9 +281,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC: case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) + size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg) + max(sizeof(struct mlx5_wqe_raddr_seg),
sizeof(struct mlx5_wqe_umr_ctrl_seg) + sizeof(struct mlx5_wqe_umr_ctrl_seg) +
sizeof(struct mlx5_mkey_seg); sizeof(struct mlx5_mkey_seg));
break; break;
case IB_QPT_UD: case IB_QPT_UD:
......
...@@ -456,7 +456,10 @@ static int ipoib_mcast_join_complete(int status, ...@@ -456,7 +456,10 @@ static int ipoib_mcast_join_complete(int status,
return status; return status;
} }
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) /*
* Caller must hold 'priv->lock'
*/
static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_sa_multicast *multicast; struct ib_sa_multicast *multicast;
...@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) ...@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
ib_sa_comp_mask comp_mask; ib_sa_comp_mask comp_mask;
int ret = 0; int ret = 0;
if (!priv->broadcast ||
!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return -EINVAL;
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid; rec.mgid = mcast->mcmember.mgid;
...@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) ...@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.join_state = 4; rec.join_state = 4;
#endif #endif
} }
spin_unlock_irq(&priv->lock);
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL, &rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast); ipoib_mcast_join_complete, mcast);
spin_lock_irq(&priv->lock);
if (IS_ERR(multicast)) { if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast); ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
spin_lock_irq(&priv->lock);
/* Requeue this join task with a backoff delay */ /* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1); __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock); spin_unlock_irq(&priv->lock);
complete(&mcast->done); complete(&mcast->done);
spin_lock_irq(&priv->lock);
} }
return 0;
} }
void ipoib_mcast_join_task(struct work_struct *work) void ipoib_mcast_join_task(struct work_struct *work)
...@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work) ...@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
/* Found the next unjoined group */ /* Found the next unjoined group */
init_completion(&mcast->done); init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
if (ipoib_mcast_join(dev, mcast)) {
spin_unlock_irq(&priv->lock); spin_unlock_irq(&priv->lock);
ipoib_mcast_join(dev, mcast); return;
spin_lock_irq(&priv->lock); }
} else if (!delay_until || } else if (!delay_until ||
time_before(mcast->delay_until, delay_until)) time_before(mcast->delay_until, delay_until))
delay_until = mcast->delay_until; delay_until = mcast->delay_until;
...@@ -641,10 +652,9 @@ void ipoib_mcast_join_task(struct work_struct *work) ...@@ -641,10 +652,9 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (mcast) { if (mcast) {
init_completion(&mcast->done); init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
ipoib_mcast_join(dev, mcast);
} }
spin_unlock_irq(&priv->lock); spin_unlock_irq(&priv->lock);
if (mcast)
ipoib_mcast_join(dev, mcast);
} }
int ipoib_mcast_start_thread(struct net_device *dev) int ipoib_mcast_start_thread(struct net_device *dev)
......
...@@ -2429,7 +2429,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) ...@@ -2429,7 +2429,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
flush_workqueue(priv->mfunc.master.comm_wq); flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves: err_slaves:
while (--i) { while (i--) {
for (port = 1; port <= MLX4_MAX_PORTS; port++) for (port = 1; port <= MLX4_MAX_PORTS; port++)
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment