Commit 617125e7 authored by David S. Miller

Merge branch 'mlx4-core-fixes'

Tariq Toukan says:

====================
mlx4 core fixes

This patchset contains bug fixes from Jack to the mlx4 Core driver.

Patch 1 fixes a race in the CQ free flow.
Patch 2 moves the saving of some QP context flags to the correct QP transition.
Patch 3 eliminates the SRQ_LIMIT warnings that flood the message log, keeping
warnings only in the SRQ_CATAS_ERROR path.

Series generated against net commit:
1a717fcf Merge tag 'mac80211-for-davem-2017-01-13' of git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Parents: b618ab45 9577b174
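
For context on the first fix: the racy CQ free is addressed by making the
interrupt-path lookups RCU-protected instead of taking the CQ table spinlock
and a reference count. Below is a minimal sketch of that reader-side pattern,
not the driver's code: my_obj and my_lookup are illustrative names, and the
sketch is safe only under the assumption that the free path synchronizes with
the interrupt handlers before freeing (see mlx4_cq_free further down).

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

struct my_obj {
	int arm_sn;
	void (*comp)(struct my_obj *obj);
};

/* RCU protects only the radix-tree nodes during the lookup; the object
 * itself may be used after rcu_read_unlock() because the free path
 * removes it from the tree and then waits out the interrupt handlers.
 */
static struct my_obj *my_lookup(struct radix_tree_root *tree, unsigned long id)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = radix_tree_lookup(tree, id);
	rcu_read_unlock();

	return obj;
}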
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 {
 	struct mlx4_cq *cq;
 
+	rcu_read_lock();
 	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
 			       cqn & (dev->caps.num_cqs - 1));
+	rcu_read_unlock();
+
 	if (!cq) {
 		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
 		return;
 	}
 
+	/* Accessing the CQ outside of rcu_read_lock is safe, because
+	 * the CQ is freed only after interrupt handling is completed.
+	 */
 	++cq->arm_sn;
 
 	cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
 	struct mlx4_cq *cq;
 
-	spin_lock(&cq_table->lock);
-
+	rcu_read_lock();
 	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-
-	spin_unlock(&cq_table->lock);
+	rcu_read_unlock();
 
 	if (!cq) {
-		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
 		return;
 	}
 
+	/* Accessing the CQ outside of rcu_read_lock is safe, because
+	 * the CQ is freed only after interrupt handling is completed.
+	 */
 	cq->event(cq, event_type);
-
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
 }
 
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	if (err)
 		return err;
 
-	spin_lock_irq(&cq_table->lock);
+	spin_lock(&cq_table->lock);
 	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
-	spin_unlock_irq(&cq_table->lock);
+	spin_unlock(&cq_table->lock);
 	if (err)
 		goto err_icm;
 
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	return 0;
 
 err_radix:
-	spin_lock_irq(&cq_table->lock);
+	spin_lock(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
+	spin_unlock(&cq_table->lock);
 
 err_icm:
 	mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
 	if (err)
 		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
+	spin_lock(&cq_table->lock);
+	radix_tree_delete(&cq_table->tree, cq->cqn);
+	spin_unlock(&cq_table->lock);
+
 	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
 	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
 	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
 		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
-	spin_lock_irq(&cq_table->lock);
-	radix_tree_delete(&cq_table->tree, cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
-
 	if (atomic_dec_and_test(&cq->refcount))
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
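
The ordering in mlx4_cq_free() above is the other half of the fix: the CQ is
unlinked from the radix tree first, then the relevant IRQs are synchronized,
so no handler can still hold a pointer to the CQ when the final reference is
dropped. A condensed sketch of that teardown sequence, with placeholder
my_table/my_obj types rather than the driver's own:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_table {
	spinlock_t lock;			/* serializes tree updates only */
	struct radix_tree_root tree;
};

struct my_obj {
	unsigned long id;
	atomic_t refcount;			/* 1 while in the table */
	struct completion free;
};

static void my_free(struct my_table *table, struct my_obj *obj,
		    unsigned int irq)
{
	/* 1. Unlink: new interrupt handlers can no longer find the object.
	 *    A plain spin_lock suffices once the handlers use RCU instead
	 *    of this lock.
	 */
	spin_lock(&table->lock);
	radix_tree_delete(&table->tree, obj->id);
	spin_unlock(&table->lock);

	/* 2. Handlers that found the object before the unlink have
	 *    returned once this completes.
	 */
	synchronize_irq(irq);

	/* 3. Drop the table's reference and wait for any remaining holder. */
	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->free);
	wait_for_completion(&obj->free);

	kfree(obj);
}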
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
-			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-				 __func__);
+			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+				 __func__, be32_to_cpu(eqe->event.srq.srqn),
+				 eq->eqn);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
 			if (mlx4_is_master(dev)) {
 				/* forward only to slave owning the SRQ */
@@ -570,12 +571,16 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 						  eq->eqn, eq->cons_index, ret);
 					break;
 				}
-				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
-					  __func__, slave,
-					  be32_to_cpu(eqe->event.srq.srqn),
-					  eqe->type, eqe->subtype);
+				if (eqe->type ==
+				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+						  __func__, slave,
+						  be32_to_cpu(eqe->event.srq.srqn),
+						  eqe->type, eqe->subtype);
 
 				if (!ret && slave != dev->caps.function) {
-					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
-						  __func__, eqe->type,
-						  eqe->subtype, slave);
+					if (eqe->type ==
+					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+							  __func__, eqe->type,
+							  eqe->subtype, slave);
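
Note the deliberate case fallthrough kept by patch 3: SRQ_LIMIT still shares
the slave-forwarding logic with SRQ_CATAS_ERROR, but the warnings are now
guarded by the event type. Under SRIOV, SRQ_LIMIT fires during normal
operation, so it is logged at debug level only. A minimal sketch of the
resulting logging policy; log_srq_event is an illustrative helper and not a
driver symbol, and mlx4_warn()/mlx4_dbg() are assumed from the driver's
private mlx4.h:

static void log_srq_event(struct mlx4_dev *dev, u8 event_type, u32 srqn)
{
	if (event_type == MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
		mlx4_warn(dev, "SRQ catastrophic error, srq_no=0x%x\n", srqn);
	else	/* MLX4_EVENT_TYPE_SRQ_LIMIT: routine, keep it quiet */
		mlx4_dbg(dev, "SRQ limit reached, srq_no=0x%x\n", srqn);
}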
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 		put_res(dev, slave, srqn, RES_SRQ);
 		qp->srq = srq;
 	}
+
+	/* Save param3 for dynamic changes from VST back to VGT */
+	qp->param3 = qpc->param3;
 	put_res(dev, slave, rcqn, RES_CQ);
 	put_res(dev, slave, mtt_base, RES_MTT);
 	res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
 	int qpn = vhcr->in_modifier & 0x7fffff;
 	struct res_qp *qp;
 	u8 orig_sched_queue;
-	__be32 orig_param3 = qpc->param3;
 	u8 orig_vlan_control = qpc->pri_path.vlan_control;
 	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
 	u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
 	 */
 	if (!err) {
 		qp->sched_queue = orig_sched_queue;
-		qp->param3 = orig_param3;
 		qp->vlan_control = orig_vlan_control;
 		qp->fvl_rx = orig_fvl_rx;
 		qp->pri_path_fl = orig_pri_path_fl;
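
On patch 2: the flags carried in the QP context's param3 are applied at the
RST2INIT transition, so that is where the resource tracker must snapshot them
for a later dynamic VST-to-VGT restore; re-saving them at INIT2RTR, as before,
overwrote the snapshot. Schematically, under those assumptions (tracked_qp and
rst2init_save are illustrative names; struct mlx4_qp_context is the real
context type):

#include <linux/mlx4/qp.h>	/* struct mlx4_qp_context */

struct tracked_qp {
	__be32 param3;		/* snapshot used for VST -> VGT restore */
};

/* Called from the RST2INIT path only: this one snapshot stays
 * authoritative, and the INIT2RTR path no longer touches it.
 */
static void rst2init_save(struct tracked_qp *qp,
			  const struct mlx4_qp_context *qpc)
{
	/* Save param3 for dynamic changes from VST back to VGT */
	qp->param3 = qpc->param3;
}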