Commit f335af10 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Several bug fixes for old bugs:

   - Welcome Leon as co-maintainer for RDMA so we are back to having two
     people

   - Some corner cases are fixed in mlx5's MR code

   - Long standing CM bug where a DREQ at the wrong time can result in a
     long timeout

   - Missing locking and refcounting in hfi1"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hfi1: Fix use-after-free bug for mm struct
  IB/rdmavt: add lock to call to rvt_error_qp to prevent a race condition
  IB/cm: Cancel mad on the DREQ event when the state is MRA_REP_RCVD
  RDMA/mlx5: Add a missing update of cache->last_add
  RDMA/mlx5: Don't remove cache MRs when a delay is needed
  MAINTAINERS: Update qib and hfi1 related drivers
  MAINTAINERS: Add Leon Romanovsky to RDMA maintainers
parents d017a316 2bbac98d
...@@ -8676,7 +8676,6 @@ F: include/linux/cciss*.h ...@@ -8676,7 +8676,6 @@ F: include/linux/cciss*.h
F: include/uapi/linux/cciss*.h F: include/uapi/linux/cciss*.h
HFI1 DRIVER HFI1 DRIVER
M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org
S: Supported S: Supported
...@@ -9599,6 +9598,7 @@ F: drivers/iio/pressure/dps310.c ...@@ -9599,6 +9598,7 @@ F: drivers/iio/pressure/dps310.c
INFINIBAND SUBSYSTEM INFINIBAND SUBSYSTEM
M: Jason Gunthorpe <jgg@nvidia.com> M: Jason Gunthorpe <jgg@nvidia.com>
M: Leon Romanovsky <leonro@nvidia.com>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org
S: Supported S: Supported
W: https://github.com/linux-rdma/rdma-core W: https://github.com/linux-rdma/rdma-core
...@@ -14657,7 +14657,6 @@ F: drivers/rtc/rtc-optee.c ...@@ -14657,7 +14657,6 @@ F: drivers/rtc/rtc-optee.c
OPA-VNIC DRIVER OPA-VNIC DRIVER
M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org
S: Supported S: Supported
F: drivers/infiniband/ulp/opa_vnic F: drivers/infiniband/ulp/opa_vnic
...@@ -16099,7 +16098,6 @@ F: include/uapi/linux/qemu_fw_cfg.h ...@@ -16099,7 +16098,6 @@ F: include/uapi/linux/qemu_fw_cfg.h
QIB DRIVER QIB DRIVER
M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org
S: Supported S: Supported
F: drivers/infiniband/hw/qib/ F: drivers/infiniband/hw/qib/
...@@ -16617,7 +16615,6 @@ F: drivers/net/ethernet/rdc/r6040.c ...@@ -16617,7 +16615,6 @@ F: drivers/net/ethernet/rdc/r6040.c
RDMAVT - RDMA verbs software RDMAVT - RDMA verbs software
M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org
S: Supported S: Supported
F: drivers/infiniband/sw/rdmavt F: drivers/infiniband/sw/rdmavt
......
...@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work) ...@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
switch (cm_id_priv->id.state) { switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT: case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT: case IB_CM_DREQ_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->msg); ib_cancel_mad(cm_id_priv->msg);
break; break;
case IB_CM_ESTABLISHED: case IB_CM_ESTABLISHED:
...@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work) ...@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->msg); ib_cancel_mad(cm_id_priv->msg);
break; break;
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT: case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
[CM_DREQ_COUNTER]); [CM_DREQ_COUNTER]);
......
...@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) ...@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
unsigned long flags; unsigned long flags;
struct list_head del_list; struct list_head del_list;
/* Prevent freeing of mm until we are completely finished. */
mmgrab(handler->mn.mm);
/* Unregister first so we don't get any more notifications. */ /* Unregister first so we don't get any more notifications. */
mmu_notifier_unregister(&handler->mn, handler->mn.mm); mmu_notifier_unregister(&handler->mn, handler->mn.mm);
...@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) ...@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
do_remove(handler, &del_list); do_remove(handler, &del_list);
/* Now the mm may be freed. */
mmdrop(handler->mn.mm);
kfree(handler); kfree(handler);
} }
......
...@@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) ...@@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
spin_lock_irq(&ent->lock); spin_lock_irq(&ent->lock);
if (ent->disabled) if (ent->disabled)
goto out; goto out;
if (need_delay) if (need_delay) {
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
goto out;
}
remove_cache_mr_locked(ent); remove_cache_mr_locked(ent);
queue_adjust_cache_locked(ent); queue_adjust_cache_locked(ent);
} }
...@@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) ...@@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{ {
struct mlx5_cache_ent *ent = mr->cache_ent; struct mlx5_cache_ent *ent = mr->cache_ent;
WRITE_ONCE(dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock); spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head); list_add_tail(&mr->list, &ent->head);
ent->available_mrs++; ent->available_mrs++;
......
...@@ -3190,7 +3190,11 @@ void rvt_ruc_loopback(struct rvt_qp *sqp) ...@@ -3190,7 +3190,11 @@ void rvt_ruc_loopback(struct rvt_qp *sqp)
spin_lock_irqsave(&sqp->s_lock, flags); spin_lock_irqsave(&sqp->s_lock, flags);
rvt_send_complete(sqp, wqe, send_status); rvt_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) { if (sqp->ibqp.qp_type == IB_QPT_RC) {
int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); int lastwqe;
spin_lock(&sqp->r_lock);
lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
spin_unlock(&sqp->r_lock);
sqp->s_flags &= ~RVT_S_BUSY; sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags); spin_unlock_irqrestore(&sqp->s_lock, flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment