Commit aa116b81 authored by Jason Gunthorpe

RDMA/mlx5: Order num_pending_prefetch properly with synchronize_srcu

During destroy, setting live = 0 and then calling synchronize_srcu()
prevents num_pending_prefetch from incrementing, and also ensures that
all work holding that count is already queued on the WQ. Testing the
counter before the synchronize_srcu() allows races of the form:

    CPU0                                       CPU1
  dereg_mr()
                                        mlx5_ib_advise_mr_prefetch()
                                         srcu_read_lock()
                                          num_pending_prefetch_inc()
                                            if (!live)
   live = 0
   atomic_read() == 0
     // skip flush_workqueue()
                                            atomic_inc()
                                            queue_work();
                                         srcu_read_unlock()
   WARN_ON(atomic_read())  // Fails

Swap the order so that the synchronize_srcu() prevents this.
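
To make the ordering argument concrete, here is a minimal toy model of
the pattern the patch establishes. It is not the mlx5 driver code:
struct toy_mr, struct toy_prefetch and the toy_* functions are invented
for illustration, while the SRCU, atomic and workqueue calls are the
real kernel primitives involved.

  #include <linux/atomic.h>
  #include <linux/kernel.h>
  #include <linux/slab.h>
  #include <linux/srcu.h>
  #include <linux/workqueue.h>

  struct toy_mr {
          int live;
          atomic_t num_pending_prefetch;
  };

  /* One work item per prefetch request, as in the real driver. */
  struct toy_prefetch {
          struct work_struct work;
          struct toy_mr *mr;
  };

  static void toy_prefetch_fn(struct work_struct *work)
  {
          struct toy_prefetch *pf =
                  container_of(work, struct toy_prefetch, work);

          /* ... perform the prefetch ..., then drop the pending count */
          atomic_dec(&pf->mr->num_pending_prefetch);
          kfree(pf);
  }

  /* Reader side: the live check, the increment and the queue_work()
   * all run inside one SRCU read-side section.
   */
  static void toy_advise_prefetch(struct srcu_struct *srcu,
                                  struct toy_mr *mr)
  {
          int idx = srcu_read_lock(srcu);
          struct toy_prefetch *pf;

          if (mr->live) {
                  pf = kmalloc(sizeof(*pf), GFP_KERNEL);
                  if (pf) {
                          pf->mr = mr;
                          INIT_WORK(&pf->work, toy_prefetch_fn);
                          atomic_inc(&mr->num_pending_prefetch);
                          queue_work(system_unbound_wq, &pf->work);
                  }
          }
          srcu_read_unlock(srcu, idx);
  }

  /* Destroy side, in the order this patch establishes. */
  static void toy_dereg(struct srcu_struct *srcu, struct toy_mr *mr)
  {
          mr->live = 0;

          /* Any reader that saw live != 0 completes its read-side
           * section -- and therefore its atomic_inc()/queue_work() --
           * before synchronize_srcu() returns, so the reads below
           * cannot race with a late increment.
           */
          synchronize_srcu(srcu);

          if (atomic_read(&mr->num_pending_prefetch))
                  flush_workqueue(system_unbound_wq);

          /* All queued prefetch work has run and dropped its count. */
          WARN_ON(atomic_read(&mr->num_pending_prefetch));
  }

With the old order, the synchronize_srcu() only ran after the WARN_ON,
so a reader could still sit between its live check and its atomic_inc()
while the destroy path sampled the counter, exactly as in the diagram
above.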

Fixes: a6bc3875 ("IB/mlx5: Protect against prefetch of invalid MR")
Link: https://lore.kernel.org/r/20191001153821.23621-5-jgg@ziepe.ca
Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 9dc775e7
@@ -1609,13 +1609,14 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		 */
 		mr->live = 0;
 
+		/* Wait for all running page-fault handlers to finish. */
+		synchronize_srcu(&dev->mr_srcu);
+
 		/* dequeue pending prefetch requests for the mr */
 		if (atomic_read(&mr->num_pending_prefetch))
 			flush_workqueue(system_unbound_wq);
 
 		WARN_ON(atomic_read(&mr->num_pending_prefetch));
 
-		/* Wait for all running page-fault handlers to finish. */
-		synchronize_srcu(&dev->mr_srcu);
-
 		/* Destroy all page mappings */
 		if (!umem_odp->is_implicit_odp)
 			mlx5_ib_invalidate_range(umem_odp,
...