Commit b4bd701a authored by Leon Romanovsky, committed by Doug Ledford

RDMA/mlx5: Fix multiple NULL-ptr deref errors in rereg_mr flow

A failure in the rereg MR flow releases the UMEM but leaves the MR to be
destroyed later by the user. As a result, the following scenario may happen:
"create MR -> rereg MR with failure -> call rereg MR again" and hit a
NULL-ptr deref or user memory access error.

Ensure that rereg MR is only performed on a non-dead MR.

Cc: syzkaller <syzkaller@googlegroups.com>
Cc: <stable@vger.kernel.org> # 4.5
Fixes: 395a8e4c ("IB/mlx5: Refactoring register MR code")
Reported-by: Noa Osherovich <noaos@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent dc5640f2
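
For context, the crash scenario from the description can be sketched from user space with libibverbs roughly as follows. This is an illustrative sketch only, not the syzkaller reproducer: the device/PD setup, the unmapped 0xdead000 address used to make the kernel's umem pinning fail, and the deliberately ignored ibv_rereg_mr() return values are assumptions chosen to walk the "create MR -> rereg MR with failure -> rereg MR again" path.

/* Illustrative only: assumes an RDMA (e.g. mlx5) device is present;
 * error handling trimmed to the minimum. */
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	struct ibv_context *ctx = (devs && devs[0]) ? ibv_open_device(devs[0]) : NULL;
	struct ibv_pd *pd = ctx ? ibv_alloc_pd(ctx) : NULL;
	struct ibv_mr *mr;
	void *buf;

	if (!pd || posix_memalign(&buf, 4096, 4096))
		return 1;

	/* create MR */
	mr = ibv_reg_mr(pd, buf, 4096, IBV_ACCESS_LOCAL_WRITE);
	if (!mr)
		return 1;

	/*
	 * rereg MR with failure: request a translation change to an address
	 * the kernel cannot pin (assumed unmapped), so the umem get fails.
	 * Before this patch, mlx5 released mr->umem on that error path but
	 * left the MR around with a stale/NULL umem pointer.
	 */
	ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_TRANSLATION, NULL,
		     (void *)0xdead000, 4096, 0);

	/*
	 * call to rereg MR again: the old kernel code dereferenced
	 * mr->umem->address / mr->umem->length here and oopsed; with the
	 * !mr->umem guard added below it returns -EINVAL instead.
	 */
	ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_ACCESS, NULL, NULL, 0,
		     IBV_ACCESS_LOCAL_WRITE);

	ibv_dereg_mr(mr);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	free(buf);
	return 0;
}

On a patched kernel the second ibv_rereg_mr() call is expected to fail cleanly with EINVAL instead of triggering a kernel oops.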
drivers/infiniband/hw/mlx5/mr.c
@@ -866,25 +866,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 		       int *order)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct ib_umem *u;
 	int err;
 
-	*umem = ib_umem_get(pd->uobject->context, start, length,
-			    access_flags, 0);
-	err = PTR_ERR_OR_ZERO(*umem);
+	*umem = NULL;
+
+	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+	err = PTR_ERR_OR_ZERO(u);
 	if (err) {
-		*umem = NULL;
-		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
+		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
 
-	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
 			   page_shift, ncont, order);
 	if (!*npages) {
 		mlx5_ib_warn(dev, "avoid zero region\n");
-		ib_umem_release(*umem);
+		ib_umem_release(u);
 		return -EINVAL;
 	}
 
+	*umem = u;
+
 	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
 		    *npages, *ncont, *order, *page_shift);
@@ -1458,13 +1461,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	int access_flags = flags & IB_MR_REREG_ACCESS ?
 			    new_access_flags :
 			    mr->access_flags;
-	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
-	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
 	int page_shift = 0;
 	int upd_flags = 0;
 	int npages = 0;
 	int ncont = 0;
 	int order = 0;
+	u64 addr, len;
 	int err;
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
@@ -1472,6 +1474,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 
 	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
 
+	if (!mr->umem)
+		return -EINVAL;
+
+	if (flags & IB_MR_REREG_TRANS) {
+		addr = virt_addr;
+		len = length;
+	} else {
+		addr = mr->umem->address;
+		len = mr->umem->length;
+	}
+
 	if (flags != IB_MR_REREG_PD) {
 		/*
 		 * Replace umem. This needs to be done whether or not UMR is
@@ -1479,6 +1492,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		 */
 		flags |= IB_MR_REREG_TRANS;
 		ib_umem_release(mr->umem);
+		mr->umem = NULL;
 		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
 				  &npages, &page_shift, &ncont, &order);
 		if (err)
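
Taken together, the hunks apply a publish-on-success idiom: mr_umem_get() now builds the umem in a local variable and only stores it through *umem once every check has passed, and the rereg path clears mr->umem before re-acquiring it and rejects further rereg attempts on a dead MR with -EINVAL. Below is a minimal, generic user-space sketch of that idiom; struct obj, resource_get() and resource_put() are hypothetical names for illustration, not kernel APIs.

#include <errno.h>
#include <stdlib.h>

/* Hypothetical resource type and helpers, for illustration only. */
struct resource { int handle; };

static struct resource *resource_get(int arg)
{
	if (arg < 0)				/* simulate an acquisition failure */
		return NULL;
	struct resource *r = malloc(sizeof(*r));
	if (r)
		r->handle = arg;
	return r;
}

static void resource_put(struct resource *res)
{
	free(res);
}

struct obj {
	struct resource *res;
};

/* Publish-on-success: mirrors the structure of the fixed rereg flow. */
static int obj_replace_resource(struct obj *o, int arg)
{
	struct resource *r;

	if (!o->res)			/* previous replace failed: object is "dead" */
		return -EINVAL;

	resource_put(o->res);
	o->res = NULL;			/* never leave a dangling pointer behind */

	r = resource_get(arg);		/* acquire into a local first */
	if (!r)
		return -EFAULT;

	o->res = r;			/* publish only after full success */
	return 0;
}

int main(void)
{
	struct obj o = { .res = resource_get(1) };

	obj_replace_resource(&o, -1);	/* fails: o.res stays NULL */
	return obj_replace_resource(&o, 2) == -EINVAL ? 0 : 1;
}

The !o->res check plays the role of the new !mr->umem guard: once a replacement has failed, the object stays dead until the user destroys it, rather than being reused with a stale or NULL pointer.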