Commit ddec8ed2 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Last pull for 5.16, the reversion has been known for a while now but
  didn't get a proper fix in time. Looks like we will have several
  info-leak bugs to take care of going foward.

   - Revert the patch fixing the DM related crash causing a widespread
     regression for kernel ULPs. A proper fix just didn't appear this
     cycle due to the holidays

   - Missing NULL check on alloc in uverbs

   - Double free in rxe error paths

   - Fix a new kernel-infoleak report when forming ah_attr's without
     GRH's in ucma"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Don't infoleak GRH fields
  RDMA/uverbs: Check for null return of kmalloc_array
  Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"
  RDMA/rxe: Prevent double freeing rxe_map_set()
parents b2b436ec b35a0f4d
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
     struct rdma_ah_attr *src = ah_attr;
     struct rdma_ah_attr conv_ah;
 
-    memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
+    memset(&dst->grh, 0, sizeof(dst->grh));
 
     if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
         (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
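The hunk above is the infoleak fix called out in the merge message: ib_copy_ah_attr_to_user() used to zero only grh.reserved, so when the ah_attr carried no GRH the remaining bytes of dst->grh reached userspace uninitialized. Below is a minimal user-space sketch of that bug class and the fix; struct grh_resp and the fill_*() helpers are illustrative stand-ins, not the kernel's uverbs types.

#include <stdio.h>
#include <string.h>

struct grh_resp {                /* stand-in for a GRH response struct */
    unsigned char dgid[16];
    unsigned int  flow_label;
    unsigned char sgid_index;
    unsigned char hop_limit;
    unsigned char traffic_class;
    unsigned char reserved;      /* the only byte the old code cleared */
};

/* Old pattern: clear just 'reserved'; without a GRH nothing ever
 * overwrites the other fields, so stale memory is copied out whole. */
static void fill_leaky(struct grh_resp *dst)
{
    memset(&dst->reserved, 0, sizeof(dst->reserved));
}

/* Fixed pattern: clear the entire response before any selective fill. */
static void fill_fixed(struct grh_resp *dst)
{
    memset(dst, 0, sizeof(*dst));
}

int main(void)
{
    struct grh_resp r;

    memset(&r, 0xAA, sizeof(r));     /* simulate stale stack contents */
    fill_leaky(&r);
    printf("leaky dgid[0] = 0x%02x\n", r.dgid[0]);  /* prints 0xaa */

    memset(&r, 0xAA, sizeof(r));
    fill_fixed(&r);
    printf("fixed dgid[0] = 0x%02x\n", r.dgid[0]);  /* prints 0x00 */
    return 0;
}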
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -447,6 +447,9 @@ static int uapi_finalize(struct uverbs_api *uapi)
     uapi->num_write_ex = max_write_ex + 1;
     data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
                  sizeof(*uapi->write_methods), GFP_KERNEL);
+    if (!data)
+        return -ENOMEM;
+
     for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
         data[i] = &uapi->notsupp_method;
     uapi->write_methods = data;
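The uverbs hunk is the classic allocate/check/seed order: kmalloc_array() can return NULL, and the loop that seeds every slot with &uapi->notsupp_method would otherwise dereference it. A compact sketch of the same shape in plain C; build_table(), struct method, and notsupp_method are illustrative names, not the kernel's.

#include <stdlib.h>

struct method { int id; };

static struct method notsupp_method = { .id = -1 };

static int build_table(struct method ***tablep, size_t n)
{
    struct method **data = malloc(n * sizeof(*data)); /* kmalloc_array() role */
    size_t i;

    if (!data)                  /* the check the fix adds */
        return -1;              /* the kernel returns -ENOMEM */

    for (i = 0; i < n; i++)     /* safe to seed only after the check */
        data[i] = &notsupp_method;

    *tablep = data;
    return 0;
}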
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -664,6 +664,7 @@ struct mlx5_ib_mr {
 
     /* User MR data */
     struct mlx5_cache_ent *cache_ent;
+    struct ib_umem *umem;
 
     /* This is zero'd when the MR is allocated */
     union {
@@ -675,7 +676,7 @@ struct mlx5_ib_mr {
             struct list_head list;
         };
 
-        /* Used only by kernel MRs */
+        /* Used only by kernel MRs (umem == NULL) */
         struct {
             void *descs;
             void *descs_alloc;
@@ -696,9 +697,8 @@ struct mlx5_ib_mr {
             int data_length;
         };
 
-        /* Used only by User MRs */
+        /* Used only by User MRs (umem != NULL) */
         struct {
-            struct ib_umem *umem;
             unsigned int page_shift;
             /* Current access_flags */
             int access_flags;
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1904,18 +1904,19 @@ mlx5_alloc_priv_descs(struct ib_device *device,
     return ret;
 }
 
-static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 {
-    struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-    int size = mr->max_descs * mr->desc_size;
+    if (!mr->umem && mr->descs) {
+        struct ib_device *device = mr->ibmr.device;
+        int size = mr->max_descs * mr->desc_size;
+        struct mlx5_ib_dev *dev = to_mdev(device);
 
-    if (!mr->descs)
-        return;
-
-    dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
-             DMA_TO_DEVICE);
-    kfree(mr->descs_alloc);
-    mr->descs = NULL;
+        dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+                 DMA_TO_DEVICE);
+        kfree(mr->descs_alloc);
+        mr->descs = NULL;
+    }
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1991,8 +1992,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
     if (mr->cache_ent) {
         mlx5_mr_cache_free(dev, mr);
     } else {
-        if (!udata)
-            mlx5_free_priv_descs(mr);
+        mlx5_free_priv_descs(mr);
         kfree(mr);
     }
     return 0;
@@ -2079,6 +2079,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
     if (err)
         goto err_free_in;
 
+    mr->umem = NULL;
     kfree(in);
 
     return mr;
@@ -2205,6 +2206,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
     }
 
     mr->ibmr.device = pd->device;
+    mr->umem = NULL;
 
     switch (mr_type) {
     case IB_MR_TYPE_MEM_REG:
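The mlx5 revert restores the layout where umem lives outside the union and discriminates it: user MRs (umem != NULL) overlay the kernel-MR descriptor fields, so freeing descriptors is only valid when umem is NULL, and mlx5_free_priv_descs() now makes that check itself instead of every dereg path gating on udata. A sketch of that guard pattern with illustrative types:

#include <stdlib.h>

struct mr {
    void *umem;         /* non-NULL only for user MRs */
    void *descs_alloc;  /* kernel-MR field; overlaid for user MRs */
};

/* The helper owns the "is this a kernel MR with live descriptors?"
 * decision, so callers can invoke it unconditionally. */
static void free_priv_descs(struct mr *mr)
{
    if (!mr->umem && mr->descs_alloc) {
        free(mr->descs_alloc);
        mr->descs_alloc = NULL;
    }
}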
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -135,19 +135,19 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
     mr->num_map = num_map;
 
     ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
     if (ret)
-        goto err_out;
+        return -ENOMEM;
 
     if (both) {
         ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
-        if (ret) {
-            rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
-            goto err_out;
-        }
+        if (ret)
+            goto err_free;
     }
 
     return 0;
 
-err_out:
+err_free:
+    rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+    mr->cur_map_set = NULL;
     return -ENOMEM;
 }
@@ -214,7 +214,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
             pr_warn("%s: Unable to get virtual address\n",
                 __func__);
             err = -ENOMEM;
-            goto err_cleanup_map;
+            goto err_release_umem;
         }
 
         buf->addr = (uintptr_t)vaddr;
@@ -237,8 +237,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
     return 0;
 
-err_cleanup_map:
-    rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
 err_release_umem:
     ib_umem_release(umem);
 err_out:
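The rxe hunks remove a double free: the old error paths freed cur_map_set but left the pointer set, so the teardown that runs when the MR object is destroyed freed it again. The fix funnels the free through a single label and clears the pointer. A sketch of that single-owner cleanup pattern with illustrative names:

#include <stdlib.h>

struct map_set { int nmaps; };

struct mr {
    struct map_set *cur_map_set;
    struct map_set *next_map_set;
};

/* Teardown that always runs when the MR object is destroyed. */
static void mr_cleanup(struct mr *mr)
{
    free(mr->cur_map_set);       /* free(NULL) is a harmless no-op */
    mr->cur_map_set = NULL;
    free(mr->next_map_set);
    mr->next_map_set = NULL;
}

static int mr_alloc(struct mr *mr, int both)
{
    mr->cur_map_set = calloc(1, sizeof(*mr->cur_map_set));
    if (!mr->cur_map_set)
        return -1;

    if (both) {
        mr->next_map_set = calloc(1, sizeof(*mr->next_map_set));
        if (!mr->next_map_set)
            goto err_free;
    }
    return 0;

err_free:
    free(mr->cur_map_set);
    mr->cur_map_set = NULL;      /* so mr_cleanup() cannot free it twice */
    return -1;
}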