Commit b18c7da6 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Fix memory leak in error path code

In rxe_mr_init_user(), at the third error exit the driver fails to free the
memory at mr->map. This patch adds code to do that. This error only occurs
if page_address() fails to return a non-zero address, which should never
happen on 64-bit architectures.

Fixes: 8700e3e7 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20210705164153.17652-1-rpearsonhpe@gmail.com
Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Reviewed-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent c9538831
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	int			num_buf;
 	void			*vaddr;
 	int			err;
+	int			i;
 
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}
 
 	mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+			__func__);
+		goto err_release_umem;
 	}
 
 	mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+					__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	return 0;
 
-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }
 
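For context, the fix converts rxe_mr_init_user() from a single shared error label to the usual kernel unwind-in-reverse pattern: each label releases exactly the resources acquired before the corresponding goto, so the third failure can no longer skip freeing mr->map. Below is a minimal userspace sketch of that pattern; acquire_first()/acquire_second()/third_step() are hypothetical stand-ins for ib_umem_get(), rxe_mr_alloc(), and the page_address() loop, not kernel APIs.

/* Sketch of the goto-unwind pattern the patch adopts; the helpers
 * are hypothetical stand-ins, not kernel APIs.
 */
#include <stdlib.h>

static void *acquire_first(void)  { return malloc(16); } /* cf. ib_umem_get() */
static void *acquire_second(void) { return malloc(16); } /* cf. rxe_mr_alloc() */
static int third_step(void)       { return -1; } /* cf. the page_address() loop */

static int init_example(void)
{
	void *a, *b;
	int err;

	a = acquire_first();
	if (!a) {
		err = -1;
		goto err_out;		/* nothing acquired yet */
	}

	b = acquire_second();
	if (!b) {
		err = -1;
		goto err_free_a;	/* undo only the first acquisition */
	}

	err = third_step();
	if (err)
		goto err_free_b;	/* undo both, in reverse order */

	return 0;

err_free_b:				/* analogue of err_cleanup_map */
	free(b);
err_free_a:				/* analogue of err_release_umem */
	free(a);
err_out:
	return err;
}

Each failure site only needs to know how far initialization got, and the labels unwind in reverse acquisition order; that is exactly what the new err_cleanup_map/err_release_umem/err_out labels do in the patch.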