Commit 2778b72b authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Replace pr_xxx by rxe_dbg_xxx in rxe_mr.c

Replace calls to pr_xxx() in rxe_mr.c by rxe_dbg_mr().

Link: https://lore.kernel.org/r/20221103171013.20659-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 52920f53
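
For context, rxe_dbg_mr() is one of the per-object debug macros added earlier in this series. A sketch of its shape, assuming the definition in drivers/infiniband/sw/rxe/rxe.h from this series (the elem.index field name is taken from the rxe pool code):

	/* Sketch: rxe_dbg_mr() wraps ibdev_dbg() (dynamic debug) and prefixes
	 * each message with the MR index and the calling function, which is
	 * why the "%s: ", __func__ boilerplate disappears from every call
	 * site converted below.
	 */
	#define rxe_dbg_mr(mr, fmt, ...) ibdev_dbg((mr)->ibmr.device,	\
			"mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)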
drivers/infiniband/sw/rxe/rxe_mr.c

@@ -38,8 +38,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 		return 0;
 
 	default:
-		pr_warn("%s: mr type (%d) not supported\n",
-			__func__, mr->ibmr.type);
+		rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type);
 		return -EFAULT;
 	}
 }
@@ -125,8 +124,8 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 
 	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("%s: Unable to pin memory region err = %d\n",
-			__func__, (int)PTR_ERR(umem));
+		rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
+			(int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
 		goto err_out;
 	}
@@ -137,8 +136,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("%s: Unable to allocate memory for map\n",
-			__func__);
+		rxe_dbg_mr(mr, "Unable to allocate memory for map\n");
 		goto err_release_umem;
 	}
 
@@ -159,8 +157,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("%s: Unable to get virtual address\n",
-					__func__);
+				rxe_dbg_mr(mr, "Unable to get virtual address\n");
 				err = -ENOMEM;
 				goto err_cleanup_map;
 			}
@@ -255,7 +252,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 	void *addr;
 
 	if (mr->state != RXE_MR_STATE_VALID) {
-		pr_warn("mr not in valid state\n");
+		rxe_dbg_mr(mr, "Not in valid state\n");
 		addr = NULL;
 		goto out;
 	}
@@ -266,7 +263,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 	}
 
 	if (mr_check_range(mr, iova, length)) {
-		pr_warn("range violation\n");
+		rxe_dbg_mr(mr, "Range violation\n");
 		addr = NULL;
 		goto out;
 	}
@@ -274,7 +271,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 	lookup_iova(mr, iova, &m, &n, &offset);
 
 	if (offset + length > mr->map[m]->buf[n].size) {
-		pr_warn("crosses page boundary\n");
+		rxe_dbg_mr(mr, "Crosses page boundary\n");
 		addr = NULL;
 		goto out;
 	}
@@ -527,27 +524,26 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
 
 	mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
 	if (!mr) {
-		pr_err("%s: No MR for key %#x\n", __func__, key);
+		rxe_dbg_mr(mr, "No MR for key %#x\n", key);
 		ret = -EINVAL;
 		goto err;
 	}
 
 	if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {
-		pr_err("%s: wr key (%#x) doesn't match mr key (%#x)\n",
-			__func__, key, (mr->rkey ? mr->rkey : mr->lkey));
+		rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
+			key, (mr->rkey ? mr->rkey : mr->lkey));
 		ret = -EINVAL;
 		goto err_drop_ref;
 	}
 
 	if (atomic_read(&mr->num_mw) > 0) {
-		pr_warn("%s: Attempt to invalidate an MR while bound to MWs\n",
-			__func__);
+		rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
 		ret = -EINVAL;
 		goto err_drop_ref;
 	}
 
 	if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
-		pr_warn("%s: mr type (%d) is wrong\n", __func__, mr->ibmr.type);
+		rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
 		ret = -EINVAL;
 		goto err_drop_ref;
 	}
@@ -576,22 +572,20 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 
 	/* user can only register MR in free state */
 	if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
-		pr_warn("%s: mr->lkey = 0x%x not free\n",
-			__func__, mr->lkey);
+		rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
 		return -EINVAL;
 	}
 
 	/* user can only register mr with qp in same protection domain */
 	if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
-		pr_warn("%s: qp->pd and mr->pd don't match\n",
-			__func__);
+		rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
 		return -EINVAL;
 	}
 
 	/* user is only allowed to change key portion of l/rkey */
 	if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
-		pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n",
-			__func__, key, mr->lkey);
+		rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
+			key, mr->lkey);
 		return -EINVAL;
 	}
 
drivers/infiniband/sw/rxe/rxe_verbs.c
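These hunks initialize mr->ibmr.device in the three MR allocation paths (rxe_get_dma_mr(), rxe_reg_user_mr(), rxe_alloc_mr()). Assuming the macro sketch above, rxe_dbg_mr() dereferences mr->ibmr.device, so the field must be populated before the rxe_mr_init_*() calls that may now log.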
@@ -875,6 +875,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
+	mr->ibmr.device = ibpd->device;
 
 	rxe_mr_init_dma(access, mr);
 	rxe_finalize(mr);
@@ -899,6 +900,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
+	mr->ibmr.device = ibpd->device;
 
 	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
 	if (err)
@@ -930,6 +932,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 
 	rxe_get(pd);
 	mr->ibmr.pd = ibpd;
+	mr->ibmr.device = ibpd->device;
 
 	err = rxe_mr_init_fast(max_num_sg, mr);
 	if (err)
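A usage note: since ibdev_dbg() is built on dynamic debug, the converted messages are silent by default (and compiled out entirely without CONFIG_DYNAMIC_DEBUG or DEBUG). On a kernel with dynamic debug, they can be enabled at runtime with, for example, echo 'module rdma_rxe +p' > /sys/kernel/debug/dynamic_debug/control.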