Commit 203b70fd authored by Zhengchao Shao, committed by Leon Romanovsky

RDMA/hns: Fix return value in hns_roce_map_mr_sg

As described in the ib_map_mr_sg function comment, it returns the number
of sg elements that were mapped to the memory region. However,
hns_roce_map_mr_sg returns the number of pages required for mapping the
DMA area. Fix it.

Fixes: 9b2cf76c ("RDMA/hns: Optimize PBL buffer allocation process")
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
Link: https://lore.kernel.org/r/20240411033851.2884771-1-shaozhengchao@huawei.com
Reviewed-by: Junxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 8859f009
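
For context, a minimal caller-side sketch (the helper name and error handling are illustrative, not part of this patch): ULPs compare the return value of ib_map_mr_sg() against the number of sg entries they passed in to detect a partial mapping, so returning a page count instead of the mapped sg-element count breaks that check.

#include <rdma/ib_verbs.h>

/*
 * Illustrative caller-side check (hypothetical helper, not from this
 * patch). ib_map_mr_sg() is documented to return the number of sg
 * elements it mapped, so callers compare it against sg_cnt rather than
 * against a page count.
 */
static int example_reg_mr(struct ib_mr *mr, struct scatterlist *sg, int sg_cnt)
{
	int nents;

	nents = ib_map_mr_sg(mr, sg, sg_cnt, NULL, PAGE_SIZE);
	if (nents < 0)
		return nents;		/* mapping failed outright */
	if (nents < sg_cnt)
		return -EINVAL;		/* partial mapping, cannot cover the whole sg list */

	/* ... build and post an IB_WR_REG_MR work request here ... */
	return 0;
}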
@@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
-	int ret = 0;
+	int ret, sg_num = 0;
 
 	mr->npages = 0;
 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
 				 sizeof(dma_addr_t), GFP_KERNEL);
 	if (!mr->page_list)
-		return ret;
+		return sg_num;
 
-	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
-	if (ret < 1) {
+	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+	if (sg_num < 1) {
 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
 		goto err_page_list;
 	}
 
@@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
 	if (ret) {
 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
-		ret = 0;
+		sg_num = 0;
 	} else {
 		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
-		ret = mr->npages;
 	}
 
 err_page_list:
 	kvfree(mr->page_list);
 	mr->page_list = NULL;
 
-	return ret;
+	return sg_num;
 }
 
 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
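
The reason the two counts differ: ib_sg_to_pages() calls the driver's set_page callback once per ibmr->page_size chunk (which is what mr->npages ends up counting), while its return value is the number of sg elements it consumed. A rough sketch of such a callback, using the field names visible in the diff above (the body is illustrative, not a copy of hns_roce_set_page()):

/*
 * Sketch of a set_page callback: invoked once per page-sized chunk, it
 * records the DMA address and bumps mr->npages. The sg-element count that
 * ib_sg_to_pages() returns is tracked by the IB core, not by the driver,
 * which is why mr->npages must not be substituted for it.
 */
static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (mr->npages >= mr->pbl_mtr.hem_cfg.buf_pg_count)
		return -ENOBUFS;	/* more pages than the PBL has room for */

	mr->page_list[mr->npages++] = addr;
	return 0;
}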