Commit 48b586ac authored by Shiraz, Saleem; committed by Jason Gunthorpe

RDMA/cxgb4: Use for_each_sg_dma_page iterator on umem SGL

Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra loop to iterate
pages in the SGE when for_each_sg iterator is used.

Additionally, purge umem->page_shift usage in the driver as it's only
relevant for ODP MRs. Use the system page size and shift instead.
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 3856ec55
@@ -502,10 +502,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			       u64 virt, int acc, struct ib_udata *udata)
 {
 	__be64 *pages;
-	int shift, n, len;
-	int i, k, entry;
+	int shift, n, i;
 	int err = -ENOMEM;
-	struct scatterlist *sg;
+	struct sg_dma_page_iter sg_iter;
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;
@@ -541,7 +540,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (IS_ERR(mhp->umem))
 		goto err_free_skb;
 
-	shift = mhp->umem->page_shift;
+	shift = PAGE_SHIFT;
 	n = mhp->umem->nmap;
 	err = alloc_pbl(mhp, n);
@@ -556,21 +555,16 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	i = n = 0;
-	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> shift;
-		for (k = 0; k < len; ++k) {
-			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 (k << shift));
-			if (i == PAGE_SIZE / sizeof *pages) {
-				err = write_pbl(&mhp->rhp->rdev,
-						pages,
-						mhp->attr.pbl_addr + (n << 3), i,
-						mhp->wr_waitp);
-				if (err)
-					goto pbl_done;
-				n += i;
-				i = 0;
-			}
-		}
+	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
+		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+		if (i == PAGE_SIZE / sizeof(*pages)) {
+			err = write_pbl(&mhp->rhp->rdev, pages,
+					mhp->attr.pbl_addr + (n << 3), i,
+					mhp->wr_waitp);
+			if (err)
+				goto pbl_done;
+			n += i;
+			i = 0;
+		}
 	}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment