Commit b44e47eb authored by Shiraz, Saleem and committed by Jason Gunthorpe

RDMA/cxgb3: Use for_each_sg_dma_page iterator on umem SGL

Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra inner loop needed to
iterate over the pages within each SGE when the for_each_sg iterator is used.
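For contrast, a minimal sketch (not from this patch) of the two walking styles
over a DMA-mapped scatterlist; fill_page() is a hypothetical consumer standing
in for the driver's PBL writer:

#include <linux/scatterlist.h>

/* Hypothetical consumer standing in for the driver's PBL writer. */
void fill_page(dma_addr_t addr);

/* Old style: outer loop over SGEs, inner loop over pages within each SGE. */
static void walk_old(struct scatterlist *sgl, int nents, int shift)
{
	struct scatterlist *sg;
	int i, k, len;

	for_each_sg(sgl, sg, nents, i) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k)
			fill_page(sg_dma_address(sg) + (k << shift));
	}
}

/* New style: the iterator yields each PAGE_SIZE-sized DMA chunk directly. */
static void walk_new(struct scatterlist *sgl, int nents)
{
	struct sg_dma_page_iter sg_iter;

	for_each_sg_dma_page(sgl, &sg_iter, nents, 0)
		fill_page(sg_page_iter_dma_address(&sg_iter));
}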

Additionally, purge umem->page_shift usage in the driver, as it is only
relevant for ODP MRs. Use the system page size and shift instead.
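In concrete terms, a non-ODP MR's page count depends only on the system page
size; a hypothetical helper (not part of this patch) would compute it as:

#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Hypothetical helper, not from this patch: number of PAGE_SIZE entries
 * needed to cover a user buffer [start, start + length).
 */
static unsigned long mr_npages(u64 start, u64 length)
{
	return (ALIGN(start + length, PAGE_SIZE) -
		ALIGN_DOWN(start, PAGE_SIZE)) >> PAGE_SHIFT;
}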
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 48b586ac
@@ -516,14 +516,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				      u64 virt, int acc, struct ib_udata *udata)
 {
 	__be64 *pages;
-	int shift, n, len;
-	int i, k, entry;
+	int shift, n, i;
 	int err = 0;
 	struct iwch_dev *rhp;
 	struct iwch_pd *php;
 	struct iwch_mr *mhp;
 	struct iwch_reg_user_mr_resp uresp;
-	struct scatterlist *sg;
+	struct sg_dma_page_iter sg_iter;
 	pr_debug("%s ib_pd %p\n", __func__, pd);
 
 	php = to_iwch_pd(pd);
@@ -541,7 +540,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(err);
 	}
 
-	shift = mhp->umem->page_shift;
+	shift = PAGE_SHIFT;
 
 	n = mhp->umem->nmap;
@@ -557,11 +556,8 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	i = n = 0;
 
-	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> shift;
-		for (k = 0; k < len; ++k) {
-			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 (k << shift));
-			if (i == PAGE_SIZE / sizeof *pages) {
-				err = iwch_write_pbl(mhp, pages, i, n);
-				if (err)
+	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
+		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+		if (i == PAGE_SIZE / sizeof *pages) {
+			err = iwch_write_pbl(mhp, pages, i, n);
+			if (err)
@@ -570,7 +566,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-				i = 0;
-			}
-		}
-	}
+			i = 0;
+		}
+	}
 
 	if (i)
 		err = iwch_write_pbl(mhp, pages, i, n);