Commit 8d249af3 authored by Shiraz, Saleem, committed by Jason Gunthorpe

RDMA/mthca: Use for_each_sg_dma_page iterator on umem SGL

Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra inner loop needed
to iterate over the pages within each SGE when the plain for_each_sg
iterator is used.

Additionally, purge umem->page_shift usage in the driver, as it is only
relevant for ODP MRs. Use the system page size and shift instead.
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 161ebe24
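For context, the change replaces the driver's hand-rolled per-SGE page walk
with the core helper. Below is a minimal sketch of the two patterns,
distilled from the diff that follows; umem, pages, shift, and i stand in
for mthca_reg_user_mr()'s local state, and the snippets are illustrative
rather than complete driver code.

	/*
	 * Old pattern: walk each DMA-mapped SGE with for_each_sg(), then
	 * step through the pages inside the SGE by hand using the umem
	 * page shift.
	 */
	struct scatterlist *sg;
	int entry, k, i = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		int len = sg_dma_len(sg) >> shift;

		for (k = 0; k < len; ++k)
			pages[i++] = sg_dma_address(sg) + (k << shift);
	}

	/*
	 * New pattern: for_each_sg_dma_page() hides the inner page loop
	 * and yields one system-size page per iteration; the page DMA
	 * address comes from sg_page_iter_dma_address().
	 */
	struct sg_dma_page_iter sg_iter;

	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		pages[i++] = sg_page_iter_dma_address(&sg_iter);
	}

Because the core iterator always steps in system-page units, the driver no
longer needs umem->page_shift and can program the MR with PAGE_SHIFT,
which is what the final hunk does.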
@@ -897,12 +897,11 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				       u64 virt, int acc, struct ib_udata *udata)
 {
 	struct mthca_dev *dev = to_mdev(pd->device);
-	struct scatterlist *sg;
+	struct sg_dma_page_iter sg_iter;
 	struct mthca_mr *mr;
 	struct mthca_reg_mr ucmd;
 	u64 *pages;
-	int shift, n, len;
-	int i, k, entry;
+	int n, i;
 	int err = 0;
 	int write_mtt_size;
@@ -929,7 +928,6 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err;
 	}
 
-	shift = mr->umem->page_shift;
 	n = mr->umem->nmap;
 
 	mr->mtt = mthca_alloc_mtt(dev, n);
@@ -948,21 +946,19 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
 
-	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> shift;
-		for (k = 0; k < len; ++k) {
-			pages[i++] = sg_dma_address(sg) + (k << shift);
-
-			/*
-			 * Be friendly to write_mtt and pass it chunks
-			 * of appropriate size.
-			 */
-			if (i == write_mtt_size) {
-				err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
-				if (err)
-					goto mtt_done;
-				n += i;
-				i = 0;
-			}
+	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
+		pages[i++] = sg_page_iter_dma_address(&sg_iter);
+
+		/*
+		 * Be friendly to write_mtt and pass it chunks
+		 * of appropriate size.
+		 */
+		if (i == write_mtt_size) {
+			err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+			if (err)
+				goto mtt_done;
+			n += i;
+			i = 0;
 		}
 	}
@@ -973,7 +969,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err)
 		goto err_mtt;
 
-	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
+	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
 			     convert_access(acc), mr);
 	if (err)