Commit 6ef999f5 authored by Jason Gunthorpe's avatar Jason Gunthorpe

RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()

This driver is taking the SGL out of the umem and passing it through a
struct bnxt_qplib_sg_info. Instead of passing the SGL pass the umem and
then use rdma_umem_for_each_dma_block() directly.

Move the calls of ib_umem_num_dma_blocks() closer to their actual point of
use, npages is only set for non-umem pbl flows.

Link: https://lore.kernel.org/r/0-v1-b37437a73f35+49c-bnxt_re_dma_block_jgg@nvidia.com
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Tested-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 5ce2dced
...@@ -940,9 +940,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, ...@@ -940,9 +940,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
return PTR_ERR(umem); return PTR_ERR(umem);
qp->sumem = umem; qp->sumem = umem;
qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl; qplib_qp->sq.sg_info.umem = umem;
qplib_qp->sq.sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
qplib_qp->sq.sg_info.nmap = umem->nmap;
qplib_qp->sq.sg_info.pgsize = PAGE_SIZE; qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT; qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
qplib_qp->qp_handle = ureq.qp_handle; qplib_qp->qp_handle = ureq.qp_handle;
...@@ -955,10 +953,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, ...@@ -955,10 +953,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (IS_ERR(umem)) if (IS_ERR(umem))
goto rqfail; goto rqfail;
qp->rumem = umem; qp->rumem = umem;
qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl; qplib_qp->rq.sg_info.umem = umem;
qplib_qp->rq.sg_info.npages =
ib_umem_num_dma_blocks(umem, PAGE_SIZE);
qplib_qp->rq.sg_info.nmap = umem->nmap;
qplib_qp->rq.sg_info.pgsize = PAGE_SIZE; qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT; qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
} }
...@@ -1612,9 +1607,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, ...@@ -1612,9 +1607,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
return PTR_ERR(umem); return PTR_ERR(umem);
srq->umem = umem; srq->umem = umem;
qplib_srq->sg_info.sghead = umem->sg_head.sgl; qplib_srq->sg_info.umem = umem;
qplib_srq->sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
qplib_srq->sg_info.nmap = umem->nmap;
qplib_srq->sg_info.pgsize = PAGE_SIZE; qplib_srq->sg_info.pgsize = PAGE_SIZE;
qplib_srq->sg_info.pgshft = PAGE_SHIFT; qplib_srq->sg_info.pgshft = PAGE_SHIFT;
qplib_srq->srq_handle = ureq.srq_handle; qplib_srq->srq_handle = ureq.srq_handle;
...@@ -2865,10 +2858,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, ...@@ -2865,10 +2858,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = PTR_ERR(cq->umem); rc = PTR_ERR(cq->umem);
goto fail; goto fail;
} }
cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl; cq->qplib_cq.sg_info.umem = cq->umem;
cq->qplib_cq.sg_info.npages =
ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
cq->qplib_cq.dpi = &uctx->dpi; cq->qplib_cq.dpi = &uctx->dpi;
} else { } else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
......
...@@ -45,6 +45,9 @@ ...@@ -45,6 +45,9 @@
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include "roce_hsi.h" #include "roce_hsi.h"
#include "qplib_res.h" #include "qplib_res.h"
#include "qplib_sp.h" #include "qplib_sp.h"
...@@ -87,12 +90,11 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl, ...@@ -87,12 +90,11 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl, static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
struct bnxt_qplib_sg_info *sginfo) struct bnxt_qplib_sg_info *sginfo)
{ {
struct scatterlist *sghead = sginfo->sghead; struct ib_block_iter biter;
struct sg_dma_page_iter sg_iter;
int i = 0; int i = 0;
for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) { rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter); pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
pbl->pg_arr[i] = NULL; pbl->pg_arr[i] = NULL;
pbl->pg_count++; pbl->pg_count++;
i++; i++;
...@@ -104,15 +106,16 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, ...@@ -104,15 +106,16 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sg_info *sginfo) struct bnxt_qplib_sg_info *sginfo)
{ {
struct pci_dev *pdev = res->pdev; struct pci_dev *pdev = res->pdev;
struct scatterlist *sghead;
bool is_umem = false; bool is_umem = false;
u32 pages; u32 pages;
int i; int i;
if (sginfo->nopte) if (sginfo->nopte)
return 0; return 0;
pages = sginfo->npages; if (sginfo->umem)
sghead = sginfo->sghead; pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
else
pages = sginfo->npages;
/* page ptr arrays */ /* page ptr arrays */
pbl->pg_arr = vmalloc(pages * sizeof(void *)); pbl->pg_arr = vmalloc(pages * sizeof(void *));
if (!pbl->pg_arr) if (!pbl->pg_arr)
...@@ -127,7 +130,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, ...@@ -127,7 +130,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_count = 0; pbl->pg_count = 0;
pbl->pg_size = sginfo->pgsize; pbl->pg_size = sginfo->pgsize;
if (!sghead) { if (!sginfo->umem) {
for (i = 0; i < pages; i++) { for (i = 0; i < pages; i++) {
pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev, pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
pbl->pg_size, pbl->pg_size,
...@@ -183,14 +186,12 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, ...@@ -183,14 +186,12 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_sg_info sginfo = {}; struct bnxt_qplib_sg_info sginfo = {};
u32 depth, stride, npbl, npde; u32 depth, stride, npbl, npde;
dma_addr_t *src_phys_ptr, **dst_virt_ptr; dma_addr_t *src_phys_ptr, **dst_virt_ptr;
struct scatterlist *sghead = NULL;
struct bnxt_qplib_res *res; struct bnxt_qplib_res *res;
struct pci_dev *pdev; struct pci_dev *pdev;
int i, rc, lvl; int i, rc, lvl;
res = hwq_attr->res; res = hwq_attr->res;
pdev = res->pdev; pdev = res->pdev;
sghead = hwq_attr->sginfo->sghead;
pg_size = hwq_attr->sginfo->pgsize; pg_size = hwq_attr->sginfo->pgsize;
hwq->level = PBL_LVL_MAX; hwq->level = PBL_LVL_MAX;
...@@ -204,7 +205,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, ...@@ -204,7 +205,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
aux_pages++; aux_pages++;
} }
if (!sghead) { if (!hwq_attr->sginfo->umem) {
hwq->is_user = false; hwq->is_user = false;
npages = (depth * stride) / pg_size + aux_pages; npages = (depth * stride) / pg_size + aux_pages;
if ((depth * stride) % pg_size) if ((depth * stride) % pg_size)
...@@ -213,11 +214,14 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, ...@@ -213,11 +214,14 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
return -EINVAL; return -EINVAL;
hwq_attr->sginfo->npages = npages; hwq_attr->sginfo->npages = npages;
} else { } else {
unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
hwq->is_user = true; hwq->is_user = true;
npages = hwq_attr->sginfo->npages; npages = sginfo_num_pages;
npages = (npages * PAGE_SIZE) / npages = (npages * PAGE_SIZE) /
BIT_ULL(hwq_attr->sginfo->pgshft); BIT_ULL(hwq_attr->sginfo->pgshft);
if ((hwq_attr->sginfo->npages * PAGE_SIZE) % if ((sginfo_num_pages * PAGE_SIZE) %
BIT_ULL(hwq_attr->sginfo->pgshft)) BIT_ULL(hwq_attr->sginfo->pgshft))
if (!npages) if (!npages)
npages++; npages++;
......
...@@ -126,8 +126,7 @@ struct bnxt_qplib_pbl { ...@@ -126,8 +126,7 @@ struct bnxt_qplib_pbl {
}; };
struct bnxt_qplib_sg_info { struct bnxt_qplib_sg_info {
struct scatterlist *sghead; struct ib_umem *umem;
u32 nmap;
u32 npages; u32 npages;
u32 pgshft; u32 pgshft;
u32 pgsize; u32 pgsize;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment