Commit 5aa84840 authored by Selvin Xavier, committed by Jason Gunthorpe

RDMA/bnxt_re: Use correct sizing on buffers holding page DMA addresses

umem->nmap is used while allocating internal buffer for storing
page DMA addresses. This causes out of bounds array access while iterating
the umem DMA-mapped SGL with umem page combining, as umem->nmap can be
less than the number of system pages in the umem.

Use ib_umem_num_pages() instead of umem->nmap to size the page array.
Add a new structure (bnxt_qplib_sg_info) to pass sglist, npages and nmap.
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 196b4ce5
...@@ -895,8 +895,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, ...@@ -895,8 +895,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
return PTR_ERR(umem); return PTR_ERR(umem);
qp->sumem = umem; qp->sumem = umem;
qplib_qp->sq.sglist = umem->sg_head.sgl; qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
qplib_qp->sq.nmap = umem->nmap; qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
qplib_qp->sq.sg_info.nmap = umem->nmap;
qplib_qp->qp_handle = ureq.qp_handle; qplib_qp->qp_handle = ureq.qp_handle;
if (!qp->qplib_qp.srq) { if (!qp->qplib_qp.srq) {
...@@ -907,8 +908,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, ...@@ -907,8 +908,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (IS_ERR(umem)) if (IS_ERR(umem))
goto rqfail; goto rqfail;
qp->rumem = umem; qp->rumem = umem;
qplib_qp->rq.sglist = umem->sg_head.sgl; qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
qplib_qp->rq.nmap = umem->nmap; qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
qplib_qp->rq.sg_info.nmap = umem->nmap;
} }
qplib_qp->dpi = &cntx->dpi; qplib_qp->dpi = &cntx->dpi;
...@@ -916,8 +918,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, ...@@ -916,8 +918,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
rqfail: rqfail:
ib_umem_release(qp->sumem); ib_umem_release(qp->sumem);
qp->sumem = NULL; qp->sumem = NULL;
qplib_qp->sq.sglist = NULL; memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
qplib_qp->sq.nmap = 0;
return PTR_ERR(umem); return PTR_ERR(umem);
} }
...@@ -1374,8 +1375,9 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, ...@@ -1374,8 +1375,9 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
return PTR_ERR(umem); return PTR_ERR(umem);
srq->umem = umem; srq->umem = umem;
qplib_srq->nmap = umem->nmap; qplib_srq->sg_info.sglist = umem->sg_head.sgl;
qplib_srq->sglist = umem->sg_head.sgl; qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
qplib_srq->sg_info.nmap = umem->nmap;
qplib_srq->srq_handle = ureq.srq_handle; qplib_srq->srq_handle = ureq.srq_handle;
qplib_srq->dpi = &cntx->dpi; qplib_srq->dpi = &cntx->dpi;
...@@ -2632,8 +2634,9 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, ...@@ -2632,8 +2634,9 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
rc = PTR_ERR(cq->umem); rc = PTR_ERR(cq->umem);
goto fail; goto fail;
} }
cq->qplib_cq.sghead = cq->umem->sg_head.sgl; cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
cq->qplib_cq.nmap = cq->umem->nmap; cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
cq->qplib_cq.dpi = &uctx->dpi; cq->qplib_cq.dpi = &uctx->dpi;
} else { } else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
...@@ -2645,8 +2648,6 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, ...@@ -2645,8 +2648,6 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
} }
cq->qplib_cq.dpi = &rdev->dpi_privileged; cq->qplib_cq.dpi = &rdev->dpi_privileged;
cq->qplib_cq.sghead = NULL;
cq->qplib_cq.nmap = 0;
} }
/* /*
* Allocating the NQ in a round robin fashion. nq_alloc_cnt is a * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
......
...@@ -478,7 +478,7 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq) ...@@ -478,7 +478,7 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT) nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
hwq_type = bnxt_qplib_get_hwq_type(nq->res); hwq_type = bnxt_qplib_get_hwq_type(nq->res);
if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0, if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
&nq->hwq.max_elements, &nq->hwq.max_elements,
BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
PAGE_SIZE, hwq_type)) PAGE_SIZE, hwq_type))
...@@ -542,8 +542,8 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, ...@@ -542,8 +542,8 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
int rc, idx; int rc, idx;
srq->hwq.max_elements = srq->max_wqe; srq->hwq.max_elements = srq->max_wqe;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info,
srq->nmap, &srq->hwq.max_elements, &srq->hwq.max_elements,
BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE); PAGE_SIZE, HWQ_TYPE_QUEUE);
if (rc) if (rc)
...@@ -742,7 +742,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) ...@@ -742,7 +742,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* SQ */ /* SQ */
sq->hwq.max_elements = sq->max_wqe; sq->hwq.max_elements = sq->max_wqe;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
&sq->hwq.max_elements, &sq->hwq.max_elements,
BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE); PAGE_SIZE, HWQ_TYPE_QUEUE);
...@@ -781,7 +781,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) ...@@ -781,7 +781,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */ /* RQ */
if (rq->max_wqe) { if (rq->max_wqe) {
rq->hwq.max_elements = qp->rq.max_wqe; rq->hwq.max_elements = qp->rq.max_wqe;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
&rq->hwq.max_elements, &rq->hwq.max_elements,
BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE); PAGE_SIZE, HWQ_TYPE_QUEUE);
...@@ -890,8 +890,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) ...@@ -890,8 +890,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
sizeof(struct sq_psn_search); sizeof(struct sq_psn_search);
} }
sq->hwq.max_elements = sq->max_wqe; sq->hwq.max_elements = sq->max_wqe;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
sq->nmap, &sq->hwq.max_elements, &sq->hwq.max_elements,
BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
psn_sz, psn_sz,
PAGE_SIZE, HWQ_TYPE_QUEUE); PAGE_SIZE, HWQ_TYPE_QUEUE);
...@@ -959,8 +959,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) ...@@ -959,8 +959,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */ /* RQ */
if (rq->max_wqe) { if (rq->max_wqe) {
rq->hwq.max_elements = rq->max_wqe; rq->hwq.max_elements = rq->max_wqe;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
rq->nmap, &rq->hwq.max_elements, &rq->sg_info,
&rq->hwq.max_elements,
BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE); PAGE_SIZE, HWQ_TYPE_QUEUE);
if (rc) if (rc)
...@@ -1030,7 +1031,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) ...@@ -1030,7 +1031,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req_size = xrrq->max_elements * req_size = xrrq->max_elements *
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1; BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1); req_size &= ~(PAGE_SIZE - 1);
rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
&xrrq->max_elements, &xrrq->max_elements,
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE, BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
0, req_size, HWQ_TYPE_CTX); 0, req_size, HWQ_TYPE_CTX);
...@@ -1046,7 +1047,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) ...@@ -1046,7 +1047,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1; BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1); req_size &= ~(PAGE_SIZE - 1);
rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
&xrrq->max_elements, &xrrq->max_elements,
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE, BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
0, req_size, HWQ_TYPE_CTX); 0, req_size, HWQ_TYPE_CTX);
...@@ -1935,8 +1936,8 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) ...@@ -1935,8 +1936,8 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
int rc; int rc;
cq->hwq.max_elements = cq->max_wqe; cq->hwq.max_elements = cq->max_wqe;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
cq->nmap, &cq->hwq.max_elements, &cq->hwq.max_elements,
BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_QUEUE); PAGE_SIZE, HWQ_TYPE_QUEUE);
if (rc) if (rc)
......
...@@ -52,10 +52,9 @@ struct bnxt_qplib_srq { ...@@ -52,10 +52,9 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_cq *cq; struct bnxt_qplib_cq *cq;
struct bnxt_qplib_hwq hwq; struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq; struct bnxt_qplib_swq *swq;
struct scatterlist *sglist;
int start_idx; int start_idx;
int last_idx; int last_idx;
u32 nmap; struct bnxt_qplib_sg_info sg_info;
u16 eventq_hw_ring_id; u16 eventq_hw_ring_id;
spinlock_t lock; /* protect SRQE link list */ spinlock_t lock; /* protect SRQE link list */
}; };
...@@ -237,8 +236,7 @@ struct bnxt_qplib_swqe { ...@@ -237,8 +236,7 @@ struct bnxt_qplib_swqe {
struct bnxt_qplib_q { struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq; struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq; struct bnxt_qplib_swq *swq;
struct scatterlist *sglist; struct bnxt_qplib_sg_info sg_info;
u32 nmap;
u32 max_wqe; u32 max_wqe;
u16 q_full_delta; u16 q_full_delta;
u16 max_sge; u16 max_sge;
...@@ -381,8 +379,7 @@ struct bnxt_qplib_cq { ...@@ -381,8 +379,7 @@ struct bnxt_qplib_cq {
u32 cnq_hw_ring_id; u32 cnq_hw_ring_id;
struct bnxt_qplib_nq *nq; struct bnxt_qplib_nq *nq;
bool resize_in_progress; bool resize_in_progress;
struct scatterlist *sghead; struct bnxt_qplib_sg_info sg_info;
u32 nmap;
u64 cq_handle; u64 cq_handle;
#define CQ_RESIZE_WAIT_TIME_MS 500 #define CQ_RESIZE_WAIT_TIME_MS 500
......
...@@ -569,7 +569,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, ...@@ -569,7 +569,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
rcfw->pdev = pdev; rcfw->pdev = pdev;
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT; rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
hwq_type = bnxt_qplib_get_hwq_type(rcfw->res); hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0, if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL,
&rcfw->creq.max_elements, &rcfw->creq.max_elements,
BNXT_QPLIB_CREQE_UNITS, BNXT_QPLIB_CREQE_UNITS,
0, PAGE_SIZE, hwq_type)) { 0, PAGE_SIZE, hwq_type)) {
...@@ -584,7 +584,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, ...@@ -584,7 +584,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
rcfw->cmdq.max_elements = rcfw->cmdq_depth; rcfw->cmdq.max_elements = rcfw->cmdq_depth;
if (bnxt_qplib_alloc_init_hwq if (bnxt_qplib_alloc_init_hwq
(rcfw->pdev, &rcfw->cmdq, NULL, 0, (rcfw->pdev, &rcfw->cmdq, NULL,
&rcfw->cmdq.max_elements, &rcfw->cmdq.max_elements,
BNXT_QPLIB_CMDQE_UNITS, 0, BNXT_QPLIB_CMDQE_UNITS, 0,
bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth), bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth),
......
...@@ -83,7 +83,8 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, ...@@ -83,7 +83,8 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
} }
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
struct scatterlist *sghead, u32 pages, u32 pg_size) struct scatterlist *sghead, u32 pages,
u32 nmaps, u32 pg_size)
{ {
struct sg_dma_page_iter sg_iter; struct sg_dma_page_iter sg_iter;
bool is_umem = false; bool is_umem = false;
...@@ -116,7 +117,7 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, ...@@ -116,7 +117,7 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
} else { } else {
i = 0; i = 0;
is_umem = true; is_umem = true;
for_each_sg_dma_page (sghead, &sg_iter, pages, 0) { for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) {
pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter); pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
pbl->pg_arr[i] = NULL; pbl->pg_arr[i] = NULL;
pbl->pg_count++; pbl->pg_count++;
...@@ -158,12 +159,13 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq) ...@@ -158,12 +159,13 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
/* All HWQs are power of 2 in size */ /* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
struct scatterlist *sghead, int nmap, struct bnxt_qplib_sg_info *sg_info,
u32 *elements, u32 element_size, u32 aux, u32 *elements, u32 element_size, u32 aux,
u32 pg_size, enum bnxt_qplib_hwq_type hwq_type) u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{ {
u32 pages, slots, size, aux_pages = 0, aux_size = 0; u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0;
dma_addr_t *src_phys_ptr, **dst_virt_ptr; dma_addr_t *src_phys_ptr, **dst_virt_ptr;
struct scatterlist *sghead = NULL;
int i, rc; int i, rc;
hwq->level = PBL_LVL_MAX; hwq->level = PBL_LVL_MAX;
...@@ -177,6 +179,9 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, ...@@ -177,6 +179,9 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
} }
size = roundup_pow_of_two(element_size); size = roundup_pow_of_two(element_size);
if (sg_info)
sghead = sg_info->sglist;
if (!sghead) { if (!sghead) {
hwq->is_user = false; hwq->is_user = false;
pages = (slots * size) / pg_size + aux_pages; pages = (slots * size) / pg_size + aux_pages;
...@@ -184,17 +189,20 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, ...@@ -184,17 +189,20 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
pages++; pages++;
if (!pages) if (!pages)
return -EINVAL; return -EINVAL;
maps = 0;
} else { } else {
hwq->is_user = true; hwq->is_user = true;
pages = nmap; pages = sg_info->npages;
maps = sg_info->nmap;
} }
/* Alloc the 1st memory block; can be a PDL/PTL/PBL */ /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
if (sghead && (pages == MAX_PBL_LVL_0_PGS)) if (sghead && (pages == MAX_PBL_LVL_0_PGS))
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead, rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
pages, pg_size); pages, maps, pg_size);
else else
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size); rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL,
1, 0, pg_size);
if (rc) if (rc)
goto fail; goto fail;
...@@ -204,7 +212,8 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, ...@@ -204,7 +212,8 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
if (pages > MAX_PBL_LVL_1_PGS) { if (pages > MAX_PBL_LVL_1_PGS) {
/* 2 levels of indirection */ /* 2 levels of indirection */
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL, rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size); MAX_PBL_LVL_1_PGS_FOR_LVL_2,
0, pg_size);
if (rc) if (rc)
goto fail; goto fail;
/* Fill in lvl0 PBL */ /* Fill in lvl0 PBL */
...@@ -217,7 +226,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, ...@@ -217,7 +226,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
hwq->level = PBL_LVL_1; hwq->level = PBL_LVL_1;
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead, rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
pages, pg_size); pages, maps, pg_size);
if (rc) if (rc)
goto fail; goto fail;
...@@ -246,7 +255,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, ...@@ -246,7 +255,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
/* 1 level of indirection */ /* 1 level of indirection */
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead, rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
pages, pg_size); pages, maps, pg_size);
if (rc) if (rc)
goto fail; goto fail;
/* Fill in lvl0 PBL */ /* Fill in lvl0 PBL */
...@@ -339,7 +348,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, ...@@ -339,7 +348,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* QPC Tables */ /* QPC Tables */
ctx->qpc_tbl.max_elements = ctx->qpc_count; ctx->qpc_tbl.max_elements = ctx->qpc_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL,
&ctx->qpc_tbl.max_elements, &ctx->qpc_tbl.max_elements,
BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX); PAGE_SIZE, HWQ_TYPE_CTX);
...@@ -348,7 +357,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, ...@@ -348,7 +357,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* MRW Tables */ /* MRW Tables */
ctx->mrw_tbl.max_elements = ctx->mrw_count; ctx->mrw_tbl.max_elements = ctx->mrw_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL,
&ctx->mrw_tbl.max_elements, &ctx->mrw_tbl.max_elements,
BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX); PAGE_SIZE, HWQ_TYPE_CTX);
...@@ -357,7 +366,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, ...@@ -357,7 +366,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* SRQ Tables */ /* SRQ Tables */
ctx->srqc_tbl.max_elements = ctx->srqc_count; ctx->srqc_tbl.max_elements = ctx->srqc_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL,
&ctx->srqc_tbl.max_elements, &ctx->srqc_tbl.max_elements,
BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX); PAGE_SIZE, HWQ_TYPE_CTX);
...@@ -366,7 +375,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, ...@@ -366,7 +375,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* CQ Tables */ /* CQ Tables */
ctx->cq_tbl.max_elements = ctx->cq_count; ctx->cq_tbl.max_elements = ctx->cq_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL,
&ctx->cq_tbl.max_elements, &ctx->cq_tbl.max_elements,
BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0, BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX); PAGE_SIZE, HWQ_TYPE_CTX);
...@@ -375,7 +384,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, ...@@ -375,7 +384,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* TQM Buffer */ /* TQM Buffer */
ctx->tqm_pde.max_elements = 512; ctx->tqm_pde.max_elements = 512;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL,
&ctx->tqm_pde.max_elements, sizeof(u64), &ctx->tqm_pde.max_elements, sizeof(u64),
0, PAGE_SIZE, HWQ_TYPE_CTX); 0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc) if (rc)
...@@ -386,7 +395,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, ...@@ -386,7 +395,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
continue; continue;
ctx->tqm_tbl[i].max_elements = ctx->qpc_count * ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
ctx->tqm_count[i]; ctx->tqm_count[i];
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0, rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL,
&ctx->tqm_tbl[i].max_elements, 1, &ctx->tqm_tbl[i].max_elements, 1,
0, PAGE_SIZE, HWQ_TYPE_CTX); 0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc) if (rc)
...@@ -424,7 +433,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, ...@@ -424,7 +433,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
/* TIM Buffer */ /* TIM Buffer */
ctx->tim_tbl.max_elements = ctx->qpc_count * 16; ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL,
&ctx->tim_tbl.max_elements, 1, &ctx->tim_tbl.max_elements, 1,
0, PAGE_SIZE, HWQ_TYPE_CTX); 0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc) if (rc)
......
...@@ -219,6 +219,12 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx) ...@@ -219,6 +219,12 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL; RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
} }
struct bnxt_qplib_sg_info {
struct scatterlist *sglist;
u32 nmap;
u32 npages;
};
#define to_bnxt_qplib(ptr, type, member) \ #define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member) container_of(ptr, type, member)
...@@ -227,7 +233,7 @@ struct bnxt_qplib_dev_attr; ...@@ -227,7 +233,7 @@ struct bnxt_qplib_dev_attr;
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq); void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
struct scatterlist *sl, int nmap, u32 *elements, struct bnxt_qplib_sg_info *sg_info, u32 *elements,
u32 elements_per_page, u32 aux, u32 pg_size, u32 elements_per_page, u32 aux, u32 pg_size,
enum bnxt_qplib_hwq_type hwq_type); enum bnxt_qplib_hwq_type hwq_type);
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid); void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
......
...@@ -684,7 +684,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, ...@@ -684,7 +684,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
mr->hwq.max_elements = pages; mr->hwq.max_elements = pages;
/* Use system PAGE_SIZE */ /* Use system PAGE_SIZE */
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL,
&mr->hwq.max_elements, &mr->hwq.max_elements,
PAGE_SIZE, 0, PAGE_SIZE, PAGE_SIZE, 0, PAGE_SIZE,
HWQ_TYPE_CTX); HWQ_TYPE_CTX);
...@@ -754,7 +754,7 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res, ...@@ -754,7 +754,7 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
return -ENOMEM; return -ENOMEM;
frpl->hwq.max_elements = pages; frpl->hwq.max_elements = pages;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL, 0, rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL,
&frpl->hwq.max_elements, PAGE_SIZE, 0, &frpl->hwq.max_elements, PAGE_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX); PAGE_SIZE, HWQ_TYPE_CTX);
if (!rc) if (!rc)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment