Commit 018cf599 authored by Devesh Sharma, committed by Doug Ledford

RDMA/bnxt_re: Fix RQE posting logic

This patch adds code to ring the RQ doorbell aggressively
so that the adapter can DMA the RQ buffers sooner, instead
of DMAing all WQEs in the post_recv WR list together at the
end of the post_recv verb.

Also use a spinlock to serialize RQ posting.
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 3fb755b3
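
For illustration, below is a minimal, self-contained C sketch of the threshold-batched doorbell pattern this patch introduces. The types and helpers here (struct recv_wr, post_one_rqe(), ring_rq_doorbell(), RQ_WQE_THRESHOLD) are hypothetical stand-ins, not the driver's real bnxt_re/qplib API; only the control flow mirrors the patch.

    /* Userspace sketch of threshold-batched RQ doorbell ringing. */
    #include <stdio.h>
    #include <stddef.h>

    #define RQ_WQE_THRESHOLD 32   /* mirrors BNXT_RE_RQ_WQE_THRESHOLD */

    struct recv_wr {
            int id;
            struct recv_wr *next;
    };

    /* Stand-in for bnxt_qplib_post_recv_db(): tells the adapter that
     * new RQEs are queued so it can start DMAing them. */
    static void ring_rq_doorbell(void)
    {
            printf("doorbell: adapter may DMA queued RQEs now\n");
    }

    /* Stand-in for queuing one RQE; always succeeds in this sketch. */
    static int post_one_rqe(struct recv_wr *wr)
    {
            printf("queued RQE %d\n", wr->id);
            return 0;
    }

    static int post_recv(struct recv_wr *wr)
    {
            unsigned int count = 0;
            int rc = 0;

            /* The driver takes qp->rq_lock here (spin_lock_irqsave)
             * to serialize RQ posting. */
            while (wr) {
                    rc = post_one_rqe(wr);
                    if (rc)
                            break;
                    /* Ring the doorbell every RQ_WQE_THRESHOLD entries
                     * so DMA starts early instead of waiting for the
                     * whole WR list to be posted. */
                    if (++count >= RQ_WQE_THRESHOLD) {
                            ring_rq_doorbell();
                            count = 0;
                    }
                    wr = wr->next;
            }
            /* Flush any remainder below the threshold. */
            if (count)
                    ring_rq_doorbell();
            /* The driver releases qp->rq_lock here. */
            return rc;
    }

    int main(void)
    {
            struct recv_wr wrs[70];

            for (int i = 0; i < 70; i++) {
                    wrs[i].id = i;
                    wrs[i].next = (i < 69) ? &wrs[i + 1] : NULL;
            }
            /* 70 WRs: two threshold doorbells plus one final ring,
             * instead of a single ring at the very end. */
            return post_recv(&wrs[0]);
    }
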
@@ -58,6 +58,8 @@
 #define BNXT_RE_UD_QP_HW_STALL		0x400000

+#define BNXT_RE_RQ_WQE_THRESHOLD	32
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
@@ -1249,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	qp->ib_qp.qp_num = qp->qplib_qp.id;
 	spin_lock_init(&qp->sq_lock);
+	spin_lock_init(&qp->rq_lock);

 	if (udata) {
 		struct bnxt_re_qp_resp resp;
@@ -2281,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	struct bnxt_qplib_swqe wqe;
 	int rc = 0, payload_sz = 0;
+	unsigned long flags;
+	u32 count = 0;

+	spin_lock_irqsave(&qp->rq_lock, flags);
 	while (wr) {
 		/* House keeping */
 		memset(&wqe, 0, sizeof(wqe));
@@ -2310,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
 			*bad_wr = wr;
 			break;
 		}
+
+		/* Ring DB if the RQEs posted reaches a threshold value */
+		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+			bnxt_qplib_post_recv_db(&qp->qplib_qp);
+			count = 0;
+		}
+
 		wr = wr->next;
 	}
-	bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+	if (count)
+		bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+	spin_unlock_irqrestore(&qp->rq_lock, flags);
+
 	return rc;
 }
@@ -74,6 +74,7 @@ struct bnxt_re_qp {
 	struct bnxt_re_dev	*rdev;
 	struct ib_qp		ib_qp;
 	spinlock_t		sq_lock;	/* protect sq */
+	spinlock_t		rq_lock;	/* protect rq */
 	struct bnxt_qplib_qp	qplib_qp;
 	struct ib_umem		*sumem;
 	struct ib_umem		*rumem;