Commit c9f18927 authored by Hyunchul Lee, committed by Steve French

ksmbd: smbd: create MR pool

Create a memory region pool because rdma_rw_ctx_init() uses
memory registration when it yields better performance than
using multiple SGE entries.
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 41dbda16
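The pool is sized from the largest possible RDMA read/write and the number of such operations that may be in flight at once. A minimal sketch of that arithmetic, for illustration only (example_mr_pool_size() is a hypothetical helper; DIV_ROUND_UP, min and PAGE_SIZE are the usual kernel macros):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/minmax.h>	/* min() */
#include <asm/page.h>		/* PAGE_SIZE */

/* Sketch only, not part of the patch: how the MR pool size used in the
 * hunks below is derived. */
static unsigned int example_mr_pool_size(unsigned int max_rw_bytes,
					 unsigned int max_fast_reg_pages,
					 unsigned int outstanding_rw_ops)
{
	/* +1 page covers a payload that is not page-aligned at both ends */
	unsigned int pages_per_rw = DIV_ROUND_UP(max_rw_bytes, PAGE_SIZE) + 1;
	/* one fast-registration MR maps at most max_fast_reg_pages pages */
	unsigned int pages_per_mr = min(pages_per_rw, max_fast_reg_pages);

	/* enough MRs to cover every outstanding RDMA read/write at once */
	return DIV_ROUND_UP(pages_per_rw, pages_per_mr) * outstanding_rw_ops;
}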
@@ -428,6 +428,7 @@ static void free_transport(struct smb_direct_transport *t)
 	if (t->qp) {
 		ib_drain_qp(t->qp);
+		ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
 		ib_destroy_qp(t->qp);
 	}
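The list destroyed here, qp->rdma_mrs, is the per-QP pool the RDMA R/W API draws from. A minimal sketch of how a pooled MR is borrowed and returned with the kernel's ib_mr_pool_get()/ib_mr_pool_put() helpers; the example_* wrappers are made up, and ksmbd itself goes through rdma_rw_ctx_init()/rdma_rw_ctx_destroy(), which use these internally:

#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

/* Sketch only: borrowing and returning an MR from the pool torn down above. */
static struct ib_mr *example_get_rdma_mr(struct ib_qp *qp)
{
	return ib_mr_pool_get(qp, &qp->rdma_mrs);	/* NULL once the pool is empty */
}

static void example_put_rdma_mr(struct ib_qp *qp, struct ib_mr *mr)
{
	ib_mr_pool_put(qp, &qp->rdma_mrs, mr);		/* make the MR available again */
}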
@@ -1708,7 +1709,9 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
 	cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
 	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
 	cap->max_inline_data = 0;
-	cap->max_rdma_ctxs = 0;
+	cap->max_rdma_ctxs =
+		rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
+		smb_direct_max_outstanding_rw_ops;
 	return 0;
 }
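A hedged worked example of the new max_rdma_ctxs value; the device numbers are made up, and rdma_rw_mr_factor() reports roughly how many MRs a single transfer of max_pages pages needs on the given device and port:

/* Hypothetical numbers, for illustration only:
 *   max_pages = 257          (a 1 MiB payload plus one unaligned page)
 *   pages one MR can map = 256 on this device
 *     -> rdma_rw_mr_factor() ~= DIV_ROUND_UP(257, 256) = 2 MRs per I/O
 *   smb_direct_max_outstanding_rw_ops = 8
 *     -> cap->max_rdma_ctxs = 2 * 8 = 16
 */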
@@ -1790,6 +1793,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
 {
 	int ret;
 	struct ib_qp_init_attr qp_attr;
+	int pages_per_rw;
 
 	t->pd = ib_alloc_pd(t->cm_id->device, 0);
 	if (IS_ERR(t->pd)) {
@@ -1837,6 +1841,23 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
 	t->qp = t->cm_id->qp;
 	t->cm_id->event_handler = smb_direct_cm_handler;
 
+	pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
+	if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
+		int pages_per_mr, mr_count;
+
+		pages_per_mr = min_t(int, pages_per_rw,
+				     t->cm_id->device->attrs.max_fast_reg_page_list_len);
+		mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
+			atomic_read(&t->rw_avail_ops);
+		ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
+				      IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
+		if (ret) {
+			pr_err("failed to init mr pool count %d pages %d\n",
+			       mr_count, pages_per_mr);
+			goto err;
+		}
+	}
+
 	return 0;
 err:
 	if (t->qp) {
...
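For reference, the ib_mr_pool_init() call from the hunk above with each argument annotated; the argument meanings follow include/rdma/mr_pool.h, and this repeats the patch's call rather than adding anything new:

ret = ib_mr_pool_init(t->qp,			/* QP whose private rdma_mrs list backs the pool */
		      &t->qp->rdma_mrs,		/* the list rdma_rw_ctx_init() takes MRs from */
		      mr_count,			/* MRs per I/O times outstanding R/W operations */
		      IB_MR_TYPE_MEM_REG,	/* fast-registration memory regions */
		      pages_per_mr,		/* maximum pages a single MR may map */
		      0);			/* no PI/metadata segments needed */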