Commit 0dbb4108 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Unclutter struct rpcrdma_mr_seg

Clean ups:
 - make it obvious that the rl_mw field is a pointer -- allocated
   separately, not as part of struct rpcrdma_mr_seg
 - promote "struct {} frmr;" to a named type
 - promote the state enum to a named type
 - name the MW state field the same way other fields in
   rpcrdma_mw are named
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 539431a4
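For quick orientation before reading the diff, here is a minimal, compilable sketch of the named types this patch introduces and of the call-site pattern they enable. The ib_* forward declarations are stubs standing in for <rdma/ib_verbs.h>, mw_list is omitted, and the mark_valid() helper is hypothetical, so this is an illustration rather than the kernel code itself.

/* Stubs: the real definitions live in <rdma/ib_verbs.h>. */
struct ib_fast_reg_page_list;
struct ib_mr;
struct ib_fmr;

/* Named state enum (was an anonymous enum buried in rpcrdma_mr_seg). */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
};

/* Named FRMR type (was an anonymous "struct {} frmr"). */
struct rpcrdma_frmr {
	struct ib_fast_reg_page_list	*fr_pgl;
	struct ib_mr			*fr_mr;
	enum rpcrdma_frmr_state		fr_state;
};

struct rpcrdma_mw {
	union {
		struct ib_fmr		*fmr;
		struct rpcrdma_frmr	frmr;
	} r;
	/* mw_list omitted here; it needs <linux/list.h> */
};

/* Hypothetical helper: call sites hoist the long pointer chain into
 * short-lived locals instead of repeating
 * seg1->mr_chunk.rl_mw->r.frmr.* everywhere. */
void mark_valid(struct rpcrdma_mw *mw)
{
	struct rpcrdma_frmr *frmr = &mw->r.frmr;

	frmr->fr_state = FRMR_IS_VALID;
}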
@@ -156,9 +156,9 @@ rpcrdma_sendcq_process_wc(struct ib_wc *wc)
 		return;
 
 	if (wc->opcode == IB_WC_FAST_REG_MR)
-		frmr->r.frmr.state = FRMR_IS_VALID;
+		frmr->r.frmr.fr_state = FRMR_IS_VALID;
 	else if (wc->opcode == IB_WC_LOCAL_INV)
-		frmr->r.frmr.state = FRMR_IS_INVALID;
+		frmr->r.frmr.fr_state = FRMR_IS_INVALID;
 }
 
 static int
@@ -1496,6 +1496,9 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 			struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_mr_seg *seg1 = seg;
+	struct rpcrdma_mw *mw = seg1->mr_chunk.rl_mw;
+	struct rpcrdma_frmr *frmr = &mw->r.frmr;
+	struct ib_mr *mr = frmr->fr_mr;
 	struct ib_send_wr invalidate_wr, frmr_wr, *bad_wr, *post_wr;
 	u8 key;
@@ -1515,8 +1518,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 		rpcrdma_map_one(ia, seg, writing);
 		pa = seg->mr_dma;
 		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
-			seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->
-				page_list[page_no++] = pa;
+			frmr->fr_pgl->page_list[page_no++] = pa;
 			pa += PAGE_SIZE;
 		}
 		len += seg->mr_len;
@@ -1528,20 +1530,18 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 			break;
 	}
 	dprintk("RPC: %s: Using frmr %p to map %d segments\n",
-		__func__, seg1->mr_chunk.rl_mw, i);
+		__func__, mw, i);
 
-	if (unlikely(seg1->mr_chunk.rl_mw->r.frmr.state == FRMR_IS_VALID)) {
+	if (unlikely(frmr->fr_state == FRMR_IS_VALID)) {
 		dprintk("RPC: %s: frmr %x left valid, posting invalidate.\n",
-			__func__,
-			seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey);
+			__func__, mr->rkey);
 		/* Invalidate before using. */
 		memset(&invalidate_wr, 0, sizeof invalidate_wr);
-		invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
+		invalidate_wr.wr_id = (unsigned long)(void *)mw;
 		invalidate_wr.next = &frmr_wr;
 		invalidate_wr.opcode = IB_WR_LOCAL_INV;
 		invalidate_wr.send_flags = IB_SEND_SIGNALED;
-		invalidate_wr.ex.invalidate_rkey =
-			seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+		invalidate_wr.ex.invalidate_rkey = mr->rkey;
 		DECR_CQCOUNT(&r_xprt->rx_ep);
 		post_wr = &invalidate_wr;
 	} else
@@ -1549,11 +1549,11 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 	/* Prepare FRMR WR */
 	memset(&frmr_wr, 0, sizeof frmr_wr);
-	frmr_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
+	frmr_wr.wr_id = (unsigned long)(void *)mw;
 	frmr_wr.opcode = IB_WR_FAST_REG_MR;
 	frmr_wr.send_flags = IB_SEND_SIGNALED;
 	frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
-	frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
+	frmr_wr.wr.fast_reg.page_list = frmr->fr_pgl;
 	frmr_wr.wr.fast_reg.page_list_len = page_no;
 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
@@ -1563,13 +1563,13 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 	}
 	/* Bump the key */
-	key = (u8)(seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey & 0x000000FF);
-	ib_update_fast_reg_key(seg1->mr_chunk.rl_mw->r.frmr.fr_mr, ++key);
+	key = (u8)(mr->rkey & 0x000000FF);
+	ib_update_fast_reg_key(mr, ++key);
 	frmr_wr.wr.fast_reg.access_flags = (writing ?
 				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 				IB_ACCESS_REMOTE_READ);
-	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+	frmr_wr.wr.fast_reg.rkey = mr->rkey;
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 	rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
@@ -1579,7 +1579,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 			" status %i\n", __func__, rc);
 		goto out_err;
 	} else {
-		seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+		seg1->mr_rkey = mr->rkey;
 		seg1->mr_base = seg1->mr_dma + pageoff;
 		seg1->mr_nsegs = i;
 		seg1->mr_len = len;
@@ -145,6 +145,38 @@ struct rpcrdma_rep {
 	char	rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */
 };
 
+/*
+ * struct rpcrdma_mw - external memory region metadata
+ *
+ * An external memory region is any buffer or page that is registered
+ * on the fly (ie, not pre-registered).
+ *
+ * Each rpcrdma_buffer has a list of these anchored in rb_mws. During
+ * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
+ * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
+ * track of registration metadata while each RPC is pending.
+ * rpcrdma_deregister_external() uses this metadata to unmap and
+ * release these resources when an RPC is complete.
+ */
+enum rpcrdma_frmr_state {
+	FRMR_IS_INVALID,	/* ready to be used */
+	FRMR_IS_VALID,		/* in use */
+};
+
+struct rpcrdma_frmr {
+	struct ib_fast_reg_page_list	*fr_pgl;
+	struct ib_mr			*fr_mr;
+	enum rpcrdma_frmr_state		fr_state;
+};
+
+struct rpcrdma_mw {
+	union {
+		struct ib_fmr		*fmr;
+		struct rpcrdma_frmr	frmr;
+	} r;
+	struct list_head	mw_list;
+};
+
 /*
  * struct rpcrdma_req -- structure central to the request/reply sequence.
  *
@@ -172,17 +204,7 @@ struct rpcrdma_rep {
 struct rpcrdma_mr_seg {		/* chunk descriptors */
 	union {				/* chunk memory handles */
 		struct ib_mr	*rl_mr;		/* if registered directly */
-		struct rpcrdma_mw {	/* if registered from region */
-			union {
-				struct ib_fmr	*fmr;
-				struct {
-					struct ib_fast_reg_page_list *fr_pgl;
-					struct ib_mr *fr_mr;
-					enum { FRMR_IS_INVALID, FRMR_IS_VALID } state;
-				} frmr;
-			} r;
-			struct list_head mw_list;
-		} *rl_mw;
+		struct rpcrdma_mw *rl_mw;	/* if registered from region */
 	} mr_chunk;
 	u64		mr_base;	/* registration result */
 	u32		mr_rkey;	/* registration result */
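The comment added to the header above describes rb_mws as a list of rpcrdma_mw entries that rpcrdma_buffer_get() hands out per segment and that are recycled once the RPC completes. Below is a hedged sketch of that get/put pattern using the kernel's <linux/list.h> API; the mw_pool container and the helper names are hypothetical stand-ins for the rpcrdma_buffer code paths, not part of this patch.

#include <linux/list.h>

/* Hypothetical container: the real anchor is rpcrdma_buffer::rb_mws. */
struct mw_pool {
	struct list_head rb_mws;
};

struct rpcrdma_mw *mw_pool_get(struct mw_pool *pool)
{
	struct rpcrdma_mw *mw;

	if (list_empty(&pool->rb_mws))
		return NULL;		/* no registration metadata free */
	mw = list_first_entry(&pool->rb_mws, struct rpcrdma_mw, mw_list);
	list_del(&mw->mw_list);		/* now owned by the pending RPC */
	return mw;
}

void mw_pool_put(struct mw_pool *pool, struct rpcrdma_mw *mw)
{
	list_add(&mw->mw_list, &pool->rb_mws);	/* RPC complete: recycle */
}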