Commit d379eaa8 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Name MR trace events consistently

Clean up the names of trace events related to MRs so that it's
easy to enable these with a glob.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 61da886b
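
Because all five MR tracepoints now share the xprtrdma_mr_ prefix, the whole group can be enabled with a single wildcard rather than by naming each event. A minimal userspace sketch is below; the /sys/kernel/tracing mount point and the "rpcrdma" trace system name are assumptions based on common tracefs setups and on the header modified here, not something this commit adds.

/* Sketch only: enable every renamed MR trace event with one glob.
 * Assumes tracefs is mounted at /sys/kernel/tracing (on some systems it
 * is under /sys/kernel/debug/tracing) and that these events belong to
 * the "rpcrdma" trace system.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/set_event", "w");

	if (!f) {
		perror("set_event");
		return EXIT_FAILURE;
	}
	/* One pattern now covers localinv, map, unmap, remoteinv and recycle. */
	fprintf(f, "rpcrdma:xprtrdma_mr_*\n");
	fclose(f);
	return EXIT_SUCCESS;
}

The same effect can be had from a shell by echoing the same pattern into set_event.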
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -263,7 +263,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
 );
 
 #define DEFINE_MR_EVENT(name) \
-		DEFINE_EVENT(xprtrdma_mr, name, \
+		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
 				TP_PROTO( \
 					const struct rpcrdma_mr *mr \
 				), \
@@ -651,11 +651,11 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
 
-DEFINE_MR_EVENT(xprtrdma_localinv);
-DEFINE_MR_EVENT(xprtrdma_dma_map);
-DEFINE_MR_EVENT(xprtrdma_dma_unmap);
-DEFINE_MR_EVENT(xprtrdma_remoteinv);
-DEFINE_MR_EVENT(xprtrdma_mr_recycle);
+DEFINE_MR_EVENT(localinv);
+DEFINE_MR_EVENT(map);
+DEFINE_MR_EVENT(unmap);
+DEFINE_MR_EVENT(remoteinv);
+DEFINE_MR_EVENT(recycle);
 
 /**
  ** Reply events
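
For context on the hunk above: DEFINE_MR_EVENT() now pastes the fixed xprtrdma_mr_ prefix onto its argument, so DEFINE_MR_EVENT(unmap) defines an event named xprtrdma_mr_unmap together with a trace_xprtrdma_mr_unmap() helper, which is why the call sites change in the files below. The following is a standalone illustration of that token-pasting scheme, not kernel code; the printf stubs stand in for the generated tracepoints.

#include <stdio.h>

struct rpcrdma_mr;	/* opaque placeholder for this sketch */

#define DEFINE_MR_EVENT(name)						\
	static void trace_xprtrdma_mr_##name(const struct rpcrdma_mr *mr) \
	{								\
		printf("event fired: xprtrdma_mr_" #name " (%p)\n",	\
		       (void *)mr);					\
	}

DEFINE_MR_EVENT(localinv)
DEFINE_MR_EVENT(map)
DEFINE_MR_EVENT(unmap)
DEFINE_MR_EVENT(remoteinv)
DEFINE_MR_EVENT(recycle)

int main(void)
{
	/* All five helpers now share the xprtrdma_mr_ prefix. */
	trace_xprtrdma_mr_map(NULL);
	trace_xprtrdma_mr_unmap(NULL);
	trace_xprtrdma_mr_localinv(NULL);
	trace_xprtrdma_mr_remoteinv(NULL);
	trace_xprtrdma_mr_recycle(NULL);
	return 0;
}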
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -97,7 +97,7 @@ fmr_mr_recycle_worker(struct work_struct *work)
 	trace_xprtrdma_mr_recycle(mr);
 
-	trace_xprtrdma_dma_unmap(mr);
+	trace_xprtrdma_mr_unmap(mr);
 	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
 			mr->mr_sg, mr->mr_nents, mr->mr_dir);
@@ -234,7 +234,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 			     mr->mr_sg, i, mr->mr_dir);
 	if (!mr->mr_nents)
 		goto out_dmamap_err;
-	trace_xprtrdma_dma_map(mr);
+	trace_xprtrdma_mr_map(mr);
 
 	for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
 		dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
@@ -295,7 +295,7 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
 	list_for_each_entry(mr, mrs, mr_list) {
 		dprintk("RPC: %s: unmapping fmr %p\n",
 			__func__, &mr->fmr);
-		trace_xprtrdma_localinv(mr);
+		trace_xprtrdma_mr_localinv(mr);
 		list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
 	}
 	r_xprt->rx_stats.local_inv_needed++;
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -123,7 +123,7 @@ frwr_mr_recycle_worker(struct work_struct *work)
 	trace_xprtrdma_mr_recycle(mr);
 
 	if (state != FRWR_FLUSHED_LI) {
-		trace_xprtrdma_dma_unmap(mr);
+		trace_xprtrdma_mr_unmap(mr);
 		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
 				mr->mr_sg, mr->mr_nents, mr->mr_dir);
 	}
@@ -384,7 +384,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
 	if (!mr->mr_nents)
 		goto out_dmamap_err;
-	trace_xprtrdma_dma_map(mr);
+	trace_xprtrdma_mr_map(mr);
 
 	ibmr = frwr->fr_mr;
 	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
@@ -466,7 +466,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
 	list_for_each_entry(mr, mrs, mr_list)
 		if (mr->mr_handle == rep->rr_inv_rkey) {
 			list_del_init(&mr->mr_list);
-			trace_xprtrdma_remoteinv(mr);
+			trace_xprtrdma_mr_remoteinv(mr);
 			mr->frwr.fr_state = FRWR_IS_INVALID;
 			rpcrdma_mr_unmap_and_put(mr);
 			break;	/* only one invalidated MR per RPC */
@@ -503,7 +503,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
 		mr->frwr.fr_state = FRWR_IS_INVALID;
 
 		frwr = &mr->frwr;
-		trace_xprtrdma_localinv(mr);
+		trace_xprtrdma_mr_localinv(mr);
 
 		frwr->fr_cqe.done = frwr_wc_localinv;
 		last = &frwr->fr_invwr;
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1288,7 +1288,7 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
 {
 	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
 
-	trace_xprtrdma_dma_unmap(mr);
+	trace_xprtrdma_mr_unmap(mr);
 	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
 			mr->mr_sg, mr->mr_nents, mr->mr_dir);
 	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);