Commit edef1297 authored by Chuck Lever, committed by Anna Schumaker

SUNRPC: serialize iostats updates

Occasionally mountstats reports a negative retransmission rate.
Ensure that two RPCs completing concurrently don't confuse the sums
in the transport's op_metrics array.

Since pNFS filelayout can invoke rpc_count_iostats() on another
transport from xprt_release(), we can't rely on simply holding the
transport_lock in xprt_release(). There's nothing for it but hard
serialization. One spin lock per RPC operation should make this as
painless as it can be.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 5d01410f
@@ -27,10 +27,13 @@
 #include <linux/seq_file.h>
 #include <linux/ktime.h>
+#include <linux/spinlock.h>

 #define RPC_IOSTATS_VERS "1.0"

 struct rpc_iostats {
+	spinlock_t	om_lock;
+
 	/*
 	 * These counters give an idea about how many request
 	 * transmissions are required, on average, to complete that
...
@@ -116,7 +116,15 @@ EXPORT_SYMBOL_GPL(svc_seq_show);
  */
 struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
 {
-	return kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
+	struct rpc_iostats *stats;
+	int i;
+
+	stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL);
+	if (stats) {
+		for (i = 0; i < clnt->cl_maxproc; i++)
+			spin_lock_init(&stats[i].om_lock);
+	}
+	return stats;
 }
 EXPORT_SYMBOL_GPL(rpc_alloc_iostats);
@@ -135,20 +143,21 @@ EXPORT_SYMBOL_GPL(rpc_free_iostats);
  * rpc_count_iostats - tally up per-task stats
  * @task: completed rpc_task
  * @stats: array of stat structures
- *
- * Relies on the caller for serialization.
  */
 void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_iostats *op_metrics;
-	ktime_t delta;
+	ktime_t delta, now;

 	if (!stats || !req)
 		return;
+
+	now = ktime_get();
 	op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];

+	spin_lock(&op_metrics->om_lock);
+
 	op_metrics->om_ops++;
 	op_metrics->om_ntrans += req->rq_ntrans;
 	op_metrics->om_timeouts += task->tk_timeouts;
@@ -161,8 +170,10 @@ void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)

 	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

-	delta = ktime_sub(ktime_get(), task->tk_start);
+	delta = ktime_sub(now, task->tk_start);
 	op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta);
+
+	spin_unlock(&op_metrics->om_lock);
 }
 EXPORT_SYMBOL_GPL(rpc_count_iostats);
...
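
The following is an illustrative userspace sketch, not part of the patch: it mimics the per-operation locking scheme the commit message describes, using pthread spinlocks in place of the kernel's spinlock_t and simplified counter names (op_metrics, count_iostats, and worker are invented for this example). Two threads account completed "RPCs" against the same metrics slot; the per-slot lock keeps om_ops and om_ntrans consistent with each other, so a retransmission count derived as om_ntrans - om_ops cannot go negative.

/* Build with: gcc -O2 -pthread sketch.c -o sketch (hypothetical example) */
#include <pthread.h>
#include <stdio.h>

struct op_metrics {
	pthread_spinlock_t om_lock;	/* stands in for the kernel's om_lock */
	unsigned long om_ops;		/* completed operations */
	unsigned long om_ntrans;	/* transmissions; always >= om_ops */
};

static struct op_metrics metrics;	/* one slot; the kernel keeps one per RPC procedure */

/* Analogue of rpc_count_iostats(): update the slot under its own lock. */
static void count_iostats(unsigned long ntrans)
{
	pthread_spin_lock(&metrics.om_lock);
	metrics.om_ops++;
	metrics.om_ntrans += ntrans;
	pthread_spin_unlock(&metrics.om_lock);
}

/* Two of these run concurrently, like two RPCs completing at once. */
static void *worker(void *arg)
{
	for (int i = 0; i < 1000000; i++)
		count_iostats(1 + (i & 1));	/* every other op "retransmits" once */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_spin_init(&metrics.om_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("ops=%lu ntrans=%lu retrans=%lu\n",
	       metrics.om_ops, metrics.om_ntrans,
	       metrics.om_ntrans - metrics.om_ops);
	return 0;
}

Without the lock around the two increments, a reader could observe om_ops ahead of om_ntrans and compute a negative retransmission figure, which is the symptom mountstats reported.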