Commit 219e6de6 authored by Arnd Bergmann, committed by Greg Kroah-Hartman

staging/lustre: use 64-bit times for request times

All request timestamps and deadlines in lustre are recorded in time_t
and timeval units, which overflow in 2038 on 32-bit systems.

In this patch, I'm converting them to time64_t and timespec64,
respectively. Unfortunately, this makes for a relatively large patch,
but I could not find an obvious way to split it up any further without
breaking the atomicity of the change.
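
To illustrate the pattern (a rough sketch, not code from the patch: the
struct and helper names below are invented, only the kernel time APIs are
real), get_seconds() callers move to ktime_get_real_seconds(), and
do_gettimeofday()/cfs_timeval_sub() pairs become ktime_get_real_ts64()
plus timespec64_sub(), with microseconds derived explicitly:

    #include <linux/time64.h>
    #include <linux/timekeeping.h>

    /* Illustrative only: mirrors the ptlrpc fields this patch converts. */
    struct example_req {
            struct timespec64 arrival;      /* was struct timeval */
            time64_t sent;                  /* was time_t */
    };

    /* Microsecond wait time, computed the way the converted
     * ptlrpc_server_handle_request() hunk below does it. */
    static long example_queue_wait_usec(const struct example_req *req)
    {
            struct timespec64 now, diff;

            ktime_get_real_ts64(&now);                /* was do_gettimeofday() */
            diff = timespec64_sub(now, req->arrival); /* was cfs_timeval_sub() */
            return diff.tv_sec * USEC_PER_SEC +
                   diff.tv_nsec / NSEC_PER_USEC;
    }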

Also unfortunately, this introduces two instances of div_u64_rem()
in the request path, which can be slow on 32-bit architectures. This
can probably be avoided by a larger restructuring of the code, but
it is unlikely that lustre is used in performance-critical setups
on 32-bit architectures, so it seems better to optimize for correctness
rather than speed here.
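
The two div_u64_rem() calls replace the open-coded
"index = (unsigned long)deadline % paa_size" slot lookup in
ptlrpc_at_add_timed() and ptlrpc_at_check_timed(): with a time64_t
deadline, the remainder is a 64-by-32 division, which 32-bit kernels
have to do through div_u64_rem() rather than a plain '%'. A minimal
sketch of that pattern (the helper name is invented for the example):

    #include <linux/math64.h>
    #include <linux/time64.h>

    /* Map a 64-bit deadline to a slot in the adaptive-timeout array. */
    static u32 example_at_slot(time64_t deadline, u32 paa_size)
    {
            u32 index;

            /* was: index = (unsigned long)deadline % paa_size; */
            div_u64_rem(deadline, paa_size, &index);
            return index;
    }

On 64-bit architectures div_u64_rem() reduces to an ordinary division,
so the overhead mentioned above is confined to 32-bit.
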
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8cc98071
@@ -74,7 +74,7 @@ struct ptlrpc_at_array {
struct list_head *paa_reqs_array; /** array to hold requests */
__u32 paa_size; /** the size of array */
__u32 paa_count; /** the total count of reqs */
time_t paa_deadline; /** the earliest deadline of reqs */
time64_t paa_deadline; /** the earliest deadline of reqs */
__u32 *paa_reqs_count; /** the count of reqs in each entry */
};
@@ -1440,7 +1440,7 @@ struct ptlrpc_request {
/* server-side... */
/** request arrival time */
struct timeval rq_arrival_time;
struct timespec64 rq_arrival_time;
/** separated reply state */
struct ptlrpc_reply_state *rq_reply_state;
/** incoming request buffer */
@@ -1477,18 +1477,18 @@ struct ptlrpc_request {
/**
* when request/reply sent (secs), or time when request should be sent
*/
time_t rq_sent;
time64_t rq_sent;
/** time for request really sent out */
time_t rq_real_sent;
time64_t rq_real_sent;
/** when request must finish. volatile
* so that servers' early reply updates to the deadline aren't
* kept in per-cpu cache */
volatile time_t rq_deadline;
volatile time64_t rq_deadline;
/** when req reply unlink must finish. */
time_t rq_reply_deadline;
time64_t rq_reply_deadline;
/** when req bulk unlink must finish. */
time_t rq_bulk_deadline;
time64_t rq_bulk_deadline;
/**
* service time estimate (secs)
* If the request is not served by this time, it is marked as timed out.
@@ -2323,7 +2323,7 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
desc = req->rq_bulk;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
req->rq_bulk_deadline > get_seconds())
req->rq_bulk_deadline > ktime_get_real_seconds())
return 1;
if (!desc)
@@ -2727,7 +2727,7 @@ static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > get_seconds())
req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_early;
}
@@ -2739,7 +2739,7 @@ static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > get_seconds())
req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_replied;
}
@@ -2749,7 +2749,7 @@ static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > get_seconds())
req->rq_reply_deadline > ktime_get_real_seconds())
return 1;
return req->rq_receiving_reply;
}
@@ -2761,7 +2761,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
spin_lock(&req->rq_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > get_seconds()) {
req->rq_reply_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
@@ -864,7 +864,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
req->rq_sent = get_seconds() + resends;
req->rq_sent = ktime_get_real_seconds() + resends;
}
/* It is important to obtain rpc_lock first (if applicable), so that
@@ -271,7 +271,7 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
req->rq_sent = get_seconds() + resends;
req->rq_sent = ktime_get_real_seconds() + resends;
}
level = LUSTRE_IMP_FULL;
resend:
@@ -1690,9 +1690,9 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply()) */
if (aa->aa_resends > new_req->rq_timeout)
new_req->rq_sent = get_seconds() + new_req->rq_timeout;
new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
else
new_req->rq_sent = get_seconds() + aa->aa_resends;
new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
new_req->rq_generation_set = 1;
new_req->rq_import_generation = request->rq_import_generation;
@@ -282,7 +282,7 @@ static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
{
unsigned int nl, oldnl;
struct imp_at *at;
time_t now = get_seconds();
time64_t now = ktime_get_real_seconds();
LASSERT(req->rq_import);
@@ -298,7 +298,7 @@ static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
D_ADAPTTO : D_WARNING,
"Reported service time %u > total measured time "
CFS_DURATION_T"\n", service_time,
cfs_time_sub(now, req->rq_sent));
(long)(now - req->rq_sent));
return;
}
@@ -343,7 +343,7 @@ static int unpack_reply(struct ptlrpc_request *req)
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
{
struct ptlrpc_request *early_req;
time_t olddl;
time64_t olddl;
int rc;
req->rq_early = 0;
@@ -378,14 +378,14 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
olddl = req->rq_deadline;
/* server assumes it now has rq_timeout from when it sent the
* early reply, so client should give it at least that long. */
req->rq_deadline = get_seconds() + req->rq_timeout +
req->rq_deadline = ktime_get_real_seconds() + req->rq_timeout +
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
"Early reply #%d, new deadline in " CFS_DURATION_T "s (" CFS_DURATION_T "s)",
"Early reply #%d, new deadline in %lds (%lds)",
req->rq_early_count,
cfs_time_sub(req->rq_deadline, get_seconds()),
cfs_time_sub(req->rq_deadline, olddl));
(long)(req->rq_deadline - ktime_get_real_seconds()),
(long)(req->rq_deadline - olddl));
return rc;
}
@@ -1203,7 +1203,7 @@ static int after_reply(struct ptlrpc_request *req)
struct obd_import *imp = req->rq_import;
struct obd_device *obd = req->rq_import->imp_obd;
int rc;
struct timeval work_start;
struct timespec64 work_start;
long timediff;
LASSERT(obd != NULL);
@@ -1253,7 +1253,7 @@ static int after_reply(struct ptlrpc_request *req)
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
time_t now = get_seconds();
time64_t now = ktime_get_real_seconds();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
spin_lock(&req->rq_lock);
@@ -1283,8 +1283,9 @@ static int after_reply(struct ptlrpc_request *req)
return 0;
}
do_gettimeofday(&work_start);
timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
ktime_get_real_ts64(&work_start);
timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
(work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
if (obd->obd_svc_stats != NULL) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
timediff);
@@ -1401,7 +1402,7 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
int rc;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
if (req->rq_sent && (req->rq_sent > get_seconds()) &&
if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
return 0;
@@ -1541,7 +1542,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
/* delayed resend - skip */
if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
req->rq_sent > get_seconds())
req->rq_sent > ktime_get_real_seconds())
continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
@@ -1902,14 +1903,13 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
req->rq_timedout = 1;
spin_unlock(&req->rq_lock);
DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
"/real "CFS_DURATION_T"]",
DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
req->rq_net_err ? "failed due to network error" :
((req->rq_real_sent == 0 ||
time_before((unsigned long)req->rq_real_sent, (unsigned long)req->rq_sent) ||
cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
req->rq_real_sent < req->rq_sent ||
req->rq_real_sent >= req->rq_deadline) ?
"timed out for sent delay" : "timed out for slow reply"),
req->rq_sent, req->rq_real_sent);
(s64)req->rq_sent, (s64)req->rq_real_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
@@ -1967,7 +1967,7 @@ int ptlrpc_expired_set(void *data)
{
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
time_t now = get_seconds();
time64_t now = ktime_get_real_seconds();
LASSERT(set != NULL);
@@ -2050,10 +2050,10 @@ EXPORT_SYMBOL(ptlrpc_interrupted_set);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
struct list_head *tmp;
time_t now = get_seconds();
time64_t now = ktime_get_real_seconds();
int timeout = 0;
struct ptlrpc_request *req;
int deadline;
time64_t deadline;
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
@@ -2364,7 +2364,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
*/
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0)
request->rq_reply_deadline = get_seconds()+LONG_UNLINK;
request->rq_reply_deadline = ktime_get_real_seconds()+LONG_UNLINK;
/*
* Nothing left to do.
@@ -2939,7 +2939,7 @@ static spinlock_t ptlrpc_last_xid_lock;
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
time_t now = get_seconds();
time64_t now = ktime_get_real_seconds();
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
@@ -3028,7 +3028,7 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
{
/* re-initialize the req */
req->rq_timeout = obd_timeout;
req->rq_sent = get_seconds();
req->rq_sent = ktime_get_real_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
req->rq_reply_deadline = req->rq_deadline;
req->rq_phase = RQ_PHASE_INTERPRET;
@@ -64,7 +64,7 @@ void request_out_callback(lnet_event_t *ev)
sptlrpc_request_out_callback(req);
spin_lock(&req->rq_lock);
req->rq_real_sent = get_seconds();
req->rq_real_sent = ktime_get_real_seconds();
if (ev->unlinked)
req->rq_req_unlink = 0;
@@ -246,7 +246,7 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
{
__u64 sec = req->rq_arrival_time.tv_sec;
__u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
__u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
__u64 new_seq;
/* set sequence ID for request and add it to history list,
@@ -327,7 +327,7 @@ void request_in_callback(lnet_event_t *ev)
req->rq_reqbuf = ev->md.start + ev->offset;
if (ev->type == LNET_EVENT_PUT && ev->status == 0)
req->rq_reqdata_len = ev->mlength;
do_gettimeofday(&req->rq_arrival_time);
ktime_get_real_ts64(&req->rq_arrival_time);
req->rq_peer = ev->initiator;
req->rq_self = ev->target.nid;
req->rq_rqbd = rqbd;
@@ -219,7 +219,7 @@ void ptlrpc_deactivate_import(struct obd_import *imp)
EXPORT_SYMBOL(ptlrpc_deactivate_import);
static unsigned int
ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now)
{
long dl;
@@ -244,7 +244,7 @@ ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
{
time_t now = get_seconds();
time64_t now = ktime_get_real_seconds();
struct list_head *tmp, *n;
struct ptlrpc_request *req;
unsigned int timeout = 0;
@@ -945,13 +945,13 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
* must be just as careful as the service's request
* parser. Currently I only print stuff here I know is OK
* to look at coz it was set up in request_in_callback()!!! */
seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%ld:%lds(%+lds) ",
seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
req->rq_history_seq, libcfs_nid2str(req->rq_self),
libcfs_id2str(req->rq_peer), req->rq_xid,
req->rq_reqlen, ptlrpc_rqphase2str(req),
req->rq_arrival_time.tv_sec,
req->rq_sent - req->rq_arrival_time.tv_sec,
req->rq_sent - req->rq_deadline);
(s64)req->rq_arrival_time.tv_sec,
(long)(req->rq_sent - req->rq_arrival_time.tv_sec),
(long)(req->rq_sent - req->rq_deadline));
if (svc->srv_ops.so_req_printer == NULL)
seq_printf(s, "\n");
else
@@ -252,7 +252,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
async && req->rq_bulk_deadline == 0)
req->rq_bulk_deadline = get_seconds() + LONG_UNLINK;
req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
return 1; /* never registered */
@@ -303,7 +303,7 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
int service_time = max_t(int, get_seconds() -
int service_time = max_t(int, ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec, 1);
if (!(flags & PTLRPC_REPLY_EARLY) &&
@@ -421,7 +421,7 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
if (unlikely(rc))
goto out;
req->rq_sent = get_seconds();
req->rq_sent = ktime_get_real_seconds();
rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
(rs->rs_difficult && !rs->rs_no_ack) ?
@@ -632,8 +632,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
do_gettimeofday(&request->rq_arrival_time);
request->rq_sent = get_seconds();
ktime_get_real_ts64(&request->rq_arrival_time);
request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
@@ -2344,7 +2344,7 @@ void _debug_req(struct ptlrpc_request *req,
va_start(args, fmt);
libcfs_debug_vmsg2(msgdata, fmt, args,
" req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %d dl " CFS_TIME_T " ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
" req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
req, req->rq_xid, req->rq_transno,
req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
@@ -2356,8 +2356,8 @@ void _debug_req(struct ptlrpc_request *req,
libcfs_nid2str(nid),
req->rq_request_portal, req->rq_reply_portal,
req->rq_reqlen, req->rq_replen,
req->rq_early_count, req->rq_timedout,
req->rq_deadline,
req->rq_early_count, (s64)req->rq_timedout,
(s64)req->rq_deadline,
atomic_read(&req->rq_refcount),
DEBUG_REQ_FLAGS(req),
req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
@@ -1160,7 +1160,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
}
/* Set timer for closest deadline */
next = (__s32)(array->paa_deadline - get_seconds() -
next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
ptlrpc_at_timer((unsigned long)svcpt);
@@ -1191,7 +1191,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
spin_lock(&svcpt->scp_at_lock);
LASSERT(list_empty(&req->rq_timed_list));
index = (unsigned long)req->rq_deadline % array->paa_size;
div_u64_rem(req->rq_deadline, array->paa_size, &index);
if (array->paa_reqs_count[index] > 0) {
/* latest rpcs will have the latest deadlines in the list,
* so search backward. */
@@ -1250,8 +1250,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_request *reqcopy;
struct lustre_msg *reqmsg;
long olddl = req->rq_deadline - get_seconds();
time_t newdl;
long olddl = req->rq_deadline - ktime_get_real_seconds();
time64_t newdl;
int rc;
/* deadline is when the client expects us to reply, margin is the
@@ -1293,21 +1293,21 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
/* Fake our processing time into the future to ask the clients
* for some extra amount of time */
at_measured(&svcpt->scp_at_estimate, at_extra +
get_seconds() -
ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec);
/* Check to see if we've actually increased the deadline -
* we may be past adaptive_max */
if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate)) {
DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%ld), not sending early reply\n",
DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
olddl, req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate) -
get_seconds());
ktime_get_real_seconds());
return -ETIMEDOUT;
}
}
newdl = get_seconds() + at_get(&svcpt->scp_at_estimate);
newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate);
reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
if (reqcopy == NULL)
@@ -1390,8 +1390,8 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
struct ptlrpc_request *rq, *n;
struct list_head work_list;
__u32 index, count;
time_t deadline;
time_t now = get_seconds();
time64_t deadline;
time64_t now = ktime_get_real_seconds();
long delay;
int first, counter = 0;
@@ -1421,7 +1421,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
server will take. Send early replies to everyone expiring soon. */
INIT_LIST_HEAD(&work_list);
deadline = -1;
index = (unsigned long)array->paa_deadline % array->paa_size;
div_u64_rem(array->paa_deadline, array->paa_size, &index);
count = array->paa_count;
while (count > 0) {
count -= array->paa_reqs_count[index];
@@ -1463,7 +1463,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
chance to send early replies */
LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
svcpt->scp_service->srv_name);
CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=" CFS_DURATION_T "(jiff)\n",
CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
counter, svcpt->scp_nreqs_incoming,
svcpt->scp_nreqs_active,
at_get(&svcpt->scp_at_estimate), delay);
@@ -1834,9 +1834,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
}
/* req_in handling should/must be fast */
if (get_seconds() - req->rq_arrival_time.tv_sec > 5)
if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
cfs_time_sub(get_seconds(),
(long)(ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec));
/* Set rpc server deadline and add it to the timed list */
@@ -1878,9 +1878,12 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
{
struct ptlrpc_service *svc = svcpt->scp_service;
struct ptlrpc_request *request;
struct timeval work_start;
struct timeval work_end;
long timediff;
struct timespec64 work_start;
struct timespec64 work_end;
struct timespec64 timediff;
struct timespec64 arrived;
unsigned long timediff_usecs;
unsigned long arrived_usecs;
int rc;
int fail_opc = 0;
@@ -1903,12 +1906,13 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
do_gettimeofday(&work_start);
timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,
NULL);
ktime_get_real_ts64(&work_start);
timediff = timespec64_sub(work_start, request->rq_arrival_time);
timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
timediff.tv_nsec / NSEC_PER_USEC;
if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
timediff);
timediff_usecs);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
svcpt->scp_nreqs_incoming);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
@@ -1935,17 +1939,18 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
ptlrpc_update_export_timer(request->rq_export,
timediff_usecs >> 19);
}
/* Discard requests queued for longer than the deadline.
The deadline is increased if we send an early reply. */
if (get_seconds() > request->rq_deadline) {
if (ktime_get_real_seconds() > request->rq_deadline) {
DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n",
libcfs_id2str(request->rq_peer),
cfs_time_sub(request->rq_deadline,
(long)(request->rq_deadline -
request->rq_arrival_time.tv_sec),
cfs_time_sub(get_seconds(),
(long)(ktime_get_real_seconds() -
request->rq_deadline));
goto put_conn;
}
@@ -1971,19 +1976,22 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
lu_context_exit(&request->rq_session);
lu_context_fini(&request->rq_session);
if (unlikely(get_seconds() > request->rq_deadline)) {
if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
DEBUG_REQ(D_WARNING, request,
"Request took longer than estimated ("
CFS_DURATION_T":"CFS_DURATION_T
"s); client may timeout.",
cfs_time_sub(request->rq_deadline,
request->rq_arrival_time.tv_sec),
cfs_time_sub(get_seconds(),
request->rq_deadline));
}
do_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
"Request took longer than estimated (%lld:%llds); "
"client may timeout.",
(s64)request->rq_deadline -
request->rq_arrival_time.tv_sec,
(s64)ktime_get_real_seconds() - request->rq_deadline);
}
ktime_get_real_ts64(&work_end);
timediff = timespec64_sub(work_end, work_start);
timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
timediff.tv_nsec / NSEC_PER_USEC;
arrived = timespec64_sub(work_end, request->rq_arrival_time);
arrived_usecs = arrived.tv_sec * USEC_PER_SEC +
arrived.tv_nsec / NSEC_PER_USEC;
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
current_comm(),
(request->rq_export ?
@@ -1994,8 +2002,8 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg),
timediff,
cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
timediff_usecs,
arrived_usecs,
(request->rq_repmsg ?
lustre_msg_get_transno(request->rq_repmsg) :
request->rq_transno),
@@ -2009,16 +2017,15 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
LASSERT(opc < LUSTRE_MAX_OPCODES);
lprocfs_counter_add(svc->srv_stats,
opc + EXTRA_MAX_OPCODES,
timediff);
timediff_usecs);
}
}
if (unlikely(request->rq_early_count)) {
DEBUG_REQ(D_ADAPTTO, request,
"sent %d early replies before finishing in "
CFS_DURATION_T"s",
"sent %d early replies before finishing in %llds",
request->rq_early_count,
cfs_time_sub(work_end.tv_sec,
request->rq_arrival_time.tv_sec));
(s64)work_end.tv_sec -
request->rq_arrival_time.tv_sec);
}
out_req:
@@ -3057,10 +3064,10 @@ EXPORT_SYMBOL(ptlrpc_unregister_service);
static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_request *request = NULL;
struct timeval right_now;
long timediff;
struct timespec64 right_now;
struct timespec64 timediff;
do_gettimeofday(&right_now);
ktime_get_real_ts64(&right_now);
spin_lock(&svcpt->scp_req_lock);
/* How long has the next entry been waiting? */
@@ -3074,13 +3081,13 @@ static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
return 0;
}
timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
timediff = timespec64_sub(right_now, request->rq_arrival_time);
spin_unlock(&svcpt->scp_req_lock);
if ((timediff / ONE_MILLION) >
if ((timediff.tv_sec) >
(AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
CERROR("%s: unhealthy - request has been waiting %lds\n",
svcpt->scp_service->srv_name, timediff / ONE_MILLION);
CERROR("%s: unhealthy - request has been waiting %llds\n",
svcpt->scp_service->srv_name, (s64)timediff.tv_sec);
return -1;
}