Commit df3c30f6 authored by Jian Yu, committed by Greg Kroah-Hartman

staging: lustre: replace direct HZ access with kernel APIs

On some customers' systems, the kernel was compiled with HZ defined as
100 instead of 1000, which improves performance for HPC applications.
However, to use these systems with Lustre, customers have to rebuild
Lustre against that kernel because Lustre uses the defined constant HZ
directly.

Since kernel 2.6.21, several HZ-independent timing APIs have been
available as non-inline functions, and these can be used in the Lustre
code to replace direct HZ access.

These kernel APIs include:
  jiffies_to_msecs()
  jiffies_to_usecs()
  jiffies_to_timespec()
  msecs_to_jiffies()
  usecs_to_jiffies()
  timespec_to_jiffies()

Here are some sample replacements:
  HZ            -> msecs_to_jiffies(MSEC_PER_SEC)
  n * HZ        -> msecs_to_jiffies(n * MSEC_PER_SEC)
  HZ / n        -> msecs_to_jiffies(MSEC_PER_SEC / n)
  n / HZ        -> jiffies_to_msecs(n) / MSEC_PER_SEC
  n / HZ * 1000 -> jiffies_to_msecs(n)
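
As a further illustration (not taken from this patch), the same pattern
can be applied to a plain schedule_timeout() call.  The function name
example_quarter_timeout_sleep and its timeout_sec parameter below are
hypothetical, used only to show the conversion:

  #include <linux/jiffies.h>
  #include <linux/sched.h>
  #include <linux/time.h>

  /* Sleep for roughly timeout_sec / 4 seconds without referencing HZ. */
  static void example_quarter_timeout_sleep(unsigned int timeout_sec)
  {
          /* old style: schedule_timeout(timeout_sec * HZ / 4); */
          unsigned long to = msecs_to_jiffies(timeout_sec * MSEC_PER_SEC) / 4;

          set_current_state(TASK_INTERRUPTIBLE);
          schedule_timeout(to);
  }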

This patch replaces the direct HZ access in lustre modules.
Signed-off-by: Jian Yu <jian.yu@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5443
Reviewed-on: http://review.whamcloud.com/12052
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: James Simmons <uja.ornl@gmail.com>
Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3cbbf5ed
@@ -880,7 +880,8 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
 force_wait:
         if (force)
-                lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+                lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
+                                  MSEC_PER_SEC) / 4, NULL, NULL);
         rc = l_wait_event(ns->ns_waitq,
                           atomic_read(&ns->ns_bref) == 0, &lwi);
@@ -1154,7 +1154,8 @@ static int ll_statahead_thread(void *arg)
          */
         while (sai->sai_sent != sai->sai_replied) {
                 /* in case we're not woken up, timeout wait */
-                lwi = LWI_TIMEOUT(HZ >> 3, NULL, NULL);
+                lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3),
+                                  NULL, NULL);
                 l_wait_event(thread->t_ctl_waitq,
                              sai->sai_sent == sai->sai_replied, &lwi);
         }
@@ -549,8 +549,9 @@ static int mgc_requeue_thread(void *data)
                  * caused the lock revocation to finish its setup, plus some
                  * random so everyone doesn't try to reconnect at once.
                  */
-                to = MGC_TIMEOUT_MIN_SECONDS * HZ;
-                to += rand * HZ / 100; /* rand is centi-seconds */
+                to = msecs_to_jiffies(MGC_TIMEOUT_MIN_SECONDS * MSEC_PER_SEC);
+                /* rand is centi-seconds */
+                to += msecs_to_jiffies(rand * MSEC_PER_SEC / 100);
                 lwi = LWI_TIMEOUT(to, NULL, NULL);
                 l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
                              &lwi);
@@ -499,7 +499,7 @@ static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
                        newctx, newctx->cc_flags);
                 set_current_state(TASK_INTERRUPTIBLE);
-                schedule_timeout(HZ);
+                schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
         } else {
                 /*
                  * it's possible newctx == oldctx if we're switching
@@ -718,8 +718,9 @@ int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
                 req->rq_restart = 0;
                 spin_unlock(&req->rq_lock);
-                lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
-                                       ctx_refresh_interrupt, req);
+                lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
+                                       ctx_refresh_timeout, ctx_refresh_interrupt,
+                                       req);
                 rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
                 /*
@@ -139,7 +139,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
                    "cache missing: %lu\n"
                    "low free mark: %lu\n"
                    "max waitqueue depth: %u\n"
-                   "max wait time: %ld/%u\n",
+                   "max wait time: %ld/%lu\n",
                    totalram_pages,
                    PAGES_PER_POOL,
                    page_pools.epp_max_pages,
@@ -158,7 +158,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
                    page_pools.epp_st_lowfree,
                    page_pools.epp_st_max_wqlen,
                    page_pools.epp_st_max_wait,
-                   HZ);
+                   msecs_to_jiffies(MSEC_PER_SEC));
         spin_unlock(&page_pools.epp_lock);
@@ -432,12 +432,13 @@ void sptlrpc_enc_pool_fini(void)
         if (page_pools.epp_st_access > 0) {
                 CDEBUG(D_SEC,
-                       "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%d\n",
+                       "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld\n",
                        page_pools.epp_st_max_pages, page_pools.epp_st_grows,
                        page_pools.epp_st_grow_fails,
                        page_pools.epp_st_shrinks, page_pools.epp_st_access,
                        page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
-                       page_pools.epp_st_max_wait, HZ);
+                       page_pools.epp_st_max_wait,
+                       msecs_to_jiffies(MSEC_PER_SEC));
         }
 }
@@ -182,7 +182,8 @@ static int sec_gc_main(void *arg)
                 /* check ctx list again before sleep */
                 sec_process_ctx_list();
-                lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+                lwi = LWI_TIMEOUT(msecs_to_jiffies(SEC_GC_INTERVAL * MSEC_PER_SEC),
+                                  NULL, NULL);
                 l_wait_event(thread->t_ctl_waitq,
                              thread_is_stopping(thread) ||
                              thread_is_signal(thread),