Commit 4d2c7b30 authored by Li Xi, committed by Greg Kroah-Hartman

staging/lustre/ldlm: high load because of negative timeout

When the time spent on LRU resizing exceeds the recalculation
waiting period, the ldlm daemon keeps resizing with no interval of
rest, which causes high CPU load.
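As a rough standalone sketch (not Lustre code; the names and numbers
below are made up for illustration), the busy loop comes from computing
the next wait from a timestamp taken before the resize:

/* Sketch only: if the resize takes longer than the period, the wait
 * computed from the pre-resize timestamp is already negative, so the
 * daemon starts the next recalculation immediately. */
#include <stdio.h>

int main(void)
{
	long period = 10;	/* seconds between recalculations          */
	long recalc_time = 100;	/* timestamp taken before the resize      */
	long now = 112;		/* the resize itself took 12 seconds       */

	long wait = recalc_time - now + period;	/* 100 - 112 + 10 = -2 */
	if (wait <= 0)
		printf("wait=%lds -> no sleep, recalculate again at once\n",
		       wait);
	return 0;
}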

This patch fixes the problem by setting the recalculation timestamp
after LRU resizing finishes rather than before it. In addition, an
interval of at least one second is enforced between recalculations.
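In simplified form (a sketch of the idea only, not the patched
functions themselves; the diff below is authoritative), the change
amounts to these two guards:

/* Sketch: timestamp after the slow work, and never report a
 * non-positive wait to the daemon thread. */
#include <stdio.h>
#include <time.h>

static time_t recalc_time;
static const time_t recalc_period = 10;

/* 1. Timestamp the recalculation only after the slow LRU resize. */
static void after_resize(void)
{
	/* ... LRU resizing finished just above this point ... */
	recalc_time = time(NULL);
}

/* 2. Never hand the daemon thread a non-positive wait. */
static time_t next_wait(void)
{
	time_t wait = recalc_time - time(NULL) + recalc_period;

	if (wait <= 0)
		wait = 1;	/* enforce at least one second of rest */
	return wait;
}

int main(void)
{
	after_resize();
	printf("next wait: %lds\n", (long)next_wait());
	return 0;
}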
Signed-off-by: Li Xi <lixi@ddn.com>
Reviewed-on: http://review.whamcloud.com/11227
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5415
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Signed-off-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c00266e3
@@ -470,6 +470,7 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
 	time_t recalc_interval_sec;
+	int ret;
 
 	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period)
@@ -490,16 +491,15 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 	 */
 	ldlm_cli_pool_pop_slv(pl);
-	pl->pl_recalc_time = get_seconds();
-	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
-			    recalc_interval_sec);
 	spin_unlock(&pl->pl_lock);
 
 	/*
 	 * Do not cancel locks in case lru resize is disabled for this ns.
 	 */
-	if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
-		return 0;
+	if (!ns_connect_lru_resize(ldlm_pl2ns(pl))) {
+		ret = 0;
+		goto out;
+	}
 
 	/*
 	 * In the time of canceling locks on client we do not need to maintain
@@ -507,7 +507,19 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 	 * It may be called when SLV has changed much, this is why we do not
 	 * take into account pl->pl_recalc_time here.
 	 */
-	return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+	ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+out:
+	spin_lock(&pl->pl_lock);
+	/*
+	 * Time of LRU resizing might be longer than period,
+	 * so update after LRU resizing rather than before it.
+	 */
+	pl->pl_recalc_time = get_seconds();
+	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+			    recalc_interval_sec);
+	spin_unlock(&pl->pl_lock);
+	return ret;
 }
 
 /**
@@ -591,6 +603,14 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
 	}
 	recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
 			      pl->pl_recalc_period;
+	if (recalc_interval_sec <= 0) {
+		/* Prevent too frequent recalculation. */
+		CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
+		       "too short period(%ld)",
+		       recalc_interval_sec,
+		       pl->pl_recalc_period);
+		recalc_interval_sec = 1;
+	}
 	return recalc_interval_sec;
 }