Commit 009789f0 authored by Chris Peterson, committed by Linus Torvalds

slow-work: use round_jiffies() for thread pool's cull and OOM timers

Round the slow work queue's cull and OOM timeouts to whole-second boundaries
with round_jiffies().  The slow work queue uses a pair of timers to cull
idle threads and, after OOM, to delay new thread creation.

This patch also extracts the mod_timer() logic for the cull timer into a
separate helper function.

By rounding non-time-critical timers such as these to whole seconds, they
will be batched up to fire at the same time rather than being spread out.
This allows the CPU to wake up less often, which saves power.
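
As an illustration of the pattern (not part of this patch), here is a
minimal, self-contained sketch against the modern timer API
(timer_setup(), which postdates this 2009 patch); the example_* names
and the timeout value are hypothetical:

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/timer.h>

/* Hypothetical timeout, for illustration only. */
#define EXAMPLE_CULL_TIMEOUT (5 * HZ)

static struct timer_list example_cull_timer;

static void example_cull_timeout_fn(struct timer_list *t)
{
	/* cull an idle thread here */
}

/*
 * round_jiffies() pushes the expiry out to the next whole-second
 * boundary, so timers armed independently around the same time tend
 * to expire in a single CPU wakeup instead of spreading across the
 * second.
 */
static void example_schedule_cull(void)
{
	mod_timer(&example_cull_timer,
		  round_jiffies(jiffies + EXAMPLE_CULL_TIMEOUT));
}

static int __init example_init(void)
{
	timer_setup(&example_cull_timer, example_cull_timeout_fn, 0);
	example_schedule_cull();
	return 0;
}

static void __exit example_exit(void)
{
	del_timer_sync(&example_cull_timer);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");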
Signed-off-by: Chris Peterson <cpeterso@cpeterso.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b72b71c6
@@ -318,6 +318,15 @@ int slow_work_enqueue(struct slow_work *work)
 }
 EXPORT_SYMBOL(slow_work_enqueue);
 
+/*
+ * Schedule a cull of the thread pool at some time in the near future
+ */
+static void slow_work_schedule_cull(void)
+{
+	mod_timer(&slow_work_cull_timer,
+		  round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
+}
+
 /*
  * Worker thread culling algorithm
  */
@@ -335,8 +344,7 @@ static bool slow_work_cull_thread(void)
 		    list_empty(&vslow_work_queue) &&
 		    atomic_read(&slow_work_thread_count) >
 		    slow_work_min_threads) {
-			mod_timer(&slow_work_cull_timer,
-				  jiffies + SLOW_WORK_CULL_TIMEOUT);
+			slow_work_schedule_cull();
 			do_cull = true;
 		}
 	}
@@ -393,8 +401,7 @@ static int slow_work_thread(void *_data)
 			    list_empty(&vslow_work_queue) &&
 			    atomic_read(&slow_work_thread_count) >
 			    slow_work_min_threads)
-				mod_timer(&slow_work_cull_timer,
-					  jiffies + SLOW_WORK_CULL_TIMEOUT);
+				slow_work_schedule_cull();
 			continue;
 		}
 
@@ -458,7 +465,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 		if (atomic_dec_and_test(&slow_work_thread_count))
 			BUG(); /* we're running on a slow work thread... */
 		mod_timer(&slow_work_oom_timer,
-			  jiffies + SLOW_WORK_OOM_TIMEOUT);
+			  round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
 	} else {
 		/* ratelimit the starting of new threads */
 		mod_timer(&slow_work_oom_timer, jiffies + 1);
@@ -502,8 +509,7 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
 			if (n < 0 && !slow_work_may_not_start_new_thread)
 				slow_work_enqueue(&slow_work_new_thread);
 			else if (n > 0)
-				mod_timer(&slow_work_cull_timer,
-					  jiffies + SLOW_WORK_CULL_TIMEOUT);
+				slow_work_schedule_cull();
 		}
 		mutex_unlock(&slow_work_user_lock);
 	}
@@ -529,8 +535,7 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
 			n = t -
 				atomic_read(&slow_work_thread_count);
 			if (n < 0)
-				mod_timer(&slow_work_cull_timer,
-					  jiffies + SLOW_WORK_CULL_TIMEOUT);
+				slow_work_schedule_cull();
 		}
 		mutex_unlock(&slow_work_user_lock);
 	}