Commit f009a7a7 authored by Frederic Weisbecker, committed by Ingo Molnar

timers/nohz: Convert tick dependency mask to atomic_t

The tick dependency mask was initially unsigned long because that is the
type clear_bit() operates on and fetch_or() accepts.

But now that we have atomic_fetch_or(), we can use atomic_andnot() to
clear the bit instead. This consolidates the type of our tick dependency
mask, reduces its size in structures and benefits from possible
architecture optimizations on atomic_t operations.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1458830281-4255-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5acba71e
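For readers skimming the diff below, here is a standalone userspace sketch of the pattern this change moves to: a bitmask kept in an atomic type, set with an atomic fetch-or (whose return value reveals whether the mask was previously empty, i.e. whether a "kick" is needed) and cleared with an atomic and-not. It is built on C11 <stdatomic.h> rather than the kernel's atomic_t API (atomic_fetch_or()/atomic_andnot() in the kernel; atomic_fetch_and() with an inverted mask plays that role here), and the DEP_BIT_* names are placeholders, not the kernel's TICK_DEP_* bits.

/* Userspace analogue of the atomic tick-dependency mask pattern. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT(nr)			(1U << (nr))
#define DEP_BIT_POSIX_TIMER	0	/* placeholder bit numbers */
#define DEP_BIT_PERF_EVENTS	1

/* Static atomics are zero-initialized, like the kernel's global mask. */
static atomic_uint dep_mask;

/* Set a dependency bit; report whether this was the first bit set. */
static bool dep_set(unsigned int bit)
{
	/*
	 * atomic_fetch_or() returns the value *before* the OR, so a zero
	 * return means the mask just went from empty to non-empty and the
	 * caller would need to kick the tick back on.
	 */
	return atomic_fetch_or(&dep_mask, BIT(bit)) == 0;
}

/* Clear a dependency bit, mirroring atomic_andnot(BIT(bit), &mask). */
static void dep_clear(unsigned int bit)
{
	atomic_fetch_and(&dep_mask, ~BIT(bit));
}

int main(void)
{
	printf("kick needed: %d\n", dep_set(DEP_BIT_POSIX_TIMER));	/* 1 */
	printf("kick needed: %d\n", dep_set(DEP_BIT_PERF_EVENTS));	/* 0 */
	dep_clear(DEP_BIT_POSIX_TIMER);
	dep_clear(DEP_BIT_PERF_EVENTS);
	printf("mask now: %u\n", atomic_load(&dep_mask));		/* 0 */
	return 0;
}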
@@ -720,7 +720,7 @@ struct signal_struct {
 	struct task_cputime cputime_expires;
 #ifdef CONFIG_NO_HZ_FULL
-	unsigned long tick_dep_mask;
+	atomic_t tick_dep_mask;
 #endif
 	struct list_head cpu_timers[3];
@@ -1549,7 +1549,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_NO_HZ_FULL
-	unsigned long tick_dep_mask;
+	atomic_t tick_dep_mask;
 #endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	u64 start_time; /* monotonic time in nsec */
@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 cpumask_var_t tick_nohz_full_mask;
 cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
-static unsigned long tick_dep_mask;
+static atomic_t tick_dep_mask;
 
-static void trace_tick_dependency(unsigned long dep)
+static bool check_tick_dependency(atomic_t *dep)
 {
-	if (dep & TICK_DEP_MASK_POSIX_TIMER) {
+	int val = atomic_read(dep);
+
+	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_PERF_EVENTS) {
+	if (val & TICK_DEP_MASK_PERF_EVENTS) {
 		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_SCHED) {
+	if (val & TICK_DEP_MASK_SCHED) {
 		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE)
+	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+		return true;
+	}
+
+	return false;
 }
 
 static bool can_stop_full_tick(struct tick_sched *ts)
 {
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (tick_dep_mask) {
-		trace_tick_dependency(tick_dep_mask);
+	if (check_tick_dependency(&tick_dep_mask))
 		return false;
-	}
 
-	if (ts->tick_dep_mask) {
-		trace_tick_dependency(ts->tick_dep_mask);
+	if (check_tick_dependency(&ts->tick_dep_mask))
 		return false;
-	}
 
-	if (current->tick_dep_mask) {
-		trace_tick_dependency(current->tick_dep_mask);
+	if (check_tick_dependency(&current->tick_dep_mask))
 		return false;
-	}
 
-	if (current->signal->tick_dep_mask) {
-		trace_tick_dependency(current->signal->tick_dep_mask);
+	if (check_tick_dependency(&current->signal->tick_dep_mask))
 		return false;
-	}
 
 	return true;
 }
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void)
 	preempt_enable();
 }
 
-static void tick_nohz_dep_set_all(unsigned long *dep,
+static void tick_nohz_dep_set_all(atomic_t *dep,
 				  enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 
-	prev = fetch_or(dep, BIT_MASK(bit));
+	prev = atomic_fetch_or(dep, BIT(bit));
 	if (!prev)
 		tick_nohz_full_kick_all();
 }
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear(enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tick_dep_mask);
+	atomic_andnot(BIT(bit), &tick_dep_mask);
 }
 
 /*
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit)
  */
 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 	struct tick_sched *ts;
 
 	ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit));
+	prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
 	if (!prev) {
 		preempt_disable();
 		/* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 {
 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	clear_bit(bit, &ts->tick_dep_mask);
+	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
 
 /*
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tsk->tick_dep_mask);
+	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 }
 
 /*
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &sig->tick_dep_mask);
+	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 }
 
 /*
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void)
 	ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (ts->tick_stopped) {
-		if (current->tick_dep_mask || current->signal->tick_dep_mask)
+		if (atomic_read(&current->tick_dep_mask) ||
+		    atomic_read(&current->signal->tick_dep_mask))
 			tick_nohz_full_kick();
 	}
 out:
@@ -60,7 +60,7 @@ struct tick_sched {
 	u64 next_timer;
 	ktime_t idle_expires;
 	int do_timer_last;
-	unsigned long tick_dep_mask;
+	atomic_t tick_dep_mask;
 };
 
 extern struct tick_sched *tick_get_tick_sched(int cpu);