Commit 8710e914 authored by Daniel Thompson, committed by Ingo Molnar

timers, sched/clock: Match scope of read and write seqcounts

Currently the scope of the raw_write_seqcount_begin/end() in
sched_clock_register() far exceeds the scope of the read section
in sched_clock(). This gives the impression of safety during
cursory review but achieves little.

Note that this is likely to be a latent issue at present, because
sched_clock_register() is typically called before interrupts are
enabled; however, it does risk bugs being needlessly introduced as
the code evolves.

This patch fixes the problem by increasing the scope of the read
locking performed by sched_clock() to cover all data modified by
sched_clock_register().

We also improve clarity by moving writes to struct clock_data
that do not impact sched_clock() outside of the critical
section.
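
To illustrate the pattern the patch moves to, here is a minimal, self-contained
userspace sketch (C11 atomics) of a seqcount-style reader whose retry loop
covers every field the writer publishes. It is not the kernel code: the names
clock_state, clock_read(), clock_update() and fake_read_cyc() are invented for
the example, the hand-rolled sequence counter stands in for the kernel's
seqcount_t (raw_read_seqcount_begin()/read_seqcount_retry()), the counter-width
mask is omitted, and the _Atomic qualifiers exist only to keep the sketch free
of C11 data races.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct clock_state {
	atomic_uint seq;		/* odd while an update is in flight */
	/* _Atomic only to avoid C11 data races in this sketch */
	_Atomic uint64_t epoch_ns;
	_Atomic uint64_t epoch_cyc;
	_Atomic uint32_t mult;
	_Atomic uint32_t shift;
	_Atomic bool suspended;
};

/*
 * Reader: everything the writer may change is read inside the retry
 * loop, so a concurrent update restarts the computation instead of
 * letting it mix old and new epoch/mult/shift values.
 */
static uint64_t clock_read(struct clock_state *cd, uint64_t (*read_cyc)(void))
{
	uint64_t res, cyc;
	unsigned int seq;

	do {
		/* wait for an even (stable) sequence value */
		do {
			seq = atomic_load_explicit(&cd->seq, memory_order_acquire);
		} while (seq & 1);

		res = cd->epoch_ns;
		if (!cd->suspended) {
			cyc = read_cyc() - cd->epoch_cyc;
			res += (cyc * cd->mult) >> cd->shift;
		}

		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&cd->seq, memory_order_relaxed) != seq);

	return res;
}

/*
 * Writer (assumed externally serialized, as the kernel's update paths
 * are): only the fields that clock_read() consumes are written inside
 * the odd/even window; unrelated bookkeeping belongs outside it.
 */
static void clock_update(struct clock_state *cd, uint64_t ns, uint64_t cyc,
			 uint32_t mult, uint32_t shift)
{
	unsigned int seq = atomic_load_explicit(&cd->seq, memory_order_relaxed);

	atomic_store_explicit(&cd->seq, seq + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);

	cd->epoch_ns = ns;
	cd->epoch_cyc = cyc;
	cd->mult = mult;
	cd->shift = shift;

	atomic_store_explicit(&cd->seq, seq + 2, memory_order_release);
}

static _Atomic uint64_t fake_counter;

static uint64_t fake_read_cyc(void)
{
	/* stand-in for a hardware counter: advances on every read */
	return atomic_fetch_add_explicit(&fake_counter, 1000, memory_order_relaxed);
}

int main(void)
{
	static struct clock_state cd;

	clock_update(&cd, 0, fake_read_cyc(), 1, 0);	/* 1 ns per cycle */
	printf("t0 = %llu ns\n", (unsigned long long)clock_read(&cd, fake_read_cyc));
	printf("t1 = %llu ns\n", (unsigned long long)clock_read(&cd, fake_read_cyc));
	return 0;
}

As in the patched sched_clock(), a concurrent update forces the reader to
retry rather than letting it combine an old epoch with a new mult/shift, while
values that readers never consume (cd.rate and cd.wrap_kt in the kernel) can
be written outside the odd/even window altogether.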
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
[ Reworked it slightly to apply to tip/timers/core]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1427397806-20889-2-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2557d215
kernel/time/sched_clock.c
@@ -58,23 +58,21 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 
 unsigned long long notrace sched_clock(void)
 {
-	u64 epoch_ns;
-	u64 epoch_cyc;
-	u64 cyc;
+	u64 cyc, res;
 	unsigned long seq;
 
-	if (cd.suspended)
-		return cd.epoch_ns;
-
 	do {
 		seq = raw_read_seqcount_begin(&cd.seq);
-		epoch_cyc = cd.epoch_cyc;
-		epoch_ns = cd.epoch_ns;
+
+		res = cd.epoch_ns;
+		if (!cd.suspended) {
+			cyc = read_sched_clock();
+			cyc = (cyc - cd.epoch_cyc) & sched_clock_mask;
+			res += cyc_to_ns(cyc, cd.mult, cd.shift);
+		}
 	} while (read_seqcount_retry(&cd.seq, seq));
 
-	cyc = read_sched_clock();
-	cyc = (cyc - epoch_cyc) & sched_clock_mask;
-	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
+	return res;
 }
 
 /*
@@ -111,7 +109,6 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 {
 	u64 res, wrap, new_mask, new_epoch, cyc, ns;
 	u32 new_mult, new_shift;
-	ktime_t new_wrap_kt;
 	unsigned long r;
 	char r_unit;
 
@@ -124,10 +121,11 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
 
 	new_mask = CLOCKSOURCE_MASK(bits);
+	cd.rate = rate;
 
 	/* calculate how many nanosecs until we risk wrapping */
 	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
-	new_wrap_kt = ns_to_ktime(wrap);
+	cd.wrap_kt = ns_to_ktime(wrap);
 
 	/* update epoch for new counter and update epoch_ns from old counter*/
 	new_epoch = read();
@@ -138,8 +136,6 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	raw_write_seqcount_begin(&cd.seq);
 	read_sched_clock = read;
 	sched_clock_mask = new_mask;
-	cd.rate = rate;
-	cd.wrap_kt = new_wrap_kt;
 	cd.mult = new_mult;
 	cd.shift = new_shift;
 	cd.epoch_cyc = new_epoch;