Commit 85c3d2dd authored by Stephen Boyd, committed by John Stultz

sched_clock: Use seqcount instead of rolling our own

We're going to increase the cyc value to 64 bits in the near
future. Doing that is going to break the custom seqcount
implementation in the sched_clock code because 64 bit numbers
aren't guaranteed to be atomic. Replace the cyc_copy with a
seqcount to avoid this problem.

Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
parent 87d8b9eb
...@@ -14,11 +14,12 @@ ...@@ -14,11 +14,12 @@
#include <linux/syscore_ops.h> #include <linux/syscore_ops.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/sched_clock.h> #include <linux/sched_clock.h>
#include <linux/seqlock.h>
struct clock_data { struct clock_data {
u64 epoch_ns; u64 epoch_ns;
u32 epoch_cyc; u32 epoch_cyc;
u32 epoch_cyc_copy; seqcount_t seq;
unsigned long rate; unsigned long rate;
u32 mult; u32 mult;
u32 shift; u32 shift;
...@@ -54,23 +55,16 @@ static unsigned long long notrace sched_clock_32(void) ...@@ -54,23 +55,16 @@ static unsigned long long notrace sched_clock_32(void)
u64 epoch_ns; u64 epoch_ns;
u32 epoch_cyc; u32 epoch_cyc;
u32 cyc; u32 cyc;
unsigned long seq;
if (cd.suspended) if (cd.suspended)
return cd.epoch_ns; return cd.epoch_ns;
/*
* Load the epoch_cyc and epoch_ns atomically. We do this by
* ensuring that we always write epoch_cyc, epoch_ns and
* epoch_cyc_copy in strict order, and read them in strict order.
* If epoch_cyc and epoch_cyc_copy are not equal, then we're in
* the middle of an update, and we should repeat the load.
*/
do { do {
seq = read_seqcount_begin(&cd.seq);
epoch_cyc = cd.epoch_cyc; epoch_cyc = cd.epoch_cyc;
smp_rmb();
epoch_ns = cd.epoch_ns; epoch_ns = cd.epoch_ns;
smp_rmb(); } while (read_seqcount_retry(&cd.seq, seq));
} while (epoch_cyc != cd.epoch_cyc_copy);
cyc = read_sched_clock(); cyc = read_sched_clock();
cyc = (cyc - epoch_cyc) & sched_clock_mask; cyc = (cyc - epoch_cyc) & sched_clock_mask;
...@@ -90,16 +84,12 @@ static void notrace update_sched_clock(void) ...@@ -90,16 +84,12 @@ static void notrace update_sched_clock(void)
ns = cd.epoch_ns + ns = cd.epoch_ns +
cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
cd.mult, cd.shift); cd.mult, cd.shift);
/*
* Write epoch_cyc and epoch_ns in a way that the update is
* detectable in cyc_to_fixed_sched_clock().
*/
raw_local_irq_save(flags); raw_local_irq_save(flags);
cd.epoch_cyc_copy = cyc; write_seqcount_begin(&cd.seq);
smp_wmb();
cd.epoch_ns = ns; cd.epoch_ns = ns;
smp_wmb();
cd.epoch_cyc = cyc; cd.epoch_cyc = cyc;
write_seqcount_end(&cd.seq);
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
} }
...@@ -195,7 +185,6 @@ static int sched_clock_suspend(void) ...@@ -195,7 +185,6 @@ static int sched_clock_suspend(void)
static void sched_clock_resume(void) static void sched_clock_resume(void)
{ {
cd.epoch_cyc = read_sched_clock(); cd.epoch_cyc = read_sched_clock();
cd.epoch_cyc_copy = cd.epoch_cyc;
cd.suspended = false; cd.suspended = false;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment