Commit 5680d809 authored by Peter Zijlstra, committed by Ingo Molnar

sched/clock: Provide better clock continuity

When switching between the unstable and stable variants it is
currently possible that clock discontinuities occur.

And while these will mostly be 'small', we attempt to do better.

As observed on my IVB-EP, the sched_clock() is ~1.5s ahead of the
ktime_get_ns() based timeline at the point of switchover
(sched_clock_init_late()) after SMP bringup.

Equally, when the TSC is later found to be unstable -- typically
because SMM tries to hide its SMI latencies by mucking with the TSC --
we want to avoid large jumps.

Since the clocksource watchdog reports the issue after the fact we
cannot exactly fix up time, but since SMI latencies are typically
small (~10ns range), the discontinuity is mainly due to drift between
sched_clock() and ktime_get_ns() (which on my desktop is ~79s over
24 days).
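
To make the offset bookkeeping concrete, here is a minimal user-space sketch of the invariant the patch maintains, ktime_get_ns() + gtod_offset == sched_clock() + raw_offset, and of how each transition solves for the one offset it is allowed to change. mark_stable()/mark_unstable() are stand-ins for __set_sched_clock_stable()/__clear_sched_clock_stable() in the diff below; all clock readings are invented numbers loosely based on the ~1.5s figure above, and signed 64-bit is used for readability where the kernel uses u64 with wraparound arithmetic.

/*
 * Standalone sketch of the offset algebra; the "readings" are invented
 * example values, only the arithmetic mirrors the patch.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static int64_t gtod_offset;	/* added to the ktime_get_ns() timeline */
static int64_t raw_offset;	/* added to the sched_clock() timeline  */

/* unstable->stable: pick raw_offset so both timelines agree right now */
static void mark_stable(int64_t tick_raw, int64_t tick_gtod)
{
	raw_offset = (tick_gtod + gtod_offset) - tick_raw;
	printf("stable:   raw_offset  = %" PRId64 " ns\n", raw_offset);
}

/* stable->unstable: pick gtod_offset so both timelines agree right now */
static void mark_unstable(int64_t tick_raw, int64_t tick_gtod)
{
	gtod_offset = (tick_raw + raw_offset) - tick_gtod;
	printf("unstable: gtod_offset = %" PRId64 " ns\n", gtod_offset);
}

int main(void)
{
	/* Hypothetical switchover: sched_clock() ~1.5s ahead of ktime. */
	int64_t tick_raw  = 11500000000LL;	/* sched_clock() sample  */
	int64_t tick_gtod = 10000000000LL;	/* ktime_get_ns() sample */

	mark_stable(tick_raw, tick_gtod);
	/* raw_offset = -1.5s, so sched_clock() + raw_offset == 10.0s == ktime */

	/* Later the TSC is flagged unstable; the clocks have drifted a
	 * further ~300ns apart in the meantime (again, invented numbers). */
	tick_raw  = 20000000500LL;
	tick_gtod = 18500000200LL;

	mark_unstable(tick_raw, tick_gtod);
	/* gtod_offset = 300ns, so tick_gtod + gtod_offset == tick_raw + raw_offset
	 * and the per-CPU clock continues from the same value. */
	return 0;
}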

I dislike this patch because it adds overhead to the good case in
favour of dealing with badness. But given the widespread failure of
TSC stability this is worth it.

Note that in case the TSC makes drastic jumps after SMP bringup we're
still hosed. There's just not much we can do in that case without
stupid overhead.

If we were to somehow expose tsc_clocksource_reliable (which is hard
because this code is also used on ia64 and parisc) we could avoid some
of the newly introduced overhead.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9881b024
kernel/sched/clock.c

@@ -86,6 +86,30 @@ void sched_clock_init(void)
 static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
 static int __sched_clock_stable_early;
 
+/*
+ * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
+ */
+static __read_mostly u64 raw_offset;
+static __read_mostly u64 gtod_offset;
+
+struct sched_clock_data {
+	u64			tick_raw;
+	u64			tick_gtod;
+	u64			clock;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
+
+static inline struct sched_clock_data *this_scd(void)
+{
+	return this_cpu_ptr(&sched_clock_data);
+}
+
+static inline struct sched_clock_data *cpu_sdc(int cpu)
+{
+	return &per_cpu(sched_clock_data, cpu);
+}
+
 int sched_clock_stable(void)
 {
 	return static_branch_likely(&__sched_clock_stable);
@@ -93,6 +117,17 @@ int sched_clock_stable(void)
 
 static void __set_sched_clock_stable(void)
 {
+	struct sched_clock_data *scd = this_scd();
+
+	/*
+	 * Attempt to make the (initial) unstable->stable transition continuous.
+	 */
+	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);
+
+	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
+			scd->tick_gtod, gtod_offset,
+			scd->tick_raw, raw_offset);
+
 	static_branch_enable(&__sched_clock_stable);
 	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
@@ -117,7 +152,23 @@ void set_sched_clock_stable(void)
 
 static void __clear_sched_clock_stable(struct work_struct *work)
 {
-	/* XXX worry about clock continuity */
+	struct sched_clock_data *scd = this_scd();
+
+	/*
+	 * Attempt to make the stable->unstable transition continuous.
+	 *
+	 * Trouble is, this is typically called from the TSC watchdog
+	 * timer, which is late per definition. This means the tick
+	 * values can already be screwy.
+	 *
+	 * Still do what we can.
+	 */
+	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);
+
+	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
+			scd->tick_gtod, gtod_offset,
+			scd->tick_raw, raw_offset);
+
 	static_branch_disable(&__sched_clock_stable);
 	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
@@ -134,28 +185,9 @@ void clear_sched_clock_stable(void)
 	schedule_work(&sched_clock_work);
 }
 
-struct sched_clock_data {
-	u64			tick_raw;
-	u64			tick_gtod;
-	u64			clock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
-
-static inline struct sched_clock_data *this_scd(void)
-{
-	return this_cpu_ptr(&sched_clock_data);
-}
-
-static inline struct sched_clock_data *cpu_sdc(int cpu)
-{
-	return &per_cpu(sched_clock_data, cpu);
-}
-
 void sched_clock_init_late(void)
 {
 	sched_clock_running = 2;
 	/*
 	 * Ensure that it is impossible to not do a static_key update.
 	 *
@@ -210,7 +242,7 @@ static u64 sched_clock_local(struct sched_clock_data *scd)
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
-	clock = scd->tick_gtod + delta;
+	clock = scd->tick_gtod + gtod_offset + delta;
 	min_clock = wrap_max(scd->tick_gtod, old_clock);
 	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
 
@@ -296,7 +328,7 @@ u64 sched_clock_cpu(int cpu)
 	u64 clock;
 
 	if (sched_clock_stable())
-		return sched_clock();
+		return sched_clock() + raw_offset;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
@@ -317,23 +349,22 @@ EXPORT_SYMBOL_GPL(sched_clock_cpu);
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd;
-	u64 now, now_gtod;
-
-	if (sched_clock_stable())
-		return;
-
-	if (unlikely(!sched_clock_running))
-		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
+	/*
+	 * Update these values even if sched_clock_stable(), because it can
+	 * become unstable at any point in time at which point we need some
+	 * values to fall back on.
+	 *
+	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
+	 */
 	scd = this_scd();
-	now_gtod = ktime_to_ns(ktime_get());
-	now = sched_clock();
-
-	scd->tick_raw = now;
-	scd->tick_gtod = now_gtod;
-	sched_clock_local(scd);
+	scd->tick_raw = sched_clock();
+	scd->tick_gtod = ktime_get_ns();
+
+	if (!sched_clock_stable() && likely(sched_clock_running))
+		sched_clock_local(scd);
 }
 
 /*
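
For reference, the ~79s-over-24-days drift quoted in the changelog works out to roughly 38 ppm. A rough way to watch the same class of raw-vs-corrected divergence from user space is to compare CLOCK_MONOTONIC_RAW against the NTP-disciplined CLOCK_MONOTONIC. This is only an analogy and not part of the patch: neither clock is sched_clock() or ktime_get_ns(), but the shape of the drift is similar.

/*
 * Illustration only: raw clock vs. NTP-corrected clock, sampled once a
 * minute for an hour. A steadily growing difference is the kind of drift
 * the changelog describes (~79s over 24 days is about 38 ppm).
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long raw0  = ns(CLOCK_MONOTONIC_RAW);
	long long mono0 = ns(CLOCK_MONOTONIC);
	int i;

	for (i = 0; i < 60; i++) {
		sleep(60);
		long long draw  = ns(CLOCK_MONOTONIC_RAW) - raw0;
		long long dmono = ns(CLOCK_MONOTONIC) - mono0;

		/* positive: the raw clock runs fast relative to corrected time */
		printf("after %4llds: raw - corrected = %lld ns\n",
		       dmono / 1000000000LL, draw - dmono);
	}
	return 0;
}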