Commit fe9af81e authored by Pavel Tatashin, committed by Thomas Gleixner

x86/tsc: Redefine notsc to behave as tsc=unstable

Currently, the notsc kernel parameter disables the use of the TSC by
sched_clock(). However, this parameter does not prevent the kernel from
accessing the TSC in other places.

The only rationale for booting with notsc is to avoid timing discrepancies on
multi-socket systems where the TSCs are not properly synchronized, and thus
to exclude the TSC from being used for timekeeping. But that prevents using the
TSC for sched_clock() as well, which is not necessary, as the core sched_clock()
implementation can handle non-synchronized TSC based sched clocks just
fine.

However, there is another method to solve the above problem: booting with the
tsc=unstable parameter. This parameter allows sched_clock() to use the TSC and
just excludes it from timekeeping.
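
For context, tsc= is handled by an early __setup() hook in arch/x86/kernel/tsc.c.
The sketch below is an approximation of that handler around this kernel version
(the exact set of sub-options may differ, so treat it as illustrative rather than
verbatim); the relevant point is that "tsc=unstable" simply calls
mark_tsc_unstable() at boot, which keeps sched_clock() on the TSC while keeping
the TSC clocksource out of timekeeping:

/*
 * Approximate sketch of the tsc= parameter parser in arch/x86/kernel/tsc.c.
 * "tsc=unstable" marks the TSC unstable right away: the clocksource code
 * will not select the TSC for timekeeping, but sched_clock() keeps using
 * it through the core unstable-clock handling.
 */
static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	return 1;
}
__setup("tsc=", tsc_setup);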

So there is no real reason to keep notsc, but for compatibility reasons the
parameter has to stay. Make it behave like 'tsc=unstable' instead.

[ tglx: Massaged changelog ]
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: steven.sistare@oracle.com
Cc: daniel.m.jordan@oracle.com
Cc: linux@armlinux.org.uk
Cc: schwidefsky@de.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: john.stultz@linaro.org
Cc: sboyd@codeaurora.org
Cc: hpa@zytor.com
Cc: peterz@infradead.org
Cc: prarit@redhat.com
Cc: feng.tang@intel.com
Cc: pmladek@suse.com
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: linux-s390@vger.kernel.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180719205545.16512-12-pasha.tatashin@oracle.com
parent 9b3661cd
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2835,8 +2835,6 @@
 
 	nosync		[HW,M68K] Disables sync negotiation for all devices.
 
-	notsc		[BUGS=X86-32] Disable Time Stamp Counter
-
 	nowatchdog	[KNL] Disable both lockup detectors, i.e.
 			soft-lockup and NMI watchdog (hard-lockup).

--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -92,9 +92,7 @@ APICs
 Timing
 
   notsc
-  Don't use the CPU time stamp counter to read the wall time.
-  This can be used to work around timing problems on multiprocessor systems
-  with not properly synchronized CPUs.
+  Deprecated, use tsc=unstable instead.
 
   nohpet
   Don't use the HPET timer.

--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -38,11 +38,6 @@ EXPORT_SYMBOL(tsc_khz);
  */
 static int __read_mostly tsc_unstable;
 
-/* native_sched_clock() is called before tsc_init(), so
-   we must start with the TSC soft disabled to prevent
-   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
-static int __read_mostly tsc_disabled = -1;
-
 static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
@@ -248,8 +243,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
 #ifdef CONFIG_X86_TSC
 int __init notsc_setup(char *str)
 {
-	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
-	tsc_disabled = 1;
+	mark_tsc_unstable("boot parameter notsc");
 	return 1;
 }
 #else
@@ -1307,7 +1301,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 static int __init init_tsc_clocksource(void)
 {
-	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
+	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
 		return 0;
 
 	if (tsc_unstable)
@@ -1414,12 +1408,6 @@ void __init tsc_init(void)
 		set_cyc2ns_scale(tsc_khz, cpu, cyc);
 	}
 
-	if (tsc_disabled > 0)
-		return;
-
-	/* now allow native_sched_clock() to use rdtsc */
-
-	tsc_disabled = 0;
 	static_branch_enable(&__use_tsc);
 
 	if (!no_sched_irq_time)
@@ -1455,7 +1443,7 @@ unsigned long calibrate_delay_is_known(void)
 	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
 	const struct cpumask *mask = topology_core_cpumask(cpu);
 
-	if (tsc_disabled || !constant_tsc || !mask)
+	if (!constant_tsc || !mask)
 		return 0;
 
 	sibling = cpumask_any_but(mask, cpu);
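
The key functional change above is that notsc_setup() now calls
mark_tsc_unstable(). For reference, a simplified sketch of that helper from
arch/x86/kernel/tsc.c (its exact body varies between kernel versions, so this
is illustrative only, not the verbatim source):

/*
 * Simplified sketch of mark_tsc_unstable() (arch/x86/kernel/tsc.c).
 * Illustrative only; the real function differs in detail across versions.
 */
void mark_tsc_unstable(char *reason)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();	/* sched_clock() may keep using the TSC */
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to %s\n", reason);

	/* demote the TSC clocksource so timekeeping does not select it */
	clocksource_mark_unstable(&clocksource_tsc);
}

With notsc routed through this path, both "notsc" and "tsc=unstable" leave the
TSC available to sched_clock() while keeping it out of timekeeping, which is
the behaviour described in the changelog.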