Commit eb3e1d37 authored by Michael Kelley, committed by Wei Liu

clocksource/drivers/hyper-v: Handle sched_clock differences inline

While the Hyper-V Reference TSC code is architecture neutral, the
pv_ops.time.sched_clock() function is implemented for x86/x64, but not
for ARM64. The current code calls a utility function under arch/x86 (and,
in the forthcoming ARM64 support, under arch/arm64) to handle the
difference.

Change this approach to handle the difference inline based on whether
GENERIC_SCHED_CLOCK is present.  The new approach removes code under
arch/* since the difference is tied more to the specifics of the Linux
implementation than to the architecture.
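For illustration, a minimal sketch of the caller side (the function and
variable names are assumed to match the existing hyperv_timer.c driver
and are not part of this change): with the helper defined inline in the
driver, the single call site needs no #ifdefs and is identical on
x86/x64 and ARM64.

/* Sketch only -- names assumed from the existing driver, not from this patch */
static bool __init hv_init_tsc_clocksource(void)
{
	/* ... clocksource registration elided ... */
	hv_sched_clock_offset = hv_read_reference_counter();
	hv_setup_sched_clock(read_hv_sched_clock_tsc);
	return true;
}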

No functional change.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Link: https://lore.kernel.org/r/1614721102-2241-9-git-send-email-mikelley@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
parent e4ab4658
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -29,17 +29,6 @@ static inline u64 hv_get_register(unsigned int reg)
 
 #define hv_get_raw_timer() rdtsc_ordered()
 
-/*
- * Reference to pv_ops must be inline so objtool
- * detection of noinstr violations can work correctly.
- */
-static __always_inline void hv_setup_sched_clock(void *sched_clock)
-{
-#ifdef CONFIG_PARAVIRT
-	pv_ops.time.sched_clock = sched_clock;
-#endif
-}
-
 void hyperv_vector_handler(struct pt_regs *regs);
 
 static inline void hv_enable_stimer0_percpu_irq(int irq) {}
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -423,6 +423,30 @@ static struct clocksource hyperv_cs_msr = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+/*
+ * Reference to pv_ops must be inline so objtool
+ * detection of noinstr violations can work correctly.
+ */
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+	/*
+	 * We're on an architecture with generic sched clock (not x86/x64).
+	 * The Hyper-V sched clock read function returns nanoseconds, not
+	 * the normal 100ns units of the Hyper-V synthetic clock.
+	 */
+	sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
+}
+#elif defined CONFIG_PARAVIRT
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+	/* We're on x86/x64 *and* using PV ops */
+	pv_ops.time.sched_clock = sched_clock;
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
+static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
+#endif /* CONFIG_GENERIC_SCHED_CLOCK */
+
 static bool __init hv_init_tsc_clocksource(void)
 {
 	u64 tsc_msr;
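The NSEC_PER_SEC rate passed to sched_clock_register() above assumes the
registered read function already returns nanoseconds. A sketch of such a
read function, modeled on read_hv_sched_clock_tsc() in the existing
driver (the exact body may differ; shown only to illustrate the unit
conversion):

/* The reference counter ticks in 100ns units (HV_CLOCK_HZ == 10 MHz), so
 * scale by NSEC_PER_SEC / HV_CLOCK_HZ == 100 to give sched_clock nanoseconds.
 */
static u64 notrace read_hv_sched_clock_tsc(void)
{
	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
		(NSEC_PER_SEC / HV_CLOCK_HZ);
}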