Commit 6a45a658 authored by Linus Torvalds

Merge tag 'x86-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull more x86 updates from Thomas Gleixner:
 "A set of fixes and updates for x86:

   - Unbreak paravirt VDSO clocks.

     While the VDSO code was moved into lib for sharing, a subtle check
     for the validity of paravirt clocks got replaced. While the
     replacement works perfectly fine for bare metal as the update of
     the VDSO clock mode is synchronous, it fails for paravirt clocks
     because the hypervisor can invalidate them asynchronously.

     Bring it back as an optional function so it does not inflict this
     on architectures which are free of PV damage.

   - Fix the jiffies to jiffies_64 mapping on 64-bit so it does not
     trigger an ODR violation on newer compilers

   - Three fixes for the SSBD and *IB* speculation mitigation maze to
     ensure consistency, avoid wrongly disabling some *IB* variants, and
     prevent a rogue cross-process shutdown of SSBD. All marked for
     stable.

   - Add yet more CPU models to the splitlock detection capable list
     !@#%$!

   - Bring back the pr_info() which reports that the TSC deadline timer
     is enabled.

   - Reboot quirk for MacBook6,1"

* tag 'x86-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Unbreak paravirt VDSO clocks
  lib/vdso: Provide sanity check for cycles (again)
  clocksource: Remove obsolete ifdef
  x86_64: Fix jiffies ODR violation
  x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
  x86/speculation: Prevent rogue cross-process SSBD shutdown
  x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
  x86/cpu: Add Sapphire Rapids CPU model number
  x86/split_lock: Add Icelake microserver and Tigerlake CPU models
  x86/apic: Make TSC deadline timer detection message visible
  x86/reboot/quirks: Add MacBook6,1 reboot quirk
parents 92ac9712 7778d841
@@ -89,6 +89,8 @@
 #define INTEL_FAM6_COMETLAKE		0xA5
 #define INTEL_FAM6_COMETLAKE_L		0xA6
 
+#define INTEL_FAM6_SAPPHIRERAPIDS_X	0x8F
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
...
@@ -271,6 +271,24 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
 	return __vdso_data;
 }
 
+static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd)
+{
+	return true;
+}
+#define vdso_clocksource_ok arch_vdso_clocksource_ok
+
+/*
+ * Clocksource read value validation to handle PV and HyperV clocksources
+ * which can be invalidated asynchronously and indicate invalidation by
+ * returning U64_MAX, which can be effectively tested by checking for a
+ * negative value after casting it to s64.
+ */
+static inline bool arch_vdso_cycles_ok(u64 cycles)
+{
+	return (s64)cycles >= 0;
+}
+#define vdso_cycles_ok arch_vdso_cycles_ok
+
 /*
  * x86 specific delta calculation.
  *
...
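Aside (illustrative only, not part of the patch): the (s64) cast above works because an invalidated PV/Hyper-V clocksource read returns U64_MAX, whose top bit is set. A minimal standalone sketch of the same check:

```c
#include <stdint.h>
#include <stdio.h>

/* Same idea as arch_vdso_cycles_ok(): U64_MAX (the invalidation marker)
 * becomes negative when reinterpreted as a signed 64-bit value, so one
 * signed comparison rejects it while accepting any sane counter value. */
static int cycles_ok(uint64_t cycles)
{
	return (int64_t)cycles >= 0;
}

int main(void)
{
	printf("plausible TSC value: %d\n", cycles_ok(0x0000123456789abcULL)); /* prints 1 */
	printf("U64_MAX (invalid)  : %d\n", cycles_ok(UINT64_MAX));            /* prints 0 */
	return 0;
}
```

The trade-off is that a legitimate reading with bit 63 set would also be rejected, which is not a practical concern for a TSC-style counter.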
@@ -2060,7 +2060,7 @@ void __init init_apic_mappings(void)
 	unsigned int new_apicid;
 
 	if (apic_validate_deadline_timer())
-		pr_debug("TSC deadline timer available\n");
+		pr_info("TSC deadline timer available\n");
 
 	if (x2apic_mode) {
 		boot_cpu_physical_apicid = read_apic_id();
...
@@ -588,7 +588,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 	SPECTRE_V2_NONE;
 
-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+	SPECTRE_V2_USER_NONE;
+static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
 	SPECTRE_V2_USER_NONE;
 
 #ifdef CONFIG_RETPOLINE
@@ -734,15 +736,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 		break;
 	}
 
-	/*
-	 * At this point, an STIBP mode other than "off" has been set.
-	 * If STIBP support is not being forced, check if STIBP always-on
-	 * is preferred.
-	 */
-	if (mode != SPECTRE_V2_USER_STRICT &&
-	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
-		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
-
 	/* Initialize Indirect Branch Prediction Barrier */
 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
@@ -765,23 +758,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
 			static_key_enabled(&switch_mm_always_ibpb) ?
 			"always-on" : "conditional");
+
+		spectre_v2_user_ibpb = mode;
 	}
 
-	/* If enhanced IBRS is enabled no STIBP required */
-	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+	/*
+	 * If enhanced IBRS is enabled or SMT impossible, STIBP is not
+	 * required.
+	 */
+	if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
 		return;
 
 	/*
-	 * If SMT is not possible or STIBP is not available clear the STIBP
-	 * mode.
+	 * At this point, an STIBP mode other than "off" has been set.
+	 * If STIBP support is not being forced, check if STIBP always-on
+	 * is preferred.
 	 */
-	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+	if (mode != SPECTRE_V2_USER_STRICT &&
+	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+	/*
+	 * If STIBP is not available, clear the STIBP mode.
+	 */
+	if (!boot_cpu_has(X86_FEATURE_STIBP))
 		mode = SPECTRE_V2_USER_NONE;
 
+	spectre_v2_user_stibp = mode;
+
 set_mode:
-	spectre_v2_user = mode;
-	/* Only print the STIBP mode when SMT possible */
-	if (smt_possible)
-		pr_info("%s\n", spectre_v2_user_strings[mode]);
+	pr_info("%s\n", spectre_v2_user_strings[mode]);
 }
 
 static const char * const spectre_v2_strings[] = {
@@ -1014,7 +1020,7 @@ void cpu_bugs_smt_update(void)
 {
 	mutex_lock(&spec_ctrl_mutex);
 
-	switch (spectre_v2_user) {
+	switch (spectre_v2_user_stibp) {
 	case SPECTRE_V2_USER_NONE:
 		break;
 	case SPECTRE_V2_USER_STRICT:
@@ -1257,14 +1263,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
 	switch (ctrl) {
 	case PR_SPEC_ENABLE:
-		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
 			return 0;
 		/*
 		 * Indirect branch speculation is always disabled in strict
-		 * mode.
+		 * mode. It can neither be enabled if it was force-disabled
+		 * by a previous prctl call.
+		 *
 		 */
-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+		    task_spec_ib_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ib_disable(task);
 		task_update_spec_tif(task);
@@ -1275,10 +1286,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 		 * Indirect branch speculation is always allowed when
 		 * mitigation is force disabled.
 		 */
-		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
 			return -EPERM;
-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
 			return 0;
 		task_set_spec_ib_disable(task);
 		if (ctrl == PR_SPEC_FORCE_DISABLE)
@@ -1309,7 +1322,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 {
 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
-	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
 		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
 }
 #endif
@@ -1340,22 +1354,24 @@ static int ib_prctl_get(struct task_struct *task)
 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
 		return PR_SPEC_NOT_AFFECTED;
 
-	switch (spectre_v2_user) {
-	case SPECTRE_V2_USER_NONE:
+	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
 		return PR_SPEC_ENABLE;
-	case SPECTRE_V2_USER_PRCTL:
-	case SPECTRE_V2_USER_SECCOMP:
+	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+		return PR_SPEC_DISABLE;
+	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+	    spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
 		if (task_spec_ib_force_disable(task))
 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
 		if (task_spec_ib_disable(task))
 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-	case SPECTRE_V2_USER_STRICT:
-	case SPECTRE_V2_USER_STRICT_PREFERRED:
-		return PR_SPEC_DISABLE;
-	default:
+	} else
 		return PR_SPEC_NOT_AFFECTED;
-	}
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@ -1594,7 +1610,7 @@ static char *stibp_state(void)
 	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
 		return "";
 
-	switch (spectre_v2_user) {
+	switch (spectre_v2_user_stibp) {
 	case SPECTRE_V2_USER_NONE:
 		return ", STIBP: disabled";
 	case SPECTRE_V2_USER_STRICT:
...
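For context (an illustrative sketch, not part of this commit): the ib_prctl_set()/ib_prctl_get() paths above back the PR_SET/GET_SPECULATION_CTRL prctl interface, and the enforcement added here makes PR_SPEC_FORCE_DISABLE stick for indirect branches. A small userspace demo of that behaviour; exact return values depend on the kernel's spectre_v2_user= mode (e.g. prctl/seccomp modes accept these calls, strict modes reject them):

```c
/* Build: gcc -o ib_spec ib_spec.c
 * Needs a kernel exposing PR_SET/GET_SPECULATION_CTRL (see linux/prctl.h). */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int ret;

	/* Permanently disable indirect branch speculation for this task. */
	ret = prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		    PR_SPEC_FORCE_DISABLE, 0, 0);
	printf("force-disable: %s\n", ret ? strerror(errno) : "ok");

	/* With the enforcement added above, re-enabling must fail (EPERM). */
	ret = prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		    PR_SPEC_ENABLE, 0, 0);
	printf("re-enable    : %s\n", ret ? strerror(errno) : "unexpectedly ok");

	/* Query the per-task state (a bitmask of PR_SPEC_* flags). */
	ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
	if (ret >= 0)
		printf("state        : %#x\n", ret);
	return 0;
}
```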
@@ -1142,9 +1142,12 @@ void switch_to_sld(unsigned long tifn)
 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		0),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		0),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		0),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	1),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	1),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		1),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		1),
 	{}
 };
...
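Illustrative only (not from the patch): whether the kernel actually enabled split lock detection on a given machine can be checked from userspace via the CPU feature flags. The flag name used below, "split_lock_detect", is an assumption based on the kernel's X86_FEATURE_* naming; it only appears when the CPU is on the list above (or enumerates the feature) and the detector was enabled:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");
	int found = 0;

	if (!f) {
		perror("/proc/cpuinfo");
		return 1;
	}
	/* The "flags" line lists lower-cased X86_FEATURE_* names. */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "flags", 5) && strstr(line, "split_lock_detect")) {
			found = 1;
			break;
		}
	}
	fclose(f);
	puts(found ? "split lock detection enabled"
		   : "split lock detection not reported");
	return 0;
}
```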
@@ -545,28 +545,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	lockdep_assert_irqs_disabled();
 
-	/*
-	 * If TIF_SSBD is different, select the proper mitigation
-	 * method. Note that if SSBD mitigation is disabled or permanentely
-	 * enabled this branch can't be taken because nothing can set
-	 * TIF_SSBD.
-	 */
-	if (tif_diff & _TIF_SSBD) {
-		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+	/* Handle change of TIF_SSBD depending on the mitigation method. */
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+		if (tif_diff & _TIF_SSBD)
 			amd_set_ssb_virt_state(tifn);
-		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+		if (tif_diff & _TIF_SSBD)
 			amd_set_core_ssb_state(tifn);
-		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-			msr |= ssbd_tif_to_spec_ctrl(tifn);
-			updmsr = true;
-		}
+	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		updmsr |= !!(tif_diff & _TIF_SSBD);
+		msr |= ssbd_tif_to_spec_ctrl(tifn);
 	}
 
-	/*
-	 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
-	 * otherwise avoid the MSR write.
-	 */
+	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
 	if (IS_ENABLED(CONFIG_SMP) &&
 	    static_branch_unlikely(&switch_to_cond_stibp)) {
 		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
...
@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
 		},
 	},
+	{	/* Handle problems with rebooting on Apple MacBook6,1 */
+		.callback = set_pci_reboot,
+		.ident = "Apple MacBook6,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+		},
+	},
 	{	/* Handle problems with rebooting on Apple MacBookPro5 */
 		.callback = set_pci_reboot,
 		.ident = "Apple MacBookPro5",
...
@@ -25,10 +25,6 @@
 #include <asm/hpet.h>
 #include <asm/time.h>
 
-#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-#endif
-
 unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
...
@@ -40,13 +40,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
 #ifdef CONFIG_X86_32
 OUTPUT_ARCH(i386)
 ENTRY(phys_startup_32)
-jiffies = jiffies_64;
 #else
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
-jiffies_64 = jiffies;
 #endif
 
+jiffies = jiffies_64;
+
 #if defined(CONFIG_X86_64)
 /*
  * On 64-bit, align RODATA to 2MB so we retain large page mappings for
...
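Aside (a rough sketch, not kernel code): together with the time.c hunk above, the fix removes the separate C definition of jiffies on 64-bit and makes jiffies a pure linker-script alias of jiffies_64 on both 32- and 64-bit, so the compiler only ever sees one definition. The same pattern in a standalone program, assuming GNU ld accepts the extra .lds file as an implicit linker script; the names counter/counter64 are hypothetical stand-ins for jiffies/jiffies_64:

```c
/* demo.c -- build with:  gcc -no-pie demo.c alias.lds -o demo
 * where alias.lds contains the single line:  counter = counter64;
 */
#include <stdio.h>

unsigned long long counter64 = 0x1122334455667788ULL;	/* the one real object */
extern unsigned long counter;				/* defined only by the linker-script alias */

int main(void)
{
	/* On little-endian x86 the narrow name reads the low bits of the
	 * wide counter -- the jiffies/jiffies_64 relationship. */
	printf("counter64 = %#llx\n", counter64);
	printf("counter   = %#lx\n", counter);
	return 0;
}
```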
@@ -928,14 +928,12 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 
 	clocksource_arch_init(cs);
 
-#ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE
 	if (cs->vdso_clock_mode < 0 ||
 	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
 		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
 			cs->name, cs->vdso_clock_mode);
 		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
 	}
-#endif
 
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
...
@@ -38,6 +38,13 @@ static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
 }
 #endif
 
+#ifndef vdso_cycles_ok
+static inline bool vdso_cycles_ok(u64 cycles)
+{
+	return true;
+}
+#endif
+
 #ifdef CONFIG_TIME_NS
 static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
 			  struct __kernel_timespec *ts)
@@ -62,6 +69,8 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
 			return -1;
 
 		cycles = __arch_get_hw_counter(vd->clock_mode);
+		if (unlikely(!vdso_cycles_ok(cycles)))
+			return -1;
 		ns = vdso_ts->nsec;
 		last = vd->cycle_last;
 		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
@@ -130,6 +139,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
 			return -1;
 
 		cycles = __arch_get_hw_counter(vd->clock_mode);
+		if (unlikely(!vdso_cycles_ok(cycles)))
+			return -1;
 		ns = vdso_ts->nsec;
 		last = vd->cycle_last;
 		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
...