Commit 14dcf78a authored by Catalin Marinas

Merge branch 'for-next/cpus_have_const_cap' into for-next/core

* for-next/cpus_have_const_cap: (38 commits)
  : cpus_have_const_cap() removal
  arm64: Remove cpus_have_const_cap()
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_REPEAT_TLBI
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_NVIDIA_CARMEL_CNP
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_CAVIUM_23154
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_2645198
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1742098
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1542419
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_843419
  arm64: Avoid cpus_have_const_cap() for ARM64_UNMAP_KERNEL_AT_EL0
  arm64: Avoid cpus_have_const_cap() for ARM64_{SVE,SME,SME2,FA64}
  arm64: Avoid cpus_have_const_cap() for ARM64_SPECTRE_V2
  arm64: Avoid cpus_have_const_cap() for ARM64_SSBS
  arm64: Avoid cpus_have_const_cap() for ARM64_MTE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_TLB_RANGE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_WFXT
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_RNG
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_EPAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_PAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_GIC_PRIO_MASKING
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_DIT
  ...
parents 2baca17e e8d4006d
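
Note (not part of the commit text): the series converts callers of cpus_have_const_cap() to one of a few replacements depending on when the check may run — alternative_has_cap_{likely,unlikely}() for checks patched once capabilities are finalized, cpus_have_final_cap()/cpus_have_final_boot_cap() where premature use should BUG(), and plain cpus_have_cap() for early boot paths. A minimal sketch of the before/after shape, using a hypothetical ARM64_HAS_FOO cap and CONFIG_ARM64_FOO option that are not in this commit:

```c
#include <asm/cpufeature.h>	/* cpus_have_*_cap(), alternative_has_cap_*() */

/* Before: Kconfig gate plus the (now removed) cpus_have_const_cap() helper. */
static inline bool system_supports_foo_old(void)
{
	return IS_ENABLED(CONFIG_ARM64_FOO) &&
	       cpus_have_const_cap(ARM64_HAS_FOO);
}

/* After: a single patched check; the Kconfig gate moves into cpucap_is_possible(). */
static __always_inline bool system_supports_foo_new(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_FOO);
}
```
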
...@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu) ...@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
BUG_ON(err); BUG_ON(err);
per_cpu(xen_vcpu, cpu) = vcpup; per_cpu(xen_vcpu, cpu) = vcpup;
if (!xen_kernel_unmapped_at_usr())
xen_setup_runstate_info(cpu);
after_register_vcpu_info: after_register_vcpu_info:
enable_percpu_irq(xen_events_irq, 0); enable_percpu_irq(xen_events_irq, 0);
return 0; return 0;
...@@ -523,9 +520,6 @@ static int __init xen_guest_init(void) ...@@ -523,9 +520,6 @@ static int __init xen_guest_init(void)
return -EINVAL; return -EINVAL;
} }
if (!xen_kernel_unmapped_at_usr())
xen_time_setup_guest();
if (xen_initial_domain()) if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
...@@ -535,7 +529,13 @@ static int __init xen_guest_init(void) ...@@ -535,7 +529,13 @@ static int __init xen_guest_init(void)
} }
early_initcall(xen_guest_init); early_initcall(xen_guest_init);
static int __init xen_pm_init(void) static int xen_starting_runstate_cpu(unsigned int cpu)
{
xen_setup_runstate_info(cpu);
return 0;
}
static int __init xen_late_init(void)
{ {
if (!xen_domain()) if (!xen_domain())
return -ENODEV; return -ENODEV;
...@@ -548,9 +548,16 @@ static int __init xen_pm_init(void) ...@@ -548,9 +548,16 @@ static int __init xen_pm_init(void)
do_settimeofday64(&ts); do_settimeofday64(&ts);
} }
if (xen_kernel_unmapped_at_usr())
return 0; return 0;
xen_time_setup_guest();
return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
"arm/xen_runstate:starting",
xen_starting_runstate_cpu, NULL);
} }
late_initcall(xen_pm_init); late_initcall(xen_late_init);
/* empty stubs */ /* empty stubs */
......
...@@ -6,5 +6,5 @@ generic-y += qspinlock.h ...@@ -6,5 +6,5 @@ generic-y += qspinlock.h
generic-y += parport.h generic-y += parport.h
generic-y += user.h generic-y += user.h
generated-y += cpucaps.h generated-y += cpucap-defs.h
generated-y += sysreg-defs.h generated-y += sysreg-defs.h
...@@ -226,8 +226,8 @@ alternative_endif ...@@ -226,8 +226,8 @@ alternative_endif
static __always_inline bool static __always_inline bool
alternative_has_cap_likely(const unsigned long cpucap) alternative_has_cap_likely(const unsigned long cpucap)
{ {
compiletime_assert(cpucap < ARM64_NCAPS, if (!cpucap_is_possible(cpucap))
"cpucap must be < ARM64_NCAPS"); return false;
asm_volatile_goto( asm_volatile_goto(
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops) ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
...@@ -244,8 +244,8 @@ alternative_has_cap_likely(const unsigned long cpucap) ...@@ -244,8 +244,8 @@ alternative_has_cap_likely(const unsigned long cpucap)
static __always_inline bool static __always_inline bool
alternative_has_cap_unlikely(const unsigned long cpucap) alternative_has_cap_unlikely(const unsigned long cpucap)
{ {
compiletime_assert(cpucap < ARM64_NCAPS, if (!cpucap_is_possible(cpucap))
"cpucap must be < ARM64_NCAPS"); return false;
asm_volatile_goto( asm_volatile_goto(
ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap]) ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
......
...@@ -79,6 +79,14 @@ static inline u64 gic_read_iar_cavium_thunderx(void) ...@@ -79,6 +79,14 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
return 0x3ff; return 0x3ff;
} }
static u64 __maybe_unused gic_read_iar(void)
{
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
}
static inline void gic_write_ctlr(u32 val) static inline void gic_write_ctlr(u32 val)
{ {
write_sysreg_s(val, SYS_ICC_CTLR_EL1); write_sysreg_s(val, SYS_ICC_CTLR_EL1);
......
...@@ -63,7 +63,7 @@ static __always_inline bool __cpu_has_rng(void) ...@@ -63,7 +63,7 @@ static __always_inline bool __cpu_has_rng(void)
{ {
if (unlikely(!system_capabilities_finalized() && !preemptible())) if (unlikely(!system_capabilities_finalized() && !preemptible()))
return this_cpu_has_cap(ARM64_HAS_RNG); return this_cpu_has_cap(ARM64_HAS_RNG);
return cpus_have_const_cap(ARM64_HAS_RNG); return alternative_has_cap_unlikely(ARM64_HAS_RNG);
} }
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
......
...@@ -132,7 +132,7 @@ void flush_dcache_folio(struct folio *); ...@@ -132,7 +132,7 @@ void flush_dcache_folio(struct folio *);
static __always_inline void icache_inval_all_pou(void) static __always_inline void icache_inval_all_pou(void)
{ {
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) if (alternative_has_cap_unlikely(ARM64_HAS_CACHE_DIC))
return; return;
asm("ic ialluis"); asm("ic ialluis");
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CPUCAPS_H
#define __ASM_CPUCAPS_H
#include <asm/cpucap-defs.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* Check whether a cpucap is possible at compiletime.
*/
static __always_inline bool
cpucap_is_possible(const unsigned int cap)
{
compiletime_assert(__builtin_constant_p(cap),
"cap must be a constant");
compiletime_assert(cap < ARM64_NCAPS,
"cap must be < ARM64_NCAPS");
switch (cap) {
case ARM64_HAS_PAN:
return IS_ENABLED(CONFIG_ARM64_PAN);
case ARM64_HAS_EPAN:
return IS_ENABLED(CONFIG_ARM64_EPAN);
case ARM64_SVE:
return IS_ENABLED(CONFIG_ARM64_SVE);
case ARM64_SME:
case ARM64_SME2:
case ARM64_SME_FA64:
return IS_ENABLED(CONFIG_ARM64_SME);
case ARM64_HAS_CNP:
return IS_ENABLED(CONFIG_ARM64_CNP);
case ARM64_HAS_ADDRESS_AUTH:
case ARM64_HAS_GENERIC_AUTH:
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH);
case ARM64_HAS_GIC_PRIO_MASKING:
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI);
case ARM64_MTE:
return IS_ENABLED(CONFIG_ARM64_MTE);
case ARM64_BTI:
return IS_ENABLED(CONFIG_ARM64_BTI);
case ARM64_HAS_TLB_RANGE:
return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
case ARM64_UNMAP_KERNEL_AT_EL0:
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
case ARM64_WORKAROUND_843419:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419);
case ARM64_WORKAROUND_1742098:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_1742098);
case ARM64_WORKAROUND_2645198:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198);
case ARM64_WORKAROUND_2658417:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_2658417);
case ARM64_WORKAROUND_CAVIUM_23154:
return IS_ENABLED(CONFIG_CAVIUM_ERRATUM_23154);
case ARM64_WORKAROUND_NVIDIA_CARMEL_CNP:
return IS_ENABLED(CONFIG_NVIDIA_CARMEL_CNP_ERRATUM);
case ARM64_WORKAROUND_REPEAT_TLBI:
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
}
return true;
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CPUCAPS_H */
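
Note (not part of the commit text): because cpucap_is_possible() folds to a compile-time constant, alternative_has_cap_*() and cpus_have_cap() can discard checks for capabilities whose Kconfig option is disabled. A hedged illustration — the caller and helper below are hypothetical, only ARM64_SVE and CONFIG_ARM64_SVE are real:

```c
#include <asm/cpufeature.h>

static void foo_sve_work(void)	/* hypothetical helper, for illustration only */
{
}

static void maybe_use_sve_path(void)
{
	/*
	 * With CONFIG_ARM64_SVE=n, cpucap_is_possible(ARM64_SVE) is false at
	 * compile time, so alternative_has_cap_unlikely() returns false before
	 * emitting any asm goto and the branch below is eliminated entirely.
	 */
	if (alternative_has_cap_unlikely(ARM64_SVE))
		foo_sve_work();
}
```
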
...@@ -440,6 +440,11 @@ unsigned long cpu_get_elf_hwcap2(void); ...@@ -440,6 +440,11 @@ unsigned long cpu_get_elf_hwcap2(void);
#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name)) #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name)) #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
static __always_inline bool boot_capabilities_finalized(void)
{
return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
}
static __always_inline bool system_capabilities_finalized(void) static __always_inline bool system_capabilities_finalized(void)
{ {
return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM); return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
...@@ -452,6 +457,8 @@ static __always_inline bool system_capabilities_finalized(void) ...@@ -452,6 +457,8 @@ static __always_inline bool system_capabilities_finalized(void)
*/ */
static __always_inline bool cpus_have_cap(unsigned int num) static __always_inline bool cpus_have_cap(unsigned int num)
{ {
if (__builtin_constant_p(num) && !cpucap_is_possible(num))
return false;
if (num >= ARM64_NCAPS) if (num >= ARM64_NCAPS)
return false; return false;
return arch_test_bit(num, system_cpucaps); return arch_test_bit(num, system_cpucaps);
...@@ -460,55 +467,37 @@ static __always_inline bool cpus_have_cap(unsigned int num) ...@@ -460,55 +467,37 @@ static __always_inline bool cpus_have_cap(unsigned int num)
/* /*
* Test for a capability without a runtime check. * Test for a capability without a runtime check.
* *
* Before capabilities are finalized, this returns false. * Before boot capabilities are finalized, this will BUG().
* After capabilities are finalized, this is patched to avoid a runtime check. * After boot capabilities are finalized, this is patched to avoid a runtime
* check.
* *
* @num must be a compile-time constant. * @num must be a compile-time constant.
*/ */
static __always_inline bool __cpus_have_const_cap(int num) static __always_inline bool cpus_have_final_boot_cap(int num)
{ {
if (num >= ARM64_NCAPS) if (boot_capabilities_finalized())
return false;
return alternative_has_cap_unlikely(num); return alternative_has_cap_unlikely(num);
else
BUG();
} }
/* /*
* Test for a capability without a runtime check. * Test for a capability without a runtime check.
* *
* Before capabilities are finalized, this will BUG(). * Before system capabilities are finalized, this will BUG().
* After capabilities are finalized, this is patched to avoid a runtime check. * After system capabilities are finalized, this is patched to avoid a runtime
* check.
* *
* @num must be a compile-time constant. * @num must be a compile-time constant.
*/ */
static __always_inline bool cpus_have_final_cap(int num) static __always_inline bool cpus_have_final_cap(int num)
{ {
if (system_capabilities_finalized()) if (system_capabilities_finalized())
return __cpus_have_const_cap(num); return alternative_has_cap_unlikely(num);
else else
BUG(); BUG();
} }
/*
* Test for a capability, possibly with a runtime check for non-hyp code.
*
* For hyp code, this behaves the same as cpus_have_final_cap().
*
* For non-hyp code:
* Before capabilities are finalized, this behaves as cpus_have_cap().
* After capabilities are finalized, this is patched to avoid a runtime check.
*
* @num must be a compile-time constant.
*/
static __always_inline bool cpus_have_const_cap(int num)
{
if (is_hyp_code())
return cpus_have_final_cap(num);
else if (system_capabilities_finalized())
return __cpus_have_const_cap(num);
else
return cpus_have_cap(num);
}
static inline int __attribute_const__ static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width) cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{ {
...@@ -628,7 +617,9 @@ static inline bool id_aa64pfr1_mte(u64 pfr1) ...@@ -628,7 +617,9 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
return val >= ID_AA64PFR1_EL1_MTE_MTE2; return val >= ID_AA64PFR1_EL1_MTE_MTE2;
} }
void __init setup_cpu_features(void); void __init setup_system_features(void);
void __init setup_user_features(void);
void check_local_cpu_capabilities(void); void check_local_cpu_capabilities(void);
u64 read_sanitised_ftr_reg(u32 id); u64 read_sanitised_ftr_reg(u32 id);
...@@ -737,13 +728,12 @@ static inline bool system_supports_mixed_endian(void) ...@@ -737,13 +728,12 @@ static inline bool system_supports_mixed_endian(void)
static __always_inline bool system_supports_fpsimd(void) static __always_inline bool system_supports_fpsimd(void)
{ {
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
} }
static inline bool system_uses_hw_pan(void) static inline bool system_uses_hw_pan(void)
{ {
return IS_ENABLED(CONFIG_ARM64_PAN) && return alternative_has_cap_unlikely(ARM64_HAS_PAN);
cpus_have_const_cap(ARM64_HAS_PAN);
} }
static inline bool system_uses_ttbr0_pan(void) static inline bool system_uses_ttbr0_pan(void)
...@@ -754,26 +744,22 @@ static inline bool system_uses_ttbr0_pan(void) ...@@ -754,26 +744,22 @@ static inline bool system_uses_ttbr0_pan(void)
static __always_inline bool system_supports_sve(void) static __always_inline bool system_supports_sve(void)
{ {
return IS_ENABLED(CONFIG_ARM64_SVE) && return alternative_has_cap_unlikely(ARM64_SVE);
cpus_have_const_cap(ARM64_SVE);
} }
static __always_inline bool system_supports_sme(void) static __always_inline bool system_supports_sme(void)
{ {
return IS_ENABLED(CONFIG_ARM64_SME) && return alternative_has_cap_unlikely(ARM64_SME);
cpus_have_const_cap(ARM64_SME);
} }
static __always_inline bool system_supports_sme2(void) static __always_inline bool system_supports_sme2(void)
{ {
return IS_ENABLED(CONFIG_ARM64_SME) && return alternative_has_cap_unlikely(ARM64_SME2);
cpus_have_const_cap(ARM64_SME2);
} }
static __always_inline bool system_supports_fa64(void) static __always_inline bool system_supports_fa64(void)
{ {
return IS_ENABLED(CONFIG_ARM64_SME) && return alternative_has_cap_unlikely(ARM64_SME_FA64);
cpus_have_const_cap(ARM64_SME_FA64);
} }
static __always_inline bool system_supports_tpidr2(void) static __always_inline bool system_supports_tpidr2(void)
...@@ -783,20 +769,17 @@ static __always_inline bool system_supports_tpidr2(void) ...@@ -783,20 +769,17 @@ static __always_inline bool system_supports_tpidr2(void)
static __always_inline bool system_supports_cnp(void) static __always_inline bool system_supports_cnp(void)
{ {
return IS_ENABLED(CONFIG_ARM64_CNP) && return alternative_has_cap_unlikely(ARM64_HAS_CNP);
cpus_have_const_cap(ARM64_HAS_CNP);
} }
static inline bool system_supports_address_auth(void) static inline bool system_supports_address_auth(void)
{ {
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
} }
static inline bool system_supports_generic_auth(void) static inline bool system_supports_generic_auth(void)
{ {
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
} }
static inline bool system_has_full_ptr_auth(void) static inline bool system_has_full_ptr_auth(void)
...@@ -806,14 +789,12 @@ static inline bool system_has_full_ptr_auth(void) ...@@ -806,14 +789,12 @@ static inline bool system_has_full_ptr_auth(void)
static __always_inline bool system_uses_irq_prio_masking(void) static __always_inline bool system_uses_irq_prio_masking(void)
{ {
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
} }
static inline bool system_supports_mte(void) static inline bool system_supports_mte(void)
{ {
return IS_ENABLED(CONFIG_ARM64_MTE) && return alternative_has_cap_unlikely(ARM64_MTE);
cpus_have_const_cap(ARM64_MTE);
} }
static inline bool system_has_prio_mask_debugging(void) static inline bool system_has_prio_mask_debugging(void)
...@@ -824,13 +805,18 @@ static inline bool system_has_prio_mask_debugging(void) ...@@ -824,13 +805,18 @@ static inline bool system_has_prio_mask_debugging(void)
static inline bool system_supports_bti(void) static inline bool system_supports_bti(void)
{ {
return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI); return cpus_have_final_cap(ARM64_BTI);
}
static inline bool system_supports_bti_kernel(void)
{
return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
cpus_have_final_boot_cap(ARM64_BTI);
} }
static inline bool system_supports_tlb_range(void) static inline bool system_supports_tlb_range(void)
{ {
return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) && return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
} }
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
......
...@@ -32,6 +32,32 @@ ...@@ -32,6 +32,32 @@
#define VFP_STATE_SIZE ((32 * 8) + 4) #define VFP_STATE_SIZE ((32 * 8) + 4)
#endif #endif
static inline unsigned long cpacr_save_enable_kernel_sve(void)
{
unsigned long old = read_sysreg(cpacr_el1);
unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN;
write_sysreg(old | set, cpacr_el1);
isb();
return old;
}
static inline unsigned long cpacr_save_enable_kernel_sme(void)
{
unsigned long old = read_sysreg(cpacr_el1);
unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_SMEN_EL1EN;
write_sysreg(old | set, cpacr_el1);
isb();
return old;
}
static inline void cpacr_restore(unsigned long cpacr)
{
write_sysreg(cpacr, cpacr_el1);
isb();
}
/* /*
* When we defined the maximum SVE vector length we defined the ABI so * When we defined the maximum SVE vector length we defined the ABI so
* that the maximum vector length included all the reserved for future * that the maximum vector length included all the reserved for future
...@@ -123,10 +149,11 @@ extern void sme_save_state(void *state, int zt); ...@@ -123,10 +149,11 @@ extern void sme_save_state(void *state, int zt);
extern void sme_load_state(void const *state, int zt); extern void sme_load_state(void const *state, int zt);
struct arm64_cpu_capabilities; struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__unused);
extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
extern u64 read_smcr_features(void); extern u64 read_smcr_features(void);
......
...@@ -21,12 +21,6 @@ ...@@ -21,12 +21,6 @@
* exceptions should be unmasked. * exceptions should be unmasked.
*/ */
static __always_inline bool __irqflags_uses_pmr(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}
static __always_inline void __daif_local_irq_enable(void) static __always_inline void __daif_local_irq_enable(void)
{ {
barrier(); barrier();
...@@ -49,7 +43,7 @@ static __always_inline void __pmr_local_irq_enable(void) ...@@ -49,7 +43,7 @@ static __always_inline void __pmr_local_irq_enable(void)
static inline void arch_local_irq_enable(void) static inline void arch_local_irq_enable(void)
{ {
if (__irqflags_uses_pmr()) { if (system_uses_irq_prio_masking()) {
__pmr_local_irq_enable(); __pmr_local_irq_enable();
} else { } else {
__daif_local_irq_enable(); __daif_local_irq_enable();
...@@ -77,7 +71,7 @@ static __always_inline void __pmr_local_irq_disable(void) ...@@ -77,7 +71,7 @@ static __always_inline void __pmr_local_irq_disable(void)
static inline void arch_local_irq_disable(void) static inline void arch_local_irq_disable(void)
{ {
if (__irqflags_uses_pmr()) { if (system_uses_irq_prio_masking()) {
__pmr_local_irq_disable(); __pmr_local_irq_disable();
} else { } else {
__daif_local_irq_disable(); __daif_local_irq_disable();
...@@ -99,7 +93,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void) ...@@ -99,7 +93,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void)
*/ */
static inline unsigned long arch_local_save_flags(void) static inline unsigned long arch_local_save_flags(void)
{ {
if (__irqflags_uses_pmr()) { if (system_uses_irq_prio_masking()) {
return __pmr_local_save_flags(); return __pmr_local_save_flags();
} else { } else {
return __daif_local_save_flags(); return __daif_local_save_flags();
...@@ -118,7 +112,7 @@ static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags) ...@@ -118,7 +112,7 @@ static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
static inline bool arch_irqs_disabled_flags(unsigned long flags) static inline bool arch_irqs_disabled_flags(unsigned long flags)
{ {
if (__irqflags_uses_pmr()) { if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled_flags(flags); return __pmr_irqs_disabled_flags(flags);
} else { } else {
return __daif_irqs_disabled_flags(flags); return __daif_irqs_disabled_flags(flags);
...@@ -137,7 +131,7 @@ static __always_inline bool __pmr_irqs_disabled(void) ...@@ -137,7 +131,7 @@ static __always_inline bool __pmr_irqs_disabled(void)
static inline bool arch_irqs_disabled(void) static inline bool arch_irqs_disabled(void)
{ {
if (__irqflags_uses_pmr()) { if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled(); return __pmr_irqs_disabled();
} else { } else {
return __daif_irqs_disabled(); return __daif_irqs_disabled();
...@@ -169,7 +163,7 @@ static __always_inline unsigned long __pmr_local_irq_save(void) ...@@ -169,7 +163,7 @@ static __always_inline unsigned long __pmr_local_irq_save(void)
static inline unsigned long arch_local_irq_save(void) static inline unsigned long arch_local_irq_save(void)
{ {
if (__irqflags_uses_pmr()) { if (system_uses_irq_prio_masking()) {
return __pmr_local_irq_save(); return __pmr_local_irq_save();
} else { } else {
return __daif_local_irq_save(); return __daif_local_irq_save();
...@@ -196,7 +190,7 @@ static __always_inline void __pmr_local_irq_restore(unsigned long flags) ...@@ -196,7 +190,7 @@ static __always_inline void __pmr_local_irq_restore(unsigned long flags)
*/ */
static inline void arch_local_irq_restore(unsigned long flags) static inline void arch_local_irq_restore(unsigned long flags)
{ {
if (__irqflags_uses_pmr()) { if (system_uses_irq_prio_masking()) {
__pmr_local_irq_restore(flags); __pmr_local_irq_restore(flags);
} else { } else {
__daif_local_irq_restore(flags); __daif_local_irq_restore(flags);
......
...@@ -71,14 +71,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) ...@@ -71,14 +71,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
if (has_vhe() || has_hvhe()) if (has_vhe() || has_hvhe())
vcpu->arch.hcr_el2 |= HCR_E2H; vcpu->arch.hcr_el2 |= HCR_E2H;
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) { if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
/* route synchronous external abort exceptions to EL2 */ /* route synchronous external abort exceptions to EL2 */
vcpu->arch.hcr_el2 |= HCR_TEA; vcpu->arch.hcr_el2 |= HCR_TEA;
/* trap error record accesses */ /* trap error record accesses */
vcpu->arch.hcr_el2 |= HCR_TERR; vcpu->arch.hcr_el2 |= HCR_TERR;
} }
if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) { if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
vcpu->arch.hcr_el2 |= HCR_FWB; vcpu->arch.hcr_el2 |= HCR_FWB;
} else { } else {
/* /*
......
...@@ -1052,7 +1052,7 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) ...@@ -1052,7 +1052,7 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
static inline bool kvm_system_needs_idmapped_vectors(void) static inline bool kvm_system_needs_idmapped_vectors(void)
{ {
return cpus_have_const_cap(ARM64_SPECTRE_V3A); return cpus_have_final_cap(ARM64_SPECTRE_V3A);
} }
static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {}
......
...@@ -218,7 +218,7 @@ static inline void __clean_dcache_guest_page(void *va, size_t size) ...@@ -218,7 +218,7 @@ static inline void __clean_dcache_guest_page(void *va, size_t size)
* faulting in pages. Furthermore, FWB implies IDC, so cleaning to * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
* PoU is not required either in this case. * PoU is not required either in this case.
*/ */
if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
return; return;
kvm_flush_dcache_to_poc(va, size); kvm_flush_dcache_to_poc(va, size);
......
...@@ -57,7 +57,7 @@ typedef struct { ...@@ -57,7 +57,7 @@ typedef struct {
static inline bool arm64_kernel_unmapped_at_el0(void) static inline bool arm64_kernel_unmapped_at_el0(void)
{ {
return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); return alternative_has_cap_unlikely(ARM64_UNMAP_KERNEL_AT_EL0);
} }
extern void arm64_memblock_init(void); extern void arm64_memblock_init(void);
......
...@@ -152,7 +152,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz) ...@@ -152,7 +152,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated. * avoiding the possibility of conflicting TLB entries being allocated.
*/ */
static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap) static inline void __cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap, bool cnp)
{ {
typedef void (ttbr_replace_func)(phys_addr_t); typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1; extern ttbr_replace_func idmap_cpu_replace_ttbr1;
...@@ -162,17 +162,8 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap) ...@@ -162,17 +162,8 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */ /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp)); phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) { if (cnp)
/*
* cpu_replace_ttbr1() is used when there's a boot CPU
* up (i.e. cpufeature framework is not up yet) and
* latter only when we enable CNP via cpufeature's
* enable() callback.
* Also we rely on the system_cpucaps bit being set before
* calling the enable() function.
*/
ttbr1 |= TTBR_CNP_BIT; ttbr1 |= TTBR_CNP_BIT;
}
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
...@@ -189,6 +180,21 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap) ...@@ -189,6 +180,21 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
cpu_uninstall_idmap(); cpu_uninstall_idmap();
} }
static inline void cpu_enable_swapper_cnp(void)
{
__cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir, true);
}
static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
/*
* Only for early TTBR1 replacement before cpucaps are finalized and
* before we've decided whether to use CNP.
*/
WARN_ON(system_capabilities_finalized());
__cpu_replace_ttbr1(pgdp, idmap, false);
}
/* /*
* It would be nice to return ASIDs back to the allocator, but unfortunately * It would be nice to return ASIDs back to the allocator, but unfortunately
* that introduces a race with a generation rollover where we could erroneously * that introduces a race with a generation rollover where we could erroneously
......
...@@ -44,8 +44,7 @@ struct plt_entry { ...@@ -44,8 +44,7 @@ struct plt_entry {
static inline bool is_forbidden_offset_for_adrp(void *place) static inline bool is_forbidden_offset_for_adrp(void *place)
{ {
return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) && return cpus_have_final_cap(ARM64_WORKAROUND_843419) &&
cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
((u64)place & 0xfff) >= 0xff8; ((u64)place & 0xfff) >= 0xff8;
} }
......
...@@ -75,11 +75,7 @@ extern bool arm64_use_ng_mappings; ...@@ -75,11 +75,7 @@ extern bool arm64_use_ng_mappings;
* If we have userspace only BTI we don't want to mark kernel pages * If we have userspace only BTI we don't want to mark kernel pages
* guarded even if the system does support BTI. * guarded even if the system does support BTI.
*/ */
#ifdef CONFIG_ARM64_BTI_KERNEL #define PTE_MAYBE_GP (system_supports_bti_kernel() ? PTE_GP : 0)
#define PTE_MAYBE_GP (system_supports_bti() ? PTE_GP : 0)
#else
#define PTE_MAYBE_GP 0
#endif
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
......
...@@ -73,7 +73,7 @@ static __always_inline void arm64_apply_bp_hardening(void) ...@@ -73,7 +73,7 @@ static __always_inline void arm64_apply_bp_hardening(void)
{ {
struct bp_hardening_data *d; struct bp_hardening_data *d;
if (!cpus_have_const_cap(ARM64_SPECTRE_V2)) if (!alternative_has_cap_unlikely(ARM64_SPECTRE_V2))
return; return;
d = this_cpu_ptr(&bp_hardening_data); d = this_cpu_ptr(&bp_hardening_data);
......
...@@ -105,7 +105,7 @@ static inline unsigned long get_trans_granule(void) ...@@ -105,7 +105,7 @@ static inline unsigned long get_trans_granule(void)
#define __tlbi_level(op, addr, level) do { \ #define __tlbi_level(op, addr, level) do { \
u64 arg = addr; \ u64 arg = addr; \
\ \
if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && \ if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && \
level) { \ level) { \
u64 ttl = level & 3; \ u64 ttl = level & 3; \
ttl |= get_trans_granule() << 2; \ ttl |= get_trans_granule() << 2; \
...@@ -284,16 +284,15 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, ...@@ -284,16 +284,15 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm) static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{ {
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/* /*
* TLB flush deferral is not required on systems which are affected by * TLB flush deferral is not required on systems which are affected by
* ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation * ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
* will have two consecutive TLBI instructions with a dsb(ish) in between * will have two consecutive TLBI instructions with a dsb(ish) in between
* defeating the purpose (i.e save overall 'dsb ish' cost). * defeating the purpose (i.e save overall 'dsb ish' cost).
*/ */
if (unlikely(cpus_have_const_cap(ARM64_WORKAROUND_REPEAT_TLBI))) if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
return false; return false;
#endif
return true; return true;
} }
......
...@@ -62,7 +62,7 @@ DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector); ...@@ -62,7 +62,7 @@ DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
static inline const char * static inline const char *
arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot) arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
{ {
if (arm64_kernel_unmapped_at_el0()) if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return (char *)(TRAMP_VALIAS + SZ_2K * slot); return (char *)(TRAMP_VALIAS + SZ_2K * slot);
WARN_ON_ONCE(slot == EL1_VECTOR_KPTI); WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
......
...@@ -121,22 +121,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) ...@@ -121,22 +121,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
} }
static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
struct arm64_ftr_reg *regp;
regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
if (!regp)
return;
raw_spin_lock(&reg_user_mask_modification);
if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
raw_spin_unlock(&reg_user_mask_modification);
}
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \ .matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
...@@ -727,7 +711,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -727,7 +711,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A510 r0p0 - r1p1 */ /* Cortex-A510 r0p0 - r1p1 */
ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1), ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)), MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
.cpu_enable = cpu_clear_bf16_from_user_emulation,
}, },
#endif #endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38 #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
......
...@@ -113,8 +113,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) ...@@ -113,8 +113,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
if (md->attribute & EFI_MEMORY_XP) if (md->attribute & EFI_MEMORY_XP)
pte = set_pte_bit(pte, __pgprot(PTE_PXN)); pte = set_pte_bit(pte, __pgprot(PTE_PXN));
else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && else if (system_supports_bti_kernel() && spd->has_bti)
system_supports_bti() && spd->has_bti)
pte = set_pte_bit(pte, __pgprot(PTE_GP)); pte = set_pte_bit(pte, __pgprot(PTE_GP));
set_pte(ptep, pte); set_pte(ptep, pte);
return 0; return 0;
......
...@@ -1160,11 +1160,7 @@ static void __init sve_efi_setup(void) ...@@ -1160,11 +1160,7 @@ static void __init sve_efi_setup(void)
panic("Cannot allocate percpu memory for EFI SVE save/restore"); panic("Cannot allocate percpu memory for EFI SVE save/restore");
} }
/* void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
* Enable SVE for EL1.
* Intended for use by the cpufeatures code during CPU boot.
*/
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{ {
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1); write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
isb(); isb();
...@@ -1177,7 +1173,7 @@ void __init sve_setup(void) ...@@ -1177,7 +1173,7 @@ void __init sve_setup(void)
unsigned long b; unsigned long b;
int max_bit; int max_bit;
if (!system_supports_sve()) if (!cpus_have_cap(ARM64_SVE))
return; return;
/* /*
...@@ -1267,7 +1263,7 @@ static void sme_free(struct task_struct *task) ...@@ -1267,7 +1263,7 @@ static void sme_free(struct task_struct *task)
task->thread.sme_state = NULL; task->thread.sme_state = NULL;
} }
void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
{ {
/* Set priority for all PEs to architecturally defined minimum */ /* Set priority for all PEs to architecturally defined minimum */
write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK, write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
...@@ -1282,23 +1278,21 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) ...@@ -1282,23 +1278,21 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
isb(); isb();
} }
/* void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
* This must be called after sme_kernel_enable(), we rely on the
* feature table being sorted to ensure this.
*/
void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{ {
/* This must be enabled after SME */
BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);
/* Allow use of ZT0 */ /* Allow use of ZT0 */
write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK, write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
SYS_SMCR_EL1); SYS_SMCR_EL1);
} }
/* void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
* This must be called after sme_kernel_enable(), we rely on the
* feature table being sorted to ensure this.
*/
void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{ {
/* This must be enabled after SME */
BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);
/* Allow use of FA64 */ /* Allow use of FA64 */
write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK, write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
SYS_SMCR_EL1); SYS_SMCR_EL1);
...@@ -1309,7 +1303,7 @@ void __init sme_setup(void) ...@@ -1309,7 +1303,7 @@ void __init sme_setup(void)
struct vl_info *info = &vl_info[ARM64_VEC_SME]; struct vl_info *info = &vl_info[ARM64_VEC_SME];
int min_bit, max_bit; int min_bit, max_bit;
if (!system_supports_sme()) if (!cpus_have_cap(ARM64_SME))
return; return;
/* /*
...@@ -1470,8 +1464,17 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) ...@@ -1470,8 +1464,17 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
*/ */
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs) void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
{ {
/* TODO: implement lazy context saving/restoring */ /* Even if we chose not to use FPSIMD, the hardware could still trap: */
WARN_ON(1); if (!system_supports_fpsimd()) {
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
return;
}
/*
* When FPSIMD is enabled, we should never take a trap unless something
* has gone very wrong.
*/
BUG();
} }
/* /*
...@@ -1712,13 +1715,23 @@ void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state) ...@@ -1712,13 +1715,23 @@ void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
void fpsimd_restore_current_state(void) void fpsimd_restore_current_state(void)
{ {
/* /*
* For the tasks that were created before we detected the absence of * TIF_FOREIGN_FPSTATE is set on the init task and copied by
* FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(), * arch_dup_task_struct() regardless of whether FP/SIMD is detected.
* e.g, init. This could be then inherited by the children processes. * Thus user threads can have this set even when FP/SIMD hasn't been
* If we later detect that the system doesn't support FP/SIMD, * detected.
* we must clear the flag for all the tasks to indicate that the *
* FPSTATE is clean (as we can't have one) to avoid looping for ever in * When FP/SIMD is detected, begin_new_exec() will set
* do_notify_resume(). * TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(),
* and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when
* switching tasks. We detect FP/SIMD before we exec the first user
* process, ensuring this has TIF_FOREIGN_FPSTATE set and
* do_notify_resume() will call fpsimd_restore_current_state() to
* install the user FP/SIMD context.
*
* When FP/SIMD is not detected, nothing else will clear or set
* TIF_FOREIGN_FPSTATE prior to the first return to userspace, and
* we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume()
* looping forever calling fpsimd_restore_current_state().
*/ */
if (!system_supports_fpsimd()) { if (!system_supports_fpsimd()) {
clear_thread_flag(TIF_FOREIGN_FPSTATE); clear_thread_flag(TIF_FOREIGN_FPSTATE);
...@@ -2051,6 +2064,13 @@ static inline void fpsimd_hotplug_init(void) ...@@ -2051,6 +2064,13 @@ static inline void fpsimd_hotplug_init(void)
static inline void fpsimd_hotplug_init(void) { } static inline void fpsimd_hotplug_init(void) { }
#endif #endif
void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p)
{
unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;
write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1);
isb();
}
/* /*
* FP/SIMD support code initialisation. * FP/SIMD support code initialisation.
*/ */
......
...@@ -200,8 +200,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, ...@@ -200,8 +200,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
break; break;
case R_AARCH64_ADR_PREL_PG_HI21_NC: case R_AARCH64_ADR_PREL_PG_HI21_NC:
case R_AARCH64_ADR_PREL_PG_HI21: case R_AARCH64_ADR_PREL_PG_HI21:
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) || if (!cpus_have_final_cap(ARM64_WORKAROUND_843419))
!cpus_have_const_cap(ARM64_WORKAROUND_843419))
break; break;
/* /*
...@@ -236,13 +235,13 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, ...@@ -236,13 +235,13 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
} }
} }
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) && if (cpus_have_final_cap(ARM64_WORKAROUND_843419)) {
cpus_have_const_cap(ARM64_WORKAROUND_843419))
/* /*
* Add some slack so we can skip PLT slots that may trigger * Add some slack so we can skip PLT slots that may trigger
* the erratum due to the placement of the ADRP instruction. * the erratum due to the placement of the ADRP instruction.
*/ */
ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry))); ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
}
return ret; return ret;
} }
......
...@@ -454,7 +454,7 @@ static void ssbs_thread_switch(struct task_struct *next) ...@@ -454,7 +454,7 @@ static void ssbs_thread_switch(struct task_struct *next)
* If all CPUs implement the SSBS extension, then we just need to * If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field. * context-switch the PSTATE field.
*/ */
if (cpus_have_const_cap(ARM64_SSBS)) if (alternative_has_cap_unlikely(ARM64_SSBS))
return; return;
spectre_v4_enable_task_mitigation(next); spectre_v4_enable_task_mitigation(next);
......
...@@ -972,7 +972,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) ...@@ -972,7 +972,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
* When KPTI is in use, the vectors are switched when exiting to * When KPTI is in use, the vectors are switched when exiting to
* user-space. * user-space.
*/ */
if (arm64_kernel_unmapped_at_el0()) if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return; return;
write_sysreg(v, vbar_el1); write_sysreg(v, vbar_el1);
......
...@@ -439,9 +439,10 @@ static void __init hyp_mode_check(void) ...@@ -439,9 +439,10 @@ static void __init hyp_mode_check(void)
void __init smp_cpus_done(unsigned int max_cpus) void __init smp_cpus_done(unsigned int max_cpus)
{ {
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
setup_cpu_features(); setup_system_features();
hyp_mode_check(); hyp_mode_check();
apply_alternatives_all(); apply_alternatives_all();
setup_user_features();
mark_linear_text_alias_ro(); mark_linear_text_alias_ro();
} }
......
...@@ -55,13 +55,13 @@ void notrace __cpu_suspend_exit(void) ...@@ -55,13 +55,13 @@ void notrace __cpu_suspend_exit(void)
/* Restore CnP bit in TTBR1_EL1 */ /* Restore CnP bit in TTBR1_EL1 */
if (system_supports_cnp()) if (system_supports_cnp())
cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir); cpu_enable_swapper_cnp();
/* /*
* PSTATE was not saved over suspend/resume, re-enable any detected * PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly. * features that might not have been set correctly.
*/ */
if (cpus_have_const_cap(ARM64_HAS_DIT)) if (alternative_has_cap_unlikely(ARM64_HAS_DIT))
set_pstate_dit(1); set_pstate_dit(1);
__uaccess_enable_hw_pan(); __uaccess_enable_hw_pan();
...@@ -98,6 +98,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ...@@ -98,6 +98,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
struct sleep_stack_data state; struct sleep_stack_data state;
struct arm_cpuidle_irq_context context; struct arm_cpuidle_irq_context context;
/*
* Some portions of CPU state (e.g. PSTATE.{PAN,DIT}) are initialized
* before alternatives are patched, but are only restored by
* __cpu_suspend_exit() after alternatives are patched. To avoid
* accidentally losing these bits we must not attempt to suspend until
* after alternatives have been patched.
*/
WARN_ON(!system_capabilities_finalized());
/* Report any MTE async fault before going to suspend */ /* Report any MTE async fault before going to suspend */
mte_suspend_enter(); mte_suspend_enter();
......
...@@ -31,7 +31,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end) ...@@ -31,7 +31,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current)) if (fatal_signal_pending(current))
return 0; return 0;
if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
/* /*
* The workaround requires an inner-shareable tlbi. * The workaround requires an inner-shareable tlbi.
* We pick the reserved-ASID to minimise the impact. * We pick the reserved-ASID to minimise the impact.
......
...@@ -631,7 +631,7 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs) ...@@ -631,7 +631,7 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
int rt = ESR_ELx_SYS64_ISS_RT(esr); int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
/* Hide DIC so that we can trap the unnecessary maintenance...*/ /* Hide DIC so that we can trap the unnecessary maintenance...*/
val &= ~BIT(CTR_EL0_DIC_SHIFT); val &= ~BIT(CTR_EL0_DIC_SHIFT);
......
...@@ -212,7 +212,7 @@ static int __setup_additional_pages(enum vdso_abi abi, ...@@ -212,7 +212,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
if (IS_ERR(ret)) if (IS_ERR(ret))
goto up_fail; goto up_fail;
if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti()) if (system_supports_bti_kernel())
gp_flags = VM_ARM64_BTI; gp_flags = VM_ARM64_BTI;
vdso_base += VVAR_NR_PAGES * PAGE_SIZE; vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
......
...@@ -284,7 +284,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) ...@@ -284,7 +284,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = kvm_arm_pvtime_supported(); r = kvm_arm_pvtime_supported();
break; break;
case KVM_CAP_ARM_EL1_32BIT: case KVM_CAP_ARM_EL1_32BIT:
r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1); r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
break; break;
case KVM_CAP_GUEST_DEBUG_HW_BPS: case KVM_CAP_GUEST_DEBUG_HW_BPS:
r = get_num_brps(); r = get_num_brps();
...@@ -296,7 +296,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) ...@@ -296,7 +296,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = kvm_arm_support_pmu_v3(); r = kvm_arm_support_pmu_v3();
break; break;
case KVM_CAP_ARM_INJECT_SERROR_ESR: case KVM_CAP_ARM_INJECT_SERROR_ESR:
r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN); r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
break; break;
case KVM_CAP_ARM_VM_IPA_SIZE: case KVM_CAP_ARM_VM_IPA_SIZE:
r = get_kvm_ipa_limit(); r = get_kvm_ipa_limit();
...@@ -1207,7 +1207,7 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, ...@@ -1207,7 +1207,7 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features)) if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
return 0; return 0;
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
return -EINVAL; return -EINVAL;
/* MTE is incompatible with AArch32 */ /* MTE is incompatible with AArch32 */
...@@ -1777,7 +1777,7 @@ static void hyp_install_host_vector(void) ...@@ -1777,7 +1777,7 @@ static void hyp_install_host_vector(void)
* Call initialization code, and switch to the full blown HYP code. * Call initialization code, and switch to the full blown HYP code.
* If the cpucaps haven't been finalized yet, something has gone very * If the cpucaps haven't been finalized yet, something has gone very
* wrong, and hyp will crash and burn when it uses any * wrong, and hyp will crash and burn when it uses any
* cpus_have_const_cap() wrapper. * cpus_have_*_cap() wrapper.
*/ */
BUG_ON(!system_capabilities_finalized()); BUG_ON(!system_capabilities_finalized());
params = this_cpu_ptr_nvhe_sym(kvm_init_params); params = this_cpu_ptr_nvhe_sym(kvm_init_params);
...@@ -2310,7 +2310,7 @@ static int __init init_hyp_mode(void) ...@@ -2310,7 +2310,7 @@ static int __init init_hyp_mode(void)
if (is_protected_kvm_enabled()) { if (is_protected_kvm_enabled()) {
if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH)) cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
pkvm_hyp_init_ptrauth(); pkvm_hyp_init_ptrauth();
init_cpu_logical_map(); init_cpu_logical_map();
......
...@@ -815,7 +815,7 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, ...@@ -815,7 +815,7 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events) struct kvm_vcpu_events *events)
{ {
events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE); events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN); events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
if (events->exception.serror_pending && events->exception.serror_has_esr) if (events->exception.serror_pending && events->exception.serror_has_esr)
events->exception.serror_esr = vcpu_get_vsesr(vcpu); events->exception.serror_esr = vcpu_get_vsesr(vcpu);
...@@ -837,7 +837,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, ...@@ -837,7 +837,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool ext_dabt_pending = events->exception.ext_dabt_pending; bool ext_dabt_pending = events->exception.ext_dabt_pending;
if (serror_pending && has_esr) { if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL; return -EINVAL;
if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK)) if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
......
...@@ -401,7 +401,7 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep) ...@@ -401,7 +401,7 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
if (device) if (device)
return -EINVAL; return -EINVAL;
if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti()) if (system_supports_bti_kernel())
attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP; attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
} else { } else {
attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN; attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
...@@ -664,7 +664,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) ...@@ -664,7 +664,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
static bool stage2_has_fwb(struct kvm_pgtable *pgt) static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{ {
if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
return false; return false;
return !(pgt->flags & KVM_PGTABLE_S2_NOFWB); return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
......
...@@ -1578,7 +1578,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1578,7 +1578,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (device) if (device)
prot |= KVM_PGTABLE_PROT_DEVICE; prot |= KVM_PGTABLE_PROT_DEVICE;
else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
prot |= KVM_PGTABLE_PROT_X; prot |= KVM_PGTABLE_PROT_X;
/* /*
......
...@@ -207,7 +207,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, ...@@ -207,7 +207,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
* CPU left in the system, and certainly not from non-secure * CPU left in the system, and certainly not from non-secure
* software). * software).
*/ */
if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
kvm_set_way_flush(vcpu); kvm_set_way_flush(vcpu);
return true; return true;
......
...@@ -684,7 +684,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info) ...@@ -684,7 +684,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
if (kvm_vgic_global_state.vcpu_base == 0) if (kvm_vgic_global_state.vcpu_base == 0)
kvm_info("disabling GICv2 emulation\n"); kvm_info("disabling GICv2 emulation\n");
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) { if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
group0_trap = true; group0_trap = true;
group1_trap = true; group1_trap = true;
} }
......
...@@ -27,7 +27,7 @@ void __delay(unsigned long cycles) ...@@ -27,7 +27,7 @@ void __delay(unsigned long cycles)
{ {
cycles_t start = get_cycles(); cycles_t start = get_cycles();
if (cpus_have_const_cap(ARM64_HAS_WFXT)) { if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) {
u64 end = start + cycles; u64 end = start + cycles;
/* /*
......
...@@ -571,7 +571,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, ...@@ -571,7 +571,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
/* Write implies read */ /* Write implies read */
vm_flags |= VM_WRITE; vm_flags |= VM_WRITE;
/* If EPAN is absent then exec implies read */ /* If EPAN is absent then exec implies read */
if (!cpus_have_const_cap(ARM64_HAS_EPAN)) if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
vm_flags |= VM_EXEC; vm_flags |= VM_EXEC;
} }
......
...@@ -555,8 +555,7 @@ bool __init arch_hugetlb_valid_size(unsigned long size) ...@@ -555,8 +555,7 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{ {
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) && if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
/* /*
* Break-before-make (BBM) is required for all user space mappings * Break-before-make (BBM) is required for all user space mappings
* when the permission changes from executable to non-executable * when the permission changes from executable to non-executable
......
...@@ -68,7 +68,7 @@ static int __init adjust_protection_map(void) ...@@ -68,7 +68,7 @@ static int __init adjust_protection_map(void)
* With Enhanced PAN we can honour the execute-only permissions as * With Enhanced PAN we can honour the execute-only permissions as
* there is no PAN override with such mappings. * there is no PAN override with such mappings.
*/ */
if (cpus_have_const_cap(ARM64_HAS_EPAN)) { if (cpus_have_cap(ARM64_HAS_EPAN)) {
protection_map[VM_EXEC] = PAGE_EXECONLY; protection_map[VM_EXEC] = PAGE_EXECONLY;
protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY; protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
} }
......
...@@ -1469,8 +1469,7 @@ early_initcall(prevent_bootmem_remove_init); ...@@ -1469,8 +1469,7 @@ early_initcall(prevent_bootmem_remove_init);
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{ {
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) && if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
/* /*
* Break-before-make (BBM) is required for all user space mappings * Break-before-make (BBM) is required for all user space mappings
* when the permission changes from executable to non-executable * when the permission changes from executable to non-executable
......
...@@ -405,8 +405,7 @@ SYM_FUNC_START(__cpu_setup) ...@@ -405,8 +405,7 @@ SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB tlbi vmalle1 // Invalidate local TLB
dsb nsh dsb nsh
mov x1, #3 << 20 msr cpacr_el1, xzr // Reset cpacr_el1
msr cpacr_el1, x1 // Enable FP/ASIMD
mov x1, #1 << 12 // Reset mdscr_el1 and disable mov x1, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x1 // access to the DCC from EL0 msr mdscr_el1, x1 // access to the DCC from EL0
isb // Unmask debug exceptions now, isb // Unmask debug exceptions now,
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
gen := arch/$(ARCH)/include/generated gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm kapi := $(gen)/asm
kapi-hdrs-y := $(kapi)/cpucaps.h $(kapi)/sysreg-defs.h kapi-hdrs-y := $(kapi)/cpucap-defs.h $(kapi)/sysreg-defs.h
targets += $(addprefix ../../../, $(kapi-hdrs-y)) targets += $(addprefix ../../../, $(kapi-hdrs-y))
...@@ -17,7 +17,7 @@ quiet_cmd_gen_cpucaps = GEN $@ ...@@ -17,7 +17,7 @@ quiet_cmd_gen_cpucaps = GEN $@
quiet_cmd_gen_sysreg = GEN $@ quiet_cmd_gen_sysreg = GEN $@
cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@ cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE $(kapi)/cpucap-defs.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
$(call if_changed,gen_cpucaps) $(call if_changed,gen_cpucaps)
$(kapi)/sysreg-defs.h: $(src)/gen-sysreg.awk $(src)/sysreg FORCE $(kapi)/sysreg-defs.h: $(src)/gen-sysreg.awk $(src)/sysreg FORCE
......
...@@ -27,6 +27,7 @@ HAS_ECV_CNTPOFF ...@@ -27,6 +27,7 @@ HAS_ECV_CNTPOFF
HAS_EPAN HAS_EPAN
HAS_EVT HAS_EVT
HAS_FGT HAS_FGT
HAS_FPSIMD
HAS_GENERIC_AUTH HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3 HAS_GENERIC_AUTH_ARCH_QARMA3
HAS_GENERIC_AUTH_ARCH_QARMA5 HAS_GENERIC_AUTH_ARCH_QARMA5
...@@ -39,7 +40,6 @@ HAS_LDAPR ...@@ -39,7 +40,6 @@ HAS_LDAPR
HAS_LSE_ATOMICS HAS_LSE_ATOMICS
HAS_MOPS HAS_MOPS
HAS_NESTED_VIRT HAS_NESTED_VIRT
HAS_NO_FPSIMD
HAS_NO_HW_PREFETCH HAS_NO_HW_PREFETCH
HAS_PAN HAS_PAN
HAS_S1PIE HAS_S1PIE
......
...@@ -15,8 +15,8 @@ function fatal(msg) { ...@@ -15,8 +15,8 @@ function fatal(msg) {
/^#/ { next } /^#/ { next }
BEGIN { BEGIN {
print "#ifndef __ASM_CPUCAPS_H" print "#ifndef __ASM_CPUCAP_DEFS_H"
print "#define __ASM_CPUCAPS_H" print "#define __ASM_CPUCAP_DEFS_H"
print "" print ""
print "/* Generated file - do not edit */" print "/* Generated file - do not edit */"
cap_num = 0 cap_num = 0
...@@ -31,7 +31,7 @@ BEGIN { ...@@ -31,7 +31,7 @@ BEGIN {
END { END {
printf("#define ARM64_NCAPS\t\t\t\t\t%d\n", cap_num) printf("#define ARM64_NCAPS\t\t\t\t\t%d\n", cap_num)
print "" print ""
print "#endif /* __ASM_CPUCAPS_H */" print "#endif /* __ASM_CPUCAP_DEFS_H */"
} }
# Any lines not handled by previous rules are unexpected # Any lines not handled by previous rules are unexpected
......
...@@ -918,7 +918,7 @@ static void arch_timer_evtstrm_enable(unsigned int divider) ...@@ -918,7 +918,7 @@ static void arch_timer_evtstrm_enable(unsigned int divider)
#ifdef CONFIG_ARM64 #ifdef CONFIG_ARM64
/* ECV is likely to require a large divider. Use the EVNTIS flag. */ /* ECV is likely to require a large divider. Use the EVNTIS flag. */
if (cpus_have_const_cap(ARM64_HAS_ECV) && divider > 15) { if (cpus_have_final_cap(ARM64_HAS_ECV) && divider > 15) {
cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE; cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE;
divider -= 8; divider -= 8;
} }
...@@ -956,6 +956,30 @@ static void arch_timer_configure_evtstream(void) ...@@ -956,6 +956,30 @@ static void arch_timer_configure_evtstream(void)
arch_timer_evtstrm_enable(max(0, lsb)); arch_timer_evtstrm_enable(max(0, lsb));
} }
static int arch_timer_evtstrm_starting_cpu(unsigned int cpu)
{
arch_timer_configure_evtstream();
return 0;
}
static int arch_timer_evtstrm_dying_cpu(unsigned int cpu)
{
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
return 0;
}
static int __init arch_timer_evtstrm_register(void)
{
if (!arch_timer_evt || !evtstrm_enable)
return 0;
return cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
"clockevents/arm/arch_timer_evtstrm:starting",
arch_timer_evtstrm_starting_cpu,
arch_timer_evtstrm_dying_cpu);
}
core_initcall(arch_timer_evtstrm_register);
static void arch_counter_set_user_access(void) static void arch_counter_set_user_access(void)
{ {
u32 cntkctl = arch_timer_get_cntkctl(); u32 cntkctl = arch_timer_get_cntkctl();
...@@ -1017,8 +1041,6 @@ static int arch_timer_starting_cpu(unsigned int cpu) ...@@ -1017,8 +1041,6 @@ static int arch_timer_starting_cpu(unsigned int cpu)
} }
arch_counter_set_user_access(); arch_counter_set_user_access();
if (evtstrm_enable)
arch_timer_configure_evtstream();
return 0; return 0;
} }
...@@ -1165,8 +1187,6 @@ static int arch_timer_dying_cpu(unsigned int cpu) ...@@ -1165,8 +1187,6 @@ static int arch_timer_dying_cpu(unsigned int cpu)
{ {
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
arch_timer_stop(clk); arch_timer_stop(clk);
return 0; return 0;
} }
...@@ -1280,6 +1300,7 @@ static int __init arch_timer_register(void) ...@@ -1280,6 +1300,7 @@ static int __init arch_timer_register(void)
out_free: out_free:
free_percpu(arch_timer_evt); free_percpu(arch_timer_evt);
arch_timer_evt = NULL;
out: out:
return err; return err;
} }
......
...@@ -277,17 +277,6 @@ static void gic_redist_wait_for_rwp(void) ...@@ -277,17 +277,6 @@ static void gic_redist_wait_for_rwp(void)
gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
} }
#ifdef CONFIG_ARM64
static u64 __maybe_unused gic_read_iar(void)
{
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
}
#endif
static void gic_enable_redist(bool enable) static void gic_enable_redist(bool enable)
{ {
void __iomem *rbase; void __iomem *rbase;
......
...@@ -172,6 +172,7 @@ enum cpuhp_state { ...@@ -172,6 +172,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_ARM_TWD_STARTING,
...@@ -189,6 +190,7 @@ enum cpuhp_state { ...@@ -189,6 +190,7 @@ enum cpuhp_state {
/* Must be the last timer callback */ /* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING,
......