Commit c704cf27 authored by Catalin Marinas

Merge branch 'for-next/alternatives' into for-next/core

* for-next/alternatives:
  : Alternatives (code patching) improvements
  arm64: fix the build with binutils 2.27
  arm64: avoid BUILD_BUG_ON() in alternative-macros
  arm64: alternatives: add shared NOP callback
  arm64: alternatives: add alternative_has_feature_*()
  arm64: alternatives: have callbacks take a cap
  arm64: alternatives: make alt_region const
  arm64: alternatives: hoist print out of __apply_alternatives()
  arm64: alternatives: proton-pack: prepare for cap changes
  arm64: alternatives: kvm: prepare for cap changes
  arm64: cpufeature: make cpus_have_cap() noinstr-safe
parents c3976232 ba00c2a0
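
The common thread in this series: every alternative, callback-based or not, is now keyed by a real cpucap. Callback alternatives previously used the pseudo-cap ARM64_CB_PATCH and were applied unconditionally; they now take a cap (with the new always-true caps ARM64_ALWAYS_BOOT and ARM64_ALWAYS_SYSTEM standing in for "unconditional"), and the new alternative_has_feature_*() helpers replace the cpu_hwcap_keys static keys. A rough before/after sketch of a callback site (my_patch_cb is a placeholder, not a symbol from this series):

	/* Before: callback keyed off the pseudo-cap, applied unconditionally. */
	asm volatile(ALTERNATIVE_CB("nop", my_patch_cb));

	/* After: callback predicated on a real cap. */
	asm volatile(ALTERNATIVE_CB("nop", ARM64_ALWAYS_SYSTEM, my_patch_cb));
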
@@ -2,10 +2,22 @@
 #ifndef __ASM_ALTERNATIVE_MACROS_H
 #define __ASM_ALTERNATIVE_MACROS_H
 
+#include <linux/bits.h>
+#include <linux/const.h>
+
 #include <asm/cpucaps.h>
 #include <asm/insn-def.h>
 
-#define ARM64_CB_PATCH	ARM64_NCAPS
+/*
+ * Binutils 2.27.0 can't handle a 'UL' suffix on constants, so for the assembly
+ * macros below we must use `(1 << ARM64_CB_SHIFT)`.
+ */
+#define ARM64_CB_SHIFT	15
+#define ARM64_CB_BIT	BIT(ARM64_CB_SHIFT)
+
+#if ARM64_NCAPS >= ARM64_CB_BIT
+#error "cpucaps have overflown ARM64_CB_BIT"
+#endif
 
 #ifndef __ASSEMBLY__
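
For orientation: the feature field emitted by altinstruction_entry below is the 16-bit cpufeature member of struct alt_instr, so a callback entry now packs its cap into bits 14:0 and sets bit 15. A sketch of the record layout (mirroring struct alt_instr from asm/alternative.h; values illustrative):

	struct alt_instr {
		s32 orig_offset;	/* .word \orig_offset - .                    */
		s32 alt_offset;		/* .word \alt_offset - . (callback address)  */
		u16 cpufeature;		/* .hword (1 << ARM64_CB_SHIFT) | cap        */
		u8  orig_len;		/* 662f - 661f                               */
		u8  alt_len;		/* 0 for callback entries                    */
	};
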
@@ -73,8 +85,8 @@
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
 	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
-#define ALTERNATIVE_CB(oldinstr, cb) \
-	__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
+#define ALTERNATIVE_CB(oldinstr, feature, cb) \
+	__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb)
 #else
 
 #include <asm/assembler.h>
@@ -82,7 +94,7 @@
 .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
 	.word \orig_offset - .
 	.word \alt_offset - .
-	.hword \feature
+	.hword (\feature)
 	.byte \orig_len
 	.byte \alt_len
 .endm
@@ -141,10 +153,10 @@
 661:
 .endm
 
-.macro alternative_cb cb
+.macro alternative_cb cap, cb
 	.set .Lasm_alt_mode, 0
 	.pushsection .altinstructions, "a"
-	altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
+	altinstruction_entry 661f, \cb, (1 << ARM64_CB_SHIFT) | \cap, 662f-661f, 0
 	.popsection
 661:
 .endm
@@ -207,4 +219,46 @@ alternative_endif
 #define ALTERNATIVE(oldinstr, newinstr, ...)   \
 	_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
 
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+static __always_inline bool
+alternative_has_feature_likely(unsigned long feature)
+{
+	compiletime_assert(feature < ARM64_NCAPS,
+			   "feature must be < ARM64_NCAPS");
+
+	asm_volatile_goto(
+	ALTERNATIVE_CB("b	%l[l_no]", %[feature], alt_cb_patch_nops)
+	:
+	: [feature] "i" (feature)
+	:
+	: l_no);
+
+	return true;
+l_no:
+	return false;
+}
+
+static __always_inline bool
+alternative_has_feature_unlikely(unsigned long feature)
+{
+	compiletime_assert(feature < ARM64_NCAPS,
+			   "feature must be < ARM64_NCAPS");
+
+	asm_volatile_goto(
+	ALTERNATIVE("nop", "b	%l[l_yes]", %[feature])
+	:
+	: [feature] "i" (feature)
+	:
+	: l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_ALTERNATIVE_MACROS_H */
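
Note the unpatched behaviour of the two new helpers: until alternatives run, the "likely" variant executes its original `b %l[l_no]` and the "unlikely" variant its original `nop`, so both report the feature as absent; the suffix only chooses which result becomes the straight-line fall-through once patched. A sketch of the intended call-site shape (ARM64_HAS_WIDGET is a made-up cap, not from this series):

	static __always_inline bool system_supports_widget(void)
	{
		/* Becomes a constant-true fall-through once the (hypothetical)
		 * ARM64_HAS_WIDGET cap is finalised and the branch is NOPed. */
		return alternative_has_feature_likely(ARM64_HAS_WIDGET);
	}
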
@@ -293,7 +293,7 @@ alternative_endif
 alternative_if_not ARM64_KVM_PROTECTED_MODE
 	ASM_BUG()
 alternative_else_nop_endif
-alternative_cb kvm_compute_final_ctr_el0
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
 	movz	\reg, #0
 	movk	\reg, #0, lsl #16
 	movk	\reg, #0, lsl #32
@@ -877,7 +877,7 @@ alternative_endif
 .macro __mitigate_spectre_bhb_loop	tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb	spectre_bhb_patch_loop_iter
+alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
 	mov	\tmp, #32		// Patched to correct the immediate
 alternative_cb_end
 .Lspectre_bhb_loop\@:
@@ -890,7 +890,7 @@ alternative_cb_end
 .macro mitigate_spectre_bhb_loop	tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb	spectre_bhb_patch_loop_mitigation_enable
+alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
 	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
 alternative_cb_end
 	__mitigate_spectre_bhb_loop	\tmp
@@ -904,7 +904,7 @@ alternative_cb_end
 	stp	x0, x1, [sp, #-16]!
 	stp	x2, x3, [sp, #-16]!
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
-alternative_cb	smccc_patch_fw_mitigation_conduit
+alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 	ldp	x2, x3, [sp], #16
@@ -914,7 +914,7 @@ alternative_cb_end
 .macro mitigate_spectre_bhb_clear_insn
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb	spectre_bhb_patch_clearbhb
+alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
 	/* Patched to NOP when not supported */
 	clearbhb
 	isb
......
@@ -6,6 +6,7 @@
 #ifndef __ASM_CPUFEATURE_H
 #define __ASM_CPUFEATURE_H
 
+#include <asm/alternative-macros.h>
 #include <asm/cpucaps.h>
 #include <asm/cputype.h>
 #include <asm/hwcap.h>
@@ -419,12 +420,8 @@ static __always_inline bool is_hyp_code(void)
 }
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-extern struct static_key_false arm64_const_caps_ready;
-
-/* ARM64 CAPS + alternative_cb */
-#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
-extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
 
 #define for_each_available_cap(cap)		\
 	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
@@ -440,7 +437,7 @@ unsigned long cpu_get_elf_hwcap2(void);
 
 static __always_inline bool system_capabilities_finalized(void)
 {
-	return static_branch_likely(&arm64_const_caps_ready);
+	return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
 }
 
 /*
@@ -448,11 +445,11 @@ static __always_inline bool system_capabilities_finalized(void)
  *
  * Before the capability is detected, this returns false.
  */
-static inline bool cpus_have_cap(unsigned int num)
+static __always_inline bool cpus_have_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	return test_bit(num, cpu_hwcaps);
+	return arch_test_bit(num, cpu_hwcaps);
 }
 
 /*
@@ -467,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	return static_branch_unlikely(&cpu_hwcap_keys[num]);
+	return alternative_has_feature_unlikely(num);
 }
 
 /*
......
@@ -63,7 +63,7 @@
  * specific registers encoded in the instructions).
  */
 .macro kern_hyp_va	reg
-alternative_cb kvm_update_va_mask
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
 	and	\reg, \reg, #1		/* mask with va_mask */
 	ror	\reg, \reg, #1		/* rotate to the first tag bit */
 	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
@@ -97,7 +97,7 @@ alternative_cb_end
 	hyp_pa	\reg, \tmp
 
 	/* Load kimage_voffset. */
-alternative_cb kvm_get_kimage_voffset
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
 	movz	\tmp, #0
 	movk	\tmp, #0, lsl #16
 	movk	\tmp, #0, lsl #32
@@ -131,6 +131,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 				    "add %0, %0, #0\n"
 				    "add %0, %0, #0, lsl 12\n"
 				    "ror %0, %0, #63\n",
+				    ARM64_ALWAYS_SYSTEM,
 				    kvm_update_va_mask)
 		     : "+r" (v));
 	return v;
......
@@ -13,14 +13,13 @@
 #include <linux/jump_label.h>
 #include <linux/stringify.h>
 #include <asm/alternative.h>
+#include <asm/alternative-macros.h>
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-
 static __always_inline bool system_uses_lse_atomics(void)
 {
-	return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+	return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
 }
 
 #define __lse_ll_sc_body(op, ...)					\
......
@@ -24,6 +24,9 @@
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
 
+#define ALT_CAP(a)		((a)->cpufeature & ~ARM64_CB_BIT)
+#define ALT_HAS_CB(a)		((a)->cpufeature & ARM64_CB_BIT)
+
 /* Volatile, as we may be patching the guts of READ_ONCE() */
 static volatile int all_alternatives_applied;
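
ALT_CAP() strips the callback flag so the same cap checks can gate both kinds of entry, and ALT_HAS_CB() tests for a callback. A standalone model of the encode/decode round-trip (userspace C, kernel names reused for clarity; the cap index 42 is arbitrary):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ARM64_CB_SHIFT	15
	#define ARM64_CB_BIT	(1u << ARM64_CB_SHIFT)

	/* What __ALTERNATIVE_CFG_CB / alternative_cb store in the .hword. */
	static uint16_t pack_cpufeature(unsigned int cap, int has_cb)
	{
		assert(cap < ARM64_CB_BIT);	/* the #if/#error overflow check */
		return (uint16_t)((has_cb ? ARM64_CB_BIT : 0) | cap);
	}

	int main(void)
	{
		uint16_t field = pack_cpufeature(42, 1);

		/* What ALT_CAP()/ALT_HAS_CB() recover from the field. */
		printf("cap=%u has_cb=%d\n",
		       (unsigned int)(field & ~ARM64_CB_BIT),
		       !!(field & ARM64_CB_BIT));	/* -> cap=42 has_cb=1 */
		return 0;
	}
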
@@ -136,7 +139,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
 	} while (cur += d_size, cur < end);
 }
 
-static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
+static void __nocfi __apply_alternatives(const struct alt_region *region,
+					 bool is_module,
 					 unsigned long *feature_mask)
 {
 	struct alt_instr *alt;
@@ -145,30 +149,27 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
 	for (alt = region->begin; alt < region->end; alt++) {
 		int nr_inst;
+		int cap = ALT_CAP(alt);
 
-		if (!test_bit(alt->cpufeature, feature_mask))
+		if (!test_bit(cap, feature_mask))
 			continue;
 
-		/* Use ARM64_CB_PATCH as an unconditional patch */
-		if (alt->cpufeature < ARM64_CB_PATCH &&
-		    !cpus_have_cap(alt->cpufeature))
+		if (!cpus_have_cap(cap))
 			continue;
 
-		if (alt->cpufeature == ARM64_CB_PATCH)
+		if (ALT_HAS_CB(alt))
 			BUG_ON(alt->alt_len != 0);
 		else
 			BUG_ON(alt->alt_len != alt->orig_len);
 
-		pr_info_once("patching kernel code\n");
-
 		origptr = ALT_ORIG_PTR(alt);
 		updptr = is_module ? origptr : lm_alias(origptr);
 		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
 
-		if (alt->cpufeature < ARM64_CB_PATCH)
-			alt_cb = patch_alternative;
-		else
+		if (ALT_HAS_CB(alt))
 			alt_cb = ALT_REPL_PTR(alt);
+		else
+			alt_cb = patch_alternative;
 
 		alt_cb(alt, origptr, updptr, nr_inst);
@@ -201,9 +202,9 @@ void apply_alternatives_vdso(void)
 	const struct elf64_hdr *hdr;
 	const struct elf64_shdr *shdr;
 	const struct elf64_shdr *alt;
-	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
 
-	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+	bitmap_fill(all_capabilities, ARM64_NCAPS);
 
 	hdr = (struct elf64_hdr *)vdso_start;
 	shdr = (void *)hdr + hdr->e_shoff;
@@ -219,30 +220,31 @@ void apply_alternatives_vdso(void)
 	__apply_alternatives(&region, false, &all_capabilities[0]);
 }
 
+static const struct alt_region kernel_alternatives = {
+	.begin	= (struct alt_instr *)__alt_instructions,
+	.end	= (struct alt_instr *)__alt_instructions_end,
+};
+
 /*
  * We might be patching the stop_machine state machine, so implement a
  * really simple polling protocol here.
  */
 static int __apply_alternatives_multi_stop(void *unused)
 {
-	struct alt_region region = {
-		.begin	= (struct alt_instr *)__alt_instructions,
-		.end	= (struct alt_instr *)__alt_instructions_end,
-	};
-
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
 		while (!all_alternatives_applied)
 			cpu_relax();
 		isb();
 	} else {
-		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+		DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
 
 		bitmap_complement(remaining_capabilities, boot_capabilities,
-				  ARM64_NPATCHABLE);
+				  ARM64_NCAPS);
 
 		BUG_ON(all_alternatives_applied);
-		__apply_alternatives(&region, false, remaining_capabilities);
+		__apply_alternatives(&kernel_alternatives, false,
+				     remaining_capabilities);
 		/* Barriers provided by the cache flushing */
 		all_alternatives_applied = 1;
 	}
@@ -252,6 +254,8 @@ static int __apply_alternatives_multi_stop(void *unused)
 
 void __init apply_alternatives_all(void)
 {
+	pr_info("applying system-wide alternatives\n");
+
 	apply_alternatives_vdso();
 	/* better not try code patching on a live SMP system */
 	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
@@ -264,15 +268,13 @@ void __init apply_alternatives_all(void)
  */
 void __init apply_boot_alternatives(void)
 {
-	struct alt_region region = {
-		.begin	= (struct alt_instr *)__alt_instructions,
-		.end	= (struct alt_instr *)__alt_instructions_end,
-	};
-
 	/* If called on non-boot cpu things could go wrong */
 	WARN_ON(smp_processor_id() != 0);
 
-	__apply_alternatives(&region, false, &boot_capabilities[0]);
+	pr_info("applying boot alternatives\n");
+
+	__apply_alternatives(&kernel_alternatives, false,
+			     &boot_capabilities[0]);
 }
 
 #ifdef CONFIG_MODULES
@@ -282,10 +284,18 @@ void apply_alternatives_module(void *start, size_t length)
 		.begin	= start,
 		.end	= start + length,
 	};
-	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
 
-	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+	bitmap_fill(all_capabilities, ARM64_NCAPS);
 
 	__apply_alternatives(&region, true, &all_capabilities[0]);
 }
 #endif
+
+noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
+			       __le32 *updptr, int nr_inst)
+{
+	for (int i = 0; i < nr_inst; i++)
+		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+}
+EXPORT_SYMBOL(alt_cb_patch_nops);
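
alt_cb_patch_nops is the shared "patch to NOPs" callback: rather than every caller emitting its own NOP-writing callback, any site that just wants its original instructions NOPed when the cap is detected can name this one; alternative_has_feature_likely() above is its first user. A hedged sketch of another possible call site (the instruction and cap are illustrative, not from this series):

	/* Keep the barrier only where the (hypothetical) cap is absent:
	 * when ARM64_HAS_WIDGET is detected, patching rewrites it to a NOP. */
	asm volatile(ALTERNATIVE_CB("dsb sy", ARM64_HAS_WIDGET, alt_cb_patch_nops));
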
@@ -108,8 +108,7 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcaps);
 static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
 
-/* Need also bit for ARM64_CB_PATCH */
-DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
 
 bool arm64_use_ng_mappings = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);
@@ -134,31 +133,12 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
  */
 static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
 
-/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier. This is also used to decide if we could use
- * the fast path for checking constant CPU caps.
- */
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-
-static inline void finalize_system_capabilities(void)
-{
-	static_branch_enable(&arm64_const_caps_ready);
-}
-
 void dump_cpu_features(void)
 {
 	/* file-wide pr_fmt adds "CPU features: " prefix */
 	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
 }
 
-DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
-EXPORT_SYMBOL(cpu_hwcap_keys);
-
 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 	{						\
 		.sign = SIGNED,				\
@@ -1391,6 +1371,12 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
 
 #include <linux/irqchip/arm-gic-v3.h>
 
+static bool
+has_always(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	return true;
+}
+
 static bool
 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 {
@@ -2110,6 +2096,16 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
 }
 
 static const struct arm64_cpu_capabilities arm64_features[] = {
+	{
+		.capability = ARM64_ALWAYS_BOOT,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+		.matches = has_always,
+	},
+	{
+		.capability = ARM64_ALWAYS_SYSTEM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_always,
+	},
 	{
 		.desc = "GIC system register CPU interface",
 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
@@ -2953,9 +2949,6 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
 		if (!cpus_have_cap(num))
 			continue;
 
-		/* Ensure cpus_have_const_cap(num) works */
-		static_branch_enable(&cpu_hwcap_keys[num]);
-
 		if (boot_scope && caps->cpu_enable)
 			/*
 			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
@@ -3277,9 +3270,6 @@ void __init setup_cpu_features(void)
 	sme_setup();
 	minsigstksz_setup();
 
-	/* Advertise that we have computed the system capabilities */
-	finalize_system_capabilities();
-
 	/*
 	 * Check for sane CTR_EL0.CWG value.
 	 */
......
@@ -114,7 +114,7 @@
  * them if required.
  */
 	.macro	apply_ssbd, state, tmp1, tmp2
-alternative_cb	spectre_v4_patch_fw_mitigation_enable
+alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
 	b	.L__asm_ssbd_skip\@		// Patched to NOP
 alternative_cb_end
 	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
@@ -123,7 +123,7 @@ alternative_cb_end
 	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 	mov	w1, #\state
-alternative_cb	smccc_patch_fw_mitigation_conduit
+alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
@@ -175,7 +175,7 @@ alternative_else_nop_endif
 
 	.macro mte_set_kernel_gcr, tmp, tmp2
 #ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb	kasan_hw_tags_enable
+alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
 	b	1f
 alternative_cb_end
 	mov	\tmp, KERNEL_GCR_EL1
@@ -186,7 +186,7 @@ alternative_cb_end
 
 	.macro mte_set_user_gcr, tsk, tmp, tmp2
 #ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb	kasan_hw_tags_enable
+alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
 	b	1f
 alternative_cb_end
 	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]
......
@@ -73,6 +73,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
 KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
 KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
 KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
+KVM_NVHE_ALIAS(alt_cb_patch_nops);
 
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -89,10 +90,6 @@ KVM_NVHE_ALIAS(__icache_flags);
 /* VMID bits set by the KVM VMID allocator */
 KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
 
-/* Kernel symbols needed for cpus_have_final/const_caps checks. */
-KVM_NVHE_ALIAS(arm64_const_caps_ready);
-KVM_NVHE_ALIAS(cpu_hwcap_keys);
-
 /* Static keys which are set if a vGIC trap should be handled in hyp. */
 KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
 KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
......
@@ -586,7 +586,7 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
 	if (spectre_v4_mitigations_off())
 		return;
 
-	if (cpus_have_final_cap(ARM64_SSBS))
+	if (cpus_have_cap(ARM64_SSBS))
 		return;
 
 	if (spectre_v4_mitigations_dynamic())
......
@@ -196,7 +196,7 @@ SYM_CODE_END(__kvm_hyp_vector)
 	sub	sp, sp, #(8 * 4)
 	stp	x2, x3, [sp, #(8 * 0)]
 	stp	x0, x1, [sp, #(8 * 2)]
-alternative_cb spectre_bhb_patch_wa3
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3
 	/* Patched to mov WA3 when supported */
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
 alternative_cb_end
@@ -216,7 +216,7 @@ SYM_CODE_END(__kvm_hyp_vector)
 	mitigate_spectre_bhb_clear_insn
 	.endif
 	.if \indirect != 0
-alternative_cb	kvm_patch_vector_branch
+alternative_cb	ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch
 	/*
 	 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
 	 *
......
@@ -169,7 +169,7 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 		 * dictates it and we don't have any spare bits in the
 		 * address), NOP everything after masking the kernel VA.
 		 */
-		if (has_vhe() || (!tag_val && i > 0)) {
+		if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN) || (!tag_val && i > 0)) {
 			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
 			continue;
 		}
@@ -193,7 +193,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 	BUG_ON(nr_inst != 4);
 
-	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
+	if (!cpus_have_cap(ARM64_SPECTRE_V3A) ||
+	    WARN_ON_ONCE(cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN)))
 		return;
 
 	/*
......
@@ -2,6 +2,8 @@
 #
 # Internal CPU capabilities constants, keep this list sorted
 
+ALWAYS_BOOT
+ALWAYS_SYSTEM
 BTI
 # Unreliable: use system_supports_32bit_el0() instead.
 HAS_32BIT_EL0_DO_NOT_USE
......