Commit a033eec9 authored by Breno Leitao's avatar Breno Leitao Committed by Ingo Molnar

x86/bugs: Rename CONFIG_CPU_SRSO => CONFIG_MITIGATION_SRSO

Step 9/10 of the namespace unification of CPU mitigations related Kconfig options.
Suggested-by: default avatarJosh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: default avatarBreno Leitao <leitao@debian.org>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
Acked-by: default avatarJosh Poimboeuf <jpoimboe@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20231121160740.1249350-10-leitao@debian.org
parent 1da8d217
...@@ -2570,7 +2570,7 @@ config MITIGATION_IBRS_ENTRY ...@@ -2570,7 +2570,7 @@ config MITIGATION_IBRS_ENTRY
This mitigates both spectre_v2 and retbleed at great cost to This mitigates both spectre_v2 and retbleed at great cost to
performance. performance.
config CPU_SRSO config MITIGATION_SRSO
bool "Mitigate speculative RAS overflow on AMD" bool "Mitigate speculative RAS overflow on AMD"
depends on CPU_SUP_AMD && X86_64 && RETHUNK depends on CPU_SUP_AMD && X86_64 && RETHUNK
default y default y
......
...@@ -212,7 +212,7 @@ ...@@ -212,7 +212,7 @@
*/ */
.macro VALIDATE_UNRET_END .macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \ #if defined(CONFIG_NOINSTR_VALIDATION) && \
(defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
ANNOTATE_RETPOLINE_SAFE ANNOTATE_RETPOLINE_SAFE
nop nop
#endif #endif
...@@ -271,7 +271,7 @@ ...@@ -271,7 +271,7 @@
.Lskip_rsb_\@: .Lskip_rsb_\@:
.endm .endm
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO) #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
#define CALL_UNTRAIN_RET "call entry_untrain_ret" #define CALL_UNTRAIN_RET "call entry_untrain_ret"
#else #else
#define CALL_UNTRAIN_RET "" #define CALL_UNTRAIN_RET ""
...@@ -340,7 +340,7 @@ extern void retbleed_return_thunk(void); ...@@ -340,7 +340,7 @@ extern void retbleed_return_thunk(void);
static inline void retbleed_return_thunk(void) {} static inline void retbleed_return_thunk(void) {}
#endif #endif
#ifdef CONFIG_CPU_SRSO #ifdef CONFIG_MITIGATION_SRSO
extern void srso_return_thunk(void); extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void); extern void srso_alias_return_thunk(void);
#else #else
......
...@@ -2458,7 +2458,7 @@ static void __init srso_select_mitigation(void) ...@@ -2458,7 +2458,7 @@ static void __init srso_select_mitigation(void)
break; break;
case SRSO_CMD_SAFE_RET: case SRSO_CMD_SAFE_RET:
if (IS_ENABLED(CONFIG_CPU_SRSO)) { if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
/* /*
* Enable the return thunk for generated code * Enable the return thunk for generated code
* like ftrace, static_call, etc. * like ftrace, static_call, etc.
...@@ -2478,7 +2478,7 @@ static void __init srso_select_mitigation(void) ...@@ -2478,7 +2478,7 @@ static void __init srso_select_mitigation(void)
else else
srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
} else { } else {
pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
} }
break; break;
...@@ -2494,13 +2494,13 @@ static void __init srso_select_mitigation(void) ...@@ -2494,13 +2494,13 @@ static void __init srso_select_mitigation(void)
break; break;
case SRSO_CMD_IBPB_ON_VMEXIT: case SRSO_CMD_IBPB_ON_VMEXIT:
if (IS_ENABLED(CONFIG_CPU_SRSO)) { if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
} }
} else { } else {
pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
} }
break; break;
} }
......
...@@ -142,7 +142,7 @@ SECTIONS ...@@ -142,7 +142,7 @@ SECTIONS
*(.text..__x86.rethunk_untrain) *(.text..__x86.rethunk_untrain)
ENTRY_TEXT ENTRY_TEXT
#ifdef CONFIG_CPU_SRSO #ifdef CONFIG_MITIGATION_SRSO
/* /*
* See the comment above srso_alias_untrain_ret()'s * See the comment above srso_alias_untrain_ret()'s
* definition. * definition.
...@@ -508,7 +508,7 @@ INIT_PER_CPU(irq_stack_backing_store); ...@@ -508,7 +508,7 @@ INIT_PER_CPU(irq_stack_backing_store);
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif #endif
#ifdef CONFIG_CPU_SRSO #ifdef CONFIG_MITIGATION_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/* /*
* GNU ld cannot do XOR until 2.41. * GNU ld cannot do XOR until 2.41.
......
...@@ -138,7 +138,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) ...@@ -138,7 +138,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
*/ */
.section .text..__x86.return_thunk .section .text..__x86.return_thunk
#ifdef CONFIG_CPU_SRSO #ifdef CONFIG_MITIGATION_SRSO
/* /*
* srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
...@@ -225,10 +225,10 @@ SYM_CODE_END(srso_return_thunk) ...@@ -225,10 +225,10 @@ SYM_CODE_END(srso_return_thunk)
#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret" #define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret" #define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
#else /* !CONFIG_CPU_SRSO */ #else /* !CONFIG_MITIGATION_SRSO */
#define JMP_SRSO_UNTRAIN_RET "ud2" #define JMP_SRSO_UNTRAIN_RET "ud2"
#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2" #define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
#endif /* CONFIG_CPU_SRSO */ #endif /* CONFIG_MITIGATION_SRSO */
#ifdef CONFIG_MITIGATION_UNRET_ENTRY #ifdef CONFIG_MITIGATION_UNRET_ENTRY
...@@ -316,7 +316,7 @@ SYM_FUNC_END(retbleed_untrain_ret) ...@@ -316,7 +316,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
#define JMP_RETBLEED_UNTRAIN_RET "ud2" #define JMP_RETBLEED_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_UNRET_ENTRY */ #endif /* CONFIG_MITIGATION_UNRET_ENTRY */
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO) #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
SYM_FUNC_START(entry_untrain_ret) SYM_FUNC_START(entry_untrain_ret)
ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \ ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
...@@ -325,7 +325,7 @@ SYM_FUNC_START(entry_untrain_ret) ...@@ -325,7 +325,7 @@ SYM_FUNC_START(entry_untrain_ret)
SYM_FUNC_END(entry_untrain_ret) SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret) __EXPORT_THUNK(entry_untrain_ret)
#endif /* CONFIG_MITIGATION_UNRET_ENTRY || CONFIG_CPU_SRSO */ #endif /* CONFIG_MITIGATION_UNRET_ENTRY || CONFIG_MITIGATION_SRSO */
#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
......
...@@ -131,7 +131,7 @@ ...@@ -131,7 +131,7 @@
*/ */
.macro VALIDATE_UNRET_BEGIN .macro VALIDATE_UNRET_BEGIN
#if defined(CONFIG_NOINSTR_VALIDATION) && \ #if defined(CONFIG_NOINSTR_VALIDATION) && \
(defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
.Lhere_\@: .Lhere_\@:
.pushsection .discard.validate_unret .pushsection .discard.validate_unret
.long .Lhere_\@ - . .long .Lhere_\@ - .
......
...@@ -38,7 +38,7 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION)) ...@@ -38,7 +38,7 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y) vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \ vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
$(if $(or $(CONFIG_MITIGATION_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret) $(if $(or $(CONFIG_MITIGATION_UNRET_ENTRY),$(CONFIG_MITIGATION_SRSO)), --unret)
objtool-args = $(vmlinux-objtool-args-y) --link objtool-args = $(vmlinux-objtool-args-y) --link
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment