Commit 7b75782f authored by Breno Leitao, committed by Ingo Molnar

x86/bugs: Rename CONFIG_SLS => CONFIG_MITIGATION_SLS

Step 6/10 of the namespace unification of the CPU-mitigations-related Kconfig options.
Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Breno Leitao <leitao@debian.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20231121160740.1249350-7-leitao@debian.org
parent aefb2f2e
arch/x86/Kconfig
@@ -2577,7 +2577,7 @@ config CPU_SRSO
 	help
 	  Enable the SRSO mitigation needed on AMD Zen1-4 machines.

-config SLS
+config MITIGATION_SLS
 	bool "Mitigate Straight-Line-Speculation"
 	depends on CC_HAS_SLS && X86_64
 	select OBJTOOL if HAVE_OBJTOOL
arch/x86/Makefile
@@ -205,7 +205,7 @@ ifdef CONFIG_MITIGATION_RETPOLINE
 endif
 endif

-ifdef CONFIG_SLS
+ifdef CONFIG_MITIGATION_SLS
   KBUILD_CFLAGS += -mharden-sls=all
 endif
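For context on the flag guarded here (a sketch, not part of the patch): -mharden-sls=all asks the compiler to blunt straight-line speculation itself by planting an INT3 after returns and indirect branches. Roughly, for a hypothetical file sls_demo.c:

/* sls_demo.c -- hypothetical example; build sketch:
 *     gcc -O2 -mharden-sls=all -S sls_demo.c
 * Exact codegen depends on the compiler version.
 */
int answer(void)
{
	return 42;
}

/* Expected epilogue shape with SLS hardening (illustrative):
 *
 *	movl	$42, %eax
 *	ret
 *	int3	<- barrier appended by -mharden-sls; a mispredicted
 *		   fall-through past the ret cannot speculate onward
 */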
arch/x86/include/asm/linkage.h
@@ -43,7 +43,7 @@
 #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
 #define RET	jmp __x86_return_thunk
 #else /* CONFIG_MITIGATION_RETPOLINE */
-#ifdef CONFIG_SLS
+#ifdef CONFIG_MITIGATION_SLS
 #define RET	ret; int3
 #else
 #define RET	ret
@@ -55,7 +55,7 @@
 #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
 #define ASM_RET	"jmp __x86_return_thunk\n\t"
 #else /* CONFIG_MITIGATION_RETPOLINE */
-#ifdef CONFIG_SLS
+#ifdef CONFIG_MITIGATION_SLS
 #define ASM_RET	"ret; int3\n\t"
 #else
 #define ASM_RET	"ret\n\t"
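As a reading aid (not part of the patch), the selection these two linkage.h hunks touch condenses to roughly the following; the real header also special-cases __DISABLE_EXPORTS and BUILD_VDSO as shown above:

/* Condensed post-rename RET selection -- sketch, not the verbatim header */
#if defined(CONFIG_RETHUNK)
# define RET	jmp __x86_return_thunk	/* return routed through the return thunk */
#elif defined(CONFIG_MITIGATION_SLS)
# define RET	ret; int3		/* int3 stops straight-line speculation past the ret */
#else
# define RET	ret			/* plain return, no mitigation */
#endif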
arch/x86/kernel/alternative.c
@@ -708,8 +708,8 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
 	/*
 	 * The compiler is supposed to EMIT an INT3 after every unconditional
 	 * JMP instruction due to AMD BTC. However, if the compiler is too old
-	 * or SLS isn't enabled, we still need an INT3 after indirect JMPs
-	 * even on Intel.
+	 * or MITIGATION_SLS isn't enabled, we still need an INT3 after
+	 * indirect JMPs even on Intel.
 	 */
 	if (op == JMP32_INSN_OPCODE && i < insn->length)
 		bytes[i++] = INT3_INSN_OPCODE;
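To make the renamed comment concrete (illustrative bytes, register choice arbitrary; not part of the patch), the rewrite plus the INT3 added by the quoted code looks roughly like:

/* Sketch of a retpoline JMP site rewritten to a bare indirect jump:
 *
 *	ff e0	jmp *%rax	<- emitted in place of jmp __x86_indirect_thunk_rax
 *	cc	int3		<- appended by the quoted code when the compiler
 *				   (too old, or MITIGATION_SLS=n) did not emit one
 */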
arch/x86/kernel/ftrace.c
@@ -307,7 +307,8 @@ union ftrace_op_code_union {
 	} __attribute__((packed));
 };

-#define RET_SIZE	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
+#define RET_SIZE \
+	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))

 static unsigned long
 create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
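Worked out per configuration (byte sizes follow from the x86 encodings; orientation only, not part of the change):

/* What the renamed RET_SIZE evaluates to -- sketch:
 *
 *  CONFIG_MITIGATION_RETPOLINE=y            -> 5 bytes: e9 + rel32 (jmp to the return thunk)
 *  retpoline off, CONFIG_MITIGATION_SLS=y   -> 2 bytes: c3 cc      (ret; int3)
 *  retpoline off, CONFIG_MITIGATION_SLS=n   -> 1 byte : c3         (ret)
 */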
arch/x86/net/bpf_jit_comp.c
@@ -469,7 +469,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
 	} else {
 		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
-		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
+		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
 			EMIT1(0xCC);		/* int3 */
 	}
@@ -484,7 +484,7 @@ static void emit_return(u8 **pprog, u8 *ip)
 		emit_jump(&prog, x86_return_thunk, ip);
 	} else {
 		EMIT1(0xC3);		/* ret */
-		if (IS_ENABLED(CONFIG_SLS))
+		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
 			EMIT1(0xCC);	/* int3 */
 	}
scripts/Makefile.lib
@@ -264,7 +264,7 @@ endif
 objtool-args-$(CONFIG_UNWINDER_ORC)			+= --orc
 objtool-args-$(CONFIG_MITIGATION_RETPOLINE)		+= --retpoline
 objtool-args-$(CONFIG_RETHUNK)				+= --rethunk
-objtool-args-$(CONFIG_SLS)				+= --sls
+objtool-args-$(CONFIG_MITIGATION_SLS)			+= --sls
 objtool-args-$(CONFIG_STACK_VALIDATION)			+= --stackval
 objtool-args-$(CONFIG_HAVE_STATIC_CALL_INLINE)		+= --static-call
 objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION)		+= --uaccess