Commit 4461438a authored by Josh Poimboeuf, committed by Borislav Petkov (AMD)

x86/retpoline: Ensure default return thunk isn't used at runtime

Make sure the default return thunk is not used after all return
instructions have been patched by the alternatives because the default
return thunk is insufficient when it comes to mitigating Retbleed or
SRSO.

Fix based on an earlier version by David Kaplan <david.kaplan@amd.com>.

  [ bp: Fix the compilation error of warn_thunk_thunk being an invisible
        symbol, hoist thunk macro into calling.h ]
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Co-developed-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20231010171020.462211-4-david.kaplan@amd.com
Link: https://lore.kernel.org/r/20240104132446.GEZZaxnrIgIyat0pqf@fat_crate.local
parent 0911b8c5
@@ -426,3 +426,63 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 #endif /* CONFIG_SMP */
+
+#ifdef CONFIG_X86_64
+
+/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+.macro THUNK name, func
+SYM_FUNC_START(\name)
+	pushq %rbp
+	movq %rsp, %rbp
+
+	pushq %rdi
+	pushq %rsi
+	pushq %rdx
+	pushq %rcx
+	pushq %rax
+	pushq %r8
+	pushq %r9
+	pushq %r10
+	pushq %r11
+
+	call \func
+
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rax
+	popq %rcx
+	popq %rdx
+	popq %rsi
+	popq %rdi
+	popq %rbp
+	RET
+SYM_FUNC_END(\name)
+	_ASM_NOKPROBE(\name)
+.endm
+
+#else /* CONFIG_X86_32 */
+
+/* put return address in eax (arg1) */
+.macro THUNK name, func, put_ret_addr_in_eax=0
+SYM_CODE_START_NOALIGN(\name)
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+
+	.if \put_ret_addr_in_eax
+	/* Place EIP in the arg1 */
+	movl 3*4(%esp), %eax
+	.endif
+
+	call \func
+
+	popl %edx
+	popl %ecx
+	popl %eax
+	RET
+	_ASM_NOKPROBE(\name)
+SYM_CODE_END(\name)
+.endm
+
+#endif
@@ -7,6 +7,8 @@
 #include <linux/linkage.h>
 #include <asm/msr-index.h>
 
+#include "calling.h"
+
 .pushsection .noinstr.text, "ax"
 
 SYM_FUNC_START(entry_ibpb)
@@ -20,3 +22,5 @@ SYM_FUNC_END(entry_ibpb)
 EXPORT_SYMBOL_GPL(entry_ibpb);
 
 .popsection
+
+THUNK warn_thunk_thunk, __warn_thunk
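For orientation: on a 64-bit build, the "THUNK warn_thunk_thunk, __warn_thunk" invocation above expands via the macro hoisted into calling.h roughly as follows (an illustrative expansion, not part of the diff). Every C-ABI caller-clobbered register is saved and restored, so the warning call cannot disturb the interrupted context, and the final RET completes the original return:

SYM_FUNC_START(warn_thunk_thunk)
	pushq %rbp
	movq %rsp, %rbp

	/* save every register the C calling convention may clobber */
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rax
	pushq %r8
	pushq %r9
	pushq %r10
	pushq %r11

	call __warn_thunk

	/* restore them all, leaving the interrupted context intact */
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	popq %rbp
	RET
SYM_FUNC_END(warn_thunk_thunk)
	_ASM_NOKPROBE(warn_thunk_thunk)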
@@ -4,33 +4,15 @@
  * Copyright 2008 by Steven Rostedt, Red Hat, Inc
  *  (inspired by Andi Kleen's thunk_64.S)
  */
-#include <linux/export.h>
-#include <linux/linkage.h>
-#include <asm/asm.h>
-
-/* put return address in eax (arg1) */
-.macro THUNK name, func, put_ret_addr_in_eax=0
-SYM_CODE_START_NOALIGN(\name)
-	pushl %eax
-	pushl %ecx
-	pushl %edx
+#include <linux/export.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
 
-	.if \put_ret_addr_in_eax
-	/* Place EIP in the arg1 */
-	movl 3*4(%esp), %eax
-	.endif
+#include "calling.h"
 
-	call \func
-	popl %edx
-	popl %ecx
-	popl %eax
-	RET
-	_ASM_NOKPROBE(\name)
-SYM_CODE_END(\name)
-.endm
-
-THUNK preempt_schedule_thunk, preempt_schedule
-THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
-EXPORT_SYMBOL(preempt_schedule_thunk)
-EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
+THUNK preempt_schedule_thunk, preempt_schedule
+THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
+EXPORT_SYMBOL(preempt_schedule_thunk)
+EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
@@ -9,39 +9,6 @@
 #include "calling.h"
 #include <asm/asm.h>
 
-/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
-.macro THUNK name, func
-SYM_FUNC_START(\name)
-	pushq %rbp
-	movq %rsp, %rbp
-
-	pushq %rdi
-	pushq %rsi
-	pushq %rdx
-	pushq %rcx
-	pushq %rax
-	pushq %r8
-	pushq %r9
-	pushq %r10
-	pushq %r11
-
-	call \func
-
-	popq %r11
-	popq %r10
-	popq %r9
-	popq %r8
-	popq %rax
-	popq %rcx
-	popq %rdx
-	popq %rsi
-	popq %rdi
-	popq %rbp
-	RET
-SYM_FUNC_END(\name)
-	_ASM_NOKPROBE(\name)
-.endm
-
 THUNK preempt_schedule_thunk, preempt_schedule
 THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
 EXPORT_SYMBOL(preempt_schedule_thunk)
@@ -357,6 +357,8 @@ extern void entry_ibpb(void);
 
 extern void (*x86_return_thunk)(void);
 
+extern void __warn_thunk(void);
+
 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 extern void call_depth_return_thunk(void);
@@ -2850,3 +2850,8 @@ ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *bu
 	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
 }
 #endif
+
+void __warn_thunk(void)
+{
+	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
+}
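Taken together, the pieces in this commit arrange the following runtime path for a return site that somehow escaped patching (a sketch for orientation, not code from the diff; all symbol names are from this commit):

/*
 * some_func:
 *	...
 *	jmp __x86_return_thunk	# return site that apply_returns()
 *				# failed to rewrite: the bug being caught
 *
 * __x86_return_thunk:
 *	jmp warn_thunk_thunk	# body installed by boot-time alternatives
 *				# patching, see the hunk below
 *
 * warn_thunk_thunk:		# THUNK-generated wrapper: saves all
 *	call __warn_thunk	# caller-clobbered registers, calls the
 *	...			# C function above, restores the
 *	RET			# registers, then completes the
 *				# original return
 */

Note the deliberate choice of WARN_ONCE() over BUG(): a warning keeps the machine alive and reaches the log, whereas a bug splat cannot be displayed in every configuration and would leave the user with a silent freeze (see the comment being removed in the hunk below).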
@@ -369,19 +369,16 @@ SYM_FUNC_END(call_depth_return_thunk)
  * 'JMP __x86_return_thunk' sites are changed to something else by
  * apply_returns().
  *
- * This should be converted eventually to call a warning function which
- * should scream loudly when the default return thunk is called after
- * alternatives have been applied.
- *
- * That warning function cannot BUG() because the bug splat cannot be
- * displayed in all possible configurations, leading to users not really
- * knowing why the machine froze.
+ * The ALTERNATIVE below adds a really loud warning to catch the case
+ * where the insufficient default return thunk ends up getting used for
+ * whatever reason like miscompilation or failure of
+ * objtool/alternatives/etc to patch all the return sites.
  */
 SYM_CODE_START(__x86_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	ANNOTATE_UNRET_SAFE
-	ret
+	ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
+		   "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
 	int3
 SYM_CODE_END(__x86_return_thunk)
 EXPORT_SYMBOL(__x86_return_thunk)
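X86_FEATURE_ALWAYS is a synthetic feature flag that is set on every CPU, so apply_alternatives() unconditionally installs the replacement; the plain RET only ever runs in the window before alternatives patching. A sketch of the thunk body in its two states (illustrative, not part of the diff):

/* As built, used only until apply_alternatives() runs: */
__x86_return_thunk:
	ANNOTATE_UNRET_SAFE
	ret
	int3

/* After patching; apply_returns() has already rewritten all
 * 'jmp __x86_return_thunk' sites by then, so anything that still
 * lands here triggers the warning: */
__x86_return_thunk:
	jmp warn_thunk_thunk
	int3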