Commit 8b63cba7 authored by Ard Biesheuvel, committed by Borislav Petkov (AMD)

x86/decompressor: Store boot_params pointer in callee save register

Instead of pushing and popping %RSI several times to preserve the struct
boot_params pointer across the execution of the startup code, move it
into a callee save register before the first call into C, and copy it
back when needed.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230807162720.545787-8-ardb@kernel.org
parent d7156b98
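
For context only, not part of the commit: a minimal, standalone sketch of the two preservation strategies the message contrasts. The external C helper consume() is hypothetical and stands in for the real calls made by the startup code (load_stage1_idt, sev_enable, and so on).

	.text

	.globl	keep_with_push_pop
keep_with_push_pop:			/* void keep_with_push_pop(void *p) -- the pattern being removed */
	pushq	%rdi			/* spill the pointer before the call ... */
	call	consume			/* ... because %rdi is caller saved and may be clobbered */
	popq	%rdi			/* ... and reload it afterwards, once per call site */

	pushq	%rdi
	call	consume
	popq	%rdi
	ret

	.globl	keep_in_callee_saved
keep_in_callee_saved:			/* void keep_in_callee_saved(void *p) -- the pattern being introduced */
	pushq	%r15			/* save the caller's %r15 once */
	movq	%rdi, %r15		/* %r15 is callee saved, so it survives the C calls */

	movq	%r15, %rdi		/* copy the pointer back into the argument register */
	call	consume

	movq	%r15, %rdi		/* still intact, no per-call spill/reload needed */
	call	consume

	popq	%r15			/* restore the caller's %r15 before returning */
	ret

In the callee-saved variant each call site needs only a single movq from %r15 into %rdi (or, at the very end, back into %rsi), which is exactly the shape of the hunks below.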
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -405,10 +405,14 @@ SYM_CODE_START(startup_64)
 	lretq
 
 .Lon_kernel_cs:
 
-	pushq	%rsi
+	/*
+	 * RSI holds a pointer to a boot_params structure provided by the
+	 * loader, and this needs to be preserved across C function calls. So
+	 * move it into a callee saved register.
+	 */
+	movq	%rsi, %r15
 	call	load_stage1_idt
-	popq	%rsi
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	/*
@@ -419,12 +423,10 @@ SYM_CODE_START(startup_64)
 	 * CPUID instructions being issued, so go ahead and do that now via
 	 * sev_enable(), which will also handle the rest of the SEV-related
	 * detection/setup to ensure that has been done in advance of any dependent
-	 * code.
+	 * code. Pass the boot_params pointer as the first argument.
 	 */
-	pushq	%rsi
-	movq	%rsi, %rdi		/* real mode address */
+	movq	%r15, %rdi
 	call	sev_enable
-	popq	%rsi
 #endif
 
 	/*
@@ -437,13 +439,10 @@ SYM_CODE_START(startup_64)
 	 *   - Non zero RDX means trampoline needs to enable 5-level
 	 *     paging.
 	 *
-	 * RSI holds real mode data and needs to be preserved across
-	 * this function call.
+	 * Pass the boot_params pointer as the first argument.
 	 */
-	pushq	%rsi
-	movq	%rsi, %rdi		/* real mode address */
+	movq	%r15, %rdi
 	call	paging_prepare
-	popq	%rsi
 
 	/* Save the trampoline address in RCX */
 	movq	%rax, %rcx
@@ -456,9 +455,9 @@ SYM_CODE_START(startup_64)
 	 * because the architecture does not guarantee that GPRs will retain
 	 * their full 64-bit values across a 32-bit mode switch.
 	 */
+	pushq	%r15
 	pushq	%rbp
 	pushq	%rbx
-	pushq	%rsi
 
 	/*
 	 * Push the 64-bit address of trampoline_return() onto the new stack.
@@ -475,9 +474,9 @@ SYM_CODE_START(startup_64)
 	lretq
 trampoline_return:
 	/* Restore live 64-bit registers */
-	popq	%rsi
 	popq	%rbx
 	popq	%rbp
+	popq	%r15
 
 	/* Restore the stack, the 32-bit trampoline uses its own stack */
 	leaq	rva(boot_stack_end)(%rbx), %rsp
@@ -487,14 +486,9 @@ trampoline_return:
 	 *
 	 * RDI is address of the page table to use instead of page table
 	 * in trampoline memory (if required).
-	 *
-	 * RSI holds real mode data and needs to be preserved across
-	 * this function call.
 	 */
-	pushq	%rsi
 	leaq	rva(top_pgtable)(%rbx), %rdi
 	call	cleanup_trampoline
-	popq	%rsi
 
 	/* Zero EFLAGS */
 	pushq	$0
@@ -504,7 +498,6 @@ trampoline_return:
 	 * Copy the compressed kernel to the end of our buffer
 	 * where decompression in place becomes safe.
 	 */
-	pushq	%rsi
 	leaq	(_bss-8)(%rip), %rsi
 	leaq	rva(_bss-8)(%rbx), %rdi
 	movl	$(_bss - startup_32), %ecx
@@ -512,7 +505,6 @@ trampoline_return:
 	std
 	rep	movsq
 	cld
-	popq	%rsi
 
 	/*
 	 * The GDT may get overwritten either during the copy we just did or
@@ -544,30 +536,28 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	shrq	$3, %rcx
 	rep	stosq
 
-	pushq	%rsi
 	call	load_stage2_idt
 
 	/* Pass boot_params to initialize_identity_maps() */
-	movq	(%rsp), %rdi
+	movq	%r15, %rdi
 	call	initialize_identity_maps
-	popq	%rsi
 
 /*
 * Do the extraction, and jump to the new kernel..
 */
-	pushq	%rsi			/* Save the real mode argument */
-	movq	%rsi, %rdi		/* real mode address */
+	/* pass struct boot_params pointer */
+	movq	%r15, %rdi
 	leaq	boot_heap(%rip), %rsi	/* malloc area for uncompression */
 	leaq	input_data(%rip), %rdx	/* input_data */
 	movl	input_len(%rip), %ecx	/* input_len */
 	movq	%rbp, %r8		/* output target address */
 	movl	output_len(%rip), %r9d	/* decompressed length, end of relocs */
 	call	extract_kernel		/* returns kernel entry point in %rax */
-	popq	%rsi
 
 /*
 * Jump to the decompressed kernel.
 */
+	movq	%r15, %rsi
 	jmp	*%rax
 SYM_FUNC_END(.Lrelocated)