Commit b9676962 authored by Ard Biesheuvel, committed by Ingo Molnar

efi/arm64: Clean EFI stub exit code from cache instead of avoiding it

Commit 9f922377 ("efi/libstub/arm: Make efi_entry() an ordinary PE/COFF
entrypoint") modified the handover code written in assembler and, for
maintainability, aligned its logic with the 32-bit ARM version: avoid cache
maintenance on the remaining instructions in the subroutine that will be
executed with the MMU and caches off, and instead branch into the relocated
copy of the kernel image and execute them from there.

However, this assumes that the copy is executable, which means we expect
EFI_LOADER_DATA regions to be executable as well. That is not a reasonable
assumption to make, even if it is true for most UEFI implementations today.

So change this back, and add a __clean_dcache_area_poc() call to cover
the remaining code in the subroutine. While at it, switch the other
call site over to __clean_dcache_area_poc() as well, and clean up the
terminology in comments to avoid using 'flush' in the context of cache
maintenance. Also, let's switch to the new-style asm annotations.
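
For context, 'cleaning' a range to the Point of Coherency (PoC) writes any
dirty data cache lines covering that range back far enough that non-cacheable
accesses, such as the instruction fetches performed once the MMU and caches
are off, see the up-to-date bytes. Below is a minimal C sketch of such a
routine, assuming a fixed 64-byte line size purely for illustration; the
kernel's __clean_dcache_area_poc() is written in assembly and derives the
line size from CTR_EL0 instead.

	#include <stdint.h>

	#define CACHE_LINE	64	/* assumed for the sketch; the kernel reads CTR_EL0 */

	/* Clean [addr, addr + size) to the Point of Coherency, one line at a time. */
	static void clean_dcache_to_poc(const void *addr, uint64_t size)
	{
		uint64_t line = (uint64_t)addr & ~(uint64_t)(CACHE_LINE - 1);
		uint64_t end = (uint64_t)addr + size;

		for (; line < end; line += CACHE_LINE)
			asm volatile("dc cvac, %0" : : "r"(line) : "memory");

		asm volatile("dsb sy" : : : "memory");	/* wait for the cleans to complete */
	}

In the patch below, the first __clean_dcache_area_poc() call covers the copied
Image (kernel_size bytes from its new base), while the second covers only the
code between the 0: label and the size word at 3:, i.e. the instructions that
still run after the MMU and data cache have been switched off.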
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: linux-efi@vger.kernel.org
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heinrich Schuchardt <xypron.glpk@gmx.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20200228121408.9075-6-ardb@kernel.org
parent 0698fac4
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -12,32 +12,32 @@
 	__INIT
-ENTRY(efi_enter_kernel)
+SYM_CODE_START(efi_enter_kernel)
 	/*
 	 * efi_entry() will have copied the kernel image if necessary and we
 	 * end up here with device tree address in x1 and the kernel entry
 	 * point stored in x0. Save those values in registers which are
 	 * callee preserved.
 	 */
-	mov	x19, x0			// relocated Image address
+	ldr	w2, =stext_offset
+	add	x19, x0, x2		// relocated Image entrypoint
 	mov	x20, x1			// DTB address
 	/*
-	 * Flush the copied Image to the PoC, and ensure it is not shadowed by
+	 * Clean the copied Image to the PoC, and ensure it is not shadowed by
 	 * stale icache entries from before relocation.
 	 */
 	ldr	w1, =kernel_size
-	bl	__flush_dcache_area
+	bl	__clean_dcache_area_poc
 	ic	ialluis
 	dsb	sy
 	/*
-	 * Jump across, into the copy of the image that we just cleaned
-	 * to the PoC, so that we can safely disable the MMU and caches.
+	 * Clean the remainder of this routine to the PoC
+	 * so that we can safely disable the MMU and caches.
 	 */
-	ldr	w0, .Ljmp
-	sub	x0, x19, w0, sxtw
-	br	x0
+	adr	x0, 0f
+	ldr	w1, 3f
+	bl	__clean_dcache_area_poc
 0:
 	/* Turn off Dcache and MMU */
 	mrs	x0, CurrentEL
@@ -63,6 +63,6 @@ ENTRY(efi_enter_kernel)
 	mov	x1, xzr
 	mov	x2, xzr
 	mov	x3, xzr
-	b	stext
-ENDPROC(efi_enter_kernel)
-.Ljmp:	.long	_text - 0b
+	br	x19
+SYM_CODE_END(efi_enter_kernel)
+3:	.long	. - 0b
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -13,6 +13,7 @@
 #ifdef CONFIG_EFI
 __efistub_kernel_size = _edata - _text;
+__efistub_stext_offset = stext - _text;
 /*
@@ -34,7 +35,7 @@ __efistub_strnlen = __pi_strnlen;
 __efistub_strcmp = __pi_strcmp;
 __efistub_strncmp = __pi_strncmp;
 __efistub_strrchr = __pi_strrchr;
-__efistub___flush_dcache_area = __pi___flush_dcache_area;
+__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
 #ifdef CONFIG_KASAN
 __efistub___memcpy = __pi_memcpy;
@@ -43,7 +44,6 @@ __efistub___memset = __pi_memset;
 #endif
 __efistub__text = _text;
-__efistub_stext = stext;
 __efistub__end = _end;
 __efistub__edata = _edata;
 __efistub_screen_info = screen_info;
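
The image-vars.h changes mirror the assembly: the code in efi-entry.S is
linked via a symbol-prefixed object, so its new 'ldr w2, =stext_offset'
reference resolves against the __efistub_stext_offset alias added above,
while the __efistub_stext alias that backed the old 'b stext' is no longer
needed. Purely as an illustration of what the two constants evaluate to,
here is a C rendering; the extern names are the kernel's layout symbols,
and the two helper functions are hypothetical, not kernel APIs.

	/* Illustration only: the constants used by efi_enter_kernel(), in C. */
	extern char _text[], stext[], _edata[];	/* linker-provided layout symbols */

	static inline unsigned long kernel_size(void)	/* __efistub_kernel_size */
	{
		return (unsigned long)(_edata - _text);	/* bytes cleaned after the copy */
	}

	static inline unsigned long stext_offset(void)	/* __efistub_stext_offset */
	{
		return (unsigned long)(stext - _text);	/* entrypoint offset in the Image */
	}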