Commit 7ddb0c3d authored by Marc Zyngier, committed by Will Deacon

arm64: Rename the VHE switch to "finalise_el2"

As we are about to perform a lot more in 'mutate_to_vhe' than
we currently do, this function really becomes the point where
we finalise the basic EL2 configuration.

Reflect this into the code by renaming a bunch of things:
- HVC_VHE_RESTART -> HVC_FINALISE_EL2
- switch_to_vhe -> finalise_el2
- mutate_to_vhe -> __finalise_el2

No functional changes.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220630160500.1536744-2-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 0aaa6853
@@ -60,12 +60,13 @@ these functions (see arch/arm{,64}/include/asm/virt.h):

 * ::

-    x0 = HVC_VHE_RESTART (arm64 only)
+    x0 = HVC_FINALISE_EL2 (arm64 only)

-  Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
-  the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
-  being off, and VHE not being disabled by any other means (command line
-  option, for example).
+  Finish configuring EL2 depending on the command-line options,
+  including an attempt to upgrade the kernel's exception level from
+  EL1 to EL2 by enabling the VHE mode. This is conditioned by the CPU
+  supporting VHE, the EL2 MMU being off, and VHE not being disabled by
+  any other means (command line option, for example).

 Any other value of r0/x0 triggers a hypervisor-specific handling,
 which is not documented here.
...
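For orientation, the caller's side of this interface is tiny; the sketch below mirrors the __cpu_stick_to_vhe sequence in the head.S hunk further down (the explanatory comments are editorial, not part of the patch):

    mov x0, #HVC_FINALISE_EL2    // stub hypercall number (3, see virt.h hunk below)
    hvc #0                       // trap to the EL2 stub; __finalise_el2 runs there
    // Execution continues here once the stub has run: at EL2 if VHE was
    // engaged, otherwise back at EL1.
    mov x0, #BOOT_CPU_MODE_EL2   // __cpu_stick_to_vhe then reports EL2 as the boot mode
    ret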
@@ -36,9 +36,9 @@
 #define HVC_RESET_VECTORS 2

 /*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
  */
-#define HVC_VHE_RESTART		3
+#define HVC_FINALISE_EL2	3

 /* Max number of HYP stub hypercalls */
 #define HVC_STUB_HCALL_NR 4
...
@@ -459,7 +459,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	mov	x0, x22			// pass FDT address in x0
 	bl	init_feature_override	// Parse cpu feature overrides
 	mov	x0, x20
-	bl	switch_to_vhe		// Prefer VHE if possible
+	bl	finalise_el2		// Prefer VHE if possible
 	ldp	x29, x30, [sp], #16
 	bl	start_kernel
 	ASM_BUG()
@@ -542,7 +542,7 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 	eret

 __cpu_stick_to_vhe:
-	mov	x0, #HVC_VHE_RESTART
+	mov	x0, #HVC_FINALISE_EL2
 	hvc	#0
 	mov	x0, #BOOT_CPU_MODE_EL2
 	ret
@@ -592,7 +592,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
 	 * Common entry point for secondary CPUs.
 	 */
 	mov	x20, x0			// preserve boot mode
-	bl	switch_to_vhe
+	bl	finalise_el2
 	bl	__cpu_secondary_check52bitva
 #if VA_BITS > 48
 	ldr_l	x0, vabits_actual
...
@@ -51,8 +51,8 @@ SYM_CODE_START_LOCAL(elx_sync)
 	msr	vbar_el2, x1
 	b	9f

-1:	cmp	x0, #HVC_VHE_RESTART
-	b.eq	mutate_to_vhe
+1:	cmp	x0, #HVC_FINALISE_EL2
+	b.eq	__finalise_el2

 2:	cmp	x0, #HVC_SOFT_RESTART
 	b.ne	3f
@@ -73,8 +73,8 @@ SYM_CODE_START_LOCAL(elx_sync)
 	eret
 SYM_CODE_END(elx_sync)

-// nVHE? No way! Give me the real thing!
-SYM_CODE_START_LOCAL(mutate_to_vhe)
+SYM_CODE_START_LOCAL(__finalise_el2)
+	// nVHE? No way! Give me the real thing!
 	// Sanity check: MMU *must* be off
 	mrs	x1, sctlr_el2
 	tbnz	x1, #0, 1f
@@ -140,10 +140,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	msr	spsr_el1, x0
 	b	enter_vhe
-SYM_CODE_END(mutate_to_vhe)
+SYM_CODE_END(__finalise_el2)

 // At the point where we reach enter_vhe(), we run with
-// the MMU off (which is enforced by mutate_to_vhe()).
+// the MMU off (which is enforced by __finalise_el2()).
 // We thus need to be in the idmap, or everything will
 // explode when enabling the MMU.
@@ -222,11 +222,11 @@ SYM_FUNC_START(__hyp_reset_vectors)
 SYM_FUNC_END(__hyp_reset_vectors)

 /*
- * Entry point to switch to VHE if deemed capable
+ * Entry point to finalise EL2 and switch to VHE if deemed capable
  *
  * w0: boot mode, as returned by init_kernel_el()
  */
-SYM_FUNC_START(switch_to_vhe)
+SYM_FUNC_START(finalise_el2)
 	// Need to have booted at EL2
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -236,9 +236,8 @@ SYM_FUNC_START(switch_to_vhe)
 	cmp	x0, #CurrentEL_EL1
 	b.ne	1f

-	// Turn the world upside down
-	mov	x0, #HVC_VHE_RESTART
+	mov	x0, #HVC_FINALISE_EL2
 	hvc	#0
 1:
 	ret
-SYM_FUNC_END(switch_to_vhe)
+SYM_FUNC_END(finalise_el2)
@@ -100,7 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
 SYM_CODE_START(cpu_resume)
 	bl	init_kernel_el
-	bl	switch_to_vhe
+	bl	finalise_el2
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
...