Commit 7e80f637 authored by Marc Zyngier's avatar Marc Zyngier

arm64: KVM: Move stashing of x0/x1 into the vector code itself

All our useful entry points into the hypervisor are starting by
saving x0 and x1 on the stack. Let's move those into the vectors
by introducing macros that annotate whether a vector is valid or
not, thus indicating whether we want to stash registers or not.

The only drawback is that we now also stash registers for el2_error,
but this should never happen, and we pop them back right at the
start of the handling sequence.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 3c5e8123
...@@ -55,7 +55,6 @@ ENTRY(__vhe_hyp_call) ...@@ -55,7 +55,6 @@ ENTRY(__vhe_hyp_call)
ENDPROC(__vhe_hyp_call) ENDPROC(__vhe_hyp_call)
el1_sync: // Guest trapped into EL2 el1_sync: // Guest trapped into EL2
stp x0, x1, [sp, #-16]!
mrs x0, esr_el2 mrs x0, esr_el2
lsr x0, x0, #ESR_ELx_EC_SHIFT lsr x0, x0, #ESR_ELx_EC_SHIFT
...@@ -136,18 +135,18 @@ alternative_else_nop_endif ...@@ -136,18 +135,18 @@ alternative_else_nop_endif
b __guest_exit b __guest_exit
el1_irq: el1_irq:
stp x0, x1, [sp, #-16]!
get_vcpu_ptr x1, x0 get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IRQ mov x0, #ARM_EXCEPTION_IRQ
b __guest_exit b __guest_exit
el1_error: el1_error:
stp x0, x1, [sp, #-16]!
get_vcpu_ptr x1, x0 get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_EL1_SERROR mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit b __guest_exit
el2_error: el2_error:
ldp x0, x1, [sp], #16
/* /*
* Only two possibilities: * Only two possibilities:
* 1) Either we come from the exit path, having just unmasked * 1) Either we come from the exit path, having just unmasked
...@@ -198,32 +197,41 @@ ENDPROC(\label) ...@@ -198,32 +197,41 @@ ENDPROC(\label)
invalid_vector el2h_sync_invalid invalid_vector el2h_sync_invalid
invalid_vector el2h_irq_invalid invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid invalid_vector el2h_fiq_invalid
invalid_vector el1_sync_invalid
invalid_vector el1_irq_invalid
invalid_vector el1_fiq_invalid invalid_vector el1_fiq_invalid
.ltorg .ltorg
.align 11 .align 11
/*
 * Vector stubs for __kvm_hyp_vector. Each vector slot must fit in
 * 128 bytes, hence the .align 7 at the top of each macro.
 *
 * valid_vect: a vector we expect to take. Stash x0/x1 on the stack
 * (16-byte SP-aligned push) so the handler gets two scratch
 * registers, then branch to the real handler.
 */
.macro valid_vect target
.align 7
stp x0, x1, [sp, #-16]!
b \target
.endm
/*
 * invalid_vect: a vector that should never fire. Branch straight to
 * the invalid-vector handler without stashing anything — nothing to
 * preserve on a path we only reach by mistake.
 */
.macro invalid_vect target
.align 7
b \target
.endm
ENTRY(__kvm_hyp_vector) ENTRY(__kvm_hyp_vector)
ventry el2t_sync_invalid // Synchronous EL2t invalid_vect el2t_sync_invalid // Synchronous EL2t
ventry el2t_irq_invalid // IRQ EL2t invalid_vect el2t_irq_invalid // IRQ EL2t
ventry el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_fiq_invalid // FIQ EL2t
ventry el2t_error_invalid // Error EL2t invalid_vect el2t_error_invalid // Error EL2t
ventry el2h_sync_invalid // Synchronous EL2h invalid_vect el2h_sync_invalid // Synchronous EL2h
ventry el2h_irq_invalid // IRQ EL2h invalid_vect el2h_irq_invalid // IRQ EL2h
ventry el2h_fiq_invalid // FIQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h
ventry el2_error // Error EL2h valid_vect el2_error // Error EL2h
ventry el1_sync // Synchronous 64-bit EL1 valid_vect el1_sync // Synchronous 64-bit EL1
ventry el1_irq // IRQ 64-bit EL1 valid_vect el1_irq // IRQ 64-bit EL1
ventry el1_fiq_invalid // FIQ 64-bit EL1 invalid_vect el1_fiq_invalid // FIQ 64-bit EL1
ventry el1_error // Error 64-bit EL1 valid_vect el1_error // Error 64-bit EL1
ventry el1_sync // Synchronous 32-bit EL1 valid_vect el1_sync // Synchronous 32-bit EL1
ventry el1_irq // IRQ 32-bit EL1 valid_vect el1_irq // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1 invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error // Error 32-bit EL1 valid_vect el1_error // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector) ENDPROC(__kvm_hyp_vector)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.