Commit a97a65d5 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S: 64-bit CONFIG_RELOCATABLE support for interrupts

64-bit Book3S exception handlers must find the dynamic kernel base
to add to the target address when branching beyond __end_interrupts,
in order to support kernel running at non-0 physical address.

Support this in KVM by branching with CTR, similarly to regular
interrupt handlers. The guest CTR is saved in HSTATE_SCRATCH1 and
restored after the branch.

Without this, the host kernel hangs and crashes randomly when it is
running at a non-0 address and a KVM guest is started.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 7ede5317
...@@ -97,6 +97,15 @@ ...@@ -97,6 +97,15 @@
ld reg,PACAKBASE(r13); \ ld reg,PACAKBASE(r13); \
ori reg,reg,(ABS_ADDR(label))@l; ori reg,reg,(ABS_ADDR(label))@l;
/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 *
 * Builds the absolute runtime address of @label in @reg: start from the
 * dynamic kernel base (PACAKBASE) and fold in the label's link-time
 * offset in two halves — low 16 bits via ori, then the high 16 bits via
 * addis. Because the low half is OR'd in (not added), no carry can occur,
 * so the plain @h (rather than @ha) relocation is sufficient here.
 */
#define __LOAD_FAR_HANDLER(reg, label) \
ld reg,PACAKBASE(r13); /* reg = runtime kernel base from the PACA */ \
ori reg,reg,(ABS_ADDR(label))@l; /* OR in low 16 bits of label offset */ \
addis reg,reg,(ABS_ADDR(label))@h; /* add high 16 bits of label offset */
/* Exception register prefixes */ /* Exception register prefixes */
#define EXC_HV H #define EXC_HV H
#define EXC_STD #define EXC_STD
...@@ -227,12 +236,40 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) ...@@ -227,12 +236,40 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtctr reg; \ mtctr reg; \
bctr bctr
/*
 * KVM requires __LOAD_FAR_HANDLER.
 *
 * __BRANCH_TO_KVM_EXIT branches are also a special case because they
 * explicitly use r9 then reload it from PACA before branching. Hence
 * the double-underscore.
 *
 * The guest CTR is stashed in HSTATE_SCRATCH1 before CTR is clobbered
 * for the indirect branch; the KVM exit path is expected to restore the
 * guest CTR from there (see the RELOCATABLE handling in
 * kvmppc_interrupt_hv / kvmppc_interrupt_pr).
 */
#define __BRANCH_TO_KVM_EXIT(area, label) \
mfctr r9; /* r9 = guest CTR (r9 already saved in area+EX_R9) */ \
std r9,HSTATE_SCRATCH1(r13); /* park guest CTR for the exit handler */ \
__LOAD_FAR_HANDLER(r9, label); /* r9 = absolute address of label */ \
mtctr r9; \
ld r9,area+EX_R9(r13); /* restore guest r9 from the PACA save area */ \
bctr
/*
 * Far branch to a KVM entry point, clobbering @reg and CTR. Unlike
 * __BRANCH_TO_KVM_EXIT, the guest CTR is NOT preserved here — only use
 * where CTR need not survive (TODO confirm at each call site).
 */
#define BRANCH_TO_KVM(reg, label) \
__LOAD_FAR_HANDLER(reg, label); /* reg = absolute address of label */ \
mtctr reg; \
bctr
#else #else
#define BRANCH_TO_COMMON(reg, label) \ #define BRANCH_TO_COMMON(reg, label) \
b label b label
/*
 * Non-CONFIG_RELOCATABLE variants: the kernel runs at its linked
 * address, so a plain relative branch reaches the target and @reg is
 * unused. CTR is left untouched, so no guest-CTR save is needed.
 */
#define BRANCH_TO_KVM(reg, label) \
b label

/*
 * Matches the relocatable variant's contract of reloading guest r9
 * from the PACA save area before entering the KVM exit handler.
 */
#define __BRANCH_TO_KVM_EXIT(area, label) \
ld r9,area+EX_R9(r13); /* restore guest r9 from the PACA save area */ \
b label
#endif #endif
#define __KVM_HANDLER(area, h, n) \ #define __KVM_HANDLER(area, h, n) \
BEGIN_FTR_SECTION_NESTED(947) \ BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \ ld r10,area+EX_CFAR(r13); \
...@@ -246,8 +283,8 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) ...@@ -246,8 +283,8 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
std r12,HSTATE_SCRATCH0(r13); \ std r12,HSTATE_SCRATCH0(r13); \
sldi r12,r9,32; \ sldi r12,r9,32; \
ori r12,r12,(n); \ ori r12,r12,(n); \
ld r9,area+EX_R9(r13); \ /* This reloads r9 before branching to kvmppc_interrupt */ \
b kvmppc_interrupt __BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt)
#define __KVM_HANDLER_SKIP(area, h, n) \ #define __KVM_HANDLER_SKIP(area, h, n) \
cmpwi r10,KVM_GUEST_MODE_SKIP; \ cmpwi r10,KVM_GUEST_MODE_SKIP; \
...@@ -260,8 +297,8 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) ...@@ -260,8 +297,8 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
std r12,HSTATE_SCRATCH0(r13); \ std r12,HSTATE_SCRATCH0(r13); \
sldi r12,r9,32; \ sldi r12,r9,32; \
ori r12,r12,(n); \ ori r12,r12,(n); \
ld r9,area+EX_R9(r13); \ /* This reloads r9 before branching to kvmppc_interrupt */ \
b kvmppc_interrupt; \ __BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt); \
89: mtocrf 0x80,r9; \ 89: mtocrf 0x80,r9; \
ld r9,area+EX_R9(r13); \ ld r9,area+EX_R9(r13); \
ld r10,area+EX_R10(r13); \ ld r10,area+EX_R10(r13); \
......
...@@ -142,7 +142,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) ...@@ -142,7 +142,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
lbz r0,HSTATE_HWTHREAD_REQ(r13) lbz r0,HSTATE_HWTHREAD_REQ(r13)
cmpwi r0,0 cmpwi r0,0
beq 1f beq 1f
b kvm_start_guest BRANCH_TO_KVM(r10, kvm_start_guest)
1: 1:
#endif #endif
......
...@@ -1060,15 +1060,16 @@ kvmppc_interrupt_hv: ...@@ -1060,15 +1060,16 @@ kvmppc_interrupt_hv:
* R12 = (guest CR << 32) | interrupt vector * R12 = (guest CR << 32) | interrupt vector
* R13 = PACA * R13 = PACA
* guest R12 saved in shadow VCPU SCRATCH0 * guest R12 saved in shadow VCPU SCRATCH0
* guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
* guest R13 saved in SPRN_SCRATCH0 * guest R13 saved in SPRN_SCRATCH0
*/ */
std r9, HSTATE_SCRATCH1(r13) std r9, HSTATE_SCRATCH2(r13)
lbz r9, HSTATE_IN_GUEST(r13) lbz r9, HSTATE_IN_GUEST(r13)
cmpwi r9, KVM_GUEST_MODE_HOST_HV cmpwi r9, KVM_GUEST_MODE_HOST_HV
beq kvmppc_bad_host_intr beq kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
cmpwi r9, KVM_GUEST_MODE_GUEST cmpwi r9, KVM_GUEST_MODE_GUEST
ld r9, HSTATE_SCRATCH1(r13) ld r9, HSTATE_SCRATCH2(r13)
beq kvmppc_interrupt_pr beq kvmppc_interrupt_pr
#endif #endif
/* We're now back in the host but in guest MMU context */ /* We're now back in the host but in guest MMU context */
...@@ -1088,7 +1089,7 @@ kvmppc_interrupt_hv: ...@@ -1088,7 +1089,7 @@ kvmppc_interrupt_hv:
std r6, VCPU_GPR(R6)(r9) std r6, VCPU_GPR(R6)(r9)
std r7, VCPU_GPR(R7)(r9) std r7, VCPU_GPR(R7)(r9)
std r8, VCPU_GPR(R8)(r9) std r8, VCPU_GPR(R8)(r9)
ld r0, HSTATE_SCRATCH1(r13) ld r0, HSTATE_SCRATCH2(r13)
std r0, VCPU_GPR(R9)(r9) std r0, VCPU_GPR(R9)(r9)
std r10, VCPU_GPR(R10)(r9) std r10, VCPU_GPR(R10)(r9)
std r11, VCPU_GPR(R11)(r9) std r11, VCPU_GPR(R11)(r9)
...@@ -1151,7 +1152,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ...@@ -1151,7 +1152,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
11: stw r3,VCPU_HEIR(r9) 11: stw r3,VCPU_HEIR(r9)
/* these are volatile across C function calls */ /* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
ld r3, HSTATE_SCRATCH1(r13)
mtctr r3
#else
mfctr r3 mfctr r3
#endif
mfxer r4 mfxer r4
std r3, VCPU_CTR(r9) std r3, VCPU_CTR(r9)
std r4, VCPU_XER(r9) std r4, VCPU_XER(r9)
......
...@@ -175,9 +175,16 @@ kvmppc_interrupt_pr: ...@@ -175,9 +175,16 @@ kvmppc_interrupt_pr:
* R12 = (guest CR << 32) | exit handler id * R12 = (guest CR << 32) | exit handler id
* R13 = PACA * R13 = PACA
* HSTATE.SCRATCH0 = guest R12 * HSTATE.SCRATCH0 = guest R12
* HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
*/ */
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
/* Match 32-bit entry */ /* Match 32-bit entry */
#ifdef CONFIG_RELOCATABLE
std r9, HSTATE_SCRATCH2(r13)
ld r9, HSTATE_SCRATCH1(r13)
mtctr r9
ld r9, HSTATE_SCRATCH2(r13)
#endif
rotldi r12, r12, 32 /* Flip R12 halves for stw */ rotldi r12, r12, 32 /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */ stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
srdi r12, r12, 32 /* shift trap into low half */ srdi r12, r12, 32 /* shift trap into low half */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment