Commit 27025a60 authored by Liu Ping Fan's avatar Liu Ping Fan Committed by Alexander Graf

powerpc: kvm: optimize "sc 1" as fast return

In some scenarios, e.g. OpenStack CI, a PR guest can trigger "sc 1" frequently.
This patch optimizes that path by directly delivering BOOK3S_INTERRUPT_SYSCALL
to the HV guest, so powernv can return to the HV guest without a heavy exit,
i.e. with no need to swap the TLB, HTAB, etc.
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 8a3caa6d
...@@ -669,12 +669,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -669,12 +669,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* hcall - punt to userspace */ /* hcall - punt to userspace */
int i; int i;
if (vcpu->arch.shregs.msr & MSR_PR) { /* hypercall with MSR_PR has already been handled in rmode,
/* sc 1 from userspace - reflect to guest syscall */ * and never reaches here.
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL); */
r = RESUME_GUEST;
break;
}
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
for (i = 0; i < 9; ++i) for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
......
...@@ -686,6 +686,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) ...@@ -686,6 +686,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
5: mtspr SPRN_SRR0, r6 5: mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7 mtspr SPRN_SRR1, r7
/*
* Required state:
* R4 = vcpu
* R10: value for HSRR0
* R11: value for HSRR1
* R13 = PACA
*/
fast_guest_return: fast_guest_return:
li r0,0 li r0,0
stb r0,VCPU_CEDED(r4) /* cancel cede */ stb r0,VCPU_CEDED(r4) /* cancel cede */
...@@ -1471,7 +1478,8 @@ kvmppc_hisi: ...@@ -1471,7 +1478,8 @@ kvmppc_hisi:
hcall_try_real_mode: hcall_try_real_mode:
ld r3,VCPU_GPR(R3)(r9) ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR andi. r0,r11,MSR_PR
bne guest_exit_cont /* sc 1 from userspace - reflect to guest syscall */
bne sc_1_fast_return
clrrdi r3,r3,2 clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont bge guest_exit_cont
...@@ -1492,6 +1500,15 @@ hcall_try_real_mode: ...@@ -1492,6 +1500,15 @@ hcall_try_real_mode:
ld r11,VCPU_MSR(r4) ld r11,VCPU_MSR(r4)
b fast_guest_return b fast_guest_return
/*
 * "sc 1" issued from guest userspace (MSR_PR was set): instead of a full
 * exit to the host, reflect a system-call interrupt straight back into
 * the guest and resume it via fast_guest_return.
 *
 * fast_guest_return expects: R4 = vcpu, R10 = value for (H)SRR0,
 * R11 = value for (H)SRR1, per the comment block above it.
 */
sc_1_fast_return:
mtspr SPRN_SRR0,r10 /* guest's next PC, so its 0xc00 handler can rfid back */
mtspr SPRN_SRR1,r11 /* guest MSR at the time of the sc */
li r10, BOOK3S_INTERRUPT_SYSCALL /* re-enter guest at its syscall vector */
li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
/* rotate right by 1: MSR_ME lands in place, the low 1 bit becomes MSR_SF
 * (bit 63) — avoids needing a 64-bit immediate load */
rotldi r11, r11, 63
mr r4,r9 /* r9 holds the vcpu pointer on this exit path (see hcall_try_real_mode) */
b fast_guest_return
/* We've attempted a real mode hcall, but it's punted it back /* We've attempted a real mode hcall, but it's punted it back
* to userspace. We need to restore some clobbered volatiles * to userspace. We need to restore some clobbered volatiles
* before resuming the pass-it-to-qemu path */ * before resuming the pass-it-to-qemu path */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment