Commit fd6d53b1 authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Use decrementer to wake napping threads

This arranges for threads that are napping due to their vcpu having
ceded or due to not having a vcpu to wake up at the end of the guest's
timeslice without having to be poked with an IPI.  We do that by
arranging for the decrementer to contain a value no greater than the
number of timebase ticks remaining until the end of the timeslice.
In the case of a thread with no vcpu, this number is in the hypervisor
decrementer already.  In the case of a ceded vcpu, we use the smaller
of the HDEC value and the DEC value.

Using the DEC like this when ceded means we need to save and restore
the guest decrementer value around the nap.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent ccc07772
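As a hedged illustration of the arithmetic this patch adds (not kernel code), the nap-preparation logic can be modelled in C roughly as follows. The helpers read_dec(), read_hdec(), write_dec(), read_timebase(), struct vcpu_sketch and the vcore_tb_offset parameter are hypothetical stand-ins for the mfspr/mtspr/mftb accesses and the VCPU_DEC_EXPIRES / VCORE_TB_OFFSET fields used in the assembly below.

/*
 * Hedged sketch, not kernel code: a small model of the decrementer
 * arithmetic added by this patch.  All helpers and fields here are
 * hypothetical stand-ins for the real SPR accesses and vcpu/vcore
 * fields touched by the assembly.
 */
#include <stdint.h>
#include <stdio.h>

/* Simulated SPR state standing in for DEC, HDEC and the timebase. */
static int32_t sim_dec, sim_hdec;
static uint64_t sim_tb;

static int32_t  read_dec(void)       { return sim_dec; }
static int32_t  read_hdec(void)      { return sim_hdec; }
static void     write_dec(int32_t v) { sim_dec = v; }
static uint64_t read_timebase(void)  { return sim_tb; }  /* guest view of TB */

struct vcpu_sketch {
	int64_t dec_expires;  /* guest DEC expiry, stored in host timebase ticks */
};

/* Thread with no vcpu: HDEC interrupts don't wake nap, so mirror HDEC into DEC. */
static void primary_no_guest_prepare_nap(void)
{
	write_dec(read_hdec());
}

/* Ceded vcpu: bound the nap by the timeslice and remember the guest DEC expiry. */
static void cede_prepare_nap(struct vcpu_sketch *vcpu, int64_t vcore_tb_offset)
{
	int32_t dec  = read_dec();
	int32_t hdec = read_hdec();
	uint64_t tb  = read_timebase();

	/* Wake no later than the end of the timeslice. */
	if (dec > hdec)
		write_dec(hdec);

	/*
	 * Expiry time of the guest decrementer, converted from the guest
	 * timebase (still offset by the vcore TB offset at this point) to
	 * the host timebase: host TB = guest TB - tb_offset.
	 */
	vcpu->dec_expires = (int64_t)dec + (int64_t)tb - vcore_tb_offset;
}

int main(void)
{
	struct vcpu_sketch vcpu;

	/* No-vcpu path: DEC simply mirrors HDEC. */
	sim_hdec = 1500;
	primary_no_guest_prepare_nap();
	printf("no-vcpu thread: DEC = %d\n", (int)sim_dec);

	/* Ceded-vcpu path: DEC capped by HDEC, expiry saved in host TB. */
	sim_dec = 5000; sim_hdec = 2000; sim_tb = 1000000;
	cede_prepare_nap(&vcpu, 100);
	printf("ceded vcpu: DEC = %d, guest DEC expires at host TB %lld\n",
	       (int)sim_dec, (long long)vcpu.dec_expires);
	return 0;
}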
@@ -172,6 +172,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 	/* We handle this much like a ceded vcpu */
+	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	mfspr r3, SPRN_HDEC
+	mtspr SPRN_DEC, r3
 	/* set our bit in napping_threads */
 	ld r5, HSTATE_KVM_VCORE(r13)
 	lbz r7, HSTATE_PTID(r13)
@@ -223,6 +226,12 @@ kvm_novcpu_wakeup:
 	cmpdi r3, 0
 	bge kvm_novcpu_exit
+	/* See if our timeslice has expired (HDEC is negative) */
+	mfspr r0, SPRN_HDEC
+	li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
+	cmpwi r0, 0
+	blt kvm_novcpu_exit
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
 	ld r4, HSTATE_KVM_VCPU(r13)
 	cmpdi r4, 0
@@ -1493,10 +1502,10 @@ kvmhv_do_exit: /* r12 = trap, r13 = paca */
 	cmpwi r3,0x100 /* Are we the first here? */
 	bge 43f
 	cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-	beq 40f
+	beq 43f
 	li r0,0
 	mtspr SPRN_HDEC,r0
-40:
 	/*
 	 * Send an IPI to any napping threads, since an HDEC interrupt
 	 * doesn't wake CPUs up from nap.
@@ -2124,6 +2133,27 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
 	/* save FP state */
 	bl kvmppc_save_fp
+	/*
+	 * Set DEC to the smaller of DEC and HDEC, so that we wake
+	 * no later than the end of our timeslice (HDEC interrupts
+	 * don't wake us from nap).
+	 */
+	mfspr r3, SPRN_DEC
+	mfspr r4, SPRN_HDEC
+	mftb r5
+	cmpw r3, r4
+	ble 67f
+	mtspr SPRN_DEC, r4
+67:
+	/* save expiry time of guest decrementer */
+	extsw r3, r3
+	add r3, r3, r5
+	ld r4, HSTATE_KVM_VCPU(r13)
+	ld r5, HSTATE_KVM_VCORE(r13)
+	ld r6, VCORE_TB_OFFSET(r5)
+	subf r3, r6, r3 /* convert to host TB value */
+	std r3, VCPU_DEC_EXPIRES(r4)
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
 	ld r4, HSTATE_KVM_VCPU(r13)
 	addi r3, r4, VCPU_TB_CEDE
@@ -2181,6 +2211,15 @@ kvm_end_cede:
 	/* load up FP state */
 	bl kvmppc_load_fp
+	/* Restore guest decrementer */
+	ld r3, VCPU_DEC_EXPIRES(r4)
+	ld r5, HSTATE_KVM_VCORE(r13)
+	ld r6, VCORE_TB_OFFSET(r5)
+	add r3, r3, r6 /* convert host TB to guest TB value */
+	mftb r7
+	subf r3, r7, r3
+	mtspr SPRN_DEC, r3
 	/* Load NV GPRS */
 	ld r14, VCPU_GPR(R14)(r4)
 	ld r15, VCPU_GPR(R15)(r4)
......
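Continuing the hedged sketch from above (same hypothetical helpers and struct), the wake-up side that the last hunk implements in assembly reduces to converting the saved host-timebase expiry back into guest timebase and reloading DEC with whatever remains:

/* Continuation of the earlier sketch: reload the guest DEC on wake-up. */
static void cede_finish_nap(struct vcpu_sketch *vcpu, int64_t vcore_tb_offset)
{
	/* Convert the saved host-TB expiry back into the guest timebase. */
	int64_t guest_expiry = vcpu->dec_expires + vcore_tb_offset;

	/* Whatever remains until expiry (possibly negative) goes back into DEC. */
	write_dec((int32_t)(guest_expiry - (int64_t)read_timebase()));
}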