Commit 072df813 authored by Paul Mackerras

Merge branch 'kvm-ppc-fixes' into kvm-ppc-next

This merges in a couple of fixes from the kvm-ppc-fixes branch that
modify the same areas of code as some commits from the kvm-ppc-next
branch, in order to resolve the conflicts.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parents c0101509 38c53af8
@@ -651,6 +651,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
 		hnow_r = hpte_new_to_old_r(hnow_r);
 	}
+
+	/*
+	 * If the HPT is being resized, don't update the HPTE,
+	 * instead let the guest retry after the resize operation
+	 * is complete.  The synchronization for mmu_ready test
+	 * vs. set is provided by the HPTE lock.
+	 */
+	if (!kvm->arch.mmu_ready)
+		goto out_unlock;
+
 	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
 	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
...
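The new bail-out above relies on a simple rule: the mmu_ready flag is only tested while the HPTE lock is held, so the test needs no barrier of its own. Below is a minimal user-space sketch of that pattern, with hypothetical names (table_ready, entry_lock, update_entry) and a pthread mutex standing in for the HPTE lock; it illustrates the locking idea only and is not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for kvm->arch.mmu_ready and the HPTE lock. */
static bool table_ready = true;	/* cleared for the duration of a resize */
static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns false when the caller should retry later, mirroring the
 * "goto out_unlock" path taken when !kvm->arch.mmu_ready. */
static bool update_entry(long *entry, long new_val)
{
	bool ok = true;

	pthread_mutex_lock(&entry_lock);
	if (!table_ready)		/* test is ordered by the entry lock */
		ok = false;
	else
		*entry = new_val;	/* safe: a resizer takes entry_lock too */
	pthread_mutex_unlock(&entry_lock);
	return ok;
}

int main(void)
{
	long e = 0;

	printf("updated: %d, entry: %ld\n", update_entry(&e, 42), e);
	return 0;
}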
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;
 
 	dir = iommu_tce_direction(tce);
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
-		return H_PARAMETER;
+			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+		ret = H_PARAMETER;
+		goto unlock_exit;
+	}
 
 	entry = ioba >> stt->page_shift;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-		if (dir == DMA_NONE) {
+		if (dir == DMA_NONE)
 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
 					stit->tbl, entry);
-		} else {
-			idx = srcu_read_lock(&vcpu->kvm->srcu);
+		else
 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
 					entry, ua, dir);
-			srcu_read_unlock(&vcpu->kvm->srcu, idx);
-		}
 
 		if (ret == H_SUCCESS)
 			continue;
 
 		if (ret == H_TOO_HARD)
-			return ret;
+			goto unlock_exit;
 
 		WARN_ON_ONCE(1);
 		kvmppc_clear_tce(stit->tbl, entry);
...
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	kvmppc_tce_put(stt, entry, tce);
 
-	return H_SUCCESS;
+unlock_exit:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
...
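This hunk widens the SRCU read-side critical section so that the address translation in the error path is covered too, and it funnels every early return through a single unlock_exit label so the lock is dropped exactly once. A hedged user-space sketch of that single-exit shape, with hypothetical names (addr_space_lock, translate, put_entry) and a pthread rwlock loosely standing in for SRCU:

#include <pthread.h>

static pthread_rwlock_t addr_space_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical translation helper; fails on an invalid address. */
static int translate(long gpa, long *ua)
{
	if (gpa < 0)
		return -1;
	*ua = gpa;
	return 0;
}

static int put_entry(long gpa)
{
	long ua;
	int ret = 0;

	/* Take the read lock once, up front, so every use of the
	 * translation result happens inside the critical section. */
	pthread_rwlock_rdlock(&addr_space_lock);

	if (translate(gpa, &ua)) {
		ret = -1;		/* like ret = H_PARAMETER */
		goto unlock_exit;
	}

	/* ... per-table work; hard failures also jump to unlock_exit ... */

unlock_exit:
	/* Every path, success or failure, drops the lock exactly once. */
	pthread_rwlock_unlock(&addr_space_lock);
	return ret;
}

int main(void)
{
	return put_entry(7) ? 1 : 0;
}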
@@ -2717,11 +2717,13 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 * Hard-disable interrupts, and check resched flag and signals.
 	 * If we need to reschedule or deliver a signal, clean up
 	 * and return without going into the guest(s).
+	 * If the mmu_ready flag has been cleared, don't go into the
+	 * guest because that means a HPT resize operation is in progress.
 	 */
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info)) {
+	    recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
@@ -3120,7 +3122,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded, i;
+	int n_ceded, i, r;
 	struct kvmppc_vcore *vc;
 	struct kvm_vcpu *v;
@@ -3174,6 +3176,30 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
 	       !signal_pending(current)) {
+		/* See if the MMU is ready to go */
+		if (!vcpu->kvm->arch.mmu_ready) {
+			spin_unlock(&vc->lock);
+			mutex_lock(&vcpu->kvm->lock);
+			r = 0;
+			if (!vcpu->kvm->arch.mmu_ready) {
+				if (!kvm_is_radix(vcpu->kvm))
+					r = kvmppc_hv_setup_htab_rma(vcpu);
+				if (!r) {
+					if (cpu_has_feature(CPU_FTR_ARCH_300))
+						kvmppc_setup_partition_table(vcpu->kvm);
+					vcpu->kvm->arch.mmu_ready = 1;
+				}
+			}
+			mutex_unlock(&vcpu->kvm->lock);
+			spin_lock(&vc->lock);
+			if (r) {
+				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+				vcpu->arch.ret = r;
+				break;
+			}
+		}
+
 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
 			kvmppc_vcore_end_preempt(vc);
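The block added to the run loop is the classic double-checked one-time-initialization shape: a cheap unlocked test of mmu_ready, then a re-test under kvm->lock so that only one vcpu performs the setup, with the flag published only on success. A hedged user-space sketch of that shape with hypothetical names (init_lock, setup_mmu, ensure_mmu_ready); note that in the kernel the mutex together with the smp_mb() pairing mentioned in the next hunk provides ordering that a plain flag would not give you by itself:

#include <pthread.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int mmu_ready;			/* stand-in for kvm->arch.mmu_ready */

/* Hypothetical setup step; returns 0 on success, playing the role of
 * kvmppc_hv_setup_htab_rma() in the hunk above. */
static int setup_mmu(void)
{
	return 0;
}

static int ensure_mmu_ready(void)
{
	int r = 0;

	if (!mmu_ready) {		/* cheap unlocked test first */
		pthread_mutex_lock(&init_lock);
		if (!mmu_ready) {	/* re-test under the lock */
			r = setup_mmu();
			if (!r)
				mmu_ready = 1;	/* publish only on success */
		}
		pthread_mutex_unlock(&init_lock);
	}
	return r;			/* non-zero fails the entry, as above */
}

int main(void)
{
	return ensure_mmu_ready();
}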
@@ -3293,24 +3319,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up MMU if necessary */
-	if (!vcpu->kvm->arch.mmu_ready) {
-		mutex_lock(&kvm->lock);
-		r = 0;
-		if (!kvm->arch.mmu_ready) {
-			if (!kvm_is_radix(vcpu->kvm))
-				r = kvmppc_hv_setup_htab_rma(vcpu);
-			if (!r) {
-				if (cpu_has_feature(CPU_FTR_ARCH_300))
-					kvmppc_setup_partition_table(kvm);
-				kvm->arch.mmu_ready = 1;
-			}
-		}
-		mutex_unlock(&kvm->lock);
-		if (r)
-			goto out;
-	}
-
 	flush_all_to_thread(current);
 
 	/* Save userspace EBB and other register values */
@@ -3358,7 +3366,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 	mtspr(SPRN_VRSAVE, user_vrsave);
 
- out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
 	return r;
...
@@ -1025,13 +1025,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	beq	no_xive
 	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
 	li	r9, TM_QW1_OS
-	stdcix	r11,r9,r10
 	eieio
+	stdcix	r11,r9,r10
 	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
 	li	r9, TM_QW1_OS + TM_WORD2
 	stwcix	r11,r9,r10
 	li	r9, 1
 	stw	r9, VCPU_XIVE_PUSHED(r4)
+	eieio
 no_xive:
 #endif /* CONFIG_KVM_XICS */
@@ -1346,6 +1347,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	bne	3f
 BEGIN_FTR_SECTION
 	PPC_MSGSYNC
+	lwsync
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
@@ -1436,8 +1438,8 @@ guest_exit_cont:	/* r9 = vcpu, r12 = trap, r13 = paca */
 	cmpldi	cr0, r10, 0
 	beq	1f
 	/* First load to pull the context, we ignore the value */
-	lwzx	r11, r7, r10
 	eieio
+	lwzx	r11, r7, r10
 	/* Second load to recover the context state (Words 0 and 1) */
 	ldx	r11, r6, r10
 	b	3f
@@ -1445,8 +1447,8 @@ guest_exit_cont:	/* r9 = vcpu, r12 = trap, r13 = paca */
 	cmpldi	cr0, r10, 0
 	beq	1f
 	/* First load to pull the context, we ignore the value */
-	lwzcix	r11, r7, r10
 	eieio
+	lwzcix	r11, r7, r10
 	/* Second load to recover the context state (Words 0 and 1) */
 	ldcix	r11, r6, r10
 3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1456,6 +1458,7 @@ guest_exit_cont:	/* r9 = vcpu, r12 = trap, r13 = paca */
 	stw	r10, VCPU_XIVE_PUSHED(r9)
 	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
 	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+	eieio
 1:
 #endif /* CONFIG_KVM_XICS */
 
 	/* Save more register state */
@@ -2838,6 +2841,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	PPC_MSGCLR(6)
 	/* see if it's a host IPI */
 	li	r3, 1
+BEGIN_FTR_SECTION
+	PPC_MSGSYNC
+	lwsync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
 	bnelr
...
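All of the assembly hunks above apply one ordering rule: accesses to the cache-inhibited XIVE thread-management area must be explicitly ordered, so eieio is issued before the MMIO access it guards rather than after it, and a message-send IPI is ordered against the following HSTATE_HOST_IPI load with PPC_MSGSYNC plus lwsync. A hedged C sketch of the store side only, compilable for powerpc64 (the eieio/mmio_write64 helpers here are illustrative, not kernel API):

#include <stdint.h>

/* Order all earlier loads/stores before what follows (PowerPC). */
static inline void eieio(void)
{
	__asm__ volatile("eieio" : : : "memory");
}

/* Store to a device register only after earlier stores (e.g. the
 * saved-state updates in the hunks above) have been ordered. */
static inline void mmio_write64(volatile uint64_t *reg, uint64_t val)
{
	eieio();	/* barrier first, then the cache-inhibited store */
	*reg = val;
}

int main(void)
{
	static volatile uint64_t fake_reg;	/* stand-in for a real register */

	mmio_write64(&fake_reg, 0x1234);
	return 0;
}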
@@ -643,7 +643,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 #endif
 	case KVM_CAP_PPC_HTM:
-		r = is_kvmppc_hv_enabled(kvm) &&
+		r = hv_enabled &&
 		    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
 		break;
 	default:
...
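The last hunk replaces is_kvmppc_hv_enabled(kvm) with the cached hv_enabled flag. KVM_CHECK_EXTENSION can be issued on the bare /dev/kvm file descriptor, in which case kvm is NULL, so the capability check must not dereference it. A hedged sketch of that shape with hypothetical names (struct vm, check_htm_cap):

#include <stdbool.h>
#include <stddef.h>

struct vm;				/* opaque VM handle; may be NULL */

static bool hv_enabled = true;		/* cached once at init time */

/* Capability query that must tolerate vm == NULL, because the query
 * can arrive on the system fd before any VM exists. */
static int check_htm_cap(struct vm *vm, bool cpu_has_htm)
{
	(void)vm;			/* deliberately never dereferenced */
	return hv_enabled && cpu_has_htm;
}

int main(void)
{
	return check_htm_cap(NULL, true);
}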