Commit 2a1d7946 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fix from Paolo Bonzini:
 "Fix SRCU bug introduced in the merge window"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86/xen: Take srcu lock when accessing kvm_memslots()
parents 799bac55 9c1a0744
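
For context, kvm_memslots() may only be dereferenced inside an SRCU read-side critical section on kvm->srcu. Below is a minimal sketch of that pattern as the fix applies it; access_gfn_cache() is a hypothetical placeholder for whatever callee ends up touching the memslots, not a real kernel function:

#include <linux/kvm_host.h>
#include <linux/srcu.h>

/* Hypothetical callee: anything that eventually calls kvm_memslots(). */
static void access_gfn_cache(struct kvm_vcpu *vcpu);

static void example_memslots_user(struct kvm_vcpu *vcpu)
{
        int idx;

        /*
         * kvm->srcu protects the memslots array; hold the read-side
         * lock across every access, including ones buried in callees.
         */
        idx = srcu_read_lock(&vcpu->kvm->srcu);
        access_gfn_cache(vcpu);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

The diff below moves exactly this bracket out of kvm_steal_time_set_preempted() and up into kvm_arch_vcpu_put(), so that the Xen runstate path (kvm_xen_runstate_set_preempted()) is covered by the lock as well.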
arch/x86/kvm/x86.c
@@ -4025,7 +4025,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
        struct kvm_host_map map;
        struct kvm_steal_time *st;
-       int idx;

        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
@@ -4033,15 +4032,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        if (vcpu->arch.st.preempted)
                return;

-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-
        if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
                        &vcpu->arch.st.cache, true))
-               goto out;
+               return;

        st = map.hva +
                offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4049,20 +4042,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;

        kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
-
-out:
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }

 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       int idx;
+
        if (vcpu->preempted && !vcpu->arch.guest_state_protected)
                vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);

+       /*
+        * Take the srcu lock as memslots will be accessed to check the gfn
+        * cache generation against the memslots generation.
+        */
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvm_xen_msr_enabled(vcpu->kvm))
                kvm_xen_runstate_set_preempted(vcpu);
        else
                kvm_steal_time_set_preempted(vcpu);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);

        static_call(kvm_x86_vcpu_put)(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
...
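
For illustration only, a sketch of the invariant the fix establishes: any callee reached from kvm_arch_vcpu_put() that dereferences kvm_memslots() may assume kvm->srcu is held, and can verify it under lockdep with srcu_read_lock_held(). check_memslots_generation() is a hypothetical function, not part of the kernel:

#include <linux/kvm_host.h>
#include <linux/srcu.h>

/* Hypothetical callee that compares a cached generation against memslots. */
static bool check_memslots_generation(struct kvm *kvm, u64 cached_gen)
{
        struct kvm_memslots *slots;

        /* Caller must hold the SRCU read-side lock (see kvm_arch_vcpu_put). */
        WARN_ON_ONCE(!srcu_read_lock_held(&kvm->srcu));

        slots = kvm_memslots(kvm);
        return slots->generation == cached_gen;
}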