Commit 5befdc38 authored by Takuya Yoshikawa, committed by Paolo Bonzini

KVM: Simplify kvm->tlbs_dirty handling

When this was introduced, kvm_flush_remote_tlbs() could be called
without holding mmu_lock.  It is now acknowledged that the function
must be called before releasing mmu_lock, and all callers have already
been changed to do so.

There is no need to use smp_mb() and cmpxchg() any more.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f18eb31f
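
For context, the locking discipline the message relies on looks roughly
like the following in callers. This is a minimal sketch, not code from
the tree; zap_pages_example() is a hypothetical caller standing in for
the real MMU paths:

	static void zap_pages_example(struct kvm *kvm)
	{
		spin_lock(&kvm->mmu_lock);

		/* ... drop sptes, possibly setting kvm->tlbs_dirty ... */

		/*
		 * Flush before releasing mmu_lock: nothing can set
		 * tlbs_dirty again until the lock is dropped, so the
		 * plain store in kvm_flush_remote_tlbs() cannot lose
		 * a concurrent update.
		 */
		kvm_flush_remote_tlbs(kvm);
		spin_unlock(&kvm->mmu_lock);
	}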
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -913,7 +913,8 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
  * used by guest then tlbs are not flushed, so guest is allowed to access the
  * freed pages.
- * And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
+ * We set tlbs_dirty to let the notifier know this change and delay the flush
+ * until such a case actually happens.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -942,7 +943,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			return -EINVAL;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			vcpu->kvm->tlbs_dirty++;
+			vcpu->kvm->tlbs_dirty = true;
 			continue;
 		}
@@ -957,7 +958,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i]);
-			vcpu->kvm->tlbs_dirty++;
+			vcpu->kvm->tlbs_dirty = true;
 			continue;
 		}
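
The comment rewritten above describes the consumer side of this scheme:
the MMU notifier picks up tlbs_dirty and performs the delayed flush
before the freed pages can be reused. Paraphrasing the relevant part of
kvm_mmu_notifier_invalidate_range_start() in virt/kvm/kvm_main.c from
this era (surrounding code elided; details may differ slightly):

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* the tlb must be flushed before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);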
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -401,7 +401,9 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
-	long tlbs_dirty;
+	/* Protected by mmu_lock */
+	bool tlbs_dirty;
+
 	struct list_head devices;
 };
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -186,12 +186,9 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	long dirty_count = kvm->tlbs_dirty;
-
-	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
-	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
+	kvm->tlbs_dirty = false;
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
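
For contrast, here is the removed pattern again with comments on why it
existed. A lock-free caller could race with a vcpu setting tlbs_dirty
while the flush IPIs were in flight, so the count was sampled first and
cmpxchg() cleared it only if it was unchanged; a plain store could have
discarded such a concurrent update. With mmu_lock held across the
flush, that race is gone:

	long dirty_count = kvm->tlbs_dirty;

	smp_mb();	/* read tlbs_dirty before requesting the flush */
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	/* clear only if no new entries were dirtied in the meantime */
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);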