Commit 9a43c5d9 authored by Paolo Bonzini

KVM: MMU: cleanup __kvm_sync_page and its callers

Calling kvm_unlink_unsync_page in the middle of __kvm_sync_page makes
things unnecessarily tricky.  If kvm_mmu_prepare_zap_page is called,
it will call kvm_unlink_unsync_page too.  So kvm_unlink_unsync_page can
be called just as well at the beginning or the end of __kvm_sync_page...
which means that we might do it in kvm_sync_page too and remove the
parameter.

kvm_sync_page ends up being the same code that kvm_sync_pages used
to have before the previous patch.

Reviewed-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent df748f86
@@ -1917,16 +1917,13 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 /* @sp->gfn should be write-protected at the call site */
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			   struct list_head *invalid_list, bool clear_unsync)
+			   struct list_head *invalid_list)
 {
 	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
 
-	if (clear_unsync)
-		kvm_unlink_unsync_page(vcpu->kvm, sp);
-
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
@@ -1956,7 +1953,7 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
 	LIST_HEAD(invalid_list);
 	int ret;
 
-	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
+	ret = __kvm_sync_page(vcpu, sp, &invalid_list);
 
 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, !ret);
 	return ret;
@@ -1972,7 +1969,8 @@ static void mmu_audit_disable(void) { }
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
-	return __kvm_sync_page(vcpu, sp, invalid_list, true);
+	kvm_unlink_unsync_page(vcpu->kvm, sp);
+	return __kvm_sync_page(vcpu, sp, invalid_list);
 }
 
 /* @gfn should be write-protected at the call site */
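For reference, a sketch of how the three touched functions read after this commit, reconstructed from the hunks above. Lines outside the visible diff context (the return 0 tail of __kvm_sync_page and the second parameter of kvm_sync_page_transient) are assumptions inferred from the surrounding mmu.c code, not copied verbatim.

/* Sketch only: reconstructed from the hunks above. */

/* @sp->gfn should be write-protected at the call site */
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   struct list_head *invalid_list)
{
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return 1;
	}

	/*
	 * No explicit kvm_unlink_unsync_page() here any more: per the
	 * commit message, kvm_mmu_prepare_zap_page() unlinks the page
	 * itself on the failure paths, and on success the caller
	 * decides whether to unlink.
	 */
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return 1;
	}

	return 0;	/* assumed: success tail lies outside the hunk */
}

static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_page *sp)
{
	LIST_HEAD(invalid_list);
	int ret;

	/* Transient sync: the page is left marked unsync. */
	ret = __kvm_sync_page(vcpu, sp, &invalid_list);

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, !ret);
	return ret;
}

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	/*
	 * Unlink up front instead of passing clear_unsync=true; if the
	 * page ends up zapped inside __kvm_sync_page(), the zap path
	 * handles unsync pages anyway (per the commit message).
	 */
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return __kvm_sync_page(vcpu, sp, invalid_list);
}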