Commit cf48f9e2 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Rename rmap_write_protect() to kvm_vcpu_write_protect_gfn()

rmap_write_protect() is a poor name because it also write-protects SPTEs
in the TDP MMU, not just SPTEs in the rmap. It is also confusing that
rmap_write_protect() is not a simple wrapper around
__rmap_write_protect(), since that is the common pattern for functions
with double-underscore names.
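
For context, the double-underscore convention referred to above usually looks roughly like the sketch below. The names are hypothetical and this is not the actual mmu.c code; it only illustrates the pattern in which the un-prefixed function is a thin wrapper that resolves its argument and delegates to the __-prefixed variant, which does the core work.

/* Hypothetical illustration of the wrapper convention; not real KVM code. */
struct example_rmap;

/* Core logic, operating on an already-resolved rmap. */
static bool __example_write_protect(struct example_rmap *rmap);

/* Hypothetical lookup helper used by the wrapper. */
static struct example_rmap *example_gfn_to_rmap(u64 gfn);

/* Thin wrapper: resolve the gfn, then delegate to the core function. */
static bool example_write_protect(u64 gfn)
{
        return __example_write_protect(example_gfn_to_rmap(gfn));
}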

Rename rmap_write_protect() to kvm_vcpu_write_protect_gfn() to convey
that KVM is write-protecting a specific gfn in the context of a vCPU.
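
For reference, the renamed helper is small; the following is a minimal sketch of its body, assuming the rename leaves the logic untouched (the diff below shows only the signature and the slot local; the memslot lookup and the call into kvm_mmu_slot_gfn_write_protect() are reconstructed here and are not guaranteed to match the tree verbatim):

static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
{
        struct kvm_memory_slot *slot;

        /* Resolve the gfn against the given vCPU's view of the memslots... */
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

        /* ...and write-protect it through the slot-scoped helper. */
        return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}

The vCPU-scoped name reflects that the memslot is resolved relative to the given vCPU, while the write protection itself is applied per slot and gfn.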

No functional change intended.
Reviewed-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220119230739.2234394-2-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 413af660
@@ -1419,7 +1419,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
         return write_protected;
 }
 
-static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
 {
         struct kvm_memory_slot *slot;
 
@@ -2022,7 +2022,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
                 bool protected = false;
 
                 for_each_sp(pages, sp, parents, i)
-                        protected |= rmap_write_protect(vcpu, sp->gfn);
+                        protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
 
                 if (protected) {
                         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
@@ -2147,7 +2147,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         hlist_add_head(&sp->hash_link, sp_list);
         if (!direct) {
                 account_shadowed(vcpu->kvm, sp);
-                if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
+                if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
                         kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
         }
         trace_kvm_mmu_get_page(sp, true);