Commit c3c6c9fc authored by Lai Jiangshan, committed by Sean Christopherson

KVM: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into mmu.c

Rename mmu->sync_page to mmu->sync_spte and move the code out
of FNAME(sync_page)'s loop body into mmu.c.

No functional change intended.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20230216154115.710033-6-jiangshanlai@gmail.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 8ef228c2
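For orientation before the diff, here is a minimal stand-alone sketch of the control flow this patch creates: the per-shadow-page loop now lives in generic MMU code and calls a per-SPTE hook, OR-ing the results into a "flush needed" flag. This is illustrative only, not kernel code; all demo_* names are invented, and the failure check is simplified to a plain "ret < 0".

/*
 * Illustrative sketch only -- NOT kernel code.  Models the new split:
 * generic code iterates the shadow page, a per-SPTE hook syncs one entry.
 */
#include <stdio.h>

#define SPTE_ENT_PER_PAGE 512   /* entries in one shadow page table */

struct demo_mmu {
        /* per-entry hook: < 0 on failure, 0 synced, > 0 synced + flush needed */
        int (*sync_spte)(int i);
};

/* Example hook: pretend every 128th entry required a TLB flush. */
static int demo_sync_spte(int i)
{
        return (i % 128 == 0) ? 1 : 0;
}

/* Mirrors the new __kvm_sync_page() loop: sync each entry, aggregate flush. */
static int demo_sync_page(const struct demo_mmu *mmu)
{
        int flush = 0;
        int i;

        for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
                int ret = mmu->sync_spte(i);

                if (ret < 0)
                        return -1;      /* sync failed: caller zaps the page */
                flush |= ret;
        }
        return flush;                   /* > 0 tells the caller to flush TLBs */
}

int main(void)
{
        struct demo_mmu mmu = { .sync_spte = demo_sync_spte };

        printf("flush needed: %d\n", demo_sync_page(&mmu));
        return 0;
}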
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -439,8 +439,8 @@ struct kvm_mmu {
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gpa_t gva_or_gpa, u64 access,
 			    struct x86_exception *exception);
-	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp);
+	int (*sync_spte)(struct kvm_vcpu *vcpu,
+			 struct kvm_mmu_page *sp, int i);
 	void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1934,7 +1934,7 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
 	 * reserved bits checks will be wrong, etc...
 	 */
-	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_page ||
+	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
 			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
 		return false;
 
@@ -1943,10 +1943,30 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
+	int flush = 0;
+	int i;
+
 	if (!kvm_sync_page_check(vcpu, sp))
 		return -1;
 
-	return vcpu->arch.mmu->sync_page(vcpu, sp);
+	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
+		int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i);
+
+		if (ret < -1)
+			return -1;
+		flush |= ret;
+	}
+
+	/*
+	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
+	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
+	 * unmap or dirty logging event doesn't fail to flush.  The guest is
+	 * responsible for flushing the TLB to ensure any changes in protection
+	 * bits are recognized, i.e. until the guest flushes or page faults on
+	 * a relevant address, KVM is architecturally allowed to let vCPUs use
+	 * cached translations with the old protection bits.
+	 */
+	return flush;
 }
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -4504,7 +4524,7 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
-	context->sync_page = NULL;
+	context->sync_spte = NULL;
 	context->invlpg = NULL;
 }
 
@@ -5095,7 +5115,7 @@ static void paging64_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
-	context->sync_page = paging64_sync_page;
+	context->sync_spte = paging64_sync_spte;
 	context->invlpg = paging64_invlpg;
 }
 
@@ -5103,7 +5123,7 @@ static void paging32_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
-	context->sync_page = paging32_sync_page;
+	context->sync_spte = paging32_sync_spte;
 	context->invlpg = paging32_invlpg;
 }
 
@@ -5192,7 +5212,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->cpu_role.as_u64 = cpu_role.as_u64;
 	context->root_role.word = root_role.word;
 	context->page_fault = kvm_tdp_page_fault;
-	context->sync_page = NULL;
+	context->sync_spte = NULL;
 	context->invlpg = NULL;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -5324,7 +5344,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
 		context->page_fault = ept_page_fault;
 		context->gva_to_gpa = ept_gva_to_gpa;
-		context->sync_page = ept_sync_page;
+		context->sync_spte = ept_sync_spte;
 		context->invlpg = ept_invlpg;
 
 		update_permission_bitmask(context, true);
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -937,87 +937,67 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
  * can't change unless all sptes pointing to it are nuked first.
  *
  * Returns
- * < 0: the sp should be zapped
- *   0: the sp is synced and no tlb flushing is required
- * > 0: the sp is synced and tlb flushing is required
+ * < 0: failed to sync spte
+ *   0: the spte is synced and no tlb flushing is required
+ * > 0: the spte is synced and tlb flushing is required
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
 {
-	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
-	bool flush = false;
-
-	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
-
-	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
-		u64 *sptep, spte;
-		struct kvm_memory_slot *slot;
-		unsigned pte_access;
-		pt_element_t gpte;
-		gpa_t pte_gpa;
-		gfn_t gfn;
+	u64 *sptep, spte;
+	struct kvm_memory_slot *slot;
+	unsigned pte_access;
+	pt_element_t gpte;
+	gpa_t pte_gpa;
+	gfn_t gfn;
 
-		if (!sp->spt[i])
-			continue;
+	if (!sp->spt[i])
+		return 0;
 
-		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
+	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
+	pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
-					       sizeof(pt_element_t)))
-			return -1;
+	if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
+				       sizeof(pt_element_t)))
+		return -1;
 
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			flush = true;
-			continue;
-		}
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
+		return 1;
 
-		gfn = gpte_to_gfn(gpte);
-		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(gpte);
-		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
+	gfn = gpte_to_gfn(gpte);
+	pte_access = sp->role.access;
+	pte_access &= FNAME(gpte_access)(gpte);
+	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
-			continue;
+	if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
+		return 0;
 
-		/*
-		 * Drop the SPTE if the new protections would result in a RWX=0
-		 * SPTE or if the gfn is changing.  The RWX=0 case only affects
-		 * EPT with execute-only support, i.e. EPT without an effective
-		 * "present" bit, as all other paging modes will create a
-		 * read-only SPTE if pte_access is zero.
-		 */
-		if ((!pte_access && !shadow_present_mask) ||
-		    gfn != kvm_mmu_page_get_gfn(sp, i)) {
-			drop_spte(vcpu->kvm, &sp->spt[i]);
-			flush = true;
-			continue;
-		}
+	/*
+	 * Drop the SPTE if the new protections would result in a RWX=0
+	 * SPTE or if the gfn is changing.  The RWX=0 case only affects
+	 * EPT with execute-only support, i.e. EPT without an effective
+	 * "present" bit, as all other paging modes will create a
+	 * read-only SPTE if pte_access is zero.
+	 */
+	if ((!pte_access && !shadow_present_mask) ||
+	    gfn != kvm_mmu_page_get_gfn(sp, i)) {
+		drop_spte(vcpu->kvm, &sp->spt[i]);
+		return 1;
+	}
 
-		/* Update the shadowed access bits in case they changed. */
-		kvm_mmu_page_set_access(sp, i, pte_access);
-
-		sptep = &sp->spt[i];
-		spte = *sptep;
-		host_writable = spte & shadow_host_writable_mask;
-		slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-		make_spte(vcpu, sp, slot, pte_access, gfn,
-			  spte_to_pfn(spte), spte, true, false,
-			  host_writable, &spte);
+	/* Update the shadowed access bits in case they changed. */
+	kvm_mmu_page_set_access(sp, i, pte_access);
 
-		flush |= mmu_spte_update(sptep, spte);
-	}
+	sptep = &sp->spt[i];
+	spte = *sptep;
+	host_writable = spte & shadow_host_writable_mask;
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	make_spte(vcpu, sp, slot, pte_access, gfn,
+		  spte_to_pfn(spte), spte, true, false,
+		  host_writable, &spte);
 
-	/*
-	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
-	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
-	 * unmap or dirty logging event doesn't fail to flush.  The guest is
-	 * responsible for flushing the TLB to ensure any changes in protection
-	 * bits are recognized, i.e. until the guest flushes or page faults on
-	 * a relevant address, KVM is architecturally allowed to let vCPUs use
-	 * cached translations with the old protection bits.
-	 */
-	return flush;
+	return mmu_spte_update(sptep, spte);
 }
 
 #undef pt_element_t