Commit 0f53b5b1 authored by Xiao Guangrong, committed by Marcelo Tosatti

KVM: MMU: cleanup pte write path

This patch does:
- call vcpu->arch.mmu.update_pte directly
- use gfn_to_pfn_atomic in update_pte path

The suggestion is from Avi.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 5d163b1c
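
The cleanup has two independently useful parts. The first replaces the explicit paging-mode branch in mmu_pte_write_new_pte() with a per-context update_pte callback, installed when the MMU context is initialized. A standalone sketch of that dispatch pattern (illustrative only: struct mmu_ctx, pte_write() and the handler names are invented for the example, not kernel code):

/* Sketch of the callback dispatch the patch introduces; compile with any C
 * compiler. Names are invented stand-ins for the kernel's mmu context. */
#include <stdio.h>

struct mmu_ctx;
typedef void (*update_pte_fn)(struct mmu_ctx *ctx, unsigned long mmu_seq);

struct mmu_ctx {
	const char *mode;
	update_pte_fn update_pte;	/* installed once by the init path */
};

static void paging32_update(struct mmu_ctx *ctx, unsigned long mmu_seq)
{
	printf("%s: legacy 32-bit pte update (seq %lu)\n", ctx->mode, mmu_seq);
}

static void paging64_update(struct mmu_ctx *ctx, unsigned long mmu_seq)
{
	printf("%s: pae/64-bit pte update (seq %lu)\n", ctx->mode, mmu_seq);
}

static void pte_write(struct mmu_ctx *ctx, unsigned long mmu_seq)
{
	/* no "if (cr4_pae)" branch here: the context knows its own handler */
	ctx->update_pte(ctx, mmu_seq);
}

int main(void)
{
	struct mmu_ctx c32 = { "paging32", paging32_update };
	struct mmu_ctx c64 = { "paging64", paging64_update };

	pte_write(&c32, 1);
	pte_write(&c64, 1);
	return 0;
}

Each init path in the diff below (nonpaging, tdp, paging32, paging64) wires its own handler exactly once, so the hot write path dispatches through a single pointer instead of re-testing sp->role.cr4_pae.
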
arch/x86/include/asm/kvm_host.h
@@ -255,6 +255,8 @@ struct kvm_mmu {
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			   u64 *spte, const void *pte, unsigned long mmu_seq);
 	hpa_t root_hpa;
 	int root_level;
 	int shadow_root_level;
@@ -335,11 +337,6 @@ struct kvm_vcpu_arch {
 	u64 *last_pte_updated;
 	gfn_t last_pte_gfn;
 
-	struct {
-		pfn_t pfn;	/* pfn corresponding to that gfn */
-		unsigned long mmu_seq;
-	} update_pte;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
arch/x86/kvm/mmu.c
@@ -1204,6 +1204,13 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
 }
 
+static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
+				 struct kvm_mmu_page *sp, u64 *spte,
+				 const void *pte, unsigned long mmu_seq)
+{
+	WARN_ON(1);
+}
+
 #define KVM_PAGE_ARRAY_NR 16
 
 struct kvm_mmu_pages {
@@ -2796,6 +2803,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->prefetch_page = nonpaging_prefetch_page;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
+	context->update_pte = nonpaging_update_pte;
 	context->root_level = 0;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -2925,6 +2933,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 	context->prefetch_page = paging64_prefetch_page;
 	context->sync_page = paging64_sync_page;
 	context->invlpg = paging64_invlpg;
+	context->update_pte = paging64_update_pte;
 	context->free = paging_free;
 	context->root_level = level;
 	context->shadow_root_level = level;
@@ -2953,6 +2962,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
 	context->prefetch_page = paging32_prefetch_page;
 	context->sync_page = paging32_sync_page;
 	context->invlpg = paging32_invlpg;
+	context->update_pte = paging32_update_pte;
 	context->root_level = PT32_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
@@ -2977,6 +2987,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->prefetch_page = nonpaging_prefetch_page;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
+	context->update_pte = nonpaging_update_pte;
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
@@ -3081,8 +3092,6 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.update_pte.pfn = bad_pfn;
-
 	if (mmu_is_nested(vcpu))
 		return init_kvm_nested_mmu(vcpu);
 	else if (tdp_enabled)
@@ -3156,7 +3165,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu_page *sp,
 				  u64 *spte,
-				  const void *new)
+				  const void *new, unsigned long mmu_seq)
 {
 	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
@@ -3164,10 +3173,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
-	if (!sp->role.cr4_pae)
-		paging32_update_pte(vcpu, sp, spte, new);
-	else
-		paging64_update_pte(vcpu, sp, spte, new);
+	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new, mmu_seq);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -3202,27 +3208,6 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
-static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-					  u64 gpte)
-{
-	gfn_t gfn;
-	pfn_t pfn;
-
-	if (!is_present_gpte(gpte))
-		return;
-
-	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
-
-	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
-
-	if (is_error_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
-		return;
-	}
-	vcpu->arch.update_pte.pfn = pfn;
-}
-
 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
@@ -3244,21 +3229,14 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
-	u64 entry, gentry;
-	u64 *spte;
-	unsigned offset = offset_in_page(gpa);
-	unsigned pte_size;
-	unsigned page_offset;
-	unsigned misaligned;
-	unsigned quadrant;
-	int level;
-	int flooded = 0;
-	int npte;
-	int r;
-	int invlpg_counter;
+	unsigned long mmu_seq;
+	u64 entry, gentry, *spte;
+	unsigned pte_size, page_offset, misaligned, quadrant, offset;
+	int level, npte, invlpg_counter, r, flooded = 0;
 	bool remote_flush, local_flush, zap_page;
 
 	zap_page = remote_flush = local_flush = false;
+	offset = offset_in_page(gpa);
 
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
@@ -3293,7 +3271,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}
 
-	mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
+	mmu_seq = vcpu->kvm->mmu_notifier_seq;
+	smp_rmb();
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
 		gentry = 0;
@@ -3365,7 +3345,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			if (gentry &&
 			    !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
 			      & mask.word))
-				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
+				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry,
+						      mmu_seq);
 			if (!remote_flush && need_remote_flush(entry, *spte))
 				remote_flush = true;
 			++spte;
@@ -3375,10 +3356,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
-		kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
-		vcpu->arch.update_pte.pfn = bad_pfn;
-	}
 }
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
arch/x86/kvm/paging_tmpl.h
@@ -325,7 +325,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 }
 
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			      u64 *spte, const void *pte)
+			      u64 *spte, const void *pte, unsigned long mmu_seq)
 {
 	pt_element_t gpte;
 	unsigned pte_access;
@@ -337,12 +337,14 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
-	pfn = vcpu->arch.update_pte.pfn;
-	if (is_error_pfn(pfn))
+	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
+	if (is_error_pfn(pfn)) {
+		kvm_release_pfn_clean(pfn);
 		return;
-	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
+	}
+	if (mmu_notifier_retry(vcpu, mmu_seq))
 		return;
-	kvm_get_pfn(pfn);
+
 	/*
 	 * we call mmu_set_spte() with host_writable = true beacuse that
 	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
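
The second part threads mmu_seq from kvm_mmu_pte_write() down to FNAME(update_pte), which now fetches the pfn itself with gfn_to_pfn_atomic() under mmu_lock and discards the result when mmu_notifier_retry() reports a racing invalidation. A minimal user-space model of that snapshot-and-retry idiom (illustrative only; KVM's real mmu_notifier_retry() checks mmu_notifier_count and mmu_notifier_seq under mmu_lock, and the variables here are simplified stand-ins):

/* Standalone model of the notifier-sequence retry; compile with any C
 * compiler. Real KVM uses smp_rmb() ordering and holds mmu_lock here. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long mmu_notifier_seq;		/* bumped when an invalidation completes */
static unsigned long invalidations_in_progress;	/* count of invalidations still running */

static bool mmu_notifier_retry(unsigned long snapshot)
{
	/* stale if an invalidation is running or finished since the snapshot */
	return invalidations_in_progress != 0 || mmu_notifier_seq != snapshot;
}

int main(void)
{
	unsigned long seq = mmu_notifier_seq;	/* snapshot before the pfn lookup */

	/* ... the gfn->pfn translation would happen here ... */
	mmu_notifier_seq++;			/* simulate a racing invalidation */

	if (mmu_notifier_retry(seq))
		printf("lookup raced with invalidation: release the pfn and bail out\n");
	return 0;
}

The key property is that the sequence number is sampled (with smp_rmb() ordering in the kernel) before the page lookup, so any invalidation completing in between is guaranteed to trip the retry check and the stale pfn is dropped rather than installed in a spte.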