Commit fa1de2bf authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: add missing reserved bits check in speculative path

In the speculative path, we should check the guest pte's reserved bits just as
the real processor does.
Reported-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 6e3e243c
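
The check added below relies on is_rsvd_bits_set(), which tests a guest PTE
against reserved-bit masks that KVM precomputes per paging mode and level. A
minimal standalone sketch of that kind of test (not kernel code; the mask here
is hypothetical, while the real masks depend on CR4.PAE, EFER.NXE and the
guest's MAXPHYADDR):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mask for illustration: treat bits 62:52 as reserved. */
#define RSVD_MASK	((((uint64_t)1 << 11) - 1) << 52)

static bool rsvd_bits_set(uint64_t gpte)
{
	/*
	 * Any overlap with the reserved mask means a well-behaved guest
	 * could never have written this PTE, so a speculative shadow
	 * update must bail out, as the patch below does.
	 */
	return (gpte & RSVD_MASK) != 0;
}

int main(void)
{
	uint64_t ok  = 0x1067;				/* present, accessed, dirty */
	uint64_t bad = ok | ((uint64_t)1 << 55);	/* a reserved bit set */

	printf("ok:  %d\n", rsvd_bits_set(ok));		/* prints 0 */
	printf("bad: %d\n", rsvd_bits_set(bad));	/* prints 1 */
	return 0;
}

Both mmu_pte_write_new_pte() and sync_page() below apply this test before
trusting a guest-written PTE.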
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2697,6 +2697,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 		return;
 	}
 
+	if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
+		return;
+
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (!sp->role.cr4_pae)
 		paging32_update_pte(vcpu, sp, spte, new);
@@ -2775,6 +2778,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       bool guest_initiated)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
+	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
@@ -2849,6 +2853,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		}
 	}
 
+	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
 		pte_size = sp->role.cr4_pae ? 8 : 4;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
@@ -2896,7 +2901,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	while (npte--) {
 		entry = *spte;
 		mmu_pte_write_zap_pte(vcpu, sp, spte);
-		if (gentry)
+		if (gentry &&
+		      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
+		      & mask.word))
 			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 		if (!remote_flush && need_remote_flush(entry, *spte))
 			remote_flush = true;
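
The widened guard above compares only the role bits that change how a guest
PTE is decoded: mask has just cr0_wp, cr4_pae and nxe set, so XORing the two
packed role words and ANDing with mask.word is non-zero exactly when the
shadow page was built under a different interpretation of guest PTEs than the
current one, in which case the speculative update is skipped. A standalone
sketch of the trick, with a hypothetical bitfield layout standing in for
union kvm_mmu_page_role:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packed role layout, for illustration only. */
union page_role {
	uint32_t word;
	struct {
		uint32_t level:4;
		uint32_t cr4_pae:1;
		uint32_t cr0_wp:1;
		uint32_t nxe:1;
	};
};

int main(void)
{
	union page_role sp_role = { .word = 0 };
	union page_role base_role = { .word = 0 };
	union page_role mask = { .word = 0 };

	sp_role.level = 2;	/* differs, but not a masked field */
	sp_role.cr0_wp = 1;
	base_role.level = 1;
	base_role.cr0_wp = 1;

	/* Select only the fields that affect guest PTE decoding. */
	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;

	if (!((sp_role.word ^ base_role.word) & mask.word))
		printf("roles agree on cr0_wp/cr4_pae/nxe\n");	/* printed */

	base_role.nxe = 1;	/* now a masked field differs */
	if ((sp_role.word ^ base_role.word) & mask.word)
		printf("roles differ on a masked field\n");	/* printed */
	return 0;
}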
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -638,8 +638,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (gfn != sp->gfns[i] ||
-		      !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
+		if (is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL)
+		      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
+		      || !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;
 
 			if (is_present_gpte(gpte) || !clear_unsync)