Commit 5b7e0102 authored by Avi Kivity's avatar Avi Kivity

KVM: MMU: Replace role.glevels with role.cr4_pae

There is no real distinction between glevels=3 and glevels=4; both have
exactly the same format and the code is treated exactly the same way.  Drop
role.glevels and replace it with role.cr4_pae (which is meaningful).  This
simplifies the code a bit.

As a side effect, it allows sharing shadow page tables between pae and
longmode guest page tables at the same guest page.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent e269fb21
...@@ -171,8 +171,8 @@ struct kvm_pte_chain { ...@@ -171,8 +171,8 @@ struct kvm_pte_chain {
union kvm_mmu_page_role { union kvm_mmu_page_role {
unsigned word; unsigned word;
struct { struct {
unsigned glevels:4;
unsigned level:4; unsigned level:4;
unsigned cr4_pae:1;
unsigned quadrant:2; unsigned quadrant:2;
unsigned pad_for_nice_hex_output:6; unsigned pad_for_nice_hex_output:6;
unsigned direct:1; unsigned direct:1;
......
...@@ -1206,7 +1206,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp); ...@@ -1206,7 +1206,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{ {
if (sp->role.glevels != vcpu->arch.mmu.root_level) { if (sp->role.cr4_pae != !!is_pae(vcpu)) {
kvm_mmu_zap_page(vcpu->kvm, sp); kvm_mmu_zap_page(vcpu->kvm, sp);
return 1; return 1;
} }
...@@ -1329,7 +1329,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, ...@@ -1329,7 +1329,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role.level = level; role.level = level;
role.direct = direct; role.direct = direct;
if (role.direct) if (role.direct)
role.glevels = 0; role.cr4_pae = 0;
role.access = access; role.access = access;
if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
...@@ -2443,7 +2443,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu) ...@@ -2443,7 +2443,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
else else
r = paging32_init_context(vcpu); r = paging32_init_context(vcpu);
vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level; vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
return r; return r;
} }
...@@ -2532,7 +2532,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, ...@@ -2532,7 +2532,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
} }
++vcpu->kvm->stat.mmu_pte_updated; ++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL) if (!sp->role.cr4_pae)
paging32_update_pte(vcpu, sp, spte, new); paging32_update_pte(vcpu, sp, spte, new);
else else
paging64_update_pte(vcpu, sp, spte, new); paging64_update_pte(vcpu, sp, spte, new);
...@@ -2681,7 +2681,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, ...@@ -2681,7 +2681,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
if (sp->gfn != gfn || sp->role.direct || sp->role.invalid) if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
continue; continue;
pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; pte_size = sp->role.cr4_pae ? 8 : 4;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
misaligned |= bytes < 4; misaligned |= bytes < 4;
if (misaligned || flooded) { if (misaligned || flooded) {
...@@ -2705,7 +2705,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, ...@@ -2705,7 +2705,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
page_offset = offset; page_offset = offset;
level = sp->role.level; level = sp->role.level;
npte = 1; npte = 1;
if (sp->role.glevels == PT32_ROOT_LEVEL) { if (!sp->role.cr4_pae) {
page_offset <<= 1; /* 32->64 */ page_offset <<= 1; /* 32->64 */
/* /*
* A 32-bit pde maps 4MB while the shadow pdes map * A 32-bit pde maps 4MB while the shadow pdes map
......
...@@ -28,9 +28,10 @@ ...@@ -28,9 +28,10 @@
\ \
role.word = __entry->role; \ role.word = __entry->role; \
\ \
trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge" \ trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s %spge" \
" %snxe root %u %s%c", \ " %snxe root %u %s%c", \
__entry->gfn, role.level, role.glevels, \ __entry->gfn, role.level, \
role.cr4_pae ? " pae" : "", \
role.quadrant, \ role.quadrant, \
role.direct ? " direct" : "", \ role.direct ? " direct" : "", \
access_str[role.access], \ access_str[role.access], \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment