Commit 1313cc2b authored by Jim Mattson, committed by Paolo Bonzini

kvm: mmu: Add guest_mode to kvm_mmu_page_role

L1 and L2 need to have disjoint mappings, so that L1's APIC access
page (under VMX) can be omitted from L2's mappings.
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ab5df31c
@@ -258,7 +258,8 @@ union kvm_mmu_page_role {
 		unsigned smep_andnot_wp:1;
 		unsigned smap_andnot_wp:1;
 		unsigned ad_disabled:1;
-		unsigned :7;
+		unsigned guest_mode:1;
+		unsigned :6;
 		/*
 		 * This is left at the top of the word so that
@@ -4468,6 +4468,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->base_role.word = 0;
+	context->base_role.guest_mode = is_guest_mode(vcpu);
 	context->base_role.smm = is_smm(vcpu);
 	context->base_role.ad_disabled = (shadow_accessed_mask == 0);
 	context->page_fault = tdp_page_fault;
@@ -4534,6 +4535,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 		= smep && !is_write_protection(vcpu);
 	context->base_role.smap_andnot_wp
 		= smap && !is_write_protection(vcpu);
+	context->base_role.guest_mode = is_guest_mode(vcpu);
 	context->base_role.smm = is_smm(vcpu);
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
@@ -4559,7 +4561,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = false;
 	context->base_role.ad_disabled = !accessed_dirty;
-
+	context->base_role.guest_mode = 1;
 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
 	update_last_nonleaf_level(vcpu, context);
@@ -4820,6 +4822,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	mask.smep_andnot_wp = 1;
 	mask.smap_andnot_wp = 1;
 	mask.smm = 1;
+	mask.guest_mode = 1;
 	mask.ad_disabled = 1;
 	/*
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment