Commit 2d48a985 authored by Joerg Roedel, committed by Avi Kivity

KVM: MMU: Track NX state in struct kvm_mmu

With Nested Paging emulation, the NX state of the two
MMU contexts may differ. To make sure that the right
fault error code is always recorded, this patch moves the
NX state into struct kvm_mmu so that the code can
distinguish between the L1 and L2 NX state.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 81407ca5
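Before the diff, a minimal sketch of the idea outside the kernel. This is plain userspace C, and every name in it (mmu_ctx, fault_error_code) is hypothetical rather than taken from KVM; it only illustrates why the fetch-fault bit must be derived from the NX state of the MMU context that performed the walk, not from a single vcpu-wide flag.

/*
 * Simplified, self-contained illustration (not kernel code): each MMU
 * context carries its own nx flag, so the recorded fault error code
 * reflects the context that actually walked the page tables.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFERR_FETCH_MASK (1u << 4)

struct mmu_ctx {
	const char *name;
	bool nx;		/* per-context NX state */
};

/* Derive the error code from the context's own NX state. */
static uint32_t fault_error_code(const struct mmu_ctx *mmu, bool fetch_fault)
{
	uint32_t error_code = 0;

	if (fetch_fault && mmu->nx)
		error_code |= PFERR_FETCH_MASK;
	return error_code;
}

int main(void)
{
	/* With nested paging, L1 and L2 may disagree about NX. */
	struct mmu_ctx l1 = { .name = "L1", .nx = true };
	struct mmu_ctx l2 = { .name = "L2", .nx = false };

	printf("%s fetch fault error code: %#x\n",
	       l1.name, (unsigned)fault_error_code(&l1, true));
	printf("%s fetch fault error code: %#x\n",
	       l2.name, (unsigned)fault_error_code(&l2, true));
	return 0;
}

With one global flag, the L2 walk in this example would wrongly report a fetch fault whenever L1 had NX enabled; keeping nx in each context is exactly what the patch below does with struct kvm_mmu.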
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -259,6 +259,8 @@ struct kvm_mmu {
 	u64 *lm_root;
 	u64 rsvd_bits_mask[2][4];
 
+	bool nx;
+
 	u64 pdptrs[4]; /* pae */
 };
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2634,6 +2634,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
+	context->nx = false;
 	return 0;
 }
@@ -2687,7 +2688,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 	u64 exb_bit_rsvd = 0;
 
-	if (!is_nx(vcpu))
+	if (!context->nx)
 		exb_bit_rsvd = rsvd_bits(63, 63);
 	switch (level) {
 	case PT32_ROOT_LEVEL:
@@ -2746,6 +2747,8 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 					struct kvm_mmu *context,
 					int level)
 {
+	context->nx = is_nx(vcpu);
+
 	reset_rsvds_bits_mask(vcpu, context, level);
 
 	ASSERT(is_pae(vcpu));
@@ -2772,6 +2775,8 @@ static int paging64_init_context(struct kvm_vcpu *vcpu,
 static int paging32_init_context(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu *context)
 {
+	context->nx = false;
+
 	reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 
 	context->new_cr3 = paging_new_cr3;
@@ -2810,19 +2815,24 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
 	context->get_cr3 = get_cr3;
 	context->inject_page_fault = kvm_inject_page_fault;
+	context->nx = is_nx(vcpu);
 
 	if (!is_paging(vcpu)) {
+		context->nx = false;
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
 		context->root_level = 0;
 	} else if (is_long_mode(vcpu)) {
+		context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT64_ROOT_LEVEL;
 	} else if (is_pae(vcpu)) {
+		context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT32E_ROOT_LEVEL;
 	} else {
+		context->nx = false;
 		reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 		context->gva_to_gpa = paging32_gva_to_gpa;
 		context->root_level = PT32_ROOT_LEVEL;
@@ -2878,17 +2888,21 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	 * functions between mmu and nested_mmu are swapped.
 	 */
 	if (!is_paging(vcpu)) {
+		g_context->nx = false;
 		g_context->root_level = 0;
 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
 	} else if (is_long_mode(vcpu)) {
+		g_context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
 		g_context->root_level = PT64_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else if (is_pae(vcpu)) {
+		g_context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
 		g_context->root_level = PT32E_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else {
+		g_context->nx = false;
 		reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
 		g_context->root_level = PT32_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -105,7 +105,7 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
 #if PTTYPE == 64
-	if (is_nx(vcpu))
+	if (vcpu->arch.mmu.nx)
 		access &= ~(gpte >> PT64_NX_SHIFT);
 #endif
 
 	return access;
@@ -272,7 +272,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		walker->error_code |= PFERR_WRITE_MASK;
 	if (user_fault)
 		walker->error_code |= PFERR_USER_MASK;
-	if (fetch_fault && is_nx(vcpu))
+	if (fetch_fault && mmu->nx)
 		walker->error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
 		walker->error_code |= PFERR_RSVD_MASK;