Commit 6539e738 authored by Joerg Roedel, committed by Avi Kivity

KVM: MMU: Implement nested gva_to_gpa functions

This patch adds the functions to do a nested l2_gva to
l1_gpa page table walk.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 14dfe855
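
For orientation (editor's note, not part of the patch): with nested paging an L2 address is resolved in two stages, l2_gva -> l2_gpa through the L2 guest's own page tables, then l2_gpa -> l1_gpa through the L1 nested page table. A minimal caller sketch, assuming walk_mmu is switched to point at nested_mmu while an L2 guest runs, as the helpers below suggest:

static gpa_t example_l2_gva_to_l1_gpa(struct kvm_vcpu *vcpu, gva_t l2_gva,
				      u32 access, u32 *error)
{
	/* walk_mmu is assumed to point at nested_mmu while L2 runs, so this
	 * picks up the nested gva_to_gpa implementations added below. */
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, l2_gva, access, error);
}
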
...@@ -295,6 +295,16 @@ struct kvm_vcpu_arch {
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
...
...@@ -2466,6 +2466,14 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
	return vaddr;
}

static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
					 u32 access, u32 *error)
{
	if (error)
		*error = 0;

	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
...
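
A note on the contract assumed above (editor's sketch, not from this commit): nonpaging_gva_to_gpa_nested() relies on nested_mmu.translate_gpa() to turn an L2 gpa into an L1 gpa; with the L2 guest's paging disabled, vaddr already is the L2 gpa, so that single call suffices. For a non-nested guest the same hook could simply be an identity translation, along these lines:

/* Illustrative stub only: an identity translate_gpa hook that satisfies
 * the same callback signature when no second translation stage exists. */
static gpa_t example_identity_translate_gpa(struct kvm_vcpu *vcpu,
					    gpa_t gpa, u32 access)
{
	return gpa;
}
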
...@@ -276,6 +276,16 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
					write_fault, user_fault, fetch_fault);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   int write_fault, int user_fault,
				   int fetch_fault)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, write_fault, user_fault,
					fetch_fault);
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
...@@ -660,6 +670,27 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
	return gpa;
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access, u32 *error)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr,
				    access & PFERR_WRITE_MASK,
				    access & PFERR_USER_MASK,
				    access & PFERR_FETCH_MASK);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (error)
		*error = walker.error_code;

	return gpa;
}

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
...
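
The gpa composition at the end of FNAME(gva_to_gpa_nested)() is the usual frame-plus-offset split; a small illustrative helper (not part of the patch) showing the same arithmetic:

/* Page frame from the walker, byte offset from the original virtual
 * address; e.g. gfn 0x1234 and offset 0x56 compose to gpa 0x1234056. */
static gpa_t example_compose_gpa(gfn_t gfn, gva_t vaddr)
{
	return gfn_to_gpa(gfn) | (vaddr & ~PAGE_MASK);
}
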
...@@ -50,6 +50,11 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
...
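
A hedged usage sketch for mmu_is_nested() (hypothetical caller, not from this commit): code that holds an L2 gpa can check whether a second translation stage is still required before treating the value as an L1 gpa.

static gpa_t example_to_l1_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
	/* Only translate when the active walk context is the nested one. */
	if (mmu_is_nested(vcpu))
		gpa = vcpu->arch.nested_mmu.translate_gpa(vcpu, gpa, access);

	return gpa;
}
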