Commit cb0f722a authored by Wei Huang, committed by Paolo Bonzini

KVM: x86/mmu: Support shadowing NPT when 5-level paging is enabled in host

When the 5-level page table CPU flag is set in the host, but the guest
has CR4.LA57=0 (including the case of a 32-bit guest), the top level of
the shadow NPT page tables will be fixed, consisting of one pointer to
a lower-level table and 511 non-present entries.  Extend the existing
code that creates the fixed PML4 or PDP table, to provide a fixed PML5
table if needed.
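
For illustration only (not part of the patch): a minimal user-space sketch of the "fixed" top-level table described above, in which entry 0 points at the lower-level root and the remaining 511 entries stay non-present. The flag values and names (DEMO_PRESENT, build_fixed_root) are stand-ins for KVM's PT_*_MASK definitions, not the kernel's code.

/*
 * Minimal user-space sketch (not KVM code): a 512-entry top-level table
 * whose slot 0 points at the lower-level root; all other slots are left
 * zero, i.e. non-present.  Flag bits below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512
#define DEMO_PRESENT      (1ull << 0)   /* stand-in for PT_PRESENT_MASK  */
#define DEMO_WRITABLE     (1ull << 1)   /* stand-in for PT_WRITABLE_MASK */

static void build_fixed_root(uint64_t *top, uint64_t lower_root_pa)
{
        /* Only slot 0 is populated; everything else stays non-present. */
        for (int i = 0; i < ENTRIES_PER_TABLE; i++)
                top[i] = 0;
        top[0] = lower_root_pa | DEMO_PRESENT | DEMO_WRITABLE;
}

int main(void)
{
        uint64_t *pml5 = calloc(ENTRIES_PER_TABLE, sizeof(*pml5));
        uint64_t fake_pml4_pa = 0x1000;   /* pretend physical address */

        build_fixed_root(pml5, fake_pml4_pa);
        printf("pml5[0] = 0x%llx, pml5[1] = 0x%llx\n",
               (unsigned long long)pml5[0], (unsigned long long)pml5[1]);
        free(pml5);
        return 0;
}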

This is not needed on EPT because the number of layers in the tables
is specified in the EPTP instead of depending on the host CR4.
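
As an aside, a hedged sketch of why EPT differs: the EPT pointer itself encodes the page-walk length (bits 5:3 hold "walk length minus 1", bits 2:0 the memory type, per the Intel SDM), so the number of levels is chosen per EPTP rather than following host CR4.LA57. demo_make_eptp below is an illustrative helper, not KVM's construct_eptp.

/* Illustrative only: build an EPT pointer whose walk length is encoded
 * in the pointer itself, independent of the host's CR4.LA57 setting. */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_make_eptp(uint64_t root_pa, int levels)
{
        uint64_t eptp = root_pa & ~0xfffull;    /* 4 KiB-aligned root table */

        eptp |= 6;                              /* memory type: write-back   */
        eptp |= (uint64_t)(levels - 1) << 3;    /* page-walk length minus 1  */
        return eptp;
}

int main(void)
{
        printf("4-level EPTP: 0x%llx\n",
               (unsigned long long)demo_make_eptp(0x123000, 4));
        printf("5-level EPTP: 0x%llx\n",
               (unsigned long long)demo_make_eptp(0x123000, 5));
        return 0;
}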
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Wei Huang <wei.huang2@amd.com>
Message-Id: <20210818165549.3771014-3-wei.huang2@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 746700d2
@@ -441,6 +441,7 @@ struct kvm_mmu {
 	u64 *pae_root;
 	u64 *pml4_root;
+	u64 *pml5_root;
 
 	/*
 	 * check zero bits on shadow page table entries, these
@@ -3536,15 +3536,22 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
-	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
 			r = -EIO;
 			goto out_unlock;
 		}
 
 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
+		if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
+			if (WARN_ON_ONCE(!mmu->pml5_root)) {
+				r = -EIO;
+				goto out_unlock;
+			}
+			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
+		}
 	}
 
 	for (i = 0; i < 4; ++i) {
@@ -3563,7 +3570,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		mmu->pae_root[i] = root | pm_mask;
 	}
 
-	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
+		mmu->root_hpa = __pa(mmu->pml5_root);
+	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
 		mmu->root_hpa = __pa(mmu->pml4_root);
 	else
 		mmu->root_hpa = __pa(mmu->pae_root);
@@ -3579,7 +3588,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
-	u64 *pml4_root, *pae_root;
+	u64 *pml5_root = NULL;
+	u64 *pml4_root = NULL;
+	u64 *pae_root;
 
 	/*
 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
@@ -3591,21 +3602,15 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
 		return 0;
 
-	/*
-	 * This mess only works with 4-level paging and needs to be updated to
-	 * work with 5-level paging.
-	 */
-	if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
-		return -EIO;
-
-	if (mmu->pae_root && mmu->pml4_root)
+	if (mmu->pae_root && mmu->pml4_root && mmu->pml5_root)
 		return 0;
 
 	/*
 	 * The special roots should always be allocated in concert.  Yell and
 	 * bail if KVM ends up in a state where only one of the roots is valid.
 	 */
-	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
+	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
+			 mmu->pml5_root))
 		return -EIO;
 
 	/*
@@ -3616,16 +3621,31 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	if (!pae_root)
 		return -ENOMEM;
 
+#ifdef CONFIG_X86_64
 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-	if (!pml4_root) {
-		free_page((unsigned long)pae_root);
-		return -ENOMEM;
+	if (!pml4_root)
+		goto err_pml4;
+
+	if (mmu->shadow_root_level > PT64_ROOT_4LEVEL) {
+		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+		if (!pml5_root)
+			goto err_pml5;
 	}
+#endif
 
 	mmu->pae_root = pae_root;
 	mmu->pml4_root = pml4_root;
+	mmu->pml5_root = pml5_root;
 
 	return 0;
+
+#ifdef CONFIG_X86_64
+err_pml5:
+	free_page((unsigned long)pml4_root);
+err_pml4:
+	free_page((unsigned long)pae_root);
+	return -ENOMEM;
+#endif
 }
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -5461,6 +5481,7 @@ static void free_mmu_pages(struct kvm_mmu *mmu)
 	set_memory_encrypted((unsigned long)mmu->pae_root, 1);
 	free_page((unsigned long)mmu->pae_root);
 	free_page((unsigned long)mmu->pml4_root);
+	free_page((unsigned long)mmu->pml5_root);
 }
 
 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)