Commit 7b52345e authored by Sheng Yang, committed by Avi Kivity

KVM: MMU: Add EPT support

Enable kvm_set_spte() to generate EPT entries.
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 67253af5
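
The patch replaces the hard-coded PT_* bits in the shadow MMU's spte construction with runtime-configurable masks, so the same code paths can emit either traditional shadow page-table entries or EPT entries. A minimal sketch of how an EPT-capable backend (the vmx.c side of this series, not shown in this excerpt) might program the new hooks; the EPT_*_MASK values and the helper name are illustrative assumptions, not code from this commit:

    /* Illustrative EPT permission bits: read, write, execute = bits 0-2. */
    #define EPT_READABLE_MASK   (1ull << 0)
    #define EPT_WRITABLE_MASK   (1ull << 1)
    #define EPT_EXECUTABLE_MASK (1ull << 2)

    static void example_configure_ept_ptes(void)
    {
            /* Every entry the MMU creates is readable and writable. */
            kvm_mmu_set_base_ptes(EPT_READABLE_MASK | EPT_WRITABLE_MASK);

            /*
             * EPT of this generation has no user/supervisor bit, and
             * execute is a positive permission rather than a no-execute
             * bit, so nx_mask is 0 and x_mask carries the execute bit.
             * A real backend might dedicate software-available bits for
             * accessed/dirty tracking instead of passing 0 here.
             */
            kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
                                  EPT_EXECUTABLE_MASK);
    }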
arch/x86/kvm/mmu.c:

@@ -152,6 +152,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -160,6 +166,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+	shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+	shadow_user_mask = user_mask;
+	shadow_accessed_mask = accessed_mask;
+	shadow_dirty_mask = dirty_mask;
+	shadow_nx_mask = nx_mask;
+	shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.cr0 & X86_CR0_WP;
@@ -198,7 +221,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-	return pte & PT_DIRTY_MASK;
+	return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -513,7 +536,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
-	if (*spte & PT_ACCESSED_MASK)
+	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
 		kvm_release_pfn_dirty(pfn);
@@ -1039,17 +1062,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte = shadow_base_present_pte | shadow_dirty_mask;
 	if (!speculative)
 		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
+	if (pte_access & ACC_EXEC_MASK)
+		spte |= shadow_x_mask;
 	else
-		spte |= PT_PRESENT_MASK;
+		spte |= shadow_nx_mask;
 	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
+		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
@@ -1155,7 +1178,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		}
 
 		table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-			| PT_WRITABLE_MASK | PT_USER_MASK;
+			| PT_WRITABLE_MASK | shadow_user_mask;
 	}
 	table_addr = table[index] & PT64_BASE_ADDR_MASK;
 }
@@ -1599,7 +1622,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
 
-	return !!(spte && (*spte & PT_ACCESSED_MASK));
+	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
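The interesting change is mmu_set_spte()'s handling of execute permission: shadow paging expresses "no execute" with the NX bit (bit 63), while EPT expresses "execute" with a positive X bit, so the patch keeps two mutually exclusive masks and ORs exactly one of them into the spte. A small stand-alone sketch of that logic; the PT_* values match the architectural x86 bits, while the EPT values are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Architectural x86 page-table bits, as in the kernel's PT_* macros. */
    #define PT_PRESENT_MASK (1ull << 0)
    #define PT_DIRTY_MASK   (1ull << 6)
    #define PT64_NX_MASK    (1ull << 63)
    #define ACC_EXEC_MASK   1u

    /* The configurable masks introduced by this patch. */
    static uint64_t base_pte, dirty_mask, nx_mask, x_mask;

    /* Mirrors the mask-driven exec logic of mmu_set_spte() above. */
    static uint64_t make_spte(unsigned int pte_access)
    {
            uint64_t spte = base_pte | dirty_mask;

            if (pte_access & ACC_EXEC_MASK)
                    spte |= x_mask;   /* positive execute bit (EPT)     */
            else
                    spte |= nx_mask;  /* no-execute bit (shadow paging) */
            return spte;
    }

    int main(void)
    {
            /* Shadow-paging masks, as installed by kvm_arch_init() below. */
            base_pte = PT_PRESENT_MASK;
            dirty_mask = PT_DIRTY_MASK;
            nx_mask = PT64_NX_MASK;
            x_mask = 0;
            printf("shadow exec:   %#018llx\n",
                   (unsigned long long)make_spte(ACC_EXEC_MASK));
            printf("shadow noexec: %#018llx\n",
                   (unsigned long long)make_spte(0));

            /* Assumed EPT masks: read/write base (bits 0-1), execute bit 2. */
            base_pte = 0x3;
            dirty_mask = 0;
            nx_mask = 0;
            x_mask = 0x4;
            printf("ept exec:      %#018llx\n",
                   (unsigned long long)make_spte(ACC_EXEC_MASK));
            printf("ept noexec:    %#018llx\n",
                   (unsigned long long)make_spte(0));
            return 0;
    }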
arch/x86/kvm/x86.c:

@@ -2417,6 +2417,9 @@ int kvm_arch_init(void *opaque)
 	kvm_x86_ops = ops;
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
+	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
 	return 0;
 
 out:
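These defaults reproduce the bits the MMU used to hard-code: the base pte is PT_PRESENT_MASK, executable translations get no extra bit (x_mask is 0), and non-executable ones get PT64_NX_MASK. A backend that uses a different pte format, such as EPT, would presumably override them through the same two setters during its own initialization, before any shadow pages are created.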
include/asm-x86/kvm_host.h:

@@ -434,6 +434,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+void kvm_mmu_set_base_ptes(u64 base_pte);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);