Commit eb259186 authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: improve spte audit

Both audit_mappings() and audit_sptes_have_rmaps() need to walk the vcpu's page
tables, so we can do both checks in a single spte walk.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 49edf878
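
For orientation only, here is a minimal, self-contained C sketch of the pattern the patch moves to: one recursive walk over a toy page-table tree invokes a single callback per entry, and that callback performs every per-entry audit. This mirrors how the new audit_spte() lets audit_mappings() and audit_sptes_have_rmaps() share one walk. Everything in the sketch (fake_table, walk, audit_entry, PRESENT, LAST_LEVEL) is a hypothetical stand-in, not KVM code.

/*
 * Toy model of the callback-driven spte walk: the walker visits every
 * entry once and recurses into present non-leaf entries; the callback
 * decides which checks apply at each level.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_TABLE 4            /* KVM uses PT64_ENT_PER_PAGE (512) */
#define PRESENT           (1ull << 0)  /* stand-in for the present bit     */
#define LAST_LEVEL        1            /* stand-in for PT_PAGE_TABLE_LEVEL */

struct fake_table {
        uint64_t entry[ENTRIES_PER_TABLE];
        struct fake_table *child[ENTRIES_PER_TABLE];
};

/* Mirrors the shape of the new inspect_spte_fn: entry pointer plus level. */
typedef void (*inspect_fn)(uint64_t *entry, int level);

/* Visit every entry once, recursing into present non-leaf entries. */
static void walk(struct fake_table *t, inspect_fn fn, int level)
{
        for (int i = 0; i < ENTRIES_PER_TABLE; i++) {
                fn(&t->entry[i], level);

                if ((t->entry[i] & PRESENT) && level > LAST_LEVEL && t->child[i])
                        walk(t->child[i], fn, level - 1);
        }
}

/* One callback that runs all audits, in the spirit of audit_spte(). */
static void audit_entry(uint64_t *entry, int level)
{
        if (!(*entry & PRESENT) || level != LAST_LEVEL)
                return;
        /* the real code checks the mapping and the rmap for this spte */
        printf("auditing leaf entry %#llx at level %d\n",
               (unsigned long long)*entry, level);
}

int main(void)
{
        struct fake_table leaf = { .entry = { PRESENT | 0x1000, 0, PRESENT | 0x2000, 0 } };
        struct fake_table root = { .entry = { PRESENT }, .child = { &leaf } };

        walk(&root, audit_entry, 2);   /* level-2 root pointing at level-1 leaves */
        return 0;
}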
@@ -19,23 +19,24 @@
 static const char *audit_msg;
 
-typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
+typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
 
-static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
-                            inspect_spte_fn fn)
+static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                            inspect_spte_fn fn, int level)
 {
         int i;
 
         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                u64 ent = sp->spt[i];
-
-                if (is_shadow_present_pte(ent)) {
-                        if (!is_last_spte(ent, sp->role.level)) {
-                                struct kvm_mmu_page *child;
-                                child = page_header(ent & PT64_BASE_ADDR_MASK);
-                                __mmu_spte_walk(kvm, child, fn);
-                        } else
-                                fn(kvm, &sp->spt[i]);
+                u64 *ent = sp->spt;
+
+                fn(vcpu, ent + i, level);
+
+                if (is_shadow_present_pte(ent[i]) &&
+                      !is_last_spte(ent[i], level)) {
+                        struct kvm_mmu_page *child;
+
+                        child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
+                        __mmu_spte_walk(vcpu, child, fn, level - 1);
                 }
         }
 }
@@ -47,21 +48,25 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                 return;
+
         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                 hpa_t root = vcpu->arch.mmu.root_hpa;
+
                 sp = page_header(root);
-                __mmu_spte_walk(vcpu->kvm, sp, fn);
+                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
                 return;
         }
+
         for (i = 0; i < 4; ++i) {
                 hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                 if (root && VALID_PAGE(root)) {
                         root &= PT64_BASE_ADDR_MASK;
                         sp = page_header(root);
-                        __mmu_spte_walk(vcpu->kvm, sp, fn);
+                        __mmu_spte_walk(vcpu, sp, fn, 2);
                 }
         }
+
         return;
 }
@@ -75,80 +80,55 @@ static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
                 fn(kvm, sp);
 }
 
-static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
-                                gva_t va, int level)
+static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-        u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
-        int i;
-        gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
-
-        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
-                u64 *sptep = pt + i;
-                struct kvm_mmu_page *sp;
-                gfn_t gfn;
-                pfn_t pfn;
-                hpa_t hpa;
-
-                sp = page_header(__pa(sptep));
-
-                if (sp->unsync) {
-                        if (level != PT_PAGE_TABLE_LEVEL) {
-                                printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
-                                                audit_msg, sp, level);
-                                return;
-                        }
-
-                        if (*sptep == shadow_notrap_nonpresent_pte) {
-                                printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
-                                                audit_msg, sp);
-                                return;
-                        }
-                }
-
-                if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
-                        printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
-                                        audit_msg, sp);
-                        return;
-                }
-
-                if (!is_shadow_present_pte(*sptep) ||
-                      !is_last_spte(*sptep, level))
-                        return;
-
-                gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
-                pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
-
-                if (is_error_pfn(pfn)) {
-                        kvm_release_pfn_clean(pfn);
-                        return;
-                }
-
-                hpa = pfn << PAGE_SHIFT;
-
-                if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
-                        printk(KERN_ERR "xx audit error: (%s) levels %d"
-                                           " gva %lx pfn %llx hpa %llx ent %llxn",
-                                           audit_msg, vcpu->arch.mmu.root_level,
-                                           va, pfn, hpa, *sptep);
-        }
-}
-
-static void audit_mappings(struct kvm_vcpu *vcpu)
-{
-        unsigned i;
-
-        if (vcpu->arch.mmu.root_level == 4)
-                audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
-        else
-                for (i = 0; i < 4; ++i)
-                        if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
-                                audit_mappings_page(vcpu,
-                                                    vcpu->arch.mmu.pae_root[i],
-                                                    i << 30,
-                                                    2);
+        struct kvm_mmu_page *sp;
+        gfn_t gfn;
+        pfn_t pfn;
+        hpa_t hpa;
+
+        sp = page_header(__pa(sptep));
+
+        if (sp->unsync) {
+                if (level != PT_PAGE_TABLE_LEVEL) {
+                        printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
+                                audit_msg, sp, level);
+                        return;
+                }
+
+                if (*sptep == shadow_notrap_nonpresent_pte) {
+                        printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
+                                audit_msg, sp);
+                        return;
+                }
+        }
+
+        if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
+                printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
+                        audit_msg, sp);
+                return;
+        }
+
+        if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
+                return;
+
+        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+        pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
+
+        if (is_error_pfn(pfn)) {
+                kvm_release_pfn_clean(pfn);
+                return;
+        }
+
+        hpa = pfn << PAGE_SHIFT;
+
+        if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
+                printk(KERN_ERR "xx audit error: (%s) levels %d"
+                                   "pfn %llx hpa %llx ent %llxn",
+                                   audit_msg, vcpu->arch.mmu.root_level,
+                                   pfn, hpa, *sptep);
 }
 
-void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
+static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 {
         unsigned long *rmapp;
         struct kvm_mmu_page *rev_sp;
@@ -180,9 +160,10 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
         }
 }
 
-void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
+static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-        mmu_spte_walk(vcpu, inspect_spte_has_rmap);
+        if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
+                inspect_spte_has_rmap(vcpu->kvm, sptep);
 }
 
 static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -234,13 +215,22 @@ static void audit_all_active_sps(struct kvm *kvm)
         walk_all_active_sps(kvm, audit_sp);
 }
 
+static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
+{
+        audit_sptes_have_rmaps(vcpu, sptep, level);
+        audit_mappings(vcpu, sptep, level);
+}
+
+static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
+{
+        mmu_spte_walk(vcpu, audit_spte);
+}
+
 static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
 {
         audit_msg = audit_point_name[audit_point];
         audit_all_active_sps(vcpu->kvm);
-        if (strcmp("pre pte write", audit_msg) != 0)
-                audit_mappings(vcpu);
-        audit_sptes_have_rmaps(vcpu);
+        audit_vcpu_spte(vcpu);
 }
 
 static bool mmu_audit;