Commit 1047df1f authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: don't walk every parent pages while mark unsync

While marking the parent's unsync_child_bitmap, if the parent is already
unsynced, there is no need to walk its parents as well; skipping that walk
avoids some unnecessary work.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 7a8f1a74
...@@ -175,7 +175,7 @@ struct kvm_shadow_walk_iterator { ...@@ -175,7 +175,7 @@ struct kvm_shadow_walk_iterator {
shadow_walk_okay(&(_walker)); \ shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker))) shadow_walk_next(&(_walker)))
typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp); typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
static struct kmem_cache *pte_chain_cache; static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache; static struct kmem_cache *rmap_desc_cache;
...@@ -1024,7 +1024,6 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, ...@@ -1024,7 +1024,6 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
BUG(); BUG();
} }
static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn) static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
{ {
struct kvm_pte_chain *pte_chain; struct kvm_pte_chain *pte_chain;
...@@ -1034,63 +1033,37 @@ static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn) ...@@ -1034,63 +1033,37 @@ static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
if (!sp->multimapped && sp->parent_pte) { if (!sp->multimapped && sp->parent_pte) {
parent_sp = page_header(__pa(sp->parent_pte)); parent_sp = page_header(__pa(sp->parent_pte));
fn(parent_sp); fn(parent_sp, sp->parent_pte);
mmu_parent_walk(parent_sp, fn);
return; return;
} }
hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
if (!pte_chain->parent_ptes[i]) u64 *spte = pte_chain->parent_ptes[i];
if (!spte)
break; break;
parent_sp = page_header(__pa(pte_chain->parent_ptes[i])); parent_sp = page_header(__pa(spte));
fn(parent_sp); fn(parent_sp, spte);
mmu_parent_walk(parent_sp, fn);
} }
} }
static void kvm_mmu_update_unsync_bitmap(u64 *spte) static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{ {
unsigned int index; mmu_parent_walk(sp, mark_unsync);
struct kvm_mmu_page *sp = page_header(__pa(spte));
index = spte - sp->spt;
if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
sp->unsync_children++;
WARN_ON(!sp->unsync_children);
} }
static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp) static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
{ {
struct kvm_pte_chain *pte_chain; unsigned int index;
struct hlist_node *node;
int i;
if (!sp->parent_pte) index = spte - sp->spt;
if (__test_and_set_bit(index, sp->unsync_child_bitmap))
return; return;
if (sp->unsync_children++)
if (!sp->multimapped) {
kvm_mmu_update_unsync_bitmap(sp->parent_pte);
return; return;
} kvm_mmu_mark_parents_unsync(sp);
hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
if (!pte_chain->parent_ptes[i])
break;
kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
}
}
static int unsync_walk_fn(struct kvm_mmu_page *sp)
{
kvm_mmu_update_parents_unsync(sp);
return 1;
}
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
mmu_parent_walk(sp, unsync_walk_fn);
kvm_mmu_update_parents_unsync(sp);
} }
static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu, static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment