Commit 48c0e4e9 authored by Xiao Guangrong, committed by Marcelo Tosatti

KVM: MMU: move mmu pages calculated out of mmu lock

kvm_mmu_calculate_mmu_pages needs to walk all memslots and is protected by
kvm->slots_lock, so move it out of the mmu spinlock.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 1b7fd45c
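
For context, kvm_mmu_calculate_mmu_pages has to iterate over every memory slot to size the shadow-MMU page budget, which is why it is covered by kvm->slots_lock rather than the mmu spinlock, and why this change computes the value before taking mmu_lock. A rough sketch of that walk, simplified from the x86 KVM code of this era (field names such as nmemslots and the KVM_PERMILLE_MMU_PAGES / KVM_MIN_ALLOC_MMU_PAGES scaling reflect the kernel of that time and may differ in other versions):

	unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
	{
		unsigned int nr_pages = 0, nr_mmu_pages;
		struct kvm_memslots *slots = kvm_memslots(kvm);	/* caller holds slots_lock */
		int i;

		/* Walk every memslot; this is the part that should not run under mmu_lock. */
		for (i = 0; i < slots->nmemslots; i++)
			nr_pages += slots->memslots[i].npages;

		/* Scale guest memory down to a shadow-page budget, with a minimum floor. */
		nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
		nr_mmu_pages = max(nr_mmu_pages, (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

		return nr_mmu_pages;
	}

Only the cheap kvm_mmu_change_mmu_pages() call then needs to happen under mmu_lock, as the diff below shows.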
@@ -6105,7 +6105,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				int user_alloc)
 {
 
-	int npages = mem->memory_size >> PAGE_SHIFT;
+	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
 	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
 		int ret;
@@ -6120,12 +6120,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				"failed to munmap memory\n");
 	}
 
+	if (!kvm->arch.n_requested_mmu_pages)
+		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+
 	spin_lock(&kvm->mmu_lock);
-	if (!kvm->arch.n_requested_mmu_pages) {
-		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-	}
-
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	spin_unlock(&kvm->mmu_lock);
 }