Commit 0bd595fc authored by Hollis Blanchard, committed by Avi Kivity

KVM: ppc: kvmppc_44x_shadow_release() does not require mmap_sem to be locked

And it gets in the way of get_user_pages_fast().
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 9c3e4aab
...@@ -110,7 +110,6 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe) ...@@ -110,7 +110,6 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW); return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
} }
/* Must be called with mmap_sem locked for writing. */
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
unsigned int index) unsigned int index)
{ {
...@@ -150,17 +149,16 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, ...@@ -150,17 +149,16 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
/* Get reference to new page. */ /* Get reference to new page. */
down_read(&current->mm->mmap_sem); down_read(&current->mm->mmap_sem);
new_page = gfn_to_page(vcpu->kvm, gfn); new_page = gfn_to_page(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
if (is_error_page(new_page)) { if (is_error_page(new_page)) {
printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
kvm_release_page_clean(new_page); kvm_release_page_clean(new_page);
up_read(&current->mm->mmap_sem);
return; return;
} }
hpaddr = page_to_phys(new_page); hpaddr = page_to_phys(new_page);
/* Drop reference to old page. */ /* Drop reference to old page. */
kvmppc_44x_shadow_release(vcpu, victim); kvmppc_44x_shadow_release(vcpu, victim);
up_read(&current->mm->mmap_sem);
vcpu->arch.shadow_pages[victim] = new_page; vcpu->arch.shadow_pages[victim] = new_page;
...@@ -194,7 +192,6 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -194,7 +192,6 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
int i; int i;
/* XXX Replace loop with fancy data structures. */ /* XXX Replace loop with fancy data structures. */
down_write(&current->mm->mmap_sem);
for (i = 0; i <= tlb_44x_hwater; i++) { for (i = 0; i <= tlb_44x_hwater; i++) {
struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
unsigned int tid; unsigned int tid;
...@@ -219,7 +216,6 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -219,7 +216,6 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
stlbe->tid, stlbe->word0, stlbe->word1, stlbe->tid, stlbe->word0, stlbe->word1,
stlbe->word2, handler); stlbe->word2, handler);
} }
up_write(&current->mm->mmap_sem);
} }
/* Invalidate all mappings on the privilege switch after PID has been changed. /* Invalidate all mappings on the privilege switch after PID has been changed.
...@@ -231,7 +227,6 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) ...@@ -231,7 +227,6 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
if (vcpu->arch.swap_pid) { if (vcpu->arch.swap_pid) {
/* XXX Replace loop with fancy data structures. */ /* XXX Replace loop with fancy data structures. */
down_write(&current->mm->mmap_sem);
for (i = 0; i <= tlb_44x_hwater; i++) { for (i = 0; i <= tlb_44x_hwater; i++) {
struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
...@@ -243,7 +238,6 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) ...@@ -243,7 +238,6 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
stlbe->tid, stlbe->word0, stlbe->word1, stlbe->tid, stlbe->word0, stlbe->word1,
stlbe->word2, handler); stlbe->word2, handler);
} }
up_write(&current->mm->mmap_sem);
vcpu->arch.swap_pid = 0; vcpu->arch.swap_pid = 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment