Commit dacc3ed1 authored by James Hogan

KVM: MIPS: Use uaccess to read/modify guest instructions

Now that we have GVA page tables, use standard user accesses with page
faults disabled to read & modify guest instructions. This should be more
robust (than the rather dodgy method of accessing guest mapped segments
by just directly addressing them) and will also work with Enhanced
Virtual Addressing (EVA) host kernel configurations where dedicated
instructions are needed for accessing user mode memory.

For simplicity and speed we do this regardless of the guest segment the
address resides in, rather than handling guest KSeg0 specially with
kmap_atomic() as before.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent 7a156e9f
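
The change boils down to replacing hand-rolled GVA-to-HPA translation and kmap_atomic() mapping with the kernel's standard uaccess pattern: get_user()/put_user() against the guest virtual address, resolved through the new GVA page tables, with -EFAULT indicating an unmapped address. A minimal sketch of the read side, for orientation only (the helper name and out-parameter are illustrative, not part of the patch):

/*
 * Illustrative sketch: fetch one guest instruction through the GVA page
 * tables with a user accessor. With pagefault_disable() in effect (as
 * the kvm_trap_emul_vcpu_run() hunks below arrange), get_user() cannot
 * sleep; it fails fast with -EFAULT if the GVA is not faulted in yet.
 */
static int example_read_guest_inst(u32 __user *opc, u32 *out)
{
	u32 inst;
	int err;

	err = get_user(inst, opc);	/* exception-fixup protected load */
	if (unlikely(err))
		return err;		/* caller must fault the page in and retry */

	*out = inst;
	return 0;
}
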
@@ -639,8 +639,6 @@ void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 				  bool user);
-extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-						   unsigned long gva);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 				    struct kvm_vcpu *vcpu);
 extern void kvm_local_flush_tlb_all(void);
...
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
@@ -29,28 +30,15 @@
 static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
 				  union mips_instruction replace)
 {
-	unsigned long paddr, flags;
-	void *vaddr;
+	unsigned long vaddr = (unsigned long)opc;
+	int err;
 
-	if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) {
-		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
-							      (unsigned long)opc);
-		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
-		vaddr += paddr & ~PAGE_MASK;
-		memcpy(vaddr, (void *)&replace, sizeof(u32));
-		local_flush_icache_range((unsigned long)vaddr,
-					 (unsigned long)vaddr + 32);
-		kunmap_atomic(vaddr);
-	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-		local_irq_save(flags);
-		memcpy((void *)opc, (void *)&replace, sizeof(u32));
-		__local_flush_icache_user_range((unsigned long)opc,
-						(unsigned long)opc + 32);
-		local_irq_restore(flags);
-	} else {
+	err = put_user(replace.word, opc);
+	if (unlikely(err)) {
 		kvm_err("%s: Invalid address: %p\n", __func__, opc);
-		return -EFAULT;
+		return err;
 	}
 
+	__local_flush_icache_user_range(vaddr, vaddr + 4);
 	return 0;
 }
...
@@ -11,6 +11,7 @@
 
 #include <linux/highmem.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -134,34 +135,6 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 	return err;
 }
 
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-						    unsigned long gva)
-{
-	gfn_t gfn;
-	unsigned long offset = gva & ~PAGE_MASK;
-	struct kvm *kvm = vcpu->kvm;
-
-	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
-		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
-			__builtin_return_address(0), gva);
-		return KVM_INVALID_PAGE;
-	}
-
-	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-	if (gfn >= kvm->arch.guest_pmap_npages) {
-		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
-			gva);
-		return KVM_INVALID_PAGE;
-	}
-
-	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-		return KVM_INVALID_ADDR;
-
-	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-
 static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
 					unsigned long addr)
 {
@@ -551,51 +524,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	unsigned long paddr, flags, vpn2, asid;
-	unsigned long va = (unsigned long)opc;
-	void *vaddr;
 	u32 inst;
-	int index;
+	int err;
 
-	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
-	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
-		local_irq_save(flags);
-		index = kvm_mips_host_tlb_lookup(vcpu, va);
-		if (index >= 0) {
-			inst = *(opc);
-		} else {
-			vpn2 = va & VPN2_MASK;
-			asid = kvm_read_c0_guest_entryhi(cop0) &
-				KVM_ENTRYHI_ASID;
-			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
-			if (index < 0) {
-				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
-					__func__, opc, vcpu, read_c0_entryhi());
-				kvm_mips_dump_host_tlbs();
-				kvm_mips_dump_guest_tlbs(vcpu);
-				local_irq_restore(flags);
-				return KVM_INVALID_INST;
-			}
-			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-						&vcpu->arch.guest_tlb[index],
-						va)) {
-				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
-					__func__, opc, index, vcpu,
-					read_c0_entryhi());
-				kvm_mips_dump_guest_tlbs(vcpu);
-				local_irq_restore(flags);
-				return KVM_INVALID_INST;
-			}
-			inst = *(opc);
-		}
-		local_irq_restore(flags);
-	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
-		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
-		vaddr += paddr & ~PAGE_MASK;
-		inst = *(u32 *)vaddr;
-		kunmap_atomic(vaddr);
-	} else {
+	err = get_user(inst, opc);
+	if (unlikely(err)) {
 		kvm_err("%s: illegal address: %p\n", __func__, opc);
 		return KVM_INVALID_INST;
 	}
...
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -798,6 +799,12 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	kvm_trap_emul_vcpu_reenter(run, vcpu);
 
+	/*
+	 * We use user accessors to access guest memory, but we don't want to
+	 * invoke Linux page faulting.
+	 */
+	pagefault_disable();
+
 	/* Disable hardware page table walking while in guest */
 	htw_stop();
@@ -823,6 +830,8 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	htw_start();
 
+	pagefault_enable();
+
 	return r;
 }
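
Taken together, the two kvm_trap_emul_vcpu_run() hunks bracket guest execution so that every user access from guest context fails fast instead of invoking the fault handler. A condensed outline of that control flow (run_guest() is a hypothetical stand-in for the real guest entry/exit code, not a function in the patch):

static int vcpu_run_outline(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	/* Suppress Linux page faulting: user accessors return -EFAULT instead */
	pagefault_disable();
	/* Disable hardware page table walking while in guest */
	htw_stop();

	r = run_guest(run, vcpu);	/* hypothetical stand-in for guest entry */

	htw_start();
	pagefault_enable();

	return r;
}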