Commit 258f3a2e authored by James Hogan, committed by Paolo Bonzini

MIPS: KVM: Convert emulation to use asm/inst.h

Convert various MIPS KVM guest instruction emulation functions to decode
instructions (and encode translations) using the union mips_instruction
and related enumerations in asm/inst.h rather than #defines and
hardcoded values.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d5cd26bc
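
For context, the standalone sketch below (not part of the commit) illustrates the decoding style the patch converts to: viewing a raw instruction word through named union bitfields instead of hand-written shifts and masks. It is a userspace approximation only; the BITFIELD_FIELD stand-in, the cut-down struct i_format, and the sample encoding are assumptions made for illustration, whereas the kernel's real definitions come from asm/inst.h and uapi/asm/bitfield.h.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's __BITFIELD_FIELD: declare bitfields MSB-first
 * on big-endian hosts and LSB-first on little-endian ones, so the named
 * fields describe the same bits of .word either way.  This mirrors the
 * kernel's assumption about how gcc/clang allocate bitfields, and relies
 * on the gcc/clang __BYTE_ORDER__ predefine.
 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define BITFIELD_FIELD(field, more) field; more
#else
#define BITFIELD_FIELD(field, more) more field;
#endif

struct i_format {                       /* immediate format, as in asm/inst.h */
        BITFIELD_FIELD(unsigned int opcode : 6,
        BITFIELD_FIELD(unsigned int rs : 5,
        BITFIELD_FIELD(unsigned int rt : 5,
        BITFIELD_FIELD(signed int simmediate : 16,
        ;))))
};

union mips_instruction {
        uint32_t word;
        struct i_format i_format;
};

int main(void)
{
        union mips_instruction inst = { .word = 0x8c82fff8 };  /* lw $2, -8($4) */

        /* Old style: hardcoded shifts and masks against the raw word. */
        unsigned int rt_old = (inst.word >> 16) & 0x1f;

        /* New style: named bitfields, as the converted KVM code now does. */
        printf("opcode=%#x rs=%u rt=%u (old-style rt=%u) offset=%d\n",
               inst.i_format.opcode, inst.i_format.rs, inst.i_format.rt,
               rt_old, inst.i_format.simmediate);
        return 0;
}
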
@@ -19,6 +19,7 @@
 #include <linux/threads.h>
 #include <linux/spinlock.h>
 
+#include <asm/inst.h>
 #include <asm/mipsregs.h>
 
 /* MIPS KVM register ids */
@@ -733,21 +734,21 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
                                                struct kvm_run *run,
                                                struct kvm_vcpu *vcpu);
 
-enum emulation_result kvm_mips_emulate_cache(u32 inst,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                                              u32 *opc,
                                              u32 cause,
                                              struct kvm_run *run,
                                              struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_CP0(u32 inst,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                            u32 *opc,
                                            u32 cause,
                                            struct kvm_run *run,
                                            struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_store(u32 inst,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                                              u32 cause,
                                              struct kvm_run *run,
                                              struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_load(u32 inst,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                                             u32 cause,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu);
@@ -758,11 +759,14 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
 
 /* Dynamic binary translation */
-extern int kvm_mips_trans_cache_index(u32 inst, u32 *opc,
-                                      struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_cache_va(u32 inst, u32 *opc, struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_index(union mips_instruction inst,
+                                      u32 *opc, struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
+                                   struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+                               struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+                               struct kvm_vcpu *vcpu);
 
 /* Misc */
 extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
...
@@ -103,7 +103,7 @@ enum rt_op {
        bltzal_op, bgezal_op, bltzall_op, bgezall_op,
        rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
        rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
-       bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+       bposge32_op, rt_op_0x1d, rt_op_0x1e, synci_op
 };
 
 /*
@@ -586,6 +586,36 @@ struct r_format { /* Register format */
        ;))))))
 };
 
+struct c0r_format {            /* C0 register format */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int z: 8,
+       __BITFIELD_FIELD(unsigned int sel : 3,
+       ;))))))
+};
+
+struct mfmc0_format {          /* MFMC0 register format */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int sc : 1,
+       __BITFIELD_FIELD(unsigned int : 2,
+       __BITFIELD_FIELD(unsigned int sel : 3,
+       ;))))))))
+};
+
+struct co_format {             /* C0 CO format */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int co : 1,
+       __BITFIELD_FIELD(unsigned int code : 19,
+       __BITFIELD_FIELD(unsigned int func : 6,
+       ;))))
+};
+
 struct p_format {              /* Performance counter format (R10000) */
        __BITFIELD_FIELD(unsigned int opcode : 6,
        __BITFIELD_FIELD(unsigned int rs : 5,
@@ -937,6 +967,9 @@ union mips_instruction {
        struct u_format u_format;
        struct c_format c_format;
        struct r_format r_format;
+       struct c0r_format c0r_format;
+       struct mfmc0_format mfmc0_format;
+       struct co_format co_format;
        struct p_format p_format;
        struct f_format f_format;
        struct ma_format ma_format;
...
@@ -20,21 +20,14 @@
 
 #include "commpage.h"
 
-#define SYNCI_TEMPLATE  0x041f0000
-#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
-#define SYNCI_OFFSET    ((x) & 0xffff)
-
-#define LW_TEMPLATE     0x8c000000
-#define CLEAR_TEMPLATE  0x00000020
-#define SW_TEMPLATE     0xac000000
-
 /**
  * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
  * @vcpu:      Virtual CPU.
  * @opc:       PC of instruction to replace.
  * @replace:   Instruction to write
  */
-static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, u32 replace)
+static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
+                                  union mips_instruction replace)
 {
        unsigned long kseg0_opc, flags;
@@ -58,63 +51,68 @@ static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, u32 replace)
        return 0;
 }
 
-int kvm_mips_trans_cache_index(u32 inst, u32 *opc,
+int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
                                struct kvm_vcpu *vcpu)
 {
+       union mips_instruction nop_inst = { 0 };
+
        /* Replace the CACHE instruction, with a NOP */
-       return kvm_mips_trans_replace(vcpu, opc, 0x00000000);
+       return kvm_mips_trans_replace(vcpu, opc, nop_inst);
 }
 
 /*
  * Address based CACHE instructions are transformed into synci(s). A little
  * heavy for just D-cache invalidates, but avoids an expensive trap
  */
-int kvm_mips_trans_cache_va(u32 inst, u32 *opc,
+int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
                             struct kvm_vcpu *vcpu)
 {
-       u32 synci_inst = SYNCI_TEMPLATE, base, offset;
+       union mips_instruction synci_inst = { 0 };
 
-       base = (inst >> 21) & 0x1f;
-       offset = inst & 0xffff;
-       synci_inst |= (base << 21);
-       synci_inst |= offset;
+       synci_inst.i_format.opcode = bcond_op;
+       synci_inst.i_format.rs = inst.i_format.rs;
+       synci_inst.i_format.rt = synci_op;
+       synci_inst.i_format.simmediate = inst.i_format.simmediate;
 
        return kvm_mips_trans_replace(vcpu, opc, synci_inst);
 }
 
-int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+                        struct kvm_vcpu *vcpu)
 {
-       u32 rt, rd, sel;
-       u32 mfc0_inst;
+       union mips_instruction mfc0_inst = { 0 };
+       u32 rd, sel;
 
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
+       rd = inst.c0r_format.rd;
+       sel = inst.c0r_format.sel;
 
-       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
-               mfc0_inst = CLEAR_TEMPLATE;
-               mfc0_inst |= ((rt & 0x1f) << 11);
+       if (rd == MIPS_CP0_ERRCTL && sel == 0) {
+               mfc0_inst.r_format.opcode = spec_op;
+               mfc0_inst.r_format.rd = inst.c0r_format.rt;
+               mfc0_inst.r_format.func = add_op;
        } else {
-               mfc0_inst = LW_TEMPLATE;
-               mfc0_inst |= ((rt & 0x1f) << 16);
-               mfc0_inst |= offsetof(struct kvm_mips_commpage,
-                                     cop0.reg[rd][sel]);
+               mfc0_inst.i_format.opcode = lw_op;
+               mfc0_inst.i_format.rt = inst.c0r_format.rt;
+               mfc0_inst.i_format.simmediate =
+                       offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
        }
 
        return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
 }
 
-int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+                        struct kvm_vcpu *vcpu)
 {
-       u32 rt, rd, sel;
-       u32 mtc0_inst = SW_TEMPLATE;
+       union mips_instruction mtc0_inst = { 0 };
+       u32 rd, sel;
 
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
+       rd = inst.c0r_format.rd;
+       sel = inst.c0r_format.sel;
 
-       mtc0_inst |= ((rt & 0x1f) << 16);
-       mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+       mtc0_inst.i_format.opcode = sw_op;
+       mtc0_inst.i_format.rt = inst.c0r_format.rt;
+       mtc0_inst.i_format.simmediate =
+               offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
 
        return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
 }
@@ -972,13 +972,14 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
        return mask;
 }
 
-enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+                                           u32 *opc, u32 cause,
                                            struct kvm_run *run,
                                            struct kvm_vcpu *vcpu)
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
-       u32 rt, rd, copz, sel, co_bit, op;
+       u32 rt, rd, sel;
        unsigned long curr_pc;
 
        /*
@@ -990,16 +991,8 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
        if (er == EMULATE_FAIL)
                return er;
 
-       copz = (inst >> 21) & 0x1f;
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
-       co_bit = (inst >> 25) & 1;
-
-       if (co_bit) {
-               op = (inst) & 0xff;
-
-               switch (op) {
+       if (inst.co_format.co) {
+               switch (inst.co_format.func) {
                case tlbr_op:   /* Read indexed TLB entry */
                        er = kvm_mips_emul_tlbr(vcpu);
                        break;
@@ -1018,13 +1011,16 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
                case eret_op:
                        er = kvm_mips_emul_eret(vcpu);
                        goto dont_update_pc;
-                       break;
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                }
        } else {
-               switch (copz) {
+               rt = inst.c0r_format.rt;
+               rd = inst.c0r_format.rd;
+               sel = inst.c0r_format.sel;
+
+               switch (inst.c0r_format.rs) {
                case mfc_op:
 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
@@ -1258,7 +1254,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
                                vcpu->arch.gprs[rt] =
                                        kvm_read_c0_guest_status(cop0);
                        /* EI */
-                       if (inst & 0x20) {
+                       if (inst.mfmc0_format.sc) {
                                kvm_debug("[%#lx] mfmc0_op: EI\n",
                                          vcpu->arch.pc);
                                kvm_set_c0_guest_status(cop0, ST0_IE);
@@ -1290,7 +1286,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
                        break;
                default:
                        kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
-                               vcpu->arch.pc, copz);
+                               vcpu->arch.pc, inst.c0r_format.rs);
                        er = EMULATE_FAIL;
                        break;
                }
@@ -1311,13 +1307,13 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause,
        return er;
 }
 
-enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+                                             u32 cause,
                                              struct kvm_run *run,
                                              struct kvm_vcpu *vcpu)
 {
        enum emulation_result er = EMULATE_DO_MMIO;
-       u32 op, base, rt;
-       s16 offset;
+       u32 rt;
        u32 bytes;
        void *data = run->mmio.data;
        unsigned long curr_pc;
@@ -1331,12 +1327,9 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause,
        if (er == EMULATE_FAIL)
                return er;
 
-       rt = (inst >> 16) & 0x1f;
-       base = (inst >> 21) & 0x1f;
-       offset = (s16)inst;
-       op = (inst >> 26) & 0x3f;
+       rt = inst.i_format.rt;
 
-       switch (op) {
+       switch (inst.i_format.opcode) {
        case sb_op:
                bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
@@ -1413,7 +1406,7 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause,
        default:
                kvm_err("Store not yet supported (inst=0x%08x)\n",
-                       inst);
+                       inst.word);
                er = EMULATE_FAIL;
                break;
        }
@@ -1425,19 +1418,16 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause,
        return er;
 }
 
-enum emulation_result kvm_mips_emulate_load(u32 inst, u32 cause,
-                                            struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+                                            u32 cause, struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
 {
        enum emulation_result er = EMULATE_DO_MMIO;
-       u32 op, base, rt;
-       s16 offset;
+       u32 op, rt;
        u32 bytes;
 
-       rt = (inst >> 16) & 0x1f;
-       base = (inst >> 21) & 0x1f;
-       offset = (s16)inst;
-       op = (inst >> 26) & 0x3f;
+       rt = inst.i_format.rt;
+       op = inst.i_format.opcode;
 
        vcpu->arch.pending_load_cause = cause;
        vcpu->arch.io_gpr = rt;
@@ -1524,7 +1514,7 @@ enum emulation_result kvm_mips_emulate_load(u32 inst, u32 cause,
        default:
                kvm_err("Load not yet supported (inst=0x%08x)\n",
-                       inst);
+                       inst.word);
                er = EMULATE_FAIL;
                break;
        }
@@ -1532,8 +1522,8 @@ enum emulation_result kvm_mips_emulate_load(u32 inst, u32 cause,
        return er;
 }
 
-enum emulation_result kvm_mips_emulate_cache(u32 inst, u32 *opc,
-                                             u32 cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+                                             u32 *opc, u32 cause,
                                              struct kvm_run *run,
                                              struct kvm_vcpu *vcpu)
 {
@@ -1554,9 +1544,9 @@ enum emulation_result kvm_mips_emulate_cache(u32 inst, u32 *opc,
        if (er == EMULATE_FAIL)
                return er;
 
-       base = (inst >> 21) & 0x1f;
-       op_inst = (inst >> 16) & 0x1f;
-       offset = (s16)inst;
+       base = inst.i_format.rs;
+       op_inst = inst.i_format.rt;
+       offset = inst.i_format.simmediate;
        cache = op_inst & CacheOp_Cache;
        op = op_inst & CacheOp_Op;
@@ -1693,16 +1683,16 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
                                            struct kvm_run *run,
                                            struct kvm_vcpu *vcpu)
 {
+       union mips_instruction inst;
        enum emulation_result er = EMULATE_DONE;
-       u32 inst;
 
        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
 
-       inst = kvm_get_inst(opc, vcpu);
+       inst.word = kvm_get_inst(opc, vcpu);
 
-       switch (((union mips_instruction)inst).r_format.opcode) {
+       switch (inst.r_format.opcode) {
        case cop0_op:
                er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
                break;
@@ -1727,7 +1717,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
        default:
                kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
-                       inst);
+                       inst.word);
                kvm_arch_vcpu_dump_regs(vcpu);
                er = EMULATE_FAIL;
                break;
@@ -2262,21 +2252,6 @@ enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
        return er;
 }
 
-/* ll/sc, rdhwr, sync emulation */
-
-#define OPCODE 0xfc000000
-#define BASE   0x03e00000
-#define RT     0x001f0000
-#define OFFSET 0x0000ffff
-#define LL     0xc0000000
-#define SC     0xe0000000
-#define SPEC0  0x00000000
-#define SPEC3  0x7c000000
-#define RD     0x0000f800
-#define FUNC   0x0000003f
-#define SYNC   0x0000000f
-#define RDHWR  0x0000003b
-
 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
                                          struct kvm_run *run,
                                          struct kvm_vcpu *vcpu)
@@ -2285,7 +2260,7 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long curr_pc;
-       u32 inst;
+       union mips_instruction inst;
 
        /*
         * Update PC and hold onto current PC in case there is
@@ -2300,18 +2275,19 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
        if (cause & CAUSEF_BD)
                opc += 1;
 
-       inst = kvm_get_inst(opc, vcpu);
+       inst.word = kvm_get_inst(opc, vcpu);
 
-       if (inst == KVM_INVALID_INST) {
+       if (inst.word == KVM_INVALID_INST) {
                kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
                return EMULATE_FAIL;
        }
 
-       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+       if (inst.r_format.opcode == spec3_op &&
+           inst.r_format.func == rdhwr_op) {
                int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-               int rd = (inst & RD) >> 11;
-               int rt = (inst & RT) >> 16;
-               int sel = (inst >> 6) & 0x7;
+               int rd = inst.r_format.rd;
+               int rt = inst.r_format.rt;
+               int sel = inst.r_format.re & 0x7;
 
                /* If usermode, check RDHWR rd is allowed by guest HWREna */
                if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
@@ -2352,7 +2328,8 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
                trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
                              vcpu->arch.gprs[rt]);
        } else {
-               kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+               kvm_debug("Emulate RI not supported @ %p: %#x\n",
+                         opc, inst.word);
                goto emulate_ri;
        }
...