Commit 8cffd197 authored by James Hogan's avatar James Hogan Committed by Paolo Bonzini

MIPS: KVM: Convert code to kernel sized types

Convert the MIPS KVM C code to use standard kernel sized types (e.g.
u32) instead of inttypes.h style ones (e.g. uint32_t) or other types as
appropriate.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bdb7ed86
...@@ -33,13 +33,13 @@ int kvm_mips_trans_cache_index(u32 inst, u32 *opc, ...@@ -33,13 +33,13 @@ int kvm_mips_trans_cache_index(u32 inst, u32 *opc,
{ {
int result = 0; int result = 0;
unsigned long kseg0_opc; unsigned long kseg0_opc;
uint32_t synci_inst = 0x0; u32 synci_inst = 0x0;
/* Replace the CACHE instruction, with a NOP */ /* Replace the CACHE instruction, with a NOP */
kseg0_opc = kseg0_opc =
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
(vcpu, (unsigned long) opc)); (vcpu, (unsigned long) opc));
memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(u32));
local_flush_icache_range(kseg0_opc, kseg0_opc + 32); local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
return result; return result;
...@@ -54,7 +54,7 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc, ...@@ -54,7 +54,7 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc,
{ {
int result = 0; int result = 0;
unsigned long kseg0_opc; unsigned long kseg0_opc;
uint32_t synci_inst = SYNCI_TEMPLATE, base, offset; u32 synci_inst = SYNCI_TEMPLATE, base, offset;
base = (inst >> 21) & 0x1f; base = (inst >> 21) & 0x1f;
offset = inst & 0xffff; offset = inst & 0xffff;
...@@ -64,7 +64,7 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc, ...@@ -64,7 +64,7 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc,
kseg0_opc = kseg0_opc =
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
(vcpu, (unsigned long) opc)); (vcpu, (unsigned long) opc));
memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(u32));
local_flush_icache_range(kseg0_opc, kseg0_opc + 32); local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
return result; return result;
...@@ -72,8 +72,8 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc, ...@@ -72,8 +72,8 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc,
int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
{ {
int32_t rt, rd, sel; u32 rt, rd, sel;
uint32_t mfc0_inst; u32 mfc0_inst;
unsigned long kseg0_opc, flags; unsigned long kseg0_opc, flags;
rt = (inst >> 16) & 0x1f; rt = (inst >> 16) & 0x1f;
...@@ -94,11 +94,11 @@ int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) ...@@ -94,11 +94,11 @@ int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
kseg0_opc = kseg0_opc =
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
(vcpu, (unsigned long) opc)); (vcpu, (unsigned long) opc));
memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t)); memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(u32));
local_flush_icache_range(kseg0_opc, kseg0_opc + 32); local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
local_irq_save(flags); local_irq_save(flags);
memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t)); memcpy((void *)opc, (void *)&mfc0_inst, sizeof(u32));
local_flush_icache_range((unsigned long)opc, local_flush_icache_range((unsigned long)opc,
(unsigned long)opc + 32); (unsigned long)opc + 32);
local_irq_restore(flags); local_irq_restore(flags);
...@@ -112,8 +112,8 @@ int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) ...@@ -112,8 +112,8 @@ int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
{ {
int32_t rt, rd, sel; u32 rt, rd, sel;
uint32_t mtc0_inst = SW_TEMPLATE; u32 mtc0_inst = SW_TEMPLATE;
unsigned long kseg0_opc, flags; unsigned long kseg0_opc, flags;
rt = (inst >> 16) & 0x1f; rt = (inst >> 16) & 0x1f;
...@@ -127,11 +127,11 @@ int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) ...@@ -127,11 +127,11 @@ int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
kseg0_opc = kseg0_opc =
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
(vcpu, (unsigned long) opc)); (vcpu, (unsigned long) opc));
memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t)); memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(u32));
local_flush_icache_range(kseg0_opc, kseg0_opc + 32); local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
local_irq_save(flags); local_irq_save(flags);
memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t)); memcpy((void *)opc, (void *)&mtc0_inst, sizeof(u32));
local_flush_icache_range((unsigned long)opc, local_flush_icache_range((unsigned long)opc,
(unsigned long)opc + 32); (unsigned long)opc + 32);
local_irq_restore(flags); local_irq_restore(flags);
......
This diff is collapsed.
...@@ -117,7 +117,7 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, ...@@ -117,7 +117,7 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause) u32 cause)
{ {
int allowed = 0; int allowed = 0;
uint32_t exccode; u32 exccode;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
......
...@@ -1222,7 +1222,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -1222,7 +1222,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
static void kvm_mips_set_c0_status(void) static void kvm_mips_set_c0_status(void)
{ {
uint32_t status = read_c0_status(); u32 status = read_c0_status();
if (cpu_has_dsp) if (cpu_has_dsp)
status |= (ST0_MX); status |= (ST0_MX);
...@@ -1236,9 +1236,9 @@ static void kvm_mips_set_c0_status(void) ...@@ -1236,9 +1236,9 @@ static void kvm_mips_set_c0_status(void)
*/ */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{ {
uint32_t cause = vcpu->arch.host_cp0_cause; u32 cause = vcpu->arch.host_cp0_cause;
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
......
...@@ -32,8 +32,6 @@ ...@@ -32,8 +32,6 @@
#define KVM_GUEST_PC_TLB 0 #define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1 #define KVM_GUEST_SP_TLB 1
#define PRIx64 "llx"
atomic_t kvm_mips_instance; atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance); EXPORT_SYMBOL_GPL(kvm_mips_instance);
...@@ -102,13 +100,13 @@ void kvm_mips_dump_host_tlbs(void) ...@@ -102,13 +100,13 @@ void kvm_mips_dump_host_tlbs(void)
kvm_info("TLB%c%3d Hi 0x%08lx ", kvm_info("TLB%c%3d Hi 0x%08lx ",
(tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
i, tlb.tlb_hi); i, tlb.tlb_hi);
kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", kvm_info("Lo0=0x%09llx %c%c attr %lx ",
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
(tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
(tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
(tlb.tlb_lo0 >> 3) & 7); (tlb.tlb_lo0 >> 3) & 7);
kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
(tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
(tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
(tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
...@@ -134,13 +132,13 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) ...@@ -134,13 +132,13 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
kvm_info("TLB%c%3d Hi 0x%08lx ", kvm_info("TLB%c%3d Hi 0x%08lx ",
(tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
i, tlb.tlb_hi); i, tlb.tlb_hi);
kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", kvm_info("Lo0=0x%09llx %c%c attr %lx ",
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
(tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
(tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
(tlb.tlb_lo0 >> 3) & 7); (tlb.tlb_lo0 >> 3) & 7);
kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
(tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
(tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
(tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
...@@ -160,7 +158,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) ...@@ -160,7 +158,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
pfn = kvm_mips_gfn_to_pfn(kvm, gfn); pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
if (kvm_mips_is_error_pfn(pfn)) { if (kvm_mips_is_error_pfn(pfn)) {
kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
err = -EFAULT; err = -EFAULT;
goto out; goto out;
} }
...@@ -176,7 +174,7 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, ...@@ -176,7 +174,7 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
unsigned long gva) unsigned long gva)
{ {
gfn_t gfn; gfn_t gfn;
uint32_t offset = gva & ~PAGE_MASK; unsigned long offset = gva & ~PAGE_MASK;
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) { if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
...@@ -726,7 +724,7 @@ EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load); ...@@ -726,7 +724,7 @@ EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{ {
unsigned long flags; unsigned long flags;
uint32_t cpu; int cpu;
local_irq_save(flags); local_irq_save(flags);
...@@ -755,7 +753,7 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu) ...@@ -755,7 +753,7 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long paddr, flags, vpn2, asid; unsigned long paddr, flags, vpn2, asid;
uint32_t inst; u32 inst;
int index; int index;
if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 || if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
...@@ -787,7 +785,7 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu) ...@@ -787,7 +785,7 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
paddr = paddr =
kvm_mips_translate_guest_kseg0_to_hpa(vcpu, kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
(unsigned long) opc); (unsigned long) opc);
inst = *(uint32_t *) CKSEG0ADDR(paddr); inst = *(u32 *) CKSEG0ADDR(paddr);
} else { } else {
kvm_err("%s: illegal address: %p\n", __func__, opc); kvm_err("%s: illegal address: %p\n", __func__, opc);
return KVM_INVALID_INST; return KVM_INVALID_INST;
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{ {
gpa_t gpa; gpa_t gpa;
uint32_t kseg = KSEGX(gva); gva_t kseg = KSEGX(gva);
if ((kseg == CKSEG0) || (kseg == CKSEG1)) if ((kseg == CKSEG0) || (kseg == CKSEG1))
gpa = CPHYSADDR(gva); gpa = CPHYSADDR(gva);
...@@ -40,7 +40,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) ...@@ -40,7 +40,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -87,7 +87,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) ...@@ -87,7 +87,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
...@@ -131,7 +131,7 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) ...@@ -131,7 +131,7 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
...@@ -178,7 +178,7 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) ...@@ -178,7 +178,7 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
...@@ -232,7 +232,7 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) ...@@ -232,7 +232,7 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
...@@ -262,7 +262,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) ...@@ -262,7 +262,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
...@@ -292,7 +292,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) ...@@ -292,7 +292,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -310,7 +310,7 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) ...@@ -310,7 +310,7 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -328,7 +328,7 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) ...@@ -328,7 +328,7 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -346,7 +346,7 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) ...@@ -346,7 +346,7 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; u32 __user *opc = (u32 __user *)vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -364,7 +364,7 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) ...@@ -364,7 +364,7 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; u32 __user *opc = (u32 __user *)vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -382,7 +382,7 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) ...@@ -382,7 +382,7 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; u32 __user *opc = (u32 __user *)vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -407,7 +407,7 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) ...@@ -407,7 +407,7 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause; unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
...@@ -457,7 +457,7 @@ static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) ...@@ -457,7 +457,7 @@ static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
uint32_t config1; u32 config1;
int vcpu_id = vcpu->vcpu_id; int vcpu_id = vcpu->vcpu_id;
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment