Commit 8533ce72 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM changes from Paolo Bonzini:
 "These are the x86, MIPS and s390 changes; PPC and ARM will come in a
  few days.

  MIPS and s390 have little going on this release; just bugfixes, some
  small, some larger.

  The highlights for x86 are nested VMX improvements (Jan Kiszka),
   optimizations for old processors (up to Nehalem, by me and Bandan Das),
  and a lot of x86 emulator bugfixes (Nadav Amit).

  Stephen Rothwell reported a trivial conflict with the tracing branch"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (104 commits)
  x86/kvm: Resolve shadow warnings in macro expansion
  KVM: s390: rework broken SIGP STOP interrupt handling
  KVM: x86: always exit on EOIs for interrupts listed in the IOAPIC redir table
  KVM: vmx: remove duplicate vmx_mpx_supported() prototype
  KVM: s390: Fix memory leak on busy SIGP stop
  x86/kvm: Resolve shadow warning from min macro
  kvm: Resolve missing-field-initializers warnings
  Replace NR_VMX_MSR with its definition
  KVM: x86: Assertions to check no overrun in MSR lists
  KVM: x86: set rflags.rf during fault injection
  KVM: x86: Setting rflags.rf during rep-string emulation
  KVM: x86: DR6/7.RTM cannot be written
  KVM: nVMX: clean up nested_release_vmcs12 and code around it
  KVM: nVMX: fix lifetime issues for vmcs02
  KVM: x86: Defining missing x86 vectors
  KVM: x86: emulator injects #DB when RFLAGS.RF is set
  KVM: x86: Cleanup of rflags.rf cleaning
  KVM: x86: Clear rflags.rf on emulated instructions
  KVM: x86: popf emulation should not change RF
  KVM: x86: Clearing rflags.rf upon skipped emulated instruction
  ...
parents c9b88e95 42cbc04f
@@ -359,13 +359,17 @@ enum emulation_result {
 #define MIPS3_PG_FRAME 0x3fffffc0
 #define VPN2_MASK 0xffffe000
 #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
 ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
 #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
 ? ((x).tlb_lo1 & MIPS3_PG_V) \
 : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
+ ((y) & VPN2_MASK & ~(x).tlb_mask))
+#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
+ TLB_ASID(x) == ((y) & ASID_MASK))
 struct kvm_mips_tlb {
 long tlb_mask;
@@ -760,7 +764,7 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
 struct kvm_vcpu *vcpu);
 /* Misc */
-extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
...
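The TLB_HI_VPN2_HIT and TLB_HI_ASID_HIT macros added above fold the VPN2 and ASID matching that the TLB code previously open-coded. As a rough, hedged illustration only (the loop, array name, size parameter and return convention below are assumptions for the sketch, not part of this diff), a guest TLB lookup built on the new helpers could look like:

/* Hypothetical sketch: scan a guest TLB array for an entry matching an
 * EntryHi value, using the helpers from the hunk above. */
static int guest_tlb_lookup(struct kvm_mips_tlb *tlb, int size,
			    unsigned long entryhi)
{
	int i;

	for (i = 0; i < size; i++) {
		/* VPN2 must match under the page mask, and the entry must
		 * either be global or carry the same ASID as EntryHi. */
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi))
			return i;	/* index of the matching entry */
	}
	return -1;			/* no match found */
}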
@@ -19,6 +19,9 @@
 #include <asm/mipsmtregs.h>
 #include <asm/uaccess.h> /* for segment_eq() */
+extern void (*r4k_blast_dcache)(void);
+extern void (*r4k_blast_icache)(void);
 /*
 * This macro return a properly sign-extended address suitable as base address
 * for indexed cache operations. Two issues here:
...
@@ -5,9 +5,9 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
-kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
-kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
-kvm_mips_dyntrans.o kvm_trap_emul.o
+kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+interrupt.o stats.o commpage.o \
+dyntrans.o trap_emul.o
 obj-$(CONFIG_KVM) += kvm.o
-obj-y += kvm_cb.o kvm_tlb.o
+obj-y += callback.o tlb.o
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * commpage, currently used for Virtual COP0 registers.
 * Mapped into the guest kernel @ 0x0.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -22,16 +22,12 @@
 #include <linux/kvm_host.h>
-#include "kvm_mips_comm.h"
+#include "commpage.h"
 void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
 {
 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+memset(page, 0, sizeof(struct kvm_mips_commpage));
 /* Specific init values for fields */
 vcpu->arch.cop0 = &page->cop0;
-memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
-return;
 }
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: commpage: mapped into get kernel space
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 #ifndef __KVM_MIPS_COMMPAGE_H__
 #define __KVM_MIPS_COMMPAGE_H__
 struct kvm_mips_commpage {
-struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */
+/* COP0 state is mapped into Guest kernel via commpage */
+struct mips_coproc cop0;
 };
 #define KVM_MIPS_COMM_EIDI_OFFSET 0x0
...
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -18,7 +18,7 @@
 #include <linux/bootmem.h>
 #include <asm/cacheflush.h>
-#include "kvm_mips_comm.h"
+#include "commpage.h"
 #define SYNCI_TEMPLATE 0x041f0000
 #define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
@@ -28,9 +28,8 @@
 #define CLEAR_TEMPLATE 0x00000020
 #define SW_TEMPLATE 0xac000000
-int
-kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+struct kvm_vcpu *vcpu)
 {
 int result = 0;
 unsigned long kseg0_opc;
@@ -47,12 +46,11 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
 }
 /*
-* Address based CACHE instructions are transformed into synci(s). A little heavy
-* for just D-cache invalidates, but avoids an expensive trap
+* Address based CACHE instructions are transformed into synci(s). A little
+* heavy for just D-cache invalidates, but avoids an expensive trap
 */
-int
-kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
-struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+struct kvm_vcpu *vcpu)
 {
 int result = 0;
 unsigned long kseg0_opc;
@@ -72,8 +70,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
 return result;
 }
-int
-kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 int32_t rt, rd, sel;
 uint32_t mfc0_inst;
@@ -115,8 +112,7 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 return 0;
 }
-int
-kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 int32_t rt, rd, sel;
 uint32_t mtc0_inst = SW_TEMPLATE;
...
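To make the CACHE-to-SYNCI comment above concrete: both instructions use the MIPS I-type layout (base register in bits 25:21, 16-bit offset in bits 15:0), so the rewrite only has to splice the original base and offset into the SYNCI template. The helper below is a hedged sketch of that step, not the kernel's exact code; the field positions are standard MIPS encoding facts rather than something taken from this diff.

#include <stdint.h>

#define SYNCI_TEMPLATE 0x041f0000	/* as defined in the hunk above */

/* Hypothetical sketch: build a SYNCI that reuses the base register and
 * offset of an address-based CACHE instruction. */
static uint32_t make_synci(uint32_t cache_inst)
{
	uint32_t base   = (cache_inst >> 21) & 0x1f;	/* rs/base field */
	uint32_t offset = cache_inst & 0xffff;		/* 16-bit offset */

	return SYNCI_TEMPLATE | (base << 21) | offset;
}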
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Interrupt delivery
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -20,7 +20,7 @@
 #include <linux/kvm_host.h>
-#include "kvm_mips_int.h"
+#include "interrupt.h"
 void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
 {
@@ -34,7 +34,8 @@ void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
 void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
 {
-/* Cause bits to reflect the pending timer interrupt,
+/*
+* Cause bits to reflect the pending timer interrupt,
 * the EXC code will be set when we are actually
 * delivering the interrupt:
 */
@@ -51,12 +52,13 @@ void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
 }
-void
-kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+struct kvm_mips_interrupt *irq)
 {
 int intr = (int)irq->irq;
-/* Cause bits to reflect the pending IO interrupt,
+/*
+* Cause bits to reflect the pending IO interrupt,
 * the EXC code will be set when we are actually
 * delivering the interrupt:
 */
@@ -83,11 +85,11 @@ kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
 }
-void
-kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
-struct kvm_mips_interrupt *irq)
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+struct kvm_mips_interrupt *irq)
 {
 int intr = (int)irq->irq;
 switch (intr) {
 case -2:
 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
@@ -111,9 +113,8 @@ kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
 }
 /* Deliver the interrupt of the corresponding priority, if possible. */
-int
-kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-uint32_t cause)
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+uint32_t cause)
 {
 int allowed = 0;
 uint32_t exccode;
@@ -164,7 +165,6 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
 /* Are we allowed to deliver the interrupt ??? */
 if (allowed) {
 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 /* save old pc */
 kvm_write_c0_guest_epc(cop0, arch->pc);
@@ -195,9 +195,8 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
 return allowed;
 }
-int
-kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-uint32_t cause)
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+uint32_t cause)
 {
 return 1;
 }
...
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Interrupts
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
-/* MIPS Exception Priorities, exceptions (including interrupts) are queued up
+/*
+* MIPS Exception Priorities, exceptions (including interrupts) are queued up
 * for the guest in the order specified by their priorities
 */
@@ -27,6 +28,9 @@
 #define MIPS_EXC_MAX 12
 /* XXXSL More to follow */
+extern char mips32_exception[], mips32_exceptionEnd[];
+extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 #define C_TI (_ULCAST_(1) << 30)
 #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
...
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
/*
* Define opcode values not defined in <asm/isnt.h>
*/
#ifndef __KVM_MIPS_OPCODE_H__
#define __KVM_MIPS_OPCODE_H__
/* COP0 Ops */
#define mfmcz_op 0x0b /* 01011 */
#define wrpgpr_op 0x0e /* 01110 */
/* COP0 opcodes (only if COP0 and CO=1): */
#define wait_op 0x20 /* 100000 */
#endif /* __KVM_MIPS_OPCODE_H__ */
@@ -16,7 +16,6 @@
 #include <asm/stackframe.h>
 #include <asm/asm-offsets.h>
 #define _C_LABEL(x) x
 #define MIPSX(name) mips32_ ## name
 #define CALLFRAME_SIZ 32
@@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run)
 LONG_S $24, PT_R24(k1)
 LONG_S $25, PT_R25(k1)
-/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+/*
+* XXXKYMA k0/k1 not saved, not being used if we got here through
+* an ioctl()
+*/
 LONG_S $28, PT_R28(k1)
 LONG_S $29, PT_R29(k1)
@@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run)
 /* Save the kernel gp as well */
 LONG_S gp, VCPU_HOST_GP(k1)
-/* Setup status register for running the guest in UM, interrupts are disabled */
+/*
+* Setup status register for running the guest in UM, interrupts
+* are disabled
+*/
 li k0, (ST0_EXL | KSU_USER | ST0_BEV)
 mtc0 k0, CP0_STATUS
 ehb
@@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run)
 mtc0 k0, CP0_STATUS
 ehb
 /* Set Guest EPC */
 LONG_L t0, VCPU_PC(k1)
 mtc0 t0, CP0_EPC
@@ -165,7 +169,7 @@ FEXPORT(__kvm_mips_load_asid)
 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
 1:
 /* t1: contains the base of the ASID array, need to get the cpu id */
 LONG_L t2, TI_CPU($28) /* smp_processor_id */
 INT_SLL t2, t2, 2 /* x4 */
 REG_ADDU t3, t1, t2
@@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 eret
 VECTOR(MIPSX(exception), unknown)
-/*
-* Find out what mode we came from and jump to the proper handler.
-*/
+/* Find out what mode we came from and jump to the proper handler. */
 mtc0 k0, CP0_ERROREPC #01: Save guest k0
 ehb #02:
@@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown)
 INT_SRL k0, k0, 10 #03: Get rid of CPUNum
 INT_SLL k0, k0, 10 #04
 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
-INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
+INT_ADDIU k0, k0, 0x2000 #06: Exception handler is
+# installed @ offset 0x2000
 j k0 #07: jump to the function
 nop #08: branch delay slot
 VECTOR_END(MIPSX(exceptionEnd))
@@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd))
 /*
 * Generic Guest exception handler. We end up here when the guest
 * does something that causes a trap to kernel mode.
-*
 */
 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 /* Get the VCPU pointer from DDTATA_LO */
@@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 LONG_S $30, VCPU_R30(k1)
 LONG_S $31, VCPU_R31(k1)
-/* We need to save hi/lo and restore them on
-* the way out
-*/
+/* We need to save hi/lo and restore them on the way out */
 mfhi t0
 LONG_S t0, VCPU_HI(k1)
@@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 /* Save pointer to run in s0, will be saved by the compiler */
 move s0, a0
-/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
-* process the exception */
+/*
+* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+* process the exception
+*/
 mfc0 k0,CP0_EPC
 LONG_S k0, VCPU_PC(k1)
@@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 LONG_L k0, VCPU_HOST_EBASE(k1)
 mtc0 k0,CP0_EBASE
 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 .set at
 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
@@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 /* Saved host state */
 INT_ADDIU sp, sp, -PT_SIZE
-/* XXXKYMA do we need to load the host ASID, maybe not because the
+/*
+* XXXKYMA do we need to load the host ASID, maybe not because the
 * kernel entries are marked GLOBAL, need to verify
 */
@@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 /* Jump to handler */
 FEXPORT(__kvm_mips_jump_to_handler)
-/* XXXKYMA: not sure if this is safe, how large is the stack??
+/*
+* XXXKYMA: not sure if this is safe, how large is the stack??
 * Now jump to the kvm_mips_handle_exit() to see if we can deal
-* with this in the kernel */
+* with this in the kernel
+*/
 PTR_LA t9, kvm_mips_handle_exit
 jalr.hb t9
 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
@@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
 di
 ehb
-/* XXXKYMA: k0/k1 could have been blown away if we processed
+/*
+* XXXKYMA: k0/k1 could have been blown away if we processed
 * an exception while we were handling the exception from the
 * guest, reload k1
 */
@@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
 move k1, s1
 INT_ADDIU k1, k1, VCPU_HOST_ARCH
-/* Check return value, should tell us if we are returning to the
+/*
+* Check return value, should tell us if we are returning to the
 * host (handle I/O etc)or resuming the guest
 */
 andi t0, v0, RESUME_HOST
@@ -521,8 +527,10 @@ __kvm_mips_return_to_host:
 LONG_L $0, PT_R0(k1)
 LONG_L $1, PT_R1(k1)
-/* r2/v0 is the return code, shift it down by 2 (arithmetic)
-* to recover the err code */
+/*
+* r2/v0 is the return code, shift it down by 2 (arithmetic)
+* to recover the err code
+*/
 INT_SRA k0, v0, 2
 move $2, k0
@@ -566,7 +574,6 @@ __kvm_mips_return_to_host:
 PTR_LI k0, 0x2000000F
 mtc0 k0, CP0_HWRENA
 /* Restore RA, which is the address we will return to */
 LONG_L ra, PT_R31(k1)
 j ra
...
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
/* Define opcode values not defined in <asm/isnt.h> */
#ifndef __KVM_MIPS_OPCODE_H__
#define __KVM_MIPS_OPCODE_H__
/* COP0 Ops */
#define mfmcz_op 0x0b /* 01011 */
#define wrpgpr_op 0x0e /* 01110 */
/* COP0 opcodes (only if COP0 and CO=1): */
#define wait_op 0x20 /* 100000 */
#endif /* __KVM_MIPS_OPCODE_H__ */
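The values in this header plug into the standard MIPS32 COP0 instruction layout. As a hedged illustration (the 0x10 COP0 major opcode, the CO bit position and the field masks are general MIPS encoding facts assumed for the sketch, not taken from this diff), recognising a guest WAIT instruction would look roughly like this:

#include <stdint.h>

#define wait_op 0x20	/* from the header above (only if COP0 and CO=1) */

/* Hypothetical sketch: classify a 32-bit instruction word as WAIT. */
static int is_wait_insn(uint32_t inst)
{
	uint32_t major = (inst >> 26) & 0x3f;	/* major opcode field */
	uint32_t co    = (inst >> 25) & 0x1;	/* COP0 "CO" bit */
	uint32_t funct = inst & 0x3f;		/* function field */

	return major == 0x10 && co && funct == wait_op;	/* 0x10 = COP0 */
}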
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: COP0 access histogram
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 #include <linux/kvm_host.h>
@@ -63,20 +63,18 @@ char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
 "DESAVE"
 };
-int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
 int i, j;
-printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
 for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
 for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
 if (vcpu->arch.cop0->stat[i][j])
-printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
 vcpu->arch.cop0->stat[i][j]);
 }
 }
 #endif
-return 0;
 }
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_KVM_H
@@ -17,9 +17,7 @@
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE trace
-/*
-* Tracepoints for VM eists
-*/
+/* Tracepoints for VM eists */
 extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
 TRACE_EVENT(kvm_exit,
...
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -16,8 +16,8 @@
 #include <linux/kvm_host.h>
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
+#include "opcode.h"
+#include "interrupt.h"
 static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 {
@@ -27,7 +27,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 if ((kseg == CKSEG0) || (kseg == CKSEG1))
 gpa = CPHYSADDR(gva);
 else {
-printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
 kvm_mips_dump_host_tlbs();
 gpa = KVM_INVALID_ADDR;
 }
@@ -37,7 +37,6 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 return gpa;
 }
 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
 struct kvm_run *run = vcpu->run;
@@ -46,9 +45,9 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 enum emulation_result er = EMULATE_DONE;
 int ret = RESUME_GUEST;
-if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
-} else
+else
 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 switch (er) {
@@ -83,9 +82,8 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-kvm_debug
-("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
 if (er == EMULATE_DONE)
@@ -95,20 +93,20 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 ret = RESUME_HOST;
 }
 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-/* XXXKYMA: The guest kernel does not expect to get this fault when we are not
-* using HIGHMEM. Need to address this in a HIGHMEM kernel
+/*
+* XXXKYMA: The guest kernel does not expect to get this fault
+* when we are not using HIGHMEM. Need to address this in a
+* HIGHMEM kernel
 */
-printk
-("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 kvm_mips_dump_host_tlbs();
 kvm_arch_vcpu_dump_regs(vcpu);
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 ret = RESUME_HOST;
 } else {
-printk
-("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 kvm_mips_dump_host_tlbs();
 kvm_arch_vcpu_dump_regs(vcpu);
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -134,9 +132,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
 }
 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-kvm_debug
-("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
 if (er == EMULATE_DONE)
 ret = RESUME_GUEST;
@@ -145,8 +142,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
 ret = RESUME_HOST;
 }
 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-/* All KSEG0 faults are handled by KVM, as the guest kernel does not
-* expect to ever get them
+/*
+* All KSEG0 faults are handled by KVM, as the guest kernel does
+* not expect to ever get them
 */
 if (kvm_mips_handle_kseg0_tlb_fault
 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
@@ -154,9 +152,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
 ret = RESUME_HOST;
 }
 } else {
-kvm_err
-("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 kvm_mips_dump_host_tlbs();
 kvm_arch_vcpu_dump_regs(vcpu);
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -185,11 +182,14 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
 vcpu->arch.pc, badvaddr);
-/* User Address (UA) fault, this could happen if
-* (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
-* case we pass on the fault to the guest kernel and let it handle it.
-* (2) TLB entry is present in the Guest TLB but not in the shadow, in this
-* case we inject the TLB from the Guest TLB into the shadow host TLB
+/*
+* User Address (UA) fault, this could happen if
+* (1) TLB entry not present/valid in both Guest and shadow host
+* TLBs, in this case we pass on the fault to the guest
+* kernel and let it handle it.
+* (2) TLB entry is present in the Guest TLB but not in the
+* shadow, in this case we inject the TLB from the Guest TLB
+* into the shadow host TLB
 */
 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
@@ -206,9 +206,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 ret = RESUME_HOST;
 }
 } else {
-printk
-("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 kvm_mips_dump_host_tlbs();
 kvm_arch_vcpu_dump_regs(vcpu);
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -231,7 +230,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 kvm_debug("Emulate Store to MMIO space\n");
 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 if (er == EMULATE_FAIL) {
-printk("Emulate Store to MMIO space failed\n");
+kvm_err("Emulate Store to MMIO space failed\n");
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 ret = RESUME_HOST;
 } else {
@@ -239,9 +238,8 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 ret = RESUME_HOST;
 }
 } else {
-printk
-("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 ret = RESUME_HOST;
 }
@@ -261,7 +259,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 if (er == EMULATE_FAIL) {
-printk("Emulate Load from MMIO space failed\n");
+kvm_err("Emulate Load from MMIO space failed\n");
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 ret = RESUME_HOST;
 } else {
@@ -269,9 +267,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 ret = RESUME_HOST;
 }
 } else {
-printk
-("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-cause, opc, badvaddr);
+kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+cause, opc, badvaddr);
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 ret = RESUME_HOST;
 er = EMULATE_FAIL;
@@ -349,9 +346,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 uint32_t config1;
 int vcpu_id = vcpu->vcpu_id;
-/* Arch specific stuff, set up config registers properly so that the
-* guest will come up as expected, for now we simulate a
-* MIPS 24kc
+/*
+* Arch specific stuff, set up config registers properly so that the
+* guest will come up as expected, for now we simulate a MIPS 24kc
 */
 kvm_write_c0_guest_prid(cop0, 0x00019300);
 kvm_write_c0_guest_config(cop0,
@@ -373,14 +370,15 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
-kvm_write_c0_guest_config3(cop0,
-MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
-CP0C3_ULRI));
+kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
+(1 << CP0C3_ULRI));
 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
-/* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) */
+/*
+* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
+*/
 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
 /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
...
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt {
 struct list_head list;
 atomic_t active;
 struct kvm_s390_float_interrupt *float_int;
-int timer_due; /* event indicator for waitqueue below */
 wait_queue_head_t *wq;
 atomic_t *cpuflags;
 unsigned int action_bits;
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch {
 s390_fp_regs guest_fpregs;
 struct kvm_s390_local_interrupt local_int;
 struct hrtimer ckc_timer;
-struct tasklet_struct tasklet;
 struct kvm_s390_pgm_info pgm;
 union {
 struct cpuid cpu_id;
@@ -418,6 +416,7 @@ struct kvm_arch{
 int css_support;
 int use_irqchip;
 int use_cmma;
+int user_cpu_state_ctrl;
 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 wait_queue_head_t ipte_wq;
 spinlock_t start_stop_lock;
...
@@ -108,6 +108,7 @@
 exit_code_ipa0(0xB2, 0x17, "STETR"), \
 exit_code_ipa0(0xB2, 0x18, "PC"), \
 exit_code_ipa0(0xB2, 0x20, "SERVC"), \
+exit_code_ipa0(0xB2, 0x21, "IPTE"), \
 exit_code_ipa0(0xB2, 0x28, "PT"), \
 exit_code_ipa0(0xB2, 0x29, "ISKE"), \
 exit_code_ipa0(0xB2, 0x2a, "RRBE"), \
...
@@ -176,7 +176,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 return -EOPNOTSUPP;
 }
-kvm_s390_vcpu_stop(vcpu);
+if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+kvm_s390_vcpu_stop(vcpu);
 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
...
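The kvm_s390_user_cpu_state_ctrl() helper that now gates kvm_s390_vcpu_stop() is not itself shown in this excerpt. Given the user_cpu_state_ctrl field added to struct kvm_arch above, it presumably reduces to a simple flag test along these lines (a hedged guess, not quoted from the tree):

/* Assumed shape of the helper used in the hunks above and below;
 * the real definition lives outside this excerpt. */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl;
}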
@@ -56,32 +56,26 @@ static int handle_noop(struct kvm_vcpu *vcpu)
 static int handle_stop(struct kvm_vcpu *vcpu)
 {
 int rc = 0;
+unsigned int action_bits;
 vcpu->stat.exit_stop_request++;
-spin_lock_bh(&vcpu->arch.local_int.lock);
 trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
-if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
-kvm_s390_vcpu_stop(vcpu);
-vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
-VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
-rc = -EOPNOTSUPP;
-}
-if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
-vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
-/* store status must be called unlocked. Since local_int.lock
-* only protects local_int.* and not guest memory we can give
-* up the lock here */
-spin_unlock_bh(&vcpu->arch.local_int.lock);
+action_bits = vcpu->arch.local_int.action_bits;
+if (!(action_bits & ACTION_STOP_ON_STOP))
+return 0;
+if (action_bits & ACTION_STORE_ON_STOP) {
 rc = kvm_s390_vcpu_store_status(vcpu,
 KVM_S390_STORE_STATUS_NOADDR);
-if (rc >= 0)
-rc = -EOPNOTSUPP;
-} else
-spin_unlock_bh(&vcpu->arch.local_int.lock);
-return rc;
+if (rc)
+return rc;
+}
+if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+kvm_s390_vcpu_stop(vcpu);
+return -EOPNOTSUPP;
 }
 static int handle_validity(struct kvm_vcpu *vcpu)
...
...@@ -158,6 +158,9 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) ...@@ -158,6 +158,9 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
LCTL_CR10 | LCTL_CR11); LCTL_CR10 | LCTL_CR11);
vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
} }
if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
} }
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
...@@ -544,13 +547,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) ...@@ -544,13 +547,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
int rc = 0; int rc = 0;
if (atomic_read(&li->active)) { if (atomic_read(&li->active)) {
spin_lock_bh(&li->lock); spin_lock(&li->lock);
list_for_each_entry(inti, &li->list, list) list_for_each_entry(inti, &li->list, list)
if (__interrupt_is_deliverable(vcpu, inti)) { if (__interrupt_is_deliverable(vcpu, inti)) {
rc = 1; rc = 1;
break; break;
} }
spin_unlock_bh(&li->lock); spin_unlock(&li->lock);
} }
if ((!rc) && atomic_read(&fi->active)) { if ((!rc) && atomic_read(&fi->active)) {
...@@ -585,88 +588,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) ...@@ -585,88 +588,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{ {
u64 now, sltime; u64 now, sltime;
DECLARE_WAITQUEUE(wait, current);
vcpu->stat.exit_wait_state++; vcpu->stat.exit_wait_state++;
if (kvm_cpu_has_interrupt(vcpu))
return 0;
__set_cpu_idle(vcpu); /* fast path */
spin_lock_bh(&vcpu->arch.local_int.lock); if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
vcpu->arch.local_int.timer_due = 0; return 0;
spin_unlock_bh(&vcpu->arch.local_int.lock);
if (psw_interrupts_disabled(vcpu)) { if (psw_interrupts_disabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
__unset_cpu_idle(vcpu);
return -EOPNOTSUPP; /* disabled wait */ return -EOPNOTSUPP; /* disabled wait */
} }
__set_cpu_idle(vcpu);
if (!ckc_interrupts_enabled(vcpu)) { if (!ckc_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
goto no_timer; goto no_timer;
} }
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
if (vcpu->arch.sie_block->ckc < now) {
__unset_cpu_idle(vcpu);
return 0;
}
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer: no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
spin_lock(&vcpu->arch.local_int.float_int->lock); kvm_vcpu_block(vcpu);
spin_lock_bh(&vcpu->arch.local_int.lock);
add_wait_queue(&vcpu->wq, &wait);
while (list_empty(&vcpu->arch.local_int.list) &&
list_empty(&vcpu->arch.local_int.float_int->list) &&
(!vcpu->arch.local_int.timer_due) &&
!signal_pending(current) &&
!kvm_s390_si_ext_call_pending(vcpu)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&vcpu->arch.local_int.lock);
spin_unlock(&vcpu->arch.local_int.float_int->lock);
schedule();
spin_lock(&vcpu->arch.local_int.float_int->lock);
spin_lock_bh(&vcpu->arch.local_int.lock);
}
__unset_cpu_idle(vcpu); __unset_cpu_idle(vcpu);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&vcpu->wq, &wait);
spin_unlock_bh(&vcpu->arch.local_int.lock);
spin_unlock(&vcpu->arch.local_int.float_int->lock);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
return 0; return 0;
} }
void kvm_s390_tasklet(unsigned long parm) void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{ {
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm; if (waitqueue_active(&vcpu->wq)) {
/*
spin_lock(&vcpu->arch.local_int.lock); * The vcpu gave up the cpu voluntarily, mark it as a good
vcpu->arch.local_int.timer_due = 1; * yield-candidate.
if (waitqueue_active(&vcpu->wq)) */
vcpu->preempted = true;
wake_up_interruptible(&vcpu->wq); wake_up_interruptible(&vcpu->wq);
spin_unlock(&vcpu->arch.local_int.lock); }
} }
/*
* low level hrtimer wake routine. Because this runs in hardirq context
* we schedule a tasklet to do the real work.
*/
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
vcpu->preempted = true; kvm_s390_vcpu_wakeup(vcpu);
tasklet_schedule(&vcpu->arch.tasklet);
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
...@@ -676,13 +647,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) ...@@ -676,13 +647,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_interrupt_info *n, *inti = NULL; struct kvm_s390_interrupt_info *n, *inti = NULL;
spin_lock_bh(&li->lock); spin_lock(&li->lock);
list_for_each_entry_safe(inti, n, &li->list, list) { list_for_each_entry_safe(inti, n, &li->list, list) {
list_del(&inti->list); list_del(&inti->list);
kfree(inti); kfree(inti);
} }
atomic_set(&li->active, 0); atomic_set(&li->active, 0);
spin_unlock_bh(&li->lock); spin_unlock(&li->lock);
/* clear pending external calls set by sigp interpretation facility */ /* clear pending external calls set by sigp interpretation facility */
atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
...@@ -701,7 +672,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) ...@@ -701,7 +672,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
if (atomic_read(&li->active)) { if (atomic_read(&li->active)) {
do { do {
deliver = 0; deliver = 0;
spin_lock_bh(&li->lock); spin_lock(&li->lock);
list_for_each_entry_safe(inti, n, &li->list, list) { list_for_each_entry_safe(inti, n, &li->list, list) {
if (__interrupt_is_deliverable(vcpu, inti)) { if (__interrupt_is_deliverable(vcpu, inti)) {
list_del(&inti->list); list_del(&inti->list);
...@@ -712,7 +683,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) ...@@ -712,7 +683,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
} }
if (list_empty(&li->list)) if (list_empty(&li->list))
atomic_set(&li->active, 0); atomic_set(&li->active, 0);
spin_unlock_bh(&li->lock); spin_unlock(&li->lock);
if (deliver) { if (deliver) {
__do_deliver_interrupt(vcpu, inti); __do_deliver_interrupt(vcpu, inti);
kfree(inti); kfree(inti);
...@@ -758,7 +729,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) ...@@ -758,7 +729,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
if (atomic_read(&li->active)) { if (atomic_read(&li->active)) {
do { do {
deliver = 0; deliver = 0;
spin_lock_bh(&li->lock); spin_lock(&li->lock);
list_for_each_entry_safe(inti, n, &li->list, list) { list_for_each_entry_safe(inti, n, &li->list, list) {
if ((inti->type == KVM_S390_MCHK) && if ((inti->type == KVM_S390_MCHK) &&
__interrupt_is_deliverable(vcpu, inti)) { __interrupt_is_deliverable(vcpu, inti)) {
...@@ -770,7 +741,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) ...@@ -770,7 +741,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
} }
if (list_empty(&li->list)) if (list_empty(&li->list))
atomic_set(&li->active, 0); atomic_set(&li->active, 0);
spin_unlock_bh(&li->lock); spin_unlock(&li->lock);
if (deliver) { if (deliver) {
__do_deliver_interrupt(vcpu, inti); __do_deliver_interrupt(vcpu, inti);
kfree(inti); kfree(inti);
...@@ -817,11 +788,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) ...@@ -817,11 +788,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
spin_lock_bh(&li->lock); spin_lock(&li->lock);
list_add(&inti->list, &li->list); list_add(&inti->list, &li->list);
atomic_set(&li->active, 1); atomic_set(&li->active, 1);
BUG_ON(waitqueue_active(li->wq)); BUG_ON(waitqueue_active(li->wq));
spin_unlock_bh(&li->lock); spin_unlock(&li->lock);
return 0; return 0;
} }
...@@ -842,11 +813,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, ...@@ -842,11 +813,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
inti->type = KVM_S390_PROGRAM_INT; inti->type = KVM_S390_PROGRAM_INT;
memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
spin_lock_bh(&li->lock); spin_lock(&li->lock);
list_add(&inti->list, &li->list); list_add(&inti->list, &li->list);
atomic_set(&li->active, 1); atomic_set(&li->active, 1);
BUG_ON(waitqueue_active(li->wq)); BUG_ON(waitqueue_active(li->wq));
spin_unlock_bh(&li->lock); spin_unlock(&li->lock);
return 0; return 0;
} }
...@@ -934,12 +905,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) ...@@ -934,12 +905,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
} }
dst_vcpu = kvm_get_vcpu(kvm, sigcpu); dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int; li = &dst_vcpu->arch.local_int;
spin_lock_bh(&li->lock); spin_lock(&li->lock);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
if (waitqueue_active(li->wq)) spin_unlock(&li->lock);
wake_up_interruptible(li->wq); kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
kvm_get_vcpu(kvm, sigcpu)->preempted = true;
spin_unlock_bh(&li->lock);
unlock_fi: unlock_fi:
spin_unlock(&fi->lock); spin_unlock(&fi->lock);
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
...@@ -1081,7 +1050,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, ...@@ -1081,7 +1050,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
mutex_lock(&vcpu->kvm->lock); mutex_lock(&vcpu->kvm->lock);
li = &vcpu->arch.local_int; li = &vcpu->arch.local_int;
spin_lock_bh(&li->lock); spin_lock(&li->lock);
if (inti->type == KVM_S390_PROGRAM_INT) if (inti->type == KVM_S390_PROGRAM_INT)
list_add(&inti->list, &li->list); list_add(&inti->list, &li->list);
else else
...@@ -1090,11 +1059,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, ...@@ -1090,11 +1059,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
if (inti->type == KVM_S390_SIGP_STOP) if (inti->type == KVM_S390_SIGP_STOP)
li->action_bits |= ACTION_STOP_ON_STOP; li->action_bits |= ACTION_STOP_ON_STOP;
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
- if (waitqueue_active(&vcpu->wq))
-     wake_up_interruptible(&vcpu->wq);
- vcpu->preempted = true;
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
mutex_unlock(&vcpu->kvm->lock);
+ kvm_s390_vcpu_wakeup(vcpu);
return 0; return 0;
} }
......
...@@ -166,7 +166,9 @@ int kvm_dev_ioctl_check_extension(long ext) ...@@ -166,7 +166,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_ENABLE_CAP_VM:
+ case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
+ case KVM_CAP_MP_STATE:
r = 1;
break; break;
case KVM_CAP_NR_VCPUS: case KVM_CAP_NR_VCPUS:
...@@ -595,7 +597,8 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) ...@@ -595,7 +597,8 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->pp = 0; vcpu->arch.sie_block->pp = 0;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu); kvm_clear_async_pf_completion_queue(vcpu);
- kvm_s390_vcpu_stop(vcpu);
+ if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+     kvm_s390_vcpu_stop(vcpu);
kvm_s390_clear_local_irqs(vcpu); kvm_s390_clear_local_irqs(vcpu);
} }
...@@ -647,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -647,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return rc; return rc;
} }
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
-              (unsigned long) vcpu);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
get_cpu_id(&vcpu->arch.cpu_id); get_cpu_id(&vcpu->arch.cpu_id);
vcpu->arch.cpu_id.version = 0xff; vcpu->arch.cpu_id.version = 0xff;
...@@ -926,7 +927,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) ...@@ -926,7 +927,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{ {
int rc = 0; int rc = 0;
- if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
+ if (!is_vcpu_stopped(vcpu))
rc = -EBUSY; rc = -EBUSY;
else { else {
vcpu->run->psw_mask = psw.mask; vcpu->run->psw_mask = psw.mask;
...@@ -980,13 +981,34 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, ...@@ -980,13 +981,34 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state) struct kvm_mp_state *mp_state)
{ {
- return -EINVAL; /* not implemented yet */
+ /* CHECK_STOP and LOAD are not supported yet */
+ return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
+                                KVM_MP_STATE_OPERATING;
} }
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state) struct kvm_mp_state *mp_state)
{ {
- return -EINVAL; /* not implemented yet */
+ int rc = 0;
+ /* user space knows about this interface - let it control the state */
+ vcpu->kvm->arch.user_cpu_state_ctrl = 1;
+ switch (mp_state->mp_state) {
+ case KVM_MP_STATE_STOPPED:
+     kvm_s390_vcpu_stop(vcpu);
+     break;
+ case KVM_MP_STATE_OPERATING:
+     kvm_s390_vcpu_start(vcpu);
+     break;
+ case KVM_MP_STATE_LOAD:
+ case KVM_MP_STATE_CHECK_STOP:
+     /* fall through - CHECK_STOP and LOAD are not supported yet */
+ default:
+     rc = -ENXIO;
+ }
+ return rc;
} }
bool kvm_s390_cmma_enabled(struct kvm *kvm) bool kvm_s390_cmma_enabled(struct kvm *kvm)
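For reference, these states are driven from userspace through the long-standing KVM_GET_MP_STATE/KVM_SET_MP_STATE vcpu ioctls. A minimal, hypothetical userspace sketch (file-descriptor setup omitted; kvm_fd is the /dev/kvm handle, vcpu_fd a vCPU created with KVM_CREATE_VCPU):

/* Hypothetical sketch: stop and restart a vCPU via MP_STATE. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int stop_and_restart_vcpu(int kvm_fd, int vcpu_fd)
{
	struct kvm_mp_state mp;

	/* Only meaningful where the capability is advertised (now also on s390). */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE) <= 0)
		return -1;

	mp.mp_state = KVM_MP_STATE_STOPPED;
	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
		return -1;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;
	printf("mp_state after stop: %u\n", mp.mp_state);

	mp.mp_state = KVM_MP_STATE_OPERATING;
	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}

Note that, per the hunk above, the first KVM_SET_MP_STATE call also sets user_cpu_state_ctrl, after which the kernel no longer starts stopped vCPUs on its own.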
...@@ -1045,6 +1067,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) ...@@ -1045,6 +1067,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
goto retry; goto retry;
} }
+ /* nothing to do, just clear the request */
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
return 0; return 0;
} }
...@@ -1284,7 +1309,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -1284,7 +1309,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->sigset_active) if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
- kvm_s390_vcpu_start(vcpu);
+ if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
+     kvm_s390_vcpu_start(vcpu);
+ } else if (is_vcpu_stopped(vcpu)) {
+     pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
+                        vcpu->vcpu_id);
+     return -EINVAL;
+ }
switch (kvm_run->exit_reason) { switch (kvm_run->exit_reason) {
case KVM_EXIT_S390_SIEIC: case KVM_EXIT_S390_SIEIC:
...@@ -1413,11 +1444,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) ...@@ -1413,11 +1444,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr); return kvm_s390_store_status_unloaded(vcpu, addr);
} }
- static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
- {
-     return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
- }
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{ {
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
...@@ -1451,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) ...@@ -1451,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
/* Only one cpu at a time may enter/leave the STOPPED state. */ /* Only one cpu at a time may enter/leave the STOPPED state. */
- spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
for (i = 0; i < online_vcpus; i++) { for (i = 0; i < online_vcpus; i++) {
...@@ -1477,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) ...@@ -1477,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
* Let's play safe and flush the VCPU at startup. * Let's play safe and flush the VCPU at startup.
*/ */
vcpu->arch.sie_block->ihcpu = 0xffff; vcpu->arch.sie_block->ihcpu = 0xffff;
- spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return; return;
} }
...@@ -1491,10 +1517,18 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) ...@@ -1491,10 +1517,18 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
/* Only one cpu at a time may enter/leave the STOPPED state. */ /* Only one cpu at a time may enter/leave the STOPPED state. */
- spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+ /* Need to lock access to action_bits to avoid a SIGP race condition */
+ spin_lock(&vcpu->arch.local_int.lock);
atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+ /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
+ vcpu->arch.local_int.action_bits &=
+     ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
+ spin_unlock(&vcpu->arch.local_int.lock);
__disable_ibs_on_vcpu(vcpu); __disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) { for (i = 0; i < online_vcpus; i++) {
...@@ -1512,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) ...@@ -1512,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
__enable_ibs_on_vcpu(started_vcpu); __enable_ibs_on_vcpu(started_vcpu);
} }
- spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return; return;
} }
......
...@@ -45,9 +45,9 @@ do { \ ...@@ -45,9 +45,9 @@ do { \
d_args); \ d_args); \
} while (0) } while (0)
- static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
+ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
- return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
+ return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
}
static inline int kvm_is_ucontrol(struct kvm *kvm) static inline int kvm_is_ucontrol(struct kvm *kvm)
...@@ -129,9 +129,15 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) ...@@ -129,9 +129,15 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu->arch.sie_block->gpsw.mask |= cc << 44; vcpu->arch.sie_block->gpsw.mask |= cc << 44;
} }
+ /* are cpu states controlled by user space */
+ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
+ {
+     return kvm->arch.user_cpu_state_ctrl != 0;
+ }
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
- void kvm_s390_tasklet(unsigned long parm);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
......
...@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) ...@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
} }
- static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+ static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
+ struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
struct kvm_s390_interrupt_info *inti; struct kvm_s390_interrupt_info *inti;
int rc = SIGP_CC_ORDER_CODE_ACCEPTED; int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
...@@ -135,7 +136,13 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) ...@@ -135,7 +136,13 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
return -ENOMEM; return -ENOMEM;
inti->type = KVM_S390_SIGP_STOP; inti->type = KVM_S390_SIGP_STOP;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
+ if (li->action_bits & ACTION_STOP_ON_STOP) {
+     /* another SIGP STOP is pending */
+     kfree(inti);
+     rc = SIGP_CC_BUSY;
+     goto out;
+ }
if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
kfree(inti); kfree(inti);
if ((action & ACTION_STORE_ON_STOP) != 0) if ((action & ACTION_STORE_ON_STOP) != 0)
...@@ -144,19 +151,17 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) ...@@ -144,19 +151,17 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
} }
list_add_tail(&inti->list, &li->list); list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1); atomic_set(&li->active, 1);
- atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
li->action_bits |= action;
+ atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
- if (waitqueue_active(li->wq))
-     wake_up_interruptible(li->wq);
+ kvm_s390_vcpu_wakeup(dst_vcpu);
out: out:
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return rc; return rc;
} }
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{ {
- struct kvm_s390_local_interrupt *li;
struct kvm_vcpu *dst_vcpu = NULL; struct kvm_vcpu *dst_vcpu = NULL;
int rc; int rc;
...@@ -166,9 +171,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) ...@@ -166,9 +171,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu) if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL; return SIGP_CC_NOT_OPERATIONAL;
- li = &dst_vcpu->arch.local_int;
- rc = __inject_sigp_stop(li, action);
+ rc = __inject_sigp_stop(dst_vcpu, action);
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
...@@ -238,7 +242,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, ...@@ -238,7 +242,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
if (!inti) if (!inti)
return SIGP_CC_BUSY; return SIGP_CC_BUSY;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
/* cpu must be in stopped state */ /* cpu must be in stopped state */
if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL; *reg &= 0xffffffff00000000UL;
...@@ -253,13 +257,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, ...@@ -253,13 +257,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
list_add_tail(&inti->list, &li->list); list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1); atomic_set(&li->active, 1);
- if (waitqueue_active(li->wq))
-     wake_up_interruptible(li->wq);
+ kvm_s390_vcpu_wakeup(dst_vcpu);
rc = SIGP_CC_ORDER_CODE_ACCEPTED; rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li: out_li:
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return rc; return rc;
} }
...@@ -275,9 +278,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, ...@@ -275,9 +278,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
if (!dst_vcpu) if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL; return SIGP_CC_NOT_OPERATIONAL;
- spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+ spin_lock(&dst_vcpu->arch.local_int.lock);
flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
- spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+ spin_unlock(&dst_vcpu->arch.local_int.lock);
if (!(flags & CPUSTAT_STOPPED)) { if (!(flags & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL; *reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE; *reg |= SIGP_STATUS_INCORRECT_STATE;
...@@ -338,10 +341,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) ...@@ -338,10 +341,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
if (!dst_vcpu) if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL; return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int; li = &dst_vcpu->arch.local_int;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP)
    rc = SIGP_CC_BUSY;
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return rc; return rc;
} }
...@@ -461,12 +464,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) ...@@ -461,12 +464,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
BUG_ON(dest_vcpu == NULL); BUG_ON(dest_vcpu == NULL);
- spin_lock_bh(&dest_vcpu->arch.local_int.lock);
- if (waitqueue_active(&dest_vcpu->wq))
-     wake_up_interruptible(&dest_vcpu->wq);
- dest_vcpu->preempted = true;
- spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
+ kvm_s390_vcpu_wakeup(dest_vcpu);
kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
return 0; return 0;
} }
......
...@@ -37,6 +37,7 @@ struct x86_instruction_info { ...@@ -37,6 +37,7 @@ struct x86_instruction_info {
u8 modrm_reg; /* index of register used */ u8 modrm_reg; /* index of register used */
u8 modrm_rm; /* rm part of modrm */ u8 modrm_rm; /* rm part of modrm */
u64 src_val; /* value of source operand */ u64 src_val; /* value of source operand */
u64 dst_val; /* value of destination operand */
u8 src_bytes; /* size of source operand */ u8 src_bytes; /* size of source operand */
u8 dst_bytes; /* size of destination operand */ u8 dst_bytes; /* size of destination operand */
u8 ad_bytes; /* size of src/dst address */ u8 ad_bytes; /* size of src/dst address */
...@@ -194,6 +195,7 @@ struct x86_emulate_ops { ...@@ -194,6 +195,7 @@ struct x86_emulate_ops {
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata); int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
void (*halt)(struct x86_emulate_ctxt *ctxt); void (*halt)(struct x86_emulate_ctxt *ctxt);
void (*wbinvd)(struct x86_emulate_ctxt *ctxt); void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
...@@ -231,7 +233,7 @@ struct operand { ...@@ -231,7 +233,7 @@ struct operand {
union { union {
unsigned long val; unsigned long val;
u64 val64; u64 val64;
- char valptr[sizeof(unsigned long) + 2];
+ char valptr[sizeof(sse128_t)];
sse128_t vec_val; sse128_t vec_val;
u64 mm_val; u64 mm_val;
void *data; void *data;
...@@ -240,8 +242,8 @@ struct operand { ...@@ -240,8 +242,8 @@ struct operand {
struct fetch_cache { struct fetch_cache {
u8 data[15]; u8 data[15];
- unsigned long start;
- unsigned long end;
+ u8 *ptr;
+ u8 *end;
}; };
struct read_cache { struct read_cache {
...@@ -286,30 +288,36 @@ struct x86_emulate_ctxt { ...@@ -286,30 +288,36 @@ struct x86_emulate_ctxt {
u8 opcode_len; u8 opcode_len;
u8 b; u8 b;
u8 intercept; u8 intercept;
- u8 lock_prefix;
- u8 rep_prefix;
u8 op_bytes;
u8 ad_bytes;
- u8 rex_prefix;
struct operand src;
struct operand src2;
struct operand dst;
- bool has_seg_override;
- u8 seg_override;
- u64 d;
int (*execute)(struct x86_emulate_ctxt *ctxt); int (*execute)(struct x86_emulate_ctxt *ctxt);
int (*check_perm)(struct x86_emulate_ctxt *ctxt); int (*check_perm)(struct x86_emulate_ctxt *ctxt);
+ /*
+  * The following six fields are cleared together,
+  * the rest are initialized unconditionally in x86_decode_insn
+  * or elsewhere
+  */
+ bool rip_relative;
+ u8 rex_prefix;
+ u8 lock_prefix;
+ u8 rep_prefix;
+ /* bitmaps of registers in _regs[] that can be read */
+ u32 regs_valid;
+ /* bitmaps of registers in _regs[] that have been written */
+ u32 regs_dirty;
/* modrm */ /* modrm */
u8 modrm; u8 modrm;
u8 modrm_mod; u8 modrm_mod;
u8 modrm_reg; u8 modrm_reg;
u8 modrm_rm; u8 modrm_rm;
u8 modrm_seg; u8 modrm_seg;
- bool rip_relative;
+ u8 seg_override;
+ u64 d;
unsigned long _eip;
struct operand memop;
- u32 regs_valid; /* bitmaps of registers in _regs[] that can be read */
- u32 regs_dirty; /* bitmaps of registers in _regs[] that have been written */
/* Fields above regs are cleared together. */ /* Fields above regs are cleared together. */
unsigned long _regs[NR_VCPU_REGS]; unsigned long _regs[NR_VCPU_REGS];
struct operand *memopp; struct operand *memopp;
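The reordering above groups every per-instruction field that must start out as zero into one contiguous run, so the decode path can wipe them with a single memset instead of many assignments. A simplified, stand-alone illustration of that layout trick; the struct and field names below are a stand-in, not the real x86_emulate_ctxt:

#include <stddef.h>
#include <string.h>

struct decode_cache {
	int keep_me;		/* initialized elsewhere, survives the reset */
	/* ---- everything from here on is cleared as a block ---- */
	int rip_relative;
	unsigned char rex_prefix;
	unsigned char lock_prefix;
	unsigned char rep_prefix;
	unsigned int regs_valid;
	unsigned int regs_dirty;
};

/* Clear the whole tail of the struct in one call. */
static void reset_decode_cache(struct decode_cache *c)
{
	memset(&c->rip_relative, 0,
	       sizeof(*c) - offsetof(struct decode_cache, rip_relative));
}

int main(void)
{
	struct decode_cache c = { .keep_me = 42, .rep_prefix = 3, .regs_dirty = 0xf };

	reset_decode_cache(&c);
	return (c.keep_me == 42 && c.rep_prefix == 0 && c.regs_dirty == 0) ? 0 : 1;
}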
...@@ -407,6 +415,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt); ...@@ -407,6 +415,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_OK 0 #define EMULATION_OK 0
#define EMULATION_RESTART 1 #define EMULATION_RESTART 1
#define EMULATION_INTERCEPTED 2 #define EMULATION_INTERCEPTED 2
void init_decode_cache(struct x86_emulate_ctxt *ctxt);
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt, int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason, u16 tss_selector, int idt_index, int reason,
......
...@@ -152,14 +152,16 @@ enum { ...@@ -152,14 +152,16 @@ enum {
#define DR6_BD (1 << 13) #define DR6_BD (1 << 13)
#define DR6_BS (1 << 14) #define DR6_BS (1 << 14)
- #define DR6_FIXED_1 0xffff0ff0
- #define DR6_VOLATILE 0x0000e00f
+ #define DR6_RTM (1 << 16)
+ #define DR6_FIXED_1 0xfffe0ff0
+ #define DR6_INIT 0xffff0ff0
+ #define DR6_VOLATILE 0x0001e00f
#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
#define DR7_GD (1 << 13)
#define DR7_FIXED_1 0x00000400
- #define DR7_VOLATILE 0xffff23ff
+ #define DR7_VOLATILE 0xffff2bff
/* apic attention bits */ /* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0 #define KVM_APIC_CHECK_VAPIC 0
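DR6 bit 16 (RTM) reads as 1 on processors without TSX and becomes a meaningful, clearable status bit only when RTM is exposed, which is why it moves out of FIXED_1 and into VOLATILE here. A small stand-alone illustration of how such masks are typically applied on a guest write; the combination rule shown is illustrative, not the exact KVM code:

#include <stdio.h>

#define DR6_RTM       (1u << 16)
#define DR6_FIXED_1   0xfffe0ff0u
#define DR6_INIT      0xffff0ff0u
#define DR6_VOLATILE  0x0001e00fu

int main(void)
{
	unsigned int guest_write = 0x4001;	/* B0 and BS set by the guest, RTM left clear */
	unsigned int dr6;

	/* Keep only architecturally writable bits, force the fixed-to-1 bits. */
	dr6 = (guest_write & DR6_VOLATILE) | DR6_FIXED_1;
	printf("DR6 reset value %#x, after guest write %#x\n", DR6_INIT, dr6);
	return 0;
}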
...@@ -448,7 +450,7 @@ struct kvm_vcpu_arch { ...@@ -448,7 +450,7 @@ struct kvm_vcpu_arch {
u64 tsc_offset_adjustment; u64 tsc_offset_adjustment;
u64 this_tsc_nsec; u64 this_tsc_nsec;
u64 this_tsc_write; u64 this_tsc_write;
- u8 this_tsc_generation;
+ u64 this_tsc_generation;
bool tsc_catchup; bool tsc_catchup;
bool tsc_always_catchup; bool tsc_always_catchup;
s8 virtual_tsc_shift; s8 virtual_tsc_shift;
...@@ -591,7 +593,7 @@ struct kvm_arch { ...@@ -591,7 +593,7 @@ struct kvm_arch {
u64 cur_tsc_nsec; u64 cur_tsc_nsec;
u64 cur_tsc_write; u64 cur_tsc_write;
u64 cur_tsc_offset; u64 cur_tsc_offset;
- u8 cur_tsc_generation;
+ u64 cur_tsc_generation;
int nr_vcpus_matched_tsc; int nr_vcpus_matched_tsc;
spinlock_t pvclock_gtod_sync_lock; spinlock_t pvclock_gtod_sync_lock;
...@@ -717,7 +719,7 @@ struct kvm_x86_ops { ...@@ -717,7 +719,7 @@ struct kvm_x86_ops {
int (*handle_exit)(struct kvm_vcpu *vcpu); int (*handle_exit)(struct kvm_vcpu *vcpu);
void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
- u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
+ u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
void (*patch_hypercall)(struct kvm_vcpu *vcpu, void (*patch_hypercall)(struct kvm_vcpu *vcpu,
unsigned char *hypercall_addr); unsigned char *hypercall_addr);
void (*set_irq)(struct kvm_vcpu *vcpu); void (*set_irq)(struct kvm_vcpu *vcpu);
...@@ -1070,6 +1072,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu); ...@@ -1070,6 +1072,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr); bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data); int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data); int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu); void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu); void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
......
...@@ -51,6 +51,9 @@ ...@@ -51,6 +51,9 @@
#define CPU_BASED_MONITOR_EXITING 0x20000000 #define CPU_BASED_MONITOR_EXITING 0x20000000
#define CPU_BASED_PAUSE_EXITING 0x40000000 #define CPU_BASED_PAUSE_EXITING 0x40000000
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172
/* /*
* Definitions of Secondary Processor-Based VM-Execution Controls. * Definitions of Secondary Processor-Based VM-Execution Controls.
*/ */
...@@ -76,7 +79,7 @@ ...@@ -76,7 +79,7 @@
#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016
- #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000002
+ #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
...@@ -89,7 +92,7 @@ ...@@ -89,7 +92,7 @@
#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
- #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000002
+ #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
#define VM_ENTRY_IA32E_MODE 0x00000200 #define VM_ENTRY_IA32E_MODE 0x00000200
#define VM_ENTRY_SMM 0x00000400 #define VM_ENTRY_SMM 0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
......
...@@ -23,7 +23,10 @@ ...@@ -23,7 +23,10 @@
#define GP_VECTOR 13 #define GP_VECTOR 13
#define PF_VECTOR 14 #define PF_VECTOR 14
#define MF_VECTOR 16 #define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18 #define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20
/* Select x86 specific features in <linux/kvm.h> */ /* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT #define __KVM_HAVE_PIT
......
...@@ -558,6 +558,7 @@ ...@@ -558,6 +558,7 @@
/* VMX_BASIC bits and bitmasks */ /* VMX_BASIC bits and bitmasks */
#define VMX_BASIC_VMCS_SIZE_SHIFT 32 #define VMX_BASIC_VMCS_SIZE_SHIFT 32
#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
#define VMX_BASIC_64 0x0001000000000000LLU #define VMX_BASIC_64 0x0001000000000000LLU
#define VMX_BASIC_MEM_TYPE_SHIFT 50 #define VMX_BASIC_MEM_TYPE_SHIFT 50
#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU #define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
......
...@@ -95,4 +95,12 @@ static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu) ...@@ -95,4 +95,12 @@ static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
return best && (best->edx & bit(X86_FEATURE_GBPAGES)); return best && (best->edx & bit(X86_FEATURE_GBPAGES));
} }
static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
#endif #endif
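For comparison, the equivalent host-side probe of the same CPUID bit (leaf 7, subleaf 0, EBX bit 11) using the compiler's cpuid.h helpers; this is only an illustration of the bit being tested, not code from the patch:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0, 0) < 7)
		return 1;
	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	printf("RTM %ssupported on this host\n", (ebx & (1u << 11)) ? "" : "not ");
	return 0;
}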
This diff is collapsed.
...@@ -1451,7 +1451,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) ...@@ -1451,7 +1451,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
vcpu->arch.apic_arb_prio = 0; vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0; vcpu->arch.apic_attention = 0;
- apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
+ apic_debug("%s: vcpu=%p, id=%d, base_msr="
"0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
vcpu, kvm_apic_id(apic), vcpu, kvm_apic_id(apic),
vcpu->arch.apic_base, apic->base_address); vcpu->arch.apic_base, apic->base_address);
...@@ -1895,7 +1895,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -1895,7 +1895,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
/* evaluate pending_events before reading the vector */ /* evaluate pending_events before reading the vector */
smp_rmb(); smp_rmb();
sipi_vector = apic->sipi_vector; sipi_vector = apic->sipi_vector;
pr_debug("vcpu %d received sipi with vector # %x\n", apic_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, sipi_vector); vcpu->vcpu_id, sipi_vector);
kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
__entry->unsync = sp->unsync; __entry->unsync = sp->unsync;
#define KVM_MMU_PAGE_PRINTK() ({ \ #define KVM_MMU_PAGE_PRINTK() ({ \
const char *ret = trace_seq_buffer_ptr(p); \ const u32 saved_len = p->len; \
static const char *access_str[] = { \ static const char *access_str[] = { \
"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
}; \ }; \
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
role.nxe ? "" : "!", \ role.nxe ? "" : "!", \
__entry->root_count, \ __entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \ __entry->unsync ? "unsync" : "sync", 0); \
ret; \ p->buffer + saved_len; \
}) })
#define kvm_mmu_trace_pferr_flags \ #define kvm_mmu_trace_pferr_flags \
......
...@@ -428,6 +428,15 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -428,6 +428,15 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1; return 1;
} }
int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
bool fixed = pmc & (1u << 30);
pmc &= ~(3u << 30);
return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
(fixed && pmc >= pmu->nr_arch_fixed_counters);
}
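The encoding validated here is the RDPMC convention: bit 30 of the index selects the fixed-counter bank and the low bits pick a counter within that bank. A stand-alone sketch of the same bounds check, with made-up counter counts standing in for the vPMU's real limits:

#include <stdbool.h>
#include <stdio.h>

/* Example limits only; the real values come from CPUID leaf 0xA / the vPMU setup. */
#define NR_GP_COUNTERS    4
#define NR_FIXED_COUNTERS 3

/* Mirrors the check above: returns true when the RDPMC index is out of range. */
static bool pmc_index_invalid(unsigned int idx)
{
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);
	return (!fixed && idx >= NR_GP_COUNTERS) ||
	       (fixed && idx >= NR_FIXED_COUNTERS);
}

int main(void)
{
	printf("%d %d %d\n",
	       pmc_index_invalid(3),			/* 0: GP counter 3 exists */
	       pmc_index_invalid(1u << 30),		/* 0: fixed counter 0 exists */
	       pmc_index_invalid((1u << 30) | 5));	/* 1: fixed counter 5 does not */
	return 0;
}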
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{ {
struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_pmu *pmu = &vcpu->arch.pmu;
......
...@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info) ...@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info)
return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
} }
- static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+ static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
u32 ret = 0; u32 ret = 0;
if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
- ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
+ ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
- return ret & mask;
+ return ret;
} }
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
...@@ -1415,7 +1415,16 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, ...@@ -1415,7 +1415,16 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
- var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
+ /*
+  * AMD CPUs circa 2014 track the G bit for all segments except CS.
+  * However, the SVM spec states that the G bit is not observed by the
+  * CPU, and some VMware virtual CPUs drop the G bit for all segments.
+  * So let's synthesize a legal G bit for all segments, this helps
+  * running KVM nested. It also helps cross-vendor migration, because
+  * Intel's vmentry has a check on the 'G' bit.
+  */
+ var->g = s->limit > 0xfffff;
/* /*
* AMD's VMCB does not have an explicit unusable field, so emulate it * AMD's VMCB does not have an explicit unusable field, so emulate it
...@@ -1424,14 +1433,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, ...@@ -1424,14 +1433,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->unusable = !var->present || (var->type == 0); var->unusable = !var->present || (var->type == 0);
switch (seg) { switch (seg) {
- case VCPU_SREG_CS:
-     /*
-      * SVM always stores 0 for the 'G' bit in the CS selector in
-      * the VMCB on a VMEXIT. This hurts cross-vendor migration:
-      * Intel's VMENTRY has a check on the 'G' bit.
-      */
-     var->g = s->limit > 0xfffff;
-     break;
case VCPU_SREG_TR: case VCPU_SREG_TR:
/* /*
* Work around a bug where the busy flag in the tr selector * Work around a bug where the busy flag in the tr selector
...@@ -2116,22 +2117,27 @@ static void nested_svm_unmap(struct page *page) ...@@ -2116,22 +2117,27 @@ static void nested_svm_unmap(struct page *page)
static int nested_svm_intercept_ioio(struct vcpu_svm *svm) static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{ {
- unsigned port;
- u8 val, bit;
+ unsigned port, size, iopm_len;
+ u16 val, mask;
+ u8 start_bit;
u64 gpa; u64 gpa;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
return NESTED_EXIT_HOST; return NESTED_EXIT_HOST;
port = svm->vmcb->control.exit_info_1 >> 16; port = svm->vmcb->control.exit_info_1 >> 16;
+ size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
+        SVM_IOIO_SIZE_SHIFT;
gpa = svm->nested.vmcb_iopm + (port / 8); gpa = svm->nested.vmcb_iopm + (port / 8);
- bit = port % 8;
- val = 0;
+ start_bit = port % 8;
+ iopm_len = (start_bit + size > 8) ? 2 : 1;
+ mask = (0xf >> (4 - size)) << start_bit;
+ val = 0;
- if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
-     val &= (1 << bit);
- return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
+ if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+     return NESTED_EXIT_DONE;
+ return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
} }
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
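The rewritten check reads the nested guest's I/O permission map the way hardware does: one bit per port, size consecutive bits per access, and a two-byte read when the access straddles a byte boundary. A stand-alone sketch of the same arithmetic against a made-up bitmap; a little-endian host is assumed, as on x86:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns nonzero if any bit covered by the access is set in the permission map. */
static int ioio_intercepted(const uint8_t *iopm, unsigned int port, unsigned int size)
{
	unsigned int start_bit = port % 8;
	unsigned int iopm_len = (start_bit + size > 8) ? 2 : 1;
	uint16_t mask = (0xf >> (4 - size)) << start_bit;
	uint16_t val = 0;

	memcpy(&val, iopm + port / 8, iopm_len);	/* little-endian host assumed */
	return (val & mask) != 0;
}

int main(void)
{
	static uint8_t iopm[0x10000 / 8 + 1];

	iopm[0x71 / 8] |= 1 << (0x71 % 8);	/* the L1 guest intercepts port 0x71 */

	printf("%d\n", ioio_intercepted(iopm, 0x70, 1));	/* 0: byte access to 0x70 only */
	printf("%d\n", ioio_intercepted(iopm, 0x70, 2));	/* 1: word access also touches 0x71 */
	return 0;
}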
...@@ -4205,7 +4211,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, ...@@ -4205,7 +4211,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
if (info->intercept == x86_intercept_cr_write) if (info->intercept == x86_intercept_cr_write)
icpt_info.exit_code += info->modrm_reg; icpt_info.exit_code += info->modrm_reg;
- if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+ if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
+     info->intercept == x86_intercept_clts)
break; break;
intercept = svm->nested.intercept; intercept = svm->nested.intercept;
...@@ -4250,14 +4257,14 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, ...@@ -4250,14 +4257,14 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
u64 exit_info; u64 exit_info;
u32 bytes; u32 bytes;
exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
if (info->intercept == x86_intercept_in || if (info->intercept == x86_intercept_in ||
info->intercept == x86_intercept_ins) { info->intercept == x86_intercept_ins) {
exit_info |= SVM_IOIO_TYPE_MASK; exit_info = ((info->src_val & 0xffff) << 16) |
bytes = info->src_bytes; SVM_IOIO_TYPE_MASK;
} else {
bytes = info->dst_bytes; bytes = info->dst_bytes;
} else {
exit_info = (info->dst_val & 0xffff) << 16;
bytes = info->src_bytes;
} }
if (info->intercept == x86_intercept_outs || if (info->intercept == x86_intercept_outs ||
......
...@@ -721,10 +721,10 @@ TRACE_EVENT(kvm_emulate_insn, ...@@ -721,10 +721,10 @@ TRACE_EVENT(kvm_emulate_insn,
), ),
TP_fast_assign( TP_fast_assign(
- __entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
- __entry->len = vcpu->arch.emulate_ctxt._eip
-     - vcpu->arch.emulate_ctxt.fetch.start;
+ __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
+     - vcpu->arch.emulate_ctxt.fetch.data;
+ __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
memcpy(__entry->insn, memcpy(__entry->insn,
vcpu->arch.emulate_ctxt.fetch.data, vcpu->arch.emulate_ctxt.fetch.data,
15); 15);
......
This diff is collapsed.
This diff is collapsed.
...@@ -47,6 +47,16 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu) ...@@ -47,6 +47,16 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
#endif #endif
} }
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
int cs_db, cs_l;
if (!is_long_mode(vcpu))
return false;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
return cs_l;
}
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{ {
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
...@@ -108,6 +118,23 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) ...@@ -108,6 +118,23 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return false; return false;
} }
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
unsigned long val = kvm_register_read(vcpu, reg);
return is_64_bit_mode(vcpu) ? val : (u32)val;
}
static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
enum kvm_reg reg,
unsigned long val)
{
if (!is_64_bit_mode(vcpu))
val = (u32)val;
return kvm_register_write(vcpu, reg, val);
}
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
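A small illustration of the truncation rule the new kvm_register_readl/kvm_register_writel helpers encode: outside 64-bit mode only the low 32 bits of a general-purpose register are architecturally visible, so values read for emulation must be masked. The mode check below is a hard-coded stand-in for what is_64_bit_mode() derives from EFER.LMA and CS.L inside KVM:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Stand-in for is_64_bit_mode(vcpu). */
static bool is_64_bit_mode(void)
{
	return false;	/* pretend the guest runs in 32-bit protected mode */
}

static uint64_t register_readl(uint64_t raw)
{
	return is_64_bit_mode() ? raw : (uint32_t)raw;
}

int main(void)
{
	uint64_t rcx = 0xdeadbeef00000002ULL;	/* stale upper half plus ECX = 2 */

	printf("value seen by the emulated instruction: %#llx\n",
	       (unsigned long long)register_readl(rcx));
	return 0;
}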
......
...@@ -399,13 +399,18 @@ struct kvm_vapic_addr { ...@@ -399,13 +399,18 @@ struct kvm_vapic_addr {
__u64 vapic_addr; __u64 vapic_addr;
}; };
- /* for KVM_SET_MPSTATE */
+ /* for KVM_SET_MP_STATE */
+ /* not all states are valid on all architectures */
#define KVM_MP_STATE_RUNNABLE 0
#define KVM_MP_STATE_UNINITIALIZED 1
#define KVM_MP_STATE_INIT_RECEIVED 2
#define KVM_MP_STATE_HALTED 3
#define KVM_MP_STATE_SIPI_RECEIVED 4
+ #define KVM_MP_STATE_STOPPED 5
+ #define KVM_MP_STATE_CHECK_STOP 6
+ #define KVM_MP_STATE_OPERATING 7
+ #define KVM_MP_STATE_LOAD 8
struct kvm_mp_state { struct kvm_mp_state {
__u32 mp_state; __u32 mp_state;
......
...@@ -254,10 +254,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap, ...@@ -254,10 +254,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
spin_lock(&ioapic->lock); spin_lock(&ioapic->lock);
for (index = 0; index < IOAPIC_NUM_PINS; index++) { for (index = 0; index < IOAPIC_NUM_PINS; index++) {
e = &ioapic->redirtbl[index]; e = &ioapic->redirtbl[index];
- if (!e->fields.mask &&
-     (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
-      kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
-          index) || index == RTC_GSI)) {
+ if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+     kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
+     index == RTC_GSI) {
if (kvm_apic_match_dest(vcpu, NULL, 0, if (kvm_apic_match_dest(vcpu, NULL, 0,
e->fields.dest_id, e->fields.dest_mode)) { e->fields.dest_id, e->fields.dest_mode)) {
__set_bit(e->fields.vector, __set_bit(e->fields.vector,
......
...@@ -323,13 +323,13 @@ int kvm_set_routing_entry(struct kvm_irq_routing_table *rt, ...@@ -323,13 +323,13 @@ int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
#define IOAPIC_ROUTING_ENTRY(irq) \ #define IOAPIC_ROUTING_ENTRY(irq) \
{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
- .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
+ .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq) #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
#ifdef CONFIG_X86 #ifdef CONFIG_X86
# define PIC_ROUTING_ENTRY(irq) \ # define PIC_ROUTING_ENTRY(irq) \
{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
- .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
+ .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
# define ROUTING_ENTRY2(irq) \ # define ROUTING_ENTRY2(irq) \
IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq) IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else #else
......