Commit 715958f9 authored by Radim Krčmář's avatar Radim Krčmář

Merge tag 'kvm_mips_4.12_1' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/kvm-mips

From: James Hogan <james.hogan@imgtec.com>

KVM: MIPS: VZ support, Octeon III, and TLBR

Add basic support for the MIPS Virtualization Module (generally known as
MIPS VZ) in KVM. We primarily support the ImgTec P5600, P6600, I6400,
and Cavium Octeon III cores so far. Support is included for the
following VZ / guest hardware features:
- MIPS32 and MIPS64, r5 (VZ requires r5 or later) and r6
- TLBs with GuestID (IMG cores) or Root ASID Dealias (Octeon III)
- Shared physical root/guest TLB (IMG cores)
- FPU / MSA
- Cop0 timer (up to 1GHz for now due to soft timer limit)
- Segmentation control (EVA)
- Hardware page table walker (HTW) both for root and guest TLB

Also included is a proper implementation of the TLBR instruction for the
trap & emulate MIPS KVM implementation.

Preliminary MIPS architecture changes are applied directly with Ralf's
ack.
parents e55fe3cc dc44abd6
@@ -115,12 +115,17 @@ will access the virtual machine's physical address space; offset zero
 corresponds to guest physical address zero. Use of mmap() on a VM fd
 is discouraged if userspace memory allocation (KVM_CAP_USER_MEMORY) is
 available.

-You most certainly want to use 0 as machine type.
+You probably want to use 0 as machine type.

 In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
+
+To use hardware assisted virtualization on MIPS (VZ ASE) rather than
+the default trap & emulate implementation (which changes the virtual
+memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
+flag KVM_VM_MIPS_VZ.
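(An illustrative aside, not part of the patch: the userspace flow this implies,
sketched with error handling elided.)

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int create_mips_vm(void)
    {
            int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
            int type = 0;   /* default machine type */

            /* Use the VZ ASE when the host kernel offers it */
            if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) == 1)
                    type = KVM_VM_MIPS_VZ;

            return ioctl(kvm, KVM_CREATE_VM, type);
    }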
 4.3 KVM_GET_MSR_INDEX_LIST

@@ -2068,11 +2073,23 @@ registers, find a list below:
  MIPS | KVM_REG_MIPS_CP0_ENTRYLO0 | 64
  MIPS | KVM_REG_MIPS_CP0_ENTRYLO1 | 64
  MIPS | KVM_REG_MIPS_CP0_CONTEXT | 64
+ MIPS | KVM_REG_MIPS_CP0_CONTEXTCONFIG | 32
  MIPS | KVM_REG_MIPS_CP0_USERLOCAL | 64
+ MIPS | KVM_REG_MIPS_CP0_XCONTEXTCONFIG | 64
  MIPS | KVM_REG_MIPS_CP0_PAGEMASK | 32
+ MIPS | KVM_REG_MIPS_CP0_PAGEGRAIN | 32
+ MIPS | KVM_REG_MIPS_CP0_SEGCTL0 | 64
+ MIPS | KVM_REG_MIPS_CP0_SEGCTL1 | 64
+ MIPS | KVM_REG_MIPS_CP0_SEGCTL2 | 64
+ MIPS | KVM_REG_MIPS_CP0_PWBASE | 64
+ MIPS | KVM_REG_MIPS_CP0_PWFIELD | 64
+ MIPS | KVM_REG_MIPS_CP0_PWSIZE | 64
  MIPS | KVM_REG_MIPS_CP0_WIRED | 32
+ MIPS | KVM_REG_MIPS_CP0_PWCTL | 32
  MIPS | KVM_REG_MIPS_CP0_HWRENA | 32
  MIPS | KVM_REG_MIPS_CP0_BADVADDR | 64
+ MIPS | KVM_REG_MIPS_CP0_BADINSTR | 32
+ MIPS | KVM_REG_MIPS_CP0_BADINSTRP | 32
  MIPS | KVM_REG_MIPS_CP0_COUNT | 32
  MIPS | KVM_REG_MIPS_CP0_ENTRYHI | 64
  MIPS | KVM_REG_MIPS_CP0_COMPARE | 32
@@ -2089,6 +2106,7 @@ registers, find a list below:
  MIPS | KVM_REG_MIPS_CP0_CONFIG4 | 32
  MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32
  MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32
+ MIPS | KVM_REG_MIPS_CP0_XCONTEXT | 64
  MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64
  MIPS | KVM_REG_MIPS_CP0_KSCRATCH1 | 64
  MIPS | KVM_REG_MIPS_CP0_KSCRATCH2 | 64
@@ -2096,6 +2114,7 @@ registers, find a list below:
  MIPS | KVM_REG_MIPS_CP0_KSCRATCH4 | 64
  MIPS | KVM_REG_MIPS_CP0_KSCRATCH5 | 64
  MIPS | KVM_REG_MIPS_CP0_KSCRATCH6 | 64
+ MIPS | KVM_REG_MIPS_CP0_MAAR(0..63) | 64
  MIPS | KVM_REG_MIPS_COUNT_CTL | 64
  MIPS | KVM_REG_MIPS_COUNT_RESUME | 64
  MIPS | KVM_REG_MIPS_COUNT_HZ | 64
@@ -2162,6 +2181,10 @@ hardware, host kernel, guest, and whether XPA is present in the guest, i.e.
 with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
 the PFNX field starting at bit 30.

+MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
+patterns:
+  0x7030 0000 0001 01 <reg:8>
+
 MIPS KVM control registers (see above) have the following id bit patterns:
   0x7030 0000 0002 <reg:16>
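(Aside, not part of the patch: reading the MAAR pattern above, the id is the
common 0x7030 0000 prefix (KVM_REG_MIPS | KVM_REG_SIZE_U64) ORed with the CP0
set (1 << 16), the MAAR sub-set (1 << 8) and the MAAR index, so index 2 would
be encoded as:

  0x7030 0000 0001 0102   /* KVM_REG_MIPS_CP0_MAAR(2) */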
@@ -4210,3 +4233,68 @@ This capability, if KVM_CHECK_EXTENSION indicates that it is
 available, means that the kernel can support guests using the
 hashed page table MMU defined in Power ISA V3.00 (as implemented in
 the POWER9 processor), including in-memory segment tables.
+8.5 KVM_CAP_MIPS_VZ
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that full hardware assisted virtualization capabilities
+of the hardware are available for use through KVM. An appropriate
+KVM_VM_MIPS_* type must be passed to KVM_CREATE_VM to create a VM which
+utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using full hardware assisted virtualization
+capabilities of the hardware. This is useful to check after creating a VM with
+KVM_VM_MIPS_DEFAULT.
+
+The value returned by KVM_CHECK_EXTENSION should be compared against known
+values (see below). All other values are reserved. This is to allow for the
+possibility of other hardware assisted virtualization implementations which
+may be incompatible with the MIPS VZ ASE.
+
+ 0: The trap & emulate implementation is in use to run guest code in user
+    mode. Guest virtual memory segments are rearranged to fit the guest in the
+    user mode address space.
+
+ 1: The MIPS VZ ASE is in use, providing full hardware assisted
+    virtualization, including standard guest virtual memory segments.
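(Aside, not part of the patch: a sketch of the check described above on a VM
created with KVM_VM_MIPS_DEFAULT; vmfd is assumed to be the fd returned by
KVM_CREATE_VM.)

    switch (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ)) {
    case 0:
            /* trap & emulate: guest segments rearranged into user mode */
            break;
    case 1:
            /* MIPS VZ ASE: full hardware assisted virtualization */
            break;
    default:
            /* reserved: some other, possibly incompatible, implementation */
            break;
    }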
+8.6 KVM_CAP_MIPS_TE
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that the trap & emulate implementation is available to
+run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
+assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
+to KVM_CREATE_VM to create a VM which utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using trap & emulate.
+8.7 KVM_CAP_MIPS_64BIT
+
+Architectures: mips
+
+This capability indicates the supported architecture type of the guest, i.e.
+the supported register and address width.
+
+The values returned when this capability is checked by KVM_CHECK_EXTENSION on
+a kvm VM handle correspond roughly to the CP0_Config.AT register field, and
+should be checked specifically against known values (see below). All other
+values are reserved.
+
+ 0: MIPS32 or microMIPS32.
+    Both registers and addresses are 32-bits wide.
+    It will only be possible to run 32-bit guest code.
+
+ 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
+    Registers are 64-bits wide, but addresses are 32-bits wide.
+    64-bit guest code may run but cannot access MIPS64 memory segments.
+    It will also be possible to run 32-bit guest code.
+
+ 2: MIPS64 or microMIPS64 with access to all address segments.
+    Both registers and addresses are 64-bits wide.
+    It will be possible to run 64-bit or 32-bit guest code.
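(Aside, not part of the patch: the same check pattern applied to this
capability, again with vmfd assumed to come from KVM_CREATE_VM.)

    switch (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_64BIT)) {
    case 0: /* 32-bit registers and addresses: load a MIPS32 guest */
            break;
    case 1: /* 64-bit registers, but no access to 64-bit segments */
            break;
    case 2: /* full MIPS64: 64-bit guest code and segments are usable */
            break;
    }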
@@ -28,6 +28,11 @@ S390:
  property inside the device tree's /hypervisor node.
  For more information refer to Documentation/virtual/kvm/ppc-pv.txt

+MIPS:
+  KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
+  number in $2 (v0). Up to four arguments may be placed in $4-$7 (a0-a3) and
+  the return value is placed in $2 (v0).
+
 KVM Hypercalls Documentation
 ===========================

 The template for each hypercall is:
......
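(An illustrative aside, not part of the patch: how a guest could issue a
hypercall under the MIPS ABI above. The HYPCALL mnemonic may not be known to
older assemblers, so the raw encoding is emitted instead; the word used here,
COP0 | CO | funct 0x28 with a zero code field, is my reading of the VZ
encoding.)

    static inline unsigned long mips_hypercall1(unsigned long num,
                                                unsigned long arg0)
    {
            register unsigned long v0 asm("$2") = num;  /* hypercall number */
            register unsigned long a0 asm("$4") = arg0; /* first argument */

            asm volatile(".word 0x42000028"     /* hypcall, code 0 */
                         : "+r"(v0)
                         : "r"(a0)
                         : "memory");

            return v0;                          /* return value in $2 */
    }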
@@ -1687,6 +1687,7 @@ config CPU_CAVIUM_OCTEON
         select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
         select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
         select MIPS_L1_CACHE_SHIFT_7
+        select HAVE_KVM
         help
           The Cavium Octeon processor is a highly integrated chip containing
           many ethernet hardware widgets for networking tasks. The processor
......
@@ -444,6 +444,10 @@
 # define cpu_has_msa 0
 #endif
+#ifndef cpu_has_ufr
+# define cpu_has_ufr (cpu_data[0].options & MIPS_CPU_UFR)
+#endif
 #ifndef cpu_has_fre
 # define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
 #endif
@@ -528,6 +532,9 @@
 #ifndef cpu_guest_has_htw
 #define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
 #endif
+#ifndef cpu_guest_has_mvh
+#define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH)
+#endif
 #ifndef cpu_guest_has_msa
 #define cpu_guest_has_msa (cpu_data[0].guest.ases & MIPS_ASE_MSA)
 #endif
@@ -543,6 +550,9 @@
 #ifndef cpu_guest_has_maar
 #define cpu_guest_has_maar (cpu_data[0].guest.options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_guest_has_userlocal
+#define cpu_guest_has_userlocal (cpu_data[0].guest.options & MIPS_CPU_ULRI)
+#endif
 /*
  * Guest dynamic capabilities
......
@@ -33,6 +33,7 @@ struct guest_info {
         unsigned long ases_dyn;
         unsigned long long options;
         unsigned long long options_dyn;
+        int tlbsize;
         u8 conf;
         u8 kscratch_mask;
 };
@@ -109,6 +110,7 @@ struct cpuinfo_mips {
         struct guest_info guest;
         unsigned int gtoffset_mask;
         unsigned int guestid_mask;
+        unsigned int guestid_cache;
 } __attribute__((aligned(SMP_CACHE_BYTES)));

 extern struct cpuinfo_mips cpu_data[];
......
@@ -415,6 +415,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_GUESTCTL2 MBIT_ULL(50) /* CPU has VZ GuestCtl2 register */
 #define MIPS_CPU_GUESTID MBIT_ULL(51) /* CPU uses VZ ASE GuestID feature */
 #define MIPS_CPU_DRG MBIT_ULL(52) /* CPU has VZ Direct Root to Guest (DRG) */
+#define MIPS_CPU_UFR MBIT_ULL(53) /* CPU supports User mode FR switching */

 /*
  * CPU ASE encodings
......
@@ -10,6 +10,7 @@
 #ifndef __MIPS_KVM_HOST_H__
 #define __MIPS_KVM_HOST_H__

+#include <linux/cpumask.h>
 #include <linux/mutex.h>
 #include <linux/hrtimer.h>
 #include <linux/interrupt.h>
@@ -33,12 +34,23 @@
 #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
 #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
 #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_CONTEXTCONFIG MIPS_CP0_32(4, 1)
 #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG MIPS_CP0_64(4, 3)
 #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
 #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_SEGCTL0 MIPS_CP0_64(5, 2)
+#define KVM_REG_MIPS_CP0_SEGCTL1 MIPS_CP0_64(5, 3)
+#define KVM_REG_MIPS_CP0_SEGCTL2 MIPS_CP0_64(5, 4)
+#define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
+#define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
+#define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
 #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
 #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
 #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_BADINSTR MIPS_CP0_32(8, 1)
+#define KVM_REG_MIPS_CP0_BADINSTRP MIPS_CP0_32(8, 2)
 #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
 #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
 #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
@@ -55,6 +67,7 @@
 #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
 #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
 #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
 #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
 #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
 #define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
@@ -73,6 +86,11 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HALT_POLL_NS_DEFAULT 500000

+#ifdef CONFIG_KVM_MIPS_VZ
+extern unsigned long GUESTID_MASK;
+extern unsigned long GUESTID_FIRST_VERSION;
+extern unsigned long GUESTID_VERSION_MASK;
+#endif

 /*
@@ -145,6 +163,16 @@ struct kvm_vcpu_stat {
         u64 fpe_exits;
         u64 msa_disabled_exits;
         u64 flush_dcache_exits;
+#ifdef CONFIG_KVM_MIPS_VZ
+        u64 vz_gpsi_exits;
+        u64 vz_gsfc_exits;
+        u64 vz_hc_exits;
+        u64 vz_grr_exits;
+        u64 vz_gva_exits;
+        u64 vz_ghfc_exits;
+        u64 vz_gpa_exits;
+        u64 vz_resvd_exits;
+#endif
         u64 halt_successful_poll;
         u64 halt_attempted_poll;
         u64 halt_poll_invalid;
@@ -157,6 +185,8 @@ struct kvm_arch_memory_slot {
 struct kvm_arch {
         /* Guest physical mm */
         struct mm_struct gpa_mm;
+        /* Mask of CPUs needing GPA ASID flush */
+        cpumask_t asid_flush_mask;
 };

 #define N_MIPS_COPROC_REGS 32
@@ -214,6 +244,11 @@ struct mips_coproc {
 #define MIPS_CP0_CONFIG4_SEL 4
 #define MIPS_CP0_CONFIG5_SEL 5

+#define MIPS_CP0_GUESTCTL2 10
+#define MIPS_CP0_GUESTCTL2_SEL 5
+#define MIPS_CP0_GTOFFSET 12
+#define MIPS_CP0_GTOFFSET_SEL 7
+
 /* Resume Flags */
 #define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST (1<<1) /* Resume host? */
@@ -229,6 +264,7 @@ enum emulation_result {
         EMULATE_WAIT, /* WAIT instruction */
         EMULATE_PRIV_FAIL,
         EMULATE_EXCEPT, /* A guest exception has been generated */
+        EMULATE_HYPERCALL, /* HYPCALL instruction */
 };

 #define mips3_paddr_to_tlbpfn(x) \
@@ -276,13 +312,18 @@ struct kvm_mmu_memory_cache {
 struct kvm_vcpu_arch {
         void *guest_ebase;
         int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+        /* Host registers preserved across guest mode execution */
         unsigned long host_stack;
         unsigned long host_gp;
+        unsigned long host_pgd;
+        unsigned long host_entryhi;

         /* Host CP0 registers used when handling exits from guest */
         unsigned long host_cp0_badvaddr;
         unsigned long host_cp0_epc;
         u32 host_cp0_cause;
+        u32 host_cp0_guestctl0;
         u32 host_cp0_badinstr;
         u32 host_cp0_badinstrp;
@@ -340,7 +381,23 @@ struct kvm_vcpu_arch {
         /* Cache some mmu pages needed inside spinlock regions */
         struct kvm_mmu_memory_cache mmu_page_cache;

+#ifdef CONFIG_KVM_MIPS_VZ
+        /* vcpu's vzguestid is different on each host cpu in an smp system */
+        u32 vzguestid[NR_CPUS];
+
+        /* wired guest TLB entries */
+        struct kvm_mips_tlb *wired_tlb;
+        unsigned int wired_tlb_limit;
+        unsigned int wired_tlb_used;
+
+        /* emulated guest MAAR registers */
+        unsigned long maar[6];
+#endif
+
+        /* Last CPU the VCPU state was loaded on */
         int last_sched_cpu;
+        /* Last CPU the VCPU actually executed guest code on */
+        int last_exec_cpu;

         /* WAIT executed */
         int wait;
@@ -349,78 +406,6 @@ struct kvm_vcpu_arch {
         u8 msa_enabled;
 };

-#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
-#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
-#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
-#define kvm_write_c0_guest_entrylo0(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO0][0] = (val))
-#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
-#define kvm_write_c0_guest_entrylo1(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO1][0] = (val))
-#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
-#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
-#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
-#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
-#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
-#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
-#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
-#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
-#define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
-#define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
-#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
-#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
-#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
-#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
-#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
-#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
-#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
-#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
-#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
-#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
-#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
-#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
-#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
-#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
-#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
-#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
-#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
-#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
-#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
-#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
-#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
-#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
-#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
-#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
-#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
-#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
-#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
-#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
-#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
-#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
-#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
-#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
-#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
-#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
-#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
-#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
-#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
-#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
-#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
-#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
-#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
-#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
-#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
-#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
-#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
-#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
-#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
-#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))

 /*
  * Some of the guest registers may be modified asynchronously (e.g. from a
  * hrtimer callback in hard irq context) and therefore need stronger atomicity
  * guarantees than other registers.
  */

 static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
                                                 unsigned long val)
 {
@@ -471,26 +456,286 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
         } while (unlikely(!temp));
 }

-#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
-#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
-
-/* Cause can be modified asynchronously from hardirq hrtimer callback */
-#define kvm_set_c0_guest_cause(cop0, val) \
-        _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_clear_c0_guest_cause(cop0, val) \
-        _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_change_c0_guest_cause(cop0, change, val) \
-        _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
-                                        change, val)
-
-#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
-#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
-#define kvm_change_c0_guest_ebase(cop0, change, val) \
-        { \
-                kvm_clear_c0_guest_ebase(cop0, change); \
-                kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
-        }
+/* Guest register types, used in accessor build below */
+#define __KVMT32 u32
+#define __KVMTl unsigned long
+
+/*
+ * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
+ * These operate on the saved guest C0 state in RAM.
+ */
+
+/* Generate saved context simple accessors */
+#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        return cop0->reg[(_reg)][(sel)]; \
+} \
+static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        cop0->reg[(_reg)][(sel)] = val; \
+}
+
+/* Generate saved context bitwise modifiers */
+#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+                                         __KVMT##type val) \
+{ \
+        cop0->reg[(_reg)][(sel)] |= val; \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        cop0->reg[(_reg)][(sel)] &= ~val; \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+                                            __KVMT##type mask, \
+                                            __KVMT##type val) \
+{ \
+        unsigned long _mask = mask; \
+        cop0->reg[(_reg)][(sel)] &= ~_mask; \
+        cop0->reg[(_reg)][(sel)] |= val & _mask; \
+}
+
+/* Generate saved context atomic bitwise modifiers */
+#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+                                         __KVMT##type val) \
+{ \
+        _kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        _kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+                                            __KVMT##type mask, \
+                                            __KVMT##type val) \
+{ \
+        _kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
+                                        val); \
+}
+
+/*
+ * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
+ * These operate on the VZ guest C0 context in hardware.
+ */
+
+/* Generate VZ guest context simple accessors */
+#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        return read_gc0_##name(); \
+} \
+static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        write_gc0_##name(val); \
+}
+
+/* Generate VZ guest context bitwise modifiers */
+#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
+                                         __KVMT##type val) \
+{ \
+        set_gc0_##name(val); \
+} \
+static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        clear_gc0_##name(val); \
+} \
+static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
+                                            __KVMT##type mask, \
+                                            __KVMT##type val) \
+{ \
+        change_gc0_##name(mask, val); \
+}
+
+/* Generate VZ guest context save/restore to/from saved context */
+#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
+static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
+} \
+static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
+}
+
+/*
+ * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
+ * These wrap a set of operations to provide them with a different name.
+ */
+
+/* Generate simple accessor wrapper */
+#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
+static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
+{ \
+        return kvm_read_##name2(cop0); \
+} \
+static inline void kvm_write_##name1(struct mips_coproc *cop0, \
+                                     __KVMT##type val) \
+{ \
+        kvm_write_##name2(cop0, val); \
+}
+
+/* Generate bitwise modifier wrapper */
+#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
+static inline void kvm_set_##name1(struct mips_coproc *cop0, \
+                                   __KVMT##type val) \
+{ \
+        kvm_set_##name2(cop0, val); \
+} \
+static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
+                                     __KVMT##type val) \
+{ \
+        kvm_clear_##name2(cop0, val); \
+} \
+static inline void kvm_change_##name1(struct mips_coproc *cop0, \
+                                      __KVMT##type mask, \
+                                      __KVMT##type val) \
+{ \
+        kvm_change_##name2(cop0, mask, val); \
+}
+
+/*
+ * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
+ * These generate accessors operating on the saved context in RAM, and wrap
+ * them with the common guest C0 accessors (for use by common emulation code).
+ */
+
+#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
+        __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
+        __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
+        __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#ifndef CONFIG_KVM_MIPS_VZ
+
+/*
+ * T&E (trap & emulate software based virtualisation)
+ * We generate the common accessors operating exclusively on the saved context
+ * in RAM.
+ */
+
+#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW
+#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW
+
+#else
+
+/*
+ * VZ (hardware assisted virtualisation)
+ * These macros use the active guest state in VZ mode (hardware registers).
+ */
+
+/*
+ * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
+ * These generate accessors operating on the VZ guest context in hardware, and
+ * wrap them with the common guest C0 accessors (for use by common emulation
+ * code).
+ *
+ * Accessors operating on the saved context in RAM are also generated to allow
+ * convenient explicit saving and restoring of the state.
+ */
+
+#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
+        __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+        __BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
+        __BUILD_KVM_SAVE_VZ(name, _reg, sel)
+
+#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
+        __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+        __BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
+
+/*
+ * We can't do atomic modifications of COP0 state if hardware can modify it.
+ * Races must be handled explicitly.
+ */
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW
+
+#endif
+
+/*
+ * Define accessors for CP0 registers that are accessible to the guest. These
+ * are primarily used by common emulation code, which may need to access the
+ * registers differently depending on the implementation.
+ *
+ *    fns_hw/sw         name           type   reg num           select
+ */
+__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
+__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
+__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
+__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
+__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
+__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
+__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
+__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
+__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
+__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
+__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
+__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
+__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
+__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
+__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
+__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
+__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
+__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
+__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
+__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
+__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
+__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
+__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
+__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
+__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
+__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
+__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
+__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
+__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
+__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
+__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
+__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
+__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
+__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
+__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
+__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
+__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
+__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
+__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
+__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
+__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
+__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
+__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
+__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
+__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
+__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)
+
+/* Bitwise operations (on HW state) */
+__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)
+
+/* Bitwise operations (on saved state) */
+__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
+__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
+__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
+__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
+__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
+__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)
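/*
 * Editor's illustration, not part of the patch: for a T&E build, where
 * __BUILD_KVM_RW_HW is __BUILD_KVM_RW_SW, an invocation such as
 * __BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0) expands to roughly:
 *
 *      static inline u32 kvm_read_sw_gc0_wired(struct mips_coproc *cop0)
 *      {
 *              return cop0->reg[MIPS_CP0_TLB_WIRED][0];
 *      }
 *      static inline void kvm_write_sw_gc0_wired(struct mips_coproc *cop0,
 *                                                u32 val)
 *      {
 *              cop0->reg[MIPS_CP0_TLB_WIRED][0] = val;
 *      }
 *
 * plus kvm_read_c0_guest_wired()/kvm_write_c0_guest_wired() wrappers, which
 * is what common emulation code calls. In a VZ build the same invocation
 * additionally generates kvm_read/write_vz_gc0_wired() built on
 * read/write_gc0_wired(), and the common wrappers resolve to those instead.
 */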
 /* Helpers */

 static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
@@ -531,6 +776,10 @@ struct kvm_mips_callbacks {
         int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
         int (*handle_fpe)(struct kvm_vcpu *vcpu);
         int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
+        int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
+        int (*hardware_enable)(void);
+        void (*hardware_disable)(void);
+        int (*check_extension)(struct kvm *kvm, long ext);
         int (*vcpu_init)(struct kvm_vcpu *vcpu);
         void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
         int (*vcpu_setup)(struct kvm_vcpu *vcpu);
@@ -599,6 +848,10 @@ u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
 u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);

+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
+                                      struct kvm_vcpu *vcpu, bool write_fault);
+#endif
 extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
                                            struct kvm_vcpu *vcpu,
                                            bool write_fault);
@@ -625,6 +878,18 @@ extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
 extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
                                      unsigned long entryhi);

+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
+                            unsigned long *gpa);
+void kvm_vz_local_flush_roottlb_all_guests(void);
+void kvm_vz_local_flush_guesttlb_all(void);
+void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
+                          unsigned int count);
+void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
+                          unsigned int count);
+#endif
+
 void kvm_mips_suspend_mm(int cpu);
 void kvm_mips_resume_mm(int cpu);
@@ -795,7 +1060,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
-void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
@@ -803,6 +1068,20 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

+/* fairly internal functions requiring some care to use */
+int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
+ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
+int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
+                             u32 count, int min_drift);
+
+#ifdef CONFIG_KVM_MIPS_VZ
+void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
+void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
+#endif
+
 enum emulation_result kvm_mips_check_privilege(u32 cause,
                                                u32 *opc,
                                                struct kvm_run *run,
@@ -827,11 +1106,20 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu);

+/* COP0 */
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);
+
 unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

+/* Hypercalls (hypcall.c) */
+
+enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
+                                            union mips_instruction inst);
+int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
+
 /* Dynamic binary translation */
 extern int kvm_mips_trans_cache_index(union mips_instruction inst,
                                       u32 *opc, struct kvm_vcpu *vcpu);
@@ -846,7 +1134,6 @@ extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
 extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

-static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
......
@@ -36,7 +36,7 @@ unsigned platform_maar_init(unsigned num_pairs);
  * @upper: The highest address that the MAAR pair will affect. Must be
  *         aligned to one byte before a 2^16 byte boundary.
  * @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The
- *         MIPS_MAAR_V attribute will automatically be set.
+ *         MIPS_MAAR_VL attribute will automatically be set.
  *
  * Program the pair of MAAR registers specified by idx to apply the attributes
  * specified by attrs to the range of addresses from lower to higher.
@@ -49,10 +49,10 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
         BUG_ON(((upper & 0xffff) != 0xffff)
                || ((upper & ~0xffffull) & ~(MIPS_MAAR_ADDR << 4)));

-        /* Automatically set MIPS_MAAR_V */
-        attrs |= MIPS_MAAR_V;
+        /* Automatically set MIPS_MAAR_VL */
+        attrs |= MIPS_MAAR_VL;

-        /* Write the upper address & attributes (only MIPS_MAAR_V matters) */
+        /* Write the upper address & attributes (only MIPS_MAAR_VL matters) */
         write_c0_maari(idx << 1);
         back_to_back_c0_hazard();
         write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs);
@@ -81,7 +81,7 @@ extern void maar_init(void);
  * @upper: The highest address that the MAAR pair will affect. Must be
  *         aligned to one byte before a 2^16 byte boundary.
  * @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The
- *         MIPS_MAAR_V attribute will automatically be set.
+ *         MIPS_MAAR_VL attribute will automatically be set.
  *
  * Describes the configuration of a pair of Memory Accessibility Attribute
  * Registers - applying attributes from attrs to the range of physical
......
@@ -34,8 +34,10 @@
  */
 #ifdef __ASSEMBLY__
 #define _ULCAST_
+#define _U64CAST_
 #else
 #define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
 #endif

 /*
@@ -217,8 +219,10 @@
 /*
  * Wired register bits
  */
-#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16)
-#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0)
+#define MIPSR6_WIRED_LIMIT_SHIFT 16
+#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << MIPSR6_WIRED_LIMIT_SHIFT)
+#define MIPSR6_WIRED_WIRED_SHIFT 0
+#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << MIPSR6_WIRED_WIRED_SHIFT)

 /*
  * Values used for computation of new tlb entries
@@ -645,6 +649,7 @@
 #define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
+#define MIPS_CONF5_SBRI (_ULCAST_(1) << 6)
 #define MIPS_CONF5_VP (_ULCAST_(1) << 7)
 #define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
 #define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
@@ -719,10 +724,14 @@
 #define XLR_PERFCTRL_ALLTHREADS (_ULCAST_(1) << 13)

 /* MAAR bit definitions */
+#define MIPS_MAAR_VH (_U64CAST_(1) << 63)
 #define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
 #define MIPS_MAAR_ADDR_SHIFT 12
 #define MIPS_MAAR_S (_ULCAST_(1) << 1)
-#define MIPS_MAAR_V (_ULCAST_(1) << 0)
+#define MIPS_MAAR_VL (_ULCAST_(1) << 0)
+
+/* MAARI bit definitions */
+#define MIPS_MAARI_INDEX (_ULCAST_(0x3f) << 0)

 /* EBase bit definitions */
 #define MIPS_EBASE_CPUNUM_SHIFT 0
@@ -736,6 +745,10 @@
 #define MIPS_CMGCRB_BASE 11
 #define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))

+/* LLAddr bit definitions */
+#define MIPS_LLADDR_LLB_SHIFT 0
+#define MIPS_LLADDR_LLB (_ULCAST_(1) << MIPS_LLADDR_LLB_SHIFT)
+
 /*
  * Bits in the MIPS32 Memory Segmentation registers.
  */
@@ -961,6 +974,22 @@
 /* Flush FTLB */
 #define LOONGSON_DIAG_FTLB (_ULCAST_(1) << 13)

+/* CvmCtl register field definitions */
+#define CVMCTL_IPPCI_SHIFT 7
+#define CVMCTL_IPPCI (_U64CAST_(0x7) << CVMCTL_IPPCI_SHIFT)
+#define CVMCTL_IPTI_SHIFT 4
+#define CVMCTL_IPTI (_U64CAST_(0x7) << CVMCTL_IPTI_SHIFT)
+
+/* CvmMemCtl2 register field definitions */
+#define CVMMEMCTL2_INHIBITTS (_U64CAST_(1) << 17)
+
+/* CvmVMConfig register field definitions */
+#define CVMVMCONF_DGHT (_U64CAST_(1) << 60)
+#define CVMVMCONF_MMUSIZEM1_S 12
+#define CVMVMCONF_MMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_MMUSIZEM1_S)
+#define CVMVMCONF_RMMUSIZEM1_S 0
+#define CVMVMCONF_RMMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
+
 /*
  * Coprocessor 1 (FPU) register names
  */
@@ -1720,6 +1749,13 @@ do { \
 #define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7)
 #define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)

+#define read_c0_cvmmemctl2() __read_64bit_c0_register($16, 6)
+#define write_c0_cvmmemctl2(val) __write_64bit_c0_register($16, 6, val)
+
+#define read_c0_cvmvmconfig() __read_64bit_c0_register($16, 7)
+#define write_c0_cvmvmconfig(val) __write_64bit_c0_register($16, 7, val)
+
 /*
  * The cacheerr registers are not standardized. On OCTEON, they are
  * 64 bits wide.
@@ -1989,6 +2025,8 @@ do { \
 #define read_gc0_epc() __read_ulong_gc0_register(14, 0)
 #define write_gc0_epc(val) __write_ulong_gc0_register(14, 0, val)

+#define read_gc0_prid() __read_32bit_gc0_register(15, 0)
+
 #define read_gc0_ebase() __read_32bit_gc0_register(15, 1)
 #define write_gc0_ebase(val) __write_32bit_gc0_register(15, 1, val)
@@ -2012,6 +2050,9 @@ do { \
 #define write_gc0_config6(val) __write_32bit_gc0_register(16, 6, val)
 #define write_gc0_config7(val) __write_32bit_gc0_register(16, 7, val)

+#define read_gc0_lladdr() __read_ulong_gc0_register(17, 0)
+#define write_gc0_lladdr(val) __write_ulong_gc0_register(17, 0, val)
+
 #define read_gc0_watchlo0() __read_ulong_gc0_register(18, 0)
 #define read_gc0_watchlo1() __read_ulong_gc0_register(18, 1)
 #define read_gc0_watchlo2() __read_ulong_gc0_register(18, 2)
@@ -2090,6 +2131,19 @@ do { \
 #define write_gc0_kscratch5(val) __write_ulong_gc0_register(31, 6, val)
 #define write_gc0_kscratch6(val) __write_ulong_gc0_register(31, 7, val)

+/* Cavium OCTEON (cnMIPS) */
+#define read_gc0_cvmcount() __read_ulong_gc0_register(9, 6)
+#define write_gc0_cvmcount(val) __write_ulong_gc0_register(9, 6, val)
+
+#define read_gc0_cvmctl() __read_64bit_gc0_register(9, 7)
+#define write_gc0_cvmctl(val) __write_64bit_gc0_register(9, 7, val)
+
+#define read_gc0_cvmmemctl() __read_64bit_gc0_register(11, 7)
+#define write_gc0_cvmmemctl(val) __write_64bit_gc0_register(11, 7, val)
+
+#define read_gc0_cvmmemctl2() __read_64bit_gc0_register(16, 6)
+#define write_gc0_cvmmemctl2(val) __write_64bit_gc0_register(16, 6, val)
+
 /*
  * Macros to access the floating point coprocessor control registers
  */
@@ -2696,9 +2750,11 @@ __BUILD_SET_C0(brcm_mode)
  */
 #define __BUILD_SET_GC0(name) __BUILD_SET_COMMON(gc0_##name)

+__BUILD_SET_GC0(wired)
 __BUILD_SET_GC0(status)
 __BUILD_SET_GC0(cause)
 __BUILD_SET_GC0(ebase)
+__BUILD_SET_GC0(config1)

 /*
  * Return low 10 bits of ebase.
......
@@ -21,9 +21,11 @@
  */
 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

-#define UNIQUE_ENTRYHI(idx) \
-        ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
-         (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+#define _UNIQUE_ENTRYHI(base, idx) \
+        (((base) + ((idx) << (PAGE_SHIFT + 1))) | \
+         (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+#define UNIQUE_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG0, idx)
+#define UNIQUE_GUEST_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG1, idx)

 static inline unsigned int num_wired_entries(void)
 {
......
@@ -179,7 +179,7 @@ enum cop0_coi_func {
         tlbr_op = 0x01, tlbwi_op = 0x02,
         tlbwr_op = 0x06, tlbp_op = 0x08,
         rfe_op = 0x10, eret_op = 0x18,
-        wait_op = 0x20,
+        wait_op = 0x20, hypcall_op = 0x28
 };

 /*
......
@@ -54,10 +54,15 @@ struct kvm_fpu {
  * Register set = 0: GP registers from kvm_regs (see definitions below).
  *
  * Register set = 1: CP0 registers.
- *  bits[15..8]  - Must be zero.
+ *  bits[15..8]  - COP0 register set.
+ *
+ *  COP0 register set = 0: Main CP0 registers.
  *  bits[7..3]   - Register 'rd' index.
  *  bits[2..0]   - Register 'sel' index.
  *
+ *  COP0 register set = 1: MAARs.
+ *   bits[7..0]  - MAAR index.
+ *
  * Register set = 2: KVM specific registers (see definitions below).
  *
  * Register set = 3: FPU / MSA registers (see definitions below).
@@ -114,6 +119,15 @@ struct kvm_fpu {

 #define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)

+/*
+ * KVM_REG_MIPS_CP0 - Coprocessor 0 registers.
+ */
+
+#define KVM_REG_MIPS_MAAR (KVM_REG_MIPS_CP0 | (1 << 8))
+#define KVM_REG_MIPS_CP0_MAAR(n) (KVM_REG_MIPS_MAAR | \
+                                  KVM_REG_SIZE_U64 | (n))
+
 /*
  * KVM_REG_MIPS_KVM - KVM specific control registers.
  */
......
@@ -289,6 +289,8 @@ static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
                      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
                 if (c->fpu_id & MIPS_FPIR_3D)
                         c->ases |= MIPS_ASE_MIPS3D;
+                if (c->fpu_id & MIPS_FPIR_UFRP)
+                        c->options |= MIPS_CPU_UFR;
                 if (c->fpu_id & MIPS_FPIR_FREP)
                         c->options |= MIPS_CPU_FRE;
         }
@@ -1003,7 +1005,8 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
         unsigned int config3, config3_dyn;

         probe_gc0_config_dyn(config3, config3, config3_dyn,
-                             MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_CTXTC);
+                             MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
+                             MIPS_CONF3_CTXTC);

         if (config3 & MIPS_CONF3_CTXTC)
                 c->guest.options |= MIPS_CPU_CTXTC;
@@ -1013,6 +1016,9 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
         if (config3 & MIPS_CONF3_PW)
                 c->guest.options |= MIPS_CPU_HTW;

+        if (config3 & MIPS_CONF3_ULRI)
+                c->guest.options |= MIPS_CPU_ULRI;
+
         if (config3 & MIPS_CONF3_SC)
                 c->guest.options |= MIPS_CPU_SEGMENTS;
@@ -1051,7 +1057,7 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
         unsigned int config5, config5_dyn;

         probe_gc0_config_dyn(config5, config5, config5_dyn,
-                             MIPS_CONF_M | MIPS_CONF5_MRP);
+                             MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);

         if (config5 & MIPS_CONF5_MRP)
                 c->guest.options |= MIPS_CPU_MAAR;
@@ -1061,6 +1067,9 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
         if (config5 & MIPS_CONF5_LLB)
                 c->guest.options |= MIPS_CPU_RW_LLB;

+        if (config5 & MIPS_CONF5_MVH)
+                c->guest.options |= MIPS_CPU_MVH;
+
         if (config5 & MIPS_CONF_M)
                 c->guest.conf |= BIT(6);

         return config5 & MIPS_CONF_M;
......
...@@ -70,6 +70,7 @@ EXPORT_SYMBOL(perf_irq); ...@@ -70,6 +70,7 @@ EXPORT_SYMBOL(perf_irq);
*/ */
unsigned int mips_hpt_frequency; unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);
/* /*
* This function exists in order to cause an error due to a duplicate * This function exists in order to cause an error due to a duplicate
......
...@@ -26,11 +26,34 @@ config KVM ...@@ -26,11 +26,34 @@ config KVM
select SRCU select SRCU
---help--- ---help---
Support for hosting Guest kernels. Support for hosting Guest kernels.
Currently supported on MIPS32 processors.
choice
prompt "Virtualization mode"
depends on KVM
default KVM_MIPS_TE
config KVM_MIPS_TE
bool "Trap & Emulate"
---help---
Use trap and emulate to virtualize 32-bit guests in user mode. This
does not require any special hardware Virtualization support beyond
standard MIPS32/64 r2 or later, but it does require the guest kernel
to be configured with CONFIG_KVM_GUEST=y so that it resides in the
user address segment.
config KVM_MIPS_VZ
bool "MIPS Virtualization (VZ) ASE"
---help---
Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
but requires hardware support.
endchoice
config KVM_MIPS_DYN_TRANS config KVM_MIPS_DYN_TRANS
bool "KVM/MIPS: Dynamic binary translation to reduce traps" bool "KVM/MIPS: Dynamic binary translation to reduce traps"
depends on KVM depends on KVM_MIPS_TE
default y
---help--- ---help---
When running in Trap & Emulate mode patch privileged When running in Trap & Emulate mode patch privileged
instructions to reduce the number of traps. instructions to reduce the number of traps.
......
...@@ -9,8 +9,15 @@ common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o ...@@ -9,8 +9,15 @@ common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \ kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
interrupt.o stats.o commpage.o \ interrupt.o stats.o commpage.o \
dyntrans.o trap_emul.o fpu.o fpu.o
kvm-objs += hypcall.o
kvm-objs += mmu.o kvm-objs += mmu.o
ifdef CONFIG_KVM_MIPS_VZ
kvm-objs += vz.o
else
kvm-objs += dyntrans.o
kvm-objs += trap_emul.o
endif
obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o obj-y += callback.o tlb.o
...@@ -308,7 +308,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) ...@@ -308,7 +308,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
* CP0_Cause.DC bit or the count_ctl.DC bit. * CP0_Cause.DC bit or the count_ctl.DC bit.
* 0 otherwise (in which case CP0_Count timer is running). * 0 otherwise (in which case CP0_Count timer is running).
*/ */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
...@@ -467,7 +467,7 @@ u32 kvm_mips_read_count(struct kvm_vcpu *vcpu) ...@@ -467,7 +467,7 @@ u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
* *
* Returns: The ktime at the point of freeze. * Returns: The ktime at the point of freeze.
*/ */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count) ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{ {
ktime_t now; ktime_t now;
...@@ -516,6 +516,82 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, ...@@ -516,6 +516,82 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
} }
/**
* kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
* @vcpu: Virtual CPU.
* @before: Time before Count was saved, lower bound of drift calculation.
* @count: CP0_Count at point of restore.
* @min_drift: Minimum amount of drift permitted before correction.
* Must be <= 0.
*
* Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer is
* to be used for a period of time, but the exact ktime corresponding to the
* final Count that must be restored is not known.
*
 * It is guaranteed that a timer interrupt immediately after restore will be
* handled, but not if CP0_Compare is exactly at @count. That case should
* already be handled when the hardware timer state is saved.
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
* stopped).
*
* Returns: Amount of correction to count_bias due to drift.
*/
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
u32 count, int min_drift)
{
ktime_t now, count_time;
u32 now_count, before_count;
u64 delta;
int drift, ret = 0;
/* Calculate expected count at before */
before_count = vcpu->arch.count_bias +
kvm_mips_ktime_to_count(vcpu, before);
/*
* Detect significantly negative drift, where count is lower than
* expected. Some negative drift is expected when hardware counter is
 * set after kvm_mips_freeze_hrtimer(), and it is harmless to allow the
* time to jump forwards a little, within reason. If the drift is too
* significant, adjust the bias to avoid a big Guest.CP0_Count jump.
*/
drift = count - before_count;
if (drift < min_drift) {
count_time = before;
vcpu->arch.count_bias += drift;
ret = drift;
goto resume;
}
/* Calculate expected count right now */
now = ktime_get();
now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
/*
* Detect positive drift, where count is higher than expected, and
* adjust the bias to avoid guest time going backwards.
*/
drift = count - now_count;
if (drift > 0) {
count_time = now;
vcpu->arch.count_bias += drift;
ret = drift;
goto resume;
}
/* Subtract nanosecond delta to find ktime when count was read */
delta = (u64)(u32)(now_count - count);
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
count_time = ktime_sub_ns(now, delta);
resume:
/* Resume using the calculated ktime */
kvm_mips_resume_hrtimer(vcpu, count_time, count);
return ret;
}
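For orientation, a hedged sketch of the call pattern this freeze/restore pairing enables (the ex_hw_timer_* helpers are hypothetical and the -1000 tick drift bound is an arbitrary example; the real VZ caller lives elsewhere in this series and this presumes the declarations in this file):

static void ex_run_on_hw_timer(struct kvm_vcpu *vcpu)
{
        ktime_t before;
        u32 count;

        /* Stop the soft timer and capture a consistent (ktime, count) */
        before = kvm_mips_freeze_hrtimer(vcpu, &count);
        ex_hw_timer_start(count);               /* hypothetical */

        /* ... guest runs against the hardware timer for a while ... */

        /* Resync the soft timer from wherever the hardware got to */
        count = ex_hw_timer_stop();             /* hypothetical */
        kvm_mips_restore_hrtimer(vcpu, before, count, -1000);
}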
/** /**
* kvm_mips_write_count() - Modify the count and update timer. * kvm_mips_write_count() - Modify the count and update timer.
* @vcpu: Virtual CPU. * @vcpu: Virtual CPU.
...@@ -543,16 +619,15 @@ void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count) ...@@ -543,16 +619,15 @@ void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
/** /**
* kvm_mips_init_count() - Initialise timer. * kvm_mips_init_count() - Initialise timer.
* @vcpu: Virtual CPU. * @vcpu: Virtual CPU.
* @count_hz: Frequency of timer.
* *
* Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set * Initialise the timer to the specified frequency, zero it, and set it going if
* it going if it's enabled. * it's enabled.
*/ */
void kvm_mips_init_count(struct kvm_vcpu *vcpu) void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{ {
/* 100 MHz */ vcpu->arch.count_hz = count_hz;
vcpu->arch.count_hz = 100*1000*1000; vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
vcpu->arch.count_hz);
vcpu->arch.count_dyn_bias = 0; vcpu->arch.count_dyn_bias = 0;
/* Starting at 0 */ /* Starting at 0 */
...@@ -622,7 +697,9 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) ...@@ -622,7 +697,9 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
int dc; int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0); u32 old_compare = kvm_read_c0_guest_compare(cop0);
ktime_t now; s32 delta = compare - old_compare;
u32 cause;
ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
u32 count; u32 count;
/* if unchanged, must just be an ack */ /* if unchanged, must just be an ack */
...@@ -634,6 +711,21 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) ...@@ -634,6 +711,21 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
return; return;
} }
/*
* If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
* too to prevent guest CP0_Count hitting guest CP0_Compare.
*
* The new GTOffset corresponds to the new value of CP0_Compare, and is
* set prior to it being written into the guest context. We disable
* preemption until the new value is written to prevent restore of a
* GTOffset corresponding to the old CP0_Compare value.
*/
if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
preempt_disable();
write_c0_gtoffset(compare - read_c0_count());
back_to_back_c0_hazard();
}
/* freeze_hrtimer() takes care of timer interrupts <= count */ /* freeze_hrtimer() takes care of timer interrupts <= count */
dc = kvm_mips_count_disabled(vcpu); dc = kvm_mips_count_disabled(vcpu);
if (!dc) if (!dc)
...@@ -641,12 +733,36 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) ...@@ -641,12 +733,36 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
if (ack) if (ack)
kvm_mips_callbacks->dequeue_timer_int(vcpu); kvm_mips_callbacks->dequeue_timer_int(vcpu);
else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
/*
* With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
* preserve guest CP0_Cause.TI if we don't want to ack it.
*/
cause = kvm_read_c0_guest_cause(cop0);
kvm_write_c0_guest_compare(cop0, compare); kvm_write_c0_guest_compare(cop0, compare);
if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
if (delta > 0)
preempt_enable();
back_to_back_c0_hazard();
if (!ack && cause & CAUSEF_TI)
kvm_write_c0_guest_cause(cop0, cause);
}
/* resume_hrtimer() takes care of timer interrupts > count */ /* resume_hrtimer() takes care of timer interrupts > count */
if (!dc) if (!dc)
kvm_mips_resume_hrtimer(vcpu, now, count); kvm_mips_resume_hrtimer(vcpu, now, count);
/*
* If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
* until after the new CP0_Compare is written, otherwise new guest
* CP0_Count could hit new guest CP0_Compare.
*/
if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
write_c0_gtoffset(compare - read_c0_count());
} }
/** /**
...@@ -857,6 +973,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) ...@@ -857,6 +973,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
++vcpu->stat.wait_exits; ++vcpu->stat.wait_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT); trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
if (!vcpu->arch.pending_exceptions) { if (!vcpu->arch.pending_exceptions) {
kvm_vz_lose_htimer(vcpu);
vcpu->arch.wait = 1; vcpu->arch.wait = 1;
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
...@@ -873,17 +990,62 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) ...@@ -873,17 +990,62 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
return EMULATE_DONE; return EMULATE_DONE;
} }
/* static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that unsigned long entryhi)
* we can catch this, if things ever change {
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
int cpu, i;
u32 nasid = entryhi & KVM_ENTRYHI_ASID;
if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
KVM_ENTRYHI_ASID, nasid);
/*
* Flush entries from the GVA page tables.
* Guest user page table will get flushed lazily on re-entry to
* guest user if the guest ASID actually changes.
*/
kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
/*
* Regenerate/invalidate kernel MMU context.
* The user MMU context will be regenerated lazily on re-entry
* to guest user if the guest ASID actually changes.
*/ */
preempt_disable();
cpu = smp_processor_id();
get_new_mmu_context(kern_mm, cpu);
for_each_possible_cpu(i)
if (i != cpu)
cpu_context(i, kern_mm) = 0;
preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0, entryhi);
}
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_mips_tlb *tlb;
unsigned long pc = vcpu->arch.pc; unsigned long pc = vcpu->arch.pc;
int index;
kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); index = kvm_read_c0_guest_index(cop0);
return EMULATE_FAIL; if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
/* UNDEFINED */
kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
}
tlb = &vcpu->arch.guest_tlb[index];
kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
return EMULATE_DONE;
} }
/** /**
...@@ -1105,11 +1267,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, ...@@ -1105,11 +1267,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel; u32 rt, rd, sel;
unsigned long curr_pc; unsigned long curr_pc;
int cpu, i;
/* /*
* Update PC and hold onto current PC in case there is * Update PC and hold onto current PC in case there is
...@@ -1143,6 +1303,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, ...@@ -1143,6 +1303,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
case wait_op: case wait_op:
er = kvm_mips_emul_wait(vcpu); er = kvm_mips_emul_wait(vcpu);
break; break;
case hypcall_op:
er = kvm_mips_emul_hypcall(vcpu, inst);
break;
} }
} else { } else {
rt = inst.c0r_format.rt; rt = inst.c0r_format.rt;
...@@ -1208,43 +1371,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, ...@@ -1208,43 +1371,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
kvm_change_c0_guest_ebase(cop0, 0x1ffff000, kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
vcpu->arch.gprs[rt]); vcpu->arch.gprs[rt]);
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) { } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
u32 nasid = kvm_mips_change_entryhi(vcpu,
vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
if (((kvm_read_c0_guest_entryhi(cop0) &
KVM_ENTRYHI_ASID) != nasid)) {
trace_kvm_asid_change(vcpu,
kvm_read_c0_guest_entryhi(cop0)
& KVM_ENTRYHI_ASID,
nasid);
/*
* Flush entries from the GVA page
* tables.
* Guest user page table will get
* flushed lazily on re-entry to guest
* user if the guest ASID actually
* changes.
*/
kvm_mips_flush_gva_pt(kern_mm->pgd,
KMF_KERN);
/*
* Regenerate/invalidate kernel MMU
* context.
* The user MMU context will be
* regenerated lazily on re-entry to
* guest user if the guest ASID actually
* changes.
*/
preempt_disable();
cpu = smp_processor_id();
get_new_mmu_context(kern_mm, cpu);
for_each_possible_cpu(i)
if (i != cpu)
cpu_context(i, kern_mm) = 0;
preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0,
vcpu->arch.gprs[rt]); vcpu->arch.gprs[rt]);
} }
/* Are we writing to COUNT */ /* Are we writing to COUNT */
...@@ -1474,9 +1601,8 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, ...@@ -1474,9 +1601,8 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
struct kvm_run *run, struct kvm_run *run,
struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DO_MMIO; enum emulation_result er;
u32 rt; u32 rt;
u32 bytes;
void *data = run->mmio.data; void *data = run->mmio.data;
unsigned long curr_pc; unsigned long curr_pc;
...@@ -1491,103 +1617,74 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, ...@@ -1491,103 +1617,74 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
rt = inst.i_format.rt; rt = inst.i_format.rt;
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR)
goto out_fail;
switch (inst.i_format.opcode) { switch (inst.i_format.opcode) {
case sb_op: #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
bytes = 1; case sd_op:
if (bytes > sizeof(run->mmio.data)) { run->mmio.len = 8;
kvm_err("%s: bad MMIO length: %d\n", __func__, *(u64 *)data = vcpu->arch.gprs[rt];
run->mmio.len);
}
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
er = EMULATE_FAIL;
break;
}
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(u8 *) data = vcpu->arch.gprs[rt];
kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
*(u8 *) data);
kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u64 *)data);
break; break;
#endif
case sw_op: case sw_op:
bytes = 4; run->mmio.len = 4;
if (bytes > sizeof(run->mmio.data)) { *(u32 *)data = vcpu->arch.gprs[rt];
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.len);
}
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
er = EMULATE_FAIL;
break;
}
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(u32 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *) data); vcpu->arch.gprs[rt], *(u32 *)data);
break; break;
case sh_op: case sh_op:
bytes = 2; run->mmio.len = 2;
if (bytes > sizeof(run->mmio.data)) { *(u16 *)data = vcpu->arch.gprs[rt];
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.len); kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
} vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
run->mmio.phys_addr = vcpu->arch.gprs[rt], *(u16 *)data);
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
er = EMULATE_FAIL;
break; break;
}
run->mmio.len = bytes; case sb_op:
run->mmio.is_write = 1; run->mmio.len = 1;
vcpu->mmio_needed = 1; *(u8 *)data = vcpu->arch.gprs[rt];
vcpu->mmio_is_write = 1;
*(u16 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *) data); vcpu->arch.gprs[rt], *(u8 *)data);
break; break;
default: default:
kvm_err("Store not yet supported (inst=0x%08x)\n", kvm_err("Store not yet supported (inst=0x%08x)\n",
inst.word); inst.word);
er = EMULATE_FAIL; goto out_fail;
break;
} }
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
return EMULATE_DO_MMIO;
out_fail:
/* Rollback PC if emulation was unsuccessful */ /* Rollback PC if emulation was unsuccessful */
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc; vcpu->arch.pc = curr_pc;
return EMULATE_FAIL;
return er;
} }
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
u32 cause, struct kvm_run *run, u32 cause, struct kvm_run *run,
struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DO_MMIO; enum emulation_result er;
unsigned long curr_pc; unsigned long curr_pc;
u32 op, rt; u32 op, rt;
u32 bytes;
rt = inst.i_format.rt; rt = inst.i_format.rt;
op = inst.i_format.opcode; op = inst.i_format.opcode;
...@@ -1606,96 +1703,53 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, ...@@ -1606,96 +1703,53 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
vcpu->arch.io_gpr = rt; vcpu->arch.io_gpr = rt;
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR)
return EMULATE_FAIL;
vcpu->mmio_needed = 2; /* signed */
switch (op) { switch (op) {
case lw_op: #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
bytes = 4; case ld_op:
if (bytes > sizeof(run->mmio.data)) { run->mmio.len = 8;
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.len);
er = EMULATE_FAIL;
break; break;
}
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
er = EMULATE_FAIL;
break;
}
run->mmio.len = bytes; case lwu_op:
run->mmio.is_write = 0; vcpu->mmio_needed = 1; /* unsigned */
vcpu->mmio_needed = 1; /* fall through */
vcpu->mmio_is_write = 0; #endif
case lw_op:
run->mmio.len = 4;
break; break;
case lh_op:
case lhu_op: case lhu_op:
bytes = 2; vcpu->mmio_needed = 1; /* unsigned */
if (bytes > sizeof(run->mmio.data)) { /* fall through */
kvm_err("%s: bad MMIO length: %d\n", __func__, case lh_op:
run->mmio.len); run->mmio.len = 2;
er = EMULATE_FAIL;
break;
}
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
er = EMULATE_FAIL;
break;
}
run->mmio.len = bytes;
run->mmio.is_write = 0;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
if (op == lh_op)
vcpu->mmio_needed = 2;
else
vcpu->mmio_needed = 1;
break; break;
case lbu_op: case lbu_op:
vcpu->mmio_needed = 1; /* unsigned */
/* fall through */
case lb_op: case lb_op:
bytes = 1; run->mmio.len = 1;
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.len);
er = EMULATE_FAIL;
break;
}
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
host_cp0_badvaddr);
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
er = EMULATE_FAIL;
break;
}
run->mmio.len = bytes;
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
if (op == lb_op)
vcpu->mmio_needed = 2;
else
vcpu->mmio_needed = 1;
break; break;
default: default:
kvm_err("Load not yet supported (inst=0x%08x)\n", kvm_err("Load not yet supported (inst=0x%08x)\n",
inst.word); inst.word);
er = EMULATE_FAIL; vcpu->mmio_needed = 0;
break; return EMULATE_FAIL;
} }
return er; run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
return EMULATE_DO_MMIO;
} }
#ifndef CONFIG_KVM_MIPS_VZ
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long), static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
unsigned long curr_pc, unsigned long curr_pc,
unsigned long addr, unsigned long addr,
...@@ -1786,11 +1840,35 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, ...@@ -1786,11 +1840,35 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
arch->gprs[base], offset); arch->gprs[base], offset);
if (cache == Cache_D) if (cache == Cache_D) {
#ifdef CONFIG_CPU_R4K_CACHE_TLB
r4k_blast_dcache(); r4k_blast_dcache();
else if (cache == Cache_I) #else
switch (boot_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* locally flush icache */
local_flush_icache_range(0, 0);
break;
default:
__flush_cache_all();
break;
}
#endif
} else if (cache == Cache_I) {
#ifdef CONFIG_CPU_R4K_CACHE_TLB
r4k_blast_icache(); r4k_blast_icache();
else { #else
switch (boot_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* locally flush icache */
local_flush_icache_range(0, 0);
break;
default:
flush_icache_all();
break;
}
#endif
} else {
kvm_err("%s: unsupported CACHE INDEX operation\n", kvm_err("%s: unsupported CACHE INDEX operation\n",
__func__); __func__);
return EMULATE_FAIL; return EMULATE_FAIL;
...@@ -1870,18 +1948,6 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, ...@@ -1870,18 +1948,6 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
case cop0_op: case cop0_op:
er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
break; break;
case sb_op:
case sh_op:
case sw_op:
er = kvm_mips_emulate_store(inst, cause, run, vcpu);
break;
case lb_op:
case lbu_op:
case lhu_op:
case lh_op:
case lw_op:
er = kvm_mips_emulate_load(inst, cause, run, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6 #ifndef CONFIG_CPU_MIPSR6
case cache_op: case cache_op:
...@@ -1915,6 +1981,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, ...@@ -1915,6 +1981,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
return er; return er;
} }
#endif /* CONFIG_KVM_MIPS_VZ */
/** /**
* kvm_mips_guest_exception_base() - Find guest exception vector base address. * kvm_mips_guest_exception_base() - Find guest exception vector base address.
...@@ -2524,8 +2591,15 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, ...@@ -2524,8 +2591,15 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
vcpu->arch.pc = vcpu->arch.io_pc; vcpu->arch.pc = vcpu->arch.io_pc;
switch (run->mmio.len) { switch (run->mmio.len) {
case 8:
*gpr = *(s64 *)run->mmio.data;
break;
case 4: case 4:
*gpr = *(s32 *) run->mmio.data; if (vcpu->mmio_needed == 2)
*gpr = *(s32 *)run->mmio.data;
else
*gpr = *(u32 *)run->mmio.data;
break; break;
case 2: case 2:
......
...@@ -51,12 +51,15 @@ ...@@ -51,12 +51,15 @@
#define RA 31 #define RA 31
/* Some CP0 registers */ /* Some CP0 registers */
#define C0_PWBASE 5, 5
#define C0_HWRENA 7, 0 #define C0_HWRENA 7, 0
#define C0_BADVADDR 8, 0 #define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1 #define C0_BADINSTR 8, 1
#define C0_BADINSTRP 8, 2 #define C0_BADINSTRP 8, 2
#define C0_ENTRYHI 10, 0 #define C0_ENTRYHI 10, 0
#define C0_GUESTCTL1 10, 4
#define C0_STATUS 12, 0 #define C0_STATUS 12, 0
#define C0_GUESTCTL0 12, 6
#define C0_CAUSE 13, 0 #define C0_CAUSE 13, 0
#define C0_EPC 14, 0 #define C0_EPC 14, 0
#define C0_EBASE 15, 1 #define C0_EBASE 15, 1
...@@ -292,8 +295,8 @@ static void *kvm_mips_build_enter_guest(void *addr) ...@@ -292,8 +295,8 @@ static void *kvm_mips_build_enter_guest(void *addr)
unsigned int i; unsigned int i;
struct uasm_label labels[2]; struct uasm_label labels[2];
struct uasm_reloc relocs[2]; struct uasm_reloc relocs[2];
struct uasm_label *l = labels; struct uasm_label __maybe_unused *l = labels;
struct uasm_reloc *r = relocs; struct uasm_reloc __maybe_unused *r = relocs;
memset(labels, 0, sizeof(labels)); memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs)); memset(relocs, 0, sizeof(relocs));
...@@ -302,7 +305,67 @@ static void *kvm_mips_build_enter_guest(void *addr) ...@@ -302,7 +305,67 @@ static void *kvm_mips_build_enter_guest(void *addr)
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1); UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MTC0(&p, T0, C0_EPC); UASM_i_MTC0(&p, T0, C0_EPC);
/* Set the ASID for the Guest Kernel */ #ifdef CONFIG_KVM_MIPS_VZ
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
/*
* Set up KVM GPA pgd.
* This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*
* We keep S0 pointing at struct kvm so we can load the ASID below.
*/
UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
(int)offsetof(struct kvm_vcpu, arch), K1);
UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9);
/* delay slot */
if (cpu_has_htw)
UASM_i_MTC0(&p, A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Set GM bit to setup eret to VZ guest context */
uasm_i_addiu(&p, V1, ZERO, 1);
uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
if (cpu_has_guestid) {
/*
* Set root mode GuestID, so that root TLB refill handler can
* use the correct GuestID in the root TLB.
*/
/* Get current GuestID */
uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = GuestCtl1.ID */
uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
MIPS_GCTL1_ID_WIDTH);
uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
/* GuestID handles dealiasing so we don't need to touch ASID */
goto skip_asid_restore;
}
/* Root ASID Dealias (RAD) */
/* Save host ASID */
UASM_i_MFC0(&p, K0, C0_ENTRYHI);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
K1);
/* Set the root ASID for the Guest */
UASM_i_ADDIU(&p, T1, S0,
offsetof(struct kvm, arch.gpa_mm.context.asid));
#else
/* Set the ASID for the Guest Kernel or User */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1); UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]), UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
T0); T0);
...@@ -315,6 +378,7 @@ static void *kvm_mips_build_enter_guest(void *addr) ...@@ -315,6 +378,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
guest_user_mm.context.asid)); guest_user_mm.context.asid));
uasm_l_kernel_asid(&l, p); uasm_l_kernel_asid(&l, p);
#endif
/* t1: contains the base of the ASID array, need to get the cpu id */ /* t1: contains the base of the ASID array, need to get the cpu id */
/* smp_processor_id */ /* smp_processor_id */
...@@ -339,6 +403,7 @@ static void *kvm_mips_build_enter_guest(void *addr) ...@@ -339,6 +403,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif #endif
#ifndef CONFIG_KVM_MIPS_VZ
/* /*
* Set up KVM T&E GVA pgd. * Set up KVM T&E GVA pgd.
* This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
...@@ -351,7 +416,11 @@ static void *kvm_mips_build_enter_guest(void *addr) ...@@ -351,7 +416,11 @@ static void *kvm_mips_build_enter_guest(void *addr)
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9); uasm_i_jalr(&p, RA, T9);
uasm_i_mtc0(&p, K0, C0_ENTRYHI); uasm_i_mtc0(&p, K0, C0_ENTRYHI);
#else
/* Set up KVM VZ root ASID (!guestid) */
uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
#endif
uasm_i_ehb(&p); uasm_i_ehb(&p);
/* Disable RDHWR access */ /* Disable RDHWR access */
...@@ -559,13 +628,10 @@ void *kvm_mips_build_exit(void *addr) ...@@ -559,13 +628,10 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */ /* Now that context has been saved, we can use other registers */
/* Restore vcpu */ /* Restore vcpu */
UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]); UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
uasm_i_move(&p, S1, A1);
/* Restore run (vcpu->run) */ /* Restore run (vcpu->run) */
UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1); UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
/* Save pointer to run in s0, will be saved by the compiler */
uasm_i_move(&p, S0, A0);
/* /*
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
...@@ -641,6 +707,52 @@ void *kvm_mips_build_exit(void *addr) ...@@ -641,6 +707,52 @@ void *kvm_mips_build_exit(void *addr)
uasm_l_msa_1(&l, p); uasm_l_msa_1(&l, p);
} }
#ifdef CONFIG_KVM_MIPS_VZ
/* Restore host ASID */
if (!cpu_has_guestid) {
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
K1);
UASM_i_MTC0(&p, K0, C0_ENTRYHI);
}
/*
* Set up normal Linux process pgd.
* This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*/
UASM_i_LW(&p, A0,
offsetof(struct kvm_vcpu_arch, host_pgd), K1);
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9);
/* delay slot */
if (cpu_has_htw)
UASM_i_MTC0(&p, A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Clear GM bit so we don't enter guest mode when EXL is cleared */
uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
/* Save GuestCtl0 so we can access GExcCode after CPU migration */
uasm_i_sw(&p, K0,
offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
if (cpu_has_guestid) {
/*
* Clear root mode GuestID, so that root TLB operations use the
* root GuestID in the root TLB.
*/
uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
}
#endif
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE)); uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
uasm_i_and(&p, V0, V0, AT); uasm_i_and(&p, V0, V0, AT);
...@@ -680,6 +792,8 @@ void *kvm_mips_build_exit(void *addr) ...@@ -680,6 +792,8 @@ void *kvm_mips_build_exit(void *addr)
* Now jump to the kvm_mips_handle_exit() to see if we can deal * Now jump to the kvm_mips_handle_exit() to see if we can deal
* with this in the kernel * with this in the kernel
*/ */
uasm_i_move(&p, A0, S0);
uasm_i_move(&p, A1, S1);
UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit); UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, RA, T9); uasm_i_jalr(&p, RA, T9);
UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ); UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Hypercall handling.
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm_para.h>
#define MAX_HYPCALL_ARGS 4
enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
union mips_instruction inst)
{
unsigned int code = (inst.co_format.code >> 5) & 0x3ff;
kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);
switch (code) {
case 0:
return EMULATE_HYPERCALL;
default:
return EMULATE_FAIL;
}
}
static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
const unsigned long *args, unsigned long *hret)
{
/* Report unimplemented hypercall to guest */
*hret = -KVM_ENOSYS;
return RESUME_GUEST;
}
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
{
unsigned long num, args[MAX_HYPCALL_ARGS];
/* read hypcall number and arguments */
num = vcpu->arch.gprs[2]; /* v0 */
args[0] = vcpu->arch.gprs[4]; /* a0 */
args[1] = vcpu->arch.gprs[5]; /* a1 */
args[2] = vcpu->arch.gprs[6]; /* a2 */
args[3] = vcpu->arch.gprs[7]; /* a3 */
return kvm_mips_hypercall(vcpu, num,
args, &vcpu->arch.gprs[2] /* v0 */);
}
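For completeness, a hedged guest-side sketch of this ABI (illustrative, not from the tree): hypercall number in v0, up to four arguments in a0-a3, result returned in v0. The word 0x42000028 encodes COP0 | CO | hypcall_op (0x28) with a zero code field, matching the decode above:

static inline unsigned long ex_hypcall(unsigned long num,
                                       unsigned long arg0,
                                       unsigned long arg1,
                                       unsigned long arg2,
                                       unsigned long arg3)
{
        register unsigned long v0 asm("$2") = num;
        register unsigned long a0 asm("$4") = arg0;
        register unsigned long a1 asm("$5") = arg1;
        register unsigned long a2 asm("$6") = arg2;
        register unsigned long a3 asm("$7") = arg3;

        asm volatile(".word 0x42000028"         /* hypcall, code 0 */
                     : "+r"(v0)
                     : "r"(a0), "r"(a1), "r"(a2), "r"(a3)
                     : "memory");
        /* -KVM_ENOSYS until an actual hypercall is implemented above */
        return v0;
}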
...@@ -30,8 +30,13 @@ ...@@ -30,8 +30,13 @@
#define C_TI (_ULCAST_(1) << 30) #define C_TI (_ULCAST_(1) << 30)
#ifdef CONFIG_KVM_MIPS_VZ
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
#else
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) #define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
#endif
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority); void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority); void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
......
...@@ -59,6 +59,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { ...@@ -59,6 +59,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU }, { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
{ "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
{ "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
{ "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
{ "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
{ "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
{ "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
{ "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
{ "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
#endif
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU }, { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
...@@ -66,6 +76,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { ...@@ -66,6 +76,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{NULL} {NULL}
}; };
bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void)
{
kvm_trace_guest_mode_change = 1;
return 0;
}
void kvm_guest_mode_change_trace_unreg(void)
{
kvm_trace_guest_mode_change = 0;
}
/* /*
* XXXKYMA: We are simulating a processor that has the WII bit set in * XXXKYMA: We are simulating a processor that has the WII bit set in
* Config7, so we are "runnable" if interrupts are pending * Config7, so we are "runnable" if interrupts are pending
...@@ -82,7 +105,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) ...@@ -82,7 +105,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
int kvm_arch_hardware_enable(void) int kvm_arch_hardware_enable(void)
{ {
return 0; return kvm_mips_callbacks->hardware_enable();
}
void kvm_arch_hardware_disable(void)
{
kvm_mips_callbacks->hardware_disable();
} }
int kvm_arch_hardware_setup(void) int kvm_arch_hardware_setup(void)
...@@ -97,6 +125,18 @@ void kvm_arch_check_processor_compat(void *rtn) ...@@ -97,6 +125,18 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{ {
switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
case KVM_VM_MIPS_TE:
#endif
break;
default:
/* Unsupported KVM type */
return -EINVAL;
}
/* Allocate page table to map GPA -> RPA */ /* Allocate page table to map GPA -> RPA */
kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
if (!kvm->arch.gpa_mm.pgd) if (!kvm->arch.gpa_mm.pgd)
...@@ -301,8 +341,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) ...@@ -301,8 +341,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
/* Build guest exception vectors dynamically in unmapped memory */ /* Build guest exception vectors dynamically in unmapped memory */
handler = gebase + 0x2000; handler = gebase + 0x2000;
/* TLB refill */ /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
refill_start = gebase; refill_start = gebase;
if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
refill_start += 0x080;
refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
/* General Exception Entry point */ /* General Exception Entry point */
...@@ -353,9 +395,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) ...@@ -353,9 +395,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
/* Init */ /* Init */
vcpu->arch.last_sched_cpu = -1; vcpu->arch.last_sched_cpu = -1;
vcpu->arch.last_exec_cpu = -1;
/* Start off the timer */
kvm_mips_init_count(vcpu);
return vcpu; return vcpu;
...@@ -1059,7 +1099,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) ...@@ -1059,7 +1099,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
break; break;
default: default:
r = 0; r = kvm_mips_callbacks->check_extension(kvm, ext);
break; break;
} }
return r; return r;
...@@ -1067,7 +1107,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) ...@@ -1067,7 +1107,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{ {
return kvm_mips_pending_timer(vcpu); return kvm_mips_pending_timer(vcpu) ||
kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
} }
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
...@@ -1092,7 +1133,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) ...@@ -1092,7 +1133,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
cop0 = vcpu->arch.cop0; cop0 = vcpu->arch.cop0;
kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n", kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_status(cop0),
kvm_read_c0_guest_cause(cop0)); kvm_read_c0_guest_cause(cop0));
...@@ -1208,6 +1249,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1208,6 +1249,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->mode = OUTSIDE_GUEST_MODE;
/* re-enable HTW before enabling interrupts */ /* re-enable HTW before enabling interrupts */
if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
htw_start(); htw_start();
/* Set a default exit reason */ /* Set a default exit reason */
...@@ -1226,9 +1268,11 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1226,9 +1268,11 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
cause, opc, run, vcpu); cause, opc, run, vcpu);
trace_kvm_exit(vcpu, exccode); trace_kvm_exit(vcpu, exccode);
if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
/* /*
* Do a privilege check, if in UM most of these exit conditions end up * Do a privilege check, if in UM most of these exit conditions
* causing an exception to be delivered to the Guest Kernel * end up causing an exception to be delivered to the Guest
* Kernel
*/ */
er = kvm_mips_check_privilege(cause, opc, run, vcpu); er = kvm_mips_check_privilege(cause, opc, run, vcpu);
if (er == EMULATE_PRIV_FAIL) { if (er == EMULATE_PRIV_FAIL) {
...@@ -1238,6 +1282,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1238,6 +1282,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = RESUME_HOST; ret = RESUME_HOST;
goto skip_emul; goto skip_emul;
} }
}
switch (exccode) { switch (exccode) {
case EXCCODE_INT: case EXCCODE_INT:
...@@ -1267,7 +1312,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1267,7 +1312,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
break; break;
case EXCCODE_TLBS: case EXCCODE_TLBS:
kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
badvaddr); badvaddr);
...@@ -1328,12 +1373,17 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1328,12 +1373,17 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
break; break;
case EXCCODE_GE:
/* defer exit accounting to handler */
ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
break;
default: default:
if (cause & CAUSEF_BD) if (cause & CAUSEF_BD)
opc += 1; opc += 1;
inst = 0; inst = 0;
kvm_get_badinstr(opc, vcpu, &inst); kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr, exccode, opc, inst, badvaddr,
kvm_read_c0_guest_status(vcpu->arch.cop0)); kvm_read_c0_guest_status(vcpu->arch.cop0));
kvm_arch_vcpu_dump_regs(vcpu); kvm_arch_vcpu_dump_regs(vcpu);
...@@ -1346,6 +1396,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1346,6 +1396,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
skip_emul: skip_emul:
local_irq_disable(); local_irq_disable();
if (ret == RESUME_GUEST)
kvm_vz_acquire_htimer(vcpu);
if (er == EMULATE_DONE && !(ret & RESUME_HOST)) if (er == EMULATE_DONE && !(ret & RESUME_HOST))
kvm_mips_deliver_interrupts(vcpu, cause); kvm_mips_deliver_interrupts(vcpu, cause);
...@@ -1391,6 +1444,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1391,6 +1444,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
} }
/* Disable HTW before returning to guest or host */ /* Disable HTW before returning to guest or host */
if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
htw_stop(); htw_stop();
return ret; return ret;
...@@ -1527,16 +1581,18 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu) ...@@ -1527,16 +1581,18 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
void kvm_lose_fpu(struct kvm_vcpu *vcpu) void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{ {
/* /*
* FPU & MSA get disabled in root context (hardware) when it is disabled * With T&E, FPU & MSA get disabled in root context (hardware) when it
* in guest context (software), but the register state in the hardware * is disabled in guest context (software), but the register state in
* may still be in use. This is why we explicitly re-enable the hardware * the hardware may still be in use.
* before saving. * This is why we explicitly re-enable the hardware before saving.
*/ */
preempt_disable(); preempt_disable();
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
set_c0_config5(MIPS_CONF5_MSAEN); set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard(); enable_fpu_hazard();
}
__kvm_save_msa(&vcpu->arch); __kvm_save_msa(&vcpu->arch);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
...@@ -1549,8 +1605,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) ...@@ -1549,8 +1605,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
} }
vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
set_c0_status(ST0_CU1); set_c0_status(ST0_CU1);
enable_fpu_hazard(); enable_fpu_hazard();
}
__kvm_save_fpu(&vcpu->arch); __kvm_save_fpu(&vcpu->arch);
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
......
...@@ -992,6 +992,22 @@ static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo) ...@@ -992,6 +992,22 @@ static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
return kvm_mips_gpa_pte_to_gva_unmapped(pte); return kvm_mips_gpa_pte_to_gva_unmapped(pte);
} }
#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu,
bool write_fault)
{
int ret;
ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
if (ret)
return ret;
/* Invalidate this entry in the TLB */
return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif
/* XXXKYMA: Must be called with interrupts disabled */ /* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu, struct kvm_vcpu *vcpu,
...@@ -1225,6 +1241,10 @@ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) ...@@ -1225,6 +1241,10 @@ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{ {
int err; int err;
if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
"Expect BadInstr/BadInstrP registers to be used with VZ\n"))
return -EINVAL;
retry: retry:
kvm_trap_emul_gva_lockless_begin(vcpu); kvm_trap_emul_gva_lockless_begin(vcpu);
err = get_user(*out, opc); err = get_user(*out, opc);
......
...@@ -33,6 +33,25 @@ ...@@ -33,6 +33,25 @@
#define KVM_GUEST_PC_TLB 0 #define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1 #define KVM_GUEST_SP_TLB 1
#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);
static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;
if (cpu_has_guestid)
return 0;
else
return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{ {
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
...@@ -166,6 +185,13 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, ...@@ -166,6 +185,13 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
local_irq_restore(flags); local_irq_restore(flags);
/*
* We don't want to get reserved instruction exceptions for missing tlb
* entries.
*/
if (cpu_has_vtag_icache)
flush_icache_all();
if (user && idx_user >= 0) if (user && idx_user >= 0)
kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n", kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
__func__, (va & VPN2_MASK) | __func__, (va & VPN2_MASK) |
...@@ -179,6 +205,421 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, ...@@ -179,6 +205,421 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
} }
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
#ifdef CONFIG_KVM_MIPS_VZ
/* GuestID management */
/**
* clear_root_gid() - Set GuestCtl1.RID for normal root operation.
*/
static inline void clear_root_gid(void)
{
if (cpu_has_guestid) {
clear_c0_guestctl1(MIPS_GCTL1_RID);
mtc0_tlbw_hazard();
}
}
/**
* set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
*
* Sets the root GuestID to match the current guest GuestID, for TLB operation
* on the GPA->RPA mappings in the root TLB.
*
* The caller must be sure to disable HTW while the root GID is set, and
* possibly longer if TLB registers are modified.
*/
static inline void set_root_gid_to_guest_gid(void)
{
unsigned int guestctl1;
if (cpu_has_guestid) {
back_to_back_c0_hazard();
guestctl1 = read_c0_guestctl1();
guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
<< MIPS_GCTL1_RID_SHIFT;
write_c0_guestctl1(guestctl1);
mtc0_tlbw_hazard();
}
}
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
int idx;
unsigned long flags, old_entryhi;
local_irq_save(flags);
htw_stop();
/* Set root GuestID for root probe and write of guest TLB entry */
set_root_gid_to_guest_gid();
old_entryhi = read_c0_entryhi();
idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
kvm_mips_get_root_asid(vcpu));
write_c0_entryhi(old_entryhi);
clear_root_gid();
mtc0_tlbw_hazard();
htw_start();
local_irq_restore(flags);
/*
* We don't want to get reserved instruction exceptions for missing tlb
* entries.
*/
if (cpu_has_vtag_icache)
flush_icache_all();
if (idx > 0)
kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
__func__, (va & VPN2_MASK) |
kvm_mips_get_root_asid(vcpu), idx);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
/**
* kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
* @vcpu: KVM VCPU pointer.
* @gva: Guest virtual address in a TLB mapped guest segment.
* @gpa: Pointer to output guest physical address it maps to.
*
* Converts a guest virtual address in a guest TLB mapped segment to a guest
* physical address, by probing the guest TLB.
*
* Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been
* written.
* -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
* have been written.
*/
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa)
{
unsigned long o_entryhi, o_entrylo[2], o_pagemask;
unsigned int o_index;
unsigned long entrylo[2], pagemask, pagemaskbit, pa;
unsigned long flags;
int index;
/* Probe the guest TLB for a mapping */
local_irq_save(flags);
/* Set root GuestID for root probe of guest TLB entry */
htw_stop();
set_root_gid_to_guest_gid();
o_entryhi = read_gc0_entryhi();
o_index = read_gc0_index();
write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
mtc0_tlbw_hazard();
guest_tlb_probe();
tlb_probe_hazard();
index = read_gc0_index();
if (index < 0) {
/* No match, fail */
write_gc0_entryhi(o_entryhi);
write_gc0_index(o_index);
clear_root_gid();
htw_start();
local_irq_restore(flags);
return -EFAULT;
}
/* Match! read the TLB entry */
o_entrylo[0] = read_gc0_entrylo0();
o_entrylo[1] = read_gc0_entrylo1();
o_pagemask = read_gc0_pagemask();
mtc0_tlbr_hazard();
guest_tlb_read();
tlb_read_hazard();
entrylo[0] = read_gc0_entrylo0();
entrylo[1] = read_gc0_entrylo1();
pagemask = ~read_gc0_pagemask() & ~0x1fffl;
write_gc0_entryhi(o_entryhi);
write_gc0_index(o_index);
write_gc0_entrylo0(o_entrylo[0]);
write_gc0_entrylo1(o_entrylo[1]);
write_gc0_pagemask(o_pagemask);
clear_root_gid();
htw_start();
local_irq_restore(flags);
/* Select one of the EntryLo values and interpret the GPA */
pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
pa = entrylo[!!(gva & pagemaskbit)];
/*
* TLB entry may have become invalid since TLB probe if physical FTLB
* entries are shared between threads (e.g. I6400).
*/
if (!(pa & ENTRYLO_V))
return -EFAULT;
/*
* Note, this doesn't take guest MIPS32 XPA into account, where PFN is
* split with XI/RI in the middle.
*/
pa = (pa << 6) & ~0xfffl;
pa |= gva & ~(pagemask | pagemaskbit);
*gpa = pa;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
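The PageMask arithmetic above is dense, so a small user-space sketch with example values (16KB guest pages, i.e. guest PageMask 0x6000; the GVA is arbitrary) may help: x ^ (x & (x - 1)) isolates the lowest set bit of the inverted mask, and halving it yields the even/odd page select bit:

#include <stdio.h>

int main(void)
{
        unsigned long gc0_pagemask = 0x6000;    /* example: 16KB pages */
        unsigned long gva = 0x0040d234;         /* arbitrary mapped GVA */

        unsigned long pagemask = ~gc0_pagemask & ~0x1fffl;
        unsigned long pagemaskbit =
                (pagemask ^ (pagemask & (pagemask - 1))) >> 1;

        printf("pagemask    %#lx\n", pagemask);         /* ...ffff8000 */
        printf("pagemaskbit %#lx\n", pagemaskbit);      /* 0x4000 */
        printf("odd page?   %d\n", !!(gva & pagemaskbit));
        /* low bits carried into the GPA: gva & ~(pagemask | pagemaskbit) */
        printf("offset      %#lx\n", gva & ~(pagemask | pagemaskbit));
        return 0;
}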
/**
* kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
* guests.
*
* Invalidate all entries in root tlb which are GPA mappings.
*/
void kvm_vz_local_flush_roottlb_all_guests(void)
{
unsigned long flags;
unsigned long old_entryhi, old_pagemask, old_guestctl1;
int entry;
if (WARN_ON(!cpu_has_guestid))
return;
local_irq_save(flags);
htw_stop();
/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
old_entryhi = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
old_guestctl1 = read_c0_guestctl1();
/*
* Invalidate guest entries in root TLB while leaving root entries
* intact when possible.
*/
for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_read();
tlb_read_hazard();
/* Don't invalidate non-guest (RVA) mappings in the root TLB */
if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
continue;
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
write_c0_guestctl1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
}
write_c0_entryhi(old_entryhi);
write_c0_pagemask(old_pagemask);
write_c0_guestctl1(old_guestctl1);
tlbw_use_hazard();
htw_start();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
/**
* kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
*
* Invalidate all entries in guest tlb irrespective of guestid.
*/
void kvm_vz_local_flush_guesttlb_all(void)
{
unsigned long flags;
unsigned long old_index;
unsigned long old_entryhi;
unsigned long old_entrylo[2];
unsigned long old_pagemask;
int entry;
u64 cvmmemctl2 = 0;
local_irq_save(flags);
/* Preserve all clobbered guest registers */
old_index = read_gc0_index();
old_entryhi = read_gc0_entryhi();
old_entrylo[0] = read_gc0_entrylo0();
old_entrylo[1] = read_gc0_entrylo1();
old_pagemask = read_gc0_pagemask();
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* Inhibit machine check due to multiple matching TLB entries */
cvmmemctl2 = read_c0_cvmmemctl2();
cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
break;
}
/* Invalidate guest entries in guest TLB */
write_gc0_entrylo0(0);
write_gc0_entrylo1(0);
write_gc0_pagemask(0);
for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
/* Make sure all entries differ. */
write_gc0_index(entry);
write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
mtc0_tlbw_hazard();
guest_tlb_write_indexed();
}
if (cvmmemctl2) {
cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
}
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
write_gc0_entrylo0(old_entrylo[0]);
write_gc0_entrylo1(old_entrylo[1]);
write_gc0_pagemask(old_pagemask);
tlbw_use_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
/**
* kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
* @buf: Buffer to write TLB entries into.
* @index: Start index.
* @count: Number of entries to save.
*
* Save a range of guest TLB entries. The caller must ensure interrupts are
* disabled.
*/
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count)
{
unsigned int end = index + count;
unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
unsigned int guestctl1 = 0;
int old_index, i;
/* Save registers we're about to clobber */
old_index = read_gc0_index();
old_entryhi = read_gc0_entryhi();
old_entrylo0 = read_gc0_entrylo0();
old_entrylo1 = read_gc0_entrylo1();
old_pagemask = read_gc0_pagemask();
/* Set root GuestID for root probe */
htw_stop();
set_root_gid_to_guest_gid();
if (cpu_has_guestid)
guestctl1 = read_c0_guestctl1();
/* Read each entry from guest TLB */
for (i = index; i < end; ++i, ++buf) {
write_gc0_index(i);
mtc0_tlbr_hazard();
guest_tlb_read();
tlb_read_hazard();
if (cpu_has_guestid &&
(read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
/* Entry invalid or belongs to another guest */
buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
buf->tlb_lo[0] = 0;
buf->tlb_lo[1] = 0;
buf->tlb_mask = 0;
} else {
/* Entry belongs to the right guest */
buf->tlb_hi = read_gc0_entryhi();
buf->tlb_lo[0] = read_gc0_entrylo0();
buf->tlb_lo[1] = read_gc0_entrylo1();
buf->tlb_mask = read_gc0_pagemask();
}
}
/* Clear root GuestID again */
clear_root_gid();
htw_start();
/* Restore clobbered registers */
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
write_gc0_entrylo0(old_entrylo0);
write_gc0_entrylo1(old_entrylo1);
write_gc0_pagemask(old_pagemask);
tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
/**
* kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
* @buf: Buffer to read TLB entries from.
* @index: Start index.
* @count: Number of entries to load.
*
* Load a range of guest TLB entries. The caller must ensure interrupts are
* disabled.
*/
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count)
{
unsigned int end = index + count;
unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
int old_index, i;
/* Save registers we're about to clobber */
old_index = read_gc0_index();
old_entryhi = read_gc0_entryhi();
old_entrylo0 = read_gc0_entrylo0();
old_entrylo1 = read_gc0_entrylo1();
old_pagemask = read_gc0_pagemask();
/* Set root GuestID for root probe */
htw_stop();
set_root_gid_to_guest_gid();
/* Write each entry to guest TLB */
for (i = index; i < end; ++i, ++buf) {
write_gc0_index(i);
write_gc0_entryhi(buf->tlb_hi);
write_gc0_entrylo0(buf->tlb_lo[0]);
write_gc0_entrylo1(buf->tlb_lo[1]);
write_gc0_pagemask(buf->tlb_mask);
mtc0_tlbw_hazard();
guest_tlb_write_indexed();
}
/* Clear root GuestID again */
clear_root_gid();
htw_start();
/* Restore clobbered registers */
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
write_gc0_entrylo0(old_entrylo0);
write_gc0_entrylo1(old_entrylo1);
write_gc0_pagemask(old_pagemask);
tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
#endif
/**
* kvm_mips_suspend_mm() - Suspend the active mm.
* @cpu The CPU we're running on.
...
...@@ -17,6 +17,13 @@
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
/*
* arch/mips/kvm/mips.c
*/
extern bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void);
void kvm_guest_mode_change_trace_unreg(void);
/*
* Tracepoints for VM enters
*/
...@@ -62,10 +69,20 @@ DEFINE_EVENT(kvm_transition, kvm_out,
#define KVM_TRACE_EXIT_MSA_FPE 14
#define KVM_TRACE_EXIT_FPE 15
#define KVM_TRACE_EXIT_MSA_DISABLED 21
#define KVM_TRACE_EXIT_GUEST_EXIT 27
/* Further exit reasons */
#define KVM_TRACE_EXIT_WAIT 32
#define KVM_TRACE_EXIT_CACHE 33
#define KVM_TRACE_EXIT_SIGNAL 34
/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
#define KVM_TRACE_EXIT_GEXCCODE_BASE 64
#define KVM_TRACE_EXIT_GPSI 64 /* 0 */
#define KVM_TRACE_EXIT_GSFC 65 /* 1 */
#define KVM_TRACE_EXIT_HC 66 /* 2 */
#define KVM_TRACE_EXIT_GRR 67 /* 3 */
#define KVM_TRACE_EXIT_GVA 72 /* 8 */
#define KVM_TRACE_EXIT_GHFC 73 /* 9 */
#define KVM_TRACE_EXIT_GPA 74 /* 10 */
/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
...@@ -83,9 +100,17 @@ DEFINE_EVENT(kvm_transition, kvm_out,
{ KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
{ KVM_TRACE_EXIT_FPE, "FPE" }, \
{ KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
{ KVM_TRACE_EXIT_GUEST_EXIT, "Guest Exit" }, \
{ KVM_TRACE_EXIT_WAIT, "WAIT" }, \
{ KVM_TRACE_EXIT_CACHE, "CACHE" }, \
{ KVM_TRACE_EXIT_SIGNAL, "Signal" }, \
{ KVM_TRACE_EXIT_GPSI, "GPSI" }, \
{ KVM_TRACE_EXIT_GSFC, "GSFC" }, \
{ KVM_TRACE_EXIT_HC, "HC" }, \
{ KVM_TRACE_EXIT_GRR, "GRR" }, \
{ KVM_TRACE_EXIT_GVA, "GVA" }, \
{ KVM_TRACE_EXIT_GHFC, "GHFC" }, \
{ KVM_TRACE_EXIT_GPA, "GPA" }
TRACE_EVENT(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
...@@ -158,6 +183,8 @@ TRACE_EVENT(kvm_exit,
{ KVM_TRACE_COP0(16, 4), "Config4" }, \
{ KVM_TRACE_COP0(16, 5), "Config5" }, \
{ KVM_TRACE_COP0(16, 7), "Config7" }, \
{ KVM_TRACE_COP0(17, 1), "MAAR" }, \
{ KVM_TRACE_COP0(17, 2), "MAARI" }, \
{ KVM_TRACE_COP0(26, 0), "ECC" }, \
{ KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
{ KVM_TRACE_COP0(31, 2), "KScratch1" }, \
...@@ -268,6 +295,51 @@ TRACE_EVENT(kvm_asid_change,
__entry->new_asid)
);
TRACE_EVENT(kvm_guestid_change,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
TP_ARGS(vcpu, guestid),
TP_STRUCT__entry(
__field(unsigned int, guestid)
),
TP_fast_assign(
__entry->guestid = guestid;
),
TP_printk("GuestID: 0x%02x",
__entry->guestid)
);
TRACE_EVENT_FN(kvm_guest_mode_change,
TP_PROTO(struct kvm_vcpu *vcpu),
TP_ARGS(vcpu),
TP_STRUCT__entry(
__field(unsigned long, epc)
__field(unsigned long, pc)
__field(unsigned long, badvaddr)
__field(unsigned int, status)
__field(unsigned int, cause)
),
TP_fast_assign(
__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
__entry->pc = vcpu->arch.pc;
__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
),
TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
__entry->epc,
__entry->pc,
__entry->status,
__entry->cause,
__entry->badvaddr),
kvm_guest_mode_change_trace_reg,
kvm_guest_mode_change_trace_unreg
);
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
...
...@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
...@@ -40,6 +41,29 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
return gpa;
}
static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 inst = 0;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
kvm_read_c0_guest_status(vcpu->arch.cop0));
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
...@@ -82,6 +106,10 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
break;
case EMULATE_HYPERCALL:
ret = kvm_mips_handle_hypcall(vcpu);
break;
default:
BUG();
}
...@@ -484,6 +512,31 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
return ret;
}
static int kvm_trap_emul_hardware_enable(void)
{
return 0;
}
static void kvm_trap_emul_hardware_disable(void)
{
}
static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
case KVM_CAP_MIPS_TE:
r = 1;
break;
default:
r = 0;
break;
}
return r;
}
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
...@@ -561,6 +614,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
u32 config, config1;
int vcpu_id = vcpu->vcpu_id;
/* Start off the timer at 100 MHz */
kvm_mips_init_count(vcpu, 100*1000*1000);
/*
* Arch specific stuff, set up config registers properly so that the
* guest will come up as expected
...@@ -589,6 +645,13 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
/* Read the cache characteristics from the host Config1 Register */
config1 = (read_c0_config1() & ~0x7f);
/* DCache line size not correctly reported in Config1 on Octeon CPUs */
if (cpu_dcache_line_size()) {
config1 &= ~MIPS_CONF1_DL;
config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
}
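/*
* Illustrative example: a 128 byte D-cache line gives ilog2(128) - 1 = 6
* in Config1.DL, whose encoding is line size = 2 << DL bytes.
*/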
/* Set up MMU size */
config1 &= ~(0x3f << 25);
config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
...@@ -892,10 +955,12 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
if (v & CAUSEF_DC) {
/* disable timer first */
kvm_mips_count_disable_cause(vcpu);
kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, v);
} else {
/* enable timer last */
kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, v);
kvm_mips_count_enable_cause(vcpu);
}
} else {
...@@ -1230,7 +1295,11 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
.handle_fpe = kvm_trap_emul_handle_fpe,
.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
.handle_guest_exit = kvm_trap_emul_no_handler,
.hardware_enable = kvm_trap_emul_hardware_enable,
.hardware_disable = kvm_trap_emul_hardware_disable,
.check_extension = kvm_trap_emul_check_extension,
.vcpu_init = kvm_trap_emul_vcpu_init,
.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
.vcpu_setup = kvm_trap_emul_vcpu_setup,
...
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Support for hardware virtualization extensions
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Yann Le Du <ledu@kymasys.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>
#include <linux/kvm_host.h>
#include "interrupt.h"
#include "trace.h"
/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
/*
* Number of guest VTLB entries to use, so we can catch inconsistency between
* CPUs.
*/
static unsigned int kvm_vz_guest_vtlb_size;
static inline long kvm_vz_read_gc0_ebase(void)
{
if (sizeof(long) == 8 && cpu_has_ebase_wg)
return read_gc0_ebase_64();
else
return read_gc0_ebase();
}
static inline void kvm_vz_write_gc0_ebase(long v)
{
/*
* First write with WG=1 to write upper bits, then write again in case
* WG should be left at 0.
* write_gc0_ebase_64() is no longer UNDEFINED since R6.
*/
if (sizeof(long) == 8 &&
(cpu_has_mips64r6 || cpu_has_ebase_wg)) {
write_gc0_ebase_64(v | MIPS_EBASE_WG);
write_gc0_ebase_64(v);
} else {
write_gc0_ebase(v | MIPS_EBASE_WG);
write_gc0_ebase(v);
}
}
/*
* These Config bits may be writable by the guest:
* Config: [K23, KU] (!TLB), K0
* Config1: (none)
* Config2: [TU, SU] (impl)
* Config3: ISAOnExc
* Config4: FTLBPageSize
* Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR
*/
static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
return CONF_CM_CMASK;
}
static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
return MIPS_CONF3_ISA_OE;
}
static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
/* no need to be exact */
return MIPS_CONF4_VFTLBPAGESIZE;
}
static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
/* Permit MSAEn changes if MSA supported and enabled */
if (kvm_mips_guest_has_msa(&vcpu->arch))
mask |= MIPS_CONF5_MSAEN;
/*
* Permit guest FPU mode changes if FPU is enabled and the relevant
* feature exists according to FIR register.
*/
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
if (cpu_has_ufr)
mask |= MIPS_CONF5_UFR;
if (cpu_has_fre)
mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
}
return mask;
}
/*
* VZ optionally allows these additional Config bits to be written by root:
* Config: M, [MT]
* Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
* Config2: M
* Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
* VInt, SP, CDMM, MT, SM, TL]
* Config4: M, [VTLBSizeExt, MMUSizeExt]
* Config5: MRP
*/
static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}
static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
/* Permit FPU to be present if FPU is supported */
if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
mask |= MIPS_CONF1_FP;
return mask;
}
static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}
static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
mask |= MIPS_CONF3_MSA;
return mask;
}
static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}
static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
/* VZ guest has already converted gva to gpa */
return gva;
}
static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
set_bit(priority, &vcpu->arch.pending_exceptions);
clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}
static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
clear_bit(priority, &vcpu->arch.pending_exceptions);
set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}
static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
/*
* timer expiry is asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
/*
* timer expiry is asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq)
{
int intr = (int)irq->irq;
/*
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
switch (intr) {
case 2:
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
break;
case 3:
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
break;
case 4:
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
break;
default:
break;
}
}
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq)
{
int intr = (int)irq->irq;
/*
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
switch (intr) {
case -2:
kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
break;
case -3:
kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
break;
case -4:
kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
break;
default:
break;
}
}
static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
[MIPS_EXC_INT_TIMER] = C_IRQ5,
[MIPS_EXC_INT_IO] = C_IRQ0,
[MIPS_EXC_INT_IPI_1] = C_IRQ1,
[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
kvm_vz_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
set_gc0_cause(C_TI);
break;
case MIPS_EXC_INT_IO:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
if (cpu_has_guestctl2)
set_c0_guestctl2(irq);
else
set_gc0_cause(irq);
break;
default:
break;
}
clear_bit(priority, &vcpu->arch.pending_exceptions);
return 1;
}
static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
kvm_vz_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
/*
* Call to kvm_write_c0_guest_compare() clears Cause.TI in
* kvm_mips_emulate_CP0(). Explicitly clear irq associated with
* Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
* supported or if not using GuestCtl2 Hardware Clear.
*/
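/*
* Note (added for clarity): irq is a Cause.IP bit, so the << 14 below
* lines it up with the corresponding GuestCtl2 Hardware Clear (HC) bit.
*/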
if (cpu_has_guestctl2) {
if (!(read_c0_guestctl2() & (irq << 14)))
clear_c0_guestctl2(irq);
} else {
clear_gc0_cause(irq);
}
break;
case MIPS_EXC_INT_IO:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
if (cpu_has_guestctl2) {
if (!(read_c0_guestctl2() & (irq << 14)))
clear_c0_guestctl2(irq);
} else {
clear_gc0_cause(irq);
}
break;
default:
break;
}
clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
return 1;
}
/*
* VZ guest timer handling.
*/
/**
* kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
* @vcpu: Virtual CPU.
*
* Returns: true if the VZ GTOffset & real guest CP0_Count should be used
* instead of software emulation of guest timer.
* false otherwise.
*/
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
if (kvm_mips_count_disabled(vcpu))
return false;
/* Chosen frequency must match real frequency */
if (mips_hpt_frequency != vcpu->arch.count_hz)
return false;
/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
if (current_cpu_data.gtoffset_mask != 0xffffffff)
return false;
return true;
}
/**
* _kvm_vz_restore_stimer() - Restore soft timer state.
* @vcpu: Virtual CPU.
* @compare: CP0_Compare register value, restored by caller.
* @cause: CP0_Cause register to restore.
*
* Restore VZ state relating to the soft timer. The hard timer can be enabled
* later.
*/
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
u32 cause)
{
/*
* Avoid spurious counter interrupts by setting Guest CP0_Count to just
* after Guest CP0_Compare.
*/
write_c0_gtoffset(compare - read_c0_count());
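/*
* Added note: the guest reads CP0_Count as root CP0_Count + GTOffset, so
* right after this write guest Count reads back as Compare, advancing
* past it as the root Count ticks.
*/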
back_to_back_c0_hazard();
write_gc0_cause(cause);
}
/**
* _kvm_vz_restore_htimer() - Restore hard timer state.
* @vcpu: Virtual CPU.
* @compare: CP0_Compare register value, restored by caller.
* @cause: CP0_Cause register to restore.
*
* Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
* value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
*/
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
u32 compare, u32 cause)
{
u32 start_count, after_count;
ktime_t freeze_time;
unsigned long flags;
/*
* Freeze the soft-timer and sync the guest CP0_Count with it. We do
* this with interrupts disabled to avoid latency.
*/
local_irq_save(flags);
freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
write_c0_gtoffset(start_count - read_c0_count());
local_irq_restore(flags);
/* restore guest CP0_Cause, as TI may already be set */
back_to_back_c0_hazard();
write_gc0_cause(cause);
/*
* The above sequence isn't atomic and would result in lost timer
* interrupts if we're not careful. Detect if a timer interrupt is due
* and assert it.
*/
back_to_back_c0_hazard();
after_count = read_gc0_count();
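/*
* Illustrative example of the overflow-safe check below: with
* start_count = 0xfffffff0, compare = 0x10 and after_count = 0x12,
* after_count - start_count = 0x22 > compare - start_count - 1 = 0x1f,
* so a timer interrupt that became due in between is detected.
*/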
if (after_count - start_count > compare - start_count - 1)
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
/**
* kvm_vz_restore_timer() - Restore timer state.
* @vcpu: Virtual CPU.
*
* Restore soft timer state from saved context.
*/
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
u32 cause, compare;
compare = kvm_read_sw_gc0_compare(cop0);
cause = kvm_read_sw_gc0_cause(cop0);
write_gc0_compare(compare);
_kvm_vz_restore_stimer(vcpu, compare, cause);
}
/**
* kvm_vz_acquire_htimer() - Switch to hard timer state.
* @vcpu: Virtual CPU.
*
* Restore hard timer state on top of existing soft timer state if possible.
*
* Since hard timer won't remain active over preemption, preemption should be
* disabled by the caller.
*/
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
u32 gctl0;
gctl0 = read_c0_guestctl0();
if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
/* enable guest access to hard timer */
write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
read_gc0_cause());
}
}
/**
* _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
* @vcpu: Virtual CPU.
* @compare: Pointer to write compare value to.
* @cause: Pointer to write cause value to.
*
* Save VZ guest timer state and switch to software emulation of guest CP0
* timer. The hard timer must already be in use, so preemption should be
* disabled.
*/
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
u32 *out_compare, u32 *out_cause)
{
u32 cause, compare, before_count, end_count;
ktime_t before_time;
compare = read_gc0_compare();
*out_compare = compare;
before_time = ktime_get();
/*
* Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
* at which no pending timer interrupt is missing.
*/
before_count = read_gc0_count();
back_to_back_c0_hazard();
cause = read_gc0_cause();
*out_cause = cause;
/*
* Record a final CP0_Count which we will transfer to the soft-timer.
* This is recorded *after* saving CP0_Cause, so we don't get any timer
* interrupts from just after the final CP0_Count point.
*/
back_to_back_c0_hazard();
end_count = read_gc0_count();
/*
* The above sequence isn't atomic, so we could miss a timer interrupt
* between reading CP0_Cause and end_count. Detect and record any timer
* interrupt due between before_count and end_count.
*/
if (end_count - before_count > compare - before_count - 1)
kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
/*
* Restore soft-timer, ignoring a small amount of negative drift due to
* delay between freeze_hrtimer and setting CP0_GTOffset.
*/
kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}
/**
* kvm_vz_save_timer() - Save guest timer state.
* @vcpu: Virtual CPU.
*
* Save VZ guest timer state and switch to soft guest timer if hard timer was in
* use.
*/
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
u32 gctl0, compare, cause;
gctl0 = read_c0_guestctl0();
if (gctl0 & MIPS_GCTL0_GT) {
/* disable guest use of hard timer */
write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
/* save hard timer state */
_kvm_vz_save_htimer(vcpu, &compare, &cause);
} else {
compare = read_gc0_compare();
cause = read_gc0_cause();
}
/* save timer-related state to VCPU context */
kvm_write_sw_gc0_cause(cop0, cause);
kvm_write_sw_gc0_compare(cop0, compare);
}
/**
* kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
* @vcpu: Virtual CPU.
*
* Transfers the state of the hard guest timer to the soft guest timer, leaving
* guest state intact so it can continue to be used with the soft timer.
*/
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
u32 gctl0, compare, cause;
preempt_disable();
gctl0 = read_c0_guestctl0();
if (gctl0 & MIPS_GCTL0_GT) {
/* disable guest use of timer */
write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
/* switch to soft timer */
_kvm_vz_save_htimer(vcpu, &compare, &cause);
/* leave soft timer in usable state */
_kvm_vz_restore_stimer(vcpu, compare, cause);
}
preempt_enable();
}
/**
* is_eva_access() - Find whether an instruction is an EVA memory accessor.
* @inst: 32-bit instruction encoding.
*
* Finds whether @inst encodes an EVA memory access instruction, which would
* indicate that emulation of it should access the user mode address space
* instead of the kernel mode address space. This matters for MUSUK segments
* which are TLB mapped for user mode but unmapped for kernel mode.
*
* Returns: Whether @inst encodes an EVA accessor instruction.
*/
static bool is_eva_access(union mips_instruction inst)
{
if (inst.spec3_format.opcode != spec3_op)
return false;
switch (inst.spec3_format.func) {
case lwle_op:
case lwre_op:
case cachee_op:
case sbe_op:
case she_op:
case sce_op:
case swe_op:
case swle_op:
case swre_op:
case prefe_op:
case lbue_op:
case lhue_op:
case lbe_op:
case lhe_op:
case lle_op:
case lwe_op:
return true;
default:
return false;
}
}
/**
* is_eva_am_mapped() - Find whether an access mode is mapped.
* @vcpu: KVM VCPU state.
* @am: 3-bit encoded access mode.
* @eu: Segment becomes unmapped and uncached when Status.ERL=1.
*
* Decode @am to find whether it encodes a mapped segment for the current VCPU
* state. Where necessary @eu and the actual instruction causing the fault are
* taken into account to make the decision.
*
* Returns: Whether the VCPU faulted on a TLB mapped address.
*/
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
u32 am_lookup;
int err;
/*
* Interpret access control mode. We assume address errors will already
* have been caught by the guest, leaving us with:
* AM UM SM KM 31..24 23..16
* UK 0 000 Unm 0 0
* MK 1 001 TLB 1
* MSK 2 010 TLB TLB 1
* MUSK 3 011 TLB TLB TLB 1
* MUSUK 4 100 TLB TLB Unm 0 1
* USK 5 101 Unm Unm 0 0
* - 6 110 0 0
* UUSK 7 111 Unm Unm Unm 0 0
*
* We shift a magic value by AM across the sign bit to find if always
* TLB mapped, and if not shift by 8 again to find if it depends on KM.
*/
am_lookup = 0x70080000 << am;
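/*
* Worked example (illustrative): MK (am = 1) gives 0x70080000 << 1 =
* 0xe0100000, which is negative, i.e. always TLB mapped; MUSUK (am = 4)
* gives 0x00800000, which is positive, and a further << 8 gives
* 0x80000000, negative, so mappedness depends on the mode checks below.
*/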
if ((s32)am_lookup < 0) {
/*
* MK, MSK, MUSK
* Always TLB mapped, unless SegCtl.EU && ERL
*/
if (!eu || !(read_gc0_status() & ST0_ERL))
return true;
} else {
am_lookup <<= 8;
if ((s32)am_lookup < 0) {
union mips_instruction inst;
unsigned int status;
u32 *opc;
/*
* MUSUK
* TLB mapped if not in kernel mode
*/
status = read_gc0_status();
if (!(status & (ST0_EXL | ST0_ERL)) &&
(status & ST0_KSU))
return true;
/*
* EVA access instructions in kernel
* mode access user address space.
*/
opc = (u32 *)vcpu->arch.pc;
if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (!err && is_eva_access(inst))
return true;
}
}
return false;
}
/**
* kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
* @vcpu: KVM VCPU state.
* @gva: Guest virtual address to convert.
* @gpa: Output guest physical address.
*
* Convert a guest virtual address (GVA) which is valid according to the guest
* context, to a guest physical address (GPA).
*
* Returns: 0 on success.
* -errno on failure.
*/
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa)
{
u32 gva32 = gva;
unsigned long segctl;
if ((long)gva == (s32)gva32) {
/* Handle canonical 32-bit virtual address */
if (cpu_guest_has_segments) {
unsigned long mask, pa;
switch (gva32 >> 29) {
case 0:
case 1: /* CFG5 (1GB) */
segctl = read_gc0_segctl2() >> 16;
mask = (unsigned long)0xfc0000000ull;
break;
case 2:
case 3: /* CFG4 (1GB) */
segctl = read_gc0_segctl2();
mask = (unsigned long)0xfc0000000ull;
break;
case 4: /* CFG3 (512MB) */
segctl = read_gc0_segctl1() >> 16;
mask = (unsigned long)0xfe0000000ull;
break;
case 5: /* CFG2 (512MB) */
segctl = read_gc0_segctl1();
mask = (unsigned long)0xfe0000000ull;
break;
case 6: /* CFG1 (512MB) */
segctl = read_gc0_segctl0() >> 16;
mask = (unsigned long)0xfe0000000ull;
break;
case 7: /* CFG0 (512MB) */
segctl = read_gc0_segctl0();
mask = (unsigned long)0xfe0000000ull;
break;
default:
/*
* GCC 4.9 isn't smart enough to figure out that
* segctl and mask are always initialised.
*/
unreachable();
}
if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
segctl & 0x0008))
goto tlb_mapped;
/* Unmapped, find guest physical address */
pa = (segctl << 20) & mask;
pa |= gva32 & ~mask;
*gpa = pa;
return 0;
} else if ((s32)gva32 < (s32)0xc0000000) {
/* legacy unmapped KSeg0 or KSeg1 */
*gpa = gva32 & 0x1fffffff;
return 0;
}
#ifdef CONFIG_64BIT
} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
/* XKPHYS */
if (cpu_guest_has_segments) {
/*
* Each of the 8 regions can be overridden by SegCtl2.XR
* to use SegCtl1.XAM.
*/
segctl = read_gc0_segctl2();
if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
segctl = read_gc0_segctl1();
if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
0))
goto tlb_mapped;
}
}
/*
* Traditionally fully unmapped.
* Bits 61:59 specify the CCA, which we can just mask off here.
* Bits 58:PABITS should be zero, but we shouldn't have got here
* if it wasn't.
*/
*gpa = gva & 0x07ffffffffffffff;
return 0;
#endif
}
tlb_mapped:
return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
/**
* kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
* @vcpu: KVM VCPU state.
* @badvaddr: Root BadVAddr.
* @gpa: Output guest physical address.
*
* VZ implementations are permitted to report guest virtual addresses (GVA) in
* BadVAddr on a root exception during guest execution, instead of the more
* convenient guest physical addresses (GPA). When we get a GVA, this function
* converts it to a GPA, taking into account guest segmentation and guest TLB
* state.
*
* Returns: 0 on success.
* -errno on failure.
*/
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
unsigned long *gpa)
{
unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
/* If BadVAddr is GPA, then all is well in the world */
if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
*gpa = badvaddr;
return 0;
}
/* Otherwise we'd expect it to be GVA ... */
if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
"Unexpected gexccode %#x\n", gexccode))
return -EINVAL;
/* ... and we need to perform the GVA->GPA translation in software */
return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}
static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 inst = 0;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
read_gc0_status());
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
/* Mask off unused bits */
unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
if (read_gc0_pagegrain() & PG_ELPA)
mask |= 0x00ffffff00000000ull;
if (cpu_guest_has_mvh)
mask |= MIPS_MAAR_VH;
/* Set or clear VH */
if (op == mtc_op) {
/* clear VH */
val &= ~MIPS_MAAR_VH;
} else if (op == dmtc_op) {
/* set VH to match VL */
val &= ~MIPS_MAAR_VH;
if (val & MIPS_MAAR_VL)
val |= MIPS_MAAR_VH;
}
return val & mask;
}
static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
val &= MIPS_MAARI_INDEX;
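/*
* Writing all ones selects the highest implemented MAAR pair (software
* can use this to probe how many exist); other out of range values are
* simply ignored below.
*/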
if (val == MIPS_MAARI_INDEX)
kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
else if (val < ARRAY_SIZE(vcpu->arch.maar))
kvm_write_sw_gc0_maari(cop0, val);
}
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
unsigned long val;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
if (inst.co_format.co) {
switch (inst.co_format.func) {
case wait_op:
er = kvm_mips_emul_wait(vcpu);
break;
default:
er = EMULATE_FAIL;
}
} else {
rt = inst.c0r_format.rt;
rd = inst.c0r_format.rd;
sel = inst.c0r_format.sel;
switch (inst.c0r_format.rs) {
case dmfc_op:
case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
if (rd == MIPS_CP0_COUNT &&
sel == 0) { /* Count */
val = kvm_mips_read_count(vcpu);
} else if (rd == MIPS_CP0_COMPARE &&
sel == 0) { /* Compare */
val = read_gc0_compare();
} else if (rd == MIPS_CP0_LLADDR &&
sel == 0) { /* LLAddr */
if (cpu_guest_has_rw_llb)
val = read_gc0_lladdr() &
MIPS_LLADDR_LLB;
else
val = 0;
} else if (rd == MIPS_CP0_LLADDR &&
sel == 1 && /* MAAR */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
/* MAARI must be in range */
BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
ARRAY_SIZE(vcpu->arch.maar));
val = vcpu->arch.maar[
kvm_read_sw_gc0_maari(cop0)];
} else if ((rd == MIPS_CP0_PRID &&
(sel == 0 || /* PRid */
sel == 2 || /* CDMMBase */
sel == 3)) || /* CMGCRBase */
(rd == MIPS_CP0_STATUS &&
(sel == 2 || /* SRSCtl */
sel == 3)) || /* SRSMap */
(rd == MIPS_CP0_CONFIG &&
(sel == 7)) || /* Config7 */
(rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) ||
(rd == MIPS_CP0_ERRCTL &&
(sel == 0))) { /* ErrCtl */
val = cop0->reg[rd][sel];
} else {
val = 0;
er = EMULATE_FAIL;
}
if (er != EMULATE_FAIL) {
/* Sign extend */
if (inst.c0r_format.rs == mfc_op)
val = (int)val;
vcpu->arch.gprs[rt] = val;
}
trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
KVM_TRACE_COP0(rd, sel), val);
break;
case dmtc_op:
case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
val = vcpu->arch.gprs[rt];
trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
KVM_TRACE_COP0(rd, sel), val);
if (rd == MIPS_CP0_COUNT &&
sel == 0) { /* Count */
kvm_vz_lose_htimer(vcpu);
kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
} else if (rd == MIPS_CP0_COMPARE &&
sel == 0) { /* Compare */
kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt],
true);
} else if (rd == MIPS_CP0_LLADDR &&
sel == 0) { /* LLAddr */
/*
* P5600 generates GPSI on guest MTC0 LLAddr.
* Only allow the guest to clear LLB.
*/
if (cpu_guest_has_rw_llb &&
!(val & MIPS_LLADDR_LLB))
write_gc0_lladdr(0);
} else if (rd == MIPS_CP0_LLADDR &&
sel == 1 && /* MAAR */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
val = mips_process_maar(inst.c0r_format.rs,
val);
/* MAARI must be in range */
BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
ARRAY_SIZE(vcpu->arch.maar));
vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
val;
} else if (rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
kvm_write_maari(vcpu, val);
} else if (rd == MIPS_CP0_ERRCTL &&
(sel == 0)) { /* ErrCtl */
/* ignore the written value */
} else {
er = EMULATE_FAIL;
}
break;
default:
er = EMULATE_FAIL;
break;
}
}
/* Rollback PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
curr_pc, __func__, inst.word);
vcpu->arch.pc = curr_pc;
}
return er;
}
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
u32 cache, op_inst, op, base;
s16 offset;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long va, curr_pc;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
base = inst.i_format.rs;
op_inst = inst.i_format.rt;
if (cpu_has_mips_r6)
offset = inst.spec3_format.simmediate;
else
offset = inst.i_format.simmediate;
cache = op_inst & CacheOp_Cache;
op = op_inst & CacheOp_Op;
va = arch->gprs[base] + offset;
kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
/* Secondary or tertiary cache ops ignored */
if (cache != Cache_I && cache != Cache_D)
return EMULATE_DONE;
switch (op_inst) {
case Index_Invalidate_I:
flush_icache_line_indexed(va);
return EMULATE_DONE;
case Index_Writeback_Inv_D:
flush_dcache_line_indexed(va);
return EMULATE_DONE;
case Hit_Invalidate_I:
case Hit_Invalidate_D:
case Hit_Writeback_Inv_D:
if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
/* We can just flush entire icache */
local_flush_icache_range(0, 0);
return EMULATE_DONE;
}
/* So far, other platforms support guest hit cache ops */
break;
default:
break;
}
kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
offset);
/* Rollback PC */
vcpu->arch.pc = curr_pc;
return EMULATE_FAIL;
}
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct kvm_run *run = vcpu->run;
union mips_instruction inst;
int rd, rt, sel;
int err;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
switch (inst.r_format.opcode) {
case cop0_op:
er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
break;
#endif
case spec3_op:
switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
case cache6_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
break;
#endif
case rdhwr_op:
if (inst.r_format.rs || (inst.r_format.re >> 3))
goto unknown;
rd = inst.r_format.rd;
rt = inst.r_format.rt;
sel = inst.r_format.re & 0x7;
switch (rd) {
case MIPS_HWR_CC: /* Read count register */
arch->gprs[rt] =
(long)(int)kvm_mips_read_count(vcpu);
break;
default:
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), 0);
goto unknown;
}
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
er = update_pc(vcpu, cause);
break;
default:
goto unknown;
}
break;
unknown:
default:
kvm_err("GPSI exception not supported (%p/%#x)\n",
opc, inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
er = EMULATE_FAIL;
break;
}
return er;
}
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
union mips_instruction inst;
int err;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
/* complete MTC0 on behalf of guest and advance EPC */
if (inst.c0r_format.opcode == cop0_op &&
inst.c0r_format.rs == mtc_op &&
inst.c0r_format.z == 0) {
int rt = inst.c0r_format.rt;
int rd = inst.c0r_format.rd;
int sel = inst.c0r_format.sel;
unsigned int val = arch->gprs[rt];
unsigned int old_val, change;
trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
val);
if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
/* FR bit should read as zero if no FPU */
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
val &= ~(ST0_CU1 | ST0_FR);
/*
* Also don't allow FR to be set if host doesn't support
* it.
*/
if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
val &= ~ST0_FR;
old_val = read_gc0_status();
change = val ^ old_val;
if (change & ST0_FR) {
/*
* FPU and Vector register state is made
* UNPREDICTABLE by a change of FR, so don't
* even bother saving it.
*/
kvm_drop_fpu(vcpu);
}
/*
* If MSA state is already live, it is undefined how it
* interacts with FR=0 FPU state, and we don't want to
* hit reserved instruction exceptions trying to save
* the MSA state later when CU=1 && FR=1, so play it
* safe and save it first.
*/
if (change & ST0_CU1 && !(val & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
write_gc0_status(val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
u32 old_cause = read_gc0_cause();
u32 change = old_cause ^ val;
/* DC bit enabling/disabling timer? */
if (change & CAUSEF_DC) {
if (val & CAUSEF_DC) {
kvm_vz_lose_htimer(vcpu);
kvm_mips_count_disable_cause(vcpu);
} else {
kvm_mips_count_enable_cause(vcpu);
}
}
/* Only certain bits are RW to the guest */
change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
CAUSEF_IP0 | CAUSEF_IP1);
/* WP can only be cleared */
change &= ~CAUSEF_WP | old_cause;
write_gc0_cause(old_cause ^ change);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
write_gc0_intctl(val);
} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
old_val = read_gc0_config5();
change = val ^ old_val;
/* Handle changes in FPU/MSA modes */
preempt_disable();
/*
* Propagate FRE changes immediately if the FPU
* context is already loaded.
*/
if (change & MIPS_CONF5_FRE &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
preempt_enable();
val = old_val ^
(change & kvm_vz_config5_guest_wrmask(vcpu));
write_gc0_config5(val);
} else {
kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
if (er != EMULATE_FAIL)
er = update_pc(vcpu, cause);
} else {
kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
return er;
}
static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
/*
* Presumably this is due to MC (guest mode change), so lets trace some
* relevant info.
*/
trace_kvm_guest_mode_change(vcpu);
return EMULATE_DONE;
}
static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er;
union mips_instruction inst;
unsigned long curr_pc;
int err;
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
er = kvm_mips_emul_hypcall(vcpu, inst);
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
return er;
}
static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
u32 cause,
u32 *opc,
struct kvm_vcpu *vcpu)
{
u32 inst;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
gexccode, opc, inst, read_gc0_status());
return EMULATE_FAIL;
}
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
int ret = RESUME_GUEST;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
switch (gexccode) {
case MIPS_GCTL0_GEXC_GPSI:
++vcpu->stat.vz_gpsi_exits;
er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GSFC:
++vcpu->stat.vz_gsfc_exits;
er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_HC:
++vcpu->stat.vz_hc_exits;
er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GRR:
++vcpu->stat.vz_grr_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
case MIPS_GCTL0_GEXC_GVA:
++vcpu->stat.vz_gva_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
case MIPS_GCTL0_GEXC_GHFC:
++vcpu->stat.vz_ghfc_exits;
er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GPA:
++vcpu->stat.vz_gpa_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
default:
++vcpu->stat.vz_resvd_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_HYPERCALL) {
ret = kvm_mips_handle_hypcall(vcpu);
} else {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
/**
* kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
* by the root context.
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_FAIL;
int ret = RESUME_GUEST;
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
/*
* If guest FPU not present, the FPU operation should have been
* treated as a reserved instruction!
* If FPU already in use, we shouldn't get this at all.
*/
if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
preempt_enable();
return EMULATE_FAIL;
}
kvm_own_fpu(vcpu);
er = EMULATE_DONE;
}
/* other coprocessors not handled */
switch (er) {
case EMULATE_DONE:
ret = RESUME_GUEST;
break;
case EMULATE_FAIL:
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
default:
BUG();
}
return ret;
}
/**
* kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use MSA when it is disabled in the root
* context.
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
/*
* If MSA not present or not exposed to guest or FR=0, the MSA operation
* should have been treated as a reserved instruction!
* Same if CU1=1, FR=0.
* If MSA already in use, we shouldn't get this at all.
*/
if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
(read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
!(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
kvm_own_msa(vcpu);
return RESUME_GUEST;
}
static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
int err, ret = RESUME_GUEST;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
/* A code fetch fault doesn't count as an MMIO */
if (kvm_is_ifetch_fault(&vcpu->arch)) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Fetch the instruction */
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_load(inst, cause, run, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
int err;
int ret = RESUME_GUEST;
/* Just try the access again if we couldn't do the translation */
if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
return RESUME_GUEST;
vcpu->arch.host_cp0_badvaddr = badvaddr;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
/* Fetch the instruction */
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_store(inst, cause, run, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static u64 kvm_vz_get_one_regs[] = {
KVM_REG_MIPS_CP0_INDEX,
KVM_REG_MIPS_CP0_ENTRYLO0,
KVM_REG_MIPS_CP0_ENTRYLO1,
KVM_REG_MIPS_CP0_CONTEXT,
KVM_REG_MIPS_CP0_PAGEMASK,
KVM_REG_MIPS_CP0_PAGEGRAIN,
KVM_REG_MIPS_CP0_WIRED,
KVM_REG_MIPS_CP0_HWRENA,
KVM_REG_MIPS_CP0_BADVADDR,
KVM_REG_MIPS_CP0_COUNT,
KVM_REG_MIPS_CP0_ENTRYHI,
KVM_REG_MIPS_CP0_COMPARE,
KVM_REG_MIPS_CP0_STATUS,
KVM_REG_MIPS_CP0_INTCTL,
KVM_REG_MIPS_CP0_CAUSE,
KVM_REG_MIPS_CP0_EPC,
KVM_REG_MIPS_CP0_PRID,
KVM_REG_MIPS_CP0_EBASE,
KVM_REG_MIPS_CP0_CONFIG,
KVM_REG_MIPS_CP0_CONFIG1,
KVM_REG_MIPS_CP0_CONFIG2,
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG4,
KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXT,
#endif
KVM_REG_MIPS_CP0_ERROREPC,
KVM_REG_MIPS_COUNT_CTL,
KVM_REG_MIPS_COUNT_RESUME,
KVM_REG_MIPS_COUNT_HZ,
};
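/*
* Illustrative userspace sketch (not part of this file): each index above
* is read with the KVM_GET_ONE_REG vcpu ioctl, where vcpu_fd is a
* hypothetical vcpu file descriptor:
*
*	struct kvm_one_reg reg;
*	__u64 val;
*
*	reg.id = KVM_REG_MIPS_CP0_BADVADDR;
*	reg.addr = (__u64)&val;
*	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
*/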
static u64 kvm_vz_get_one_regs_contextconfig[] = {
KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};
static u64 kvm_vz_get_one_regs_segments[] = {
KVM_REG_MIPS_CP0_SEGCTL0,
KVM_REG_MIPS_CP0_SEGCTL1,
KVM_REG_MIPS_CP0_SEGCTL2,
};
static u64 kvm_vz_get_one_regs_htw[] = {
KVM_REG_MIPS_CP0_PWBASE,
KVM_REG_MIPS_CP0_PWFIELD,
KVM_REG_MIPS_CP0_PWSIZE,
KVM_REG_MIPS_CP0_PWCTL,
};
static u64 kvm_vz_get_one_regs_kscratch[] = {
KVM_REG_MIPS_CP0_KSCRATCH1,
KVM_REG_MIPS_CP0_KSCRATCH2,
KVM_REG_MIPS_CP0_KSCRATCH3,
KVM_REG_MIPS_CP0_KSCRATCH4,
KVM_REG_MIPS_CP0_KSCRATCH5,
KVM_REG_MIPS_CP0_KSCRATCH6,
};
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
unsigned long ret;
ret = ARRAY_SIZE(kvm_vz_get_one_regs);
if (cpu_guest_has_userlocal)
++ret;
if (cpu_guest_has_badinstr)
++ret;
if (cpu_guest_has_badinstrp)
++ret;
if (cpu_guest_has_contextconfig)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
if (cpu_guest_has_htw)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
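/* One register index per implemented KScratch register (selects 2..7) */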
return ret;
}
static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
u64 index;
unsigned int i;
if (copy_to_user(indices, kvm_vz_get_one_regs,
sizeof(kvm_vz_get_one_regs)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs);
if (cpu_guest_has_userlocal) {
index = KVM_REG_MIPS_CP0_USERLOCAL;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_badinstr) {
index = KVM_REG_MIPS_CP0_BADINSTR;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_badinstrp) {
index = KVM_REG_MIPS_CP0_BADINSTRP;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_contextconfig) {
if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
sizeof(kvm_vz_get_one_regs_contextconfig)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
}
if (cpu_guest_has_segments) {
if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
sizeof(kvm_vz_get_one_regs_segments)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
}
if (cpu_guest_has_htw) {
if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
sizeof(kvm_vz_get_one_regs_htw)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
}
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
index = KVM_REG_MIPS_CP0_MAAR(i);
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
index = KVM_REG_MIPS_CP0_MAARI;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
for (i = 0; i < 6; ++i) {
if (!cpu_guest_has_kscr(i + 2))
continue;
if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
sizeof(kvm_vz_get_one_regs_kscratch[i])))
return -EFAULT;
++indices;
}
return 0;
}
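Userspace normally discovers these indices with the KVM_GET_REG_LIST vcpu ioctl rather than hard-coding them. A minimal sketch (error handling elided; assumes an open vcpu fd):

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 }, *list;

	/* With n == 0 the ioctl fails with E2BIG but reports the count */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
	return list;	/* list->reg[0..n-1] are KVM_REG_MIPS_* IDs */
}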
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
s64 mask, ret = v;
if (BITS_PER_LONG == 32) {
/*
* KVM API exposes 64-bit version of the register, so move the
* RI/XI bits up into place.
*/
mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
ret &= ~mask;
ret |= ((s64)v & mask) << 32;
}
return ret;
}
static inline unsigned long entrylo_user_to_kvm(s64 v)
{
unsigned long mask, ret = v;
if (BITS_PER_LONG == 32) {
/*
* KVM API exposes 64-bit version of the register, so move the
* RI/XI bits down into place.
*/
mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
ret &= ~mask;
ret |= (v >> 32) & mask;
}
return ret;
}
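As a standalone illustration of that bit movement (not kernel code; it assumes the usual layout with RI/XI in the top two bits of the native 32-bit register, and in bits 63/62 of the 64-bit user view):

#include <assert.h>
#include <stdint.h>

#define ENTRYLO_RI (1u << 31)	/* MIPS_ENTRYLO_RI on a 32-bit kernel */
#define ENTRYLO_XI (1u << 30)	/* MIPS_ENTRYLO_XI on a 32-bit kernel */

int main(void)
{
	uint32_t mask = ENTRYLO_RI | ENTRYLO_XI;
	uint32_t native = ENTRYLO_RI | 0x123e;	/* RI plus some PFN/C/D/V/G bits */

	/* kvm -> user: clear RI/XI in the low half, re-place them at 63/62 */
	uint64_t user = ((uint64_t)native & ~(uint64_t)mask) |
			((uint64_t)(native & mask) << 32);
	assert(user == ((1ull << 63) | 0x123e));

	/* user -> kvm: move them back down */
	uint32_t back = ((uint32_t)user & ~mask) | ((user >> 32) & mask);
	assert(back == native);
	return 0;
}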
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned int idx;
switch (reg->id) {
case KVM_REG_MIPS_CP0_INDEX:
*v = (long)read_gc0_index();
break;
case KVM_REG_MIPS_CP0_ENTRYLO0:
*v = entrylo_kvm_to_user(read_gc0_entrylo0());
break;
case KVM_REG_MIPS_CP0_ENTRYLO1:
*v = entrylo_kvm_to_user(read_gc0_entrylo1());
break;
case KVM_REG_MIPS_CP0_CONTEXT:
*v = (long)read_gc0_context();
break;
case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
*v = read_gc0_contextconfig();
break;
case KVM_REG_MIPS_CP0_USERLOCAL:
if (!cpu_guest_has_userlocal)
return -EINVAL;
*v = read_gc0_userlocal();
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
*v = read_gc0_xcontextconfig();
break;
#endif
case KVM_REG_MIPS_CP0_PAGEMASK:
*v = (long)read_gc0_pagemask();
break;
case KVM_REG_MIPS_CP0_PAGEGRAIN:
*v = (long)read_gc0_pagegrain();
break;
case KVM_REG_MIPS_CP0_SEGCTL0:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl0();
break;
case KVM_REG_MIPS_CP0_SEGCTL1:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl1();
break;
case KVM_REG_MIPS_CP0_SEGCTL2:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl2();
break;
case KVM_REG_MIPS_CP0_PWBASE:
if (!cpu_guest_has_htw)
return -EINVAL;
*v = read_gc0_pwbase();
break;
case KVM_REG_MIPS_CP0_PWFIELD:
if (!cpu_guest_has_htw)
return -EINVAL;
*v = read_gc0_pwfield();
break;
case KVM_REG_MIPS_CP0_PWSIZE:
if (!cpu_guest_has_htw)
return -EINVAL;
*v = read_gc0_pwsize();
break;
case KVM_REG_MIPS_CP0_WIRED:
*v = (long)read_gc0_wired();
break;
case KVM_REG_MIPS_CP0_PWCTL:
if (!cpu_guest_has_htw)
return -EINVAL;
*v = read_gc0_pwctl();
break;
case KVM_REG_MIPS_CP0_HWRENA:
*v = (long)read_gc0_hwrena();
break;
case KVM_REG_MIPS_CP0_BADVADDR:
*v = (long)read_gc0_badvaddr();
break;
case KVM_REG_MIPS_CP0_BADINSTR:
if (!cpu_guest_has_badinstr)
return -EINVAL;
*v = read_gc0_badinstr();
break;
case KVM_REG_MIPS_CP0_BADINSTRP:
if (!cpu_guest_has_badinstrp)
return -EINVAL;
*v = read_gc0_badinstrp();
break;
case KVM_REG_MIPS_CP0_COUNT:
*v = kvm_mips_read_count(vcpu);
break;
case KVM_REG_MIPS_CP0_ENTRYHI:
*v = (long)read_gc0_entryhi();
break;
case KVM_REG_MIPS_CP0_COMPARE:
*v = (long)read_gc0_compare();
break;
case KVM_REG_MIPS_CP0_STATUS:
*v = (long)read_gc0_status();
break;
case KVM_REG_MIPS_CP0_INTCTL:
*v = read_gc0_intctl();
break;
case KVM_REG_MIPS_CP0_CAUSE:
*v = (long)read_gc0_cause();
break;
case KVM_REG_MIPS_CP0_EPC:
*v = (long)read_gc0_epc();
break;
case KVM_REG_MIPS_CP0_PRID:
switch (boot_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* Octeon III has a read-only guest.PRid */
*v = read_gc0_prid();
break;
default:
*v = (long)kvm_read_c0_guest_prid(cop0);
break;
}
break;
case KVM_REG_MIPS_CP0_EBASE:
*v = kvm_vz_read_gc0_ebase();
break;
case KVM_REG_MIPS_CP0_CONFIG:
*v = read_gc0_config();
break;
case KVM_REG_MIPS_CP0_CONFIG1:
if (!cpu_guest_has_conf1)
return -EINVAL;
*v = read_gc0_config1();
break;
case KVM_REG_MIPS_CP0_CONFIG2:
if (!cpu_guest_has_conf2)
return -EINVAL;
*v = read_gc0_config2();
break;
case KVM_REG_MIPS_CP0_CONFIG3:
if (!cpu_guest_has_conf3)
return -EINVAL;
*v = read_gc0_config3();
break;
case KVM_REG_MIPS_CP0_CONFIG4:
if (!cpu_guest_has_conf4)
return -EINVAL;
*v = read_gc0_config4();
break;
case KVM_REG_MIPS_CP0_CONFIG5:
if (!cpu_guest_has_conf5)
return -EINVAL;
*v = read_gc0_config5();
break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
if (idx >= ARRAY_SIZE(vcpu->arch.maar))
return -EINVAL;
*v = vcpu->arch.maar[idx];
break;
case KVM_REG_MIPS_CP0_MAARI:
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXT:
*v = read_gc0_xcontext();
break;
#endif
case KVM_REG_MIPS_CP0_ERROREPC:
*v = (long)read_gc0_errorepc();
break;
case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
if (!cpu_guest_has_kscr(idx))
return -EINVAL;
switch (idx) {
case 2:
*v = (long)read_gc0_kscratch1();
break;
case 3:
*v = (long)read_gc0_kscratch2();
break;
case 4:
*v = (long)read_gc0_kscratch3();
break;
case 5:
*v = (long)read_gc0_kscratch4();
break;
case 6:
*v = (long)read_gc0_kscratch5();
break;
case 7:
*v = (long)read_gc0_kscratch6();
break;
}
break;
case KVM_REG_MIPS_COUNT_CTL:
*v = vcpu->arch.count_ctl;
break;
case KVM_REG_MIPS_COUNT_RESUME:
*v = ktime_to_ns(vcpu->arch.count_resume);
break;
case KVM_REG_MIPS_COUNT_HZ:
*v = vcpu->arch.count_hz;
break;
default:
return -EINVAL;
}
return 0;
}
static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 v)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned int idx;
int ret = 0;
unsigned int cur, change;
switch (reg->id) {
case KVM_REG_MIPS_CP0_INDEX:
write_gc0_index(v);
break;
case KVM_REG_MIPS_CP0_ENTRYLO0:
write_gc0_entrylo0(entrylo_user_to_kvm(v));
break;
case KVM_REG_MIPS_CP0_ENTRYLO1:
write_gc0_entrylo1(entrylo_user_to_kvm(v));
break;
case KVM_REG_MIPS_CP0_CONTEXT:
write_gc0_context(v);
break;
case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
write_gc0_contextconfig(v);
break;
case KVM_REG_MIPS_CP0_USERLOCAL:
if (!cpu_guest_has_userlocal)
return -EINVAL;
write_gc0_userlocal(v);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
write_gc0_xcontextconfig(v);
break;
#endif
case KVM_REG_MIPS_CP0_PAGEMASK:
write_gc0_pagemask(v);
break;
case KVM_REG_MIPS_CP0_PAGEGRAIN:
write_gc0_pagegrain(v);
break;
case KVM_REG_MIPS_CP0_SEGCTL0:
if (!cpu_guest_has_segments)
return -EINVAL;
write_gc0_segctl0(v);
break;
case KVM_REG_MIPS_CP0_SEGCTL1:
if (!cpu_guest_has_segments)
return -EINVAL;
write_gc0_segctl1(v);
break;
case KVM_REG_MIPS_CP0_SEGCTL2:
if (!cpu_guest_has_segments)
return -EINVAL;
write_gc0_segctl2(v);
break;
case KVM_REG_MIPS_CP0_PWBASE:
if (!cpu_guest_has_htw)
return -EINVAL;
write_gc0_pwbase(v);
break;
case KVM_REG_MIPS_CP0_PWFIELD:
if (!cpu_guest_has_htw)
return -EINVAL;
write_gc0_pwfield(v);
break;
case KVM_REG_MIPS_CP0_PWSIZE:
if (!cpu_guest_has_htw)
return -EINVAL;
write_gc0_pwsize(v);
break;
case KVM_REG_MIPS_CP0_WIRED:
change_gc0_wired(MIPSR6_WIRED_WIRED, v);
break;
case KVM_REG_MIPS_CP0_PWCTL:
if (!cpu_guest_has_htw)
return -EINVAL;
write_gc0_pwctl(v);
break;
case KVM_REG_MIPS_CP0_HWRENA:
write_gc0_hwrena(v);
break;
case KVM_REG_MIPS_CP0_BADVADDR:
write_gc0_badvaddr(v);
break;
case KVM_REG_MIPS_CP0_BADINSTR:
if (!cpu_guest_has_badinstr)
return -EINVAL;
write_gc0_badinstr(v);
break;
case KVM_REG_MIPS_CP0_BADINSTRP:
if (!cpu_guest_has_badinstrp)
return -EINVAL;
write_gc0_badinstrp(v);
break;
case KVM_REG_MIPS_CP0_COUNT:
kvm_mips_write_count(vcpu, v);
break;
case KVM_REG_MIPS_CP0_ENTRYHI:
write_gc0_entryhi(v);
break;
case KVM_REG_MIPS_CP0_COMPARE:
kvm_mips_write_compare(vcpu, v, false);
break;
case KVM_REG_MIPS_CP0_STATUS:
write_gc0_status(v);
break;
case KVM_REG_MIPS_CP0_INTCTL:
write_gc0_intctl(v);
break;
case KVM_REG_MIPS_CP0_CAUSE:
/*
* Stopping or starting the timer (DC bit) must appear atomic with
* changes to the timer interrupt pending bit (TI); a timer interrupt
* must not fire in between.
*/
if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
if (v & CAUSEF_DC) {
/* disable timer first */
kvm_mips_count_disable_cause(vcpu);
change_gc0_cause((u32)~CAUSEF_DC, v);
} else {
/* enable timer last */
change_gc0_cause((u32)~CAUSEF_DC, v);
kvm_mips_count_enable_cause(vcpu);
}
} else {
write_gc0_cause(v);
}
break;
case KVM_REG_MIPS_CP0_EPC:
write_gc0_epc(v);
break;
case KVM_REG_MIPS_CP0_PRID:
switch (boot_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* Octeon III has a guest.PRid, but it's read-only */
break;
default:
kvm_write_c0_guest_prid(cop0, v);
break;
}
break;
case KVM_REG_MIPS_CP0_EBASE:
kvm_vz_write_gc0_ebase(v);
break;
case KVM_REG_MIPS_CP0_CONFIG:
cur = read_gc0_config();
change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG1:
if (!cpu_guest_has_conf1)
break;
cur = read_gc0_config1();
change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config1(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG2:
if (!cpu_guest_has_conf2)
break;
cur = read_gc0_config2();
change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config2(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG3:
if (!cpu_guest_has_conf3)
break;
cur = read_gc0_config3();
change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config3(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG4:
if (!cpu_guest_has_conf4)
break;
cur = read_gc0_config4();
change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config4(v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG5:
if (!cpu_guest_has_conf5)
break;
cur = read_gc0_config5();
change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
if (change) {
v = cur ^ change;
write_gc0_config5(v);
}
break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
if (idx >= ARRAY_SIZE(vcpu->arch.maar))
return -EINVAL;
vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
break;
case KVM_REG_MIPS_CP0_MAARI:
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
kvm_write_maari(vcpu, v);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXT:
write_gc0_xcontext(v);
break;
#endif
case KVM_REG_MIPS_CP0_ERROREPC:
write_gc0_errorepc(v);
break;
case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
if (!cpu_guest_has_kscr(idx))
return -EINVAL;
switch (idx) {
case 2:
write_gc0_kscratch1(v);
break;
case 3:
write_gc0_kscratch2(v);
break;
case 4:
write_gc0_kscratch3(v);
break;
case 5:
write_gc0_kscratch4(v);
break;
case 6:
write_gc0_kscratch5(v);
break;
case 7:
write_gc0_kscratch6(v);
break;
}
break;
case KVM_REG_MIPS_COUNT_CTL:
ret = kvm_mips_set_count_ctl(vcpu, v);
break;
case KVM_REG_MIPS_COUNT_RESUME:
ret = kvm_mips_set_count_resume(vcpu, v);
break;
case KVM_REG_MIPS_COUNT_HZ:
ret = kvm_mips_set_count_hz(vcpu, v);
break;
default:
return -EINVAL;
}
return ret;
}
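Individual registers from the index lists above are accessed from userspace through the generic ONE_REG interface; a pair of minimal (hypothetical) wrappers:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int mips_get_reg(int vcpu_fd, __u64 id, __u64 *val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (__u64)(unsigned long)val };

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int mips_set_reg(int vcpu_fd, __u64 id, __u64 val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (__u64)(unsigned long)&val };

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Reading KVM_REG_MIPS_CP0_COUNT this way, for instance, goes through kvm_mips_read_count() above and so returns the soft-timer view of CP0_Count rather than the raw hardware counter.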
#define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
unsigned long guestid = guestid_cache(cpu);
if (!(++guestid & GUESTID_MASK)) {
if (cpu_has_vtag_icache)
flush_icache_all();
if (!guestid) /* fix version if needed */
guestid = GUESTID_FIRST_VERSION;
++guestid; /* guestid 0 reserved for root */
/* start new guestid cycle */
kvm_vz_local_flush_roottlb_all_guests();
kvm_vz_local_flush_guesttlb_all();
}
guestid_cache(cpu) = guestid;
}
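This mirrors the kernel's ASID allocator: the bits under GUESTID_MASK are the live GuestID and the bits above form a generation counter, so a wrap of the low bits starts a new generation and forces a flush. A standalone illustration with a hypothetical 8-bit ID field:

#include <assert.h>

#define ID_MASK		0xffUL		/* hypothetical GUESTID_MASK */
#define FIRST_VERSION	(ID_MASK + 1)	/* hypothetical GUESTID_FIRST_VERSION */

static unsigned long cache = FIRST_VERSION;

static unsigned long next_id(void)
{
	unsigned long id = cache;

	if (!(++id & ID_MASK)) {	/* low bits wrapped: new generation */
		if (!id)		/* the counter itself wrapped */
			id = FIRST_VERSION;
		++id;			/* ID 0 in each generation is root's */
		/* a real hypervisor would flush root/guest TLBs here */
	}
	return cache = id;
}

int main(void)
{
	int i;

	for (i = 0; i < 300; i++)
		next_id();
	assert(cache >> 8 == 2);	/* generation advanced once */
	return 0;
}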
/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
int ret = 0;
int i;
if (!vcpu->requests)
return 0;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
if (cpu_has_guestid) {
/* Drop all GuestIDs for this VCPU */
for_each_possible_cpu(i)
vcpu->arch.vzguestid[i] = 0;
/* This will clobber guest TLB contents too */
ret = 1;
}
/*
* For Root ASID Dealias (RAD) we don't do anything here, but we
* still need the request to ensure we recheck asid_flush_mask.
* We can still return 0 as only the root TLB will be affected
* by a root ASID flush.
*/
}
return ret;
}
static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
unsigned int wired = read_gc0_wired();
struct kvm_mips_tlb *tlbs;
int i;
/* Expand the wired TLB array if necessary */
wired &= MIPSR6_WIRED_WIRED;
if (wired > vcpu->arch.wired_tlb_limit) {
tlbs = krealloc(vcpu->arch.wired_tlb, wired *
sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
if (WARN_ON(!tlbs)) {
/* Save whatever we can */
wired = vcpu->arch.wired_tlb_limit;
} else {
vcpu->arch.wired_tlb = tlbs;
vcpu->arch.wired_tlb_limit = wired;
}
}
if (wired)
/* Save wired entries from the guest TLB */
kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
/* Invalidate any dropped entries since last time */
for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
vcpu->arch.wired_tlb[i].tlb_mask = 0;
}
vcpu->arch.wired_tlb_used = wired;
}
static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
/* Load wired entries into the guest TLB */
if (vcpu->arch.wired_tlb)
kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
vcpu->arch.wired_tlb_used);
}
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm *kvm = vcpu->kvm;
struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
bool migrated;
/*
* Are we entering guest context on a different CPU to last time?
* If so, the VCPU's guest TLB state on this CPU may be stale.
*/
migrated = (vcpu->arch.last_exec_cpu != cpu);
vcpu->arch.last_exec_cpu = cpu;
/*
* A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
* remains set until another vcpu is loaded in. As a rule GuestRID
* remains zeroed when in root context unless the kernel is busy
* manipulating guest tlb entries.
*/
if (cpu_has_guestid) {
/*
* Check if our GuestID is of an older version and thus invalid.
*
* We also discard the stored GuestID if we've executed on
* another CPU, as the guest mappings may have changed without
* hypervisor knowledge.
*/
if (migrated ||
(vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
GUESTID_VERSION_MASK) {
kvm_vz_get_new_guestid(cpu, vcpu);
vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
trace_kvm_guestid_change(vcpu,
vcpu->arch.vzguestid[cpu]);
}
/* Restore GuestID */
change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
} else {
/*
* The Guest TLB only stores a single guest's TLB state, so
* flush it if another VCPU has executed on this CPU.
*
* We also flush if we've executed on another CPU, as the guest
* mappings may have changed without hypervisor knowledge.
*/
if (migrated || last_exec_vcpu[cpu] != vcpu)
kvm_vz_local_flush_guesttlb_all();
last_exec_vcpu[cpu] = vcpu;
/*
* Root ASID dealiases guest GPA mappings in the root TLB.
* Allocate new root ASID if needed.
*/
if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
|| (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu))
get_new_mmu_context(gpa_mm, cpu);
}
}
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
bool migrated, all;
/*
* Have we migrated to a different CPU?
* If so, any old guest TLB state may be stale.
*/
migrated = (vcpu->arch.last_sched_cpu != cpu);
/*
* Was this the last VCPU to run on this CPU?
* If not, any old guest state from this VCPU will have been clobbered.
*/
all = migrated || (last_vcpu[cpu] != vcpu);
last_vcpu[cpu] = vcpu;
/*
* Restore CP0_Wired unconditionally as we clear it after use, and
* restore wired guest TLB entries (while in guest context).
*/
kvm_restore_gc0_wired(cop0);
if (current->flags & PF_VCPU) {
tlbw_use_hazard();
kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu);
}
/*
* Restore timer state regardless, as e.g. Cause.TI can change over time
* if left unmaintained.
*/
kvm_vz_restore_timer(vcpu);
/* Set MC bit if we want to trace guest mode changes */
if (kvm_trace_guest_mode_change)
set_c0_guestctl0(MIPS_GCTL0_MC);
else
clear_c0_guestctl0(MIPS_GCTL0_MC);
/* Don't bother restoring registers multiple times unless necessary */
if (!all)
return 0;
/*
* Restore config registers first, as some implementations restrict
* writes to other registers when the corresponding feature bits aren't
* set. For example Status.CU1 cannot be set unless Config1.FP is set.
*/
kvm_restore_gc0_config(cop0);
if (cpu_guest_has_conf1)
kvm_restore_gc0_config1(cop0);
if (cpu_guest_has_conf2)
kvm_restore_gc0_config2(cop0);
if (cpu_guest_has_conf3)
kvm_restore_gc0_config3(cop0);
if (cpu_guest_has_conf4)
kvm_restore_gc0_config4(cop0);
if (cpu_guest_has_conf5)
kvm_restore_gc0_config5(cop0);
if (cpu_guest_has_conf6)
kvm_restore_gc0_config6(cop0);
if (cpu_guest_has_conf7)
kvm_restore_gc0_config7(cop0);
kvm_restore_gc0_index(cop0);
kvm_restore_gc0_entrylo0(cop0);
kvm_restore_gc0_entrylo1(cop0);
kvm_restore_gc0_context(cop0);
if (cpu_guest_has_contextconfig)
kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
kvm_restore_gc0_xcontext(cop0);
if (cpu_guest_has_contextconfig)
kvm_restore_gc0_xcontextconfig(cop0);
#endif
kvm_restore_gc0_pagemask(cop0);
kvm_restore_gc0_pagegrain(cop0);
kvm_restore_gc0_hwrena(cop0);
kvm_restore_gc0_badvaddr(cop0);
kvm_restore_gc0_entryhi(cop0);
kvm_restore_gc0_status(cop0);
kvm_restore_gc0_intctl(cop0);
kvm_restore_gc0_epc(cop0);
kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
if (cpu_guest_has_userlocal)
kvm_restore_gc0_userlocal(cop0);
kvm_restore_gc0_errorepc(cop0);
/* restore KScratch registers if enabled in guest */
if (cpu_guest_has_conf4) {
if (cpu_guest_has_kscr(2))
kvm_restore_gc0_kscratch1(cop0);
if (cpu_guest_has_kscr(3))
kvm_restore_gc0_kscratch2(cop0);
if (cpu_guest_has_kscr(4))
kvm_restore_gc0_kscratch3(cop0);
if (cpu_guest_has_kscr(5))
kvm_restore_gc0_kscratch4(cop0);
if (cpu_guest_has_kscr(6))
kvm_restore_gc0_kscratch5(cop0);
if (cpu_guest_has_kscr(7))
kvm_restore_gc0_kscratch6(cop0);
}
if (cpu_guest_has_badinstr)
kvm_restore_gc0_badinstr(cop0);
if (cpu_guest_has_badinstrp)
kvm_restore_gc0_badinstrp(cop0);
if (cpu_guest_has_segments) {
kvm_restore_gc0_segctl0(cop0);
kvm_restore_gc0_segctl1(cop0);
kvm_restore_gc0_segctl2(cop0);
}
/* restore HTW registers */
if (cpu_guest_has_htw) {
kvm_restore_gc0_pwbase(cop0);
kvm_restore_gc0_pwfield(cop0);
kvm_restore_gc0_pwsize(cop0);
kvm_restore_gc0_pwctl(cop0);
}
/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
if (cpu_has_guestctl2)
write_c0_guestctl2(
cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
/*
* Clear the linked-load bit to break interrupted atomics. This
* prevents an SC on the next VCPU from succeeding by matching a LL on
* the previous VCPU.
*/
if (cpu_guest_has_rw_llb)
write_gc0_lladdr(0);
return 0;
}
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
if (current->flags & PF_VCPU)
kvm_vz_vcpu_save_wired(vcpu);
kvm_lose_fpu(vcpu);
kvm_save_gc0_index(cop0);
kvm_save_gc0_entrylo0(cop0);
kvm_save_gc0_entrylo1(cop0);
kvm_save_gc0_context(cop0);
if (cpu_guest_has_contextconfig)
kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
kvm_save_gc0_xcontext(cop0);
if (cpu_guest_has_contextconfig)
kvm_save_gc0_xcontextconfig(cop0);
#endif
kvm_save_gc0_pagemask(cop0);
kvm_save_gc0_pagegrain(cop0);
kvm_save_gc0_wired(cop0);
/* allow wired TLB entries to be overwritten */
clear_gc0_wired(MIPSR6_WIRED_WIRED);
kvm_save_gc0_hwrena(cop0);
kvm_save_gc0_badvaddr(cop0);
kvm_save_gc0_entryhi(cop0);
kvm_save_gc0_status(cop0);
kvm_save_gc0_intctl(cop0);
kvm_save_gc0_epc(cop0);
kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
if (cpu_guest_has_userlocal)
kvm_save_gc0_userlocal(cop0);
/* only save implemented config registers */
kvm_save_gc0_config(cop0);
if (cpu_guest_has_conf1)
kvm_save_gc0_config1(cop0);
if (cpu_guest_has_conf2)
kvm_save_gc0_config2(cop0);
if (cpu_guest_has_conf3)
kvm_save_gc0_config3(cop0);
if (cpu_guest_has_conf4)
kvm_save_gc0_config4(cop0);
if (cpu_guest_has_conf5)
kvm_save_gc0_config5(cop0);
if (cpu_guest_has_conf6)
kvm_save_gc0_config6(cop0);
if (cpu_guest_has_conf7)
kvm_save_gc0_config7(cop0);
kvm_save_gc0_errorepc(cop0);
/* save KScratch registers if enabled in guest */
if (cpu_guest_has_conf4) {
if (cpu_guest_has_kscr(2))
kvm_save_gc0_kscratch1(cop0);
if (cpu_guest_has_kscr(3))
kvm_save_gc0_kscratch2(cop0);
if (cpu_guest_has_kscr(4))
kvm_save_gc0_kscratch3(cop0);
if (cpu_guest_has_kscr(5))
kvm_save_gc0_kscratch4(cop0);
if (cpu_guest_has_kscr(6))
kvm_save_gc0_kscratch5(cop0);
if (cpu_guest_has_kscr(7))
kvm_save_gc0_kscratch6(cop0);
}
if (cpu_guest_has_badinstr)
kvm_save_gc0_badinstr(cop0);
if (cpu_guest_has_badinstrp)
kvm_save_gc0_badinstrp(cop0);
if (cpu_guest_has_segments) {
kvm_save_gc0_segctl0(cop0);
kvm_save_gc0_segctl1(cop0);
kvm_save_gc0_segctl2(cop0);
}
/* save HTW registers if enabled in guest */
if (cpu_guest_has_htw &&
kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
kvm_save_gc0_pwbase(cop0);
kvm_save_gc0_pwfield(cop0);
kvm_save_gc0_pwsize(cop0);
kvm_save_gc0_pwctl(cop0);
}
kvm_vz_save_timer(vcpu);
/* save Root.GuestCtl2 in unused Guest guestctl2 register */
if (cpu_has_guestctl2)
cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
read_c0_guestctl2();
return 0;
}
/**
* kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
* @size: Number of guest VTLB entries (0 < @size <= root VTLB entries).
*
* Attempt to resize the guest VTLB by writing guest Config registers. This is
* necessary for cores with a shared root/guest TLB to avoid overlap with wired
* entries in the root VTLB.
*
* Returns: The resulting guest VTLB size.
*/
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
unsigned int config4 = 0, ret = 0, limit;
/* Write MMUSize - 1 into guest Config registers */
if (cpu_guest_has_conf1)
change_gc0_config1(MIPS_CONF1_TLBS,
(size - 1) << MIPS_CONF1_TLBS_SHIFT);
if (cpu_guest_has_conf4) {
config4 = read_gc0_config4();
if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
MIPS_CONF4_VTLBSIZEEXT_SHIFT;
} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
config4 &= ~MIPS_CONF4_MMUSIZEEXT;
config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
MIPS_CONF4_MMUSIZEEXT_SHIFT;
}
write_gc0_config4(config4);
}
/*
* Set Guest.Wired.Limit = 0 (no limit, up to Guest.MMUSize-1), unless
* that would exceed Root.Wired.Limit (Guest.Wired.Wired is cleared too
* so the write isn't dropped).
*/
if (cpu_has_mips_r6) {
limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
MIPSR6_WIRED_LIMIT_SHIFT;
if (size - 1 <= limit)
limit = 0;
write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
}
/* Read back MMUSize - 1 */
back_to_back_c0_hazard();
if (cpu_guest_has_conf1)
ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
MIPS_CONF1_TLBS_SHIFT;
if (config4) {
if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
MIPS_CONF1_TLBS_SIZE;
else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
MIPS_CONF1_TLBS_SIZE;
}
return ret + 1;
}
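The MMUSize - 1 value written above splits across fields: the low 6 bits (MIPS_CONF1_TLBS_SIZE) live in Config1.TLBS and the excess goes into Config4's VTLBSizeExt / MMUSizeExt. A standalone sketch of the split, assuming the architectural 6-bit TLBS field:

#include <assert.h>

#define TLBS_BITS 6	/* width of Config1.TLBS (MIPS_CONF1_TLBS_SIZE) */

int main(void)
{
	unsigned int size = 576;	/* desired guest VTLB entries */
	unsigned int tlbs = (size - 1) & ((1 << TLBS_BITS) - 1);
	unsigned int ext = (size - 1) >> TLBS_BITS;

	assert(tlbs == 63 && ext == 8);
	/* decoding recombines the fields, as the read-back path above does */
	assert(((ext << TLBS_BITS) | tlbs) + 1 == size);
	return 0;
}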
static int kvm_vz_hardware_enable(void)
{
unsigned int mmu_size, guest_mmu_size, ftlb_size;
u64 guest_cvmctl, cvmvmconfig;
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/* Set up guest timer/perfcount IRQ lines */
guest_cvmctl = read_gc0_cvmctl();
guest_cvmctl &= ~CVMCTL_IPTI;
guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
guest_cvmctl &= ~CVMCTL_IPPCI;
guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
write_gc0_cvmctl(guest_cvmctl);
cvmvmconfig = read_c0_cvmvmconfig();
/* No I/O hole translation. */
cvmvmconfig |= CVMVMCONF_DGHT;
/* Halve the root MMU size */
mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
>> CVMVMCONF_MMUSIZEM1_S) + 1;
guest_mmu_size = mmu_size / 2;
mmu_size -= guest_mmu_size;
cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
cvmvmconfig |= mmu_size - 1;
write_c0_cvmvmconfig(cvmvmconfig);
/* Update our records */
current_cpu_data.tlbsize = mmu_size;
current_cpu_data.tlbsizevtlb = mmu_size;
current_cpu_data.guest.tlbsize = guest_mmu_size;
/* Flush moved entries in new (guest) context */
kvm_vz_local_flush_guesttlb_all();
break;
default:
/*
* ImgTec cores tend to use a shared root/guest TLB. To avoid
* overlap of root wired and guest entries, the guest TLB may
* need resizing.
*/
mmu_size = current_cpu_data.tlbsizevtlb;
ftlb_size = current_cpu_data.tlbsize - mmu_size;
/* Try switching to maximum guest VTLB size for flush */
guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
kvm_vz_local_flush_guesttlb_all();
/*
* Reduce to make space for root wired entries and at least 2
* root non-wired entries. This does assume that long-term wired
* entries won't be added later.
*/
guest_mmu_size = mmu_size - num_wired_entries() - 2;
guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
/*
* Write the VTLB size, but if another CPU has already written it,
* check that it matches, otherwise we can't provide a consistent view
* to the guest. If this ever happens it suggests an asymmetric number
* of wired entries.
*/
if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
"Available guest VTLB size mismatch"))
return -EINVAL;
break;
}
/*
* Enable virtualization features granting guest direct control of
* certain features:
* CP0=1: Guest coprocessor 0 context.
* AT=Guest: Guest MMU.
* CG=1: Hit (virtual address) CACHE operations (optional).
* CF=1: Guest Config registers.
* CGI=1: Indexed flush CACHE operations (optional).
*/
write_c0_guestctl0(MIPS_GCTL0_CP0 |
(MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
MIPS_GCTL0_CG | MIPS_GCTL0_CF);
if (cpu_has_guestctl0ext)
set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
if (cpu_has_guestid) {
write_c0_guestctl1(0);
kvm_vz_local_flush_roottlb_all_guests();
GUESTID_MASK = current_cpu_data.guestid_mask;
GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
GUESTID_VERSION_MASK = ~GUESTID_MASK;
current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
}
/* clear any pending injected virtual guest interrupts */
if (cpu_has_guestctl2)
clear_c0_guestctl2(0x3f << 10);
return 0;
}
static void kvm_vz_hardware_disable(void)
{
u64 cvmvmconfig;
unsigned int mmu_size;
/* Flush any remaining guest TLB entries */
kvm_vz_local_flush_guesttlb_all();
switch (current_cpu_type()) {
case CPU_CAVIUM_OCTEON3:
/*
* Allocate the whole TLB for root. Existing guest TLB entries will
* change ownership to the root TLB. This should be safe, as they've
* already been flushed above while still in the guest TLB.
*/
cvmvmconfig = read_c0_cvmvmconfig();
mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
>> CVMVMCONF_MMUSIZEM1_S) + 1;
cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
cvmvmconfig |= mmu_size - 1;
write_c0_cvmvmconfig(cvmvmconfig);
/* Update our records */
current_cpu_data.tlbsize = mmu_size;
current_cpu_data.tlbsizevtlb = mmu_size;
current_cpu_data.guest.tlbsize = 0;
/* Flush moved entries in new (root) context */
local_flush_tlb_all();
break;
}
if (cpu_has_guestid) {
write_c0_guestctl1(0);
kvm_vz_local_flush_roottlb_all_guests();
}
}
static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
case KVM_CAP_MIPS_VZ:
/* we wouldn't be here unless cpu_has_vz */
r = 1;
break;
#ifdef CONFIG_64BIT
case KVM_CAP_MIPS_64BIT:
/* We support 64-bit registers/operations and addresses */
r = 2;
break;
#endif
default:
r = 0;
break;
}
return r;
}
static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
int i;
for_each_possible_cpu(i)
vcpu->arch.vzguestid[i] = 0;
return 0;
}
static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
int cpu;
/*
* If the VCPU is freed and reused as another VCPU, we don't want the
* matching pointer wrongly hanging around in last_vcpu[] or
* last_exec_vcpu[].
*/
for_each_possible_cpu(cpu) {
if (last_vcpu[cpu] == vcpu)
last_vcpu[cpu] = NULL;
if (last_exec_vcpu[cpu] == vcpu)
last_exec_vcpu[cpu] = NULL;
}
}
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
/*
* Start off the timer at the same frequency as the host timer, but the
* soft timer doesn't handle frequencies greater than 1GHz yet.
*/
if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
count_hz = mips_hpt_frequency;
kvm_mips_init_count(vcpu, count_hz);
/*
* Initialize guest register state to valid architectural reset state.
*/
/* PageGrain */
if (cpu_has_mips_r6)
kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
/* Wired */
if (cpu_has_mips_r6)
kvm_write_sw_gc0_wired(cop0,
read_gc0_wired() & MIPSR6_WIRED_LIMIT);
/* Status */
kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
if (cpu_has_mips_r6)
kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
/* IntCtl */
kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
/* PRId */
kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
/* EBase */
kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
/* Config */
kvm_save_gc0_config(cop0);
/* architecturally writable (e.g. from guest) */
kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
_page_cachable_default >> _CACHE_SHIFT);
/* architecturally read only, but maybe writable from root */
kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
if (cpu_guest_has_conf1) {
kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
/* Config1 */
kvm_save_gc0_config1(cop0);
/* architecturally read only, but maybe writable from root */
kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
MIPS_CONF1_MD |
MIPS_CONF1_PC |
MIPS_CONF1_WR |
MIPS_CONF1_CA |
MIPS_CONF1_FP);
}
if (cpu_guest_has_conf2) {
kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
/* Config2 */
kvm_save_gc0_config2(cop0);
}
if (cpu_guest_has_conf3) {
kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
/* Config3 */
kvm_save_gc0_config3(cop0);
/* architecturally writable (e.g. from guest) */
kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
/* architecturally read only, but maybe writable from root */
kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
MIPS_CONF3_BPG |
MIPS_CONF3_ULRI |
MIPS_CONF3_DSP |
MIPS_CONF3_CTXTC |
MIPS_CONF3_ITL |
MIPS_CONF3_LPA |
MIPS_CONF3_VEIC |
MIPS_CONF3_VINT |
MIPS_CONF3_SP |
MIPS_CONF3_CDMM |
MIPS_CONF3_MT |
MIPS_CONF3_SM |
MIPS_CONF3_TL);
}
if (cpu_guest_has_conf4) {
kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
/* Config4 */
kvm_save_gc0_config4(cop0);
}
if (cpu_guest_has_conf5) {
kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
/* Config5 */
kvm_save_gc0_config5(cop0);
/* architecturally writable (e.g. from guest) */
kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
MIPS_CONF5_CV |
MIPS_CONF5_MSAEN |
MIPS_CONF5_UFE |
MIPS_CONF5_FRE |
MIPS_CONF5_SBRI |
MIPS_CONF5_UFR);
/* architecturally read only, but maybe writable from root */
kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
}
if (cpu_guest_has_contextconfig) {
/* ContextConfig */
kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
/* XContextConfig */
/* bits SEGBITS-13+3:4 set */
kvm_write_sw_gc0_xcontextconfig(cop0,
((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
}
/* Implementation dependent, use the legacy layout */
if (cpu_guest_has_segments) {
/* SegCtl0, SegCtl1, SegCtl2 */
kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
(_page_cachable_default >> _CACHE_SHIFT) <<
(16 + MIPS_SEGCFG_C_SHIFT));
kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
}
/* reset HTW registers */
if (cpu_guest_has_htw && cpu_has_mips_r6) {
/* PWField */
kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
/* PWSize */
kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
}
/* start with no pending virtual guest interrupts */
if (cpu_has_guestctl2)
cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
/* Put PC at reset vector */
vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
return 0;
}
static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
if (cpu_has_guestid) {
/* Flush GuestID for each VCPU individually */
kvm_flush_remote_tlbs(kvm);
} else {
/*
* For each CPU there is a single GPA ASID used by all VCPUs in
* the VM, so it doesn't make sense for the VCPUs to handle
* invalidation of these ASIDs individually.
*
* Instead mark all CPUs as needing ASID invalidation in
* asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
* kick any running VCPUs so they check asid_flush_mask.
*/
cpumask_setall(&kvm->arch.asid_flush_mask);
kvm_flush_remote_tlbs(kvm);
}
}
static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
const struct kvm_memory_slot *slot)
{
kvm_vz_flush_shadow_all(kvm);
}
static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int preserve_guest_tlb;
preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
if (preserve_guest_tlb)
kvm_vz_vcpu_save_wired(vcpu);
kvm_vz_vcpu_load_tlb(vcpu, cpu);
if (preserve_guest_tlb)
kvm_vz_vcpu_load_wired(vcpu);
}
static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int r;
kvm_vz_acquire_htimer(vcpu);
/* Check if we have any exceptions/interrupts pending */
kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
kvm_vz_check_requests(vcpu, cpu);
kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu);
r = vcpu->arch.vcpu_run(run, vcpu);
kvm_vz_vcpu_save_wired(vcpu);
return r;
}
static struct kvm_mips_callbacks kvm_vz_callbacks = {
.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
.handle_addr_err_st = kvm_trap_vz_no_handler,
.handle_addr_err_ld = kvm_trap_vz_no_handler,
.handle_syscall = kvm_trap_vz_no_handler,
.handle_res_inst = kvm_trap_vz_no_handler,
.handle_break = kvm_trap_vz_no_handler,
.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
.hardware_enable = kvm_vz_hardware_enable,
.hardware_disable = kvm_vz_hardware_disable,
.check_extension = kvm_vz_check_extension,
.vcpu_init = kvm_vz_vcpu_init,
.vcpu_uninit = kvm_vz_vcpu_uninit,
.vcpu_setup = kvm_vz_vcpu_setup,
.flush_shadow_all = kvm_vz_flush_shadow_all,
.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
.queue_timer_int = kvm_vz_queue_timer_int_cb,
.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
.queue_io_int = kvm_vz_queue_io_int_cb,
.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
.irq_deliver = kvm_vz_irq_deliver_cb,
.irq_clear = kvm_vz_irq_clear_cb,
.num_regs = kvm_vz_num_regs,
.copy_reg_indices = kvm_vz_copy_reg_indices,
.get_one_reg = kvm_vz_get_one_reg,
.set_one_reg = kvm_vz_set_one_reg,
.vcpu_load = kvm_vz_vcpu_load,
.vcpu_put = kvm_vz_vcpu_put,
.vcpu_run = kvm_vz_vcpu_run,
.vcpu_reenter = kvm_vz_vcpu_reenter,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
if (!cpu_has_vz)
return -ENODEV;
/*
* VZ requires at least 2 KScratch registers, so it should have been
* possible to allocate pgd_reg.
*/
if (WARN(pgd_reg == -1,
"pgd_reg not allocated even though cpu_has_vz\n"))
return -ENODEV;
pr_info("Starting KVM with MIPS VZ extensions\n");
*install_callbacks = &kvm_vz_callbacks;
return 0;
}
...@@ -24,6 +24,7 @@
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
...
...@@ -348,7 +348,7 @@ void maar_init(void)
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
		pr_info(" [%d]: ", i / 2);
-		if (!(attr & MIPS_MAAR_V)) {
+		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}
...
...@@ -702,6 +702,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
#define KVM_VM_MIPS_TE 0
#define KVM_VM_MIPS_VZ 1
#define KVM_S390_SIE_PAGE_OFFSET 1
/*
...@@ -883,6 +887,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_MMU_RADIX 134
#define KVM_CAP_PPC_MMU_HASH_V3 135
#define KVM_CAP_IMMEDIATE_EXIT 136
#define KVM_CAP_MIPS_VZ 137
#define KVM_CAP_MIPS_TE 138
#define KVM_CAP_MIPS_64BIT 139
#ifdef KVM_CAP_IRQ_ROUTING
...
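Taken together with the new machine type and capability numbers, userspace selection of VZ might look like the following sketch (error handling elided):

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

int create_mips_vm(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int type = KVM_VM_MIPS_TE;	/* default: trap & emulate */

	/* prefer full virtualization when the VZ ASE is present */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) > 0)
		type = KVM_VM_MIPS_VZ;
	return ioctl(kvm, KVM_CREATE_VM, type);
}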