Commit 90036013 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Paolo Bonzini:
 "First batch of KVM changes for 4.1

  The most interesting bit here is irqfd/ioeventfd support for ARM and
  ARM64.

  Summary:

  ARM/ARM64:
     fixes for live migration, irqfd and ioeventfd support (enabling
     vhost, too), page aging

  s390:
     interrupt handling rework, allowing all local interrupts to be
     injected via a new ioctl and the full local irq state to be
     queried and set for migration and introspection.  New ioctls to
     access memory by virtual address, and to get/set the guest storage
     keys.  SIMD support.

  MIPS:
     FPU and MIPS SIMD Architecture (MSA) support.  Includes some
     patches from Ralf Baechle's MIPS tree.

  x86:
     bugfixes (notably for pvclock, the others are small) and cleanups.
     Another small latency improvement for the TSC deadline timer"
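
  [Editor's note: for background, irqfd lets userspace (or vhost) inject a
  guest interrupt by signalling an eventfd, and ioeventfd turns a guest write
  to a registered MMIO address into an eventfd signal instead of an exit to
  userspace.  The following is a minimal userspace sketch of how a VMM would
  wire these up through the generic KVM ioctls that this merge enables on
  ARM/ARM64; it is illustrative only, the GSI number and doorbell address are
  hypothetical placeholders, and error handling is omitted.]

  /*
   * Minimal sketch (not code from this merge): register an irqfd and an
   * ioeventfd on a KVM VM file descriptor.
   */
  #include <string.h>
  #include <sys/eventfd.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static void wire_eventfds(int vm_fd)
  {
  	int irq_efd = eventfd(0, 0);	/* signalling this injects a guest IRQ */
  	int mmio_efd = eventfd(0, 0);	/* guest doorbell writes signal this */
  	struct kvm_irqfd irqfd;
  	struct kvm_ioeventfd ioeventfd;

  	memset(&irqfd, 0, sizeof(irqfd));
  	irqfd.fd = irq_efd;
  	irqfd.gsi = 0;			/* hypothetical GSI; maps onto a VGIC SPI */
  	ioctl(vm_fd, KVM_IRQFD, &irqfd);

  	memset(&ioeventfd, 0, sizeof(ioeventfd));
  	ioeventfd.addr = 0x0a000000;	/* hypothetical virtio-mmio doorbell */
  	ioeventfd.len = 4;
  	ioeventfd.fd = mmio_efd;
  	ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd);
  }

  With both registered, a vhost backend can service a virtio queue without a
  round trip through the VMM for every notification.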

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  KVM: use slowpath for cross page cached accesses
  kvm: mmu: lazy collapse small sptes into large sptes
  KVM: x86: Clear CR2 on VCPU reset
  KVM: x86: DR0-DR3 are not clear on reset
  KVM: x86: BSP in MSR_IA32_APICBASE is writable
  KVM: x86: simplify kvm_apic_map
  KVM: x86: avoid logical_map when it is invalid
  KVM: x86: fix mixed APIC mode broadcast
  KVM: x86: use MDA for interrupt matching
  kvm/ppc/mpic: drop unused IRQ_testbit
  KVM: nVMX: remove unnecessary double caching of MAXPHYADDR
  KVM: nVMX: checks for address bits beyond MAXPHYADDR on VM-entry
  KVM: x86: cache maxphyaddr CPUID leaf in struct kvm_vcpu
  KVM: vmx: pass error code with internal error #2
  x86: vdso: fix pvclock races with task migration
  KVM: remove kvm_read_hva and kvm_read_hva_atomic
  KVM: x86: optimize delivery of TSC deadline timer interrupt
  KVM: x86: extract blocking logic from __vcpu_run
  kvm: x86: fix x86 eflags fixed bit
  KVM: s390: migrate vcpu interrupt state
  ...
parents 4541fec3 ca3f0874
@@ -27,6 +27,9 @@ Groups:
     Copies all floating interrupts into a buffer provided by userspace.
     When the buffer is too small it returns -ENOMEM, which is the indication
     for userspace to try again with a bigger buffer.
+    -ENOBUFS is returned when the allocation of a kernelspace buffer has
+    failed.
+    -EFAULT is returned when copying data to userspace failed.
     All interrupts remain pending, i.e. are not deleted from the list of
     currently pending interrupts.
     attr->addr contains the userspace address of the buffer into which all
......
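[Editor's note: the following is a minimal userspace sketch of the retry
protocol described in the documentation hunk above; it is not part of this
merge.  It assumes flic_fd is an open FLIC device fd, that the buffer size is
passed in kvm_device_attr.attr (as described in the full s390_flic document),
and that KVM_DEV_FLIC_GET_ALL_IRQS is available from the s390 uapi headers.]

  #include <errno.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Fetch all pending floating interrupts, growing the buffer on -ENOMEM. */
  static long get_all_floating_irqs(int flic_fd, void **buf, __u64 *len)
  {
  	*len = 4096;

  	for (;;) {
  		struct kvm_device_attr attr;
  		void *tmp = realloc(*buf, *len);

  		if (!tmp)
  			return -ENOMEM;
  		*buf = tmp;

  		memset(&attr, 0, sizeof(attr));
  		attr.group = KVM_DEV_FLIC_GET_ALL_IRQS;
  		attr.attr = *len;			/* assumed: buffer size in bytes */
  		attr.addr = (__u64)(unsigned long)*buf;

  		if (ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr) == 0)
  			return 0;	/* buffer now holds the interrupt data */
  		if (errno != ENOMEM)
  			return -errno;	/* e.g. EFAULT or ENOBUFS, as documented */

  		*len *= 2;		/* too small: retry with a bigger buffer */
  	}
  }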
@@ -5591,6 +5591,8 @@ S:	Supported
 F:	Documentation/*/kvm*.txt
 F:	Documentation/virtual/kvm/
 F:	arch/*/kvm/
+F:	arch/x86/kernel/kvm.c
+F:	arch/x86/kernel/kvmclock.c
 F:	arch/*/include/asm/kvm*
 F:	include/linux/kvm*
 F:	include/uapi/linux/kvm*
......
@@ -185,6 +185,7 @@
 #define HSR_COND	(0xfU << HSR_COND_SHIFT)

 #define FSC_FAULT	(0x04)
+#define FSC_ACCESS	(0x08)
 #define FSC_PERM	(0x0c)

 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
......
@@ -27,6 +27,8 @@
 #include <asm/fpstate.h>
 #include <kvm/arm_arch_timer.h>

+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
 #if defined(CONFIG_KVM_ARM_MAX_VCPUS)
 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
 #else
@@ -165,19 +167,10 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

 /* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
-			      unsigned long end)
-{
-	return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	return 0;
-}
-
 static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 							  unsigned long address)
 {
......
...@@ -28,28 +28,6 @@ struct kvm_decode { ...@@ -28,28 +28,6 @@ struct kvm_decode {
bool sign_extend; bool sign_extend;
}; };
/*
* The in-kernel MMIO emulation code wants to use a copy of run->mmio,
* which is an anonymous type. Use our own type instead.
*/
struct kvm_exit_mmio {
phys_addr_t phys_addr;
u8 data[8];
u32 len;
bool is_write;
void *private;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
struct kvm_exit_mmio *mmio)
{
run->mmio.phys_addr = mmio->phys_addr;
run->mmio.len = mmio->len;
run->mmio.is_write = mmio->is_write;
memcpy(run->mmio.data, mmio->data, mmio->len);
run->exit_reason = KVM_EXIT_MMIO;
}
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa); phys_addr_t fault_ipa);
......
@@ -198,6 +198,9 @@ struct kvm_arch_memory_slot {
 /* Highest supported SPI, from VGIC_NR_IRQS */
 #define KVM_ARM_IRQ_GIC_MAX	127

+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS		1
+
 /* PSCI interface */
 #define KVM_PSCI_FN_BASE	0x95c1ba5e
 #define KVM_PSCI_FN(n)		(KVM_PSCI_FN_BASE + (n))
......
...@@ -190,7 +190,6 @@ int main(void) ...@@ -190,7 +190,6 @@ int main(void)
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
...@@ -200,14 +199,11 @@ int main(void) ...@@ -200,14 +199,11 @@ int main(void)
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
#endif
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
#endif #endif
return 0; return 0;
......
@@ -18,6 +18,7 @@ if VIRTUALIZATION
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
+	depends on MMU && OF
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -26,10 +27,12 @@ config KVM
 	select KVM_ARM_HOST
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select SRCU
-	depends on ARM_VIRT_EXT && ARM_LPAE
+	select MMU_NOTIFIER
+	select HAVE_KVM_EVENTFD
+	select HAVE_KVM_IRQFD
+	depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
 	---help---
-	  Support hosting virtualized guest machines. You will also
-	  need to select one or more of the processor modules below.
+	  Support hosting virtualized guest machines.
 	  This module provides access to the hardware capabilities through
 	  a character device node named /dev/kvm.
@@ -37,10 +40,7 @@ config KVM
 	  If unsure, say N.

 config KVM_ARM_HOST
-	bool "KVM host support for ARM cpus."
-	depends on KVM
-	depends on MMU
-	select MMU_NOTIFIER
+	bool
 	---help---
 	  Provides host support for ARM processors.
@@ -55,20 +55,4 @@ config KVM_ARM_MAX_VCPUS
 	  large, so only choose a reasonable number that you expect to
 	  actually use.

-config KVM_ARM_VGIC
-	bool "KVM support for Virtual GIC"
-	depends on KVM_ARM_HOST && OF
-	select HAVE_KVM_IRQCHIP
-	default y
-	---help---
-	  Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
-	bool "KVM support for Architected Timers"
-	depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
-	select HAVE_KVM_IRQCHIP
-	default y
-	---help---
-	  Adds support for the Architected Timers in virtual machines
-
 endif # VIRTUALIZATION
...@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt) ...@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1 plus_virt_def := -DREQUIRES_VIRT=1
endif endif
ccflags-y += -Ivirt/kvm -Iarch/arm/kvm ccflags-y += -Iarch/arm/kvm
CFLAGS_arm.o := -I. $(plus_virt_def) CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I. CFLAGS_mmu.o := -I.
...@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) ...@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
KVM := ../../../virt/kvm KVM := ../../../virt/kvm
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
obj-y += kvm-arm.o init.o interrupts.o obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o obj-y += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o obj-y += $(KVM)/arm/vgic-v2.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o obj-y += $(KVM)/arm/vgic-v2-emul.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o obj-y += $(KVM)/arm/arch_timer.o
...@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); ...@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid; static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock); static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present;
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{ {
BUG_ON(preemptible()); BUG_ON(preemptible());
...@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) ...@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r; int r;
switch (ext) { switch (ext) {
case KVM_CAP_IRQCHIP: case KVM_CAP_IRQCHIP:
r = vgic_present; case KVM_CAP_IRQFD:
break; case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL: case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY: case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU: case KVM_CAP_SYNC_MMU:
...@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) ...@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_PSCI: case KVM_CAP_ARM_PSCI:
case KVM_CAP_ARM_PSCI_0_2: case KVM_CAP_ARM_PSCI_0_2:
case KVM_CAP_READONLY_MEM: case KVM_CAP_READONLY_MEM:
case KVM_CAP_MP_STATE:
r = 1; r = 1;
break; break;
case KVM_CAP_COALESCED_MMIO: case KVM_CAP_COALESCED_MMIO:
...@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) ...@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{ {
return 0; return kvm_timer_should_fire(vcpu);
} }
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
...@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, ...@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state) struct kvm_mp_state *mp_state)
{ {
return -EINVAL; if (vcpu->arch.pause)
mp_state->mp_state = KVM_MP_STATE_STOPPED;
else
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
return 0;
} }
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state) struct kvm_mp_state *mp_state)
{ {
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.pause = false;
break;
case KVM_MP_STATE_STOPPED:
vcpu->arch.pause = true;
break;
default:
return -EINVAL; return -EINVAL;
}
return 0;
} }
/** /**
...@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ...@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return 0; return 0;
} }
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
return vgic_initialized(kvm);
}
static void vcpu_pause(struct kvm_vcpu *vcpu) static void vcpu_pause(struct kvm_vcpu *vcpu)
{ {
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
...@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, ...@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
switch (dev_id) { switch (dev_id) {
case KVM_ARM_DEVICE_VGIC_V2: case KVM_ARM_DEVICE_VGIC_V2:
if (!vgic_present)
return -ENXIO;
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default: default:
return -ENODEV; return -ENODEV;
...@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp, ...@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) { switch (ioctl) {
case KVM_CREATE_IRQCHIP: { case KVM_CREATE_IRQCHIP: {
if (vgic_present)
return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
else
return -ENXIO;
} }
case KVM_ARM_SET_DEVICE_ADDR: { case KVM_ARM_SET_DEVICE_ADDR: {
struct kvm_arm_device_addr dev_addr; struct kvm_arm_device_addr dev_addr;
...@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void) ...@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void)
if (err) if (err)
goto out_free_context; goto out_free_context;
#ifdef CONFIG_KVM_ARM_VGIC
vgic_present = true;
#endif
/* /*
* Init HYP architected timer support * Init HYP architected timer support
*/ */
......
...@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) ...@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL; return -EINVAL;
} }
#ifndef CONFIG_KVM_ARM_TIMER
#define NUM_TIMER_REGS 0
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
return 0;
}
static bool is_timer_reg(u64 index)
{
return false;
}
#else
#define NUM_TIMER_REGS 3 #define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index) static bool is_timer_reg(u64 index)
...@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) ...@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
return 0; return 0;
} }
#endif
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{ {
void __user *uaddr = (void __user *)(long)reg->addr; void __user *uaddr = (void __user *)(long)reg->addr;
......
...@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0 ...@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0
* Assumes vcpu pointer in vcpu reg * Assumes vcpu pointer in vcpu reg
*/ */
.macro save_vgic_state .macro save_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */ /* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM] ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL] ldr r2, [r2, #KVM_VGIC_VCTRL]
...@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 ) ...@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1 subs r4, r4, #1
bne 1b bne 1b
2: 2:
#endif
.endm .endm
/* /*
...@@ -469,7 +467,6 @@ ARM_BE8(rev r6, r6 ) ...@@ -469,7 +467,6 @@ ARM_BE8(rev r6, r6 )
* Assumes vcpu pointer in vcpu reg * Assumes vcpu pointer in vcpu reg
*/ */
.macro restore_vgic_state .macro restore_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */ /* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM] ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL] ldr r2, [r2, #KVM_VGIC_VCTRL]
...@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 ) ...@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1 subs r4, r4, #1
bne 1b bne 1b
2: 2:
#endif
.endm .endm
#define CNTHCTL_PL1PCTEN (1 << 0) #define CNTHCTL_PL1PCTEN (1 << 0)
...@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 ) ...@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 )
* Clobbers r2-r5 * Clobbers r2-r5
*/ */
.macro save_timer_state .macro save_timer_state
#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM] ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED] ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0 cmp r2, #0
...@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 ) ...@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1: 1:
#endif
@ Allow physical timer/counter access for the host @ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
...@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 ) ...@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 )
bic r2, r2, #CNTHCTL_PL1PCEN bic r2, r2, #CNTHCTL_PL1PCEN
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM] ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED] ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0 cmp r2, #0
...@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 ) ...@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 )
and r2, r2, #3 and r2, r2, #3
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
1: 1:
#endif
.endm .endm
.equ vmentry, 0 .equ vmentry, 0
......
...@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 0; return 0;
} }
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
struct kvm_exit_mmio *mmio)
{ {
unsigned long rt; unsigned long rt;
int len; int access_size;
bool is_write, sign_extend; bool sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) { if (kvm_vcpu_dabt_isextabt(vcpu)) {
/* cache operation on I/O addr, tell guest unsupported */ /* cache operation on I/O addr, tell guest unsupported */
...@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return 1; return 1;
} }
len = kvm_vcpu_dabt_get_as(vcpu); access_size = kvm_vcpu_dabt_get_as(vcpu);
if (unlikely(len < 0)) if (unlikely(access_size < 0))
return len; return access_size;
is_write = kvm_vcpu_dabt_iswrite(vcpu); *is_write = kvm_vcpu_dabt_iswrite(vcpu);
sign_extend = kvm_vcpu_dabt_issext(vcpu); sign_extend = kvm_vcpu_dabt_issext(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu); rt = kvm_vcpu_dabt_get_rd(vcpu);
mmio->is_write = is_write; *len = access_size;
mmio->phys_addr = fault_ipa;
mmio->len = len;
vcpu->arch.mmio_decode.sign_extend = sign_extend; vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt; vcpu->arch.mmio_decode.rt = rt;
...@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa) phys_addr_t fault_ipa)
{ {
struct kvm_exit_mmio mmio;
unsigned long data; unsigned long data;
unsigned long rt; unsigned long rt;
int ret; int ret;
bool is_write;
int len;
u8 data_buf[8];
/* /*
* Prepare MMIO operation. First stash it in a private * Prepare MMIO operation. First decode the syndrome data we get
* structure that we can use for in-kernel emulation. If the * from the CPU. Then try if some in-kernel emulation feels
* kernel can't handle it, copy it into run->mmio and let user * responsible, otherwise let user space do its magic.
* space do its magic.
*/ */
if (kvm_vcpu_dabt_isvalid(vcpu)) { if (kvm_vcpu_dabt_isvalid(vcpu)) {
ret = decode_hsr(vcpu, fault_ipa, &mmio); ret = decode_hsr(vcpu, &is_write, &len);
if (ret) if (ret)
return ret; return ret;
} else { } else {
...@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, ...@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
rt = vcpu->arch.mmio_decode.rt; rt = vcpu->arch.mmio_decode.rt;
if (mmio.is_write) { if (is_write) {
data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
mmio.len);
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
mmio_write_buf(data_buf, len, data);
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len, ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
fault_ipa, data); data_buf);
mmio_write_buf(mmio.data, mmio.len, data);
} else { } else {
trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len, trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
fault_ipa, 0); fault_ipa, 0);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
data_buf);
} }
if (vgic_handle_mmio(vcpu, run, &mmio)) /* Now prepare kvm_run for the potential return to userland. */
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
memcpy(run->mmio.data, data_buf, len);
if (!ret) {
/* We handled the access successfully in the kernel. */
kvm_handle_mmio_return(vcpu, run);
return 1; return 1;
}
kvm_prepare_mmio(run, &mmio); run->exit_reason = KVM_EXIT_MMIO;
return 0; return 0;
} }
...@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
out_unlock: out_unlock:
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn); kvm_release_pfn_clean(pfn);
return ret; return ret;
} }
/*
* Resolve the access fault by making the page young again.
* Note that because the faulting entry is guaranteed not to be
* cached in the TLB, we don't need to invalidate anything.
*/
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
pmd_t *pmd;
pte_t *pte;
pfn_t pfn;
bool pfn_valid = false;
trace_kvm_access_fault(fault_ipa);
spin_lock(&vcpu->kvm->mmu_lock);
pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
if (!pmd || pmd_none(*pmd)) /* Nothing there */
goto out;
if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
*pmd = pmd_mkyoung(*pmd);
pfn = pmd_pfn(*pmd);
pfn_valid = true;
goto out;
}
pte = pte_offset_kernel(pmd, fault_ipa);
if (pte_none(*pte)) /* Nothing there either */
goto out;
*pte = pte_mkyoung(*pte); /* Just a page... */
pfn = pte_pfn(*pte);
pfn_valid = true;
out:
spin_unlock(&vcpu->kvm->mmu_lock);
if (pfn_valid)
kvm_set_pfn_accessed(pfn);
}
/** /**
* kvm_handle_guest_abort - handles all 2nd stage aborts * kvm_handle_guest_abort - handles all 2nd stage aborts
* @vcpu: the VCPU pointer * @vcpu: the VCPU pointer
...@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Check the stage-2 fault is trans. fault or write fault */ /* Check the stage-2 fault is trans. fault or write fault */
fault_status = kvm_vcpu_trap_get_fault_type(vcpu); fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
fault_status != FSC_ACCESS) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu), kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu), (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
...@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Userspace should not be able to register out-of-bounds IPAs */ /* Userspace should not be able to register out-of-bounds IPAs */
VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
if (fault_status == FSC_ACCESS) {
handle_access_fault(vcpu, fault_ipa);
ret = 1;
goto out_unlock;
}
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
if (ret == 0) if (ret == 0)
ret = 1; ret = 1;
...@@ -1408,15 +1456,16 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -1408,15 +1456,16 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
return ret; return ret;
} }
static void handle_hva_to_gpa(struct kvm *kvm, static int handle_hva_to_gpa(struct kvm *kvm,
unsigned long start, unsigned long start,
unsigned long end, unsigned long end,
void (*handler)(struct kvm *kvm, int (*handler)(struct kvm *kvm,
gpa_t gpa, void *data), gpa_t gpa, void *data),
void *data) void *data)
{ {
struct kvm_memslots *slots; struct kvm_memslots *slots;
struct kvm_memory_slot *memslot; struct kvm_memory_slot *memslot;
int ret = 0;
slots = kvm_memslots(kvm); slots = kvm_memslots(kvm);
...@@ -1440,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm, ...@@ -1440,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm,
for (; gfn < gfn_end; ++gfn) { for (; gfn < gfn_end; ++gfn) {
gpa_t gpa = gfn << PAGE_SHIFT; gpa_t gpa = gfn << PAGE_SHIFT;
handler(kvm, gpa, data); ret |= handler(kvm, gpa, data);
} }
} }
return ret;
} }
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{ {
unmap_stage2_range(kvm, gpa, PAGE_SIZE); unmap_stage2_range(kvm, gpa, PAGE_SIZE);
return 0;
} }
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
...@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, ...@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
return 0; return 0;
} }
static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{ {
pte_t *pte = (pte_t *)data; pte_t *pte = (pte_t *)data;
...@@ -1485,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) ...@@ -1485,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
* through this calling path. * through this calling path.
*/ */
stage2_set_pte(kvm, NULL, gpa, pte, 0); stage2_set_pte(kvm, NULL, gpa, pte, 0);
return 0;
} }
...@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) ...@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
} }
static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
pmd_t *pmd;
pte_t *pte;
pmd = stage2_get_pmd(kvm, NULL, gpa);
if (!pmd || pmd_none(*pmd)) /* Nothing there */
return 0;
if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
if (pmd_young(*pmd)) {
*pmd = pmd_mkold(*pmd);
return 1;
}
return 0;
}
pte = pte_offset_kernel(pmd, gpa);
if (pte_none(*pte))
return 0;
if (pte_young(*pte)) {
*pte = pte_mkold(*pte); /* Just a page... */
return 1;
}
return 0;
}
static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
pmd_t *pmd;
pte_t *pte;
pmd = stage2_get_pmd(kvm, NULL, gpa);
if (!pmd || pmd_none(*pmd)) /* Nothing there */
return 0;
if (kvm_pmd_huge(*pmd)) /* THP, HugeTLB */
return pmd_young(*pmd);
pte = pte_offset_kernel(pmd, gpa);
if (!pte_none(*pte)) /* Just a page... */
return pte_young(*pte);
return 0;
}
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
trace_kvm_age_hva(start, end);
return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
trace_kvm_test_age_hva(hva);
return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{ {
mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
......
...@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault, ...@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault,
__entry->hxfar, __entry->vcpu_pc) __entry->hxfar, __entry->vcpu_pc)
); );
TRACE_EVENT(kvm_access_fault,
TP_PROTO(unsigned long ipa),
TP_ARGS(ipa),
TP_STRUCT__entry(
__field( unsigned long, ipa )
),
TP_fast_assign(
__entry->ipa = ipa;
),
TP_printk("IPA: %lx", __entry->ipa)
);
TRACE_EVENT(kvm_irq_line, TRACE_EVENT(kvm_irq_line,
TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
TP_ARGS(type, vcpu_idx, irq_num, level), TP_ARGS(type, vcpu_idx, irq_num, level),
...@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva, ...@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva,
TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
); );
TRACE_EVENT(kvm_age_hva,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end),
TP_STRUCT__entry(
__field( unsigned long, start )
__field( unsigned long, end )
),
TP_fast_assign(
__entry->start = start;
__entry->end = end;
),
TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
__entry->start, __entry->end)
);
TRACE_EVENT(kvm_test_age_hva,
TP_PROTO(unsigned long hva),
TP_ARGS(hva),
TP_STRUCT__entry(
__field( unsigned long, hva )
),
TP_fast_assign(
__entry->hva = hva;
),
TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
);
TRACE_EVENT(kvm_hvc, TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm), TP_ARGS(vcpu_pc, r0, imm),
......
@@ -90,6 +90,7 @@
 #define ESR_ELx_FSC		(0x3F)
 #define ESR_ELx_FSC_TYPE	(0x3C)
 #define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
 #define ESR_ELx_FSC_PERM	(0x0C)
 #define ESR_ELx_CV		(UL(1) << 24)
......
@@ -188,6 +188,7 @@
 /* For compatibility with fault code shared with 32-bit */
 #define FSC_FAULT	ESR_ELx_FSC_FAULT
+#define FSC_ACCESS	ESR_ELx_FSC_ACCESS
 #define FSC_PERM	ESR_ELx_FSC_PERM

 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
......
@@ -28,6 +28,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>

+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
 #if defined(CONFIG_KVM_ARM_MAX_VCPUS)
 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
 #else
@@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

 /* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
-			      unsigned long end)
-{
-	return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	return 0;
-}
-
 static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 							  unsigned long address)
 {
......
...@@ -31,28 +31,6 @@ struct kvm_decode { ...@@ -31,28 +31,6 @@ struct kvm_decode {
bool sign_extend; bool sign_extend;
}; };
/*
* The in-kernel MMIO emulation code wants to use a copy of run->mmio,
* which is an anonymous type. Use our own type instead.
*/
struct kvm_exit_mmio {
phys_addr_t phys_addr;
u8 data[8];
u32 len;
bool is_write;
void *private;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
struct kvm_exit_mmio *mmio)
{
run->mmio.phys_addr = mmio->phys_addr;
run->mmio.len = mmio->len;
run->mmio.is_write = mmio->is_write;
memcpy(run->mmio.data, mmio->data, mmio->len);
run->exit_reason = KVM_EXIT_MMIO;
}
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa); phys_addr_t fault_ipa);
......
@@ -191,6 +191,9 @@ struct kvm_arch_memory_slot {
 /* Highest supported SPI, from VGIC_NR_IRQS */
 #define KVM_ARM_IRQ_GIC_MAX	127

+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS		1
+
 /* PSCI interface */
 #define KVM_PSCI_FN_BASE	0x95c1ba5e
 #define KVM_PSCI_FN(n)		(KVM_PSCI_FN_BASE + (n))
......
@@ -18,6 +18,7 @@ if VIRTUALIZATION
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
+	depends on OF
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
@@ -25,10 +26,10 @@ config KVM
 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
 	select KVM_ARM_HOST
-	select KVM_ARM_VGIC
-	select KVM_ARM_TIMER
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select SRCU
+	select HAVE_KVM_EVENTFD
+	select HAVE_KVM_IRQFD
 	---help---
 	  Support hosting virtualized guest machines.
@@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS
 	  large, so only choose a reasonable number that you expect to
 	  actually use.

-config KVM_ARM_VGIC
-	bool
-	depends on KVM_ARM_HOST && OF
-	select HAVE_KVM_IRQCHIP
-	---help---
-	  Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
-	bool
-	depends on KVM_ARM_VGIC
-	---help---
-	  Adds support for the Architected Timers in virtual machines.
-
 endif # VIRTUALIZATION
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module # Makefile for Kernel-based Virtual Machine module
# #
ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm ccflags-y += -Iarch/arm64/kvm
CFLAGS_arm.o := -I. CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I. CFLAGS_mmu.o := -I.
...@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm ...@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
...@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o ...@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
...@@ -16,38 +16,38 @@ ...@@ -16,38 +16,38 @@
.set push .set push
SET_HARDFLOAT SET_HARDFLOAT
cfc1 \tmp, fcr31 cfc1 \tmp, fcr31
swc1 $f0, THREAD_FPR0_LS64(\thread) swc1 $f0, THREAD_FPR0(\thread)
swc1 $f1, THREAD_FPR1_LS64(\thread) swc1 $f1, THREAD_FPR1(\thread)
swc1 $f2, THREAD_FPR2_LS64(\thread) swc1 $f2, THREAD_FPR2(\thread)
swc1 $f3, THREAD_FPR3_LS64(\thread) swc1 $f3, THREAD_FPR3(\thread)
swc1 $f4, THREAD_FPR4_LS64(\thread) swc1 $f4, THREAD_FPR4(\thread)
swc1 $f5, THREAD_FPR5_LS64(\thread) swc1 $f5, THREAD_FPR5(\thread)
swc1 $f6, THREAD_FPR6_LS64(\thread) swc1 $f6, THREAD_FPR6(\thread)
swc1 $f7, THREAD_FPR7_LS64(\thread) swc1 $f7, THREAD_FPR7(\thread)
swc1 $f8, THREAD_FPR8_LS64(\thread) swc1 $f8, THREAD_FPR8(\thread)
swc1 $f9, THREAD_FPR9_LS64(\thread) swc1 $f9, THREAD_FPR9(\thread)
swc1 $f10, THREAD_FPR10_LS64(\thread) swc1 $f10, THREAD_FPR10(\thread)
swc1 $f11, THREAD_FPR11_LS64(\thread) swc1 $f11, THREAD_FPR11(\thread)
swc1 $f12, THREAD_FPR12_LS64(\thread) swc1 $f12, THREAD_FPR12(\thread)
swc1 $f13, THREAD_FPR13_LS64(\thread) swc1 $f13, THREAD_FPR13(\thread)
swc1 $f14, THREAD_FPR14_LS64(\thread) swc1 $f14, THREAD_FPR14(\thread)
swc1 $f15, THREAD_FPR15_LS64(\thread) swc1 $f15, THREAD_FPR15(\thread)
swc1 $f16, THREAD_FPR16_LS64(\thread) swc1 $f16, THREAD_FPR16(\thread)
swc1 $f17, THREAD_FPR17_LS64(\thread) swc1 $f17, THREAD_FPR17(\thread)
swc1 $f18, THREAD_FPR18_LS64(\thread) swc1 $f18, THREAD_FPR18(\thread)
swc1 $f19, THREAD_FPR19_LS64(\thread) swc1 $f19, THREAD_FPR19(\thread)
swc1 $f20, THREAD_FPR20_LS64(\thread) swc1 $f20, THREAD_FPR20(\thread)
swc1 $f21, THREAD_FPR21_LS64(\thread) swc1 $f21, THREAD_FPR21(\thread)
swc1 $f22, THREAD_FPR22_LS64(\thread) swc1 $f22, THREAD_FPR22(\thread)
swc1 $f23, THREAD_FPR23_LS64(\thread) swc1 $f23, THREAD_FPR23(\thread)
swc1 $f24, THREAD_FPR24_LS64(\thread) swc1 $f24, THREAD_FPR24(\thread)
swc1 $f25, THREAD_FPR25_LS64(\thread) swc1 $f25, THREAD_FPR25(\thread)
swc1 $f26, THREAD_FPR26_LS64(\thread) swc1 $f26, THREAD_FPR26(\thread)
swc1 $f27, THREAD_FPR27_LS64(\thread) swc1 $f27, THREAD_FPR27(\thread)
swc1 $f28, THREAD_FPR28_LS64(\thread) swc1 $f28, THREAD_FPR28(\thread)
swc1 $f29, THREAD_FPR29_LS64(\thread) swc1 $f29, THREAD_FPR29(\thread)
swc1 $f30, THREAD_FPR30_LS64(\thread) swc1 $f30, THREAD_FPR30(\thread)
swc1 $f31, THREAD_FPR31_LS64(\thread) swc1 $f31, THREAD_FPR31(\thread)
sw \tmp, THREAD_FCR31(\thread) sw \tmp, THREAD_FCR31(\thread)
.set pop .set pop
.endm .endm
...@@ -56,38 +56,38 @@ ...@@ -56,38 +56,38 @@
.set push .set push
SET_HARDFLOAT SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread) lw \tmp, THREAD_FCR31(\thread)
lwc1 $f0, THREAD_FPR0_LS64(\thread) lwc1 $f0, THREAD_FPR0(\thread)
lwc1 $f1, THREAD_FPR1_LS64(\thread) lwc1 $f1, THREAD_FPR1(\thread)
lwc1 $f2, THREAD_FPR2_LS64(\thread) lwc1 $f2, THREAD_FPR2(\thread)
lwc1 $f3, THREAD_FPR3_LS64(\thread) lwc1 $f3, THREAD_FPR3(\thread)
lwc1 $f4, THREAD_FPR4_LS64(\thread) lwc1 $f4, THREAD_FPR4(\thread)
lwc1 $f5, THREAD_FPR5_LS64(\thread) lwc1 $f5, THREAD_FPR5(\thread)
lwc1 $f6, THREAD_FPR6_LS64(\thread) lwc1 $f6, THREAD_FPR6(\thread)
lwc1 $f7, THREAD_FPR7_LS64(\thread) lwc1 $f7, THREAD_FPR7(\thread)
lwc1 $f8, THREAD_FPR8_LS64(\thread) lwc1 $f8, THREAD_FPR8(\thread)
lwc1 $f9, THREAD_FPR9_LS64(\thread) lwc1 $f9, THREAD_FPR9(\thread)
lwc1 $f10, THREAD_FPR10_LS64(\thread) lwc1 $f10, THREAD_FPR10(\thread)
lwc1 $f11, THREAD_FPR11_LS64(\thread) lwc1 $f11, THREAD_FPR11(\thread)
lwc1 $f12, THREAD_FPR12_LS64(\thread) lwc1 $f12, THREAD_FPR12(\thread)
lwc1 $f13, THREAD_FPR13_LS64(\thread) lwc1 $f13, THREAD_FPR13(\thread)
lwc1 $f14, THREAD_FPR14_LS64(\thread) lwc1 $f14, THREAD_FPR14(\thread)
lwc1 $f15, THREAD_FPR15_LS64(\thread) lwc1 $f15, THREAD_FPR15(\thread)
lwc1 $f16, THREAD_FPR16_LS64(\thread) lwc1 $f16, THREAD_FPR16(\thread)
lwc1 $f17, THREAD_FPR17_LS64(\thread) lwc1 $f17, THREAD_FPR17(\thread)
lwc1 $f18, THREAD_FPR18_LS64(\thread) lwc1 $f18, THREAD_FPR18(\thread)
lwc1 $f19, THREAD_FPR19_LS64(\thread) lwc1 $f19, THREAD_FPR19(\thread)
lwc1 $f20, THREAD_FPR20_LS64(\thread) lwc1 $f20, THREAD_FPR20(\thread)
lwc1 $f21, THREAD_FPR21_LS64(\thread) lwc1 $f21, THREAD_FPR21(\thread)
lwc1 $f22, THREAD_FPR22_LS64(\thread) lwc1 $f22, THREAD_FPR22(\thread)
lwc1 $f23, THREAD_FPR23_LS64(\thread) lwc1 $f23, THREAD_FPR23(\thread)
lwc1 $f24, THREAD_FPR24_LS64(\thread) lwc1 $f24, THREAD_FPR24(\thread)
lwc1 $f25, THREAD_FPR25_LS64(\thread) lwc1 $f25, THREAD_FPR25(\thread)
lwc1 $f26, THREAD_FPR26_LS64(\thread) lwc1 $f26, THREAD_FPR26(\thread)
lwc1 $f27, THREAD_FPR27_LS64(\thread) lwc1 $f27, THREAD_FPR27(\thread)
lwc1 $f28, THREAD_FPR28_LS64(\thread) lwc1 $f28, THREAD_FPR28(\thread)
lwc1 $f29, THREAD_FPR29_LS64(\thread) lwc1 $f29, THREAD_FPR29(\thread)
lwc1 $f30, THREAD_FPR30_LS64(\thread) lwc1 $f30, THREAD_FPR30(\thread)
lwc1 $f31, THREAD_FPR31_LS64(\thread) lwc1 $f31, THREAD_FPR31(\thread)
ctc1 \tmp, fcr31 ctc1 \tmp, fcr31
.set pop .set pop
.endm .endm
......
...@@ -60,22 +60,22 @@ ...@@ -60,22 +60,22 @@
.set push .set push
SET_HARDFLOAT SET_HARDFLOAT
cfc1 \tmp, fcr31 cfc1 \tmp, fcr31
sdc1 $f0, THREAD_FPR0_LS64(\thread) sdc1 $f0, THREAD_FPR0(\thread)
sdc1 $f2, THREAD_FPR2_LS64(\thread) sdc1 $f2, THREAD_FPR2(\thread)
sdc1 $f4, THREAD_FPR4_LS64(\thread) sdc1 $f4, THREAD_FPR4(\thread)
sdc1 $f6, THREAD_FPR6_LS64(\thread) sdc1 $f6, THREAD_FPR6(\thread)
sdc1 $f8, THREAD_FPR8_LS64(\thread) sdc1 $f8, THREAD_FPR8(\thread)
sdc1 $f10, THREAD_FPR10_LS64(\thread) sdc1 $f10, THREAD_FPR10(\thread)
sdc1 $f12, THREAD_FPR12_LS64(\thread) sdc1 $f12, THREAD_FPR12(\thread)
sdc1 $f14, THREAD_FPR14_LS64(\thread) sdc1 $f14, THREAD_FPR14(\thread)
sdc1 $f16, THREAD_FPR16_LS64(\thread) sdc1 $f16, THREAD_FPR16(\thread)
sdc1 $f18, THREAD_FPR18_LS64(\thread) sdc1 $f18, THREAD_FPR18(\thread)
sdc1 $f20, THREAD_FPR20_LS64(\thread) sdc1 $f20, THREAD_FPR20(\thread)
sdc1 $f22, THREAD_FPR22_LS64(\thread) sdc1 $f22, THREAD_FPR22(\thread)
sdc1 $f24, THREAD_FPR24_LS64(\thread) sdc1 $f24, THREAD_FPR24(\thread)
sdc1 $f26, THREAD_FPR26_LS64(\thread) sdc1 $f26, THREAD_FPR26(\thread)
sdc1 $f28, THREAD_FPR28_LS64(\thread) sdc1 $f28, THREAD_FPR28(\thread)
sdc1 $f30, THREAD_FPR30_LS64(\thread) sdc1 $f30, THREAD_FPR30(\thread)
sw \tmp, THREAD_FCR31(\thread) sw \tmp, THREAD_FCR31(\thread)
.set pop .set pop
.endm .endm
...@@ -84,22 +84,22 @@ ...@@ -84,22 +84,22 @@
.set push .set push
.set mips64r2 .set mips64r2
SET_HARDFLOAT SET_HARDFLOAT
sdc1 $f1, THREAD_FPR1_LS64(\thread) sdc1 $f1, THREAD_FPR1(\thread)
sdc1 $f3, THREAD_FPR3_LS64(\thread) sdc1 $f3, THREAD_FPR3(\thread)
sdc1 $f5, THREAD_FPR5_LS64(\thread) sdc1 $f5, THREAD_FPR5(\thread)
sdc1 $f7, THREAD_FPR7_LS64(\thread) sdc1 $f7, THREAD_FPR7(\thread)
sdc1 $f9, THREAD_FPR9_LS64(\thread) sdc1 $f9, THREAD_FPR9(\thread)
sdc1 $f11, THREAD_FPR11_LS64(\thread) sdc1 $f11, THREAD_FPR11(\thread)
sdc1 $f13, THREAD_FPR13_LS64(\thread) sdc1 $f13, THREAD_FPR13(\thread)
sdc1 $f15, THREAD_FPR15_LS64(\thread) sdc1 $f15, THREAD_FPR15(\thread)
sdc1 $f17, THREAD_FPR17_LS64(\thread) sdc1 $f17, THREAD_FPR17(\thread)
sdc1 $f19, THREAD_FPR19_LS64(\thread) sdc1 $f19, THREAD_FPR19(\thread)
sdc1 $f21, THREAD_FPR21_LS64(\thread) sdc1 $f21, THREAD_FPR21(\thread)
sdc1 $f23, THREAD_FPR23_LS64(\thread) sdc1 $f23, THREAD_FPR23(\thread)
sdc1 $f25, THREAD_FPR25_LS64(\thread) sdc1 $f25, THREAD_FPR25(\thread)
sdc1 $f27, THREAD_FPR27_LS64(\thread) sdc1 $f27, THREAD_FPR27(\thread)
sdc1 $f29, THREAD_FPR29_LS64(\thread) sdc1 $f29, THREAD_FPR29(\thread)
sdc1 $f31, THREAD_FPR31_LS64(\thread) sdc1 $f31, THREAD_FPR31(\thread)
.set pop .set pop
.endm .endm
...@@ -118,22 +118,22 @@ ...@@ -118,22 +118,22 @@
.set push .set push
SET_HARDFLOAT SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread) lw \tmp, THREAD_FCR31(\thread)
ldc1 $f0, THREAD_FPR0_LS64(\thread) ldc1 $f0, THREAD_FPR0(\thread)
ldc1 $f2, THREAD_FPR2_LS64(\thread) ldc1 $f2, THREAD_FPR2(\thread)
ldc1 $f4, THREAD_FPR4_LS64(\thread) ldc1 $f4, THREAD_FPR4(\thread)
ldc1 $f6, THREAD_FPR6_LS64(\thread) ldc1 $f6, THREAD_FPR6(\thread)
ldc1 $f8, THREAD_FPR8_LS64(\thread) ldc1 $f8, THREAD_FPR8(\thread)
ldc1 $f10, THREAD_FPR10_LS64(\thread) ldc1 $f10, THREAD_FPR10(\thread)
ldc1 $f12, THREAD_FPR12_LS64(\thread) ldc1 $f12, THREAD_FPR12(\thread)
ldc1 $f14, THREAD_FPR14_LS64(\thread) ldc1 $f14, THREAD_FPR14(\thread)
ldc1 $f16, THREAD_FPR16_LS64(\thread) ldc1 $f16, THREAD_FPR16(\thread)
ldc1 $f18, THREAD_FPR18_LS64(\thread) ldc1 $f18, THREAD_FPR18(\thread)
ldc1 $f20, THREAD_FPR20_LS64(\thread) ldc1 $f20, THREAD_FPR20(\thread)
ldc1 $f22, THREAD_FPR22_LS64(\thread) ldc1 $f22, THREAD_FPR22(\thread)
ldc1 $f24, THREAD_FPR24_LS64(\thread) ldc1 $f24, THREAD_FPR24(\thread)
ldc1 $f26, THREAD_FPR26_LS64(\thread) ldc1 $f26, THREAD_FPR26(\thread)
ldc1 $f28, THREAD_FPR28_LS64(\thread) ldc1 $f28, THREAD_FPR28(\thread)
ldc1 $f30, THREAD_FPR30_LS64(\thread) ldc1 $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31 ctc1 \tmp, fcr31
.endm .endm
...@@ -141,22 +141,22 @@ ...@@ -141,22 +141,22 @@
.set push .set push
.set mips64r2 .set mips64r2
SET_HARDFLOAT SET_HARDFLOAT
ldc1 $f1, THREAD_FPR1_LS64(\thread) ldc1 $f1, THREAD_FPR1(\thread)
ldc1 $f3, THREAD_FPR3_LS64(\thread) ldc1 $f3, THREAD_FPR3(\thread)
ldc1 $f5, THREAD_FPR5_LS64(\thread) ldc1 $f5, THREAD_FPR5(\thread)
ldc1 $f7, THREAD_FPR7_LS64(\thread) ldc1 $f7, THREAD_FPR7(\thread)
ldc1 $f9, THREAD_FPR9_LS64(\thread) ldc1 $f9, THREAD_FPR9(\thread)
ldc1 $f11, THREAD_FPR11_LS64(\thread) ldc1 $f11, THREAD_FPR11(\thread)
ldc1 $f13, THREAD_FPR13_LS64(\thread) ldc1 $f13, THREAD_FPR13(\thread)
ldc1 $f15, THREAD_FPR15_LS64(\thread) ldc1 $f15, THREAD_FPR15(\thread)
ldc1 $f17, THREAD_FPR17_LS64(\thread) ldc1 $f17, THREAD_FPR17(\thread)
ldc1 $f19, THREAD_FPR19_LS64(\thread) ldc1 $f19, THREAD_FPR19(\thread)
ldc1 $f21, THREAD_FPR21_LS64(\thread) ldc1 $f21, THREAD_FPR21(\thread)
ldc1 $f23, THREAD_FPR23_LS64(\thread) ldc1 $f23, THREAD_FPR23(\thread)
ldc1 $f25, THREAD_FPR25_LS64(\thread) ldc1 $f25, THREAD_FPR25(\thread)
ldc1 $f27, THREAD_FPR27_LS64(\thread) ldc1 $f27, THREAD_FPR27(\thread)
ldc1 $f29, THREAD_FPR29_LS64(\thread) ldc1 $f29, THREAD_FPR29(\thread)
ldc1 $f31, THREAD_FPR31_LS64(\thread) ldc1 $f31, THREAD_FPR31(\thread)
.set pop .set pop
.endm .endm
...@@ -211,6 +211,22 @@ ...@@ -211,6 +211,22 @@
.endm .endm
#ifdef TOOLCHAIN_SUPPORTS_MSA #ifdef TOOLCHAIN_SUPPORTS_MSA
.macro _cfcmsa rd, cs
.set push
.set mips32r2
.set msa
cfcmsa \rd, $\cs
.set pop
.endm
.macro _ctcmsa cd, rs
.set push
.set mips32r2
.set msa
ctcmsa $\cd, \rs
.set pop
.endm
.macro ld_d wd, off, base .macro ld_d wd, off, base
.set push .set push
.set mips32r2 .set mips32r2
...@@ -227,35 +243,35 @@ ...@@ -227,35 +243,35 @@
.set pop .set pop
.endm .endm
.macro copy_u_w rd, ws, n .macro copy_u_w ws, n
.set push .set push
.set mips32r2 .set mips32r2
.set msa .set msa
copy_u.w \rd, $w\ws[\n] copy_u.w $1, $w\ws[\n]
.set pop .set pop
.endm .endm
.macro copy_u_d rd, ws, n .macro copy_u_d ws, n
.set push .set push
.set mips64r2 .set mips64r2
.set msa .set msa
copy_u.d \rd, $w\ws[\n] copy_u.d $1, $w\ws[\n]
.set pop .set pop
.endm .endm
.macro insert_w wd, n, rs .macro insert_w wd, n
.set push .set push
.set mips32r2 .set mips32r2
.set msa .set msa
insert.w $w\wd[\n], \rs insert.w $w\wd[\n], $1
.set pop .set pop
.endm .endm
.macro insert_d wd, n, rs .macro insert_d wd, n
.set push .set push
.set mips64r2 .set mips64r2
.set msa .set msa
insert.d $w\wd[\n], \rs insert.d $w\wd[\n], $1
.set pop .set pop
.endm .endm
#else #else
...@@ -283,7 +299,7 @@ ...@@ -283,7 +299,7 @@
/* /*
* Temporary until all toolchains in use include MSA support. * Temporary until all toolchains in use include MSA support.
*/ */
.macro cfcmsa rd, cs .macro _cfcmsa rd, cs
.set push .set push
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
...@@ -293,7 +309,7 @@ ...@@ -293,7 +309,7 @@
.set pop .set pop
.endm .endm
.macro ctcmsa cd, rs .macro _ctcmsa cd, rs
.set push .set push
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
...@@ -320,44 +336,36 @@ ...@@ -320,44 +336,36 @@
.set pop .set pop
.endm .endm
.macro copy_u_w rd, ws, n .macro copy_u_w ws, n
.set push .set push
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
.insn .insn
.word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop .set pop
.endm .endm
.macro copy_u_d rd, ws, n .macro copy_u_d ws, n
.set push .set push
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
.insn .insn
.word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop .set pop
.endm .endm
.macro insert_w wd, n, rs .macro insert_w wd, n
.set push .set push
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop .set pop
.endm .endm
.macro insert_d wd, n, rs .macro insert_d wd, n
.set push .set push
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop .set pop
.endm .endm
...@@ -399,7 +407,7 @@ ...@@ -399,7 +407,7 @@
.set push .set push
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
cfcmsa $1, MSA_CSR _cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread) sw $1, THREAD_MSA_CSR(\thread)
.set pop .set pop
.endm .endm
...@@ -409,7 +417,7 @@ ...@@ -409,7 +417,7 @@
.set noat .set noat
SET_HARDFLOAT SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread) lw $1, THREAD_MSA_CSR(\thread)
ctcmsa MSA_CSR, $1 _ctcmsa MSA_CSR, $1
.set pop .set pop
ld_d 0, THREAD_FPR0, \thread ld_d 0, THREAD_FPR0, \thread
ld_d 1, THREAD_FPR1, \thread ld_d 1, THREAD_FPR1, \thread
...@@ -452,9 +460,6 @@ ...@@ -452,9 +460,6 @@
insert_w \wd, 2 insert_w \wd, 2
insert_w \wd, 3 insert_w \wd, 3
#endif #endif
.if 31-\wd
msa_init_upper (\wd+1)
.endif
.endm .endm
.macro msa_init_all_upper .macro msa_init_all_upper
...@@ -463,6 +468,37 @@ ...@@ -463,6 +468,37 @@
SET_HARDFLOAT SET_HARDFLOAT
not $1, zero not $1, zero
msa_init_upper 0 msa_init_upper 0
msa_init_upper 1
msa_init_upper 2
msa_init_upper 3
msa_init_upper 4
msa_init_upper 5
msa_init_upper 6
msa_init_upper 7
msa_init_upper 8
msa_init_upper 9
msa_init_upper 10
msa_init_upper 11
msa_init_upper 12
msa_init_upper 13
msa_init_upper 14
msa_init_upper 15
msa_init_upper 16
msa_init_upper 17
msa_init_upper 18
msa_init_upper 19
msa_init_upper 20
msa_init_upper 21
msa_init_upper 22
msa_init_upper 23
msa_init_upper 24
msa_init_upper 25
msa_init_upper 26
msa_init_upper 27
msa_init_upper 28
msa_init_upper 29
msa_init_upper 30
msa_init_upper 31
.set pop .set pop
.endm .endm
......
...@@ -48,6 +48,12 @@ enum fpu_mode { ...@@ -48,6 +48,12 @@ enum fpu_mode {
#define FPU_FR_MASK 0x1 #define FPU_FR_MASK 0x1
}; };
#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
disable_fpu_hazard(); \
} while (0)
static inline int __enable_fpu(enum fpu_mode mode) static inline int __enable_fpu(enum fpu_mode mode)
{ {
int fr; int fr;
...@@ -86,7 +92,12 @@ static inline int __enable_fpu(enum fpu_mode mode) ...@@ -86,7 +92,12 @@ static inline int __enable_fpu(enum fpu_mode mode)
enable_fpu_hazard(); enable_fpu_hazard();
/* check FR has the desired value */ /* check FR has the desired value */
return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE; if (!!(read_c0_status() & ST0_FR) == !!fr)
return 0;
/* unsupported FR value */
__disable_fpu();
return SIGFPE;
default: default:
BUG(); BUG();
...@@ -95,12 +106,6 @@ static inline int __enable_fpu(enum fpu_mode mode) ...@@ -95,12 +106,6 @@ static inline int __enable_fpu(enum fpu_mode mode)
return SIGFPE; return SIGFPE;
} }
#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
disable_fpu_hazard(); \
} while (0)
#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU) #define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
static inline int __is_fpu_owner(void) static inline int __is_fpu_owner(void)
...@@ -170,6 +175,7 @@ static inline void lose_fpu(int save) ...@@ -170,6 +175,7 @@ static inline void lose_fpu(int save)
} }
disable_msa(); disable_msa();
clear_thread_flag(TIF_USEDMSA); clear_thread_flag(TIF_USEDMSA);
__disable_fpu();
} else if (is_fpu_owner()) { } else if (is_fpu_owner()) {
if (save) if (save)
_save_fp(current); _save_fp(current);
......
...@@ -10,7 +10,8 @@ enum die_val { ...@@ -10,7 +10,8 @@ enum die_val {
DIE_RI, DIE_RI,
DIE_PAGE_FAULT, DIE_PAGE_FAULT,
DIE_BREAK, DIE_BREAK,
DIE_SSTEPBP DIE_SSTEPBP,
DIE_MSAFP
}; };
#endif /* _ASM_MIPS_KDEBUG_H */ #endif /* _ASM_MIPS_KDEBUG_H */
...@@ -21,10 +21,10 @@ ...@@ -21,10 +21,10 @@
/* MIPS KVM register ids */ /* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \ #define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S) \ #define MIPS_CP0_64(_R, _S) \
(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
...@@ -42,11 +42,14 @@ ...@@ -42,11 +42,14 @@
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
...@@ -119,6 +122,10 @@ struct kvm_vcpu_stat { ...@@ -119,6 +122,10 @@ struct kvm_vcpu_stat {
u32 syscall_exits; u32 syscall_exits;
u32 resvd_inst_exits; u32 resvd_inst_exits;
u32 break_inst_exits; u32 break_inst_exits;
u32 trap_inst_exits;
u32 msa_fpe_exits;
u32 fpe_exits;
u32 msa_disabled_exits;
u32 flush_dcache_exits; u32 flush_dcache_exits;
u32 halt_successful_poll; u32 halt_successful_poll;
u32 halt_wakeup; u32 halt_wakeup;
...@@ -138,6 +145,10 @@ enum kvm_mips_exit_types { ...@@ -138,6 +145,10 @@ enum kvm_mips_exit_types {
SYSCALL_EXITS, SYSCALL_EXITS,
RESVD_INST_EXITS, RESVD_INST_EXITS,
BREAK_INST_EXITS, BREAK_INST_EXITS,
TRAP_INST_EXITS,
MSA_FPE_EXITS,
FPE_EXITS,
MSA_DISABLED_EXITS,
FLUSH_DCACHE_EXITS, FLUSH_DCACHE_EXITS,
MAX_KVM_MIPS_EXIT_TYPES MAX_KVM_MIPS_EXIT_TYPES
}; };
...@@ -206,6 +217,8 @@ struct mips_coproc { ...@@ -206,6 +217,8 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG1_SEL 1 #define MIPS_CP0_CONFIG1_SEL 1
#define MIPS_CP0_CONFIG2_SEL 2 #define MIPS_CP0_CONFIG2_SEL 2
#define MIPS_CP0_CONFIG3_SEL 3 #define MIPS_CP0_CONFIG3_SEL 3
#define MIPS_CP0_CONFIG4_SEL 4
#define MIPS_CP0_CONFIG5_SEL 5
/* Config0 register bits */ /* Config0 register bits */
#define CP0C0_M 31 #define CP0C0_M 31
...@@ -262,31 +275,6 @@ struct mips_coproc { ...@@ -262,31 +275,6 @@ struct mips_coproc {
#define CP0C3_SM 1 #define CP0C3_SM 1
#define CP0C3_TL 0 #define CP0C3_TL 0
/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
#define MIPS_CONFIG0 \
((1 << CP0C0_M) | (0x3 << CP0C0_K0))
/* Have config2, no coprocessor2 attached, no MDMX support attached,
no performance counters, watch registers present,
no code compression, EJTAG present, no FPU, no watch registers */
#define MIPS_CONFIG1 \
((1 << CP0C1_M) | \
(0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
(0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
(0 << CP0C1_FP))
/* Have config3, no tertiary/secondary caches implemented */
#define MIPS_CONFIG2 \
((1 << CP0C2_M))
/* No config4, no DSP ASE, no large physaddr (PABITS),
no external interrupt controller, no vectored interrupts,
no 1kb pages, no SmartMIPS ASE, no trace logic */
#define MIPS_CONFIG3 \
((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
(0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
(0 << CP0C3_SM) | (0 << CP0C3_TL))
/* MMU types, the first four entries have the same layout as the /* MMU types, the first four entries have the same layout as the
CP0C0_MT field. */ CP0C0_MT field. */
enum mips_mmu_types { enum mips_mmu_types {
...@@ -321,7 +309,9 @@ enum mips_mmu_types { ...@@ -321,7 +309,9 @@ enum mips_mmu_types {
*/ */
#define T_TRAP 13 /* Trap instruction */ #define T_TRAP 13 /* Trap instruction */
#define T_VCEI 14 /* Virtual coherency exception */ #define T_VCEI 14 /* Virtual coherency exception */
#define T_MSAFPE 14 /* MSA floating point exception */
#define T_FPE 15 /* Floating point exception */ #define T_FPE 15 /* Floating point exception */
#define T_MSADIS 21 /* MSA disabled exception */
#define T_WATCH 23 /* Watch address reference */ #define T_WATCH 23 /* Watch address reference */
#define T_VCED 31 /* Virtual coherency data */ #define T_VCED 31 /* Virtual coherency data */
...@@ -374,6 +364,9 @@ struct kvm_mips_tlb { ...@@ -374,6 +364,9 @@ struct kvm_mips_tlb {
long tlb_lo1; long tlb_lo1;
}; };
#define KVM_MIPS_FPU_FPU 0x1
#define KVM_MIPS_FPU_MSA 0x2
#define KVM_MIPS_GUEST_TLB_SIZE 64 #define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch { struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase; void *host_ebase, *guest_ebase;
...@@ -395,6 +388,8 @@ struct kvm_vcpu_arch { ...@@ -395,6 +388,8 @@ struct kvm_vcpu_arch {
/* FPU State */ /* FPU State */
struct mips_fpu_struct fpu; struct mips_fpu_struct fpu;
/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
unsigned int fpu_inuse;
/* COP0 State */ /* COP0 State */
struct mips_coproc *cop0; struct mips_coproc *cop0;
...@@ -441,6 +436,9 @@ struct kvm_vcpu_arch { ...@@ -441,6 +436,9 @@ struct kvm_vcpu_arch {
/* WAIT executed */ /* WAIT executed */
int wait; int wait;
u8 fpu_enabled;
u8 msa_enabled;
}; };
...@@ -482,11 +480,15 @@ struct kvm_vcpu_arch { ...@@ -482,11 +480,15 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) #define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) #define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) #define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) #define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) #define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) #define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) #define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) #define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
...@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg, ...@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
} }
/* Helpers */
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
vcpu->fpu_enabled;
}
static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_fpu(vcpu) &&
kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}
static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
vcpu->msa_enabled;
}
static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_msa(vcpu) &&
kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
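The checks above rely on __builtin_constant_p(), a GCC/Clang builtin: when cpu_has_fpu / cpu_has_msa is a compile-time constant, the whole capability test folds away and the guarded code can be discarded; when it is only known at run time, the per-VCPU enable flag alone decides, on the assumption that the flag is only ever set after the host feature has been probed. A minimal stand-alone sketch of the same pattern (illustrative names only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Same shape as the guest_can_have_* checks above; GUEST_CAN_USE() is a
 * hypothetical name used purely for illustration. */
#define GUEST_CAN_USE(host_has, vcpu_enabled) \
	((!__builtin_constant_p(host_has) || (host_has)) && (vcpu_enabled))

int main(int argc, char **argv)
{
	bool vcpu_fpu_enabled = true;		/* hypothetical per-VCPU flag */
	int host_has_fpu = (argc > 0);		/* hypothetical run-time probe result */

	/* Constant 0: folds to 0 at compile time, guarded code can be dropped. */
	printf("%d\n", GUEST_CAN_USE(0, vcpu_fpu_enabled));
	/* Run-time value: the per-VCPU flag decides. */
	printf("%d\n", GUEST_CAN_USE(host_has_fpu, vcpu_fpu_enabled));
	return 0;
}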
struct kvm_mips_callbacks { struct kvm_mips_callbacks {
int (*handle_cop_unusable)(struct kvm_vcpu *vcpu); int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
...@@ -578,6 +605,10 @@ struct kvm_mips_callbacks { ...@@ -578,6 +605,10 @@ struct kvm_mips_callbacks {
int (*handle_syscall)(struct kvm_vcpu *vcpu); int (*handle_syscall)(struct kvm_vcpu *vcpu);
int (*handle_res_inst)(struct kvm_vcpu *vcpu); int (*handle_res_inst)(struct kvm_vcpu *vcpu);
int (*handle_break)(struct kvm_vcpu *vcpu); int (*handle_break)(struct kvm_vcpu *vcpu);
int (*handle_trap)(struct kvm_vcpu *vcpu);
int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
int (*handle_fpe)(struct kvm_vcpu *vcpu);
int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*vm_init)(struct kvm *kvm); int (*vm_init)(struct kvm *kvm);
int (*vcpu_init)(struct kvm_vcpu *vcpu); int (*vcpu_init)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu); int (*vcpu_setup)(struct kvm_vcpu *vcpu);
...@@ -596,6 +627,8 @@ struct kvm_mips_callbacks { ...@@ -596,6 +627,8 @@ struct kvm_mips_callbacks {
const struct kvm_one_reg *reg, s64 *v); const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu, int (*set_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 v); const struct kvm_one_reg *reg, s64 v);
int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
}; };
extern struct kvm_mips_callbacks *kvm_mips_callbacks; extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
...@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); ...@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* Trampoline ASM routine to start running in "Guest" context */ /* Trampoline ASM routine to start running in "Guest" context */
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
/* TLB handling */ /* TLB handling */
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu); uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
...@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, ...@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
struct kvm_run *run, struct kvm_run *run,
struct kvm_vcpu *vcpu); struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run); struct kvm_run *run);
...@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, ...@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst,
struct kvm_run *run, struct kvm_run *run,
struct kvm_vcpu *vcpu); struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
/* Dynamic binary translation */ /* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
struct kvm_vcpu *vcpu); struct kvm_vcpu *vcpu);
......
...@@ -105,7 +105,7 @@ union fpureg { ...@@ -105,7 +105,7 @@ union fpureg {
#ifdef CONFIG_CPU_LITTLE_ENDIAN #ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx) (idx) # define FPR_IDX(width, idx) (idx)
#else #else
# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx)) # define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
#endif #endif
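The big-endian FPR_IDX() change above appears intended to keep scalar FP accesses within the least-significant 64-bit doubleword of each register (now 128 bits wide with MSA), rather than reflecting the index across the full vector width. A small worked comparison, assuming FPU_REG_WIDTH is 128; the macro names are stand-ins for illustration only:

#include <stdio.h>

#define FPU_REG_WIDTH 128
#define FPR_IDX_OLD(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
#define FPR_IDX_NEW(width, idx) ((idx) ^ ((64 / (width)) - 1))

int main(void)
{
	/* 32-bit element 0 on big-endian: old picked the top word of the
	 * 128-bit vector (index 3), new stays within the low 64 bits (index 1). */
	printf("w32: old=%d new=%d\n", FPR_IDX_OLD(32, 0), FPR_IDX_NEW(32, 0));
	/* 64-bit element 0 on big-endian: old=1, new=0. */
	printf("w64: old=%d new=%d\n", FPR_IDX_OLD(64, 0), FPR_IDX_NEW(64, 0));
	return 0;
}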
#define BUILD_FPR_ACCESS(width) \ #define BUILD_FPR_ACCESS(width) \
......
...@@ -36,77 +36,85 @@ struct kvm_regs { ...@@ -36,77 +36,85 @@ struct kvm_regs {
/* /*
* for KVM_GET_FPU and KVM_SET_FPU * for KVM_GET_FPU and KVM_SET_FPU
*
* If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
* are zero filled.
*/ */
struct kvm_fpu { struct kvm_fpu {
__u64 fpr[32];
__u32 fir;
__u32 fccr;
__u32 fexr;
__u32 fenr;
__u32 fcsr;
__u32 pad;
}; };
/* /*
* For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
* registers. The id field is broken down as follows: * registers. The id field is broken down as follows:
* *
* bits[2..0] - Register 'sel' index.
* bits[7..3] - Register 'rd' index.
* bits[15..8] - Must be zero.
* bits[31..16] - 1 -> CP0 registers.
* bits[51..32] - Must be zero.
* bits[63..52] - As per linux/kvm.h * bits[63..52] - As per linux/kvm.h
* bits[51..32] - Must be zero.
* bits[31..16] - Register set.
*
* Register set = 0: GP registers from kvm_regs (see definitions below).
*
* Register set = 1: CP0 registers.
* bits[15..8] - Must be zero.
* bits[7..3] - Register 'rd' index.
* bits[2..0] - Register 'sel' index.
*
* Register set = 2: KVM specific registers (see definitions below).
*
* Register set = 3: FPU / MSA registers (see definitions below).
* *
* Other sets registers may be added in the future. Each set would * Other sets registers may be added in the future. Each set would
* have its own identifier in bits[31..16]. * have its own identifier in bits[31..16].
*
* The registers defined in struct kvm_regs are also accessible, the
* id values for these are below.
*/ */
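As a concrete illustration of the id layout described above, a user-space caller composes one of these constants and passes it to the one-reg ioctls. A minimal sketch, assuming vcpu_fd is an already-created KVM VCPU file descriptor and that the MIPS asm/kvm.h definitions below are in the include path:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Read the guest PC (register set 0, index 34) via KVM_GET_ONE_REG. */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,
		.addr = (uintptr_t)pc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

A CP0 id is composed the same way, e.g. Status (rd=12, sel=0) is KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0).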
#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) #define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL)
#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) #define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL)
#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) #define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL)
#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) #define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL)
#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) /*
#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) */
#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) #define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) #define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) #define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2)
#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) #define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3)
#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) #define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4)
#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) #define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5)
#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) #define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6)
#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) #define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7)
#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) #define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8)
#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) #define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9)
#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) #define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) #define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) #define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) #define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) #define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) #define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) #define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) #define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) #define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) #define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) #define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) #define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) #define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) #define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) #define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
/* KVM specific control registers */ #define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
/*
* KVM_REG_MIPS_KVM - KVM specific control registers.
*/
/* /*
* CP0_Count control * CP0_Count control
...@@ -118,8 +126,7 @@ struct kvm_fpu { ...@@ -118,8 +126,7 @@ struct kvm_fpu {
* safely without losing time or guest timer interrupts. * safely without losing time or guest timer interrupts.
* Other: Reserved, do not change. * Other: Reserved, do not change.
*/ */
#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ #define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
0x20000 | 0)
#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001 #define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
/* /*
...@@ -131,15 +138,46 @@ struct kvm_fpu { ...@@ -131,15 +138,46 @@ struct kvm_fpu {
* emulated. * emulated.
* Modifications to times in the future are rejected. * Modifications to times in the future are rejected.
*/ */
#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ #define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
0x20000 | 1)
/* /*
* CP0_Count rate in Hz * CP0_Count rate in Hz
* Specifies the rate of the CP0_Count timer in Hz. Modifications occur without * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
* discontinuities in CP0_Count. * discontinuities in CP0_Count.
*/ */
#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ #define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
0x20000 | 2)
/*
* KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
*
* bits[15..8] - Register subset (see definitions below).
* bits[7..5] - Must be zero.
* bits[4..0] - Register number within register subset.
*/
#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
/*
* KVM_REG_MIPS_FPR - Floating point / Vector registers.
*/
#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n))
#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n))
#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
/*
* KVM_REG_MIPS_FCR - Floating point control registers.
*/
#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0)
#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
/*
* KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
*/
#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)
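The three FPR id sizes select different views of the same architectural register: KVM_REG_MIPS_FPR_32()/_64() give the FPU view and KVM_REG_MIPS_VEC_128() the full MSA vector, so the user-space buffer must match the size encoded in the id. A brief sketch with the same vcpu_fd assumption as above:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Read MSA vector register 0 as a full 128-bit value (16-byte buffer). */
static int read_vector0(int vcpu_fd, uint8_t out[16])
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_VEC_128(0),
		.addr = (uintptr_t)out,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}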
/* /*
* KVM MIPS specific structures and definitions * KVM MIPS specific structures and definitions
......
...@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void) ...@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
/* the least significant 64 bits of each FP register */
OFFSET(THREAD_FPR0_LS64, task_struct,
thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR1_LS64, task_struct,
thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR2_LS64, task_struct,
thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR3_LS64, task_struct,
thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR4_LS64, task_struct,
thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR5_LS64, task_struct,
thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR6_LS64, task_struct,
thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR7_LS64, task_struct,
thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR8_LS64, task_struct,
thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR9_LS64, task_struct,
thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR10_LS64, task_struct,
thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR11_LS64, task_struct,
thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR12_LS64, task_struct,
thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR13_LS64, task_struct,
thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR14_LS64, task_struct,
thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR15_LS64, task_struct,
thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR16_LS64, task_struct,
thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR17_LS64, task_struct,
thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR18_LS64, task_struct,
thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR19_LS64, task_struct,
thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR20_LS64, task_struct,
thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR21_LS64, task_struct,
thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR22_LS64, task_struct,
thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR23_LS64, task_struct,
thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR24_LS64, task_struct,
thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR25_LS64, task_struct,
thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR26_LS64, task_struct,
thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR27_LS64, task_struct,
thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR28_LS64, task_struct,
thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR29_LS64, task_struct,
thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR30_LS64, task_struct,
thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR31_LS64, task_struct,
thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr); OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK(); BLANK();
...@@ -470,6 +404,45 @@ void output_kvm_defines(void) ...@@ -470,6 +404,45 @@ void output_kvm_defines(void)
OFFSET(VCPU_LO, kvm_vcpu_arch, lo); OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
OFFSET(VCPU_HI, kvm_vcpu_arch, hi); OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
OFFSET(VCPU_PC, kvm_vcpu_arch, pc); OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
BLANK();
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
BLANK();
OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
......
...@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp) ...@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set mips1 .set mips1
SET_HARDFLOAT SET_HARDFLOAT
cfc1 a1, fcr31 cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
ctc1 a2, fcr31
.set pop .set pop
TRACE_IRQS_ON CLI
STI TRACE_IRQS_OFF
.endm
.macro __build_clear_msa_fpe
_cfcmsa a1, MSA_CSR
CLI
TRACE_IRQS_OFF
.endm .endm
.macro __build_clear_ade .macro __build_clear_ade
...@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp) ...@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */ BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */ BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */ BUILD_HANDLER tr tr sti silent /* #13 */
BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */ BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER ftlb ftlb none silent /* #16 */ BUILD_HANDLER ftlb ftlb none silent /* #16 */
BUILD_HANDLER msa msa sti silent /* #21 */ BUILD_HANDLER msa msa sti silent /* #21 */
......
...@@ -46,6 +46,26 @@ ...@@ -46,6 +46,26 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h> #include <trace/events/syscalls.h>
static void init_fp_ctx(struct task_struct *target)
{
/* If FP has been used then the target already has context */
if (tsk_used_math(target))
return;
/* Begin with data registers set to all 1s... */
memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
/* ...and FCSR zeroed */
target->thread.fpu.fcr31 = 0;
/*
* Record that the target has "used" math, such that the context
* just initialised, and any modifications made by the caller,
* aren't discarded.
*/
set_stopped_child_used_math(target);
}
/* /*
* Called by kernel/ptrace.c when detaching.. * Called by kernel/ptrace.c when detaching..
* *
...@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) ...@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
if (!access_ok(VERIFY_READ, data, 33 * 8)) if (!access_ok(VERIFY_READ, data, 33 * 8))
return -EIO; return -EIO;
init_fp_ctx(child);
fregs = get_fpu_regs(child); fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++) { for (i = 0; i < 32; i++) {
...@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target, ...@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target,
/* XXX fcr31 */ /* XXX fcr31 */
init_fp_ctx(target);
if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf, return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu, &target->thread.fpu,
...@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request, ...@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request,
case FPR_BASE ... FPR_BASE + 31: { case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child); union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) { init_fp_ctx(child);
/* FP not yet used */
memset(&child->thread.fpu, ~0,
sizeof(child->thread.fpu));
child->thread.fpu.fcr31 = 0;
}
#ifdef CONFIG_32BIT #ifdef CONFIG_32BIT
if (test_thread_flag(TIF_32BIT_FPREGS)) { if (test_thread_flag(TIF_32BIT_FPREGS)) {
/* /*
......
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
.endm .endm
.set noreorder .set noreorder
.set MIPS_ISA_ARCH_LEVEL_RAW
LEAF(_save_fp_context) LEAF(_save_fp_context)
.set push .set push
...@@ -103,6 +102,7 @@ LEAF(_save_fp_context) ...@@ -103,6 +102,7 @@ LEAF(_save_fp_context)
/* Save 32-bit process floating point context */ /* Save 32-bit process floating point context */
LEAF(_save_fp_context32) LEAF(_save_fp_context32)
.set push .set push
.set MIPS_ISA_ARCH_LEVEL_RAW
SET_HARDFLOAT SET_HARDFLOAT
cfc1 t1, fcr31 cfc1 t1, fcr31
......
...@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs) ...@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs)
int process_fpemu_return(int sig, void __user *fault_addr) int process_fpemu_return(int sig, void __user *fault_addr)
{ {
/*
* We can't allow the emulated instruction to leave any of the cause
* bits set in FCSR. If they were then the kernel would take an FP
* exception when restoring FP context.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
if (sig == SIGSEGV || sig == SIGBUS) { if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0}; struct siginfo si = {0};
si.si_addr = fault_addr; si.si_addr = fault_addr;
...@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) ...@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
SIGFPE) == NOTIFY_STOP) SIGFPE) == NOTIFY_STOP)
goto out; goto out;
/* Clear FCSR.Cause before enabling interrupts */
write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs); die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) { if (fcr31 & FPU_CSR_UNI_X) {
...@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) ...@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr); &fault_addr);
/* /* If something went wrong, signal */
* We can't allow the emulated instruction to leave any of process_fpemu_return(sig, fault_addr);
* the cause bit set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */ /* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */ own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
goto out; goto out;
} else if (fcr31 & FPU_CSR_INV_X) } else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV; info.si_code = FPE_FLTINV;
...@@ -1392,13 +1398,22 @@ asmlinkage void do_cpu(struct pt_regs *regs) ...@@ -1392,13 +1398,22 @@ asmlinkage void do_cpu(struct pt_regs *regs)
exception_exit(prev_state); exception_exit(prev_state);
} }
asmlinkage void do_msa_fpe(struct pt_regs *regs) asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{ {
enum ctx_state prev_state; enum ctx_state prev_state;
prev_state = exception_enter(); prev_state = exception_enter();
if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
goto out;
/* Clear MSACSR.Cause before enabling interrupts */
write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
local_irq_enable();
die_if_kernel("do_msa_fpe invoked from kernel context!", regs); die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
force_sig(SIGFPE, current); force_sig(SIGFPE, current);
out:
exception_exit(prev_state); exception_exit(prev_state);
} }
......
# Makefile for KVM support for MIPS # Makefile for KVM support for MIPS
# #
common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
kvm-objs := $(common-objs) mips.o emulate.o locore.o \ common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
interrupt.o stats.o commpage.o \ interrupt.o stats.o commpage.o \
dyntrans.o trap_emul.o dyntrans.o trap_emul.o fpu.o
obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o obj-y += callback.o tlb.o
...@@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) ...@@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
return EMULATE_DONE; return EMULATE_DONE;
} }
/**
* kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
* @vcpu: Virtual CPU.
*
* Finds the mask of bits which are writable in the guest's Config1 CP0
* register, by userland (currently read-only to the guest).
*/
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = 0;
/* Permit FPU to be present if FPU is supported */
if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
mask |= MIPS_CONF1_FP;
return mask;
}
/**
* kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
* @vcpu: Virtual CPU.
*
* Finds the mask of bits which are writable in the guest's Config3 CP0
* register, by userland (currently read-only to the guest).
*/
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
/* Config4 is optional */
unsigned int mask = MIPS_CONF_M;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
mask |= MIPS_CONF3_MSA;
return mask;
}
/**
* kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
* @vcpu: Virtual CPU.
*
* Finds the mask of bits which are writable in the guest's Config4 CP0
* register, by userland (currently read-only to the guest).
*/
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
/* Config5 is optional */
return MIPS_CONF_M;
}
/**
* kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
* @vcpu: Virtual CPU.
*
* Finds the mask of bits which are writable in the guest's Config5 CP0
* register, by the guest itself.
*/
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
unsigned int mask = 0;
/* Permit MSAEn changes if MSA supported and enabled */
if (kvm_mips_guest_has_msa(&vcpu->arch))
mask |= MIPS_CONF5_MSAEN;
/*
* Permit guest FPU mode changes if FPU is enabled and the relevant
* feature exists according to FIR register.
*/
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
if (cpu_has_fre)
mask |= MIPS_CONF5_FRE;
/* We don't support UFR or UFE */
}
return mask;
}
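These wrmask helpers are applied by the MTC0 emulation further down: only the masked bits of the guest's written value are allowed to change, everything else keeps its previous value. A small stand-alone sketch of that masking arithmetic, using illustrative values with MSAEn as the only writable bit:

#include <stdio.h>

int main(void)
{
	unsigned int old_val = 0x00000001;	/* current guest Config5 */
	unsigned int val     = 0x08000100;	/* value the guest tried to write */
	unsigned int wrmask  = 0x08000000;	/* e.g. only MIPS_CONF5_MSAEN (bit 27) */

	unsigned int change  = (val ^ old_val) & wrmask;
	unsigned int new_val = old_val ^ change;

	/* Prints 0x08000001: MSAEn was set, the unwritable bits were preserved. */
	printf("0x%08x\n", new_val);
	return 0;
}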
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
uint32_t cause, struct kvm_run *run, uint32_t cause, struct kvm_run *run,
struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
...@@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, ...@@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_write_compare(vcpu, kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt]); vcpu->arch.gprs[rt]);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
kvm_write_c0_guest_status(cop0, unsigned int old_val, val, change;
vcpu->arch.gprs[rt]);
old_val = kvm_read_c0_guest_status(cop0);
val = vcpu->arch.gprs[rt];
change = val ^ old_val;
/* Make sure that the NMI bit is never set */
val &= ~ST0_NMI;
/*
* Don't allow CU1 or FR to be set unless FPU
* capability enabled and exists in guest
* configuration.
*/
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
val &= ~(ST0_CU1 | ST0_FR);
/*
* Also don't allow FR to be set if host doesn't
* support it.
*/
if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
val &= ~ST0_FR;
/* Handle changes in FPU mode */
preempt_disable();
/*
* FPU and Vector register state is made
* UNPREDICTABLE by a change of FR, so don't
* even bother saving it.
*/
if (change & ST0_FR)
kvm_drop_fpu(vcpu);
/*
* If MSA state is already live, it is undefined
* how it interacts with FR=0 FPU state, and we
* don't want to hit reserved instruction
* exceptions trying to save the MSA state later
* when CU=1 && FR=1, so play it safe and save
* it first.
*/
if (change & ST0_CU1 && !(val & ST0_FR) &&
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
kvm_lose_fpu(vcpu);
/* /*
* Make sure that CU1 and NMI bits are * Propagate CU1 (FPU enable) changes
* never set * immediately if the FPU context is already
* loaded. When disabling we leave the context
* loaded so it can be quickly enabled again in
* the near future.
*/ */
kvm_clear_c0_guest_status(cop0, if (change & ST0_CU1 &&
(ST0_CU1 | ST0_NMI)); vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
change_c0_status(ST0_CU1, val);
preempt_enable();
kvm_write_c0_guest_status(cop0, val);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS #ifdef CONFIG_KVM_MIPS_DYN_TRANS
/*
* If FPU present, we need CU1/FR bits to take
* effect fairly soon.
*/
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
kvm_mips_trans_mtc0(inst, opc, vcpu); kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif #endif
} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
unsigned int old_val, val, change, wrmask;
old_val = kvm_read_c0_guest_config5(cop0);
val = vcpu->arch.gprs[rt];
/* Only a few bits are writable in Config5 */
wrmask = kvm_mips_config5_wrmask(vcpu);
change = (val ^ old_val) & wrmask;
val = old_val ^ change;
/* Handle changes in FPU/MSA modes */
preempt_disable();
/*
* Propagate FRE changes immediately if the FPU
* context is already loaded.
*/
if (change & MIPS_CONF5_FRE &&
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
/*
* Propagate MSAEn changes immediately if the
* MSA context is already loaded. When disabling
* we leave the context loaded so it can be
* quickly enabled again in the near future.
*/
if (change & MIPS_CONF5_MSAEN &&
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
change_c0_config5(MIPS_CONF5_MSAEN,
val);
preempt_enable();
kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
uint32_t old_cause, new_cause; uint32_t old_cause, new_cause;
...@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, ...@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
return er; return er;
} }
enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
else
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_TRAP << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
} else {
kvm_err("Trying to deliver TRAP when EXL is already set\n");
er = EMULATE_FAIL;
}
return er;
}
enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
else
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_MSAFPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
} else {
kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
er = EMULATE_FAIL;
}
return er;
}
enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
else
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_FPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
} else {
kvm_err("Trying to deliver FPE when EXL is already set\n");
er = EMULATE_FAIL;
}
return er;
}
enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
else
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_MSADIS << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
} else {
kvm_err("Trying to deliver MSADIS when EXL is already set\n");
er = EMULATE_FAIL;
}
return er;
}
/* ll/sc, rdhwr, sync emulation */ /* ll/sc, rdhwr, sync emulation */
#define OPCODE 0xfc000000 #define OPCODE 0xfc000000
...@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause, ...@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
case T_SYSCALL: case T_SYSCALL:
case T_BREAK: case T_BREAK:
case T_RES_INST: case T_RES_INST:
case T_TRAP:
case T_MSAFPE:
case T_FPE:
case T_MSADIS:
break; break;
case T_COP_UNUSABLE: case T_COP_UNUSABLE:
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* FPU context handling code for KVM.
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
.set noreorder
.set noat
LEAF(__kvm_save_fpu)
.set push
.set mips64r2
SET_HARDFLOAT
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
nop
sdc1 $f1, VCPU_FPR1(a0)
sdc1 $f3, VCPU_FPR3(a0)
sdc1 $f5, VCPU_FPR5(a0)
sdc1 $f7, VCPU_FPR7(a0)
sdc1 $f9, VCPU_FPR9(a0)
sdc1 $f11, VCPU_FPR11(a0)
sdc1 $f13, VCPU_FPR13(a0)
sdc1 $f15, VCPU_FPR15(a0)
sdc1 $f17, VCPU_FPR17(a0)
sdc1 $f19, VCPU_FPR19(a0)
sdc1 $f21, VCPU_FPR21(a0)
sdc1 $f23, VCPU_FPR23(a0)
sdc1 $f25, VCPU_FPR25(a0)
sdc1 $f27, VCPU_FPR27(a0)
sdc1 $f29, VCPU_FPR29(a0)
sdc1 $f31, VCPU_FPR31(a0)
1: sdc1 $f0, VCPU_FPR0(a0)
sdc1 $f2, VCPU_FPR2(a0)
sdc1 $f4, VCPU_FPR4(a0)
sdc1 $f6, VCPU_FPR6(a0)
sdc1 $f8, VCPU_FPR8(a0)
sdc1 $f10, VCPU_FPR10(a0)
sdc1 $f12, VCPU_FPR12(a0)
sdc1 $f14, VCPU_FPR14(a0)
sdc1 $f16, VCPU_FPR16(a0)
sdc1 $f18, VCPU_FPR18(a0)
sdc1 $f20, VCPU_FPR20(a0)
sdc1 $f22, VCPU_FPR22(a0)
sdc1 $f24, VCPU_FPR24(a0)
sdc1 $f26, VCPU_FPR26(a0)
sdc1 $f28, VCPU_FPR28(a0)
jr ra
sdc1 $f30, VCPU_FPR30(a0)
.set pop
END(__kvm_save_fpu)
LEAF(__kvm_restore_fpu)
.set push
.set mips64r2
SET_HARDFLOAT
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
nop
ldc1 $f1, VCPU_FPR1(a0)
ldc1 $f3, VCPU_FPR3(a0)
ldc1 $f5, VCPU_FPR5(a0)
ldc1 $f7, VCPU_FPR7(a0)
ldc1 $f9, VCPU_FPR9(a0)
ldc1 $f11, VCPU_FPR11(a0)
ldc1 $f13, VCPU_FPR13(a0)
ldc1 $f15, VCPU_FPR15(a0)
ldc1 $f17, VCPU_FPR17(a0)
ldc1 $f19, VCPU_FPR19(a0)
ldc1 $f21, VCPU_FPR21(a0)
ldc1 $f23, VCPU_FPR23(a0)
ldc1 $f25, VCPU_FPR25(a0)
ldc1 $f27, VCPU_FPR27(a0)
ldc1 $f29, VCPU_FPR29(a0)
ldc1 $f31, VCPU_FPR31(a0)
1: ldc1 $f0, VCPU_FPR0(a0)
ldc1 $f2, VCPU_FPR2(a0)
ldc1 $f4, VCPU_FPR4(a0)
ldc1 $f6, VCPU_FPR6(a0)
ldc1 $f8, VCPU_FPR8(a0)
ldc1 $f10, VCPU_FPR10(a0)
ldc1 $f12, VCPU_FPR12(a0)
ldc1 $f14, VCPU_FPR14(a0)
ldc1 $f16, VCPU_FPR16(a0)
ldc1 $f18, VCPU_FPR18(a0)
ldc1 $f20, VCPU_FPR20(a0)
ldc1 $f22, VCPU_FPR22(a0)
ldc1 $f24, VCPU_FPR24(a0)
ldc1 $f26, VCPU_FPR26(a0)
ldc1 $f28, VCPU_FPR28(a0)
jr ra
ldc1 $f30, VCPU_FPR30(a0)
.set pop
END(__kvm_restore_fpu)
LEAF(__kvm_restore_fcsr)
.set push
SET_HARDFLOAT
lw t0, VCPU_FCR31(a0)
/*
* The ctc1 must stay at this offset in __kvm_restore_fcsr.
* See kvm_mips_csr_die_notify() which handles t0 containing a value
* which triggers an FP Exception, which must be stepped over and
* ignored since the set cause bits must remain there for the guest.
*/
ctc1 t0, fcr31
jr ra
nop
.set pop
END(__kvm_restore_fcsr)
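The offset constraint in the comment above exists because a die notifier (kvm_mips_csr_die_notify(), referenced in the same comment) recognises an FP exception raised by that exact ctc1 and steps over it, leaving the guest's saved cause bits intact. A rough sketch of that notifier pattern, not the actual implementation; is_restore_csr_insn() is a hypothetical helper:

#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <asm/ptrace.h>

static int csr_die_notify_sketch(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	/* Only swallow faults raised by the known ctc1/ctcmsa restore site. */
	if (!is_restore_csr_insn(regs->cp0_epc))
		return NOTIFY_DONE;

	regs->cp0_epc += 4;	/* step over the faulting instruction */
	return NOTIFY_STOP;
}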
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
#define PT_HOST_USERLOCAL PT_EPC #define PT_HOST_USERLOCAL PT_EPC
#define CP0_DDATA_LO $28,3 #define CP0_DDATA_LO $28,3
#define CP0_CONFIG3 $16,3
#define CP0_CONFIG5 $16,5
#define CP0_EBASE $15,1 #define CP0_EBASE $15,1
#define CP0_INTCTL $12,1 #define CP0_INTCTL $12,1
...@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) ...@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
LONG_L k0, VCPU_HOST_EBASE(k1) LONG_L k0, VCPU_HOST_EBASE(k1)
mtc0 k0,CP0_EBASE mtc0 k0,CP0_EBASE
/*
* If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
* trigger FPE for pending exceptions.
*/
.set at
and v1, v0, ST0_CU1
beqz v1, 1f
nop
.set push
SET_HARDFLOAT
cfc1 t0, fcr31
sw t0, VCPU_FCR31(k1)
ctc1 zero,fcr31
.set pop
.set noat
1:
#ifdef CONFIG_CPU_HAS_MSA
/*
* If MSA is enabled, save MSACSR and clear it so that later
* instructions don't trigger MSAFPE for pending exceptions.
*/
mfc0 t0, CP0_CONFIG3
ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
beqz t0, 1f
nop
mfc0 t0, CP0_CONFIG5
ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
beqz t0, 1f
nop
_cfcmsa t0, MSA_CSR
sw t0, VCPU_MSA_CSR(k1)
_ctcmsa MSA_CSR, zero
1:
#endif
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
.set at .set at
and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* MIPS SIMD Architecture (MSA) context handling code for KVM.
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
.set noreorder
.set noat
LEAF(__kvm_save_msa)
st_d 0, VCPU_FPR0, a0
st_d 1, VCPU_FPR1, a0
st_d 2, VCPU_FPR2, a0
st_d 3, VCPU_FPR3, a0
st_d 4, VCPU_FPR4, a0
st_d 5, VCPU_FPR5, a0
st_d 6, VCPU_FPR6, a0
st_d 7, VCPU_FPR7, a0
st_d 8, VCPU_FPR8, a0
st_d 9, VCPU_FPR9, a0
st_d 10, VCPU_FPR10, a0
st_d 11, VCPU_FPR11, a0
st_d 12, VCPU_FPR12, a0
st_d 13, VCPU_FPR13, a0
st_d 14, VCPU_FPR14, a0
st_d 15, VCPU_FPR15, a0
st_d 16, VCPU_FPR16, a0
st_d 17, VCPU_FPR17, a0
st_d 18, VCPU_FPR18, a0
st_d 19, VCPU_FPR19, a0
st_d 20, VCPU_FPR20, a0
st_d 21, VCPU_FPR21, a0
st_d 22, VCPU_FPR22, a0
st_d 23, VCPU_FPR23, a0
st_d 24, VCPU_FPR24, a0
st_d 25, VCPU_FPR25, a0
st_d 26, VCPU_FPR26, a0
st_d 27, VCPU_FPR27, a0
st_d 28, VCPU_FPR28, a0
st_d 29, VCPU_FPR29, a0
st_d 30, VCPU_FPR30, a0
st_d 31, VCPU_FPR31, a0
jr ra
nop
END(__kvm_save_msa)
LEAF(__kvm_restore_msa)
ld_d 0, VCPU_FPR0, a0
ld_d 1, VCPU_FPR1, a0
ld_d 2, VCPU_FPR2, a0
ld_d 3, VCPU_FPR3, a0
ld_d 4, VCPU_FPR4, a0
ld_d 5, VCPU_FPR5, a0
ld_d 6, VCPU_FPR6, a0
ld_d 7, VCPU_FPR7, a0
ld_d 8, VCPU_FPR8, a0
ld_d 9, VCPU_FPR9, a0
ld_d 10, VCPU_FPR10, a0
ld_d 11, VCPU_FPR11, a0
ld_d 12, VCPU_FPR12, a0
ld_d 13, VCPU_FPR13, a0
ld_d 14, VCPU_FPR14, a0
ld_d 15, VCPU_FPR15, a0
ld_d 16, VCPU_FPR16, a0
ld_d 17, VCPU_FPR17, a0
ld_d 18, VCPU_FPR18, a0
ld_d 19, VCPU_FPR19, a0
ld_d 20, VCPU_FPR20, a0
ld_d 21, VCPU_FPR21, a0
ld_d 22, VCPU_FPR22, a0
ld_d 23, VCPU_FPR23, a0
ld_d 24, VCPU_FPR24, a0
ld_d 25, VCPU_FPR25, a0
ld_d 26, VCPU_FPR26, a0
ld_d 27, VCPU_FPR27, a0
ld_d 28, VCPU_FPR28, a0
ld_d 29, VCPU_FPR29, a0
ld_d 30, VCPU_FPR30, a0
ld_d 31, VCPU_FPR31, a0
jr ra
nop
END(__kvm_restore_msa)
.macro kvm_restore_msa_upper wr, off, base
.set push
.set noat
#ifdef CONFIG_64BIT
ld $1, \off(\base)
insert_d \wr, 1
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
lw $1, \off(\base)
insert_w \wr, 2
lw $1, (\off+4)(\base)
insert_w \wr, 3
#else /* CONFIG_CPU_BIG_ENDIAN */
lw $1, (\off+4)(\base)
insert_w \wr, 2
lw $1, \off(\base)
insert_w \wr, 3
#endif
.set pop
.endm
LEAF(__kvm_restore_msa_upper)
kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0
kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0
kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0
kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0
kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0
kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0
kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0
kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0
kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0
kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0
kvm_restore_msa_upper 10, VCPU_FPR10+8, a0
kvm_restore_msa_upper 11, VCPU_FPR11+8, a0
kvm_restore_msa_upper 12, VCPU_FPR12+8, a0
kvm_restore_msa_upper 13, VCPU_FPR13+8, a0
kvm_restore_msa_upper 14, VCPU_FPR14+8, a0
kvm_restore_msa_upper 15, VCPU_FPR15+8, a0
kvm_restore_msa_upper 16, VCPU_FPR16+8, a0
kvm_restore_msa_upper 17, VCPU_FPR17+8, a0
kvm_restore_msa_upper 18, VCPU_FPR18+8, a0
kvm_restore_msa_upper 19, VCPU_FPR19+8, a0
kvm_restore_msa_upper 20, VCPU_FPR20+8, a0
kvm_restore_msa_upper 21, VCPU_FPR21+8, a0
kvm_restore_msa_upper 22, VCPU_FPR22+8, a0
kvm_restore_msa_upper 23, VCPU_FPR23+8, a0
kvm_restore_msa_upper 24, VCPU_FPR24+8, a0
kvm_restore_msa_upper 25, VCPU_FPR25+8, a0
kvm_restore_msa_upper 26, VCPU_FPR26+8, a0
kvm_restore_msa_upper 27, VCPU_FPR27+8, a0
kvm_restore_msa_upper 28, VCPU_FPR28+8, a0
kvm_restore_msa_upper 29, VCPU_FPR29+8, a0
kvm_restore_msa_upper 30, VCPU_FPR30+8, a0
kvm_restore_msa_upper 31, VCPU_FPR31+8, a0
jr ra
nop
END(__kvm_restore_msa_upper)
LEAF(__kvm_restore_msacsr)
lw t0, VCPU_MSA_CSR(a0)
/*
* The ctcmsa must stay at this offset in __kvm_restore_msacsr.
* See kvm_mips_csr_die_notify() which handles t0 containing a value
* which triggers an MSA FP Exception, which must be stepped over and
* ignored since the set cause bits must remain there for the guest.
*/
_ctcmsa MSA_CSR, t0
jr ra
nop
END(__kvm_restore_msacsr)
...@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = { ...@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
"System Call", "System Call",
"Reserved Inst", "Reserved Inst",
"Break Inst", "Break Inst",
"Trap Inst",
"MSA FPE",
"FPE",
"MSA Disabled",
"D-Cache Flushes", "D-Cache Flushes",
}; };
......
...@@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
}
/* restore guest state to registers */
kvm_mips_callbacks->vcpu_set_regs(vcpu);
local_irq_restore(flags);
}
...@@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.preempt_entryhi = read_c0_entryhi();
vcpu->arch.last_sched_cpu = cpu;
/* save guest state in registers */
kvm_mips_callbacks->vcpu_get_regs(vcpu);
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
ASID_VERSION_MASK)) {
kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
...
...@@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
-if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
/* FPU Unusable */
if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
(kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
/*
* Unusable/no FPU in guest:
* deliver guest COP1 Unusable Exception
*/
er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
-else
} else {
/* Restore FPU state */
kvm_own_fpu(vcpu);
er = EMULATE_DONE;
}
} else {
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
}
switch (er) {
case EMULATE_DONE:
...@@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
return ret;
}
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
/**
* kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use MSA when it is disabled.
*/
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
(kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
/*
* No MSA in guest, or FPU enabled and not in FR=1 mode,
* guest reserved instruction exception
*/
er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
/* MSA disabled by guest, guest MSA disabled exception */
er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
} else {
/* Restore MSA/FPU state */
kvm_own_msa(vcpu);
er = EMULATE_DONE;
}
switch (er) {
case EMULATE_DONE:
ret = RESUME_GUEST;
break;
case EMULATE_FAIL:
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
default:
BUG();
}
return ret;
}
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
return 0;
...@@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
* guest will come up as expected, for now we simulate a MIPS 24kc
*/
kvm_write_c0_guest_prid(cop0, 0x00019300);
-kvm_write_c0_guest_config(cop0,
-MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
/* Have config1, Cacheable, noncoherent, write-back, write allocate */
kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
(0x1 << CP0C0_AR) |
(MMU_TYPE_R4000 << CP0C0_MT));
/* Read the cache characteristics from the host Config1 Register */
...@@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
(1 << CP0C1_WR) | (1 << CP0C1_CA));
kvm_write_c0_guest_config1(cop0, config1);
-kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
-/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
-kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
-(1 << CP0C3_ULRI));
/* Have config3, no tertiary/secondary caches implemented */
kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
/* Have config4, UserLocal */
kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
/* Have config5 */
kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
/* No config6 */
kvm_write_c0_guest_config5(cop0, 0);
/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
...@@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int ret = 0;
unsigned int cur, change;
switch (reg->id) {
case KVM_REG_MIPS_CP0_COUNT:
...@@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
kvm_write_c0_guest_cause(cop0, v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG:
/* read-only for now */
break;
case KVM_REG_MIPS_CP0_CONFIG1:
cur = kvm_read_c0_guest_config1(cop0);
change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
if (change) {
v = cur ^ change;
kvm_write_c0_guest_config1(cop0, v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG2:
/* read-only for now */
break;
case KVM_REG_MIPS_CP0_CONFIG3:
cur = kvm_read_c0_guest_config3(cop0);
change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
if (change) {
v = cur ^ change;
kvm_write_c0_guest_config3(cop0, v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG4:
cur = kvm_read_c0_guest_config4(cop0);
change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
if (change) {
v = cur ^ change;
kvm_write_c0_guest_config4(cop0, v);
}
break;
case KVM_REG_MIPS_CP0_CONFIG5:
cur = kvm_read_c0_guest_config5(cop0);
change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
if (change) {
v = cur ^ change;
kvm_write_c0_guest_config5(cop0, v);
}
break;
case KVM_REG_MIPS_COUNT_CTL:
ret = kvm_mips_set_count_ctl(vcpu, v);
break;
...@@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
return ret;
}
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
kvm_lose_fpu(vcpu);
return 0;
}
static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
return 0;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
/* exit handlers */
.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
...@@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.handle_syscall = kvm_trap_emul_handle_syscall,
.handle_res_inst = kvm_trap_emul_handle_res_inst,
.handle_break = kvm_trap_emul_handle_break,
.handle_trap = kvm_trap_emul_handle_trap,
.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
.handle_fpe = kvm_trap_emul_handle_fpe,
.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
.vm_init = kvm_trap_emul_vm_init,
.vcpu_init = kvm_trap_emul_vcpu_init,
...@@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.irq_clear = kvm_mips_irq_clear_cb,
.get_one_reg = kvm_trap_emul_get_one_reg,
.set_one_reg = kvm_trap_emul_set_one_reg,
.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
...
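
The new KVM_REG_MIPS_CP0_CONFIG1..CONFIG5 cases above only let a write change bits covered by the per-register writable masks (kvm_mips_config*_wrmask), so userspace can toggle optional features without touching fixed fields. Below is a minimal userspace sketch of flipping the guest's Config5.MSAEn through the one-reg API; it assumes the KVM_REG_MIPS_CP0_CONFIG5 register ID and the MIPS_CONF5_MSAEN bit are visible to the program (they come from the kernel's MIPS headers), and the vcpu_fd handling is illustrative, not part of this patch.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Toggle Config5.MSAEn for a MIPS guest vcpu (sketch; vcpu_fd is assumed
 * to be an open KVM vcpu file descriptor). */
static int set_guest_msaen(int vcpu_fd, int enable)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_CP0_CONFIG5,
		.addr = (uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	if (enable)
		val |= MIPS_CONF5_MSAEN;
	else
		val &= ~MIPS_CONF5_MSAEN;
	/* Bits outside kvm_mips_config5_wrmask() are silently preserved. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}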
...@@ -34,7 +34,7 @@
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
-#include "iodev.h"
#include <kvm/iodev.h>
#define MAX_CPU 32
#define MAX_SRC 256
...@@ -289,11 +289,6 @@ static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
clear_bit(n_IRQ, q->queue);
}
-static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
-{
-return test_bit(n_IRQ, q->queue);
-}
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
int irq = -1;
...@@ -1374,8 +1369,9 @@ static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
return -ENXIO;
}
-static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
-int len, void *ptr)
static int kvm_mpic_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *this,
gpa_t addr, int len, void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
...@@ -1415,8 +1411,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
return ret;
}
-static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
-int len, const void *ptr)
static int kvm_mpic_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *this,
gpa_t addr, int len, const void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
...
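
With this series the in-kernel MMIO callbacks carry the vcpu that triggered the access, as the new kvm_mpic_read/kvm_mpic_write signatures above show. The sketch below is a hedged illustration of a device using the vcpu-aware ops; the device itself (a single 4-byte scratch register) is invented for illustration and is not part of this patch.

/* Sketch of a trivial in-kernel MMIO device using the vcpu-aware ops. */
struct scratch_dev {
	struct kvm_io_device dev;
	u32 value;
};

static int scratch_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			gpa_t addr, int len, void *ptr)
{
	struct scratch_dev *s = container_of(this, struct scratch_dev, dev);

	if (len != 4)
		return -EOPNOTSUPP;
	memcpy(ptr, &s->value, 4);	/* could also consult vcpu state here */
	return 0;
}

static int scratch_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			 gpa_t addr, int len, const void *ptr)
{
	struct scratch_dev *s = container_of(this, struct scratch_dev, dev);

	if (len != 4)
		return -EOPNOTSUPP;
	memcpy(&s->value, ptr, 4);
	return 0;
}

static const struct kvm_io_device_ops scratch_ops = {
	.read  = scratch_read,
	.write = scratch_write,
};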
...@@ -807,7 +807,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
-ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
...@@ -880,7 +880,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
-ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
...
...@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
-__u8 reserved1c0[30]; /* 0x01c0 */
__u8 reserved1c0[8]; /* 0x01c0 */
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
...@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
__u8 data[256];
} __packed;
struct kvm_s390_vregs {
__vector128 vrs[32];
__u8 reserved200[512]; /* for future vector expansion */
} __packed;
struct sie_page {
struct kvm_s390_sie_block sie_block;
__u8 reserved200[1024]; /* 0x0200 */
struct kvm_s390_itdb itdb; /* 0x0600 */
-__u8 reserved700[2304]; /* 0x0700 */
__u8 reserved700[1280]; /* 0x0700 */
struct kvm_s390_vregs vregs; /* 0x0c00 */
} __packed;
struct kvm_vcpu_stat {
...@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_stop;
u32 instruction_sigp_stop_store_status;
u32 instruction_sigp_store_status;
u32 instruction_sigp_store_adtl_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
...@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
#define PGM_VECTOR_PROCESSING 0x1b
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
...@@ -334,6 +344,11 @@ enum irq_types {
IRQ_PEND_COUNT
};
/* We have 2M for virtio device descriptor pages. Smallest amount of
* memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
*/
#define KVM_S390_MAX_VIRTIO_IRQS 87381
/*
* Repressible (non-floating) machine check interrupts
* subclass bits in MCIC
...@@ -411,13 +426,32 @@ struct kvm_s390_local_interrupt {
unsigned long pending_irqs;
};
#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT 8
#define FIRQ_LIST_VIRTIO 9
#define FIRQ_LIST_COUNT 10
#define FIRQ_CNTR_IO 0
#define FIRQ_CNTR_SERVICE 1
#define FIRQ_CNTR_VIRTIO 2
#define FIRQ_CNTR_PFAULT 3
#define FIRQ_MAX_COUNT 4
struct kvm_s390_float_interrupt {
unsigned long pending_irqs;
spinlock_t lock;
-struct list_head list;
struct list_head lists[FIRQ_LIST_COUNT];
-atomic_t active;
int counters[FIRQ_MAX_COUNT];
struct kvm_s390_mchk_info mchk;
struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
-unsigned int irq_count;
};
struct kvm_hw_wp_info_arch {
...@@ -465,6 +499,7 @@ struct kvm_vcpu_arch {
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
struct kvm_s390_vregs *host_vregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
...@@ -553,6 +588,7 @@ struct kvm_arch{
int use_cmma;
int user_cpu_state_ctrl;
int user_sigp;
int user_stsi;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
...
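
The FIRQ_LIST_* and FIRQ_CNTR_* constants above replace the single floating-interrupt list with one list per interrupt class (eight I/O subclasses plus pfault and virtio) and per-type counters. As a rough, illustrative sketch of the indexing this enables, assuming a pending I/O interrupt with subclass isc, a float-interrupt pointer fi, and an interrupt object inti being queued (all names here are assumptions, not taken from the patch):

/* Illustrative only: pick the per-ISC list and bump the I/O counter. */
struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];

fi->counters[FIRQ_CNTR_IO] += 1;
list_add_tail(&inti->list, isc_list);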
...@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_CRS (1UL << 3)
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
#define KVM_SYNC_VRS (1UL << 6)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
...@@ -164,6 +165,9 @@ struct kvm_sync_regs {
__u64 pft; /* pfault token [PFAULT] */
__u64 pfs; /* pfault select [PFAULT] */
__u64 pfc; /* pfault compare [PFAULT] */
__u64 vrs[32][2]; /* vector registers */
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* only valid with vector registers */
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
...
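
KVM_SYNC_VRS lets the guest vector registers travel in the shared kvm_run area instead of separate ioctls. A minimal userspace sketch, assuming the kernel has flagged the registers as valid in run->kvm_valid_regs after KVM_RUN:

#include <linux/kvm.h>
#include <string.h>

/* Copy the guest's 32 vector registers out of the sync area (sketch). */
static void fetch_vrs(struct kvm_run *run, __u64 out[32][2])
{
	if (run->kvm_valid_regs & KVM_SYNC_VRS)
		memcpy(out, run->s.regs.vrs, sizeof(run->s.regs.vrs));
}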
...@@ -230,7 +230,7 @@
* and returns a key, which can be used to find a mnemonic name
* of the instruction in the icpt_insn_codes table.
*/
-#define icpt_insn_decoder(insn) \
#define icpt_insn_decoder(insn) ( \
INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
...@@ -239,6 +239,6 @@
INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
-INSN_DECODE(insn)
INSN_DECODE(insn))
#endif /* _UAPI_ASM_S390_SIE_H */
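
The extra parentheses around the icpt_insn_decoder() expansion matter because the macro expands to one long expression built from the INSN_DECODE* helpers; without an enclosing pair, operator precedence at the use site can split it apart. A generic illustration of the same pitfall (deliberately simplified, not the sie.h expansion itself):

#define SUM_BAD(a, b)  a + b
#define SUM_GOOD(a, b) (a + b)

int x = 2 * SUM_BAD(1, 2);   /* expands to 2 * 1 + 2 == 4, not 6 */
int y = 2 * SUM_GOOD(1, 2);  /* expands to 2 * (1 + 2) == 6 */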
...@@ -171,6 +171,7 @@ int main(void)
#else /* CONFIG_32BIT */
DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
...
...@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
...@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
* - gpr 3 contains the virtqueue index (passed as datamatch)
* - gpr 4 contains the index on the bus (optionally)
*/
-ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
vcpu->run->s.regs.gprs[2] & 0xffffffff,
8, &vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
...@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
-int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
...
This diff is collapsed.
...@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
}
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-unsigned long *gpa, int write);
ar_t ar, unsigned long *gpa, int write);
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long length, int is_write);
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write);
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
...@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* write_guest - copy data from kernel space to guest space
* @vcpu: virtual cpu
* @ga: guest address
* @ar: access register
* @data: source address in kernel space
* @len: number of bytes to copy
*
...@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* If DAT is off data will be copied to guest real or absolute memory.
* If DAT is on data will be copied to the address space as specified by
* the address space bits of the PSW:
-* Primary, secondory or home space (access register mode is currently not
-* implemented).
* Primary, secondary, home space or access register mode.
* The addressing mode of the PSW is also inspected, so that address wrap
* around is taken into account for 24-, 31- and 64-bit addressing mode,
* if the to be copied data crosses page boundaries in guest address space.
...@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* if data has been changed in guest space in case of an exception.
*/
static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
-return access_guest(vcpu, ga, data, len, 1);
return access_guest(vcpu, ga, ar, data, len, 1);
}
/**
* read_guest - copy data from guest space to kernel space
* @vcpu: virtual cpu
* @ga: guest address
* @ar: access register
* @data: destination address in kernel space
* @len: number of bytes to copy
*
...@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
* data will be copied from guest space to kernel space.
*/
static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
-return access_guest(vcpu, ga, data, len, 0);
return access_guest(vcpu, ga, ar, data, len, 0);
}
/**
...@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
#endif /* __KVM_S390_GACCESS_H */
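
With the new ar_t parameter, callers pass the access register that an AR-mode guest used to form the address, typically obtained alongside the effective address from kvm_s390_get_base_disp_rs(). A hedged sketch of how an instruction handler might use the new read_guest() signature; the helper name and operand size are illustrative, not taken from the patch:

/* Sketch: read an 8-byte operand from the guest using the access
 * register that belongs to the instruction's base register. */
static int read_operand(struct kvm_vcpu *vcpu, u64 *val)
{
	ar_t ar;
	u64 ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
	int rc = read_guest(vcpu, ga, ar, val, sizeof(*val));

	if (rc)	/* forward access exceptions to the guest */
		return kvm_s390_inject_prog_cond(vcpu, rc);
	return 0;
}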
...@@ -191,7 +191,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
if (!wp_info->old_data)
return -ENOMEM;
/* try to backup the original value */
-ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
wp_info->len);
if (ret) {
kfree(wp_info->old_data);
...@@ -362,7 +362,7 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
continue;
/* refetch the wp data and compare it to the old value */
-if (!read_guest(vcpu, wp_info->phys_addr, temp,
if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
wp_info->len)) {
if (memcmp(temp, wp_info->old_data, wp_info->len)) {
kfree(temp);
...
...@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
break;
case PGM_VECTOR_PROCESSING:
case PGM_DATA:
pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
break;
...@@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the source is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
-&srcaddr, 0);
reg2, &srcaddr, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
...@@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the destination is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
-&dstaddr, 1);
reg1, &dstaddr, 1);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
...
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
break;
case SIGP_STORE_ADDITIONAL_STATUS:
vcpu->stat.instruction_sigp_store_adtl_status++;
break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
break;
...@@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
-order_code = kvm_s390_get_base_disp_rs(vcpu);
order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
if (handle_sigp_order_in_user_space(vcpu, order_code))
return -EOPNOTSUPP;
...@@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
struct kvm_vcpu *dest_vcpu;
-u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
...
This diff is collapsed.
...@@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void)
static inline bool kvm_para_available(void)
{
-return 0;
return false;
}
static inline unsigned int kvm_arch_para_features(void)
...
...@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
...
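
The new migrate_count field gives the vdso a way to notice that the task changed CPUs between reads of the per-CPU pvclock data (the "pvclock races with task migration" fix in this pull). Below is only a rough retry-loop sketch of the idea, not the actual vdso code; pvclock_get_time() is a hypothetical stand-in for the real read of the time info.

/* Illustrative retry loop: re-read if the task migrated mid-read. */
u64 read_pvclock(const struct pvclock_vsyscall_time_info *pvti)
{
	u32 migrate;
	u64 t;

	do {
		migrate = pvti->migrate_count;
		t = pvclock_get_time(&pvti->pvti);	/* hypothetical helper */
	} while (pvti->migrate_count != migrate);

	return t;
}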
This diff is collapsed.