Commit 8b88ed3c authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:
 "ARM:
   - Lazy FPSIMD switching fixes
   - Really disable compat ioctls on architectures that don't want it
   - Disable compat on arm64 (it was never implemented...)
   - Rely on architectural requirements for GICV on GICv3
   - Detect bad alignments in unmap_stage2_range

  x86:
   - Add nested VM entry checks to avoid broken error recovery path
   - Minor documentation fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: fix KVM_CAP_HYPERV_TLBFLUSH paragraph number
  kvm: vmx: Nested VM-entry prereqs for event inj.
  KVM: arm64: Prevent KVM_COMPAT from being selected
  KVM: Enforce error in ioctl for compat tasks when !KVM_COMPAT
  KVM: arm/arm64: add WARN_ON if size is not PAGE_SIZE aligned in unmap_stage2_range
  KVM: arm64: Avoid mistaken attempts to save SVE state for vcpus
  KVM: arm64/sve: Fix SVE trap restoration for non-current tasks
  KVM: arm64: Don't mask softirq with IRQs disabled in vcpu_put()
  arm64: Introduce sysreg_clear_set()
  KVM: arm/arm64: Drop resource size check for GICV window
parents 4ab59fcf 2ddc6498
@@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.
 
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
 
 Architectures: x86
...
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
 
 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
...
@@ -728,6 +728,17 @@ asm(
 	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val));	\
 } while (0)
 
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {			\
+	u64 __scs_val = read_sysreg(sysreg);				\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
+	if (__scs_new != __scs_val)					\
+		write_sysreg(__scs_new, sysreg);			\
+} while (0)
+
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
 	u32 val;
...
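For reference, the read-modify-write semantics of sysreg_clear_set() can be modeled in plain user-space C (illustrative only: read_sysreg()/write_sysreg() are stubbed with a variable, and all names here are made up, not kernel API):

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_reg = 0xff;

static void model_clear_set(uint64_t clear, uint64_t set)
{
	uint64_t val = fake_reg;
	uint64_t res = (val & ~clear) | set;

	if (res != val)		/* skip the write when nothing changes */
		fake_reg = res;
}

int main(void)
{
	model_clear_set(0xf, 0x2);	/* clear bits 3:0, then set bit 1 */
	printf("0x%llx\n", (unsigned long long)fake_reg);	/* 0xf2 */
	return 0;
}

On overlap the set mask wins, because clearing happens first; and the register write is skipped entirely when the value would be unchanged.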
@@ -5,13 +5,14 @@
  * Copyright 2018 Arm Limited
  * Author: Dave Martin <Dave.Martin@arm.com>
  */
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
 
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(!current->mm);
 
-	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+			      KVM_ARM64_HOST_SVE_IN_USE |
+			      KVM_ARM64_HOST_SVE_ENABLED);
 	vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
 	if (test_thread_flag(TIF_SVE))
 		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
 
 /*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
-	local_bh_disable();
-
-	update_thread_flag(TIF_SVE,
-			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
 		/* Clean guest FP state to memory and invalidate cpu view */
 		fpsimd_save();
 		fpsimd_flush_cpu_state();
-	} else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		/* Ensure user trap controls are correctly restored */
-		fpsimd_bind_task_to_cpu();
+	} else if (system_supports_sve()) {
+		/*
+		 * The FPSIMD/SVE state in the CPU has not been touched, and we
+		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+		 * for EL0. To avoid spurious traps, restore the trap state
+		 * seen by kvm_arch_vcpu_load_fp():
+		 */
+		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+		else
+			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
 	}
 
-	local_bh_enable();
+	update_thread_flag(TIF_SVE,
+			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+	local_irq_restore(flags);
 }
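The net effect of the two hunks above is a save/restore pair around guest entry: kvm_arch_vcpu_load_fp() samples whether EL0 SVE accesses were enabled, and kvm_arch_vcpu_put_fp() re-applies that state after the Hyp code has reset CPACR_EL1. A stand-alone sketch of that round trip (the fake register and all helper names are illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

#define ZEN_EL0EN		(1u << 16)	/* models CPACR_EL1_ZEN_EL0EN */
#define HOST_SVE_ENABLED	(1u << 4)	/* models the vcpu flag */

static uint32_t fake_cpacr;

static void load_fp(uint32_t *flags)
{
	*flags &= ~HOST_SVE_ENABLED;
	if (fake_cpacr & ZEN_EL0EN)		/* sample EL0 trap state */
		*flags |= HOST_SVE_ENABLED;
}

static void put_fp(uint32_t flags)
{
	fake_cpacr = 0;		/* model Hyp resetting the register */
	if (flags & HOST_SVE_ENABLED)
		fake_cpacr |= ZEN_EL0EN;	/* restore, avoid spurious traps */
}

int main(void)
{
	uint32_t flags = 0;

	fake_cpacr = ZEN_EL0EN;		/* EL0 SVE enabled before entry */
	load_fp(&flags);
	put_fp(flags);
	assert(fake_cpacr & ZEN_EL0EN);	/* restored after guest exit */
	return 0;
}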
@@ -114,6 +114,7 @@
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK	0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA			0x00000020
 #define VMX_MISC_ACTIVITY_HLT			0x00000040
+#define VMX_MISC_ZERO_LEN_INS			0x40000000
 
 /* VMFUNC functions */
 #define VMX_VMFUNC_EPTP_SWITCHING		0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK	INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR		(0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED		(1 << 8) /* reserved */
 #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR		(4 << 8) /* software interrupt */
 #define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT		(7 << 8) /* other event */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI		0x00000001
...
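The INTR_TYPE_* values above occupy bits 10:8 of the VM-entry interruption-information field, alongside the vector in bits 7:0, the deliver-error-code bit (11), and the valid bit (31). A quick illustrative decode of one such value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* valid, deliver error code, type 3 (hard exception), vector 13 */
	uint32_t info = (1u << 31) | (1u << 11) | (3u << 8) | 13;

	printf("valid=%u type=%u vector=%u has_err=%u\n",
	       info >> 31, (info >> 8) & 0x7, info & 0xff,
	       (info >> 11) & 1);
	/* prints: valid=1 type=3 vector=13 has_err=1 (a #GP injection) */
	return 0;
}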
@@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
 		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }
 
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+		CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
 {
 	return vmcs12->cpu_based_vm_exec_control & bit;
@@ -11620,6 +11631,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
 		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
+	/*
+	 * From the Intel SDM, volume 3:
+	 * Fields relevant to VM-entry event injection must be set properly.
+	 * These fields are the VM-entry interruption-information field, the
+	 * VM-entry exception error code, and the VM-entry instruction length.
+	 */
+	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+		u32 intr_info = vmcs12->vm_entry_intr_info_field;
+		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+		bool should_have_error_code;
+		bool urg = nested_cpu_has2(vmcs12,
+					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
+		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+		/* VM-entry interruption-info field: interruption type */
+		if (intr_type == INTR_TYPE_RESERVED ||
+		    (intr_type == INTR_TYPE_OTHER_EVENT &&
+		     !nested_cpu_supports_monitor_trap_flag(vcpu)))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: vector */
+		if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+		    (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+		    (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: deliver error code */
+		should_have_error_code =
+			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+			x86_exception_has_error_code(vector);
+		if (has_error_code != should_have_error_code)
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry exception error code */
+		if (has_error_code &&
+		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: reserved bits */
+		if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry instruction length */
+		switch (intr_type) {
+		case INTR_TYPE_SOFT_EXCEPTION:
+		case INTR_TYPE_SOFT_INTR:
+		case INTR_TYPE_PRIV_SW_EXCEPTION:
+			if ((vmcs12->vm_entry_instruction_len > 15) ||
+			    (vmcs12->vm_entry_instruction_len == 0 &&
+			     !nested_cpu_has_zero_length_injection(vcpu)))
+				return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+		}
+	}
+
 	return 0;
 }
...
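The trailing switch encodes one rule worth spelling out: software interrupts and exceptions need a VM-entry instruction length in [1, 15], with 0 allowed only when the VMX_MISC_ZERO_LEN_INS capability is advertised. A minimal model of just that predicate (the helper name is hypothetical):

#include <assert.h>
#include <stdbool.h>

static bool instr_len_valid(unsigned int len, bool zero_len_ok)
{
	if (len > 15)
		return false;
	if (len == 0 && !zero_len_ok)
		return false;
	return true;
}

int main(void)
{
	assert(instr_len_valid(1, false));
	assert(!instr_len_valid(0, false));	/* rejected without the cap */
	assert(instr_len_valid(0, true));	/* VMX_MISC_ZERO_LEN_INS set */
	assert(!instr_len_valid(16, true));	/* never more than 15 bytes */
	return 0;
}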
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+			BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+	return (1U << vector) & exception_has_error_code;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
...
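The bitmap covers the exceptions that architecturally push an error code (#DF, #TS, #NP, #SS, #GP, #PF, #AC). The same check, compiled stand-alone with the standard x86 vector numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Standard x86 exception vector numbers */
enum { DE_VECTOR = 0, DF_VECTOR = 8, TS_VECTOR = 10, NP_VECTOR = 11,
       SS_VECTOR = 12, GP_VECTOR = 13, PF_VECTOR = 14, AC_VECTOR = 17 };

static bool exception_has_error_code(unsigned int vector)
{
	static const uint32_t mask = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1u << vector) & mask;
}

int main(void)
{
	printf("#DE:%d #GP:%d\n", exception_has_error_code(DE_VECTOR),
	       exception_has_error_code(GP_VECTOR));	/* #DE:0 #GP:1 */
	return 0;
}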
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
 
 config KVM_COMPAT
        def_bool y
-       depends on KVM && COMPAT && !S390
+       depends on KVM && COMPAT && !(S390 || ARM64)
 
 config HAVE_KVM_IRQ_BYPASS
        bool
...
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t next;
 
 	assert_spin_locked(&kvm->mmu_lock);
+	WARN_ON(size & ~PAGE_MASK);
+
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		/*
...
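The new WARN_ON relies on the usual mask identity: size & ~PAGE_MASK is nonzero exactly when size is not a multiple of the page size. Demonstrated stand-alone (PAGE_SIZE fixed at 4 KiB here purely for illustration):

#include <assert.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	assert((4096UL & ~PAGE_MASK) == 0);	/* aligned: no warning */
	assert((6144UL & ~PAGE_MASK) != 0);	/* 1.5 pages: would WARN */
	return 0;
}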
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 		pr_warn("GICV physical address 0x%llx not page aligned\n",
 			(unsigned long long)info->vcpu.start);
 		kvm_vgic_global_state.vcpu_base = 0;
-	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
-		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-			(unsigned long long)resource_size(&info->vcpu),
-			PAGE_SIZE);
-		kvm_vgic_global_state.vcpu_base = 0;
 	} else {
 		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
 		kvm_vgic_global_state.can_emulate_gicv2 = true;
...
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 #ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
 				  unsigned long arg);
+#define KVM_COMPAT(c)	.compat_ioctl	= (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+				unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl
 #endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
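The KVM_COMPAT() macro lets each file_operations table below drop its #ifdef: the designated initializer is swapped per configuration, and compat tasks now get -EINVAL when compat support is off instead of falling through. A compilable model of the pattern (the config is faked with a plain #define; all names are illustrative):

#include <stdio.h>

struct file_operations { long (*compat_ioctl)(int); };

static long real_compat(int n) { return n; }
static long no_compat(int n) { (void)n; return -22; /* -EINVAL */ }

#define HAVE_COMPAT 0	/* flip to 1 to model CONFIG_KVM_COMPAT=y */
#if HAVE_COMPAT
#define KVM_COMPAT(c)	.compat_ioctl = (c)
#else
#define KVM_COMPAT(c)	.compat_ioctl = no_compat
#endif

static struct file_operations fops = {
	KVM_COMPAT(real_compat),	/* table itself stays unconditional */
};

int main(void)
{
	(void)real_compat;	/* referenced by KVM_COMPAT() when enabled */
	printf("%ld\n", fops.compat_ioctl(5));	/* -22 when compat is off */
	return 0;
}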
@@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
 	.release        = kvm_vcpu_release,
 	.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-	.compat_ioctl   = kvm_vcpu_compat_ioctl,
-#endif
 	.mmap           = kvm_vcpu_mmap,
 	.llseek		= noop_llseek,
+	KVM_COMPAT(kvm_vcpu_compat_ioctl),
 };
 
 /*
@@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 static const struct file_operations kvm_device_fops = {
 	.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-	.compat_ioctl = kvm_device_ioctl,
-#endif
 	.release = kvm_device_release,
+	KVM_COMPAT(kvm_device_ioctl),
 };
 
 struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 static struct file_operations kvm_vm_fops = {
 	.release        = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-	.compat_ioctl   = kvm_vm_compat_ioctl,
-#endif
 	.llseek		= noop_llseek,
+	KVM_COMPAT(kvm_vm_compat_ioctl),
 };
 
 static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@ static long kvm_dev_ioctl(struct file *filp,
 static struct file_operations kvm_chardev_ops = {
 	.unlocked_ioctl = kvm_dev_ioctl,
-	.compat_ioctl   = kvm_dev_ioctl,
 	.llseek		= noop_llseek,
+	KVM_COMPAT(kvm_dev_ioctl),
 };
 
 static struct miscdevice kvm_dev = {
...