Commit 1a0a02d1 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "ARM and x86 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: nVMX: VMX instructions: fix segment checks when L1 is in long mode.
  KVM: LAPIC: cap __delay at lapic_timer_advance_ns
  KVM: x86: move nsec_to_cycles from x86.c to x86.h
  pvclock: Get rid of __pvclock_read_cycles in function pvclock_read_flags
  pvclock: Cleanup to remove function pvclock_get_nsec_offset
  pvclock: Add CPU barriers to get correct version value
  KVM: arm/arm64: Stop leaking vcpu pid references
  arm64: KVM: fix build with CONFIG_ARM_PMU disabled
parents 284341d2 f5c5c225
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
+	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
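Why the one-line arm fix matters: kvm_vcpu_uninit() is where the generic KVM code drops the struct pid reference it took for the vcpu, so an arch teardown path that skips it leaks one pid reference per vcpu. A minimal userspace analogue of the pairing (all names here are illustrative, not kernel API):

	#include <stdlib.h>

	/* Toy counted reference standing in for struct pid. */
	struct pidref {
		int refcount;
	};

	static void put_pidref(struct pidref *p)
	{
		if (--p->refcount == 0)
			free(p);	/* last reference: object is released */
	}

	struct vcpu {
		struct pidref *pid;	/* reference taken when the vcpu was set up */
	};

	static void vcpu_uninit(struct vcpu *v)
	{
		put_pidref(v->pid);	/* the drop the teardown path was skipping */
	}

	int main(void)
	{
		struct pidref *p = malloc(sizeof(*p));
		struct vcpu v = { .pid = p };

		if (!p)
			return 1;
		p->refcount = 1;
		vcpu_uninit(&v);	/* balanced get/put: no leak */
		return 0;
	}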
@@ -68,30 +68,23 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	return product;
 }
 
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
-	u64 delta = rdtsc_ordered() - src->tsc_timestamp;
-	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
-				   src->tsc_shift);
-}
-
 static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 			       cycle_t *cycles, u8 *flags)
 {
 	unsigned version;
-	cycle_t ret, offset;
-	u8 ret_flags;
+	cycle_t offset;
+	u64 delta;
 
 	version = src->version;
+	/* Make the latest version visible */
+	smp_rmb();
 
-	offset = pvclock_get_nsec_offset(src);
-	ret = src->system_time + offset;
-	ret_flags = src->flags;
-
-	*cycles = ret;
-	*flags = ret_flags;
+	delta = rdtsc_ordered() - src->tsc_timestamp;
+	offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+				     src->tsc_shift);
+	*cycles = src->system_time + offset;
+	*flags = src->flags;
 	return version;
 }
 
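The restructured reader is a seqcount-style protocol: sample the version, read the fields, and have the caller retry if the version was odd (an update was in flight) or changed underneath the reads. A self-contained C11 sketch of that reader, with illustrative names; the acquire fences stand in for the kernel's smp_rmb(), and a strictly portable version would make the data fields atomic as well:

	#include <stdatomic.h>
	#include <stdint.h>

	/* Toy stand-in for pvclock_vcpu_time_info. */
	struct time_info {
		_Atomic uint32_t version;	/* odd while the writer is mid-update */
		uint64_t tsc_timestamp;
		uint64_t system_time;
	};

	static uint64_t read_clock(struct time_info *src, uint64_t now_tsc)
	{
		uint32_t version;
		uint64_t cycles;

		do {
			/* acquire: field reads below cannot move before this load */
			version = atomic_load_explicit(&src->version,
						       memory_order_acquire);
			cycles = src->system_time + (now_tsc - src->tsc_timestamp);
			/* second barrier: the version double-check must come last */
			atomic_thread_fence(memory_order_acquire);
		} while ((version & 1) ||
			 version != atomic_load_explicit(&src->version,
							 memory_order_relaxed));

		return cycles;
	}

The odd/even trick is what makes the (version & 1) test meaningful: the writer bumps the counter to odd before touching the fields and back to even afterwards, as sketched after the next hunk.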
@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = src->version;
+		/* Make the latest version visible */
+		smp_rmb();
+
+		flags = src->flags;
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;

@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
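For completeness, here is the writer side of the same convention (in real pvclock it is the host updating the shared page). Same illustrative types as the reader sketch above; the release fence plays the role of smp_wmb(), with the same caveat that strictly portable C11 would want the field stores to be atomic too:

	static void write_clock(struct time_info *dst, uint64_t tsc, uint64_t time)
	{
		uint32_t v = atomic_load_explicit(&dst->version,
						  memory_order_relaxed);

		/* go odd: readers sampling from here on will retry */
		atomic_store_explicit(&dst->version, v + 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);

		dst->tsc_timestamp = tsc;
		dst->system_time = time;

		/* back to even, publishing the new fields */
		atomic_store_explicit(&dst->version, v + 2, memory_order_release);
	}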
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
 	if (guest_tsc < tsc_deadline)
-		__delay(tsc_deadline - guest_tsc);
+		__delay(min(tsc_deadline - guest_tsc,
+			nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
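What the lapic change buys: wait_lapic_expire() busy-waits until the guest TSC reaches the deadline, so a bogus far-future deadline previously meant an essentially unbounded __delay(). Capping the spin at nsec_to_cycles(vcpu, lapic_timer_advance_ns) bounds it by the advance window the wait exists to compensate for. A small arithmetic sketch of the cap; the 2 GHz rate and the other values are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Rough ns -> TSC cycles conversion: khz is cycles per 1e6 ns. */
	static uint64_t nsec_to_cycles_approx(uint64_t nsec, uint64_t tsc_khz)
	{
		return nsec * tsc_khz / 1000000;
	}

	int main(void)
	{
		uint64_t tsc_khz = 2000000;		/* assume a 2 GHz virtual TSC */
		uint64_t advance_ns = 1000;		/* example advance window */
		uint64_t cap = nsec_to_cycles_approx(advance_ns, tsc_khz);

		uint64_t guest_tsc = 100;
		uint64_t tsc_deadline = UINT64_MAX;	/* pathological deadline */
		uint64_t spin = tsc_deadline - guest_tsc;

		if (spin > cap)				/* the min() in the fix */
			spin = cap;
		printf("spin for %llu cycles (capped at %llu)\n",
		       (unsigned long long)spin, (unsigned long long)cap);
		return 0;
	}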
@@ -6671,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
 	/* Checks for #GP/#SS exceptions. */
 	exn = false;
-	if (is_protmode(vcpu)) {
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check on the memory
+		 * destination for long mode!
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
 		/* Protected mode: apply checks for segment validity in the
 		 * following order:
 		 * - segment type check (#GP(0) may be thrown)

@@ -6688,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 		 * execute-only code segment
 		 */
 		exn = ((s.type & 0xa) == 8);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-	if (is_long_mode(vcpu)) {
-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
-		 * non-canonical form. This is an only check for long mode.
-		 */
-		exn = is_noncanonical_address(*ret);
-	} else if (is_protmode(vcpu)) {
+		if (exn) {
+			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+			return 1;
+		}
 		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
 		 */
 		exn = (s.unusable != 0);
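The nVMX reordering hinges on the long-mode rule: segment limits and types are not checked there, so the only thing to validate about the memory operand is that it is canonical, i.e. with 48-bit virtual addresses, bits 63:48 must be copies of bit 47. A standalone sketch of such a test (illustrative; not the kernel's exact is_noncanonical_address() macro):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Canonical for a 48-bit VA: sign-extending from bit 47 is a no-op. */
	static bool is_noncanonical(uint64_t va)
	{
		return (uint64_t)(((int64_t)(va << 16)) >> 16) != va;
	}

	int main(void)
	{
		assert(!is_noncanonical(0x00007fffffffffffULL)); /* top of low half */
		assert(!is_noncanonical(0xffff800000000000ULL)); /* bottom of high half */
		assert(is_noncanonical(0x0000800000000000ULL));  /* in the hole: #GP/#SS */
		return 0;
	}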
@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 static unsigned long max_tsc_khz;
 
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
-	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
-				   vcpu->arch.virtual_tsc_shift);
-}
-
 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 {
 	u64 v = (u64)khz * (1000000 + ppm);
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_X86_H
 
 #include <linux/kvm_host.h>
+#include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
 
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+				   vcpu->arch.virtual_tsc_shift);
+}
+
 /* Same "calling convention" as do_div:
  * - divide (n << 32) by base
  * - put result in n
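nsec_to_cycles() (moved here from the .c file so lapic.c can share it) leans on pvclock_scale_delta(), which is fixed-point arithmetic: shift the delta by `shift`, multiply by the 32.32-style fraction `mul_frac`, and keep the top 64 bits of the up-to-96-bit product. A hedged userspace rendering of that arithmetic; the parameters below are chosen to model a 2 GHz virtual TSC, so 1000 ns should come out as 2000 cycles:

	#include <stdint.h>
	#include <stdio.h>

	/* Same shape as pvclock_scale_delta(): (delta << shift) * mul_frac >> 32,
	 * with a negative shift meaning a right shift. A 128-bit intermediate
	 * stands in for the kernel's wide-multiply implementation. */
	static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
	}

	int main(void)
	{
		/* shift = 2, mul_frac = 2^31 encodes a x2 scale: 2 cycles per ns */
		printf("%llu\n",
		       (unsigned long long)scale_delta(1000, 0x80000000u, 2));
		/* prints 2000 */
		return 0;
	}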
@@ -18,13 +18,13 @@
 #ifndef __ASM_ARM_KVM_PMU_H
 #define __ASM_ARM_KVM_PMU_H
 
+#ifdef CONFIG_KVM_ARM_PMU
+
 #include <linux/perf_event.h>
 #include <asm/perf_event.h>
 
 #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
 
-#ifdef CONFIG_KVM_ARM_PMU
-
 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
 	struct perf_event *perf_event;
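The build fix works because ARMV8_PMU_CYCLE_IDX expands ARMV8_PMU_MAX_COUNTERS, which is only available when the underlying ARM PMU support is compiled in; hoisting the #ifdef CONFIG_KVM_ARM_PMU above the includes and the define keeps all of it out of PMU-less builds. Headers like this usually pair the guarded declarations with an #else branch of empty stubs so callers compile either way; a sketch of that pattern (the helper shown mirrors this header's style, but treat the exact signature as illustrative):

	#ifdef CONFIG_KVM_ARM_PMU
	/* real declaration; may reference ARMV8_PMU_MAX_COUNTERS and friends */
	u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
	#else
	/* PMU compiled out: callers still build and get a harmless no-op */
	static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
						    u64 select_idx)
	{
		return 0;
	}
	#endif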