Commit ea4a0ce1 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Fix bounds checking in ioapic indirect register reads (CVE-2013-1798)
  KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)
  KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME (CVE-2013-1796)
  KVM: x86: fix deadlock in clock-in-progress request handling
  KVM: allow host header to be included even for !CONFIG_KVM
parents 10b38669 a2c118bf
arch/x86/include/asm/kvm_host.h

@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
 	unsigned int hw_tsc_khz;
-	unsigned int time_offset;
-	struct page *time_page;
+	struct gfn_to_hva_cache pv_time;
+	bool pv_time_enabled;
 	/* set guest stopped flag in pvclock flags field */
 	bool pvclock_set_guest_stopped_request;
arch/x86/kvm/x86.c

@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	unsigned long flags, this_tsc_khz;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
-	void *shared_kaddr;
 	s64 kernel_ns, max_kernel_ns;
 	u64 tsc_timestamp, host_tsc;
-	struct pvclock_vcpu_time_info *guest_hv_clock;
+	struct pvclock_vcpu_time_info guest_hv_clock;
 	u8 pvclock_flags;
 	bool use_master_clock;
 
 	kernel_ns = 0;
 	host_tsc = 0;
 
-	/* Keep irq disabled to prevent changes to the clock */
-	local_irq_save(flags);
-	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-	if (unlikely(this_tsc_khz == 0)) {
-		local_irq_restore(flags);
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-		return 1;
-	}
-
 	/*
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		kernel_ns = ka->master_kernel_ns;
 	}
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+	/* Keep irq disabled to prevent changes to the clock */
+	local_irq_save(flags);
+	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+	if (unlikely(this_tsc_khz == 0)) {
+		local_irq_restore(flags);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+		return 1;
+	}
 	if (!use_master_clock) {
 		host_tsc = native_read_tsc();
 		kernel_ns = get_kernel_ns();
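The two hunks above move the IRQ-disabled section below the pvclock_gtod_sync_lock critical section; this is the "fix deadlock in clock-in-progress request handling" entry from the shortlog. A sketch of the kind of interleaving the new ordering rules out (an illustrative reconstruction, not taken from the commit message):

/*
 * Why "lock first, then IRQs off" matters (illustrative, assuming the
 * lock holder may need this CPU to act before it releases the lock):
 *
 *   CPU0                                     CPU1
 *   spin_lock(&ka->pvclock_gtod_sync_lock);
 *                                            local_irq_save(flags);
 *   request work from all vcpus, wait        spin_lock(&ka->pvclock_gtod_sync_lock);
 *   (CPU1 cannot respond: IRQs off) ...      ... spins forever with IRQs disabled ...
 *
 * Disabling interrupts only after the locked section closes this window.
 */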
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	local_irq_restore(flags);
 
-	if (!vcpu->time_page)
+	if (!vcpu->pv_time_enabled)
 		return 0;
 
 	/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;
 
-	shared_kaddr = kmap_atomic(vcpu->time_page);
-
-	guest_hv_clock = shared_kaddr + vcpu->time_offset;
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+		&guest_hv_clock, sizeof(guest_hv_clock))))
+		return 0;
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
 
 	if (vcpu->pvclock_set_guest_stopped_request) {
 		pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	vcpu->hv_clock.flags = pvclock_flags;
 
-	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-	       sizeof(vcpu->hv_clock));
-
-	kunmap_atomic(shared_kaddr);
-
-	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock));
 
 	return 0;
 }
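The hv_clock.version += 2 in the hunk above is the host half of the pvclock versioning protocol; the pre-existing comment in x86.c notes that the guest never sees the intermediate state (the update runs on the vcpu thread before guest re-entry), so a single even increment suffices. For context, here is a sketch of the guest-side reader, modeled on pvclock_clocksource_read() from arch/x86/kernel/pvclock.c. The struct is abridged, rmb() is spelled out by hand, and the TSC-delta scaling is omitted, so treat names and layout here as assumptions rather than kernel code:

#include <stdint.h>

/* Minimal subset of struct pvclock_vcpu_time_info (full layout lives
 * in arch/x86/include/asm/pvclock-abi.h). */
struct pvti {
	volatile uint32_t version;
	uint64_t tsc_timestamp;
	uint64_t system_time;
	uint8_t  flags;
};

#define rmb() __asm__ __volatile__("lfence" ::: "memory")

/* Guest-side read protocol: retry while an update is in flight (odd
 * version) or the version changed while the fields were being read. */
static uint64_t pvclock_read_sketch(struct pvti *src)
{
	uint32_t version;
	uint64_t system_time;

	do {
		version = src->version;
		rmb();				/* read fields after version */
		system_time = src->system_time;	/* real code also scales the TSC delta */
		rmb();				/* re-check version last */
	} while ((version & 1) || version != src->version);

	return system_time;
}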
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.time_page) {
-		kvm_release_page_dirty(vcpu->arch.time_page);
-		vcpu->arch.time_page = NULL;
-	}
+	vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
+		u64 gpa_offset;
 		kvmclock_reset(vcpu);
 
 		vcpu->arch.time = data;
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		/* ...but clean it before doing the actual write */
-		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		vcpu->arch.time_page =
-				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+		/* Check that the address is 32-byte aligned. */
+		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+			break;
 
-		if (is_error_page(vcpu->arch.time_page))
-			vcpu->arch.time_page = NULL;
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		     &vcpu->arch.pv_time, data & ~1ULL))
+			vcpu->arch.pv_time_enabled = false;
+		else
+			vcpu->arch.pv_time_enabled = true;
 
 		break;
 	}
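The new 32-byte alignment check is the CVE-2013-1796 fix. Under the old code the guest controlled time_offset with only the enable bit and the page-frame bits masked off, so an offset near the end of the page made the later 32-byte memcpy() run past the single kmap'ed page. A standalone arithmetic sketch of both the bug and the check (userspace C; 4 KiB pages and sizeof(struct pvclock_vcpu_time_info) == 32 are assumed constants):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PVTI_SIZE   32u	/* sizeof(struct pvclock_vcpu_time_info) */

int main(void)
{
	/* Guest writes MSR_KVM_SYSTEM_TIME: bit 0 = enable, rest = gpa. */
	uint64_t msr = 0x12345000ULL | 0xff0 | 1;

	/* Old code: offset = data & ~(PAGE_MASK | 1) -- anything up to 4094. */
	uint32_t offset = (uint32_t)(msr & (PAGE_SIZE - 1) & ~1u);

	if (offset + PVTI_SIZE > PAGE_SIZE)	/* 0xff0 + 32 = 0x1010 */
		printf("old code: 32-byte update at 0x%x overruns the mapped page\n",
		       offset);

	/* New code: reject anything not 32-byte aligned before caching it. */
	if (offset & (PVTI_SIZE - 1))
		printf("new code: gpa offset 0x%x rejected (not %u-byte aligned)\n",
		       offset, PVTI_SIZE);
	return 0;
}

With 32-byte alignment the highest permitted offset is 0xfe0, so the record always fits within one page; the gfn_to_hva_cache conversion in the surrounding hunks removes the manual page mapping entirely.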
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  */
 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->arch.time_page)
+	if (!vcpu->arch.pv_time_enabled)
 		return -EINVAL;
 	vcpu->arch.pvclock_set_guest_stopped_request = true;
 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		goto fail_free_wbinvd_dirty_mask;
 
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+	vcpu->arch.pv_time_enabled = false;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
include/linux/kvm_host.h

@@ -1,6 +1,8 @@
 #ifndef __KVM_HOST_H
 #define __KVM_HOST_H
 
+#if IS_ENABLED(CONFIG_KVM)
+
 /*
  * This work is licensed under the terms of the GNU GPL, version 2. See
  * the COPYING file in the top-level directory.
@@ -1055,5 +1057,8 @@ static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 
+#else
+static inline void __guest_enter(void) { return; }
+static inline void __guest_exit(void) { return; }
+#endif /* IS_ENABLED(CONFIG_KVM) */
 #endif
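The !CONFIG_KVM branch follows a standard kernel header pattern: when a subsystem is configured out, the header supplies empty static inline stubs so generic code can call them without sprinkling #ifdefs at every call site. A generic sketch of the pattern (CONFIG_FOO and foo_event() are made-up names, not part of this commit):

/* foo.h -- stub pattern sketch */
#if IS_ENABLED(CONFIG_FOO)
void foo_event(int cpu);			/* real implementation in foo.c */
#else
static inline void foo_event(int cpu) { }	/* no-op, compiles away */
#endif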
virt/kvm/ioapic.c

@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
 			u64 redir_content;
 
-			ASSERT(redir_index < IOAPIC_NUM_PINS);
+			if (redir_index < IOAPIC_NUM_PINS)
+				redir_content =
+					ioapic->redirtbl[redir_index].bits;
+			else
+				redir_content = ~0ULL;
 
-			redir_content = ioapic->redirtbl[redir_index].bits;
 			result = (ioapic->ioregsel & 0x1) ?
 				(redir_content >> 32) & 0xffffffff :
 				redir_content & 0xffffffff;
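The IOAPIC's indirect interface explains the index math above: software writes a register number to IOREGSEL and reads the result through IOWIN, where registers 0x10 + 2*pin and 0x11 + 2*pin are the low and high 32-bit halves of 64-bit redirection entry pin. Before this fix, a guest-chosen IOREGSEL past the last pin indexed redirtbl[] out of bounds under a debug-only ASSERT (CVE-2013-1798); now such reads return all-ones. A standalone model of the fixed lookup (userspace C, with the x86 value IOAPIC_NUM_PINS == 24):

#include <stdint.h>
#include <stdio.h>

#define IOAPIC_NUM_PINS 24u

static uint64_t redirtbl[IOAPIC_NUM_PINS];	/* 64-bit redirection entries */

/* Model of the fixed indirect read, for ioregsel >= 0x10. */
static uint32_t ioapic_read_model(uint32_t ioregsel)
{
	uint32_t redir_index = (ioregsel - 0x10) >> 1;
	uint64_t redir_content;

	if (redir_index < IOAPIC_NUM_PINS)
		redir_content = redirtbl[redir_index];
	else
		redir_content = ~0ULL;	/* out of range: all-ones, no OOB read */

	return (ioregsel & 0x1) ? (uint32_t)(redir_content >> 32)
				: (uint32_t)redir_content;
}

int main(void)
{
	redirtbl[0] = 0x123456789abcdef0ULL;
	printf("0x10 -> %08x (low half of pin 0)\n",  ioapic_read_model(0x10));
	printf("0x11 -> %08x (high half of pin 0)\n", ioapic_read_model(0x11));
	printf("0xff -> %08x (out of range; was an out-of-bounds read)\n",
	       ioapic_read_model(0xff));
	return 0;
}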