Commit 3b5d56b9 authored by Eric B Munson, committed by Avi Kivity

kvmclock: Add functions to check if the host has stopped the vm

When a host stops or suspends a VM it sets a flag to record this.  The
watchdog will use these functions to determine whether a soft lockup is real or
merely the result of a suspended VM.
Signed-off-by: Eric B Munson <emunson@mgebm.net>
asm-generic changes Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent eae3ee7d
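
For context, here is a minimal sketch of how a soft-lockup detector could consume the new helper. Only kvm_check_and_clear_guest_paused() (added below) and touch_softlockup_watchdog() are real kernel interfaces; example_check_softlockup() and its duration parameter are hypothetical stand-ins, and the actual kernel/watchdog.c hookup is a separate patch.

/*
 * Illustrative sketch only -- not part of this commit.
 */
#include <linux/kvm_para.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void example_check_softlockup(int duration)
{
	if (duration <= 0)
		return;		/* no apparent stall on this CPU */

	/*
	 * If the host stopped or suspended this guest, the apparent stall
	 * is not a real lockup: consume the PVCLOCK_GUEST_STOPPED flag,
	 * reset the watchdog timestamp and skip the warning.
	 */
	if (kvm_check_and_clear_guest_paused()) {
		touch_softlockup_watchdog();
		return;
	}

	pr_emerg("BUG: soft lockup - CPU#%d stuck for %ds!\n",
		 smp_processor_id(), duration);
}
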
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
	return 0;
}

static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}

#endif
#endif
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
	return r;
}

static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}

#endif /* __KERNEL__ */
#endif /* __POWERPC_KVM_PARA_H__ */
@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
	return 0;
}

static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}

#endif
#endif /* __S390_KVM_PARA_H */
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
@@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data {
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);

#ifdef CONFIG_KVM_CLOCK
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_CLOCK */

/* This instruction is vmcall.  On non-VT architectures, it will generate a
 * trap that we will then rewrite to the appropriate instruction.
......
@@ -22,6 +22,7 @@
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
@@ -114,6 +115,26 @@ static void kvm_get_preset_lpj(void)
	preset_lpj = lpj;
}
bool kvm_check_and_clear_guest_paused(void)
{
	bool ret = false;
	struct pvclock_vcpu_time_info *src;

	/*
	 * per_cpu() is safe here because this function is only called from
	 * timer functions where preemption is already disabled.
	 */
	WARN_ON(!in_atomic());
	src = &__get_cpu_var(hv_clock);
	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
		__this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
		ret = true;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_check_and_clear_guest_paused);
static struct clocksource kvm_clock = {
.name = "kvm-clock",
.read = kvm_clock_get_cycles,
......
#include <asm-generic/kvm_para.h>
#ifndef _ASM_GENERIC_KVM_PARA_H
#define _ASM_GENERIC_KVM_PARA_H

/*
 * This function is used by architectures that support kvm to avoid issuing
 * false soft lockup messages.
 */
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}

#endif
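
The asm-generic stub above (together with the one-line per-architecture headers earlier in this diff) is what allows common code to call the helper without any configuration checks. A minimal sketch, assuming a hypothetical caller example_stall_is_host_induced() in generic code:

#include <linux/kvm_para.h>

/*
 * Hypothetical caller: no #ifdef CONFIG_KVM_CLOCK is needed at the call
 * site; on architectures or configurations without kvmclock the inline
 * stub simply returns false.
 */
static bool example_stall_is_host_induced(void)
{
	return kvm_check_and_clear_guest_paused();
}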