Commit b13216cf authored by Christoffer Dall

KVM: arm/arm64: Provide functionality to pause and resume a guest

For some rare corner cases in our VGIC emulation, we later have to stop
the guest to make sure the VGIC state is consistent.
Provide the necessary framework to pause and resume a guest.
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
parent 2defaff4
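
For context, a minimal sketch of how a caller (for instance the later VGIC emulation code) is expected to use the new pair. The wrapper name and the elided update step are hypothetical; only kvm_arm_halt_guest() and kvm_arm_resume_guest() come from this patch.

/*
 * Illustrative only: bracket an update to shared emulation state so that
 * no VCPU can observe it half-written.  kvm_arm_halt_guest() sets
 * vcpu->arch.pause on every VCPU and kicks them out of the guest;
 * kvm_arm_resume_guest() clears the flag and wakes each VCPU again.
 */
static void example_update_under_halt(struct kvm *kvm)
{
	kvm_arm_halt_guest(kvm);

	/* ... modify VGIC (or other shared) state here ... */

	kvm_arm_resume_guest(kvm);
}
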
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -41,6 +41,8 @@
 
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 
+#define KVM_REQ_VCPU_EXIT 8
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -225,6 +227,8 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -492,30 +492,31 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
 	return vgic_initialized(kvm);
 }
 
-static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
-static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
-
-static void kvm_arm_halt_guest(struct kvm *kvm)
+void kvm_arm_halt_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		vcpu->arch.pause = true;
-	force_vm_exit(cpu_all_mask);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
+}
+
+static void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+	vcpu->arch.pause = false;
+	swake_up(wq);
 }
 
-static void kvm_arm_resume_guest(struct kvm *kvm)
+void kvm_arm_resume_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-		vcpu->arch.pause = false;
-		swake_up(wq);
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arm_resume_vcpu(vcpu);
 }
 
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
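
The sleeping side is not part of this diff; the following is a rough sketch of what the existing vcpu_sleep() in arm.c is assumed to look like at this point in the series. The exact wait condition is an assumption, but it shows why swake_up() in kvm_arm_resume_vcpu() is enough: the woken VCPU re-evaluates the condition and re-enters the guest once pause is cleared.

/*
 * Sketch of the consumer side (assumed, not introduced by this patch):
 * the run loop parks a paused VCPU on its wait queue until both the
 * power_off and pause flags are clear again.
 */
static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

	swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
				       (!vcpu->arch.pause)));
}
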
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -44,6 +44,8 @@
 
 #define KVM_VCPU_MAX_FEATURES 4
 
+#define KVM_REQ_VCPU_EXIT 8
+
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -325,6 +327,8 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+
 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)