Commit 4543bdc0 authored by Sean Christopherson; committed by Paolo Bonzini

KVM: Introduce kvm_vcpu_destroy()

Add kvm_vcpu_destroy() and wire up all architectures to call the common
function instead of their arch specific implementation.  The common
destruction function will be used by future patches to move allocation
and initialization of vCPUs to common KVM code, i.e. to free resources
that are allocated by arch agnostic code.

No functional change intended.
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d2423b34
@@ -156,7 +156,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;

 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		kvm_arch_vcpu_destroy(vcpu);
+		kvm_vcpu_destroy(vcpu);
 	}

 	mutex_lock(&kvm->lock);
...
@@ -475,7 +475,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 #endif

 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_destroy(vcpu);
+		kvm_vcpu_destroy(vcpu);

 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
...
@@ -2541,7 +2541,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;

 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_destroy(vcpu);
+		kvm_vcpu_destroy(vcpu);

 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
...
@@ -9680,7 +9680,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 		kvm_unload_vcpu_mmu(vcpu);
 	}

 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_destroy(vcpu);
+		kvm_vcpu_destroy(vcpu);

 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
...
@@ -581,6 +581,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)

 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
...
@@ -194,7 +194,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)

 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
+			kvm_vcpu_destroy(kvm->vcpus[i]);
 			kvm->vcpus[i] = NULL;
 		}
 	}
...
@@ -375,6 +375,12 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_arch_vcpu_destroy(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
+
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 {
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment