Commit ddd259c9 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Drop kvm_arch_vcpu_init() and kvm_arch_vcpu_uninit()

Remove kvm_arch_vcpu_init() and kvm_arch_vcpu_uninit() now that all
arch specific implementations are nops.
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 19bcc89e
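Every architecture had whittled these two hooks down to the same trivial stubs; each arch hunk below deletes a pair of functions of this shape (a sketch reassembled from the deletions in the diff itself):

    int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
    {
            return 0;       /* no arch-specific init work left */
    }

    void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
    {
    }

With both hooks gone, the only way the generic kvm_vcpu_init() can fail is the vcpu->run page allocation, which is why the final hunks collapse the goto-based unwind into a direct return of -ENOMEM.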
@@ -363,7 +363,6 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 static inline bool kvm_arch_requires_vhe(void) { return false; }
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
...
@@ -54,7 +54,6 @@ int kvm_arm_init_sve(void);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
 int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
...
@@ -204,11 +204,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 	return true;
 }
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	kfree(vcpu->arch.sve_state);
...
@@ -1230,16 +1230,6 @@ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
 	return kvm_mips_count_timeout(vcpu);
 }
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 				  struct kvm_translation *tr)
 {
...
@@ -801,16 +801,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return kvmppc_core_pending_dec(vcpu);
 }
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_BOOKE
...
@@ -914,7 +914,6 @@ extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
...
@@ -2700,11 +2700,6 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
 }
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 {
...
@@ -9575,16 +9575,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 struct static_key kvm_no_apic_vcpu __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
...
@@ -864,9 +864,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 int kvm_arch_init(void *opaque);
 void kvm_arch_exit(void);
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
...
@@ -360,11 +360,6 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
...
@@ -324,7 +324,6 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
 static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	struct page *page;
-	int r;
 	mutex_init(&vcpu->mutex);
 	vcpu->cpu = -1;
@@ -338,10 +337,8 @@ static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!page) {
-		r = -ENOMEM;
-		goto fail;
-	}
+	if (!page)
+		return -ENOMEM;
 	vcpu->run = page_address(page);
 	kvm_vcpu_set_in_spin_loop(vcpu, false);
@@ -350,15 +347,7 @@ static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 	vcpu->ready = false;
 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
-	r = kvm_arch_vcpu_init(vcpu);
-	if (r < 0)
-		goto fail_free_run;
 	return 0;
-fail_free_run:
-	free_page((unsigned long)vcpu->run);
-fail:
-	return r;
 }
 static void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
@@ -369,7 +358,6 @@ static void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 	 * descriptors are already gone.
 	 */
 	put_pid(rcu_dereference_protected(vcpu->pid, 1));
-	kvm_arch_vcpu_uninit(vcpu);
 	free_page((unsigned long)vcpu->run);
 }
...
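For readability, here is the simplified kvm_vcpu_init() as it reads once the hunks above are applied, reconstructed from the surviving right-hand lines of the diff (anything the diff does not show is elided):

    static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
    {
            struct page *page;

            mutex_init(&vcpu->mutex);
            vcpu->cpu = -1;
            /* ... per-vcpu field setup elided by the diff ... */
            INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

            page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (!page)
                    return -ENOMEM;         /* the sole remaining failure path */
            vcpu->run = page_address(page);

            kvm_vcpu_set_in_spin_loop(vcpu, false);
            /* ... */
            vcpu->ready = false;
            preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

            return 0;
    }

kvm_vcpu_uninit() likewise shrinks to put_pid() followed by free_page(), with no arch callback in between.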