Commit cc5705fb authored by Marc Zyngier

KVM: arm64: Drop vcpu->arch.has_run_once for vcpu->pid

With the transition to kvm_arch_vcpu_run_pid_change() to handle
the "run once" activities, it becomes obvious that has_run_once
is now an exact shadow of vcpu->pid.

Replace vcpu->arch.has_run_once with a new vcpu_has_run_once()
helper that directly checks for vcpu->pid, and get rid of the
now unused field.
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent b5aa368a
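
The equivalence the message relies on comes from the generic KVM_RUN path: vcpu->pid stays NULL until the first time a task runs the vCPU, at which point kvm_arch_vcpu_run_pid_change() is invoked and the pid is published, so a non-NULL vcpu->pid implies the vCPU has entered the guest at least once. The sketch below is a minimal user-space model of that equivalence, not kernel code; the struct layout, the vcpu_run() helper, and the omission of the RCU accessors (the real macro in the diff uses rcu_access_pointer()) are simplifications for illustration only.

/*
 * Simplified user-space model (not kernel code) of why vcpu->pid can
 * stand in for has_run_once: the pid is only assigned on the first
 * "run", so a non-NULL pid means the vCPU has run at least once.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct pid { int nr; };

struct vcpu {
	struct pid *pid;	/* NULL until the first "KVM_RUN" */
};

/* Mirrors the new helper: true once a pid has been assigned. */
#define vcpu_has_run_once(vcpu)	(!!(vcpu)->pid)

/* Models the pid-change path taken on the first KVM_RUN ioctl. */
static void vcpu_run(struct vcpu *vcpu, struct pid *current_pid)
{
	if (vcpu->pid != current_pid) {
		/* kvm_arch_vcpu_run_pid_change() does the one-time setup here. */
		vcpu->pid = current_pid;
	}
	/* ... enter the guest ... */
}

int main(void)
{
	struct pid task = { .nr = 1234 };
	struct vcpu vcpu = { .pid = NULL };

	assert(!vcpu_has_run_once(&vcpu));	/* never entered the guest */
	vcpu_run(&vcpu, &task);
	assert(vcpu_has_run_once(&vcpu));	/* pid set on first run */

	printf("has_run_once is fully derivable from vcpu->pid\n");
	return 0;
}
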
@@ -367,9 +367,6 @@ struct kvm_vcpu_arch {
 	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
-	/* Detect first run of a vcpu */
-	bool has_run_once;
-
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
@@ -606,6 +603,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
+#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
+
 #ifndef __KVM_NVHE_HYPERVISOR__
 #define kvm_call_hyp_nvhe(f, ...)					\
 	({								\
...
@@ -351,7 +351,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		static_branch_dec(&userspace_irqchip_in_use);
 
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -609,7 +609,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	if (likely(vcpu->arch.has_run_once))
+	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
 
 	kvm_arm_vcpu_init_debug(vcpu);
@@ -640,8 +640,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		static_branch_inc(&userspace_irqchip_in_use);
 	}
 
-	vcpu->arch.has_run_once = true;
-
 	/*
 	 * Initialize traps for protected VMs.
 	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
@@ -1132,7 +1130,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	 * need to invalidate the I-cache though, as FWB does *not*
 	 * imply CTR_EL0.DIC.
 	 */
-	if (vcpu->arch.has_run_once) {
+	if (vcpu_has_run_once(vcpu)) {
 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 			stage2_unmap_vm(vcpu->kvm);
 		else
...
@@ -91,7 +91,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 		return ret;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once)
+		if (vcpu_has_run_once(vcpu))
			goto out_unlock;
 	}
 	ret = 0;
...