Commit 5d766508 authored by Wei Wang, committed by Paolo Bonzini

KVM: x86/pmu: Add kvm_pmu_call() to simplify static calls of kvm_pmu_ops

Similar to kvm_x86_call(), kvm_pmu_call() is added to streamline the usage
of static calls of kvm_pmu_ops, which improves code readability.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Link: https://lore.kernel.org/r/20240507133103.15052-4-wei.w.wang@intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 89604647
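
For readers unfamiliar with the pattern, here is a standalone userspace sketch (not kernel code) of what the wrapper buys at call sites. A plain function pointer stands in for the kernel's patched static_call() key, and the "refresh" hook, struct vcpu, and intel_pmu_refresh() are illustrative stand-ins rather than the real kvm_pmu_ops plumbing:

/*
 * Illustrative userspace approximation of the kvm_pmu_call() pattern.
 * The kernel's static_call() is a patched direct call; a function
 * pointer stands in for it here so the example compiles on its own.
 */
#include <stdio.h>

struct vcpu { int id; };                 /* stand-in for struct kvm_vcpu */

/* Stand-in for a vendor implementation of a kvm_pmu_ops hook. */
static void intel_pmu_refresh(struct vcpu *vcpu)
{
	printf("refresh PMU state for vCPU %d\n", vcpu->id);
}

/* Stand-in for the per-hook static-call key "kvm_x86_pmu_refresh". */
static void (*kvm_x86_pmu_refresh)(struct vcpu *) = intel_pmu_refresh;

/* Stand-in for the kernel's static_call(): resolve a key to its target. */
#define static_call(key)	(key)

/* The macro this commit adds: paste the kvm_x86_pmu_ prefix for the caller. */
#define kvm_pmu_call(func)	static_call(kvm_x86_pmu_##func)

int main(void)
{
	struct vcpu v = { .id = 0 };

	/* Before: spell out the full static-call key at every call site. */
	static_call(kvm_x86_pmu_refresh)(&v);

	/* After: the wrapper hides the prefix, mirroring kvm_x86_call(). */
	kvm_pmu_call(refresh)(&v);
	return 0;
}

In the real kernel, the per-hook keys are declared and updated elsewhere (mirroring the KVM_X86_OP()/DECLARE_STATIC_CALL() machinery visible in the first hunk below); the new macro changes nothing functionally and only shortens the spelling at call sites, as the diff shows.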
arch/x86/include/asm/kvm_host.h
@@ -1875,6 +1875,7 @@ extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;
 
 #define kvm_x86_call(func) static_call(kvm_x86_##func)
+#define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func)
 
 #define KVM_X86_OP(func) \
 	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));

arch/x86/kvm/pmu.c
@@ -542,7 +542,7 @@ int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 	if (!kvm_pmu_ops.check_rdpmc_early)
 		return 0;
 
-	return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
+	return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -591,7 +591,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
+	pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
 	if (!pmc)
 		return 1;
@@ -607,7 +607,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
+		kvm_pmu_call(deliver_pmi)(vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
@@ -622,14 +622,14 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	default:
 		break;
 	}
 
-	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
-		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
+	return kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr) ||
+	       kvm_pmu_call(is_valid_msr)(vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
+	struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr);
 
 	if (pmc)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
@@ -654,7 +654,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0;
 		break;
 	default:
-		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+		return kvm_pmu_call(get_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -713,7 +713,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	default:
 		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+		return kvm_pmu_call(set_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 
-	static_call(kvm_x86_pmu_reset)(vcpu);
+	kvm_pmu_call(reset)(vcpu);
 }
@@ -778,7 +778,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (!vcpu->kvm->arch.enable_pmu)
 		return;
 
-	static_call(kvm_x86_pmu_refresh)(vcpu);
+	kvm_pmu_call(refresh)(vcpu);
 
 	/*
 	 * At RESET, both Intel and AMD CPUs set all enable bits for general
@@ -796,7 +796,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
-	static_call(kvm_x86_pmu_init)(vcpu);
+	kvm_pmu_call(init)(vcpu);
 	kvm_pmu_refresh(vcpu);
 }
@@ -818,7 +818,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 		pmc_stop_counter(pmc);
 	}
 
-	static_call(kvm_x86_pmu_cleanup)(vcpu);
+	kvm_pmu_call(cleanup)(vcpu);
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }