Commit 259720c3 authored by Sean Christopherson

KVM: VMX: Combine "check" and "get" APIs for passthrough MSR lookups

Combine possible_passthrough_msr_slot() and is_valid_passthrough_msr()
into a single function, vmx_get_passthrough_msr_slot(), and have the
combined helper return the slot on success, using a negative value to
indicate "failure".

Combining the operations avoids iterating over the array of passthrough
MSRs twice for relevant MSRs.
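In effect, the change folds the validity check and the slot lookup into one
pass over the table. Below is a minimal userspace sketch of the combined
pattern, under the assumption that only the return-value convention matters;
the table contents, values, and helper name here are illustrative and are not
the kernel code:

	#include <errno.h>
	#include <stdio.h>

	#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

	/* Illustrative table; the real list is vmx_possible_passthrough_msrs[]. */
	static const unsigned int possible_passthrough_msrs[] = {
		0x174, 0x175, 0x176,	/* hypothetical MSR indices */
	};

	/* Combined "check + get": slot index on success, -ENOENT on failure. */
	static int get_passthrough_msr_slot(unsigned int msr)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(possible_passthrough_msrs); i++) {
			if (possible_passthrough_msrs[i] == msr)
				return i;	/* one pass yields both "valid" and the index */
		}
		return -ENOENT;
	}

	int main(void)
	{
		int idx = get_passthrough_msr_slot(0x175);

		if (idx >= 0)
			printf("slot %d\n", idx);
		else
			printf("not a passthrough MSR\n");
		return 0;
	}

Callers then test for a non-negative return instead of pairing a separate
validity check with a second lookup, which is how vmx_disable_intercept_for_msr()
and vmx_enable_intercept_for_msr() consume the new helper in the diff below.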
Suggested-by: Dongli Zhang <dongli.zhang@oracle.com>
Reviewed-by: Dongli Zhang <dongli.zhang@oracle.com>
Link: https://lore.kernel.org/r/20240223202104.3330974-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent bab22040
@@ -658,25 +658,14 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 	return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
-static int possible_passthrough_msr_slot(u32 msr)
-{
-	u32 i;
-
-	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
-		if (vmx_possible_passthrough_msrs[i] == msr)
-			return i;
-
-	return -ENOENT;
-}
-
-static bool is_valid_passthrough_msr(u32 msr)
+static int vmx_get_passthrough_msr_slot(u32 msr)
 {
-	bool r;
+	int i;
 
 	switch (msr) {
 	case 0x800 ... 0x8ff:
 		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
-		return true;
+		return -ENOENT;
 	case MSR_IA32_RTIT_STATUS:
 	case MSR_IA32_RTIT_OUTPUT_BASE:
 	case MSR_IA32_RTIT_OUTPUT_MASK:
@@ -691,14 +680,16 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
-		return true;
+		return -ENOENT;
 	}
 
-	r = possible_passthrough_msr_slot(msr) != -ENOENT;
-
-	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
+		if (vmx_possible_passthrough_msrs[i] == msr)
+			return i;
+	}
 
-	return r;
+	WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	return -ENOENT;
 }
 
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
@@ -3954,6 +3945,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -3963,16 +3955,13 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
 	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filters change.
 	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				clear_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				clear_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			clear_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			clear_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if ((type & MSR_TYPE_R) &&
@@ -3998,6 +3987,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -4007,16 +3997,13 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
 	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filter changes.
 	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				set_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				set_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			set_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			set_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if (type & MSR_TYPE_R)