Commit 9cc39a5a authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Export the number of uret MSRs to vendor modules

Split out and export the number of configured user return MSRs so that
VMX can iterate over the set of MSRs without having to do its own tracking.
Keep the list itself internal to x86 so that vendor code still has to go
through the "official" APIs to add/modify entries.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210504171734.1434054-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5e17c624
...@@ -1418,6 +1418,7 @@ struct kvm_arch_async_pf { ...@@ -1418,6 +1418,7 @@ struct kvm_arch_async_pf {
bool direct_map; bool direct_map;
}; };
extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer; extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr; extern bool __read_mostly allow_smaller_maxphyaddr;
extern struct kvm_x86_ops kvm_x86_ops; extern struct kvm_x86_ops kvm_x86_ops;
......
...@@ -184,11 +184,6 @@ module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR); ...@@ -184,11 +184,6 @@ module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
*/ */
#define KVM_MAX_NR_USER_RETURN_MSRS 16 #define KVM_MAX_NR_USER_RETURN_MSRS 16
struct kvm_user_return_msrs_global {
int nr;
u32 msrs[KVM_MAX_NR_USER_RETURN_MSRS];
};
struct kvm_user_return_msrs { struct kvm_user_return_msrs {
struct user_return_notifier urn; struct user_return_notifier urn;
bool registered; bool registered;
...@@ -198,7 +193,9 @@ struct kvm_user_return_msrs { ...@@ -198,7 +193,9 @@ struct kvm_user_return_msrs {
} values[KVM_MAX_NR_USER_RETURN_MSRS]; } values[KVM_MAX_NR_USER_RETURN_MSRS];
}; };
static struct kvm_user_return_msrs_global __read_mostly user_return_msrs_global; u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs; static struct kvm_user_return_msrs __percpu *user_return_msrs;
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
...@@ -330,10 +327,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn) ...@@ -330,10 +327,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
user_return_notifier_unregister(urn); user_return_notifier_unregister(urn);
} }
local_irq_restore(flags); local_irq_restore(flags);
for (slot = 0; slot < user_return_msrs_global.nr; ++slot) { for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
values = &msrs->values[slot]; values = &msrs->values[slot];
if (values->host != values->curr) { if (values->host != values->curr) {
wrmsrl(user_return_msrs_global.msrs[slot], values->host); wrmsrl(kvm_uret_msrs_list[slot], values->host);
values->curr = values->host; values->curr = values->host;
} }
} }
...@@ -358,9 +355,9 @@ EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr); ...@@ -358,9 +355,9 @@ EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
void kvm_define_user_return_msr(unsigned slot, u32 msr) void kvm_define_user_return_msr(unsigned slot, u32 msr)
{ {
BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS); BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
user_return_msrs_global.msrs[slot] = msr; kvm_uret_msrs_list[slot] = msr;
if (slot >= user_return_msrs_global.nr) if (slot >= kvm_nr_uret_msrs)
user_return_msrs_global.nr = slot + 1; kvm_nr_uret_msrs = slot + 1;
} }
EXPORT_SYMBOL_GPL(kvm_define_user_return_msr); EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
...@@ -368,8 +365,8 @@ int kvm_find_user_return_msr(u32 msr) ...@@ -368,8 +365,8 @@ int kvm_find_user_return_msr(u32 msr)
{ {
int i; int i;
for (i = 0; i < user_return_msrs_global.nr; ++i) { for (i = 0; i < kvm_nr_uret_msrs; ++i) {
if (user_return_msrs_global.msrs[i] == msr) if (kvm_uret_msrs_list[i] == msr)
return i; return i;
} }
return -1; return -1;
...@@ -383,8 +380,8 @@ static void kvm_user_return_msr_cpu_online(void) ...@@ -383,8 +380,8 @@ static void kvm_user_return_msr_cpu_online(void)
u64 value; u64 value;
int i; int i;
for (i = 0; i < user_return_msrs_global.nr; ++i) { for (i = 0; i < kvm_nr_uret_msrs; ++i) {
rdmsrl_safe(user_return_msrs_global.msrs[i], &value); rdmsrl_safe(kvm_uret_msrs_list[i], &value);
msrs->values[i].host = value; msrs->values[i].host = value;
msrs->values[i].curr = value; msrs->values[i].curr = value;
} }
...@@ -399,7 +396,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) ...@@ -399,7 +396,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
value = (value & mask) | (msrs->values[slot].host & ~mask); value = (value & mask) | (msrs->values[slot].host & ~mask);
if (value == msrs->values[slot].curr) if (value == msrs->values[slot].curr)
return 0; return 0;
err = wrmsrl_safe(user_return_msrs_global.msrs[slot], value); err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
if (err) if (err)
return 1; return 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment