Commit 9f9a1ce3 authored by Andy Honig, committed by Jiri Slaby

KVM: x86: Prevent host from panicking on shared MSR writes.

commit 8b3c3104 upstream.

The previous patch blocked invalid writes directly when the MSR
is written.  As a precaution, prevent future similar mistakes by
gracefully handling GPs caused by writes to shared MSRs.
Signed-off-by: Andrew Honig <ahonig@google.com>
[Remove parts obsoleted by Nadav's patch. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent 24854135
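
The caller-side pattern introduced below in the vmx.c hunk can be summarized as: remember the old cached value, attempt the MSR write through an interface that reports failure instead of letting a #GP take down the host, and roll the cached value back on error. The following is a minimal standalone C sketch of that pattern; struct msr_entry, write_msr_checked() and set_guest_msr() are hypothetical stand-ins for the KVM structures and for kvm_set_shared_msr()/wrmsrl_safe(), not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a cached guest MSR slot. */
struct msr_entry {
	uint32_t index;
	uint64_t data;
};

/*
 * Hypothetical stand-in for kvm_set_shared_msr()/wrmsrl_safe():
 * returns 0 on success, non-zero if the write would have faulted.
 */
static int write_msr_checked(uint32_t index, uint64_t value)
{
	/* Simulate a rejected (faulting) write for one value, for demo only. */
	(void)index;
	return (value == UINT64_MAX) ? 1 : 0;
}

/* Mirrors the vmx_set_msr() change: roll back the cached value on failure. */
static int set_guest_msr(struct msr_entry *msr, uint64_t data)
{
	uint64_t old_msr_data = msr->data;
	int ret;

	msr->data = data;
	ret = write_msr_checked(msr->index, msr->data);
	if (ret)
		msr->data = old_msr_data;	/* keep cached state consistent */
	return ret;
}

int main(void)
{
	struct msr_entry msr = { .index = 0xc0000080, .data = 0 };

	printf("ok write:  ret=%d data=%llu\n",
	       set_guest_msr(&msr, 42), (unsigned long long)msr.data);
	printf("bad write: ret=%d data=%llu\n",
	       set_guest_msr(&msr, UINT64_MAX), (unsigned long long)msr.data);
	return 0;
}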
arch/x86/include/asm/kvm_host.h
@@ -1032,7 +1032,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
...
arch/x86/kvm/vmx.c
@@ -2540,12 +2540,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			break;
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
+			u64 old_msr_data = msr->data;
 			msr->data = data;
 			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
 				preempt_disable();
-				kvm_set_shared_msr(msr->index, msr->data,
-						   msr->mask);
+				ret = kvm_set_shared_msr(msr->index, msr->data,
+							 msr->mask);
 				preempt_enable();
+				if (ret)
+					msr->data = old_msr_data;
 			}
 			break;
 		}
...
arch/x86/kvm/x86.c
@@ -225,20 +225,25 @@ static void kvm_shared_msr_cpu_online(void)
 		shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
 	unsigned int cpu = smp_processor_id();
 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
+	int err;
 
 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
-		return;
+		return 0;
 	smsr->values[slot].curr = value;
-	wrmsrl(shared_msrs_global.msrs[slot], value);
+	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
+	if (err)
+		return 1;
+
 	if (!smsr->registered) {
 		smsr->urn.on_user_return = kvm_on_user_return;
 		user_return_notifier_register(&smsr->urn);
 		smsr->registered = true;
 	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
...