Commit ccae663c authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM bugfixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: use dynamic percpu allocations for shared msrs area
  KVM: PPC: Book3S HV: Fix compilation without CONFIG_PPC_POWERNV
  powerpc: Corrected include header path in kvm_para.h
  Add rcu user eqs exception hooks for async page fault
parents 4ffd4ebf 013f6a5d
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_HCALL_TOKEN(num)	_EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
 
-#include <uapi/asm/epapr_hcalls.h>
+#include <asm/epapr_hcalls.h>
 
 #define KVM_FEATURE_MAGIC_PAGE	1
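
Note on the kvm_para.h change: headers under include/uapi are exported to userspace by scripts/headers_install, and inside an exported header the path <uapi/asm/...> does not exist; sibling uapi headers must be referenced as <asm/...> so the include still resolves after installation. A minimal sketch of the split-header convention, using a hypothetical header foo.h:

/* arch/xyz/include/asm/foo.h -- kernel-internal half (hypothetical) */
#ifndef _ASM_XYZ_FOO_H
#define _ASM_XYZ_FOO_H

#include <uapi/asm/foo.h>	/* only in-kernel code may use the uapi/ prefix */

/* kernel-only declarations follow here */

#endif /* _ASM_XYZ_FOO_H */

/* arch/xyz/include/uapi/asm/foo.h -- exported half (hypothetical) */
#ifndef _UAPI_ASM_XYZ_FOO_H
#define _UAPI_ASM_XYZ_FOO_H

/* ABI-visible definitions only; refer to siblings as <asm/...> so the
 * include resolves both in-tree and in the installed userspace headers. */

#endif /* _UAPI_ASM_XYZ_FOO_H */
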
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
 	unsigned long srr1 = vcpu->arch.shregs.msr;
+#ifdef CONFIG_PPC_POWERNV
 	struct opal_machine_check_event *opal_evt;
+#endif
 	long handled = 1;
 
 	if (srr1 & SRR1_MC_LDSTERR) {
@@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 		handled = 0;
 	}
 
+#ifdef CONFIG_PPC_POWERNV
 	/*
 	 * See if OPAL has already handled the condition.
 	 * We assume that if the condition is recovered then OPAL
@@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 	if (handled)
 		opal_evt->in_use = 0;
+#endif
 
 	return handled;
 }
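
Note on the CONFIG_PPC_POWERNV guards: struct opal_machine_check_event and the OPAL per-CPU data only exist when the PowerNV platform is configured in, so both the local variable and every use of it must sit under the same #ifdef. The usually preferred IS_ENABLED(CONFIG_...) idiom would not help here, because the compiler still has to see the type and the accessors even in the dead branch. A minimal sketch of the pattern, with hypothetical CONFIG_FOO names:

/* Sketch: guarding uses of a type that only exists under CONFIG_FOO. */
static long handle_event(void)
{
#ifdef CONFIG_FOO
	struct foo_event *evt;		/* type undefined when CONFIG_FOO=n */
#endif
	long handled = 1;

	/* ... platform-independent handling that may clear 'handled' ... */

#ifdef CONFIG_FOO
	evt = get_foo_event();		/* hypothetical accessor */
	if (evt && handled)
		evt->in_use = 0;
#endif
	return handled;
}
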
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -43,6 +43,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
 
 static int kvmapf = 1;
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
 	struct kvm_task_sleep_node n, *e;
 	DEFINE_WAIT(wait);
 
+	rcu_irq_enter();
+
 	spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
 	if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
 		hlist_del(&e->link);
 		kfree(e);
 		spin_unlock(&b->lock);
+
+		rcu_irq_exit();
 		return;
 	}
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
+			rcu_irq_exit();
 			native_safe_halt();
+			rcu_irq_enter();
 			local_irq_disable();
 		}
 	}
 	if (!n.halted)
 		finish_wait(&n.wq, &wait);
 
+	rcu_irq_exit();
 	return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		/* page is swapped out by the host. */
-		rcu_irq_enter();
+		exception_enter(regs);
 		exit_idle();
 		kvm_async_pf_task_wait((u32)read_cr2());
-		rcu_irq_exit();
+		exception_exit(regs);
 		break;
 	case KVM_PV_REASON_PAGE_READY:
 		rcu_irq_enter();
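
Note on the rcu_irq_enter()/rcu_irq_exit() bracketing: an async page fault can arrive while the vCPU is in an RCU extended quiescent state (idle, or userspace with context tracking), so the handler has to tell RCU it is back in kernel code before doing any work, and has to leave that mode again around native_safe_halt() so the halted CPU still looks quiescent and does not stall grace periods. A condensed sketch of the shape of kvm_async_pf_task_wait(), with the lookup details elided and page_arrived() a hypothetical stand-in for the real wakeup condition:

/* Condensed sketch, not the full function. */
void wait_for_page(void)
{
	rcu_irq_enter();	/* we may have interrupted an RCU-idle context */

	while (!page_arrived()) {	/* hypothetical wakeup predicate */
		/*
		 * We cannot reschedule, so halt. Drop out of RCU-watching
		 * mode first: a CPU parked in hlt must appear quiescent,
		 * or grace periods stall until the host injects PAGE_READY.
		 */
		rcu_irq_exit();
		native_safe_halt();	/* re-enables IRQs while halted */
		rcu_irq_enter();
		local_irq_disable();
	}

	rcu_irq_exit();		/* must be balanced on every return path */
}
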
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
-static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+static struct kvm_shared_msrs __percpu *shared_msrs;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 static void shared_msr_update(unsigned slot, u32 msr)
 {
-	struct kvm_shared_msrs *smsr;
 	u64 value;
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
-	smsr = &__get_cpu_var(shared_msrs);
 	/* only read, and nobody should modify it at this time,
 	 * so don't need lock */
 	if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
 		return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 static void drop_user_return_notifiers(void *ignore)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (smsr->registered)
 		kvm_on_user_return(&smsr->urn);
@@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
 
+	r = -ENOMEM;
+	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
+	if (!shared_msrs) {
+		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
+		goto out;
+	}
+
 	r = kvm_mmu_module_init();
 	if (r)
-		goto out;
+		goto out_free_percpu;
 
 	kvm_set_mmio_spte_mask();
 	kvm_init_msr_list();
@@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
 	return 0;
 
+out_free_percpu:
+	free_percpu(shared_msrs);
 out:
 	return r;
 }
@@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
 #endif
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
+	free_percpu(shared_msrs);
 }
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
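
Note on the shared_msrs conversion: DEFINE_PER_CPU() carves the storage out of the fixed per-CPU area reserved at link time, while alloc_percpu() takes it from the dynamic per-CPU allocator at runtime, which can fail and must be paired with free_percpu() on every teardown path, exactly as the new out_free_percpu label does above. A minimal sketch of the pattern, with hypothetical example_* names:

/* Sketch: dynamic per-CPU allocation instead of DEFINE_PER_CPU(). */
static struct kvm_shared_msrs __percpu *shared_msrs;

static int __init example_init(void)
{
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
	if (!shared_msrs)
		return -ENOMEM;	/* unlike DEFINE_PER_CPU, this can fail */
	return 0;
}

static void example_use(void)
{
	/* smp_processor_id() requires preemption to be disabled */
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	/* ... read or update smsr->values[] for this CPU ... */
}

static void __exit example_exit(void)
{
	free_percpu(shared_msrs);	/* release the dynamic per-CPU area */
}
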