Commit 1581ff3d authored by James Hogan

KVM: MIPS/MMU: Move preempt/ASID handling to implementation

The MIPS KVM host and guest GVA ASIDs may need to be regenerated when a
process running in guest context is scheduled in, which is currently
done from the kvm_arch_vcpu_load() / kvm_arch_vcpu_put() functions in
mmu.c.

However, this is a fairly implementation-specific detail. VZ, for
example, may use GuestIDs instead of normal ASIDs to distinguish
mappings belonging to different guests, and even on VZ without GuestID
the root TLB is used differently than it is by trap & emulate.

Trap & emulate GVA ASIDs relate only to the user part of the full
address space, so they can be left active during guest exit handling
(guest context) to allow guest instructions to be easily read and
translated.

VZ root ASIDs, however, are for GPA mappings, so they can't be left
active while normal kernel code runs. They also aren't useful for
accessing guest virtual memory, and the CP0_BadInstr[P] registers
should be available to provide the encodings of trapping guest
instructions anyway.

Therefore, move the ASID preemption handling into the implementation's
vcpu_load / vcpu_put callbacks.
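
Concretely, the handling lands behind the existing kvm_mips_callbacks
hook table. A minimal sketch of the two hooks involved (simplified from
arch/mips/include/asm/kvm_host.h; the real struct has many more
members):

/*
 * Simplified sketch of the per-implementation hook table; both hooks
 * are invoked with interrupts disabled from kvm_arch_vcpu_load() /
 * kvm_arch_vcpu_put() in mmu.c.
 */
struct kvm_mips_callbacks {
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	/* ... many other implementation hooks elided ... */
};

Trap & emulate wires these to kvm_trap_emul_vcpu_load() /
kvm_trap_emul_vcpu_put() in the second file of the diff below; a VZ
implementation is then free to manage GuestIDs or root ASIDs instead.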
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent a60b8438
arch/mips/kvm/mmu.c
@@ -235,39 +235,12 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;

 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

-	/* Allocate new kernel and user ASIDs if needed */
-
 	local_irq_save(flags);

-	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
-		vcpu->arch.guest_kernel_asid[cpu] =
-			vcpu->arch.guest_kernel_mm.context.asid[cpu];
-
-		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
-	}
-
-	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
-						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
-		vcpu->arch.guest_user_asid[cpu] =
-			vcpu->arch.guest_user_mm.context.asid[cpu];
-
-		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-			  vcpu->arch.guest_user_asid[cpu]);
-	}
-
 	if (vcpu->arch.last_sched_cpu != cpu) {
 		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
 			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
@@ -279,25 +252,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_mips_migrate_count(vcpu);
 	}

-	/*
-	 * If we preempted while the guest was executing, then reload the ASID
-	 * based on the mode of the Guest (Kernel/User)
-	 */
-	if (current->flags & PF_VCPU) {
-		if (KVM_GUEST_KERNEL_MODE(vcpu))
-			write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
-					 asid_mask);
-		else
-			write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
-					 asid_mask);
-		ehb();
-	}
-
 	/* restore guest state to registers */
 	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

 	local_irq_restore(flags);
 }

 /* ASID can change if another task is scheduled during preemption */
@@ -314,15 +272,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	/* save guest state in registers */
 	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

-	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     asid_version_mask(cpu))) {
-		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
-			  cpu_context(cpu, current->mm));
-		drop_mmu_context(current->mm, cpu);
-	}
-	write_c0_entryhi(cpu_asid(cpu, current->mm));
-	ehb();
-
 	local_irq_restore(flags);
 }
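
After the patch, the arch-level functions reduce to scheduling
bookkeeping plus the callback. A rough reconstruction of the resulting
kvm_arch_vcpu_load() from the context lines above (not a verbatim quote
of the post-patch file; collapsed context is omitted):

/* Reconstructed shape of kvm_arch_vcpu_load() after this commit. */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	/* Migrate the hrtimer-based CP0_Count emulation if we changed CPU. */
	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		kvm_mips_migrate_count(vcpu);
	}

	/* All ASID handling now lives behind the implementation hook. */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}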
arch/mips/kvm/trap_emul.c
@@ -11,9 +11,9 @@
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/vmalloc.h>
-
 #include <linux/kvm_host.h>
+#include <linux/vmalloc.h>

+#include <asm/mmu_context.h>

 #include "interrupt.h"
@@ -635,6 +635,49 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,

 static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+
+	/* Allocate new kernel and user ASIDs if needed */
+
+	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+						asid_version_mask(cpu)) {
+		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+		vcpu->arch.guest_kernel_asid[cpu] =
+			vcpu->arch.guest_kernel_mm.context.asid[cpu];
+
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+	}
+
+	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+						asid_version_mask(cpu)) {
+		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+		vcpu->arch.guest_user_asid[cpu] =
+			vcpu->arch.guest_user_mm.context.asid[cpu];
+
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+			  vcpu->arch.guest_user_asid[cpu]);
+	}
+
+	/*
+	 * Were we in guest context? If so then the pre-empted ASID is
+	 * no longer valid, we need to set it to what it should be based
+	 * on the mode of the Guest (Kernel/User)
+	 */
+	if (current->flags & PF_VCPU) {
+		if (KVM_GUEST_KERNEL_MODE(vcpu))
+			write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
+					 asid_mask);
+		else
+			write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
+					 asid_mask);
+		ehb();
+	}
+
 	return 0;
 }
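
The guard on both allocations is the usual MIPS ASID-versioning trick:
the upper bits of an ASID value carry a generation count, and an ASID is
stale once those bits no longer match the CPU's asid_cache. A small
self-contained demo of the arithmetic (mask values are illustrative, not
the kernel's real cpu_asid_mask() / asid_version_mask()):

/* asid_demo.c — standalone illustration of the stale-ASID test above. */
#include <stdio.h>

#define ASID_MASK	  0xffUL	/* low bits: the ASID itself */
#define ASID_VERSION_MASK (~ASID_MASK)	/* high bits: generation counter */

int main(void)
{
	/* The CPU's current generation is 3; a fresh ASID would carry it. */
	unsigned long asid_cache = (3UL << 8) | 0x05;

	/* This vCPU's ASID was allocated back in generation 2... */
	unsigned long vcpu_asid = (2UL << 8) | 0x42;

	/* ...so the version bits differ and the ASID must be regenerated:
	 * the same "(x ^ asid_cache(cpu)) & asid_version_mask(cpu)" test. */
	if ((vcpu_asid ^ asid_cache) & ASID_VERSION_MASK)
		printf("ASID %#lx stale vs cache %#lx: regenerate\n",
		       vcpu_asid, asid_cache);
	else
		printf("ASID %#lx still valid\n", vcpu_asid);

	return 0;
}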
@@ -642,6 +685,15 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 {
 	kvm_lose_fpu(vcpu);

+	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+	     asid_version_mask(cpu))) {
+		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
+			  cpu_context(cpu, current->mm));
+		drop_mmu_context(current->mm, cpu);
+	}
+	write_c0_entryhi(cpu_asid(cpu, current->mm));
+	ehb();
+
 	return 0;
 }
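
On the way out of guest context, trap & emulate thus drops the host's
MMU context if its ASID generation went stale while the vCPU ran, then
restores the host ASID into CP0_EntryHi. The payoff of the indirection
is that another implementation can do something entirely different in
these hooks; a purely hypothetical sketch (these names do not exist in
the tree at this point):

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* e.g. check/assign a GuestID for this vCPU on this CPU */
	return 0;
}

static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	/* root ASIDs cover GPA mappings, so nothing is left active here */
	return 0;
}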