Commit 1534b396 authored by James Hogan

KVM: MIPS/MMU: Simplify ASID restoration

KVM T&E uses an ASID for guest kernel mode and an ASID for guest user
mode. The current ASID is saved when the guest is scheduled out, and
restored when scheduling back in, with checks for whether the ASID needs
to be regenerated.

This isn't really necessary, as the ASID can be determined from the
current guest mode, so let's simplify it to just read the required ASID
from guest_kernel_asid or guest_user_asid, even if the ASID hasn't been
regenerated.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent 00104b41
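
The change boils down to picking the live ASID from the guest's current mode at
vcpu_load time, instead of replaying a saved EntryHi value. A minimal sketch of
that logic is shown below; it is illustrative only and not the committed code
(the helper name is invented here, while KVM_GUEST_KERNEL_MODE(),
guest_kernel_asid[], guest_user_asid[], write_c0_entryhi() and ehb() are the
existing definitions referenced by the diff that follows):

	/* Illustrative sketch, not the committed code. */
	static void restore_guest_asid_sketch(struct kvm_vcpu *vcpu, int cpu,
					      unsigned long asid_mask)
	{
		unsigned long asid;

		/* Choose the ASID matching the guest's current privilege mode. */
		if (KVM_GUEST_KERNEL_MODE(vcpu))
			asid = vcpu->arch.guest_kernel_asid[cpu];
		else
			asid = vcpu->arch.guest_user_asid[cpu];

		/* Load it into EntryHi; ehb() orders the CP0 write before use. */
		write_c0_entryhi(asid & asid_mask);
		ehb();
	}

Because the guest mode alone determines the ASID, the preempt_entryhi snapshot
taken in kvm_arch_vcpu_put() becomes redundant and is removed in the diff below.
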
@@ -318,9 +318,6 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;
 
-	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
-	unsigned long preempt_entryhi;
-
 	/* S/W Based TLB for guest */
 	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

@@ -237,7 +237,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;
-	int newasid = 0;
 
 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

@@ -250,7 +249,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 			vcpu->arch.guest_kernel_mm.context.asid[cpu];
-		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));

@@ -263,7 +261,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
-		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));

@@ -282,35 +279,18 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_mips_migrate_count(vcpu);
 	}
 
-	if (!newasid) {
-		/*
-		 * If we preempted while the guest was executing, then reload
-		 * the pre-empted ASID
-		 */
-		if (current->flags & PF_VCPU) {
-			write_c0_entryhi(vcpu->arch.
-					 preempt_entryhi & asid_mask);
-			ehb();
-		}
-	} else {
-		/* New ASIDs were allocated for the VM */
-
-		/*
-		 * Were we in guest context? If so then the pre-empted ASID is
-		 * no longer valid, we need to set it to what it should be based
-		 * on the mode of the Guest (Kernel/User)
-		 */
-		if (current->flags & PF_VCPU) {
-			if (KVM_GUEST_KERNEL_MODE(vcpu))
-				write_c0_entryhi(vcpu->arch.
-						 guest_kernel_asid[cpu] &
-						 asid_mask);
-			else
-				write_c0_entryhi(vcpu->arch.
-						 guest_user_asid[cpu] &
-						 asid_mask);
-			ehb();
-		}
-	}
+	/*
+	 * If we preempted while the guest was executing, then reload the ASID
+	 * based on the mode of the Guest (Kernel/User)
+	 */
+	if (current->flags & PF_VCPU) {
+		if (KVM_GUEST_KERNEL_MODE(vcpu))
+			write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
+					 asid_mask);
+		else
+			write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
+					 asid_mask);
+		ehb();
+	}
 
 	/* restore guest state to registers */

@@ -329,8 +309,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
-
-	vcpu->arch.preempt_entryhi = read_c0_entryhi();
 	vcpu->arch.last_sched_cpu = cpu;
 
 	/* save guest state in registers */