Commit c550d539 authored by James Hogan

KVM: MIPS: Remove duplicated ASIDs from vcpu

The kvm_vcpu_arch structure contains both mm_structs for allocating MMU
contexts (primarily the ASID) but it also copies the resulting ASIDs
into guest_{user,kernel}_asid[] arrays which are referenced from uasm
generated code.

This duplication doesn't seem to serve any purpose, and it gets in the
way of generalising the ASID handling across guest kernel/user modes, so
let's just extract the ASID straight out of the mm_struct on demand, and
in fact there are convenient cpu_context() and cpu_asid() macros for
doing so.

To reduce the verbosity of this code we do also add kern_mm and user_mm
local variables where the kernel and user mm_structs are used.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent 1581ff3d
...@@ -321,9 +321,7 @@ struct kvm_vcpu_arch { ...@@ -321,9 +321,7 @@ struct kvm_vcpu_arch {
/* S/W Based TLB for guest */ /* S/W Based TLB for guest */
struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
/* Cached guest kernel/user ASIDs */ /* Guest kernel/user [partial] mm */
u32 guest_user_asid[NR_CPUS];
u32 guest_kernel_asid[NR_CPUS];
struct mm_struct guest_kernel_mm, guest_user_mm; struct mm_struct guest_kernel_mm, guest_user_mm;
/* Guest ASID of last user mode execution */ /* Guest ASID of last user mode execution */
......
...@@ -856,6 +856,8 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) ...@@ -856,6 +856,8 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
struct kvm_mips_tlb *tlb) struct kvm_mips_tlb *tlb)
{ {
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
int cpu, i; int cpu, i;
bool user; bool user;
...@@ -879,8 +881,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, ...@@ -879,8 +881,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
if (i == cpu) if (i == cpu)
continue; continue;
if (user) if (user)
vcpu->arch.guest_user_asid[i] = 0; cpu_context(i, user_mm) = 0;
vcpu->arch.guest_kernel_asid[i] = 0; cpu_context(i, kern_mm) = 0;
} }
preempt_enable(); preempt_enable();
...@@ -1056,6 +1058,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, ...@@ -1056,6 +1058,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel; u32 rt, rd, sel;
unsigned long curr_pc; unsigned long curr_pc;
...@@ -1178,13 +1181,11 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, ...@@ -1178,13 +1181,11 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
*/ */
preempt_disable(); preempt_disable();
cpu = smp_processor_id(); cpu = smp_processor_id();
kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, kvm_get_new_mmu_context(kern_mm,
cpu, vcpu); cpu, vcpu);
vcpu->arch.guest_kernel_asid[cpu] =
vcpu->arch.guest_kernel_mm.context.asid[cpu];
for_each_possible_cpu(i) for_each_possible_cpu(i)
if (i != cpu) if (i != cpu)
vcpu->arch.guest_kernel_asid[i] = 0; cpu_context(i, kern_mm) = 0;
preempt_enable(); preempt_enable();
} }
kvm_write_c0_guest_entryhi(cop0, kvm_write_c0_guest_entryhi(cop0,
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
*/ */
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/msa.h> #include <asm/msa.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/uasm.h> #include <asm/uasm.h>
...@@ -286,23 +287,26 @@ static void *kvm_mips_build_enter_guest(void *addr) ...@@ -286,23 +287,26 @@ static void *kvm_mips_build_enter_guest(void *addr)
uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
uasm_i_xori(&p, T0, T0, KSU_USER); uasm_i_xori(&p, T0, T0, KSU_USER);
uasm_il_bnez(&p, &r, T0, label_kernel_asid); uasm_il_bnez(&p, &r, T0, label_kernel_asid);
UASM_i_ADDIU(&p, T1, K1, UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
offsetof(struct kvm_vcpu_arch, guest_kernel_asid)); guest_kernel_mm.context.asid));
/* else user */ /* else user */
UASM_i_ADDIU(&p, T1, K1, UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
offsetof(struct kvm_vcpu_arch, guest_user_asid)); guest_user_mm.context.asid));
uasm_l_kernel_asid(&l, p); uasm_l_kernel_asid(&l, p);
/* t1: contains the base of the ASID array, need to get the cpu id */ /* t1: contains the base of the ASID array, need to get the cpu id */
/* smp_processor_id */ /* smp_processor_id */
uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP); uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
/* x4 */ /* index the ASID array */
uasm_i_sll(&p, T2, T2, 2); uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
UASM_i_ADDU(&p, T3, T1, T2); UASM_i_ADDU(&p, T3, T1, T2);
uasm_i_lw(&p, K0, 0, T3); UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
/* x sizeof(struct cpuinfo_mips)/4 */ /*
uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4); * reuse ASID array offset
* cpuinfo_mips is a multiple of sizeof(long)
*/
uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
uasm_i_mul(&p, T2, T2, T3); uasm_i_mul(&p, T2, T2, T3);
UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask); UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
......
...@@ -413,6 +413,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, ...@@ -413,6 +413,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
/* Must be called with preemption disabled, just before entering guest */ /* Must be called with preemption disabled, just before entering guest */
static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
{ {
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
int i, cpu = smp_processor_id(); int i, cpu = smp_processor_id();
unsigned int gasid; unsigned int gasid;
...@@ -426,13 +427,10 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) ...@@ -426,13 +427,10 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
if (!KVM_GUEST_KERNEL_MODE(vcpu)) { if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
if (gasid != vcpu->arch.last_user_gasid) { if (gasid != vcpu->arch.last_user_gasid) {
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, kvm_get_new_mmu_context(user_mm, cpu, vcpu);
vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
for_each_possible_cpu(i) for_each_possible_cpu(i)
if (i != cpu) if (i != cpu)
vcpu->arch.guest_user_asid[cpu] = 0; cpu_context(i, user_mm) = 0;
vcpu->arch.last_user_gasid = gasid; vcpu->arch.last_user_gasid = gasid;
} }
} }
......
...@@ -15,18 +15,18 @@ ...@@ -15,18 +15,18 @@
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{ {
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
return vcpu->arch.guest_kernel_asid[cpu] & return cpu_asid(cpu, kern_mm);
cpu_asid_mask(&cpu_data[cpu]);
} }
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{ {
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
return vcpu->arch.guest_user_asid[cpu] & return cpu_asid(cpu, user_mm);
cpu_asid_mask(&cpu_data[cpu]);
} }
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
......
...@@ -38,18 +38,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_instance); ...@@ -38,18 +38,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_instance);
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{ {
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
return vcpu->arch.guest_kernel_asid[cpu] & return cpu_asid(cpu, kern_mm);
cpu_asid_mask(&cpu_data[cpu]);
} }
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{ {
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
return vcpu->arch.guest_user_asid[cpu] & return cpu_asid(cpu, user_mm);
cpu_asid_mask(&cpu_data[cpu]);
} }
inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
......
...@@ -635,32 +635,29 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, ...@@ -635,32 +635,29 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ {
unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
/* Allocate new kernel and user ASIDs if needed */ /* Allocate new kernel and user ASIDs if needed */
if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) & if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu)) { asid_version_mask(cpu)) {
kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); kvm_get_new_mmu_context(kern_mm, cpu, vcpu);
vcpu->arch.guest_kernel_asid[cpu] =
vcpu->arch.guest_kernel_mm.context.asid[cpu];
kvm_debug("[%d]: cpu_context: %#lx\n", cpu, kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
cpu_context(cpu, current->mm)); cpu_context(cpu, current->mm));
kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n", kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#lx\n",
cpu, vcpu->arch.guest_kernel_asid[cpu]); cpu, cpu_context(cpu, kern_mm));
} }
if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) & if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu)) { asid_version_mask(cpu)) {
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); kvm_get_new_mmu_context(user_mm, cpu, vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
kvm_debug("[%d]: cpu_context: %#lx\n", cpu, kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
cpu_context(cpu, current->mm)); cpu_context(cpu, current->mm));
kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu, kvm_debug("[%d]: Allocated new ASID for Guest User: %#lx\n",
vcpu->arch.guest_user_asid[cpu]); cpu, cpu_context(cpu, user_mm));
} }
/* /*
...@@ -670,11 +667,9 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -670,11 +667,9 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*/ */
if (current->flags & PF_VCPU) { if (current->flags & PF_VCPU) {
if (KVM_GUEST_KERNEL_MODE(vcpu)) if (KVM_GUEST_KERNEL_MODE(vcpu))
write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] & write_c0_entryhi(cpu_asid(cpu, kern_mm));
asid_mask);
else else
write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] & write_c0_entryhi(cpu_asid(cpu, user_mm));
asid_mask);
ehb(); ehb();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment