Commit a2c046e4 authored by James Hogan

KVM: MIPS: Add vcpu_run() & vcpu_reenter() callbacks

Add implementation callbacks for entering the guest (vcpu_run()) and
reentering the guest (vcpu_reenter()), allowing implementation-specific
operations to be performed before entering the guest or after returning
to the host, without cluttering kvm_arch_vcpu_ioctl_run().

This allows the T&E-specific lazy user GVA flush to be moved into
trap_emul.c, along with the disabling of the HTW. We also move
kvm_mips_deliver_interrupts(), as VZ will need to restore the guest timer
state prior to delivering interrupts.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent c550d539
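As a purely illustrative aside, the sketch below shows how another implementation could plug into the two new hooks. Every kvm_vz_* name and the timer-restore helper are assumptions invented for this example; only the callback signatures and the kvm_mips_deliver_interrupts()/kvm_read_c0_guest_cause()/vcpu->arch.vcpu_run() symbols come from the patch itself:

        /* Hypothetical backend wiring for the new callbacks (sketch only). */
        static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
        {
                /* Restore guest timer state before interrupts are delivered
                 * (assumed helper, motivated by the commit message). */
                kvm_vz_restore_guest_timer(vcpu);
        }

        static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
        {
                /* Implementation-specific work before the low-level guest entry. */
                kvm_vz_vcpu_reenter(run, vcpu);
                kvm_mips_deliver_interrupts(vcpu,
                                            kvm_read_c0_guest_cause(vcpu->arch.cop0));

                return vcpu->arch.vcpu_run(run, vcpu);
        }

        static struct kvm_mips_callbacks kvm_vz_callbacks = {
                /* ... exit handlers and other callbacks ... */
                .vcpu_run = kvm_vz_vcpu_run,
                .vcpu_reenter = kvm_vz_vcpu_reenter,
        };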
@@ -539,6 +539,8 @@ struct kvm_mips_callbacks {
                            const struct kvm_one_reg *reg, s64 v);
         int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
         int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
+        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+        void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
...
@@ -410,32 +410,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         return -ENOIOCTLCMD;
 }
 
-/* Must be called with preemption disabled, just before entering guest */
-static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
-{
-        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
-        struct mips_coproc *cop0 = vcpu->arch.cop0;
-        int i, cpu = smp_processor_id();
-        unsigned int gasid;
-
-        /*
-         * Lazy host ASID regeneration for guest user mode.
-         * If the guest ASID has changed since the last guest usermode
-         * execution, regenerate the host ASID so as to invalidate stale TLB
-         * entries.
-         */
-        if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
-                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
-                if (gasid != vcpu->arch.last_user_gasid) {
-                        kvm_get_new_mmu_context(user_mm, cpu, vcpu);
-                        for_each_possible_cpu(i)
-                                if (i != cpu)
-                                        cpu_context(i, user_mm) = 0;
-                        vcpu->arch.last_user_gasid = gasid;
-                }
-        }
-}
-
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
         int r = 0;
@@ -453,25 +427,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         lose_fpu(1);
 
         local_irq_disable();
-        /* Check if we have any exceptions/interrupts pending */
-        kvm_mips_deliver_interrupts(vcpu,
-                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));
-
         guest_enter_irqoff();
-
-        /* Disable hardware page table walking while in guest */
-        htw_stop();
-
         trace_kvm_enter(vcpu);
 
-        kvm_mips_check_asids(vcpu);
-
-        r = vcpu->arch.vcpu_run(run, vcpu);
+        r = kvm_mips_callbacks->vcpu_run(run, vcpu);
+
         trace_kvm_out(vcpu);
-
-        /* Re-enable HTW before enabling interrupts */
-        htw_start();
-
         guest_exit_irqoff();
         local_irq_enable();
 
@@ -1570,7 +1531,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
         if (ret == RESUME_GUEST) {
                 trace_kvm_reenter(vcpu);
 
-                kvm_mips_check_asids(vcpu);
+                kvm_mips_callbacks->vcpu_reenter(run, vcpu);
 
                 /*
                  * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
...
@@ -692,6 +692,52 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
         return 0;
 }
 
+static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
+                                       struct kvm_vcpu *vcpu)
+{
+        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+        struct mips_coproc *cop0 = vcpu->arch.cop0;
+        int i, cpu = smp_processor_id();
+        unsigned int gasid;
+
+        /*
+         * Lazy host ASID regeneration for guest user mode.
+         * If the guest ASID has changed since the last guest usermode
+         * execution, regenerate the host ASID so as to invalidate stale TLB
+         * entries.
+         */
+        if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
+                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+                if (gasid != vcpu->arch.last_user_gasid) {
+                        kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+                        for_each_possible_cpu(i)
+                                if (i != cpu)
+                                        cpu_context(i, user_mm) = 0;
+                        vcpu->arch.last_user_gasid = gasid;
+                }
+        }
+}
+
+static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+        int r;
+
+        /* Check if we have any exceptions/interrupts pending */
+        kvm_mips_deliver_interrupts(vcpu,
+                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+        kvm_trap_emul_vcpu_reenter(run, vcpu);
+
+        /* Disable hardware page table walking while in guest */
+        htw_stop();
+
+        r = vcpu->arch.vcpu_run(run, vcpu);
+
+        htw_start();
+
+        return r;
+}
+
 static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
         /* exit handlers */
         .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
@@ -724,6 +770,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
         .set_one_reg = kvm_trap_emul_set_one_reg,
         .vcpu_load = kvm_trap_emul_vcpu_load,
         .vcpu_put = kvm_trap_emul_vcpu_put,
+        .vcpu_run = kvm_trap_emul_vcpu_run,
+        .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
 };
 
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
...