Commit b93463aa authored by Avi Kivity

KVM: Accelerated apic support

This adds a mechanism for exposing the virtual apic tpr to the guest, and a
protocol for letting the guest update the tpr without causing a vmexit if
conditions allow (e.g. there is no interrupt pending with a higher priority
than the new tpr).
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent b209749f
...@@ -815,7 +815,8 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) ...@@ -815,7 +815,8 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
if (!apic) if (!apic)
return; return;
apic_set_tpr(apic, ((cr8 & 0x0f) << 4)); apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
| (apic_get_reg(apic, APIC_TASKPRI) & 4));
} }
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
...@@ -1104,3 +1105,51 @@ void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) ...@@ -1104,3 +1105,51 @@ void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
} }
EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer); EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
void *vapic;
if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
return;
vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
kunmap_atomic(vapic, KM_USER0);
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic;
void *vapic;
if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
return;
apic = vcpu->arch.apic;
tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
max_irr = apic_find_highest_irr(apic);
if (max_irr < 0)
max_irr = 0;
max_isr = apic_find_highest_isr(apic);
if (max_isr < 0)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
kunmap_atomic(vapic, KM_USER0);
}
/*
 * Record the guest-physical address of the vapic mirror page.  The
 * sync helpers treat a zero address as "vapic disabled".  Silently
 * ignored when no in-kernel irqchip is present.
 */
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (irqchip_in_kernel(vcpu->kvm))
		vcpu->arch.apic->vapic_addr = vapic_addr;
}
...@@ -18,6 +18,8 @@ struct kvm_lapic { ...@@ -18,6 +18,8 @@ struct kvm_lapic {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct page *regs_page; struct page *regs_page;
void *regs; void *regs;
gpa_t vapic_addr;
struct page *vapic_page;
}; };
int kvm_create_lapic(struct kvm_vcpu *vcpu); int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu); void kvm_free_lapic(struct kvm_vcpu *vcpu);
...@@ -41,4 +43,8 @@ int kvm_lapic_enabled(struct kvm_vcpu *vcpu); ...@@ -41,4 +43,8 @@ int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec); void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
#endif #endif
...@@ -1173,6 +1173,19 @@ long kvm_arch_vcpu_ioctl(struct file *filp, ...@@ -1173,6 +1173,19 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = 0; r = 0;
break; break;
}; };
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
r = -EINVAL;
if (!irqchip_in_kernel(vcpu->kvm))
goto out;
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
r = 0;
kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
default: default:
r = -EINVAL; r = -EINVAL;
} }
...@@ -2214,6 +2227,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) ...@@ -2214,6 +2227,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
} }
switch (nr) { switch (nr) {
case KVM_HC_VAPIC_POLL_IRQ:
ret = 0;
break;
default: default:
ret = -KVM_ENOSYS; ret = -KVM_ENOSYS;
break; break;
...@@ -2421,6 +2437,29 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu, ...@@ -2421,6 +2437,29 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
vcpu->arch.irq_summary == 0); vcpu->arch.irq_summary == 0);
} }
/*
 * Pin the guest page backing the vapic mirror for the duration of a
 * vcpu run; undone by vapic_exit().
 *
 * NOTE(review): the gfn_to_page() result is stored without an error
 * check -- confirm the sync paths tolerate a bad-page return.
 */
static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic || !apic->vapic_addr)
		return;

	apic->vapic_page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
}
/*
 * Counterpart of vapic_enter(): drop the reference on the pinned vapic
 * page and record its frame in the memslot dirty log, since the guest
 * and kvm_lapic_sync_to_vapic() may have written to it.
 */
static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/* Vapic never enabled for this vcpu: nothing was pinned. */
	if (!apic || !apic->vapic_addr)
		return;

	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
}
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{ {
int r; int r;
...@@ -2435,6 +2474,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2435,6 +2474,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
} }
vapic_enter(vcpu);
preempted: preempted:
if (vcpu->guest_debug.enabled) if (vcpu->guest_debug.enabled)
kvm_x86_ops->guest_debug_pre(vcpu); kvm_x86_ops->guest_debug_pre(vcpu);
...@@ -2444,6 +2485,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2444,6 +2485,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (unlikely(r)) if (unlikely(r))
goto out; goto out;
if (vcpu->requests)
if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
&vcpu->requests)) {
kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
r = 0;
goto out;
}
kvm_inject_pending_timer_irqs(vcpu); kvm_inject_pending_timer_irqs(vcpu);
preempt_disable(); preempt_disable();
...@@ -2469,6 +2518,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2469,6 +2518,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
else else
kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run); kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
kvm_lapic_sync_to_vapic(vcpu);
vcpu->guest_mode = 1; vcpu->guest_mode = 1;
kvm_guest_enter(); kvm_guest_enter();
...@@ -2506,6 +2557,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2506,6 +2557,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu)) if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
vcpu->arch.exception.pending = false; vcpu->arch.exception.pending = false;
kvm_lapic_sync_from_vapic(vcpu);
r = kvm_x86_ops->handle_exit(kvm_run, vcpu); r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
if (r > 0) { if (r > 0) {
...@@ -2527,6 +2580,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2527,6 +2580,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
post_kvm_run_save(vcpu, kvm_run); post_kvm_run_save(vcpu, kvm_run);
vapic_exit(vcpu);
return r; return r;
} }
......
...@@ -216,6 +216,11 @@ struct kvm_tpr_access_ctl { ...@@ -216,6 +216,11 @@ struct kvm_tpr_access_ctl {
__u32 reserved[8]; __u32 reserved[8];
}; };
/* for KVM_SET_VAPIC_ADDR */
struct kvm_vapic_addr {
	__u64 vapic_addr;	/* guest-physical address of the vapic mirror page */
};
#define KVMIO 0xAE #define KVMIO 0xAE
/* /*
...@@ -291,5 +296,7 @@ struct kvm_tpr_access_ctl { ...@@ -291,5 +296,7 @@ struct kvm_tpr_access_ctl {
#define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2) #define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
/* Available with KVM_CAP_VAPIC */ /* Available with KVM_CAP_VAPIC */
#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) #define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
/* Available with KVM_CAP_VAPIC */
#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
#endif #endif
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
/* Return values for hypercalls */ /* Return values for hypercalls */
#define KVM_ENOSYS 1000 #define KVM_ENOSYS 1000
#define KVM_HC_VAPIC_POLL_IRQ 1
#ifdef __KERNEL__ #ifdef __KERNEL__
/* /*
* hypercalls use architecture specific * hypercalls use architecture specific
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment