Commit a8eeb04a authored by Avi Kivity

KVM: Add mini-API for vcpu->requests

Makes it a little more readable and hackable.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 36633f32
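
For context, the three helpers this patch introduces (see the include/linux hunk below) are thin wrappers around the atomic bitops that every call site previously open-coded against vcpu->requests. Here is a minimal, non-atomic userspace sketch of their semantics; struct toy_vcpu, the request number, and main() are illustrative stand-ins rather than kernel code, and the real helpers use the kernel's atomic set_bit(), test_and_set_bit() and test_and_clear_bit(), exactly as the diff shows:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative request number; the kernel defines these elsewhere. */
#define KVM_REQ_TLB_FLUSH 0

/* Toy stand-in for struct kvm_vcpu: just the requests bitmap. */
struct toy_vcpu {
        unsigned long requests;
};

/* kvm_make_request(): post a request (set_bit in the kernel). */
static void kvm_make_request(int req, struct toy_vcpu *vcpu)
{
        vcpu->requests |= 1UL << req;
}

/* kvm_make_check_request(): post a request and report whether it was
 * already pending (test_and_set_bit in the kernel). */
static bool kvm_make_check_request(int req, struct toy_vcpu *vcpu)
{
        bool was_pending = vcpu->requests & (1UL << req);

        vcpu->requests |= 1UL << req;
        return was_pending;
}

/* kvm_check_request(): consume a request if pending
 * (test_and_clear_bit in the kernel). */
static bool kvm_check_request(int req, struct toy_vcpu *vcpu)
{
        bool was_pending = vcpu->requests & (1UL << req);

        vcpu->requests &= ~(1UL << req);
        return was_pending;
}

int main(void)
{
        struct toy_vcpu vcpu = { .requests = 0 };

        kvm_make_request(KVM_REQ_TLB_FLUSH, &vcpu);
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, &vcpu))
                printf("flush\n");    /* taken: bit was set, now cleared */
        if (!kvm_check_request(KVM_REQ_TLB_FLUSH, &vcpu))
                printf("consumed\n"); /* bit already cleared above */
        return 0;
}

kvm_make_check_request() serves the broadcast path in make_all_cpus_request(): a request already pending on a vcpu lets the loop skip that vcpu.
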
@@ -534,7 +534,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
         struct kvm_vcpu *vcpu = apic->vcpu;
         struct kvm_run *run = vcpu->run;

-        set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
+        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
         run->tpr_access.rip = kvm_rip_read(vcpu);
         run->tpr_access.is_write = write;
 }
@@ -1378,7 +1378,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,

                 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                 if (sp->unsync_children) {
-                        set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+                        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                         kvm_mmu_mark_parents_unsync(sp);
                 } else if (sp->unsync)
                         kvm_mmu_mark_parents_unsync(sp);
@@ -2131,7 +2131,7 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
         int ret = 0;

         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
-                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                 ret = 1;
         }
@@ -2329,7 +2329,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
         ++vcpu->stat.tlb_flush;
-        set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }

 static void paging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1494,7 +1494,7 @@ static void svm_handle_mce(struct vcpu_svm *svm)
          */
         pr_err("KVM: Guest triggered AMD Erratum 383\n");

-        set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
+        kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

         return;
 }
@@ -32,7 +32,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
         if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
                 atomic_inc(&ktimer->pending);
                 /* FIXME: this code should not know anything about vcpus */
-                set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
+                kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
         }

         if (waitqueue_active(q))
@@ -899,7 +899,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 unsigned long sysenter_esp;

                 kvm_migrate_timers(vcpu);
-                set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                 local_irq_disable();
                 list_add(&vmx->local_vcpus_link,
                          &per_cpu(vcpus_on_cpu, cpu));
@@ -296,7 +296,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
         prev_nr = vcpu->arch.exception.nr;
         if (prev_nr == DF_VECTOR) {
                 /* triple fault -> shutdown */
-                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                 return;
         }
         class1 = exception_class(prev_nr);
@@ -948,7 +948,7 @@ static int kvm_request_guest_time_update(struct kvm_vcpu *v)
         if (!vcpu->time_page)
                 return 0;

-        set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
+        kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
         return 1;
 }
@@ -2253,7 +2253,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
                 printk(KERN_DEBUG "kvm: set_mce: "
                        "injects mce exception while "
                        "previous one is in progress!\n");
-                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                 return 0;
         }
         if (banks[1] & MCI_STATUS_VAL)
@@ -4617,7 +4617,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 vcpu->run->request_interrupt_window;

         if (vcpu->requests)
-                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
                         kvm_mmu_unload(vcpu);

         r = kvm_mmu_reload(vcpu);
@@ -4625,26 +4625,25 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 goto out;

         if (vcpu->requests) {
-                if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
+                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
                         __kvm_migrate_timers(vcpu);
-                if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
+                if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu))
                         kvm_write_guest_time(vcpu);
-                if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
+                if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                         kvm_mmu_sync_roots(vcpu);
-                if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                         kvm_x86_ops->tlb_flush(vcpu);
-                if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
-                                &vcpu->requests)) {
+                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
                         r = 0;
                         goto out;
                 }
-                if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
+                if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
                         r = 0;
                         goto out;
                 }
-                if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
+                if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
                         vcpu->fpu_active = 0;
                         kvm_x86_ops->fpu_deactivate(vcpu);
                 }
@@ -4773,7 +4772,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                         kvm_vcpu_block(vcpu);
                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-                        if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+                        if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
                         {
                                 switch(vcpu->arch.mp_state) {
                                 case KVM_MP_STATE_HALTED:
@@ -5255,7 +5254,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
         vcpu->guest_fpu_loaded = 0;
         fpu_save_init(&vcpu->arch.guest_fpu);
         ++vcpu->stat.fpu_reload;
-        set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
+        kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
         trace_kvm_fpu(0);
 }
@@ -624,5 +624,20 @@ static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 #endif

+static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+        set_bit(req, &vcpu->requests);
+}
+
+static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
+{
+        return test_and_set_bit(req, &vcpu->requests);
+}
+
+static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
+{
+        return test_and_clear_bit(req, &vcpu->requests);
+}
+
 #endif
@@ -145,7 +145,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
         raw_spin_lock(&kvm->requests_lock);
         me = smp_processor_id();
         kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (test_and_set_bit(req, &vcpu->requests))
+                if (kvm_make_check_request(req, vcpu))
                         continue;
                 cpu = vcpu->cpu;
                 if (cpus != NULL && cpu != -1 && cpu != me)
@@ -1212,7 +1212,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                 if (kvm_arch_vcpu_runnable(vcpu)) {
-                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
+                        kvm_make_request(KVM_REQ_UNHALT, vcpu);
                         break;
                 }
                 if (kvm_cpu_has_pending_timer(vcpu))