Commit f2e10669 authored by chai wen, committed by Gleb Natapov

KVM: Drop FOLL_GET in GUP when doing async page fault

Page pinning is not mandatory in kvm async page fault processing: once the
async page fault event is delivered to the guest, the guest accesses the
page again and does its own GUP. Drop the FOLL_GET flag from the GUP call
in the async_pf code, and simplify the check/clear processing accordingly.
Suggested-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Gu zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: chai wen <chaiw.fnst@cn.fujitsu.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent a7efdf6b
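
Why passing a NULL pages array drops the pin: in kernels of this era,
get_user_pages() only sets FOLL_GET when the caller asks for the struct
page pointers back. A simplified sketch of that entry point for context
(trimmed and lightly paraphrased from the period's mm/memory.c, not the
verbatim source):

/*
 * Simplified sketch: FOLL_GET (take a reference on each faulted page) is
 * only added when the caller passes a non-NULL pages array.  With
 * pages == NULL, as async_pf_execute() does after this patch, the memory
 * is faulted in but not pinned, so there is no leftover reference for
 * kvm_release_page_clean() to drop.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                    unsigned long start, unsigned long nr_pages, int write,
                    int force, struct page **pages,
                    struct vm_area_struct **vmas)
{
        int flags = FOLL_TOUCH;

        if (pages)
                flags |= FOLL_GET;      /* the pin this patch avoids */
        if (write)
                flags |= FOLL_WRITE;
        if (force)
                flags |= FOLL_FORCE;

        return __get_user_pages(tsk, mm, start, nr_pages, flags,
                                pages, vmas, NULL);
}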
@@ -7298,7 +7298,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	int r;
 
 	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
-	      is_error_page(work->page))
+	      work->wakeup_all)
 		return;
 
 	r = kvm_mmu_reload(vcpu);
@@ -7408,7 +7408,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 	struct x86_exception fault;
 
 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
-	if (is_error_page(work->page))
+	if (work->wakeup_all)
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
...
@@ -189,7 +189,7 @@ struct kvm_async_pf {
 	gva_t gva;
 	unsigned long addr;
 	struct kvm_arch_async_pf arch;
-	struct page *page;
+	bool wakeup_all;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
...
@@ -296,23 +296,21 @@ DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
 
 TRACE_EVENT(
 	kvm_async_pf_completed,
-	TP_PROTO(unsigned long address, struct page *page, u64 gva),
-	TP_ARGS(address, page, gva),
+	TP_PROTO(unsigned long address, u64 gva),
+	TP_ARGS(address, gva),
 
 	TP_STRUCT__entry(
 		__field(unsigned long, address)
-		__field(pfn_t, pfn)
 		__field(u64, gva)
 		),
 
 	TP_fast_assign(
 		__entry->address = address;
-		__entry->pfn = page ? page_to_pfn(page) : 0;
 		__entry->gva = gva;
 		),
 
-	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
-		  __entry->address, __entry->pfn)
+	TP_printk("gva %#llx address %#lx", __entry->gva,
+		  __entry->address)
 );
 
 #endif
...
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-	struct page *page = NULL;
 	struct kvm_async_pf *apf =
 		container_of(work, struct kvm_async_pf, work);
 	struct mm_struct *mm = apf->mm;
@@ -68,13 +67,12 @@ static void async_pf_execute(struct work_struct *work)
 
 	use_mm(mm);
 	down_read(&mm->mmap_sem);
-	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
-	apf->page = page;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -82,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, page, gva);
+	trace_kvm_async_pf_completed(addr, gva);
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
@@ -112,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			list_entry(vcpu->async_pf.done.next,
 				   typeof(*work), link);
 		list_del(&work->link);
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 	spin_unlock(&vcpu->async_pf.lock);
@@ -133,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 		list_del(&work->link);
 		spin_unlock(&vcpu->async_pf.lock);
 
-		if (work->page)
-			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_ready(vcpu, work);
 		kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 }
@@ -163,7 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	if (!work)
 		return 0;
 
-	work->page = NULL;
+	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
 	work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -203,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = KVM_ERR_PTR_BAD_PAGE;
+	work->wakeup_all = true;
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
...