Commit ecbcf030 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Reject attempts to consume or refresh inactive gfn_to_pfn_cache

Reject kvm_gpc_check() and kvm_gpc_refresh() if the cache is inactive.
Not checking the active flag during refresh is particularly egregious, as
KVM can end up with a valid, inactive cache, which can lead to a variety
of use-after-free bugs, e.g. consuming a NULL kernel pointer or missing
an mmu_notifier invalidation due to the cache not being on the list of
gfns to invalidate.
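
For context, consumers of a gfn_to_pfn_cache follow a check-then-dereference
pattern: take gpc->lock for read, call the check helper, and only then touch
gpc->khva.  A simplified sketch, loosely modeled on kvm_xen_set_evtchn_fast()
(the wrapper function is illustrative, only the gfn_to_pfn_cache API calls are
real), shows why the check itself has to reject an inactive cache:

static int consume_shinfo(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	struct shared_info *shinfo;
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		/*
		 * Stale (or, with this patch, inactive) cache: bail and let
		 * the caller retry via kvm_gfn_to_pfn_cache_refresh().
		 */
		read_unlock_irqrestore(&gpc->lock, flags);
		return -EWOULDBLOCK;
	}

	/* Safe only because the check above passed under gpc->lock. */
	shinfo = gpc->khva;
	/* ... e.g. set bits in shinfo->evtchn_pending ... */

	read_unlock_irqrestore(&gpc->lock, flags);
	return 0;
}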

Note, "active" needs to be set if and only if the cache is on the list
of caches, i.e. is reachable via mmu_notifier events.  If a relevant
mmu_notifier event occurs while the cache is "active" but not on the
list, KVM will not acquire the cache's lock and so will not serialize
the mmu_notifier event with active users and/or kvm_gpc_refresh().
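
To make that concrete, below is a condensed sketch of the mmu_notifier side
(based on gfn_to_pfn_cache_invalidate_start() in virt/kvm/pfncache.c; the
parameter list and filtering are simplified, and the vCPU-kick handling for
KVM_GUEST_USES_PFN is elided): only caches on kvm->gpc_list are visited, and
gpc->lock is what serializes invalidation against users and refresh.

void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	struct gfn_to_pfn_cache *gpc;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* A cache that isn't on the list is never seen here. */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end)
			gpc->valid = false;	/* next check/refresh must fail */

		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);
}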

A race between KVM_XEN_ATTR_TYPE_SHARED_INFO and KVM_XEN_HVM_EVTCHN_SEND
can be exploited to trigger the bug.

1. Deactivate shinfo cache:

kvm_xen_hvm_set_attr
case KVM_XEN_ATTR_TYPE_SHARED_INFO
 kvm_gpc_deactivate
  kvm_gpc_unmap
   gpc->valid = false
   gpc->khva = NULL
  gpc->active = false

Result: active = false, valid = false

2. Cause cache refresh:

kvm_arch_vm_ioctl
case KVM_XEN_HVM_EVTCHN_SEND
 kvm_xen_hvm_evtchn_send
  kvm_xen_set_evtchn
   kvm_xen_set_evtchn_fast
    kvm_gpc_check
    return -EWOULDBLOCK because !gpc->valid
   kvm_xen_set_evtchn_fast
    return -EWOULDBLOCK
   kvm_gpc_refresh
    hva_to_pfn_retry
     gpc->valid = true
     gpc->khva = not NULL

Result: active = false, valid = true

3. Race ioctl KVM_XEN_HVM_EVTCHN_SEND against ioctl
KVM_XEN_ATTR_TYPE_SHARED_INFO:

kvm_arch_vm_ioctl
case KVM_XEN_HVM_EVTCHN_SEND
 kvm_xen_hvm_evtchn_send
  kvm_xen_set_evtchn
   kvm_xen_set_evtchn_fast
    read_lock gpc->lock
                                          kvm_xen_hvm_set_attr case
                                          KVM_XEN_ATTR_TYPE_SHARED_INFO
                                           mutex_lock kvm->lock
                                           kvm_xen_shared_info_init
                                            kvm_gpc_activate
                                             gpc->khva = NULL
    kvm_gpc_check
     [ Check passes because gpc->valid is
       still true, even though gpc->khva
       is already NULL. ]
    shinfo = gpc->khva
    pending_bits = shinfo->evtchn_pending
    CRASH: test_and_set_bit(..., pending_bits)

Fixes: 982ed0de ("KVM: Reinstate gfn_to_pfn_cache with invalidation support")
Cc: stable@vger.kernel.org
Reported-by: Michal Luczaj <mhal@rbox.co>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221013211234.1318131-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 52491a38
@@ -81,6 +81,9 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 
+	if (!gpc->active)
+		return false;
+
 	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
 		return false;
 
@@ -240,10 +243,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	unsigned long page_offset = gpa & ~PAGE_MASK;
-	kvm_pfn_t old_pfn, new_pfn;
+	bool unmap_old = false;
 	unsigned long old_uhva;
+	kvm_pfn_t old_pfn;
 	void *old_khva;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * If must fit within a single page. The 'len' argument is
@@ -261,6 +265,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 
 	write_lock_irq(&gpc->lock);
 
+	if (!gpc->active) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	old_pfn = gpc->pfn;
 	old_khva = gpc->khva - offset_in_page(gpc->khva);
 	old_uhva = gpc->uhva;
@@ -291,6 +300,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		/* If the HVA→PFN mapping was already valid, don't unmap it. */
 		old_pfn = KVM_PFN_ERR_FAULT;
 		old_khva = NULL;
+		ret = 0;
 	}
 
  out:
@@ -305,14 +315,15 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->khva = NULL;
 	}
 
-	/* Snapshot the new pfn before dropping the lock! */
-	new_pfn = gpc->pfn;
+	/* Detect a pfn change before dropping the lock! */
+	unmap_old = (old_pfn != gpc->pfn);
 
+ out_unlock:
 	write_unlock_irq(&gpc->lock);
 
 	mutex_unlock(&gpc->refresh_lock);
 
-	if (old_pfn != new_pfn)
+	if (unmap_old)
 		gpc_unmap_khva(kvm, old_pfn, old_khva);
 
 	return ret;
@@ -366,11 +377,19 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->vcpu = vcpu;
 		gpc->usage = usage;
 		gpc->valid = false;
-		gpc->active = true;
 
 		spin_lock(&kvm->gpc_lock);
 		list_add(&gpc->list, &kvm->gpc_list);
 		spin_unlock(&kvm->gpc_lock);
+
+		/*
+		 * Activate the cache after adding it to the list, a concurrent
+		 * refresh must not establish a mapping until the cache is
+		 * reachable by mmu_notifier events.
+		 */
+		write_lock_irq(&gpc->lock);
+		gpc->active = true;
+		write_unlock_irq(&gpc->lock);
 	}
 	return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 }
@@ -379,12 +398,20 @@ EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
 	if (gpc->active) {
+		/*
+		 * Deactivate the cache before removing it from the list, KVM
+		 * must stall mmu_notifier events until all users go away, i.e.
+		 * until gpc->lock is dropped and refresh is guaranteed to fail.
+		 */
+		write_lock_irq(&gpc->lock);
+		gpc->active = false;
+		write_unlock_irq(&gpc->lock);
+
 		spin_lock(&kvm->gpc_lock);
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
 		kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
-		gpc->active = false;
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);