Commit 0774a964 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Fix out of range accesses to memslots

Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.

Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.

Fixes: 36947254 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <cai@lca.pw>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320205546.2396-2-sean.j.christopherson@intel.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d5361678
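For context, a minimal userspace sketch of the failure mode the message describes (simplified names, not kernel code): lru_slot caches the array index of the last memslot that matched a lookup, and once a deletion shrinks used_slots, the cached index can point past the last used entry, so the fast path reads stale slot data.

#include <stdio.h>

/* Illustrative stand-ins for kvm_memory_slot / kvm_memslots. */
struct slot { unsigned long base_gfn, npages; };

struct slots {
	int used_slots;		/* entries [0, used_slots) are valid   */
	int lru_slot;		/* cached index of the last lookup hit */
	struct slot memslots[4];
};

/* Fast path akin to search_memslots(): trusts lru_slot blindly. */
static struct slot *lookup(struct slots *s, unsigned long gfn)
{
	int i = s->lru_slot;	/* pre-fix: no bounds check vs. used_slots */

	if (gfn >= s->memslots[i].base_gfn &&
	    gfn <  s->memslots[i].base_gfn + s->memslots[i].npages)
		return &s->memslots[i];
	return NULL;
}

int main(void)
{
	struct slots s = {
		.used_slots = 2,
		.lru_slot   = 1,
		.memslots   = { { 0x200, 0x10 }, { 0x100, 0x10 } },
	};

	/* Deleting a memslot shrinks used_slots; lru_slot is untouched. */
	s.used_slots = 1;

	/* lookup() still dereferences memslots[1], now a stale entry. */
	if (lookup(&s, 0x105))
		printf("hit a slot past used_slots=%d via lru_slot=%d\n",
		       s.used_slots, s.lru_slot);
	return 0;
}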
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2002,6 +2002,9 @@ static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	struct kvm_memory_slot *ms;
 
+	if (unlikely(!slots->used_slots))
+		return 0;
+
 	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
 	ms = gfn_to_memslot(kvm, cur_gfn);
 	args->count = 0;
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1032,6 +1032,9 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 	int slot = atomic_read(&slots->lru_slot);
 	struct kvm_memory_slot *memslots = slots->memslots;
 
+	if (unlikely(!slots->used_slots))
+		return NULL;
+
 	if (gfn >= memslots[slot].base_gfn &&
 	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
 		return &memslots[slot];
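With this early-out in place, search_memslots() can return NULL when no slots are in use, so lookups must check the result before dereferencing it. A hedged sketch of the caller-side pattern (illustrative, not quoted from the tree):

	struct kvm_memory_slot *slot = search_memslots(slots, gfn);

	if (!slot)
		return -EFAULT;	/* no memslot backs this gfn */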
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -882,6 +882,9 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 	slots->used_slots--;
 
+	if (atomic_read(&slots->lru_slot) >= slots->used_slots)
+		atomic_set(&slots->lru_slot, 0);
+
 	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
 		mslots[i] = mslots[i + 1];
 		slots->id_to_index[mslots[i].id] = i;
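A worked example of the new reset condition, with illustrative numbers:

	/*
	 * Say three slots are in use and the cache points at the last one:
	 *   used_slots = 3, lru_slot = 2
	 * Deleting any slot leaves used_slots = 2, so lru_slot = 2 now names
	 * the first stale entry: 2 >= 2 holds and the cache resets to 0.
	 * Index 0 is always safe once used_slots > 0, and the used_slots == 0
	 * case is covered by the new early-out in search_memslots().
	 */
	if (atomic_read(&slots->lru_slot) >= slots->used_slots)
		atomic_set(&slots->lru_slot, 0);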