Commit 0a47cd85 authored by Paolo Bonzini

KVM: MMU: Fix ubsan warnings

kvm_mmu_pages_init is doing some really yucky stuff.  It is setting
up a sentinel for mmu_page_clear_parents; however, because of a) the
way levels are numbered starting from 1 and b) the way mmu_page_path
sizes its arrays with PT64_ROOT_LEVEL-1 elements, the access can be
out of bounds.  This is harmless because the code overwrites up to the
first two elements of parents->idx and these are initialized, and
because the sentinel is not needed in this case---mmu_page_clear_parents
exits anyway when it gets to the end of the array.  However ubsan
complains, and everyone else should too.

This fix does three things.  First it makes the mmu_page_path arrays
PT64_ROOT_LEVEL elements in size, so that we can write to them without
checking the level in advance.  Second it disintegrates kvm_mmu_pages_init
between mmu_unsync_walk (to reset the struct kvm_mmu_pages) and
for_each_sp (to place the NULL sentinel at the end of the current path).
This is okay because the mmu_page_path is only used in
mmu_pages_clear_parents; mmu_pages_clear_parents itself is called within
a for_each_sp iterator, and hence always after a call to mmu_pages_next.
Third it changes mmu_pages_clear_parents to just use the sentinel to
stop iteration, without checking the bounds on level.
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Reported-by: Mike Krinkin <krinkin.m.u@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 798e88b3
...@@ -1873,6 +1873,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp, ...@@ -1873,6 +1873,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
static int mmu_unsync_walk(struct kvm_mmu_page *sp, static int mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec) struct kvm_mmu_pages *pvec)
{ {
pvec->nr = 0;
if (!sp->unsync_children) if (!sp->unsync_children)
return 0; return 0;
...@@ -1986,13 +1987,12 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) ...@@ -1986,13 +1987,12 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
} }
struct mmu_page_path { struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
unsigned int idx[PT64_ROOT_LEVEL-1]; unsigned int idx[PT64_ROOT_LEVEL];
}; };
#define for_each_sp(pvec, sp, parents, i) \ #define for_each_sp(pvec, sp, parents, i) \
for (i = mmu_pages_next(&pvec, &parents, -1), \ for (i = mmu_pages_first(&pvec, &parents); \
sp = pvec.page[i].sp; \
i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
i = mmu_pages_next(&pvec, &parents, i)) i = mmu_pages_next(&pvec, &parents, i))
...@@ -2004,19 +2004,41 @@ static int mmu_pages_next(struct kvm_mmu_pages *pvec, ...@@ -2004,19 +2004,41 @@ static int mmu_pages_next(struct kvm_mmu_pages *pvec,
for (n = i+1; n < pvec->nr; n++) { for (n = i+1; n < pvec->nr; n++) {
struct kvm_mmu_page *sp = pvec->page[n].sp; struct kvm_mmu_page *sp = pvec->page[n].sp;
unsigned idx = pvec->page[n].idx;
int level = sp->role.level;
if (sp->role.level == PT_PAGE_TABLE_LEVEL) { parents->idx[level-1] = idx;
parents->idx[0] = pvec->page[n].idx; if (level == PT_PAGE_TABLE_LEVEL)
return n; break;
}
parents->parent[sp->role.level-2] = sp; parents->parent[level-2] = sp;
parents->idx[sp->role.level-1] = pvec->page[n].idx;
} }
return n; return n;
} }
/*
 * Seed @parents from the first entry of @pvec and return the cursor
 * for for_each_sp iteration.
 *
 * The first entry is the walk root; it is recorded at its level in
 * the parents path, with a NULL sentinel one slot above it so that
 * mmu_pages_clear_parents() can stop without a bounds check.  The
 * initial index returned is whatever mmu_pages_next() finds starting
 * from element 0.  An empty @pvec yields 0 (the loop then terminates
 * immediately, since 0 is not < pvec->nr).
 */
static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *root;
	int root_level;

	if (!pvec->nr)
		return 0;

	root = pvec->page[0].sp;
	root_level = root->role.level;
	/* A leaf page has no unsync children, so it can never be here. */
	WARN_ON(root_level == PT_PAGE_TABLE_LEVEL);

	parents->parent[root_level - 2] = root;

	/*
	 * Also set up a sentinel.  Further entries in pvec are all
	 * children of root, so this element is never overwritten.
	 */
	parents->parent[root_level - 1] = NULL;

	return mmu_pages_next(pvec, parents, 0);
}
static void mmu_pages_clear_parents(struct mmu_page_path *parents) static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{ {
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
...@@ -2024,22 +2046,13 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents) ...@@ -2024,22 +2046,13 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
do { do {
unsigned int idx = parents->idx[level]; unsigned int idx = parents->idx[level];
sp = parents->parent[level]; sp = parents->parent[level];
if (!sp) if (!sp)
return; return;
clear_unsync_child_bit(sp, idx); clear_unsync_child_bit(sp, idx);
level++; level++;
} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); } while (!sp->unsync_children);
}
/*
 * Pre-patch helper: reset @pvec and plant a NULL sentinel in
 * @parents->parent[] at the slot just below @parent's level, before an
 * unsync walk.  Per the commit message, the sentinel write could index
 * out of bounds when the arrays were sized PT64_ROOT_LEVEL-1, which is
 * what ubsan flagged.  This patch removes the function entirely: the
 * pvec reset moves into mmu_unsync_walk() and the sentinel placement
 * into the new mmu_pages_first().
 */
static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
struct mmu_page_path *parents,
struct kvm_mmu_pages *pvec)
{
parents->parent[parent->role.level-1] = NULL;
pvec->nr = 0;
}
static void mmu_sync_children(struct kvm_vcpu *vcpu, static void mmu_sync_children(struct kvm_vcpu *vcpu,
...@@ -2051,7 +2064,6 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, ...@@ -2051,7 +2064,6 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
struct kvm_mmu_pages pages; struct kvm_mmu_pages pages;
LIST_HEAD(invalid_list); LIST_HEAD(invalid_list);
kvm_mmu_pages_init(parent, &parents, &pages);
while (mmu_unsync_walk(parent, &pages)) { while (mmu_unsync_walk(parent, &pages)) {
bool protected = false; bool protected = false;
...@@ -2067,7 +2079,6 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, ...@@ -2067,7 +2079,6 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
} }
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
cond_resched_lock(&vcpu->kvm->mmu_lock); cond_resched_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_pages_init(parent, &parents, &pages);
} }
} }
...@@ -2305,7 +2316,6 @@ static int mmu_zap_unsync_children(struct kvm *kvm, ...@@ -2305,7 +2316,6 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
if (parent->role.level == PT_PAGE_TABLE_LEVEL) if (parent->role.level == PT_PAGE_TABLE_LEVEL)
return 0; return 0;
kvm_mmu_pages_init(parent, &parents, &pages);
while (mmu_unsync_walk(parent, &pages)) { while (mmu_unsync_walk(parent, &pages)) {
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
...@@ -2314,7 +2324,6 @@ static int mmu_zap_unsync_children(struct kvm *kvm, ...@@ -2314,7 +2324,6 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
mmu_pages_clear_parents(&parents); mmu_pages_clear_parents(&parents);
zapped++; zapped++;
} }
kvm_mmu_pages_init(parent, &parents, &pages);
} }
return zapped; return zapped;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment