Commit 018aabb5 authored by Takuya Yoshikawa, committed by Paolo Bonzini

KVM: x86: MMU: Encapsulate the type of rmap-chain head in a new struct

New struct kvm_rmap_head makes the code type-safe to some extent.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 378b417d
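
[Editor's note] The patch wraps a tagged-pointer encoding the MMU already used: bit zero of the rmap-chain head distinguishes a head that stores a single spte pointer directly (bit clear) from one that stores a pointer to a struct pte_list_desc holding several sptes (bit set, masked off before dereferencing). The user-space sketch below models just that encoding and the pte_list_add() transitions (0->1, 1->many, many->many); the value of PTE_LIST_EXT, the calloc()-based allocation, and main() are illustrative stand-ins, not kernel code.

    /* Sketch only: models the rmap_head encoding, not the kernel's MMU. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define PTE_LIST_EXT 3              /* stand-in chain-segment size */

    struct kvm_rmap_head {
        unsigned long val;              /* spte pointer, or (desc pointer | 1) */
    };

    struct pte_list_desc {
        uint64_t *sptes[PTE_LIST_EXT];
        struct pte_list_desc *more;     /* next segment of the chain */
    };

    static void pte_list_add(struct kvm_rmap_head *rmap_head, uint64_t *spte)
    {
        struct pte_list_desc *desc;
        int i;

        if (!rmap_head->val) {
            /* 0 -> 1: store the lone spte directly, bit zero clear */
            rmap_head->val = (unsigned long)spte;
        } else if (!(rmap_head->val & 1)) {
            /* 1 -> many: spill into a descriptor, tag it with bit zero */
            desc = calloc(1, sizeof(*desc));
            desc->sptes[0] = (uint64_t *)rmap_head->val;
            desc->sptes[1] = spte;
            rmap_head->val = (unsigned long)desc | 1;
        } else {
            /* many -> many: append to the last descriptor, growing if full */
            desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
            while (desc->sptes[PTE_LIST_EXT - 1] && desc->more)
                desc = desc->more;
            if (desc->sptes[PTE_LIST_EXT - 1]) {
                desc->more = calloc(1, sizeof(*desc->more));
                desc = desc->more;
            }
            for (i = 0; desc->sptes[i]; ++i)
                ;
            desc->sptes[i] = spte;
        }
    }

    int main(void)
    {
        struct kvm_rmap_head head = { 0 };
        static uint64_t sptes[5];
        int i;

        for (i = 0; i < 5; ++i)
            pte_list_add(&head, &sptes[i]);

        assert(head.val & 1);   /* many: head is a tagged desc pointer */
        return 0;
    }

The trick works because both spte pointers and descriptor pointers are at least word-aligned, leaving bit zero free as a tag; before this patch the tagged word lived in a bare unsigned long, and the new struct gives that word a distinct type.
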
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -214,6 +214,10 @@ union kvm_mmu_page_role {
 	};
 };
 
+struct kvm_rmap_head {
+	unsigned long val;
+};
+
 struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
@@ -231,7 +235,7 @@ struct kvm_mmu_page {
 	bool unsync;
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
-	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
+	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
 
 	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
 	unsigned long mmu_valid_gen;
@@ -606,7 +610,7 @@ struct kvm_lpage_info {
 };
 
 struct kvm_arch_memory_slot {
-	unsigned long *rmap[KVM_NR_PAGE_SIZES];
+	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
 	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -909,36 +909,35 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 }
 
 /*
- * Pte mapping structures:
- *
- * If pte_list bit zero is zero, then pte_list point to the spte.
+ * About rmap_head encoding:
  *
- * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
+ * If the bit zero of rmap_head->val is clear, then it points to the only spte
+ * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
  * pte_list_desc containing more mappings.
- *
- * Returns the number of pte entries before the spte was added or zero if
- * the spte was not added.
- *
+ */
+
+/*
+ * Returns the number of pointers in the rmap chain, not counting the new one.
  */
 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
-			unsigned long *pte_list)
+			struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
 	int i, count = 0;
 
-	if (!*pte_list) {
+	if (!rmap_head->val) {
 		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
-		*pte_list = (unsigned long)spte;
-	} else if (!(*pte_list & 1)) {
+		rmap_head->val = (unsigned long)spte;
+	} else if (!(rmap_head->val & 1)) {
 		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_pte_list_desc(vcpu);
-		desc->sptes[0] = (u64 *)*pte_list;
+		desc->sptes[0] = (u64 *)rmap_head->val;
 		desc->sptes[1] = spte;
-		*pte_list = (unsigned long)desc | 1;
+		rmap_head->val = (unsigned long)desc | 1;
 		++count;
 	} else {
 		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
-		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
 			desc = desc->more;
 			count += PTE_LIST_EXT;
@@ -955,8 +954,9 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 }
 
 static void
-pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
-			   int i, struct pte_list_desc *prev_desc)
+pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+			   struct pte_list_desc *desc, int i,
+			   struct pte_list_desc *prev_desc)
 {
 	int j;
 
@@ -967,43 +967,43 @@ pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		*pte_list = (unsigned long)desc->sptes[0];
+		rmap_head->val = (unsigned long)desc->sptes[0];
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
 		else
-			*pte_list = (unsigned long)desc->more | 1;
+			rmap_head->val = (unsigned long)desc->more | 1;
 	mmu_free_pte_list_desc(desc);
 }
 
-static void pte_list_remove(u64 *spte, unsigned long *pte_list)
+static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
 	struct pte_list_desc *prev_desc;
 	int i;
 
-	if (!*pte_list) {
+	if (!rmap_head->val) {
 		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
 		BUG();
-	} else if (!(*pte_list & 1)) {
+	} else if (!(rmap_head->val & 1)) {
 		rmap_printk("pte_list_remove: %p 1->0\n", spte);
-		if ((u64 *)*pte_list != spte) {
+		if ((u64 *)rmap_head->val != spte) {
 			printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
 			BUG();
 		}
-		*pte_list = 0;
+		rmap_head->val = 0;
 	} else {
 		rmap_printk("pte_list_remove: %p many->many\n", spte);
-		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
-			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
+			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
 				if (desc->sptes[i] == spte) {
-					pte_list_desc_remove_entry(pte_list,
-							desc, i,
-							prev_desc);
+					pte_list_desc_remove_entry(rmap_head,
+							desc, i, prev_desc);
 					return;
 				}
+			}
 			prev_desc = desc;
 			desc = desc->more;
 		}
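
[Editor's note] Continuing the user-space sketch from the commit message above (same hypothetical types), the removal path mirrors the three cases of pte_list_remove(): an empty head is a bug, a lone spte just clears the head, and a tagged chain is searched descriptor by descriptor, with pte_list_desc_remove_entry()'s compaction folded in: the last used slot plugs the hole, and a drained descriptor is unlinked and freed.

    /* Sketch only: mirrors pte_list_remove() above in the user-space model. */
    static void pte_list_remove(uint64_t *spte, struct kvm_rmap_head *rmap_head)
    {
        struct pte_list_desc *desc, *prev_desc;
        int i, j;

        assert(rmap_head->val);             /* 0 -> BUG() in the kernel */

        if (!(rmap_head->val & 1)) {        /* 1 -> 0 */
            assert((uint64_t *)rmap_head->val == spte);
            rmap_head->val = 0;
            return;
        }

        /* many -> many */
        desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
        prev_desc = NULL;
        while (desc) {
            for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
                if (desc->sptes[i] != spte)
                    continue;
                /* plug the hole with the last used slot */
                for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
                    ;
                desc->sptes[i] = desc->sptes[j];
                desc->sptes[j] = NULL;
                if (j != 0)
                    return;
                /* descriptor drained: unlink it from the chain */
                if (!prev_desc && !desc->more)
                    rmap_head->val = 0;
                else if (prev_desc)
                    prev_desc->more = desc->more;
                else
                    rmap_head->val = (unsigned long)desc->more | 1;
                free(desc);
                return;
            }
            prev_desc = desc;
            desc = desc->more;
        }
        assert(0);                          /* not found: BUG() in the kernel */
    }

One asymmetry visible in the diff as well: a chain never collapses from descriptor form back to the direct-pointer form while entries remain; only a fully drained head returns to zero.
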
@@ -1013,18 +1013,18 @@ static void pte_list_remove(u64 *spte, unsigned long *pte_list)
 }
 
 typedef void (*pte_list_walk_fn) (u64 *spte);
-static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
+static void pte_list_walk(struct kvm_rmap_head *rmap_head, pte_list_walk_fn fn)
 {
 	struct pte_list_desc *desc;
 	int i;
 
-	if (!*pte_list)
+	if (!rmap_head->val)
 		return;
 
-	if (!(*pte_list & 1))
-		return fn((u64 *)*pte_list);
+	if (!(rmap_head->val & 1))
+		return fn((u64 *)rmap_head->val);
 
-	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 	while (desc) {
 		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
 			fn(desc->sptes[i]);
@@ -1032,8 +1032,8 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
 	}
 }
 
-static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
-				    struct kvm_memory_slot *slot)
+static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
+					   struct kvm_memory_slot *slot)
 {
 	unsigned long idx;
 
@@ -1041,10 +1041,8 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
 }
 
-/*
- * Take gfn and return the reverse mapping to it.
- */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
+static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
+					 struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
@@ -1065,24 +1063,24 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
-	return pte_list_add(vcpu, spte, rmapp);
+	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	return pte_list_add(vcpu, spte, rmap_head);
 }
 
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
 	struct kvm_mmu_page *sp;
 	gfn_t gfn;
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	sp = page_header(__pa(spte));
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmapp = gfn_to_rmap(kvm, gfn, sp);
-	pte_list_remove(spte, rmapp);
+	rmap_head = gfn_to_rmap(kvm, gfn, sp);
+	pte_list_remove(spte, rmap_head);
 }
 
 /*
@@ -1102,17 +1100,18 @@ struct rmap_iterator {
  *
  * Returns sptep if found, NULL otherwise.
  */
-static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
+static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
+			   struct rmap_iterator *iter)
 {
-	if (!rmap)
+	if (!rmap_head->val)
 		return NULL;
 
-	if (!(rmap & 1)) {
+	if (!(rmap_head->val & 1)) {
 		iter->desc = NULL;
-		return (u64 *)rmap;
+		return (u64 *)rmap_head->val;
 	}
 
-	iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
+	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 	iter->pos = 0;
 	return iter->desc->sptes[iter->pos];
 }
@@ -1146,10 +1145,10 @@ static u64 *rmap_get_next(struct rmap_iterator *iter)
 	return NULL;
 }
 
-#define for_each_rmap_spte(_rmap_, _iter_, _spte_)			    \
-	for (_spte_ = rmap_get_first(*_rmap_, _iter_);			    \
-	     _spte_ && ({BUG_ON(!is_shadow_present_pte(*_spte_)); 1;});	    \
-	     _spte_ = rmap_get_next(_iter_))
+#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
+	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
+	     _spte_ && ({BUG_ON(!is_shadow_present_pte(*_spte_)); 1;});	\
+	     _spte_ = rmap_get_next(_iter_))
 
 static void drop_spte(struct kvm *kvm, u64 *sptep)
 {
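
[Editor's note] The same model extends to the iterator that for_each_rmap_spte() is built on: rmap_get_first() decodes the head (a NULL desc marks the lone-spte case), and rmap_get_next() steps through the current descriptor before following ->more. A sketch, again with the hypothetical types from the first sketch and without the kernel's shadow-present BUG_ON:

    struct rmap_iterator {
        struct pte_list_desc *desc; /* NULL when the head held a lone spte */
        int pos;                    /* index into desc->sptes[] */
    };

    static uint64_t *rmap_get_first(struct kvm_rmap_head *rmap_head,
                                    struct rmap_iterator *iter)
    {
        if (!rmap_head->val)
            return NULL;

        if (!(rmap_head->val & 1)) {
            iter->desc = NULL;
            return (uint64_t *)rmap_head->val;
        }

        iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
        iter->pos = 0;
        return iter->desc->sptes[0];
    }

    static uint64_t *rmap_get_next(struct rmap_iterator *iter)
    {
        if (!iter->desc)                    /* lone spte: already done */
            return NULL;

        if (iter->pos < PTE_LIST_EXT - 1 && iter->desc->sptes[iter->pos + 1])
            return iter->desc->sptes[++iter->pos];

        iter->desc = iter->desc->more;      /* next descriptor, if any */
        iter->pos = 0;
        return iter->desc ? iter->desc->sptes[0] : NULL;
    }

With these two helpers, for_each_rmap_spte() above is an ordinary for loop: start with rmap_get_first(), continue while the pointer is non-NULL, advance with rmap_get_next(). Note the signature change the patch makes here: rmap_get_first() now takes the struct pointer rather than a copied unsigned long, matching the other helpers and keeping the head's encoding behind one type.
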
@@ -1207,14 +1206,15 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
+static bool __rmap_write_protect(struct kvm *kvm,
+				 struct kvm_rmap_head *rmap_head,
 				 bool pt_protect)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep)
 		flush |= spte_write_protect(kvm, sptep, pt_protect);
 
 	return flush;
@@ -1231,13 +1231,13 @@ static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep)
 		flush |= spte_clear_dirty(kvm, sptep);
 
 	return flush;
@@ -1254,13 +1254,13 @@ static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
+static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep)
 		flush |= spte_set_dirty(kvm, sptep);
 
 	return flush;
@@ -1280,12 +1280,12 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask)
 {
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	while (mask) {
-		rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-				      PT_PAGE_TABLE_LEVEL, slot);
-		__rmap_write_protect(kvm, rmapp, false);
+		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+					  PT_PAGE_TABLE_LEVEL, slot);
+		__rmap_write_protect(kvm, rmap_head, false);
 
 		/* clear the first set bit */
 		mask &= mask - 1;
@@ -1305,12 +1305,12 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask)
 {
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	while (mask) {
-		rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-				      PT_PAGE_TABLE_LEVEL, slot);
-		__rmap_clear_dirty(kvm, rmapp);
+		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+					  PT_PAGE_TABLE_LEVEL, slot);
+		__rmap_clear_dirty(kvm, rmap_head);
 
 		/* clear the first set bit */
 		mask &= mask - 1;
@@ -1342,27 +1342,27 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 	int i;
 	bool write_protected = false;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		rmapp = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(vcpu->kvm, rmapp, true);
+		rmap_head = __gfn_to_rmap(gfn, i, slot);
+		write_protected |= __rmap_write_protect(vcpu->kvm, rmap_head, true);
 	}
 
 	return write_protected;
 }
 
-static bool kvm_zap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	while ((sptep = rmap_get_first(*rmapp, &iter))) {
+	while ((sptep = rmap_get_first(rmap_head, &iter))) {
 		BUG_ON(!(*sptep & PT_PRESENT_MASK));
 		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
@@ -1373,14 +1373,14 @@ static bool kvm_zap_rmapp(struct kvm *kvm, unsigned long *rmapp)
 	return flush;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			   unsigned long data)
 {
-	return kvm_zap_rmapp(kvm, rmapp);
+	return kvm_zap_rmapp(kvm, rmap_head);
 }
 
-static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			     unsigned long data)
 {
@@ -1395,7 +1395,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	new_pfn = pte_pfn(*ptep);
 
 restart:
-	for_each_rmap_spte(rmapp, &iter, sptep) {
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
 			     sptep, *sptep, gfn, level);
@@ -1433,11 +1433,11 @@ struct slot_rmap_walk_iterator {
 	/* output fields. */
 	gfn_t gfn;
-	unsigned long *rmap;
+	struct kvm_rmap_head *rmap;
 	int level;
 
 	/* private field. */
-	unsigned long *end_rmap;
+	struct kvm_rmap_head *end_rmap;
 };
 
 static void
@@ -1496,7 +1496,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long end,
 				unsigned long data,
 				int (*handler)(struct kvm *kvm,
-					       unsigned long *rmapp,
+					       struct kvm_rmap_head *rmap_head,
 					       struct kvm_memory_slot *slot,
 					       gfn_t gfn,
 					       int level,
@@ -1540,7 +1540,8 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 			  unsigned long data,
-			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+			  int (*handler)(struct kvm *kvm,
+					 struct kvm_rmap_head *rmap_head,
 					 struct kvm_memory_slot *slot,
 					 gfn_t gfn, int level,
 					 unsigned long data))
@@ -1563,7 +1564,7 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			 unsigned long data)
 {
@@ -1573,18 +1574,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 	BUG_ON(!shadow_accessed_mask);
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		if (*sptep & shadow_accessed_mask) {
 			young = 1;
 			clear_bit((ffs(shadow_accessed_mask) - 1),
 				 (unsigned long *)sptep);
 		}
+	}
 
 	trace_kvm_age_page(gfn, level, slot, young);
 	return young;
 }
 
-static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			      struct kvm_memory_slot *slot, gfn_t gfn,
 			      int level, unsigned long data)
 {
@@ -1600,11 +1602,12 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	if (!shadow_accessed_mask)
 		goto out;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		if (*sptep & shadow_accessed_mask) {
 			young = 1;
 			break;
 		}
+	}
 out:
 	return young;
 }
@@ -1613,14 +1616,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
 
 	sp = page_header(__pa(spte));
 
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
-	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
+	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
 }
@@ -1737,7 +1740,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	 * this feature. See the comments in kvm_zap_obsolete_pages().
 	 */
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-	sp->parent_ptes = 0;
+	sp->parent_ptes.val = 0;
 	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 	return sp;
@@ -2277,7 +2280,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 	u64 *sptep;
 	struct rmap_iterator iter;
 
-	while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
+	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
 		drop_parent_pte(sp, sptep);
 }
@@ -4492,7 +4495,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 }
 
 /* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
+typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
 
 /* The caller should hold mmu-lock before calling this function. */
 static bool
@@ -4586,9 +4589,10 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static bool slot_rmap_write_protect(struct kvm *kvm, unsigned long *rmapp)
+static bool slot_rmap_write_protect(struct kvm *kvm,
+				    struct kvm_rmap_head *rmap_head)
 {
-	return __rmap_write_protect(kvm, rmapp, false);
+	return __rmap_write_protect(kvm, rmap_head, false);
 }
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
@@ -4624,7 +4628,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
-					 unsigned long *rmapp)
+					 struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -4633,7 +4637,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 	struct kvm_mmu_page *sp;
 
 restart:
-	for_each_rmap_spte(rmapp, &iter, sptep) {
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		sp = page_header(__pa(sptep));
 		pfn = spte_to_pfn(*sptep);
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -129,7 +129,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 {
 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *rev_sp;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
@@ -150,8 +150,8 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 		return;
 	}
 
-	rmapp = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
-	if (!*rmapp) {
+	rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
+	if (!rmap_head->val) {
 		if (!__ratelimit(&ratelimit_state))
 			return;
 		audit_printk(kvm, "no rmap for writable spte %llx\n",
@@ -192,7 +192,7 @@ static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
 static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 	u64 *sptep;
 	struct rmap_iterator iter;
 	struct kvm_memslots *slots;
@@ -203,13 +203,14 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, sp->gfn);
-	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
+	rmap_head = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		if (is_writable_pte(*sptep))
 			audit_printk(kvm, "shadow page has writable "
 				     "mappings: gfn %llx role %x\n",
 				     sp->gfn, sp->role.word);
+	}
 }
 
 static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)