Commit d4dbf470 authored by Takuya Yoshikawa, committed by Avi Kivity

KVM: MMU: Make the way of accessing lpage_info more generic

Large page information has two elements, but only one of them, write_count, is
accessed through a helper function.

This patch replaces that helper with a more generic one that returns the newly
named kvm_lpage_info structure, and uses it to access the other element,
rmap_pde, as well.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 443381a8
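
The gist of the change, sketched as a small standalone program below: instead of a helper that can only return a pointer to write_count, the generic lpage_info_slot() returns the whole per-large-page record, and callers pick whichever field they need. The simplified types, the 64-entry array, and the shift value here are assumptions made for illustration only, not the kernel definitions; only the field names and the index computation mirror the patch.

/*
 * Minimal sketch of the access pattern introduced by this patch.
 * Everything is simplified for illustration.
 */
#include <stdio.h>

#define LPAGE_GFN_SHIFT 9			/* assumed: 512 4K pages per 2M large page */

struct kvm_lpage_info {
	unsigned long rmap_pde;			/* rmap head for this large page */
	int write_count;			/* shadowed 4K pages inside it */
};

struct slot {					/* stand-in for struct kvm_memory_slot */
	unsigned long base_gfn;
	struct kvm_lpage_info lpage_info[64];
};

/* Generic lookup: the index is computed relative to the slot, as in the patch. */
static struct kvm_lpage_info *lpage_info_slot(unsigned long gfn, struct slot *slot)
{
	unsigned long idx = (gfn >> LPAGE_GFN_SHIFT) -
			    (slot->base_gfn >> LPAGE_GFN_SHIFT);
	return &slot->lpage_info[idx];
}

int main(void)
{
	struct slot s = { .base_gfn = 0x1010 };	/* deliberately not 2M aligned */
	struct kvm_lpage_info *linfo = lpage_info_slot(0x1400, &s);

	linfo->write_count += 1;		/* what account_shadowed() now does */
	printf("idx=%ld write_count=%d rmap_pde=%lu\n",
	       (long)(linfo - s.lpage_info), linfo->write_count, linfo->rmap_pde);
	return 0;
}

Note how the index is formed by shifting gfn and base_gfn separately before subtracting: with the values above it is (0x1400 >> 9) - (0x1010 >> 9) = 10 - 8 = 2, whereas (0x1400 - 0x1010) >> 9 would give 1. That difference is what the comment in the patch about "slots that are not large page aligned" refers to (assuming the 9-bit shift for a 2M page on x86).
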
@@ -477,46 +477,46 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
 }
 
 /*
- * Return the pointer to the largepage write count for a given
- * gfn, handling slots that are not large page aligned.
+ * Return the pointer to the large page information for a given gfn,
+ * handling slots that are not large page aligned.
  */
-static int *slot_largepage_idx(gfn_t gfn,
-			       struct kvm_memory_slot *slot,
-			       int level)
+static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
+					      struct kvm_memory_slot *slot,
+					      int level)
 {
 	unsigned long idx;
 
 	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-	return &slot->lpage_info[level - 2][idx].write_count;
+	return &slot->lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count = slot_largepage_idx(gfn, slot, i);
-		*write_count += 1;
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count += 1;
	}
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count = slot_largepage_idx(gfn, slot, i);
-		*write_count -= 1;
-		WARN_ON(*write_count < 0);
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count -= 1;
+		WARN_ON(linfo->write_count < 0);
	}
 }
@@ -525,12 +525,12 @@ static int has_wrprotected_page(struct kvm *kvm,
			       int level)
 {
 	struct kvm_memory_slot *slot;
-	int *largepage_idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (slot) {
-		largepage_idx = slot_largepage_idx(gfn, slot, level);
-		return *largepage_idx;
+		linfo = lpage_info_slot(gfn, slot, level);
+		return linfo->write_count;
	}
 
 	return 1;
@@ -585,16 +585,15 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
 	struct kvm_memory_slot *slot;
-	unsigned long idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (likely(level == PT_PAGE_TABLE_LEVEL))
		return &slot->rmap[gfn - slot->base_gfn];
 
-	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-	return &slot->lpage_info[level - 2][idx].rmap_pde;
+	linfo = lpage_info_slot(gfn, slot, level);
+
+	return &linfo->rmap_pde;
 }
 
 /*
@@ -882,19 +881,16 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
		end = start + (memslot->npages << PAGE_SHIFT);
 
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			gfn_t gfn = memslot->base_gfn + gfn_offset;
 
			ret = handler(kvm, &memslot->rmap[gfn_offset], data);
 
			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-				unsigned long idx;
-				int sh;
-
-				sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
-				idx = ((memslot->base_gfn+gfn_offset) >> sh) -
-					(memslot->base_gfn >> sh);
-				ret |= handler(kvm,
-					&memslot->lpage_info[j][idx].rmap_pde,
-					data);
+				struct kvm_lpage_info *linfo;
+
+				linfo = lpage_info_slot(gfn, memslot,
+							PT_DIRECTORY_LEVEL + j);
+				ret |= handler(kvm, &linfo->rmap_pde, data);
			}
 
			trace_kvm_age_page(hva, memslot, ret);
			retval |= ret;
...
@@ -146,6 +146,11 @@ struct kvm_vcpu {
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
 struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
@@ -153,10 +158,7 @@ struct kvm_memory_slot {
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_head;
-	struct {
-		unsigned long rmap_pde;
-		int write_count;
-	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned long userspace_addr;
	int user_alloc;
	int id;
...