Commit 82855413 authored by Joerg Roedel, committed by Avi Kivity

KVM: Remove unnecessary divide operations

This patch converts unnecessary divide and modulo operations
in the KVM large-page code into logical operations (shifts
and masks). This makes it possible to convert gfn_t to u64
without breaking 32-bit builds.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 95c87e2b
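
The whole patch rests on one identity: when the number of base pages per huge page is a power of two, dividing a gfn by that count equals shifting right by KVM_HPAGE_GFN_SHIFT(level), and taking the remainder equals masking with (count - 1). Shifts and masks avoid 64-bit division, which a u64 gfn_t on a 32-bit build would otherwise turn into libgcc helper calls. The following is a minimal standalone C sketch, not kernel code: it mirrors the x86 macros from this patch and checks the identity; PAGE_SHIFT = 12 and the 1ULL constants are assumptions made for the sketch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the x86 macros below; 4K base pages assumed for the sketch. */
#define PAGE_SHIFT              12
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1ULL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) >> PAGE_SHIFT)

int main(void)
{
        uint64_t gfn = 0x12345678abcULL; /* arbitrary 64-bit frame number */

        /* Levels 1..3 match x86's KVM_NR_PAGE_SIZES of 3 (4K, 2M, 1G). */
        for (int level = 1; level <= 3; level++) {
                /* Division by a power of two is a right shift... */
                assert(gfn / KVM_PAGES_PER_HPAGE(level) ==
                       gfn >> KVM_HPAGE_GFN_SHIFT(level));
                /* ...and modulo is a bitwise AND with (divisor - 1). */
                assert(gfn % KVM_PAGES_PER_HPAGE(level) ==
                       (gfn & (KVM_PAGES_PER_HPAGE(level) - 1)));
        }
        printf("shift/mask forms match divide/modulo\n");
        return 0;
}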
@@ -235,6 +235,7 @@ struct kvm_vm_data {
 #define KVM_REQ_PTC_G 32
 #define KVM_REQ_RESUME 33
 
+#define KVM_HPAGE_GFN_SHIFT(x) 0
 #define KVM_NR_PAGE_SIZES 1
 #define KVM_PAGES_PER_HPAGE(x) 1
...
@@ -35,6 +35,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
 #define KVM_NR_PAGE_SIZES 1
 #define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
...
@@ -41,7 +41,8 @@ struct sca_block {
 } __attribute__((packed));
 
 #define KVM_NR_PAGE_SIZES 2
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
...
@@ -44,7 +44,8 @@
 /* KVM Hugepage definitions for x86 */
 #define KVM_NR_PAGE_SIZES 3
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
...
@@ -423,8 +423,8 @@ static int *slot_largepage_idx(gfn_t gfn,
 {
        unsigned long idx;
 
-       idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
-             (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
+       idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+             (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
        return &slot->lpage_info[level - 2][idx].write_count;
 }
...
@@ -528,8 +528,8 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
        if (likely(level == PT_PAGE_TABLE_LEVEL))
                return &slot->rmap[gfn - slot->base_gfn];
 
-       idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
-             (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
+       idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+             (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
        return &slot->lpage_info[level - 2][idx].rmap_pde;
 }
...
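Both hunks above compute the same quantity: a gfn's large-page index relative to the start of its memslot. As a hedged numeric illustration (values invented for this note): at level 2 on x86 the shift is 9, so for slot->base_gfn = 0x400 and gfn = 0x7ff, idx = (0x7ff >> 9) - (0x400 >> 9) = 3 - 2 = 1, exactly what the old divide by KVM_PAGES_PER_HPAGE (512) produced.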
@@ -626,9 +626,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
                if (new.lpage_info[i])
                        continue;
 
-               lpages = 1 + (base_gfn + npages - 1) /
-                            KVM_PAGES_PER_HPAGE(level);
-               lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+               lpages = 1 + ((base_gfn + npages - 1)
+                            >> KVM_HPAGE_GFN_SHIFT(level));
+               lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
 
                new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
@@ -638,9 +638,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
                memset(new.lpage_info[i], 0,
                       lpages * sizeof(*new.lpage_info[i]));
 
-               if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+               if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
                        new.lpage_info[i][0].write_count = 1;
-               if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+               if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
                        new.lpage_info[i][lpages - 1].write_count = 1;
 
                ugfn = new.userspace_addr >> PAGE_SHIFT;
 
                /*
...
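The last two hunks size the per-level lpage_info arrays and write-protect boundary entries that the slot only partially covers; both now use shift and mask in place of divide and modulo. Below is a hedged worked example in the same spirit, with invented slot numbers (base_gfn = 100, npages = 1000, level 2, i.e. 512 base pages per huge page); it is a standalone sketch, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_PAGES_PER_HPAGE(x)  (1ULL << KVM_HPAGE_GFN_SHIFT(x))

int main(void)
{
        /* Hypothetical memslot starting mid-way through a 2M page. */
        uint64_t base_gfn = 100, npages = 1000;
        int level = 2;

        /* New form: index of the last gfn minus index of the first, plus one. */
        uint64_t lpages = 1 + ((base_gfn + npages - 1)
                              >> KVM_HPAGE_GFN_SHIFT(level));
        lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);

        /* Matches the divide-based form the patch replaces. */
        assert(lpages == 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE(level)
                           - base_gfn / KVM_PAGES_PER_HPAGE(level));

        /* gfns 100..1099 touch 512-page groups 0, 1 and 2 -> 3 entries. */
        printf("lpages = %llu\n", (unsigned long long)lpages);

        /* Head and tail groups are only partially covered, so the patch's
           mask tests fire and both boundary entries get write_count = 1. */
        assert(base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
        assert((base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1));
        return 0;
}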