Commit 8482644a authored by Takuya Yoshikawa, committed by Marcelo Tosatti

KVM: set_memory_region: Refactor commit_memory_region()

This patch makes the parameter old a const pointer to the old memory
slot and adds a new parameter named change to know the change being
requested: the former is for removing extra copying and the latter is
for cleaning up the code.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 7b6195a9
...@@ -238,7 +238,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -238,7 +238,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{ {
} }
......
...@@ -1591,7 +1591,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -1591,7 +1591,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{ {
return; return;
} }
......
...@@ -152,7 +152,7 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, ...@@ -152,7 +152,7 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem); struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm, extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old); const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info); struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm, extern void kvmppc_core_flush_memslot(struct kvm *kvm,
......
...@@ -1639,12 +1639,12 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, ...@@ -1639,12 +1639,12 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm, void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old)
{ {
unsigned long npages = mem->memory_size >> PAGE_SHIFT; unsigned long npages = mem->memory_size >> PAGE_SHIFT;
struct kvm_memory_slot *memslot; struct kvm_memory_slot *memslot;
if (npages && old.npages) { if (npages && old->npages) {
/* /*
* If modifying a memslot, reset all the rmap dirty bits. * If modifying a memslot, reset all the rmap dirty bits.
* If this is a new memslot, we don't need to do anything * If this is a new memslot, we don't need to do anything
......
...@@ -1283,7 +1283,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, ...@@ -1283,7 +1283,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm, void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old)
{ {
} }
......
...@@ -1531,7 +1531,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, ...@@ -1531,7 +1531,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm, void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old)
{ {
} }
......
...@@ -420,7 +420,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -420,7 +420,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{ {
kvmppc_core_commit_memory_region(kvm, mem, old); kvmppc_core_commit_memory_region(kvm, mem, old);
} }
......
...@@ -1001,7 +1001,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -1001,7 +1001,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{ {
int rc; int rc;
......
...@@ -6935,16 +6935,17 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -6935,16 +6935,17 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old) const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{ {
int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT; int nr_mmu_pages = 0;
if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) { if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
int ret; int ret;
ret = vm_munmap(old.userspace_addr, ret = vm_munmap(old->userspace_addr,
old.npages * PAGE_SIZE); old->npages * PAGE_SIZE);
if (ret < 0) if (ret < 0)
printk(KERN_WARNING printk(KERN_WARNING
"kvm_vm_ioctl_set_memory_region: " "kvm_vm_ioctl_set_memory_region: "
...@@ -6961,13 +6962,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, ...@@ -6961,13 +6962,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* Existing largepage mappings are destroyed here and new ones will * Existing largepage mappings are destroyed here and new ones will
* not be created until the end of the logging. * not be created until the end of the logging.
*/ */
if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
kvm_mmu_slot_remove_write_access(kvm, mem->slot); kvm_mmu_slot_remove_write_access(kvm, mem->slot);
/* /*
* If memory slot is created, or moved, we need to clear all * If memory slot is created, or moved, we need to clear all
* mmio sptes. * mmio sptes.
*/ */
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) { if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
kvm_mmu_zap_all(kvm); kvm_mmu_zap_all(kvm);
kvm_reload_remote_mmus(kvm); kvm_reload_remote_mmus(kvm);
} }
......
...@@ -483,7 +483,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -483,7 +483,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
enum kvm_mr_change change); enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old); const struct kvm_memory_slot *old,
enum kvm_mr_change change);
bool kvm_largepages_enabled(void); bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void); void kvm_disable_largepages(void);
/* flush all memory translations */ /* flush all memory translations */
......
...@@ -896,7 +896,7 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -896,7 +896,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
old_memslots = install_new_memslots(kvm, slots, &new); old_memslots = install_new_memslots(kvm, slots, &new);
kvm_arch_commit_memory_region(kvm, mem, old); kvm_arch_commit_memory_region(kvm, mem, &old, change);
kvm_free_physmem_slot(&old, &new); kvm_free_physmem_slot(&old, &new);
kfree(old_memslots); kfree(old_memslots);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment