Commit 7a905b14 authored by Takuya Yoshikawa, committed by Gleb Natapov

KVM: Remove user_alloc from struct kvm_memory_slot

This field was needed to differentiate memory slots created by the new
API, KVM_SET_USER_MEMORY_REGION, from those created by the old equivalent,
KVM_SET_MEMORY_REGION, whose support was dropped long ago:

  commit b74a07be
  KVM: Remove kernel-allocated memory regions

Although we also have private memory slots, to which KVM itself allocates
memory with vm_mmap() (in other words, !user_alloc slots), the slot id
should be enough to differentiate them.
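
As an illustration only (not part of the original message): with the flag
gone, the distinction reduces to a single id comparison. The helper name
below is hypothetical; KVM_USER_MEM_SLOTS and struct kvm_memory_slot are
the ones already used in the tree.

  /*
   * Hypothetical helper, shown only to illustrate the id-based check that
   * replaces user_alloc: ids below KVM_USER_MEM_SLOTS come from userspace
   * via KVM_SET_USER_MEMORY_REGION; ids at or above it are KVM-internal
   * (private) slots backed by vm_mmap().
   */
  static inline bool kvm_is_private_memslot(struct kvm_memory_slot *slot)
  {
          return slot->id >= KVM_USER_MEM_SLOTS;
  }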

Note: corresponding function parameters will be removed later.
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 257090f7
@@ -6897,32 +6897,27 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				bool user_alloc)
 {
 	int npages = memslot->npages;
-	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
 
-	/* Prevent internal slot pages from being moved by fork()/COW. */
-	if (memslot->id >= KVM_USER_MEM_SLOTS)
-		map_flags = MAP_SHARED | MAP_ANONYMOUS;
-
-	/*To keep backward compatibility with older userspace,
-	 *x86 needs to handle !user_alloc case.
+	/*
+	 * Only private memory slots need to be mapped here since
+	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
 	 */
-	if (!user_alloc) {
-		if (npages && !old.npages) {
-			unsigned long userspace_addr;
+	if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+		unsigned long userspace_addr;
 
-			userspace_addr = vm_mmap(NULL, 0,
-						 npages * PAGE_SIZE,
-						 PROT_READ | PROT_WRITE,
-						 map_flags,
-						 0);
+		/*
+		 * MAP_SHARED to prevent internal slot pages from being moved
+		 * by fork()/COW.
+		 */
+		userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+					 PROT_READ | PROT_WRITE,
+					 MAP_SHARED | MAP_ANONYMOUS, 0);
 
-			if (IS_ERR((void *)userspace_addr))
-				return PTR_ERR((void *)userspace_addr);
+		if (IS_ERR((void *)userspace_addr))
+			return PTR_ERR((void *)userspace_addr);
 
-			memslot->userspace_addr = userspace_addr;
-		}
+		memslot->userspace_addr = userspace_addr;
 	}
 
 	return 0;
 }

@@ -6935,7 +6930,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
-	if (!user_alloc && !old.user_alloc && old.npages && !npages) {
+	if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
 		int ret;
 
 		ret = vm_munmap(old.userspace_addr,

@@ -273,7 +273,6 @@ struct kvm_memory_slot {
 	unsigned long userspace_addr;
 	u32 flags;
 	short id;
-	bool user_alloc;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)

@@ -839,7 +839,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	r = -ENOMEM;
 	if (change == KVM_MR_CREATE) {
-		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 
 		if (kvm_arch_create_memslot(&new, npages))
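
For context, a minimal userspace sketch of creating a slot through
KVM_SET_USER_MEMORY_REGION, the only remaining path after this series; the
wrapper function name and field values are illustrative assumptions, not
taken from this commit.

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  /* Illustrative only: registers a page-aligned host buffer as guest slot 0. */
  static int set_guest_memory(int vm_fd, void *host_mem, __u64 size)
  {
          struct kvm_userspace_memory_region region = {
                  .slot = 0,      /* userspace slots use ids < KVM_USER_MEM_SLOTS */
                  .flags = 0,
                  .guest_phys_addr = 0,
                  .memory_size = size,
                  .userspace_addr = (__u64)(unsigned long)host_mem,
          };

          return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
  }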