Commit d01495d4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: RISC-V: Use "new" memslot instead of userspace memory region

Get the slot ID, hva, etc... from the "new" memslot instead of the
userspace memory region when preparing/committing a memory region.  This
will allow a future commit to drop @mem from the prepare/commit hooks
once all architectures convert to using "new".

Opportunistically wait to get the various "new" values until after
filtering out the DELETE case in anticipation of a future commit passing
NULL for @new when deleting a memslot.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <543608ab88a1190e73a958efffafc98d2652c067.1638817640.git.maciej.szmigiero@oracle.com>
parent 9d7d18ee
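
For reference, a minimal sketch (not part of the patch, mirroring the hunks below) of how the values previously taken from the userspace memory region @mem map onto fields of the "new" kvm_memory_slot:

	/* Illustrative only: locals in the prepare hook are now derived from
	 * the "new" memslot instead of the userspace memory region @mem.
	 */
	hva      = new->userspace_addr;              /* was mem->userspace_addr */
	size     = new->npages << PAGE_SHIFT;        /* was mem->memory_size */
	reg_end  = hva + size;
	base_gpa = new->base_gfn << PAGE_SHIFT;      /* was mem->guest_phys_addr */
	writable = !(new->flags & KVM_MEM_READONLY); /* same flag, read from new */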
@@ -472,8 +472,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * allocated dirty_bitmap[], dirty pages will be tracked while
 	 * the memory slot is write protected.
 	 */
-	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
-		stage2_wp_memory_region(kvm, mem->slot);
+	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
+		stage2_wp_memory_region(kvm, new->id);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -482,9 +482,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *new,
 				enum kvm_mr_change change)
 {
-	hva_t hva = mem->userspace_addr;
-	hva_t reg_end = hva + mem->memory_size;
-	bool writable = !(mem->flags & KVM_MEM_READONLY);
+	hva_t hva, reg_end, size;
+	gpa_t base_gpa;
+	bool writable;
 	int ret = 0;
 
 	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -499,6 +499,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			(stage2_gpa_size >> PAGE_SHIFT))
 		return -EFAULT;
 
+	hva = new->userspace_addr;
+	size = new->npages << PAGE_SHIFT;
+	reg_end = hva + size;
+	base_gpa = new->base_gfn << PAGE_SHIFT;
+	writable = !(new->flags & KVM_MEM_READONLY);
+
 	mmap_read_lock(current->mm);
 
 	/*
@@ -534,8 +540,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		vm_end = min(reg_end, vma->vm_end);
 
 		if (vma->vm_flags & VM_PFNMAP) {
-			gpa_t gpa = mem->guest_phys_addr +
-				    (vm_start - mem->userspace_addr);
+			gpa_t gpa = base_gpa + (vm_start - hva);
 			phys_addr_t pa;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
@@ -560,8 +565,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		stage2_unmap_range(kvm, mem->guest_phys_addr,
-				   mem->memory_size, false);
+		stage2_unmap_range(kvm, base_gpa, size, false);
 	spin_unlock(&kvm->mmu_lock);
 
 out:
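
A minimal sketch, not taken from the patch and simplified to the DELETE case only, of why the reads of @new are deferred until after the change filter: once a later commit passes NULL for @new when a memslot is deleted, any dereference ahead of that filter would be a NULL-pointer access.

	/* Sketch only: defer every read of @new past the filter so a future
	 * NULL @new for KVM_MR_DELETE is never dereferenced.
	 */
	if (change == KVM_MR_DELETE)
		return 0;                       /* this path never touches @new */

	hva  = new->userspace_addr;             /* safe: @new is valid here */
	size = new->npages << PAGE_SHIFT;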