Commit 3d63ef4d authored by Marc Zyngier

Merge branch 'kvm-arm64/memslot-fixes' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents ac5ce245 10ba2d17
...@@ -644,7 +644,6 @@ void kvm_arm_resume_guest(struct kvm *kvm); ...@@ -644,7 +644,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
#endif /* __KVM_NVHE_HYPERVISOR__ */ #endif /* __KVM_NVHE_HYPERVISOR__ */
void force_vm_exit(const cpumask_t *mask); void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
int handle_exit(struct kvm_vcpu *vcpu, int exception_index); int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index); void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
......
...@@ -504,10 +504,11 @@ static void stage2_unmap_memslot(struct kvm *kvm, ...@@ -504,10 +504,11 @@ static void stage2_unmap_memslot(struct kvm *kvm,
* +--------------------------------------------+ * +--------------------------------------------+
*/ */
do { do {
struct vm_area_struct *vma = find_vma(current->mm, hva); struct vm_area_struct *vma;
hva_t vm_start, vm_end; hva_t vm_start, vm_end;
if (!vma || vma->vm_start >= reg_end) vma = find_vma_intersection(current->mm, hva, reg_end);
if (!vma)
break; break;
/* /*
...@@ -638,7 +639,7 @@ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_ ...@@ -638,7 +639,7 @@ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_
* Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
* serializing operations for VM memory regions. * serializing operations for VM memory regions.
*/ */
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{ {
struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
...@@ -925,10 +926,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -925,10 +926,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* unmapped afterwards, the call to kvm_unmap_hva will take it away * unmapped afterwards, the call to kvm_unmap_hva will take it away
* from us again properly. This smp_rmb() interacts with the smp_wmb() * from us again properly. This smp_rmb() interacts with the smp_wmb()
* in kvm_mmu_notifier_invalidate_<page|range_end>. * in kvm_mmu_notifier_invalidate_<page|range_end>.
*
* Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
* used to avoid unnecessary overhead introduced to locate the memory
* slot because it's always fixed even @gfn is adjusted for huge pages.
*/ */
smp_rmb(); smp_rmb();
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
write_fault, &writable, NULL);
if (pfn == KVM_PFN_ERR_HWPOISON) { if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma_shift); kvm_send_hwpoison_signal(hva, vma_shift);
return 0; return 0;
...@@ -994,7 +1000,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -994,7 +1000,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
/* Mark the page dirty only if the fault is handled successfully */ /* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret) { if (writable && !ret) {
kvm_set_pfn_dirty(pfn); kvm_set_pfn_dirty(pfn);
mark_page_dirty(kvm, gfn); mark_page_dirty_in_slot(kvm, memslot, gfn);
} }
out_unlock: out_unlock:
...@@ -1424,10 +1430,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -1424,10 +1430,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* +--------------------------------------------+ * +--------------------------------------------+
*/ */
do { do {
struct vm_area_struct *vma = find_vma(current->mm, hva); struct vm_area_struct *vma;
hva_t vm_start, vm_end; hva_t vm_start, vm_end;
if (!vma || vma->vm_start >= reg_end) vma = find_vma_intersection(current->mm, hva, reg_end);
if (!vma)
break; break;
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment