Commit 6a3c623b authored by Peter Xu, committed by Paolo Bonzini

KVM: X86: Drop x86_set_memory_region()

The helper x86_set_memory_region() is only used by vmx_set_tss_addr()
and kvm_arch_destroy_vm().  Push the slots_lock up into both callers,
then drop x86_set_memory_region().

This prepares for __x86_set_memory_region() to return the HVA of the
mapping it creates, because that HVA will still need to be protected
by the lock even after __x86_set_memory_region() returns.
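
A minimal sketch of the caller pattern this prepares for; the
HVA-returning signature and the use_mapping() helper are assumptions
for illustration, not part of this commit:

	unsigned long hva;

	mutex_lock(&kvm->slots_lock);
	hva = __x86_set_memory_region(kvm, id, gpa, size);  /* assumed: returns the HVA */
	if (!IS_ERR_VALUE(hva))
		use_mapping(hva);  /* hypothetical user; the HVA is only stable under slots_lock */
	mutex_unlock(&kvm->slots_lock);

Without the lock held across use_mapping(), a concurrent memslot
update could unmap the region and leave hva dangling, which is why the
lock now lives in the callers.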
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2a5755bb
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1627,7 +1627,6 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
 int kvm_is_in_guest(void);
 
 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
-int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4491,8 +4491,11 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	if (enable_unrestricted_guest)
 		return 0;
 
-	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
-				    PAGE_SIZE * 3);
+	mutex_lock(&kvm->slots_lock);
+	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				      PAGE_SIZE * 3);
+	mutex_unlock(&kvm->slots_lock);
 	if (ret)
 		return ret;
 	to_kvm_vmx(kvm)->tss_addr = addr;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9732,18 +9732,6 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
-{
-	int r;
-
-	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, id, gpa, size);
-	mutex_unlock(&kvm->slots_lock);
-
-	return r;
-}
-EXPORT_SYMBOL_GPL(x86_set_memory_region);
-
 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 {
 	kvm_mmu_pre_destroy_vm(kvm);
@@ -9757,9 +9745,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		 * unless the the memory map has changed due to process exit
 		 * or fd copying.
 		 */
-		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
-		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
-		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
+		mutex_lock(&kvm->slots_lock);
+		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+					0, 0);
+		__x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+					0, 0);
+		__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
+		mutex_unlock(&kvm->slots_lock);
 	}
 	if (kvm_x86_ops->vm_destroy)
 		kvm_x86_ops->vm_destroy(kvm);
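
For contrast, a sketch (same illustrative names as above) of why the
dropped wrapper could not have served the HVA use case even if it had
been taught to return one: it released slots_lock before returning, so
nothing would keep the mapping stable while the caller used it:

	hva = x86_set_memory_region(kvm, id, gpa, size);  /* locks and unlocks internally */
	/* another task may delete or move the memslot here */
	use_mapping(hva);  /* potential use-after-unmap */

Pushing the lock into the callers closes this window at the cost of a
lock/unlock pair at each call site.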