Commit 8a3c1a33 authored by Paolo Bonzini, committed by Gleb Natapov

KVM: mmu: change useless int return types to void

kvm_mmu initialization is mostly filling in function pointers; there is
no way for it to fail.  Clean up the unused return values.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 95f93af4
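
The change is purely mechanical, and the pattern is easier to see in isolation. The sketch below (hypothetical ops/subsys names, not KVM code) shows the same cleanup: when an init function does nothing but fill in function pointers, its int return value can only ever be 0, so both the return and the result plumbing in every caller are dead code.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical example types, for illustration only. */
struct ops {
	int (*read)(void *buf, size_t len);
	int (*write)(const void *buf, size_t len);
};

struct subsys {
	struct ops ops;
	bool ready;
};

static int generic_read(void *buf, size_t len)
{
	(void)buf; (void)len;
	return 0;
}

static int generic_write(const void *buf, size_t len)
{
	(void)buf; (void)len;
	return 0;
}

/*
 * Before the cleanup this would have been
 *	static int ops_init(struct ops *ops) { ...; return 0; }
 * even though no failure path exists.  Returning void states the
 * "cannot fail" contract directly in the type.
 */
static void ops_init(struct ops *ops)
{
	ops->read  = generic_read;
	ops->write = generic_write;
}

static void subsys_setup(struct subsys *s)
{
	ops_init(&s->ops);	/* was: r = ops_init(...); ... return r; */
	s->ready = true;
}

int main(void)
{
	struct subsys s = { 0 };

	subsys_setup(&s);
	return s.ready ? 0 : 1;
}

Each hunk below applies exactly this transformation to the kvm_mmu init chain, from nonpaging_init_context up through kvm_mmu_setup.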
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -780,11 +780,11 @@ void kvm_mmu_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3419,8 +3419,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	return 0;
 }
 
-static int nonpaging_init_context(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu *context)
+static void nonpaging_init_context(struct kvm_vcpu *vcpu,
+				   struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -3432,7 +3432,6 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
 	context->nx = false;
-	return 0;
 }
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
@@ -3647,9 +3646,9 @@ static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 	mmu->last_pte_bitmap = map;
 }
 
-static int paging64_init_context_common(struct kvm_vcpu *vcpu,
-					struct kvm_mmu *context,
-					int level)
+static void paging64_init_context_common(struct kvm_vcpu *vcpu,
+					 struct kvm_mmu *context,
+					 int level)
 {
 	context->nx = is_nx(vcpu);
 	context->root_level = level;
@@ -3667,17 +3666,16 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 	context->shadow_root_level = level;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = false;
-	return 0;
 }
 
-static int paging64_init_context(struct kvm_vcpu *vcpu,
-				 struct kvm_mmu *context)
+static void paging64_init_context(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context)
 {
-	return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
+	paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
 }
 
-static int paging32_init_context(struct kvm_vcpu *vcpu,
-				 struct kvm_mmu *context)
+static void paging32_init_context(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context)
 {
 	context->nx = false;
 	context->root_level = PT32_ROOT_LEVEL;
@@ -3694,16 +3692,15 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = false;
-	return 0;
 }
 
-static int paging32E_init_context(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu *context)
+static void paging32E_init_context(struct kvm_vcpu *vcpu,
+				   struct kvm_mmu *context)
 {
-	return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
+	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
-static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
@@ -3743,37 +3740,32 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 
 	update_permission_bitmask(vcpu, context, false);
 	update_last_pte_bitmap(vcpu, context);
-
-	return 0;
 }
 
-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
-	int r;
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (!is_paging(vcpu))
-		r = nonpaging_init_context(vcpu, context);
+		nonpaging_init_context(vcpu, context);
 	else if (is_long_mode(vcpu))
-		r = paging64_init_context(vcpu, context);
+		paging64_init_context(vcpu, context);
 	else if (is_pae(vcpu))
-		r = paging32E_init_context(vcpu, context);
+		paging32E_init_context(vcpu, context);
 	else
-		r = paging32_init_context(vcpu, context);
+		paging32_init_context(vcpu, context);
 
 	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 	vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
 	vcpu->arch.mmu.base_role.smep_andnot_wp
 		= smep && !is_write_protection(vcpu);
-
-	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 		bool execonly)
 {
 	ASSERT(vcpu);
@@ -3793,24 +3785,19 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 
 	update_permission_bitmask(vcpu, context, true);
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
-static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
+	kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
 
 	vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
 	vcpu->arch.walk_mmu->get_cr3           = get_cr3;
 	vcpu->arch.walk_mmu->get_pdptr         = kvm_pdptr_read;
 	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-
-	return r;
 }
 
-static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
@@ -3847,11 +3834,9 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 
 	update_permission_bitmask(vcpu, g_context, false);
 	update_last_pte_bitmap(vcpu, g_context);
-
-	return 0;
 }
 
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	if (mmu_is_nested(vcpu))
 		return init_kvm_nested_mmu(vcpu);
@@ -3861,12 +3846,12 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 		return init_kvm_softmmu(vcpu);
 }
 
-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
 
 	kvm_mmu_unload(vcpu);
-	return init_kvm_mmu(vcpu);
+	init_kvm_mmu(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
 
@@ -4250,12 +4235,12 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 	return alloc_mmu_pages(vcpu);
 }
 
-int kvm_mmu_setup(struct kvm_vcpu *vcpu)
+void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-	return init_kvm_mmu(vcpu);
+	init_kvm_mmu(vcpu);
 }
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -70,8 +70,8 @@ enum {
 };
 
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 		bool execonly);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1959,11 +1959,9 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 	nested_svm_vmexit(svm);
 }
 
-static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
-	int r;
-
-	r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+	kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
 
 	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
 	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
@@ -1971,8 +1969,6 @@ static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
 	vcpu->arch.mmu.shadow_root_level = get_npt_level();
 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
-
-	return r;
 }
 
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7499,9 +7499,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
 	return get_vmcs12(vcpu)->ept_pointer;
 }
 
-static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 {
-	int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+	kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
 			nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
 
 	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
@@ -7509,8 +7509,6 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
 
 	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
-
-	return r;
 }
 
 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6699,7 +6699,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	if (r)
 		return r;
 	kvm_vcpu_reset(vcpu);
-	r = kvm_mmu_setup(vcpu);
+	kvm_mmu_setup(vcpu);
 	vcpu_put(vcpu);
 
 	return r;