Commit 8ca6f063 authored by Ben Gardon, committed by Paolo Bonzini

KVM: x86/mmu: Re-add const qualifier in kvm_tdp_mmu_zap_collapsible_sptes

kvm_tdp_mmu_zap_collapsible_sptes unnecessarily removes the const
qualifier from its memslot argument, leading to a compiler warning. Add
the const annotation and pass it to subsequent functions.
Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-2-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e1eed584
...@@ -715,8 +715,7 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) ...@@ -715,8 +715,7 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
* handling slots that are not large page aligned. * handling slots that are not large page aligned.
*/ */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
struct kvm_memory_slot *slot, const struct kvm_memory_slot *slot, int level)
int level)
{ {
unsigned long idx; unsigned long idx;
...@@ -2702,7 +2701,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) ...@@ -2702,7 +2701,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
} }
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
struct kvm_memory_slot *slot) const struct kvm_memory_slot *slot)
{ {
unsigned long hva; unsigned long hva;
pte_t *pte; pte_t *pte;
...@@ -2728,8 +2727,9 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, ...@@ -2728,8 +2727,9 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
return level; return level;
} }
int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, int kvm_mmu_max_mapping_level(struct kvm *kvm,
gfn_t gfn, kvm_pfn_t pfn, int max_level) const struct kvm_memory_slot *slot, gfn_t gfn,
kvm_pfn_t pfn, int max_level)
{ {
struct kvm_lpage_info *linfo; struct kvm_lpage_info *linfo;
......
...@@ -161,8 +161,9 @@ enum { ...@@ -161,8 +161,9 @@ enum {
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
#define SET_SPTE_SPURIOUS BIT(2) #define SET_SPTE_SPURIOUS BIT(2)
int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, int kvm_mmu_max_mapping_level(struct kvm *kvm,
gfn_t gfn, kvm_pfn_t pfn, int max_level); const struct kvm_memory_slot *slot, gfn_t gfn,
kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp, int max_level, kvm_pfn_t *pfnp,
bool huge_page_disallowed, int *req_level); bool huge_page_disallowed, int *req_level);
......
...@@ -1190,7 +1190,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, ...@@ -1190,7 +1190,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
*/ */
static bool zap_collapsible_spte_range(struct kvm *kvm, static bool zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root, struct kvm_mmu_page *root,
struct kvm_memory_slot *slot, const struct kvm_memory_slot *slot,
bool flush) bool flush)
{ {
gfn_t start = slot->base_gfn; gfn_t start = slot->base_gfn;
...@@ -1231,7 +1231,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm, ...@@ -1231,7 +1231,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
* be replaced by large mappings, for GFNs within the slot. * be replaced by large mappings, for GFNs within the slot.
*/ */
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_memory_slot *slot, bool flush) const struct kvm_memory_slot *slot,
bool flush)
{ {
struct kvm_mmu_page *root; struct kvm_mmu_page *root;
......
...@@ -53,7 +53,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, ...@@ -53,7 +53,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
gfn_t gfn, unsigned long mask, gfn_t gfn, unsigned long mask,
bool wrprot); bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_memory_slot *slot, bool flush); const struct kvm_memory_slot *slot,
bool flush);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn); struct kvm_memory_slot *slot, gfn_t gfn);
......
...@@ -1130,7 +1130,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) ...@@ -1130,7 +1130,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
} }
static inline unsigned long static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{ {
return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment