Commit f3d90f90 authored by Sean Christopherson

KVM: x86/mmu: Clean up mmu.c functions that put return type on separate line

Adjust a variety of functions in mmu.c to put the function return type on
the same line as the function declaration.  As stated in the Linus
specification:

  But the "on their own line" is complete garbage to begin with. That
  will NEVER be a kernel rule. We should never have a rule that assumes
  things are so long that they need to be on multiple lines.

  We don't put function return types on their own lines either, even if
  some other projects have that rule (just to get function names at the
  beginning of lines or some other odd reason).

Leave the functions generated by BUILD_MMU_ROLE_REGS_ACCESSOR() as-is,
that code is basically illegible no matter how it's formatted.

No functional change intended.

Link: https://lore.kernel.org/mm-commits/CAHk-=wjS-Jg7sGMwUPpDsjv392nDOOs0CtUtVkp=S6Q7JzFJRw@mail.gmail.com
Signed-off-by: Ben Gardon <bgardon@google.com>
Link: https://lore.kernel.org/r/20230202182809.1929122-4-bgardon@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent eddd9e83
...@@ -895,9 +895,9 @@ static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) ...@@ -895,9 +895,9 @@ static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
untrack_possible_nx_huge_page(kvm, sp); untrack_possible_nx_huge_page(kvm, sp);
} }
static struct kvm_memory_slot * static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, gfn_t gfn,
bool no_dirty_log) bool no_dirty_log)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
...@@ -960,9 +960,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, ...@@ -960,9 +960,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
return count; return count;
} }
static void static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc, int i)
struct pte_list_desc *desc, int i)
{ {
struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
int j = head_desc->spte_count - 1; int j = head_desc->spte_count - 1;
...@@ -1510,8 +1509,8 @@ struct slot_rmap_walk_iterator { ...@@ -1510,8 +1509,8 @@ struct slot_rmap_walk_iterator {
struct kvm_rmap_head *end_rmap; struct kvm_rmap_head *end_rmap;
}; };
static void static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) int level)
{ {
iterator->level = level; iterator->level = level;
iterator->gfn = iterator->start_gfn; iterator->gfn = iterator->start_gfn;
...@@ -1519,10 +1518,10 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) ...@@ -1519,10 +1518,10 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot); iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
} }
static void static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, const struct kvm_memory_slot *slot,
const struct kvm_memory_slot *slot, int start_level, int start_level, int end_level,
int end_level, gfn_t start_gfn, gfn_t end_gfn) gfn_t start_gfn, gfn_t end_gfn)
{ {
iterator->slot = slot; iterator->slot = slot;
iterator->start_level = start_level; iterator->start_level = start_level;
...@@ -3373,9 +3372,9 @@ static bool page_fault_can_be_fast(struct kvm_page_fault *fault) ...@@ -3373,9 +3372,9 @@ static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
* Returns true if the SPTE was fixed successfully. Otherwise, * Returns true if the SPTE was fixed successfully. Otherwise,
* someone else modified the SPTE from its original value. * someone else modified the SPTE from its original value.
*/ */
static bool static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, struct kvm_page_fault *fault,
u64 *sptep, u64 old_spte, u64 new_spte) u64 *sptep, u64 old_spte, u64 new_spte)
{ {
/* /*
* Theoretically we could also set dirty bit (and flush TLB) here in * Theoretically we could also set dirty bit (and flush TLB) here in
...@@ -4708,10 +4707,9 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, ...@@ -4708,10 +4707,9 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
#include "paging_tmpl.h" #include "paging_tmpl.h"
#undef PTTYPE #undef PTTYPE
static void static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, int level, bool nx,
u64 pa_bits_rsvd, int level, bool nx, bool gbpages, bool gbpages, bool pse, bool amd)
bool pse, bool amd)
{ {
u64 gbpages_bit_rsvd = 0; u64 gbpages_bit_rsvd = 0;
u64 nonleaf_bit8_rsvd = 0; u64 nonleaf_bit8_rsvd = 0;
...@@ -4824,9 +4822,9 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu, ...@@ -4824,9 +4822,9 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
guest_cpuid_is_amd_or_hygon(vcpu)); guest_cpuid_is_amd_or_hygon(vcpu));
} }
static void static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, bool execonly,
u64 pa_bits_rsvd, bool execonly, int huge_page_level) int huge_page_level)
{ {
u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
u64 large_1g_rsvd = 0, large_2m_rsvd = 0; u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
...@@ -4926,8 +4924,7 @@ static inline bool boot_cpu_is_amd(void) ...@@ -4926,8 +4924,7 @@ static inline bool boot_cpu_is_amd(void)
* the direct page table on host, use as much mmu features as * the direct page table on host, use as much mmu features as
* possible, however, kvm currently does not do execution-protection. * possible, however, kvm currently does not do execution-protection.
*/ */
static void static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
{ {
struct rsvd_bits_validate *shadow_zero_check; struct rsvd_bits_validate *shadow_zero_check;
int i; int i;
...@@ -5140,8 +5137,8 @@ static void paging32_init_context(struct kvm_mmu *context) ...@@ -5140,8 +5137,8 @@ static void paging32_init_context(struct kvm_mmu *context)
context->sync_spte = paging32_sync_spte; context->sync_spte = paging32_sync_spte;
} }
static union kvm_cpu_role static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) const struct kvm_mmu_role_regs *regs)
{ {
union kvm_cpu_role role = {0}; union kvm_cpu_role role = {0};
...@@ -6750,8 +6747,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) ...@@ -6750,8 +6747,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
} }
} }
static unsigned long static unsigned long mmu_shrink_scan(struct shrinker *shrink,
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) struct shrink_control *sc)
{ {
struct kvm *kvm; struct kvm *kvm;
int nr_to_scan = sc->nr_to_scan; int nr_to_scan = sc->nr_to_scan;
...@@ -6809,8 +6806,8 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) ...@@ -6809,8 +6806,8 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
return freed; return freed;
} }
static unsigned long static unsigned long mmu_shrink_count(struct shrinker *shrink,
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) struct shrink_control *sc)
{ {
return percpu_counter_read_positive(&kvm_total_used_mmu_pages); return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment