Commit 0b7cc254, authored by Vipin Sharma, committed by Sean Christopherson

KVM: x86/mmu: Remove "record_acc_track" in __tdp_mmu_set_spte()

Remove bool parameter "record_acc_track" from __tdp_mmu_set_spte() and
refactor the code. This variable is always set to true by its caller.

Remove single and double underscore prefix from tdp_mmu_set_spte()
related APIs:
1. Change __tdp_mmu_set_spte() to tdp_mmu_set_spte()
2. Change _tdp_mmu_set_spte() to tdp_mmu_iter_set_spte()
Signed-off-by: Vipin Sharma <vipinsh@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20230321220021.2119033-12-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 891f1159
...@@ -695,7 +695,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, ...@@ -695,7 +695,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
/* /*
* __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
* @kvm: KVM instance * @kvm: KVM instance
* @as_id: Address space ID, i.e. regular vs. SMM * @as_id: Address space ID, i.e. regular vs. SMM
* @sptep: Pointer to the SPTE * @sptep: Pointer to the SPTE
...@@ -703,18 +703,12 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, ...@@ -703,18 +703,12 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* @new_spte: The new value that will be set for the SPTE * @new_spte: The new value that will be set for the SPTE
* @gfn: The base GFN that was (or will be) mapped by the SPTE * @gfn: The base GFN that was (or will be) mapped by the SPTE
* @level: The level _containing_ the SPTE (its parent PT's level) * @level: The level _containing_ the SPTE (its parent PT's level)
* @record_acc_track: Notify the MM subsystem of changes to the accessed state
* of the page. Should be set unless handling an MMU
* notifier for access tracking. Leaving record_acc_track
* unset in that case prevents page accesses from being
* double counted.
* *
* Returns the old SPTE value, which _may_ be different than @old_spte if the * Returns the old SPTE value, which _may_ be different than @old_spte if the
 * SPTE had volatile bits. * SPTE had volatile bits.
*/ */
static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
u64 old_spte, u64 new_spte, gfn_t gfn, int level, u64 old_spte, u64 new_spte, gfn_t gfn, int level)
bool record_acc_track)
{ {
lockdep_assert_held_write(&kvm->mmu_lock); lockdep_assert_held_write(&kvm->mmu_lock);
...@@ -730,30 +724,19 @@ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, ...@@ -730,30 +724,19 @@ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level); old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
handle_changed_spte_acc_track(old_spte, new_spte, level);
if (record_acc_track)
handle_changed_spte_acc_track(old_spte, new_spte, level);
handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte, handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte,
level); level);
return old_spte; return old_spte;
} }
static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte, bool record_acc_track) u64 new_spte)
{ {
WARN_ON_ONCE(iter->yielded); WARN_ON_ONCE(iter->yielded);
iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte, new_spte,
iter->old_spte, new_spte, iter->gfn, iter->level);
iter->gfn, iter->level,
record_acc_track);
}
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte)
{
_tdp_mmu_set_spte(kvm, iter, new_spte, true);
} }
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \ #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
...@@ -845,7 +828,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, ...@@ -845,7 +828,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
continue; continue;
if (!shared) if (!shared)
tdp_mmu_set_spte(kvm, &iter, 0); tdp_mmu_iter_set_spte(kvm, &iter, 0);
else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
goto retry; goto retry;
} }
...@@ -902,8 +885,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) ...@@ -902,8 +885,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
return false; return false;
__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
sp->gfn, sp->role.level + 1, true); sp->gfn, sp->role.level + 1);
return true; return true;
} }
...@@ -937,7 +920,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, ...@@ -937,7 +920,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
!is_last_spte(iter.old_spte, iter.level)) !is_last_spte(iter.old_spte, iter.level))
continue; continue;
tdp_mmu_set_spte(kvm, &iter, 0); tdp_mmu_iter_set_spte(kvm, &iter, 0);
flush = true; flush = true;
} }
...@@ -1107,7 +1090,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, ...@@ -1107,7 +1090,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
if (ret) if (ret)
return ret; return ret;
} else { } else {
tdp_mmu_set_spte(kvm, iter, spte); tdp_mmu_iter_set_spte(kvm, iter, spte);
} }
tdp_account_mmu_page(kvm, sp); tdp_account_mmu_page(kvm, sp);
...@@ -1314,13 +1297,13 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, ...@@ -1314,13 +1297,13 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
* invariant that the PFN of a present * leaf SPTE can never change. * invariant that the PFN of a present * leaf SPTE can never change.
* See __handle_changed_spte(). * See __handle_changed_spte().
*/ */
tdp_mmu_set_spte(kvm, iter, 0); tdp_mmu_iter_set_spte(kvm, iter, 0);
if (!pte_write(range->pte)) { if (!pte_write(range->pte)) {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
pte_pfn(range->pte)); pte_pfn(range->pte));
tdp_mmu_set_spte(kvm, iter, new_spte); tdp_mmu_iter_set_spte(kvm, iter, new_spte);
} }
return true; return true;
...@@ -1805,7 +1788,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, ...@@ -1805,7 +1788,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
if (new_spte == iter.old_spte) if (new_spte == iter.old_spte)
break; break;
tdp_mmu_set_spte(kvm, &iter, new_spte); tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
spte_set = true; spte_set = true;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment