Commit 89c313f2 authored by Vipin Sharma, committed by Sean Christopherson

KVM: x86/mmu: Atomically clear SPTE dirty state in the clear-dirty-log flow

Optimize the clearing of dirty state in TDP MMU SPTEs by doing an
atomic-AND (on SPTEs that have volatile bits) instead of the full XCHG
that currently ends up being invoked (see kvm_tdp_mmu_write_spte()).
Clearing _only_ the bit in question will allow KVM to skip the many
irrelevant checks in __handle_changed_spte() by avoiding any collateral
damage due to the XCHG writing all SPTE bits, e.g. the XCHG could race
with fast_page_fault() setting the W-bit and the CPU setting the D-bit,
and thus incorrectly drop the CPU's D-bit update.

Link: https://lore.kernel.org/all/Y9hXmz%2FnDOr1hQal@google.com
Signed-off-by: Vipin Sharma <vipinsh@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
[sean: split the switch to atomic-AND to a separate patch]
Link: https://lore.kernel.org/r/20230321220021.2119033-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 697c89be
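
To make the lost-update hazard described in the commit message concrete, here is a minimal userspace C sketch. It is not KVM code: the bit positions, variable names, and EXAMPLE_* macros are illustrative assumptions. It contrasts writing back a full stale value (the XCHG-style pattern) with an atomic fetch-and-AND that touches only the bit being cleared.

	/*
	 * Illustrative sketch only: shows why clearing one bit with an atomic
	 * AND cannot drop a concurrent update to other bits, whereas writing
	 * back a full stale snapshot can.  Bit layout and names are made up.
	 */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define EXAMPLE_D_BIT (1ull << 9)	/* stand-in for the dirty bit */
	#define EXAMPLE_W_BIT (1ull << 1)	/* stand-in for the writable bit */

	static _Atomic uint64_t example_spte = EXAMPLE_D_BIT;

	/* Racy pattern: read a snapshot, then write the whole value back. */
	static void clear_dirty_full_write(void)
	{
		uint64_t old = atomic_load(&example_spte);

		/*
		 * If hardware sets the D-bit or another thread sets the W-bit
		 * between the load above and the exchange below, that update
		 * is silently overwritten by the stale snapshot.
		 */
		atomic_exchange(&example_spte, old & ~EXAMPLE_D_BIT);
	}

	/* Safe pattern: clear only the target bit; other bits are untouched. */
	static uint64_t clear_dirty_atomic_and(void)
	{
		return atomic_fetch_and(&example_spte, ~EXAMPLE_D_BIT);
	}

	int main(void)
	{
		uint64_t old = clear_dirty_atomic_and();

		printf("atomic AND: old = %#llx, new = %#llx\n",
		       (unsigned long long)old,
		       (unsigned long long)atomic_load(&example_spte));

		atomic_store(&example_spte, EXAMPLE_D_BIT | EXAMPLE_W_BIT);
		clear_dirty_full_write();
		printf("full write: new = %#llx\n",
		       (unsigned long long)atomic_load(&example_spte));
		return 0;
	}

This mirrors the shape of the new tdp_mmu_clear_spte_bits() helper in the diff below: atomic64_fetch_and() returns the pre-clear SPTE value to the caller while leaving any bits concurrently set by hardware (or by another vCPU thread) intact.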
arch/x86/kvm/mmu/tdp_iter.h
@@ -58,6 +58,20 @@ static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
 	return old_spte;
 }
 
+static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
+					  u64 mask, int level)
+{
+	atomic64_t *sptep_atomic;
+
+	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
+		sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
+		return (u64)atomic64_fetch_and(~mask, sptep_atomic);
+	}
+
+	__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
+	return old_spte;
+}
+
 /*
  * A TDP iterator performs a pre-order walk over a TDP paging structure.
  */
arch/x86/kvm/mmu/tdp_mmu.c
@@ -770,13 +770,6 @@ static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
 	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
 }
 
-static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
-						 struct tdp_iter *iter,
-						 u64 new_spte)
-{
-	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
-}
-
 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
 	for_each_tdp_pte(_iter, _root, _start, _end)
 
@@ -1692,7 +1685,14 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 		if (!(iter.old_spte & dbit))
 			continue;
 
-		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, iter.old_spte & ~dbit);
+		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
+							iter.old_spte, dbit,
+							iter.level);
+
+		__handle_changed_spte(kvm, iter.as_id, iter.gfn, iter.old_spte,
+				      iter.old_spte & ~dbit, iter.level, false);
+		handle_changed_spte_acc_track(iter.old_spte, iter.old_spte & ~dbit,
+					      iter.level);
 	}
 
 	rcu_read_unlock();