Commit 115111ef authored by David Matlack; committed by Paolo Bonzini

KVM: x86/mmu: Check SPTE writable invariants when setting leaf SPTEs

Check SPTE writable invariants when setting SPTEs rather than in
spte_can_locklessly_be_made_writable(). By the time KVM checks
spte_can_locklessly_be_made_writable(), the SPTE has long since been
corrupted.

Note that these invariants only apply to shadow-present leaf SPTEs (i.e.
not to MMIO SPTEs, non-leaf SPTEs, etc.). Add a comment explaining the
restriction and only instrument the code paths that set shadow-present
leaf SPTEs.

To account for access tracking, also check the SPTE writable invariants
when marking an SPTE as an access track SPTE. This also lets us remove
a redundant WARN from mark_spte_for_access_track().
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220125230518.1697048-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 932859a4
...@@ -529,6 +529,7 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) ...@@ -529,6 +529,7 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
u64 old_spte = *sptep; u64 old_spte = *sptep;
WARN_ON(!is_shadow_present_pte(new_spte)); WARN_ON(!is_shadow_present_pte(new_spte));
check_spte_writable_invariants(new_spte);
if (!is_shadow_present_pte(old_spte)) { if (!is_shadow_present_pte(old_spte)) {
mmu_spte_set(sptep, new_spte); mmu_spte_set(sptep, new_spte);
......
...@@ -250,14 +250,7 @@ u64 mark_spte_for_access_track(u64 spte) ...@@ -250,14 +250,7 @@ u64 mark_spte_for_access_track(u64 spte)
if (is_access_track_spte(spte)) if (is_access_track_spte(spte))
return spte; return spte;
/* check_spte_writable_invariants(spte);
* Making an Access Tracking PTE will result in removal of write access
* from the PTE. So, verify that we will be able to restore the write
* access in the fast page fault path later on.
*/
WARN_ONCE((spte & PT_WRITABLE_MASK) &&
!spte_can_locklessly_be_made_writable(spte),
"kvm: Writable SPTE is not locklessly dirty-trackable\n");
WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK << WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
SHADOW_ACC_TRACK_SAVED_BITS_SHIFT), SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
......
...@@ -339,6 +339,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, ...@@ -339,6 +339,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
__is_rsvd_bits_set(rsvd_check, spte, level); __is_rsvd_bits_set(rsvd_check, spte, level);
} }
/* Note: spte must be a shadow-present leaf SPTE. */
static inline void check_spte_writable_invariants(u64 spte) static inline void check_spte_writable_invariants(u64 spte)
{ {
if (spte & shadow_mmu_writable_mask) if (spte & shadow_mmu_writable_mask)
...@@ -352,7 +353,6 @@ static inline void check_spte_writable_invariants(u64 spte) ...@@ -352,7 +353,6 @@ static inline void check_spte_writable_invariants(u64 spte)
static inline bool spte_can_locklessly_be_made_writable(u64 spte) static inline bool spte_can_locklessly_be_made_writable(u64 spte)
{ {
check_spte_writable_invariants(spte);
return spte & shadow_mmu_writable_mask; return spte & shadow_mmu_writable_mask;
} }
......
...@@ -452,6 +452,9 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, ...@@ -452,6 +452,9 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte); trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
if (is_leaf)
check_spte_writable_invariants(new_spte);
/* /*
* The only times a SPTE should be changed from a non-present to * The only times a SPTE should be changed from a non-present to
* non-present state is when an MMIO entry is installed/modified/ * non-present state is when an MMIO entry is installed/modified/
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment