Commit 8120337a authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Stop using software available bits to denote MMIO SPTEs

Stop tagging MMIO SPTEs with specific available bits and instead detect
MMIO SPTEs by checking for their unique SPTE value.  The value is
guaranteed to be unique on shadow paging and NPT, as setting reserved
physical address bits on any other type of SPTE would constitute a KVM
bug.  Ditto for EPT, as creating a WX non-MMIO SPTE would also be a bug.
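
As a rough standalone illustration of the mask/value scheme described above
(not KVM code: the DEMO_* constants, demo_is_mmio_spte() helper, and example
SPTE values are made up, but the check mirrors the reworked is_mmio_spte()
in this patch), an MMIO SPTE is any SPTE whose masked bits equal the
configured MMIO value, a pattern no legal non-MMIO SPTE is allowed to carry:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the EPT case: bits 2:0 are R/W/X. */
    #define DEMO_EPT_RWX_MASK  0x7ull   /* plays the role of mmio_mask */
    #define DEMO_EPT_WX_VALUE  0x6ull   /* plays the role of mmio_value (110b) */

    static uint64_t shadow_mmio_value;
    static uint64_t shadow_mmio_mask;

    /* Same shape as the reworked is_mmio_spte(): compare under the mask. */
    static bool demo_is_mmio_spte(uint64_t spte)
    {
            return (spte & shadow_mmio_mask) == shadow_mmio_value &&
                   shadow_mmio_value;
    }

    int main(void)
    {
            shadow_mmio_value = DEMO_EPT_WX_VALUE;
            shadow_mmio_mask  = DEMO_EPT_RWX_MASK;

            /* Made-up SPTEs: one MMIO (GFN bits + WX), one ordinary RWX mapping. */
            uint64_t mmio_spte   = 0x1234000ull | DEMO_EPT_WX_VALUE;
            uint64_t normal_spte = 0x5678000ull | 0x7ull;

            printf("mmio:   %d\n", demo_is_mmio_spte(mmio_spte));   /* 1 */
            printf("normal: %d\n", demo_is_mmio_spte(normal_spte)); /* 0: RWX=111b != 110b */
            return 0;
    }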

Note, this approach is also future-compatible with TDX, which will need
to reflect MMIO EPT violations as #VEs into the guest.  To create an EPT
violation instead of a misconfig, TDX EPTs will need to have RWX=0.  But
MMIO SPTEs will also be the only case where KVM clears SUPPRESS_VE, so
MMIO SPTEs will still be guaranteed to have a unique value within a given
MMU context.

The main motivation is to make it easier to reason about which types of
SPTEs use which available bits.  As a happy side effect, this frees up
two more bits for storing the MMIO generation.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210225204749.1512652-11-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c236d962
@@ -59,7 +59,7 @@ static __always_inline u64 rsvd_bits(int s, int e)
 	return ((2ULL << (e - s)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
 
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
...
@@ -5770,7 +5770,7 @@ static void kvm_set_mmio_spte_mask(void)
 	else
 		mask = 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }
 
 static bool get_nx_auto_mode(void)
...
@@ -23,6 +23,7 @@ u64 __read_mostly shadow_user_mask;
 u64 __read_mostly shadow_accessed_mask;
 u64 __read_mostly shadow_dirty_mask;
 u64 __read_mostly shadow_mmio_value;
+u64 __read_mostly shadow_mmio_mask;
 u64 __read_mostly shadow_mmio_access_mask;
 u64 __read_mostly shadow_present_mask;
 u64 __read_mostly shadow_me_mask;
@@ -163,6 +164,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 		spte = mark_spte_for_access_track(spte);
 
 out:
+	WARN_ON(is_mmio_spte(spte));
 	*new_spte = spte;
 	return ret;
 }
@@ -244,7 +246,7 @@ u64 mark_spte_for_access_track(u64 spte)
 	return spte;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
 	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
@@ -260,10 +262,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
 			  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
 		mmio_value = 0;
 
-	if (mmio_value)
-		shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
-	else
-		shadow_mmio_value = 0;
+	WARN_ON((mmio_value & mmio_mask) != mmio_value);
+	shadow_mmio_value = mmio_value;
+	shadow_mmio_mask = mmio_mask;
 	shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
...
@@ -8,15 +8,11 @@
 #define PT_FIRST_AVAIL_BITS_SHIFT 10
 #define PT64_SECOND_AVAIL_BITS_SHIFT 54
 
-/*
- * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
- * Access Tracking SPTEs.
- */
+/* The mask used to denote Access Tracking SPTEs.  Note, val=3 is available. */
 #define SPTE_SPECIAL_MASK (3ULL << 52)
 #define SPTE_AD_ENABLED_MASK (0ULL << 52)
 #define SPTE_AD_DISABLED_MASK (1ULL << 52)
 #define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
-#define SPTE_MMIO_MASK (3ULL << 52)
 
 #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
 #define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
@@ -98,6 +94,7 @@ extern u64 __read_mostly shadow_user_mask;
 extern u64 __read_mostly shadow_accessed_mask;
 extern u64 __read_mostly shadow_dirty_mask;
 extern u64 __read_mostly shadow_mmio_value;
+extern u64 __read_mostly shadow_mmio_mask;
 extern u64 __read_mostly shadow_mmio_access_mask;
 extern u64 __read_mostly shadow_present_mask;
 extern u64 __read_mostly shadow_me_mask;
@@ -167,7 +164,8 @@ extern u8 __read_mostly shadow_phys_bits;
 
 static inline bool is_mmio_spte(u64 spte)
 {
-	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
+	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
+	       likely(shadow_mmio_value);
 }
 
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
...
@@ -881,7 +881,7 @@ static __init void svm_adjust_mmio_mask(void)
 	 */
 	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
 static void svm_hardware_teardown(void)
...
@@ -4320,7 +4320,8 @@ static void ept_set_mmio_spte_mask(void)
 	 * EPT Misconfigurations can be generated if the value of bits 2:0
 	 * of an EPT paging-structure entry is 110b (write/execute).
 	 */
-	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
+				   VMX_EPT_RWX_MASK, 0);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0