Commit c23e2b71 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Allow page-sized MMU caches to be initialized with custom 64-bit values

Add support to MMU caches for initializing a page with a custom 64-bit
value, e.g. to pre-fill an entire page table with non-zero PTE values.
The functionality will be used by x86 to support Intel's TDX, which needs
to set bit 63 in all non-present PTEs in order to prevent !PRESENT page
faults from getting reflected into the guest (Intel's EPT Violation #VE
architecture made the less than brilliant decision of having the per-PTE
behavior be opt-out instead of opt-in).
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Message-Id: <5919f685f109a1b0ebc6bd8fc4536ee94bcc172d.1705965635.git.isaku.yamahata@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a96cb3bf
...@@ -86,6 +86,7 @@ struct gfn_to_pfn_cache { ...@@ -86,6 +86,7 @@ struct gfn_to_pfn_cache {
struct kvm_mmu_memory_cache { struct kvm_mmu_memory_cache {
gfp_t gfp_zero; gfp_t gfp_zero;
gfp_t gfp_custom; gfp_t gfp_custom;
u64 init_value;
struct kmem_cache *kmem_cache; struct kmem_cache *kmem_cache;
int capacity; int capacity;
int nobjs; int nobjs;
......
...@@ -401,12 +401,17 @@ static void kvm_flush_shadow_all(struct kvm *kvm) ...@@ -401,12 +401,17 @@ static void kvm_flush_shadow_all(struct kvm *kvm)
/*
 * Allocate a single object for the MMU memory cache @mc.
 *
 * If the cache is backed by a kmem_cache, allocate from it; otherwise
 * allocate a whole page.  Page allocations may optionally be pre-filled
 * with a custom 64-bit pattern (mc->init_value), e.g. so x86 can install
 * non-zero "not-present" PTE values for TDX's #VE-suppression scheme
 * (see the commit message: bit 63 must be set in all non-present PTEs).
 *
 * mc->init_value is valid only for page allocations and conflicts with
 * __GFP_ZERO; the combination is rejected in __kvm_mmu_topup_memory_cache().
 *
 * Returns the allocated object, or NULL on allocation failure.
 */
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	void *page;

	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);

	page = (void *)__get_free_page(gfp_flags);
	if (page && mc->init_value)
		memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));

	return page;
}
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min) int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
...@@ -421,6 +426,13 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, ...@@ -421,6 +426,13 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity,
if (WARN_ON_ONCE(!capacity)) if (WARN_ON_ONCE(!capacity))
return -EIO; return -EIO;
/*
* Custom init values can be used only for page allocations,
* and obviously conflict with __GFP_ZERO.
*/
if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
return -EIO;
mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp); mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
if (!mc->objects) if (!mc->objects)
return -ENOMEM; return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment