Commit e7bf7a49 authored by Ricardo Koller, committed by Oliver Upton

KVM: arm64: Split huge pages when dirty logging is enabled

Split huge pages eagerly when enabling dirty logging. The goal is to
avoid doing it while faulting on write-protected pages, which
negatively impacts guest performance.
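
How dirty logging gets enabled is unchanged by this patch; for context, a
minimal userspace sketch is shown below. It re-registers a memslot with the
standard KVM_MEM_LOG_DIRTY_PAGES flag via KVM_SET_USER_MEMORY_REGION, which
is the change whose commit now triggers the eager split (with
initial-all-set, the diff's comment notes that the work instead happens
gradually via the CLEAR ioctls). The helper name and the
vm_fd/slot/gpa/size/hva parameters are illustrative only and assumed to be
set up elsewhere.

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  /* Illustrative only: re-register an existing memslot with dirty logging. */
  static int enable_dirty_logging(int vm_fd, __u32 slot, __u64 gpa,
                                  __u64 size, void *hva)
  {
          struct kvm_userspace_memory_region region = {
                  .slot = slot,
                  .flags = KVM_MEM_LOG_DIRTY_PAGES,
                  .guest_phys_addr = gpa,
                  .memory_size = size,
                  .userspace_addr = (__u64)(unsigned long)hva,
          };

          return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
  }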

A memslot marked for dirty logging is split one 1GB chunk at a time.
Working in 1GB chunks allows the mmu_lock to be released periodically so
other kernel threads get a chance to run, and it bounds the number of
pages that must be allocated up front to what is needed to split a 1GB
range worth of huge pages (or a single 1GB huge page). Note that these
page allocations can fail, so eager page splitting is best-effort. This
is not a correctness issue, though, as huge pages can still be split on
write-faults.
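
To make the sizing concrete: with a 4KB granule where both 1GB and 2MB
blocks are possible, splitting one 1GB chunk takes 1 page-table page to
break the 1GB block into 2MB blocks, plus 512 more to break each 2MB block
into 4KB PTEs, i.e. 513 pages topped up per chunk. The userspace sketch
below mirrors the arithmetic of the new kvm_mmu_split_nr_page_tables()
helper; SZ_1G, SZ_2M and div_round_up() are local stand-ins for the
kernel's constants and DIV_ROUND_UP_ULL(), and the 4KB-granule assumption
is mine.

  #include <stdio.h>

  #define SZ_1G (1ULL << 30)   /* 1GB block size (PUD_SIZE with 4KB pages) */
  #define SZ_2M (1ULL << 21)   /* 2MB block size (PMD_SIZE with 4KB pages) */

  static unsigned long long div_round_up(unsigned long long n,
                                         unsigned long long d)
  {
          return (n + d - 1) / d;
  }

  int main(void)
  {
          unsigned long long chunk = SZ_1G;
          unsigned long long tables;

          /* one table to split the 1GB block into 2MB blocks ... */
          tables = div_round_up(chunk, SZ_1G);
          /* ... plus one table per 2MB block to split it into 4KB PTEs */
          tables += div_round_up(chunk, SZ_2M);

          printf("%llu page-table pages per 1GB chunk\n", tables); /* 513 */
          return 0;
  }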

Eager page splitting only takes effect when a huge page mapping already
exists in the stage-2 page table. Otherwise, the huge page is simply
mapped as multiple non-huge pages when it is faulted in.

The benefits of eager page splitting are the same as in x86, added
with commit a3fe5dbd ("KVM: x86/mmu: Split huge pages mapped by
the TDP MMU when dirty logging is enabled"). For example, when running
dirty_log_perf_test with 64 virtual CPUs (Ampere Altra), 1GB per vCPU,
50% reads, and 2MB HugeTLB memory, the time it takes vCPUs to access
all of their memory after dirty logging is enabled decreased by 44%
from 2.58s to 1.42s.
Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-10-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent ce2b6022
@@ -31,14 +31,21 @@ static phys_addr_t __ro_after_init hyp_idmap_vector;

 static unsigned long __ro_after_init io_map_base;

-static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
+                                           phys_addr_t size)
 {
-        phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
         phys_addr_t boundary = ALIGN_DOWN(addr + size, size);

         return (boundary - 1 < end - 1) ? boundary : end;
 }

+static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+        phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
+
+        return __stage2_range_addr_end(addr, end, size);
+}
+
 /*
  * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
  * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
@@ -75,6 +82,79 @@ static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr,
 #define stage2_apply_range_resched(mmu, addr, end, fn) \
         stage2_apply_range(mmu, addr, end, fn, true)

+/*
+ * Get the maximum number of page-tables pages needed to split a range
+ * of blocks into PAGE_SIZE PTEs. It assumes the range is already
+ * mapped at level 2, or at level 1 if allowed.
+ */
+static int kvm_mmu_split_nr_page_tables(u64 range)
+{
+        int n = 0;
+
+        if (KVM_PGTABLE_MIN_BLOCK_LEVEL < 2)
+                n += DIV_ROUND_UP_ULL(range, PUD_SIZE);
+        n += DIV_ROUND_UP_ULL(range, PMD_SIZE);
+        return n;
+}
+
+static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
+{
+        struct kvm_mmu_memory_cache *cache;
+        u64 chunk_size, min;
+
+        if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
+                return true;
+
+        chunk_size = kvm->arch.mmu.split_page_chunk_size;
+        min = kvm_mmu_split_nr_page_tables(chunk_size);
+        cache = &kvm->arch.mmu.split_page_cache;
+        return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
+}
+
+static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
+                                    phys_addr_t end)
+{
+        struct kvm_mmu_memory_cache *cache;
+        struct kvm_pgtable *pgt;
+        int ret, cache_capacity;
+        u64 next, chunk_size;
+
+        lockdep_assert_held_write(&kvm->mmu_lock);
+
+        chunk_size = kvm->arch.mmu.split_page_chunk_size;
+        cache_capacity = kvm_mmu_split_nr_page_tables(chunk_size);
+
+        if (chunk_size == 0)
+                return 0;
+
+        cache = &kvm->arch.mmu.split_page_cache;
+
+        do {
+                if (need_split_memcache_topup_or_resched(kvm)) {
+                        write_unlock(&kvm->mmu_lock);
+                        cond_resched();
+                        /* Eager page splitting is best-effort. */
+                        ret = __kvm_mmu_topup_memory_cache(cache,
+                                                           cache_capacity,
+                                                           cache_capacity);
+                        write_lock(&kvm->mmu_lock);
+                        if (ret)
+                                break;
+                }
+
+                pgt = kvm->arch.mmu.pgt;
+                if (!pgt)
+                        return -EINVAL;
+
+                next = __stage2_range_addr_end(addr, end, chunk_size);
+                ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
+                if (ret)
+                        break;
+        } while (addr = next, addr != end);
+
+        return ret;
+}
+
 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 {
         return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -793,6 +873,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
 void kvm_uninit_stage2_mmu(struct kvm *kvm)
 {
         kvm_free_stage2_pgd(&kvm->arch.mmu);
+        kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
 }

 static void stage2_unmap_memslot(struct kvm *kvm,
@@ -1019,6 +1100,34 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
         stage2_wp_range(&kvm->arch.mmu, start, end);
 }

+/**
+ * kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE
+ *                                 pages for memory slot
+ * @kvm:        The KVM pointer
+ * @slot:       The memory slot to split
+ *
+ * Acquires kvm->mmu_lock. Called with kvm->slots_lock mutex acquired,
+ * serializing operations for VM memory regions.
+ */
+static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
+{
+        struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
+        phys_addr_t start, end;
+
+        lockdep_assert_held(&kvm->slots_lock);
+
+        slots = kvm_memslots(kvm);
+        memslot = id_to_memslot(slots, slot);
+
+        start = memslot->base_gfn << PAGE_SHIFT;
+        end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+        write_lock(&kvm->mmu_lock);
+        kvm_mmu_split_huge_pages(kvm, start, end);
+        write_unlock(&kvm->mmu_lock);
+}
+
 /*
  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
  * dirty pages.
@@ -1812,8 +1921,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                         return;

                 /*
-                 * Pages are write-protected on either of these two
-                 * cases:
+                 * Huge and normal pages are write-protected and split
+                 * on either of these two cases:
                  *
                  * 1. with initial-all-set: gradually with CLEAR ioctls,
                  */
@@ -1825,6 +1934,16 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                  *    enabling dirty logging.
                  */
                 kvm_mmu_wp_memory_region(kvm, new->id);
+                kvm_mmu_split_memory_region(kvm, new->id);
+        } else {
+                /*
+                 * Free any leftovers from the eager page splitting cache. Do
+                 * this when deleting, moving, disabling dirty logging, or
+                 * creating the memslot (a nop). Doing it for deletes makes
+                 * sure we don't leak memory, and there's no need to keep the
+                 * cache around for any of the other cases.
+                 */
+                kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
         }
 }