Commit 04e5de03 authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Reserve memory for host stage 2

Extend the memory pool allocated for the hypervisor to include enough
pages to map all of memory at page granularity for the host stage 2.
While at it, also reserve some memory for device mappings.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-27-qperret@google.com
parent e37f37a0
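
To get a feel for what "map all of memory at page granularity" costs, here is a back-of-the-envelope sketch. It assumes a 4 KiB granule (512 entries per table, four levels); pgtable_max_pages() is an illustrative stand-in for the kernel's __hyp_pgtable_max_pages() helper, not the kernel code itself:

#include <stdio.h>

#define PTRS_PER_TABLE	512	/* entries per level with a 4 KiB granule */
#define TABLE_LEVELS	4

/* Worst-case table pages needed to map nr_pages with 4 KiB mappings. */
static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int level;

	for (level = 0; level < TABLE_LEVELS; level++) {
		nr_pages = (nr_pages + PTRS_PER_TABLE - 1) / PTRS_PER_TABLE;
		total += nr_pages;
	}
	return total;
}

int main(void)
{
	unsigned long mem_pages = (4UL << 30) >> 12;	/* 4 GiB of RAM */

	/* Prints 2054, i.e. ~8 MiB of tables (~0.2% of the memory mapped). */
	printf("%lu table pages\n", pgtable_max_pages(mem_pages));
	return 0;
}

The pool below is sized with exactly this kind of per-level division, once per memblock region and once per fixed 1 GiB budget.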
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -53,7 +53,7 @@ static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
 	return total;
 }
 
-static inline unsigned long hyp_s1_pgtable_pages(void)
+static inline unsigned long __hyp_pgtable_total_pages(void)
 {
 	unsigned long res = 0, i;
 
@@ -63,9 +63,34 @@ static inline unsigned long hyp_s1_pgtable_pages(void)
 		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
 	}
 
+	return res;
+}
+
+static inline unsigned long hyp_s1_pgtable_pages(void)
+{
+	unsigned long res;
+
+	res = __hyp_pgtable_total_pages();
+
 	/* Allow 1 GiB for private mappings */
 	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
 
 	return res;
 }
+
+static inline unsigned long host_s2_mem_pgtable_pages(void)
+{
+	/*
+	 * Include an extra 16 pages to safely upper-bound the worst case of
+	 * concatenated pgds.
+	 */
+	return __hyp_pgtable_total_pages() + 16;
+}
+
+static inline unsigned long host_s2_dev_pgtable_pages(void)
+{
+	/* Allow 1 GiB for MMIO mappings */
+	return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+}
+
 #endif /* __KVM_HYP_MM_H */
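
Two details above are worth spelling out. The "+ 16" in host_s2_mem_pgtable_pages() reflects the Arm architecture's stage-2 rule that up to 16 translation tables may be concatenated at the initial lookup level, so the stage-2 pgd alone can span as many as 16 pages. And each fixed 1 GiB budget is cheap; reusing pgtable_max_pages() from the sketch above (illustrative names, not kernel APIs), the cost works out as follows:

/* Reuses pgtable_max_pages() from the earlier sketch. */
static unsigned long host_s2_mem_pages(unsigned long mem_pages)
{
	/* Up to 16 stage-2 tables may be concatenated at the start level. */
	return pgtable_max_pages(mem_pages) + 16;
}

static unsigned long host_s2_dev_pages(void)
{
	/* 1 GiB = 262144 pages -> 512 + 1 + 1 + 1 = 515 table pages, ~2 MiB. */
	return pgtable_max_pages((1UL << 30) >> 12);
}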
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -24,6 +24,8 @@ unsigned long hyp_nr_cpus;
 
 static void *vmemmap_base;
 static void *hyp_pgt_base;
+static void *host_s2_mem_pgt_base;
+static void *host_s2_dev_pgt_base;
 
 static int divide_memory_pool(void *virt, unsigned long size)
 {
@@ -42,6 +44,16 @@ static int divide_memory_pool(void *virt, unsigned long size)
 	if (!hyp_pgt_base)
 		return -ENOMEM;
 
+	nr_pages = host_s2_mem_pgtable_pages();
+	host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages);
+	if (!host_s2_mem_pgt_base)
+		return -ENOMEM;
+
+	nr_pages = host_s2_dev_pgtable_pages();
+	host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages);
+	if (!host_s2_dev_pgt_base)
+		return -ENOMEM;
+
 	return 0;
 }
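
divide_memory_pool() carves these page-table arrays out of one big contiguous reservation, in order, using a simple bump allocator. A minimal user-space sketch with the same semantics as hyp_early_alloc_contig(), namely zeroed contiguous pages with NULL on exhaustion, might look like this (an illustration, not the kernel's early allocator):

#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT	12

static uintptr_t pool_cur;	/* next free byte in the pool */
static uintptr_t pool_end;	/* one past the end of the pool */

/* Hand the allocator a [virt, virt + size) region to carve up. */
static void early_alloc_init(void *virt, unsigned long size)
{
	pool_cur = (uintptr_t)virt;
	pool_end = pool_cur + size;
}

/* Allocate nr_pages zeroed, contiguous pages; NULL if the pool is spent. */
static void *early_alloc_contig(unsigned long nr_pages)
{
	unsigned long size = nr_pages << PAGE_SHIFT;
	void *ret = (void *)pool_cur;

	if (!nr_pages || size > pool_end - pool_cur)
		return NULL;

	pool_cur += size;
	memset(ret, 0, size);
	return ret;
}

A bump allocator suffices here because these early allocations happen once, in a fixed order, and are never freed.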
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -52,6 +52,8 @@ void __init kvm_hyp_reserve(void)
 	}
 
 	hyp_mem_pages += hyp_s1_pgtable_pages();
+	hyp_mem_pages += host_s2_mem_pgtable_pages();
+	hyp_mem_pages += host_s2_dev_pgtable_pages();
 
 	/*
 	 * The hyp_vmemmap needs to be backed by pages, but these pages
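
With the two new terms added, hyp_mem_pages now covers the hyp stage-1 tables, the host stage-2 tables for memory, and the 1 GiB MMIO budget. kvm_hyp_reserve() (truncated above) then turns that page count into a byte size and sets it aside from memblock early in boot. A rough sketch of that follow-on step as it looked around v5.12, not the verbatim kernel code:

	/*
	 * Sketch: convert pages to bytes and reserve a PMD-aligned range
	 * from memblock before the buddy allocator comes up.
	 */
	hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
	hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
					      ALIGN(hyp_mem_size, PMD_SIZE),
					      PMD_SIZE);
	if (!hyp_mem_base)
		return;
	memblock_reserve(hyp_mem_base, hyp_mem_size);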