Commit e2b6d941 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.15, take #2

- Properly refcount pages used as a concatenated stage-2 PGD
- Fix missing unlock when detecting the use of MTE+VM_SHARED
parents 019057bd 6e6a8ef0
arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -24,6 +24,7 @@ struct hyp_pool {
 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_pool, get_order(size));
+	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	/*
+	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+	 * so there should be no need to free any of the tail pages to make the
+	 * allocation exact.
+	 */
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+	return addr;
 }

 static void *host_s2_zalloc_page(void *pool)
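
Why this is enough: the host stage-2 PGD may be several concatenated pages allocated as one high-order block, and hyp_split_page() (added below in page_alloc.c) breaks that block into individually refcounted order-0 pages. The WARN_ON holds because get_order() rounds a request up to the next power-of-two number of pages, so PAGE_SIZE << get_order(size) only equals size when size is already an exact power-of-two multiple of PAGE_SIZE, which is always true for concatenated PGDs. A minimal userspace sketch of that arithmetic (get_order() reimplemented here for illustration; a 4K PAGE_SIZE is assumed):

```c
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Same contract as the kernel's get_order() for nonzero sizes:
 * smallest order such that 2^order pages cover the request. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* pages needed */
	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* 16K == 4 pages: already a power of two, the WARN_ON stays silent. */
	assert((PAGE_SIZE << get_order(16384)) == 16384);

	/* A 12K request would round up to 16K, waste a tail page, and
	 * trip the WARN_ON above. */
	printf("12K request allocates %lu bytes\n",
	       PAGE_SIZE << get_order(12288));
	return 0;
}
```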
arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+	BUG_ON(!p->refcount);
 	p->refcount--;
 	return (p->refcount == 0);
 }
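
The added BUG_ON catches refcount underflow: an unbalanced hyp_put_page() on a page already at zero would otherwise wrap the counter and silently corrupt the allocator's state, so trapping at EL2 immediately is preferable. A small userspace illustration of the wrap this prevents (fake_hyp_page is a hypothetical stand-in, assuming hyp_page uses a 16-bit refcount):

```c
#include <stdio.h>

/* Assumed to mirror struct hyp_page's layout: a 16-bit refcount. */
struct fake_hyp_page {
	unsigned short refcount;
	unsigned short order;
};

static int ref_dec_and_test(struct fake_hyp_page *p)
{
	/* No BUG_ON here: this is the pre-fix behaviour. */
	p->refcount--;
	return p->refcount == 0;
}

int main(void)
{
	struct fake_hyp_page page = { .refcount = 1 };

	printf("balanced put frees the page: %d\n", ref_dec_and_test(&page));

	/* Unbalanced second put: 0 - 1 wraps to 65535, the page looks
	 * heavily referenced forever, and nothing traps the bug. */
	ref_dec_and_test(&page);
	printf("after a double put, refcount = %u\n", page.refcount);
	return 0;
}
```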
arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
 	hyp_spin_unlock(&pool->lock);
 }

+void hyp_split_page(struct hyp_page *p)
+{
+	unsigned short order = p->order;
+	unsigned int i;
+
+	p->order = 0;
+	for (i = 1; i < (1 << order); i++) {
+		struct hyp_page *tail = p + i;
+
+		tail->order = 0;
+		hyp_set_page_refcounted(tail);
+	}
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
 	unsigned short i = order;
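
hyp_split_page() rewrites the block's vmemmap entries in place: the head drops from its original order to order 0, and every tail page becomes an independent, refcounted order-0 page, much like the core kernel's split_page(). A self-contained model of the transformation (fake_hyp_page is a stand-in type for illustration):

```c
#include <assert.h>
#include <stdio.h>

struct fake_hyp_page {
	unsigned short refcount;
	unsigned short order;
};

/* Mirrors hyp_split_page(): demote the head to order 0 and give each
 * tail page its own refcount (hyp_set_page_refcounted() in the diff). */
static void split_page(struct fake_hyp_page *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1u << order); i++) {
		p[i].order = 0;
		p[i].refcount = 1;
	}
}

int main(void)
{
	/* One order-2 block: the head is refcounted, the tails are not yet. */
	struct fake_hyp_page pages[4] = { { .refcount = 1, .order = 2 } };
	unsigned int i;

	split_page(&pages[0]);
	for (i = 0; i < 4; i++)
		assert(pages[i].order == 0 && pages[i].refcount == 1);
	printf("split into 4 refcounted order-0 pages\n");
	return 0;
}
```

With this in place, hyp_put_page() on any page of the former block decrements only that page's count, which is exactly what releasing a concatenated PGD one page at a time requires.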
arch/arm64/kvm/mmu.c
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		 * when updating the PG_mte_tagged page flag, see
 		 * sanitise_mte_tags for more details.
 		 */
-		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-			return -EINVAL;
+		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+			ret = -EINVAL;
+			break;
+		}

 		if (vma->vm_flags & VM_PFNMAP) {
 			/* IO region dirty page logging not allowed */
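
kvm_arch_prepare_memory_region() walks the slot's VMAs with current->mm held under mmap_read_lock(); the old `return -EINVAL` bailed out of that walk with the lock still held, while `break` falls through to the function's common exit path, which unlocks before returning ret. A reduced userspace model of the pattern (a pthread rwlock stands in for the mmap lock; the kernel's actual control flow is more involved):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Models the fixed control flow: error paths break out of the VMA walk
 * and fall through to the unlock, instead of returning with it held. */
static int prepare_region(int vma_is_mte_shared)
{
	int ret = 0;

	pthread_rwlock_rdlock(&mmap_lock);	/* mmap_read_lock() */
	do {
		if (vma_is_mte_shared) {
			ret = -22;	/* -EINVAL */
			break;		/* old code returned here, leaking
					 * the read lock */
		}
		/* ... validate and map the next VMA ... */
	} while (0);
	pthread_rwlock_unlock(&mmap_lock);	/* mmap_read_unlock() */

	return ret;
}

int main(void)
{
	/* With the old `return`, the first call would leave mmap_lock
	 * read-held, blocking any later writer indefinitely. */
	printf("MTE + VM_SHARED: ret = %d\n", prepare_region(1));
	printf("ordinary VMA:    ret = %d\n", prepare_region(0));
	return 0;
}
```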