Commit a83e2191 authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: pkvm: Refcount the pages shared with EL2

In order to simplify the page tracking infrastructure at EL2 in nVHE
protected mode, move the responsibility of refcounting pages that are
shared multiple times to the host. To do so, let's create a red-black
tree tracking all the PFNs that have been shared, along with a
refcount.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211215161232.1480836-8-qperret@google.com
parent 3f868e14
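
The effect of the change is easiest to see in isolation. Below is a rough
userspace analogue of the refcounting scheme described above: a tree maps each
shared PFN to a count, and the (stubbed) EL2 call is issued only on the first
share of a page. POSIX tsearch(3)/tfind(3) stand in for the kernel rbtree API,
fake_host_share_hyp() is a made-up placeholder for the __pkvm_host_share_hyp
hypercall, and the kernel's mutex serialization is omitted since the sketch is
single-threaded.

#include <search.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_pfn {
	uint64_t pfn;
	int count;
};

static void *pfn_tree;	/* tsearch(3) root; glibc implements it as a red-black tree */

static int pfn_cmp(const void *a, const void *b)
{
	const struct shared_pfn *x = a, *y = b;

	return (x->pfn > y->pfn) - (x->pfn < y->pfn);
}

/* Made-up stand-in for the __pkvm_host_share_hyp hypercall. */
static int fake_host_share_hyp(uint64_t pfn)
{
	printf("EL2: share pfn 0x%llx\n", (unsigned long long)pfn);
	return 0;
}

static int share_pfn(uint64_t pfn)
{
	struct shared_pfn key = { .pfn = pfn }, *this;
	void *slot;

	slot = tfind(&key, &pfn_tree, pfn_cmp);
	if (slot) {
		/* Already shared once: only bump the host-side refcount. */
		this = *(struct shared_pfn **)slot;
		this->count++;
		return 0;
	}

	this = calloc(1, sizeof(*this));
	if (!this)
		return -1;
	this->pfn = pfn;
	this->count = 1;
	tsearch(this, &pfn_tree, pfn_cmp);

	/* First share of this page: the only time EL2 hears about it. */
	return fake_host_share_hyp(pfn);
}

int main(void)
{
	share_pfn(0x40001);	/* issues the (stubbed) EL2 call */
	share_pfn(0x40001);	/* refcount only, no EL2 call */
	return 0;
}

Two shares of the same PFN produce a single EL2 request; the second only bumps
the count, which mirrors what share_pfn_hyp() does in the diff below.
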
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -281,23 +281,72 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
 	}
 }
 
-static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
-{
-	phys_addr_t addr;
-	int ret;
-
-	for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
-		ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
-					__phys_to_pfn(addr));
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
+struct hyp_shared_pfn {
+	u64 pfn;
+	int count;
+	struct rb_node node;
+};
+
+static DEFINE_MUTEX(hyp_shared_pfns_lock);
+static struct rb_root hyp_shared_pfns = RB_ROOT;
+
+static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
+					      struct rb_node **parent)
+{
+	struct hyp_shared_pfn *this;
+
+	*node = &hyp_shared_pfns.rb_node;
+	*parent = NULL;
+	while (**node) {
+		this = container_of(**node, struct hyp_shared_pfn, node);
+		*parent = **node;
+		if (this->pfn < pfn)
+			*node = &((**node)->rb_left);
+		else if (this->pfn > pfn)
+			*node = &((**node)->rb_right);
+		else
+			return this;
+	}
+
+	return NULL;
+}
+
+static int share_pfn_hyp(u64 pfn)
+{
+	struct rb_node **node, *parent;
+	struct hyp_shared_pfn *this;
+	int ret = 0;
+
+	mutex_lock(&hyp_shared_pfns_lock);
+	this = find_shared_pfn(pfn, &node, &parent);
+	if (this) {
+		this->count++;
+		goto unlock;
+	}
+
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	if (!this) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	this->pfn = pfn;
+	this->count = 1;
+	rb_link_node(&this->node, parent, node);
+	rb_insert_color(&this->node, &hyp_shared_pfns);
+	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
+unlock:
+	mutex_unlock(&hyp_shared_pfns_lock);
+
+	return ret;
+}
 
 int kvm_share_hyp(void *from, void *to)
 {
+	phys_addr_t start, end, cur;
+	u64 pfn;
+	int ret;
+
 	if (is_kernel_in_hyp_mode())
 		return 0;
 
@@ -312,7 +361,16 @@ int kvm_share_hyp(void *from, void *to)
 	if (kvm_host_owns_hyp_mappings())
 		return create_hyp_mappings(from, to, PAGE_HYP);
 
-	return pkvm_share_hyp(__pa(from), __pa(to));
+	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+	end = PAGE_ALIGN(__pa(to));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		pfn = __phys_to_pfn(cur);
+		ret = share_pfn_hyp(pfn);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 /**
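
One detail of the new kvm_share_hyp() worth spelling out: the physical range is
rounded outward to page granularity, with ALIGN_DOWN pulling the start back to
a page boundary and PAGE_ALIGN pushing the end forward, so every page
overlapping [from, to) is shared exactly once. A standalone sketch of the same
rounding, assuming 4KiB pages and using local reimplementations of the helpers
(the kernel's ALIGN_DOWN takes the alignment as a second argument):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define ALIGN_DOWN(x)	((x) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t from = 0x40001234, to = 0x40003456;	/* arbitrary unaligned range */
	uint64_t start = ALIGN_DOWN(from);
	uint64_t end = PAGE_ALIGN(to);

	/* Every page overlapping [from, to) is visited exactly once. */
	for (uint64_t cur = start; cur < end; cur += PAGE_SIZE)
		printf("share pfn 0x%llx\n", (unsigned long long)(cur >> PAGE_SHIFT));

	return 0;
}

With these example values the walk covers pfns 0x40001 through 0x40003, each
passed to share_pfn_hyp() once.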