Commit bc3888a0 authored by Will Deacon, committed by Oliver Upton

KVM: arm64: Allocate pages for hypervisor FF-A mailboxes

The FF-A proxy code needs to allocate its own buffer pair for
communication with EL3 and for forwarding calls from the host at EL1.

Reserve a couple of pages for this purpose and use them to initialise
the hypervisor's FF-A buffer structure.
Co-developed-by: Andrew Walbran <qwandor@google.com>
Signed-off-by: Andrew Walbran <qwandor@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230523101828.7328-4-will@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 12bdce4f
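
For orientation, the sketch below (not part of the patch) shows how a contiguous two-page allocation is carved into the hypervisor's TX and RX mailboxes, mirroring the layout that hyp_ffa_init() sets up in the diff that follows. The example_*/EXAMPLE_* names and the 4KiB page size are assumptions made for illustration only.

/*
 * Illustrative sketch, not kernel code: split one contiguous allocation
 * into a transmit/receive mailbox pair, one page each.
 */
#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_PAGE_SIZE	4096UL	/* assumed 4KiB pages */
#define EXAMPLE_MBOX_NR_PAGES	1UL	/* one page per mailbox */

struct example_ffa_buffers {
	void *tx;
	void *rx;
};

static void example_init_buffers(struct example_ffa_buffers *buf, void *pages)
{
	/* TX occupies the first page(s); RX follows immediately after. */
	buf->tx = pages;
	buf->rx = (char *)pages + EXAMPLE_MBOX_NR_PAGES * EXAMPLE_PAGE_SIZE;
}

int main(void)
{
	void *pages = aligned_alloc(EXAMPLE_PAGE_SIZE, 2 * EXAMPLE_PAGE_SIZE);
	struct example_ffa_buffers buf;

	example_init_buffers(&buf, pages);
	printf("tx=%p rx=%p\n", buf.tx, buf.rx);
	free(pages);
	return 0;
}

The TX/RX pairing follows the FF-A model of one transmit and one receive buffer per endpoint; the patch sizes each mailbox at a single page via KVM_FFA_MBOX_NR_PAGES.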
@@ -106,4 +106,12 @@ static inline unsigned long host_s2_pgtable_pages(void)
 	return res;
 }
 
+#define KVM_FFA_MBOX_NR_PAGES	1
+
+static inline unsigned long hyp_ffa_proxy_pages(void)
+{
+	/* A page each for the hypervisor's RX and TX mailboxes. */
+	return 2 * KVM_FFA_MBOX_NR_PAGES;
+}
+
 #endif	/* __ARM64_KVM_PKVM_H__ */
@@ -11,7 +11,7 @@
 #define FFA_MIN_FUNC_NUM 0x60
 #define FFA_MAX_FUNC_NUM 0x7F
 
-int hyp_ffa_init(void);
+int hyp_ffa_init(void *pages);
 bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
 
 #endif /* __KVM_HYP_FFA_H */
@@ -28,8 +28,11 @@
 #include <linux/arm-smccc.h>
 #include <linux/arm_ffa.h>
+#include <asm/kvm_pkvm.h>
+
 #include <nvhe/ffa.h>
 #include <nvhe/trap_handler.h>
+#include <nvhe/spinlock.h>
 
 /*
  * "ID value 0 must be returned at the Non-secure physical FF-A instance"
@@ -37,6 +40,19 @@
  */
 #define HOST_FFA_ID 0
 
+struct kvm_ffa_buffers {
+	hyp_spinlock_t lock;
+	void *tx;
+	void *rx;
+};
+
+/*
+ * Note that we don't currently lock these buffers explicitly, instead
+ * relying on the locking of the host FFA buffers as we only have one
+ * client.
+ */
+static struct kvm_ffa_buffers hyp_buffers;
+
 static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
 {
 	*res = (struct arm_smccc_res) {
@@ -124,7 +140,7 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
 	return true;
 }
 
-int hyp_ffa_init(void)
+int hyp_ffa_init(void *pages)
 {
 	struct arm_smccc_res res;
 
@@ -145,5 +161,11 @@ int hyp_ffa_init(void)
 	if (res.a2 != HOST_FFA_ID)
 		return -EINVAL;
 
+	hyp_buffers = (struct kvm_ffa_buffers) {
+		.lock = __HYP_SPIN_LOCK_UNLOCKED,
+		.tx = pages,
+		.rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE),
+	};
+
 	return 0;
 }
@@ -29,6 +29,7 @@ static void *vmemmap_base;
 static void *vm_table_base;
 static void *hyp_pgt_base;
 static void *host_s2_pgt_base;
+static void *ffa_proxy_pages;
 static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 static struct hyp_pool hpool;
 
@@ -58,6 +59,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
 	if (!host_s2_pgt_base)
 		return -ENOMEM;
 
+	nr_pages = hyp_ffa_proxy_pages();
+	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
+	if (!ffa_proxy_pages)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -315,7 +321,7 @@ void __noreturn __pkvm_init_finalise(void)
 	if (ret)
 		goto out;
 
-	ret = hyp_ffa_init();
+	ret = hyp_ffa_init(ffa_proxy_pages);
 	if (ret)
 		goto out;
@@ -78,6 +78,7 @@ void __init kvm_hyp_reserve(void)
 	hyp_mem_pages += host_s2_pgtable_pages();
 	hyp_mem_pages += hyp_vm_table_pages();
 	hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
+	hyp_mem_pages += hyp_ffa_proxy_pages();
 
 	/*
 	 * Try to allocate a PMD-aligned region to reduce TLB pressure once
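
To make the extra reservation concrete, here is a small standalone check of the sizing arithmetic; the page sizes are illustrative, since the kernel's actual PAGE_SIZE depends on its page-size configuration.

#include <assert.h>
#include <stdio.h>

/* Mirrors the definitions added in the first hunk above. */
#define KVM_FFA_MBOX_NR_PAGES	1UL

static unsigned long hyp_ffa_proxy_pages(void)
{
	/* A page each for the hypervisor's RX and TX mailboxes. */
	return 2 * KVM_FFA_MBOX_NR_PAGES;
}

int main(void)
{
	/* Assumed page sizes for illustration: 4KiB and 64KiB kernels. */
	const unsigned long page_sizes[] = { 4096UL, 65536UL };

	assert(hyp_ffa_proxy_pages() == 2);

	for (unsigned int i = 0; i < 2; i++)
		printf("%lu mailbox pages -> %lu bytes added to the hyp reservation\n",
		       hyp_ffa_proxy_pages(),
		       hyp_ffa_proxy_pages() * page_sizes[i]);

	return 0;
}

In other words, kvm_hyp_reserve() grows by two pages: 8KiB on a 4KiB-page kernel, or 128KiB on a 64KiB-page kernel.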