Commit 630a1685 authored by Andrew Murray, committed by Marc Zyngier

arm64: KVM: Encapsulate kvm_cpu_context in kvm_host_data

The virt/arm core allocates a kvm_cpu_context_t percpu; at present this is
a typedef to kvm_cpu_context and is used to store the host CPU context. The
kvm_cpu_context structure is also used elsewhere to hold vCPU context.
In order to use the percpu to hold additional future host information, we
encapsulate kvm_cpu_context in a new structure and rename the typedef and
percpu to match.
Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 21bb0ebf
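For orientation, here is a minimal C sketch of the shape this change gives the per-CPU host state, distilled from the hunks below. The structure, typedef, and per-CPU variable names match the patch; the helper at the end (host_ctxt_this_cpu) is a hypothetical illustration of the new access pattern, not something this patch adds, and the sketch assumes the kernel's usual percpu helpers and the existing kvm_cpu_context definition.

	/* Sketch only: kvm_cpu_context is the existing register-state structure,
	 * now wrapped so the per-CPU allocation can grow host-only fields later. */
	struct kvm_host_data {
		struct kvm_cpu_context host_ctxt;
	};

	typedef struct kvm_host_data kvm_host_data_t;

	/* The per-CPU variable is renamed from kvm_host_cpu_state to kvm_host_data. */
	DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);

	/* Hypothetical helper showing the new access pattern: callers that used
	 * this_cpu_ptr(&kvm_host_cpu_state) now go through the wrapper. */
	static inline struct kvm_cpu_context *host_ctxt_this_cpu(void)
	{
		return &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	}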
@@ -153,9 +153,13 @@ struct kvm_cpu_context {
 	u32 cp15[NR_CP15_REGS];
 };
 
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+	struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
 
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
 					     int cpu)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -185,7 +189,7 @@ struct kvm_vcpu_arch {
 	struct kvm_vcpu_fault_info fault;
 
 	/* Host FP context */
-	kvm_cpu_context_t *host_cpu_context;
+	struct kvm_cpu_context *host_cpu_context;
 
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
...
@@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void);
 .endm
 
 .macro get_host_ctxt reg, tmp
-	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+	add	\reg, \reg, #HOST_DATA_CONTEXT
 .endm
 
 .macro get_vcpu_ptr vcpu, ctxt
...
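In C terms, the reworked get_host_ctxt macro now yields the address of the host_ctxt member inside the per-CPU kvm_host_data rather than the base of the old kvm_host_cpu_state variable. A rough equivalent, assuming the sketch above (the function name is hypothetical and offsetof comes from the usual stddef/linux headers):

	/* Rough C equivalent of the updated assembly macro: per-CPU base of
	 * kvm_host_data plus the offset of host_ctxt, which is exactly what
	 * the #HOST_DATA_CONTEXT immediate encodes for the assembler. */
	static struct kvm_cpu_context *get_host_ctxt_sketch(void)
	{
		char *base = (char *)this_cpu_ptr(&kvm_host_data);

		return (struct kvm_cpu_context *)(base +
				offsetof(struct kvm_host_data, host_ctxt));
	}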
@@ -233,7 +233,11 @@ struct kvm_cpu_context {
 	struct kvm_vcpu *__hyp_running_vcpu;
 };
 
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+	struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
 
 struct vcpu_reset_state {
 	unsigned long	pc;
@@ -278,7 +282,7 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch external_debug_state;
 
 	/* Pointer to host CPU context */
-	kvm_cpu_context_t *host_cpu_context;
+	struct kvm_cpu_context *host_cpu_context;
 
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
@@ -483,9 +487,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
 					     int cpu)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -503,8 +507,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
 	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
-	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) -
-			 (u64)kvm_ksym_ref(kvm_host_cpu_state));
+	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
+			 (u64)kvm_ksym_ref(kvm_host_data));
 
 	/*
 	 * Call initialization code, and switch to the full blown HYP code.
...
@@ -134,6 +134,7 @@ int main(void)
   DEFINE(CPU_APGAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(HOST_CONTEXT_VCPU,	offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+  DEFINE(HOST_DATA_CONTEXT,	offsetof(struct kvm_host_data, host_ctxt));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
...
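The HOST_DATA_CONTEXT constant used by get_host_ctxt above comes from this asm-offsets entry, so the assembly never hard-codes the structure layout. As a hedged aside, a minimal sketch of the usual kbuild asm-offsets pattern looks roughly like the following; the DEFINE() body is the common form from memory, not something introduced by this patch, and with host_ctxt as the first member the offset is currently zero, but emitting it keeps the assembly correct if the layout ever changes.

	/* Assumed common form of the kbuild DEFINE() helper: the compiler folds
	 * offsetof() into an immediate, emits it as a marker line in the
	 * generated assembly, and the build turns that into a #define that
	 * assembly sources include via asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	int main(void)
	{
		/* Roughly becomes: #define HOST_DATA_CONTEXT 0
		 * (zero today, since host_ctxt is the first member). */
		DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
		return 0;
	}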
@@ -56,7 +56,7 @@
 __asm__(".arch_extension	virt");
 #endif
 
-DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -360,8 +360,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
+	kvm_host_data_t *cpu_data;
 
 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
 	 * We might get preempted before the vCPU actually runs, but
@@ -373,7 +375,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
+	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
 	kvm_arm_set_running_vcpu(vcpu);
 	kvm_vgic_load(vcpu);
@@ -1569,11 +1571,11 @@ static int init_hyp_mode(void)
 	}
 
 	for_each_possible_cpu(cpu) {
-		kvm_cpu_context_t *cpu_ctxt;
+		kvm_host_data_t *cpu_data;
 
-		cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
-		kvm_init_host_cpu_context(cpu_ctxt, cpu);
+		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
+		kvm_init_host_cpu_context(&cpu_data->host_ctxt, cpu);
 
-		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
 		if (err) {
 			kvm_err("Cannot map host CPU state: %d\n", err);
...
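The payoff of the wrapper is that later host-only per-CPU state can sit next to host_ctxt without enlarging kvm_cpu_context, which every vCPU also embeds, and it gets mapped to hyp by the same create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP) call shown above. A purely hypothetical illustration of such a future extension (the extra field is invented for the example, not part of this patch or any follow-up):

	/* Hypothetical future shape: additional host state rides alongside
	 * host_ctxt inside the same per-CPU allocation. */
	struct kvm_host_data {
		struct kvm_cpu_context host_ctxt;
		u64 example_host_flags;		/* invented field, illustration only */
	};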