Commit 17f84520 authored by Marc Zyngier

Merge remote-tracking branch 'origin/kvm-arm64/misc-5.11' into kvmarm-master/queue

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents f86e5465 652d0b70
@@ -19,8 +19,8 @@ Two new SMCCC compatible hypercalls are defined:
 
 These are only available in the SMC64/HVC64 calling convention as
 paravirtualized time is not available to 32 bit Arm guests. The existence of
-the PV_FEATURES hypercall should be probed using the SMCCC 1.1 ARCH_FEATURES
-mechanism before calling it.
+the PV_TIME_FEATURES hypercall should be probed using the SMCCC 1.1
+ARCH_FEATURES mechanism before calling it.
 
 PV_TIME_FEATURES
 ============= ======== ==========
......
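For context, the probe order the corrected documentation describes is: query ARCH_FEATURES for PV_TIME_FEATURES, then query PV_TIME_FEATURES for the stolen-time service. A minimal guest-side sketch, modeled on the kernel's arm64 paravirt steal-time detection; the helper name example_has_pv_steal_time() is illustrative, not part of this commit:

#include <linux/arm-smccc.h>

/* Illustrative only: probe PV time in the order the documentation describes. */
static bool example_has_pv_steal_time(void)
{
	struct arm_smccc_res res;

	/* PV time needs an SMCCC 1.1+ conduit to the hypervisor. */
	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
		return false;

	/* Step 1: probe PV_TIME_FEATURES via the ARCH_FEATURES mechanism. */
	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
	if (res.a0 != SMCCC_RET_SUCCESS)
		return false;

	/* Step 2: ask PV_TIME_FEATURES whether stolen time (PV_TIME_ST) exists. */
	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
			     ARM_SMCCC_HV_PV_TIME_ST, &res);

	return res.a0 == SMCCC_RET_SUCCESS;
}

Only if both steps return SMCCC_RET_SUCCESS should the guest go on to call PV_TIME_ST to retrieve the IPA of its stolen-time structure.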
@@ -53,7 +53,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	struct pvclock_vcpu_stolen_time init_values = {};
 	struct kvm *kvm = vcpu->kvm;
 	u64 base = vcpu->arch.steal.base;
-	int idx;
 
 	if (base == GPA_INVALID)
 		return base;
@@ -63,10 +62,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	 * the feature enabled.
 	 */
 	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
-
-	idx = srcu_read_lock(&kvm->srcu);
-	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
-	srcu_read_unlock(&kvm->srcu, idx);
+	kvm_write_guest_lock(kvm, base, &init_values, sizeof(init_values));
 
 	return base;
 }
......
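The pvtime hunk above replaces the open-coded srcu_read_lock()/kvm_write_guest()/srcu_read_unlock() sequence with kvm_write_guest_lock(), which bundles the same locking. A rough sketch of what that helper does, assuming the arm64 KVM kvm_mmu.h helpers of this era (not copied from this commit, variable names illustrative):

/* Assumed shape of the helper the hunk switches to. */
#define kvm_write_guest_lock(kvm, gpa, data, len)			\
	({								\
		int __idx = srcu_read_lock(&(kvm)->srcu);		\
		int __ret = kvm_write_guest(kvm, gpa, data, len);	\
		srcu_read_unlock(&(kvm)->srcu, __idx);			\
		__ret;							\
	})

Dropping the local idx and the explicit lock/unlock is therefore a pure simplification: the guest write still happens under kvm->srcu, just via the helper.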
@@ -353,6 +353,18 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	return err;
 }
 
+void vgic_v4_commit(struct kvm_vcpu *vcpu)
+{
+	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+	/*
+	 * No need to wait for the vPE to be ready across a shallow guest
+	 * exit, as only a vcpu_put will invalidate it.
+	 */
+	if (!vpe->ready)
+		its_commit_vpe(vpe);
+}
+
 static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
 {
......
@@ -915,6 +915,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
+
+	if (vgic_supports_direct_msis(vcpu->kvm))
+		vgic_v4_commit(vcpu);
 }
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu)
......
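The vgic.c hunk above is the consumer side of the new vgic_v4_commit(): the vPE is made resident at vcpu_load time, and the potentially slow wait for the redistributor to finish parsing the VPT is pushed to the last moment before entering the guest, gated by the new vpe->ready flag so a shallow exit does not wait again. A simplified, illustrative ordering; example_enter_guest() is a placeholder, the other calls are the ones touched by this merge:

/* Illustrative ordering only, not a literal kernel path. */
static void example_enter_guest(struct kvm_vcpu *vcpu)
{
	/* vcpu_load time: make the vPE resident; HW starts parsing the VPT */
	vgic_v4_load(vcpu);

	/* ... the rest of the entry path overlaps with the VPT parse ... */

	/* just before running the guest: wait for the parse at most once */
	if (vgic_supports_direct_msis(vcpu->kvm))
		vgic_v4_commit(vcpu);	/* no-op once vpe->ready is set */
}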
@@ -3842,8 +3842,6 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
 	val |= GICR_VPENDBASER_Valid;
 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
-	its_wait_vpt_parse_complete();
 }
 
 static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3891,6 +3889,10 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
 		its_vpe_deschedule(vpe);
 		return 0;
 
+	case COMMIT_VPE:
+		its_wait_vpt_parse_complete();
+		return 0;
+
 	case INVALL_VPE:
 		its_vpe_invall(vpe);
 		return 0;
@@ -4052,8 +4054,6 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
 	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
 
 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
-	its_wait_vpt_parse_complete();
 }
 
 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
@@ -4128,6 +4128,10 @@ static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
 		its_vpe_4_1_deschedule(vpe, info);
 		return 0;
 
+	case COMMIT_VPE:
+		its_wait_vpt_parse_complete();
+		return 0;
+
 	case INVALL_VPE:
 		its_vpe_4_1_invall(vpe);
 		return 0;
......
@@ -232,6 +232,8 @@ int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
 	if (!ret)
 		vpe->resident = false;
 
+	vpe->ready = false;
+
 	return ret;
 }
@@ -258,6 +260,23 @@ int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
 	return ret;
 }
 
+int its_commit_vpe(struct its_vpe *vpe)
+{
+	struct its_cmd_info info = {
+		.cmd_type = COMMIT_VPE,
+	};
+	int ret;
+
+	WARN_ON(preemptible());
+
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->ready = true;
+
+	return ret;
+}
+
 int its_invall_vpe(struct its_vpe *vpe)
 {
 	struct its_cmd_info info = {
......
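For completeness, here is how the new COMMIT_VPE command is expected to reach the ITS driver above. The sketch assumes its_send_vpe_cmd() remains the existing thin wrapper around irq_set_vcpu_affinity() on the vPE's interrupt; example_send_commit() is illustrative only, not part of this commit:

/* Hedged sketch of the plumbing between irq-gic-v4.c and irq-gic-v3-its.c. */
static int example_send_commit(struct its_vpe *vpe)
{
	struct its_cmd_info info = {
		.cmd_type = COMMIT_VPE,
	};

	/*
	 * Assumed to be dispatched to its_vpe_set_vcpu_affinity() (GICv4.0)
	 * or its_vpe_4_1_set_vcpu_affinity() (GICv4.1), whose new COMMIT_VPE
	 * case polls for VPT parse completion.
	 */
	return irq_set_vcpu_affinity(vpe->irq, &info);
}

The WARN_ON(preemptible()) in its_commit_vpe() documents that this must run with preemption disabled, which holds on the KVM entry path that calls vgic_v4_commit().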
@@ -402,6 +402,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
				 struct kvm_kernel_irq_routing_entry *irq_entry);
 
 int vgic_v4_load(struct kvm_vcpu *vcpu);
+void vgic_v4_commit(struct kvm_vcpu *vcpu);
 int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
 
 #endif /* __KVM_ARM_VGIC_H */
#endif /* __KVM_ARM_VGIC_H */ #endif /* __KVM_ARM_VGIC_H */
@@ -39,6 +39,8 @@ struct its_vpe {
 	irq_hw_number_t vpe_db_lpi;
 	/* VPE resident */
 	bool resident;
+	/* VPT parse complete */
+	bool ready;
 	union {
 		/* GICv4.0 implementations */
 		struct {
@@ -104,6 +106,7 @@ enum its_vcpu_info_cmd_type {
 	PROP_UPDATE_AND_INV_VLPI,
 	SCHEDULE_VPE,
 	DESCHEDULE_VPE,
+	COMMIT_VPE,
 	INVALL_VPE,
 	PROP_UPDATE_VSGI,
 };
@@ -129,6 +132,7 @@ int its_alloc_vcpu_irqs(struct its_vm *vm);
 void its_free_vcpu_irqs(struct its_vm *vm);
 int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
 int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
+int its_commit_vpe(struct its_vpe *vpe);
 int its_invall_vpe(struct its_vpe *vpe);
 int its_map_vlpi(int irq, struct its_vlpi_map *map);
 int its_get_vlpi(int irq, struct its_vlpi_map *map);
......