Commit d53a4c8e authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm fixes for 5.4, take #1

- Remove the now obsolete hyp_alternate_select construct
- Fix the TRACE_INCLUDE_PATH macro in the vgic code
parents 24c29b7a aac60f1a
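
Background for the first fix (a sketch, not part of the patch below): hyp_alternate_select() generated a tiny function returning one of two function pointers, patched at boot via the alternatives framework, so every caller had to use a double call such as fname()(args). Because cpus_have_const_cap() and has_vhe() are usable from hyp code and compile down to a patched, effectively constant branch, the same dispatch can be written as an ordinary if/else, which is what the diff below does. Roughly, with invented names (my_op, op_vhe, op_nvhe and ARM64_SOME_CAP are illustrative only):

    /*
     * Illustrative sketch only; my_op, op_vhe, op_nvhe and ARM64_SOME_CAP
     * are made-up names, not taken from this series.
     *
     * Old pattern: a patched function-pointer selector plus a double call.
     *
     *	static hyp_alternate_select(my_op, op_nvhe, op_vhe, ARM64_SOME_CAP);
     *	...
     *	my_op()(vcpu);
     *
     * New pattern: a plain branch on a capability check that the alternatives
     * framework resolves at boot.
     */
    static void __hyp_text my_op(struct kvm_vcpu *vcpu)
    {
    	if (cpus_have_const_cap(ARM64_SOME_CAP))
    		op_vhe(vcpu);
    	else
    		op_nvhe(vcpu);
    }

Call sites then become a single, ordinary call: my_op(vcpu);
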
arch/arm64/include/asm/kvm_hyp.h
@@ -47,30 +47,6 @@
 #define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond)			\
-typeof(orig) * __hyp_text fname(void)					\
-{									\
-	typeof(alt) *val = orig;					\
-	asm volatile(ALTERNATIVE("nop		\n",			\
-				 "mov	%0, %1	\n",			\
-				 cond)					\
-		     : "+r" (val) : "r" (alt));				\
-	return val;							\
-}
-
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
arch/arm64/kvm/hyp/switch.c
@@ -229,20 +229,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 	}
 }
 
-static bool __hyp_text __true_value(void)
-{
-	return true;
-}
-
-static bool __hyp_text __false_value(void)
-{
-	return false;
-}
-
-static hyp_alternate_select(__check_arm_834220,
-			    __false_value, __true_value,
-			    ARM64_WORKAROUND_834220);
-
 static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 {
 	u64 par, tmp;
@@ -298,7 +284,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	 * resolve the IPA using the AT instruction.
 	 */
 	if (!(esr & ESR_ELx_S1PTW) &&
-	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+	    (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
 		if (!__translate_far_to_hpfar(far, &hpfar))
 			return false;
 	} else {
arch/arm64/kvm/hyp/tlb.c
@@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 	isb();
 }
 
-static hyp_alternate_select(__tlb_switch_to_guest,
-			    __tlb_switch_to_guest_nvhe,
-			    __tlb_switch_to_guest_vhe,
-			    ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+					     struct tlb_inv_context *cxt)
+{
+	if (has_vhe())
+		__tlb_switch_to_guest_vhe(kvm, cxt);
+	else
+		__tlb_switch_to_guest_nvhe(kvm, cxt);
+}
 
 static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
 						struct tlb_inv_context *cxt)
@@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 	write_sysreg(0, vttbr_el2);
 }
 
-static hyp_alternate_select(__tlb_switch_to_host,
-			    __tlb_switch_to_host_nvhe,
-			    __tlb_switch_to_host_vhe,
-			    ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+					    struct tlb_inv_context *cxt)
+{
+	if (has_vhe())
+		__tlb_switch_to_host_vhe(kvm, cxt);
+	else
+		__tlb_switch_to_host_nvhe(kvm, cxt);
+}
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
@@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm, &cxt);
+	__tlb_switch_to_guest(kvm, &cxt);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	if (!has_vhe() && icache_is_vpipt())
 		__flush_icache_all();
 
-	__tlb_switch_to_host()(kvm, &cxt);
+	__tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm, &cxt);
+	__tlb_switch_to_guest(kvm, &cxt);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host()(kvm, &cxt);
+	__tlb_switch_to_host(kvm, &cxt);
 }
 
@@ -180,13 +188,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest()(kvm, &cxt);
+	__tlb_switch_to_guest(kvm, &cxt);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host()(kvm, &cxt);
+	__tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
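
Note on the tlb.c hunks above: the new __tlb_switch_to_guest()/__tlb_switch_to_host() wrappers branch on has_vhe(), which in kernels of this generation is itself a thin wrapper around the same patched capability check. Approximate shape of that helper (from asm/virt.h, reconstructed from memory and not part of this diff):

    /* Not part of this patch; rough sketch of the helper relied on above. */
    static __always_inline bool has_vhe(void)
    {
    	return cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN);
    }

So once the alternatives have been applied, the plain if/else costs no more than the old hyp_alternate_select() indirection did.
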
virt/kvm/arm/vgic/trace.h
@@ -30,7 +30,7 @@ TRACE_EVENT(vgic_update_irq_pending,
 #endif /* _TRACE_VGIC_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic
+#define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace