Commit e951445f authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm fixes for 5.6, take #1

- Fix compilation on 32-bit
- Move VHE guest entry/exit into the VHE-specific entry code
- Make sure all functions called by the non-VHE HYP code are tagged as __always_inline
parents ef935c25 e43f1331
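
Why __always_inline matters here: on non-VHE systems the HYP code runs from its own __hyp_text section, and only that section (plus a few others) is mapped at EL2. A minimal, hypothetical sketch of the failure mode this merge addresses (function names invented for illustration, not code from the patch):

    /* Hypothetical illustration only. */
    static inline u32 fragile_helper(void)
    {
    	/* "inline" is just a hint: the compiler may emit this body
    	 * out-of-line in the regular .text section. */
    	return read_sysreg(id_aa64pfr0_el1);
    }

    static __always_inline u32 robust_helper(void)
    {
    	/* __always_inline forces the body into every caller, so it
    	 * lands inside __hyp_text along with the caller. */
    	return read_sysreg(id_aa64pfr0_el1);
    }

    static void __hyp_text hyp_entry_path(void)
    {
    	fragile_helper();	/* may branch to unmapped .text: hyp panic */
    	robust_helper();	/* always safe */
    }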
@@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
 
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1
...
@@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
 	isb();
 }
 
-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
 {
 	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
 	isb();
...
@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
 	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
 {
 	return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }
...
@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
 		return;
...
@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
 	return cpuid_feature_extract_signed_field_width(features, field, 4);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
 	return (u64)(features << (64 - width - field)) >> (64 - width);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
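
The double shift in cpuid_feature_extract_unsigned_field_width() isolates an arbitrary bit field without needing a mask constant; a quick worked example with illustrative values:

    /* Extract the 4-bit field at bit 8 of features = 0xA00:
     *
     *   features << (64 - 4 - 8)  ==  0xA000000000000000
     *   ...      >> (64 - 4)      ==  0xA
     *
     * so cpuid_feature_extract_unsigned_field_width(0xA00, 8, 4) == 0xA.
     */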
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
 	return val == 0x1;
 }
 
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
 {
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
@@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void)
 		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SVE) &&
 		cpus_have_const_cap(ARM64_SVE);
 }
 
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_CNP) &&
 		cpus_have_const_cap(ARM64_HAS_CNP);
...
@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 }
 
 #define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 }
 
 #define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
...
@@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
@@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 	vcpu->arch.vsesr_el2 = vsesr;
 }
 
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
 }
@@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
 	*__vcpu_elr_el1(vcpu) = v;
 }
 
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
 }
 
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return kvm_condition_valid32(vcpu);
@@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
  * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
  * AArch32 with banked registers.
  */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
-					 u8 reg_num)
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+						  u8 reg_num)
 {
 	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
-				unsigned long val)
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+					 unsigned long val)
 {
 	if (reg_num != 31)
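
The reg_num == 31 special case mirrors the AArch64 ISA, where register 31 in these encodings is the zero register; illustrative behaviour:

    /* Illustrative only:
     *   vcpu_get_reg(vcpu, 31)       == 0     (XZR reads as zero)
     *   vcpu_set_reg(vcpu, 31, val)  -> no-op (writes to XZR are dropped)
     */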
@@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
@@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 	return -1;
 }
 
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.far_el2;
 }
 
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
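
HPFAR_EL2 holds the upper bits of the faulting IPA (IPA[47:12] on a 48-bit PA system) in its FIPA field starting at bit 4, which is why the masked value is shifted left by 8 (4 + 8 = 12); a worked example with an illustrative value:

    /* If hpfar_el2 == 0x1230, then FIPA == 0x123 and
     *   (0x1230 & HPFAR_MASK) << 8 == 0x123000,
     * i.e. the fault lies in the IPA page at 0x123000.
     */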
@@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
@@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
 }
 
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
@@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }
 
-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }
 
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
 	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 }
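
The SAS-based size calculation in kvm_vcpu_dabt_get_as() is compact; for reference, the mapping it implements (field position per the ESR_ELx syndrome encoding):

    /* ESR_ELx.SAS (bits [23:22]) encodes the access size as log2(bytes),
     * so kvm_vcpu_dabt_get_as() returns 1 << SAS:
     *   SAS = 0 -> 1 byte,  SAS = 1 -> 2 bytes,
     *   SAS = 2 -> 4 bytes, SAS = 3 -> 8 bytes.
     */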
@@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 	}
 }
 
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
@@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
 	return data;		/* Leave LE untouched */
 }
 
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		kvm_skip_instr32(vcpu, is_wide_instr);
@@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
...
@@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-static inline void kvm_arm_vhe_guest_enter(void)
-{
-	local_daif_mask();
-
-	/*
-	 * Having IRQs masked via PMR when entering the guest means the GIC
-	 * will not signal the CPU of interrupts of lower priority, and the
-	 * only way to get out will be via guest exceptions.
-	 * Naturally, we want to avoid this.
-	 *
-	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
-	 * dsb to ensure that the redistributor forwards EL2 IRQs to the CPU.
-	 */
-	pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
-	/*
-	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
-	 * and the GIC PMR if the host is using IRQ priorities.
-	 */
-	local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
-	/*
-	 * When we exit from the guest we change a number of CPU configuration
-	 * parameters, such as traps. Make sure these changes take effect
-	 * before running the host or additional guests.
-	 */
-	isb();
-}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1
...
@@ -47,6 +47,13 @@
 #define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version as it's always inlined.
+ */
+#define __kvm_swab32(x) ___constant_swab32(x)
+
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
...
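
Because ___constant_swab32() is a pure macro, the byte reversal always expands in place at the call site; illustrative value:

    /* __kvm_swab32(0x12345678) expands to a shift-and-mask expression
     * evaluating to 0x78563412 -- no out-of-line swab32() call that
     * code running at EL2 might not be able to reach.
     */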
@@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
 
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
 	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
 				    "ror %0, %0, #1\n"
@@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
+/* This is only called on a VHE system */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
...
@@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void)
 	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
-static inline bool has_vhe(void)
+static __always_inline bool has_vhe(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
 		return true;
...
@@ -625,7 +625,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 }
 
 /* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
@@ -678,7 +678,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
 	return exit_code;
 }
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	local_daif_mask();
+
+	/*
+	 * Having IRQs masked via PMR when entering the guest means the GIC
+	 * will not signal the CPU of interrupts of lower priority, and the
+	 * only way to get out will be via guest exceptions.
+	 * Naturally, we want to avoid this.
+	 *
+	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
+	 * dsb to ensure that the redistributor forwards EL2 IRQs to the CPU.
+	 */
+	pmr_sync();
+
+	ret = __kvm_vcpu_run_vhe(vcpu);
+
+	/*
+	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
+	 * and the GIC PMR if the host is using IRQ priorities.
+	 */
+	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+	/*
+	 * When we exit from the guest we change a number of CPU configuration
+	 * parameters, such as traps. Make sure these changes take effect
+	 * before running the host or additional guests.
+	 */
+	isb();
+
+	return ret;
+}
 
 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
...
@@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
 		u32 data = vcpu_get_reg(vcpu, rd);
 		if (__is_be(vcpu)) {
 			/* guest pre-swabbed data, undo this for writel() */
-			data = swab32(data);
+			data = __kvm_swab32(data);
 		}
 		writel_relaxed(data, addr);
 	} else {
 		u32 data = readl_relaxed(addr);
 		if (__is_be(vcpu)) {
 			/* guest expects swabbed data */
-			data = swab32(data);
+			data = __kvm_swab32(data);
 		}
 		vcpu_set_reg(vcpu, rd, data);
 	}
...
@@ -742,9 +742,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		guest_enter_irqoff();
 
 		if (has_vhe()) {
-			kvm_arm_vhe_guest_enter();
 			ret = kvm_vcpu_run_vhe(vcpu);
-			kvm_arm_vhe_guest_exit();
 		} else {
 			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 		}
...
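
Taken together with the hyp/switch.c hunk above, the caller no longer brackets the run call itself; the new wrapper enforces this ordering around every VHE guest entry (steps drawn from the comments in the diff):

    /* kvm_vcpu_run_vhe() now guarantees, in order:
     *   1. local_daif_mask()      - mask DAIF, set GIC_PRIO_PSR_I_SET
     *   2. pmr_sync()             - dsb so the redistributor forwards
     *                               EL2 IRQs to the CPU
     *   3. __kvm_vcpu_run_vhe()   - the actual world switch
     *   4. local_daif_restore(DAIF_PROCCTX_NOIRQ)
     *   5. isb()                  - trap reconfiguration takes effect
     *                               before the host or other guests run
     */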
@@ -4,6 +4,7 @@
 
 #include <kvm/arm_arch_timer.h>
 #include <linux/tracepoint.h>
+#include <asm/kvm_arm.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
...