Commit 2df36a5d authored by Christoffer Dall

arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs

The EISR and ELRSR registers are 32-bit registers on GICv2, and we
store these as an array of two such registers on the vgic vcpu struct.
However, we access them as a single 64-bit value or as a bitmap pointer
in the generic vgic code, which breaks BE support.

Instead, store them as u64 values on the vgic structure and do the
word-swapping in the assembly code, which already handles the byte order
for BE systems.
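
For context, here is a minimal user-space C sketch, not taken from the patch
itself, of why aliasing two u32 words as a bitmap breaks on big-endian and how
the word swap repairs it. It mirrors the u64_to_bitmask() helper added below,
but substitutes the compiler-provided __BYTE_ORDER__ and __SIZEOF_LONG__
macros for the kernel's CONFIG_CPU_BIG_ENDIAN and BITS_PER_LONG, and the LR
numbers are made up for illustration; it also relies on the same type-punning
the kernel permits by building with -fno-strict-aliasing.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the word-swap idea: on 32-bit big-endian the most
 * significant 32-bit word of a u64 sits at the lower address, so the
 * two words must be exchanged before the value can be walked as an
 * unsigned long bitmap.  64-bit and little-endian builds need nothing.
 */
static unsigned long *u64_to_bitmask(uint64_t *val)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && \
	__SIZEOF_LONG__ == 4
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}

int main(void)
{
	/* Pretend list registers 3 and 40 are flagged in a 64-bit ELRSR. */
	uint64_t elrsr = (1ULL << 3) | (1ULL << 40);
	unsigned long *bits = u64_to_bitmask(&elrsr);
	unsigned int bits_per_long = 8 * sizeof(long);
	int lr;

	/* Walk the bitmap the way for_each_set_bit() would. */
	for (lr = 0; lr < 64; lr++)
		if ((bits[lr / bits_per_long] >> (lr % bits_per_long)) & 1)
			printf("LR %d\n", lr);

	return 0;
}

On a 32-bit big-endian build, skipping the swap would report LRs 8 and 35
instead of 3 and 40, which is exactly the mis-indexing this patch eliminates.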
Tested-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 3d08c629
@@ -433,10 +433,17 @@ ARM_BE8(rev r10, r10 )
 	str r3, [r11, #VGIC_V2_CPU_HCR]
 	str r4, [r11, #VGIC_V2_CPU_VMCR]
 	str r5, [r11, #VGIC_V2_CPU_MISR]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
+	str r7, [r11, #VGIC_V2_CPU_EISR]
+	str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+	str r9, [r11, #VGIC_V2_CPU_ELRSR]
+#else
 	str r6, [r11, #VGIC_V2_CPU_EISR]
 	str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
 	str r8, [r11, #VGIC_V2_CPU_ELRSR]
 	str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+#endif
 	str r10, [r11, #VGIC_V2_CPU_APR]
 
 	/* Clear GICH_HCR */
......
@@ -67,10 +67,14 @@ CPU_BE( rev w11, w11 )
 	str w4, [x3, #VGIC_V2_CPU_HCR]
 	str w5, [x3, #VGIC_V2_CPU_VMCR]
 	str w6, [x3, #VGIC_V2_CPU_MISR]
-	str w7, [x3, #VGIC_V2_CPU_EISR]
-	str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
-	str w9, [x3, #VGIC_V2_CPU_ELRSR]
-	str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+CPU_LE(	str w7, [x3, #VGIC_V2_CPU_EISR] )
+CPU_LE(	str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_LE(	str w9, [x3, #VGIC_V2_CPU_ELRSR] )
+CPU_LE(	str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(	str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_BE(	str w8, [x3, #VGIC_V2_CPU_EISR] )
+CPU_BE(	str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(	str w10, [x3, #VGIC_V2_CPU_ELRSR] )
 	str w11, [x3, #VGIC_V2_CPU_APR]
 
 	/* Clear GICH_HCR */
......
@@ -219,8 +219,8 @@ struct vgic_v2_cpu_if {
 	u32		vgic_hcr;
 	u32		vgic_vmcr;
 	u32		vgic_misr;	/* Saved only */
-	u32		vgic_eisr[2];	/* Saved only */
-	u32		vgic_elrsr[2];	/* Saved only */
+	u64		vgic_eisr;	/* Saved only */
+	u64		vgic_elrsr;	/* Saved only */
 	u32		vgic_apr;
 	u32		vgic_lr[VGIC_V2_MAX_LRS];
 };
......
@@ -71,35 +71,17 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 			       struct vgic_lr lr_desc)
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
-		set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
 {
-	u64 val;
-
-#if BITS_PER_LONG == 64
-	val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
-	val <<= 32;
-	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
-#else
-	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
-#endif
-	return val;
+	return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
 }
 
 static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
 {
-	u64 val;
-
-#if BITS_PER_LONG == 64
-	val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
-	val <<= 32;
-	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
-#else
-	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
-#endif
-	return val;
+	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
......
@@ -145,6 +145,20 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
 	b->shared = NULL;
 }
 
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+	*val = (*val >> 32) | (*val << 32);
+#endif
+	return (unsigned long *)val;
+}
+
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
 				int cpuid, u32 offset)
 {
@@ -1442,7 +1456,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	 * active bit.
 	 */
 	u64 eisr = vgic_get_eisr(vcpu);
-	unsigned long *eisr_ptr = (unsigned long *)&eisr;
+	unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
 	int lr;
 
 	for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
@@ -1505,7 +1519,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 
 	level_pending = vgic_process_maintenance(vcpu);
 	elrsr = vgic_get_elrsr(vcpu);
-	elrsr_ptr = (unsigned long *)&elrsr;
+	elrsr_ptr = u64_to_bitmask(&elrsr);
 
 	/* Clear mappings for empty LRs */
 	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
......