Commit 7af0c253 authored by Akihiko Odaki, committed by Oliver Upton

KVM: arm64: Normalize cache configuration

Before this change, the cache configuration of the physical CPU was
exposed to vcpus. This is problematic because the cache configuration a
vcpu sees varies when it migrates between physical CPUs with different
cache configurations.

Fabricate the cache configuration from the sanitized CTR_EL0 value,
which is what userspace sees regardless of which physical CPU the vcpu
resides on.
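
To illustrate the approach, here is a standalone sketch that fabricates
a minimal CLIDR_EL1 from a sanitized CTR_EL0 value. This is not the
patch code: fabricate_clidr() and the constants are local to the sketch
(field layouts follow the Arm ARM), and the exact hierarchy KVM
fabricates may differ. The idea is that CTR_EL0.IDC/DIC describe which
cache maintenance a guest must perform, so the fabricated hierarchy
only needs to describe the levels that maintenance can reach.

#include <stdint.h>
#include <stdio.h>

#define CTR_EL0_IDC		(1u << 28)	/* no D-cache clean to PoU needed */
#define CTR_EL0_DIC		(1u << 29)	/* no I-cache invalidation to PoU needed */

#define CTYPE_INST		0x1	/* instruction cache only */
#define CTYPE_DATA		0x2	/* data cache only */
#define CTYPE_UNIFIED		0x4	/* unified cache */
#define CLIDR_CTYPE_SHIFT(l)	(3 * ((l) - 1))	/* Ctype1 at bits [2:0], ... */
#define CLIDR_LOUIS_SHIFT	21
#define CLIDR_LOC_SHIFT		24
#define CLIDR_LOUU_SHIFT	27

static uint64_t fabricate_clidr(uint64_t ctr_el0)
{
	uint64_t clidr;
	unsigned int loc;

	if (ctr_el0 & CTR_EL0_IDC) {
		/*
		 * No D-cache clean to the PoU is needed, so LoUU/LoUIS can
		 * stay 0 and a unified cache marks the LoC. Keep L1 free
		 * for an I-cache in case invalidation is still required.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = (uint64_t)CTYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/* D-cache clean is needed: L1 data cache, marked LoUU/LoUIS. */
		loc = 1;
		clidr = (uint64_t)CTYPE_DATA << CLIDR_CTYPE_SHIFT(1);
		clidr |= (uint64_t)loc << CLIDR_LOUU_SHIFT;
		clidr |= (uint64_t)loc << CLIDR_LOUIS_SHIFT;
	}

	/* An I-cache is only visible when invalidation is still required. */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= (uint64_t)CTYPE_INST << CLIDR_CTYPE_SHIFT(1);

	return clidr | ((uint64_t)loc << CLIDR_LOC_SHIFT);
}

int main(void)
{
	/* A CTR_EL0 with IDC/DIC clear yields separate L1 I/D caches. */
	printf("CLIDR_EL1 = 0x%016llx\n",
	       (unsigned long long)fabricate_clidr(0));
	return 0;
}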

CLIDR_EL1 and CCSIDR_EL1 are now writable from userspace so that the
VMM can restore the values saved with the old kernel.
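
Since these registers are now writable, a VMM can restore them with
KVM_SET_ONE_REG. A minimal sketch of the userspace side (arm64 only;
restore_cache_regs() is a hypothetical helper and error handling is
trimmed): CLIDR_EL1 is a regular sysreg, while CCSIDR_EL1 values are
demultiplexed by CSSELR value, per the KVM API documentation.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_one_reg(int vcpu_fd, uint64_t id, const void *addr)
{
	struct kvm_one_reg reg = {
		.id = id,
		.addr = (uint64_t)(uintptr_t)addr,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Restore CLIDR_EL1 and the level 1 data/unified CCSIDR_EL1. */
static int restore_cache_regs(int vcpu_fd, uint64_t clidr, uint32_t ccsidr_l1d)
{
	/* CLIDR_EL1 is encoded as op0=3, op1=1, CRn=0, CRm=0, op2=1. */
	if (set_one_reg(vcpu_fd, ARM64_SYS_REG(3, 1, 0, 0, 1), &clidr))
		return -1;

	/* CCSIDR_EL1 is demultiplexed by CSSELR; 0 selects L1 data/unified. */
	return set_one_reg(vcpu_fd,
			   KVM_REG_ARM64 | KVM_REG_SIZE_U32 |
			   KVM_REG_ARM_DEMUX |
			   KVM_REG_ARM_DEMUX_ID_CCSIDR | 0,
			   &ccsidr_l1d);
}
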
Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Link: https://lore.kernel.org/r/20230112023852.42012-8-akihiko.odaki@daynix.com
[ Oliver: Squash Marc's fix for CCSIDR_EL1.LineSize when set from userspace ]
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent bf48040c
@@ -22,6 +22,9 @@
 #define CLIDR_CTYPE(clidr, level)	\
 	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
 
+/* Ttypen, bits [2(n - 1) + 34 : 2(n - 1) + 33], for n = 1 to 7 */
+#define CLIDR_TTYPE_SHIFT(level)	(2 * ((level) - 1) + CLIDR_EL1_Ttypen_SHIFT)
+
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
  * sure that all such allocations are cache aligned. Otherwise,
...
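
As a quick sanity check of the new macro, a standalone sketch (it
assumes CLIDR_EL1_Ttypen_SHIFT is 33, which matches the bit range in
the comment above):

#include <assert.h>

#define CLIDR_EL1_Ttypen_SHIFT	33	/* Ttype1 occupies bits [34:33] */
#define CLIDR_TTYPE_SHIFT(level)	(2 * ((level) - 1) + CLIDR_EL1_Ttypen_SHIFT)

static_assert(CLIDR_TTYPE_SHIFT(1) == 33, "Ttype1 lives at bits [34:33]");
static_assert(CLIDR_TTYPE_SHIFT(7) == 45, "Ttype7 lives at bits [46:45]");
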
@@ -252,6 +252,7 @@ struct kvm_vcpu_fault_info {
 enum vcpu_sysreg {
 	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
 	MPIDR_EL1,	/* MultiProcessor Affinity Register */
+	CLIDR_EL1,	/* Cache Level ID Register */
 	CSSELR_EL1,	/* Cache Size Selection Register */
 	SCTLR_EL1,	/* System Control Register */
 	ACTLR_EL1,	/* Auxiliary Control Register */
@@ -501,6 +502,9 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		gpa_t base;
 	} steal;
+
+	/* Per-vcpu CCSIDR override or NULL */
+	u32 *ccsidr;
 };
 
 /*
...
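
A sketch of how the new per-vcpu pointer can be consulted; the accessor
itself is in the collapsed part of this diff, and fabricate_ccsidr() is
a made-up name here. When userspace has not written CCSIDR_EL1, the
value is fabricated from the sanitized CTR_EL0; otherwise the stored
per-level value, indexed by the CSSELR_EL1 selector, is returned.

static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u32 *ccsidr = vcpu->arch.ccsidr;

	/* No userspace override yet: fabricate a consistent value. */
	if (!ccsidr)
		return fabricate_ccsidr(vcpu, csselr);

	return ccsidr[csselr];
}
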
@@ -157,6 +157,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 	if (sve_state)
 		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
 	kfree(sve_state);
+	kfree(vcpu->arch.ccsidr);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
...
(The remainder of the diff is collapsed.)