Commit 1e530f13 authored by Julien Grall, committed by Will Deacon

arm64/sve: Implement a helper to flush SVE registers

Introduce a new helper that will zero all SVE registers but the first
128-bits of each vector. This will be used by subsequent patches to
avoid costly store/manipulate/reload sequences in places like do_sve_acc().
Signed-off-by: Julien Grall <julien.grall@arm.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Dave Martin <Dave.Martin@arm.com>
Link: https://lore.kernel.org/r/20200828181155.17745-6-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 6d40f05f
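
For context, the kind of caller the commit message has in mind would look roughly like the sketch below. This is not part of this patch: the actual do_sve_acc() rework lands in a later patch in the series, and the specific flags and helpers used here (TIF_SVE, TIF_FOREIGN_FPSTATE, fpsimd_flush_task_state()) are only an assumption about how such a caller might be structured.

/* Hypothetical caller sketch, not taken verbatim from this series. */
static void sve_access_trap_sketch(void)
{
	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1);		/* the access should not have trapped */

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
		sve_flush_live();	/* task state is live in the registers: flush it in place */
	else
		fpsimd_flush_task_state(current);	/* state is in memory: force a reload instead */
}
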
@@ -69,6 +69,7 @@ static inline void *sve_pffr(struct thread_struct *thread)
 extern void sve_save_state(void *state, u32 *pfpsr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
			   unsigned long vq_minus_1);
+extern void sve_flush_live(void);
 extern unsigned int sve_get_vl(void);
 
 struct arm64_cpu_capabilities;
@@ -164,6 +164,13 @@
		| ((\np) << 5)
 .endm
 
+/* PFALSE P\np.B */
+.macro _sve_pfalse np
+	_sve_check_preg \np
+	.inst	0x2518e400			\
+		| (\np)
+.endm
+
 .macro __for from:req, to:req
	.if (\from) == (\to)
		_for__body %\from
@@ -198,6 +205,18 @@
 921:
 .endm
 
+/* Preserve the first 128-bits of Znz and zero the rest. */
+.macro _sve_flush_z nz
+	_sve_check_zreg \nz
+	mov	v\nz\().16b, v\nz\().16b
+.endm
+
+.macro sve_flush
+ _for n, 0, 31, _sve_flush_z	\n
+ _for n, 0, 15, _sve_pfalse	\n
+	_sve_wrffr	0
+.endm
+
 .macro sve_save nxbase, xpfpsr, nxtmp
 _for n, 0, 31, _sve_str_v	\n, \nxbase, \n - 34
 _for n, 0, 15, _sve_str_p	\n, \nxbase, \n - 16
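A note on why the flush works (architectural background, not text from the patch): when SVE is implemented, an AArch64 write to a 128-bit AdvSIMD register Vn also zeroes the bits of the corresponding scalable register Zn above bit 127, so the self-move "mov v\nz\().16b, v\nz\().16b" in _sve_flush_z preserves the low 128 bits of each Zn and clears the rest. sve_flush then uses _sve_pfalse to set every predicate register P0-P15 to all-false, and the final _sve_wrffr 0 writes the now all-false P0 into the first-fault register, leaving FFR cleared as well. _sve_pfalse and _sve_wrffr are emitted as raw .inst encodings, following the existing convention in this file, so the code assembles even with toolchains that lack SVE support.
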
@@ -32,6 +32,7 @@ SYM_FUNC_START(fpsimd_load_state)
 SYM_FUNC_END(fpsimd_load_state)
 
 #ifdef CONFIG_ARM64_SVE
+
 SYM_FUNC_START(sve_save_state)
	sve_save 0, x1, 2
	ret
@@ -46,4 +47,11 @@ SYM_FUNC_START(sve_get_vl)
	_sve_rdvl	0, 1
	ret
 SYM_FUNC_END(sve_get_vl)
+
+/* Zero all SVE registers but the first 128-bits of each vector */
+SYM_FUNC_START(sve_flush_live)
+	sve_flush
+	ret
+SYM_FUNC_END(sve_flush_live)
+
 #endif /* CONFIG_ARM64_SVE */