Commit 39782210 authored by Mark Brown, committed by Catalin Marinas

arm64/sme: Implement ZA signal handling

Implement support for ZA in signal handling in a very similar way to how
we implement support for SVE registers, using a signal context structure
with optional register state after it. Where present this register state
stores the ZA matrix as a series of horizontal vectors numbered from 0 to
VL/8 in the endianness independent format used for vectors.

As with SVE we do not allow changes in the vector length during signal
return but we do allow ZA to be enabled or disabled.
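
For illustration only (not part of this patch): a signal handler can
locate the new block by walking the context records in the signal
frame's __reserved area, much as the kernel selftests do. A minimal
sketch against the updated uapi header; find_za_context() is a
hypothetical helper and records chained via EXTRA_MAGIC are not
followed:

#include <ucontext.h>
#include <asm/sigcontext.h>

/*
 * Hypothetical helper: scan the __reserved area of the sigcontext
 * for a block tagged ZA_MAGIC, returning NULL if the kernel did not
 * emit one (e.g. SME unsupported).
 */
static struct za_context *find_za_context(ucontext_t *uc)
{
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	while (head->magic && head->size) {
		if (head->magic == ZA_MAGIC)
			return (struct za_context *)head;
		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}

	return NULL;
}

Whether ZA was live at delivery is then a size check:
head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(vl)), per the header
comment added below.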
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419112247.711548-20-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 85ed24da
...@@ -140,6 +140,14 @@ struct sve_context {
#define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */
#define ZA_MAGIC 0x54366345
struct za_context {
struct _aarch64_ctx head;
__u16 vl;
__u16 __reserved[3];
};
#endif /* !__ASSEMBLY__ */
#include <asm/sve_context.h>
...@@ -259,4 +267,37 @@ struct sve_context {
#define SVE_SIG_CONTEXT_SIZE(vq) \
(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
/*
* If the ZA register is enabled for the thread at signal delivery then,
* za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
* and the register data may be accessed using the ZA_SIG_*() macros.
*
* If za_context.head.size < ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
* then ZA was not enabled and no register data was included, in which case
* the ZA_SIG_*() macros should not be used except for this check.
*
* The same convention applies when returning from a signal: a caller
* will need to remove or resize the za_context block if it wants to
* enable the ZA register when it was previously non-live or vice-versa.
* This may require the caller to allocate fresh memory and/or move other
* context blocks in the signal frame.
*
* Changing the vector length during signal return is not permitted:
* za_context.vl must equal the thread's current SME vector length when
* doing a sigreturn.
*/
#define ZA_SIG_REGS_OFFSET \
((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
(SVE_SIG_ZREG_SIZE(vq) * n))
#define ZA_SIG_CONTEXT_SIZE(vq) \
(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
#endif /* _UAPI__ASM_SIGCONTEXT_H */
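
A quick sanity check on the layout macros above (illustrative only;
assumes the patched header is what <asm/sigcontext.h> resolves to).
With an SME vector length of 32 bytes, vq = 2 and ZA is a 32x32 byte
matrix:

#include <assert.h>
#include <asm/sigcontext.h>

int main(void)
{
	unsigned int vq = 2; /* VL = 32 bytes */

	/* za_context is 16 bytes and already 16-byte aligned */
	assert(ZA_SIG_REGS_OFFSET == 16);

	/* the ZA payload is (vq * 16) rows of (vq * 16) bytes */
	assert(ZA_SIG_REGS_SIZE(vq) == 32 * 32);

	/* horizontal vector n starts one Z-register-sized row at a time */
	assert(ZA_SIG_ZAV_OFFSET(vq, 3) == ZA_SIG_REGS_OFFSET + 32 * 3);

	/* ZA disabled (vq == 0): the block is just the header */
	assert(ZA_SIG_CONTEXT_SIZE(0) == ZA_SIG_REGS_OFFSET);

	return 0;
}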
...@@ -1181,9 +1181,6 @@ void fpsimd_release_task(struct task_struct *dead_task)
#ifdef CONFIG_ARM64_SME
/* This will move to uapi/asm/sigcontext.h when signals are implemented */
#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
/*
* Ensure that task->thread.za_state is allocated and sufficiently large.
*
...
...@@ -56,6 +56,7 @@ struct rt_sigframe_user_layout {
unsigned long fpsimd_offset;
unsigned long esr_offset;
unsigned long sve_offset;
unsigned long za_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
...@@ -218,6 +219,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
struct sve_context __user *sve;
struct za_context __user *za;
};
#ifdef CONFIG_ARM64_SVE
...@@ -346,6 +348,101 @@ extern int restore_sve_fpsimd_context(struct user_ctxs *user);
#endif /* ! CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_SME
static int preserve_za_context(struct za_context __user *ctx)
{
int err = 0;
u16 reserved[ARRAY_SIZE(ctx->__reserved)];
unsigned int vl = task_get_sme_vl(current);
unsigned int vq;
if (thread_za_enabled(&current->thread))
vq = sve_vq_from_vl(vl);
else
vq = 0;
memset(reserved, 0, sizeof(reserved));
__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
&ctx->head.size, err);
__put_user_error(vl, &ctx->vl, err);
BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
if (vq) {
/*
* This assumes that the ZA state has already been saved to
* the task struct by calling the function
* fpsimd_signal_preserve_current_state().
*/
err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
current->thread.za_state,
ZA_SIG_REGS_SIZE(vq));
}
return err ? -EFAULT : 0;
}
static int restore_za_context(struct user_ctxs *user)
{
int err;
unsigned int vq;
struct za_context za;
if (__copy_from_user(&za, user->za, sizeof(za)))
return -EFAULT;
if (za.vl != task_get_sme_vl(current))
return -EINVAL;
if (za.head.size <= sizeof(*user->za)) {
current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
return 0;
}
vq = sve_vq_from_vl(za.vl);
if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
return -EINVAL;
/*
* Careful: we are about to __copy_from_user() directly into
* thread.za_state with preemption enabled, so protection is
* needed to prevent a racing context switch from writing stale
* registers back over the new data.
*/
fpsimd_flush_task_state(current);
/* From now, fpsimd_thread_switch() won't touch thread.za_state */
sme_alloc(current);
if (!current->thread.za_state) {
current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
clear_thread_flag(TIF_SME);
return -ENOMEM;
}
err = __copy_from_user(current->thread.za_state,
(char __user const *)user->za +
ZA_SIG_REGS_OFFSET,
ZA_SIG_REGS_SIZE(vq));
if (err)
return -EFAULT;
set_thread_flag(TIF_SME);
current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
return 0;
}
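
One consequence of the checks above, sketched for illustration
(reusing the hypothetical find_za_context() from the commit message):
a handler may rewrite the saved ZA payload in place, keeping head.size
and vl unchanged, and sigreturn will load the new contents. Only
toggling ZA on or off changes the block size and therefore forces a
re-layout of the frame:

#include <string.h>

/*
 * Zero the saved ZA data so the interrupted context resumes with a
 * cleared but still enabled ZA.  Shrinking the block to disable ZA
 * instead would require moving every later context record up.
 */
static void clear_saved_za(struct za_context *za)
{
	unsigned int vq = sve_vq_from_vl(za->vl);

	if (za->head.size < ZA_SIG_CONTEXT_SIZE(vq))
		return; /* ZA was not enabled; nothing to clear */

	memset((char *)za + ZA_SIG_REGS_OFFSET, 0, ZA_SIG_REGS_SIZE(vq));
}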
#else /* ! CONFIG_ARM64_SME */
/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
#endif /* ! CONFIG_ARM64_SME */
static int parse_user_sigframe(struct user_ctxs *user,
struct rt_sigframe __user *sf)
...@@ -360,6 +457,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->fpsimd = NULL;
user->sve = NULL;
user->za = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
...@@ -425,6 +523,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->sve = (struct sve_context __user *)head;
break;
case ZA_MAGIC:
if (!system_supports_sme())
goto invalid;
if (user->za)
goto invalid;
if (size < sizeof(*user->za))
goto invalid;
user->za = (struct za_context __user *)head;
break;
case EXTRA_MAGIC:
if (have_extra_context)
goto invalid;
...@@ -548,6 +659,9 @@ static int restore_sigframe(struct pt_regs *regs,
}
}
if (err == 0 && system_supports_sme() && user.za)
err = restore_za_context(&user);
return err;
}
...@@ -630,6 +744,24 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
return err;
}
if (system_supports_sme()) {
unsigned int vl;
unsigned int vq = 0;
if (add_all)
vl = sme_max_vl();
else
vl = task_get_sme_vl(current);
if (thread_za_enabled(&current->thread))
vq = sve_vq_from_vl(vl);
err = sigframe_alloc(user, &user->za_offset,
ZA_SIG_CONTEXT_SIZE(vq));
if (err)
return err;
}
return sigframe_alloc_end(user);
}
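
For a sense of scale (arithmetic only, from the macros above): if
sme_max_vl() reports the architectural maximum vector length of 256
bytes, the add_all path reserves ZA_SIG_CONTEXT_SIZE(16) =
16 + (16 * 16)^2 = 65552 bytes for the ZA block alone, far more than
the 4K base __reserved area of the signal frame, so such layouts
depend on the EXTRA_MAGIC spill space accounted via extra_offset.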
...@@ -678,6 +810,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
err |= preserve_sve_context(sve_ctx);
}
/* ZA state if present */
if (system_supports_sme() && err == 0 && user->za_offset) {
struct za_context __user *za_ctx =
apply_user_offset(user, user->za_offset);
err |= preserve_za_context(za_ctx);
}
if (err == 0 && user->extra_offset) {
char __user *sfp = (char __user *)user->sigframe;
char __user *userp =
...