Commit ec028836 authored by Vincenzo Frascino, committed by Will Deacon

arm64: mte: Add asymmetric mode support

MTE provides an asymmetric mode for detecting tag exceptions. In
particular, when this mode is enabled, the CPU triggers a fault
on a tag mismatch during a load operation and asynchronously updates
a register when a tag mismatch is detected during a store operation.

Add support for MTE asymmetric mode.

Note: If the CPU does not support MTE asymmetric mode, the kernel falls
back on synchronous mode, which is the default for kasan=on.
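
A rough caller-side sketch (not part of this patch) of how a KASAN
hardware tag-based configuration could request the new mode through the
arch hooks; example_enable_tagging() and its want_asymm parameter are
hypothetical, only arch_enable_tagging_asymm() and
arch_enable_tagging_sync() are real hooks:

	/*
	 * Hypothetical sketch: pick the MTE tag-check mode via the
	 * arch_enable_tagging_*() hooks, which arm64 maps to the
	 * mte_enable_kernel_*() helpers. arch_enable_tagging_asymm()
	 * falls back to synchronous mode when the CPU does not have
	 * the ARM64_MTE_ASYMM capability.
	 */
	static void example_enable_tagging(bool want_asymm)
	{
		if (want_asymm)
			arch_enable_tagging_asymm();
		else
			arch_enable_tagging_sync();
	}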

Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Andrey Konovalov <andreyknvl@gmail.com>
Link: https://lore.kernel.org/r/20211006154751.4463-5-vincenzo.frascino@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent d73c162e
@@ -243,6 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 #ifdef CONFIG_KASAN_HW_TAGS
 #define arch_enable_tagging_sync()		mte_enable_kernel_sync()
 #define arch_enable_tagging_async()		mte_enable_kernel_async()
+#define arch_enable_tagging_asymm()		mte_enable_kernel_asymm()
 #define arch_force_async_tag_fault()		mte_check_tfsr_exit()
 #define arch_get_random_tag()			mte_get_random_tag()
 #define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
...
@@ -130,6 +130,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
 void mte_enable_kernel_sync(void);
 void mte_enable_kernel_async(void);
+void mte_enable_kernel_asymm(void);

 #else /* CONFIG_ARM64_MTE */

@@ -161,6 +162,10 @@ static inline void mte_enable_kernel_async(void)
 {
 }

+static inline void mte_enable_kernel_asymm(void)
+{
+}
+
 #endif /* CONFIG_ARM64_MTE */

 #endif /* __ASSEMBLY__ */
...
@@ -88,11 +88,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
 #ifdef CONFIG_KASAN_HW_TAGS
 /* Whether the MTE asynchronous mode is enabled. */
-DECLARE_STATIC_KEY_FALSE(mte_async_mode);
+DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);

-static inline bool system_uses_mte_async_mode(void)
+static inline bool system_uses_mte_async_or_asymm_mode(void)
 {
-	return static_branch_unlikely(&mte_async_mode);
+	return static_branch_unlikely(&mte_async_or_asymm_mode);
 }

 void mte_check_tfsr_el1(void);

@@ -121,7 +121,7 @@ static inline void mte_check_tfsr_exit(void)
 	mte_check_tfsr_el1();
 }

 #else

-static inline bool system_uses_mte_async_mode(void)
+static inline bool system_uses_mte_async_or_asymm_mode(void)
 {
 	return false;
 }
...
@@ -196,13 +196,13 @@ static inline void __uaccess_enable_tco(void)
  */
 static inline void __uaccess_disable_tco_async(void)
 {
-	if (system_uses_mte_async_mode())
+	if (system_uses_mte_async_or_asymm_mode())
 		__uaccess_disable_tco();
 }

 static inline void __uaccess_enable_tco_async(void)
 {
-	if (system_uses_mte_async_mode())
+	if (system_uses_mte_async_or_asymm_mode())
 		__uaccess_enable_tco();
 }
...
@@ -26,9 +26,12 @@
 static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);

 #ifdef CONFIG_KASAN_HW_TAGS
-/* Whether the MTE asynchronous mode is enabled. */
-DEFINE_STATIC_KEY_FALSE(mte_async_mode);
-EXPORT_SYMBOL_GPL(mte_async_mode);
+/*
+ * The asynchronous and asymmetric MTE modes have the same behavior for
+ * store operations. This flag is set when either of these modes is enabled.
+ */
+DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
+EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
 #endif

 static void mte_sync_page_tags(struct page *page, pte_t old_pte,

@@ -116,7 +119,7 @@ void mte_enable_kernel_sync(void)
 	 * Make sure we enter this function when no PE has set
	 * async mode previously.
	 */
-	WARN_ONCE(system_uses_mte_async_mode(),
+	WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
		  "MTE async mode enabled system wide!");

 	__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);

@@ -134,8 +137,34 @@ void mte_enable_kernel_async(void)
 	 * mode in between sync and async, this strategy needs
	 * to be reviewed.
	 */
-	if (!system_uses_mte_async_mode())
-		static_branch_enable(&mte_async_mode);
+	if (!system_uses_mte_async_or_asymm_mode())
+		static_branch_enable(&mte_async_or_asymm_mode);
+}
+
+void mte_enable_kernel_asymm(void)
+{
+	if (cpus_have_cap(ARM64_MTE_ASYMM)) {
+		__mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);
+
+		/*
+		 * MTE asymm mode behaves as async mode for store
+		 * operations. The mode is set system wide by the
+		 * first PE that executes this function.
+		 *
+		 * Note: If in future KASAN acquires a runtime switching
+		 * mode in between sync and async, this strategy needs
+		 * to be reviewed.
+		 */
+		if (!system_uses_mte_async_or_asymm_mode())
+			static_branch_enable(&mte_async_or_asymm_mode);
+	} else {
+		/*
+		 * If the CPU does not support MTE asymmetric mode the
+		 * kernel falls back on synchronous mode which is the
+		 * default for kasan=on.
+		 */
+		mte_enable_kernel_sync();
+	}
 }

 #endif
...