Commit 69e3b846 authored by Steven Price, committed by Marc Zyngier

arm64: mte: Sync tags for pages where PTE is untagged

A KVM guest could store tags in a page even if the VMM hasn't mapped
the page with PROT_MTE, so when restoring pages from swap we need to
check for saved tags even if !pte_tagged().

However, don't check pages for which pte_access_permitted() returns
false, as these will not have been swapped out.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210621111716.37157-2-steven.price@arm.com
parent 8124c8a6
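
Before reading the diff, it may help to see the new set_pte_at() decision
collapsed into a single predicate. This is an illustrative sketch only, not
part of the patch; the helper name mte_needs_tag_sync() is invented here and
simply restates the checks the patch adds:

	/*
	 * Sketch only: restates the checks this patch adds to set_pte_at().
	 * The helper name is hypothetical.
	 */
	static bool mte_needs_tag_sync(pte_t old_pte, pte_t pte)
	{
		if (!system_supports_mte())
			return false;

		/* Exec-only and special mappings never expose tags. */
		if (!pte_access_permitted(pte, false) || pte_special(pte))
			return false;

		/*
		 * Sync if the new PTE is tagged, or if the old PTE is a
		 * swap entry (a previous mapping may have saved tags even
		 * though this PTE is untagged).
		 */
		return pte_tagged(pte) ||
		       (!pte_none(old_pte) && !pte_present(old_pte));
	}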
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
@@ -37,7 +37,7 @@ void mte_free_tag_storage(char *storage);
 /* track which pages have valid allocation tags */
 #define PG_mte_tagged	PG_arch_2
 
-void mte_sync_tags(pte_t *ptep, pte_t pte);
+void mte_sync_tags(pte_t old_pte, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
@@ -53,7 +53,7 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
 
-static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
+static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
 {
 }
 static inline void mte_copy_page_tags(void *kto, const void *kfrom)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
@@ -314,9 +314,25 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte);
 
-	if (system_supports_mte() &&
-	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
-		mte_sync_tags(ptep, pte);
+	/*
+	 * If the PTE would provide user space access to the tags associated
+	 * with it then ensure that the MTE tags are synchronised.  Although
+	 * pte_access_permitted() returns false for exec only mappings, they
+	 * don't expose tags (instruction fetches don't check tags).
+	 */
+	if (system_supports_mte() && pte_access_permitted(pte, false) &&
+	    !pte_special(pte)) {
+		pte_t old_pte = READ_ONCE(*ptep);
+		/*
+		 * We only need to synchronise if the new PTE has tags enabled
+		 * or if swapping in (in which case another mapping may have
+		 * set tags in the past even if this PTE isn't tagged).
+		 * (!pte_none() && !pte_present()) is an open coded version of
+		 * is_swap_pte()
+		 */
+		if (pte_tagged(pte) ||
+		    (!pte_none(old_pte) && !pte_present(old_pte)))
+			mte_sync_tags(old_pte, pte);
+	}
 
 	__check_racy_pte_update(mm, ptep, pte);
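
For reference, the condition open coded in the comment above is exactly
is_swap_pte() from include/linux/swapops.h (open coded here, presumably so
that pgtable.h need not pull in that header):

	static inline int is_swap_pte(pte_t pte)
	{
		return !pte_none(pte) && !pte_present(pte);
	}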
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
@@ -32,10 +32,9 @@ DEFINE_STATIC_KEY_FALSE(mte_async_mode);
 EXPORT_SYMBOL_GPL(mte_async_mode);
 #endif
 
-static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+			       bool check_swap, bool pte_is_tagged)
 {
-	pte_t old_pte = READ_ONCE(*ptep);
-
 	if (check_swap && is_swap_pte(old_pte)) {
 		swp_entry_t entry = pte_to_swp_entry(old_pte);
 
@@ -43,6 +42,9 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 			return;
 	}
 
+	if (!pte_is_tagged)
+		return;
+
 	page_kasan_tag_reset(page);
 	/*
 	 * We need smp_wmb() in between setting the flags and clearing the
@@ -55,16 +57,22 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 	mte_clear_page_tags(page_address(page));
 }
 
-void mte_sync_tags(pte_t *ptep, pte_t pte)
+void mte_sync_tags(pte_t old_pte, pte_t pte)
 {
 	struct page *page = pte_page(pte);
 	long i, nr_pages = compound_nr(page);
 	bool check_swap = nr_pages == 1;
+	bool pte_is_tagged = pte_tagged(pte);
+
+	/* Early out if there's nothing to do */
+	if (!check_swap && !pte_is_tagged)
+		return;
 
 	/* if PG_mte_tagged is set, tags have already been initialised */
 	for (i = 0; i < nr_pages; i++, page++) {
 		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
-			mte_sync_page_tags(page, ptep, check_swap);
+			mte_sync_page_tags(page, old_pte, check_swap,
+					   pte_is_tagged);
 	}
 }
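
Taken together with the early out in mte_sync_tags() (compound pages skip
the swap check, so an untagged PTE means there is nothing to do), the
swap-in path after this patch is roughly the following. This is a sketch,
not code from the patch; mte_restore_tags() is the pre-existing arm64 MTE
swap helper, and its call sits in the collapsed region of the first mte.c
hunk above:

	/*
	 * Sketch of the swap-in path (illustrative only):
	 *
	 * set_pte_at()
	 *   -> old PTE is a swap entry, so mte_sync_tags(old_pte, pte)
	 *      -> for each page without PG_mte_tagged already set:
	 *         mte_sync_page_tags(page, old_pte, check_swap, pte_is_tagged)
	 *           -> mte_restore_tags() reinstates any saved tags
	 *           -> otherwise tags are cleared only if pte_is_tagged
	 */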