Commit ec4abf1e authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/hash64: use _PAGE_PTE when checking for pte_present

This makes the pte_present() check stricter by also checking for the _PAGE_PTE
bit. A level 1 pte pointer (a THP pte) can be switched to a pointer to a level 0
pte page table page by either of the following two operations:

1) THP split.
2) madvise(MADV_DONTNEED) running in parallel with a page fault.

A lockless page table walk needs to make sure it can handle such changes
gracefully.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-4-aneesh.kumar@linux.ibm.com
parent c46241a3
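
To make the race concrete before the diff: a minimal sketch in plain C (not
kernel code; the _PAGE_* values and helper names are assumptions that only
mirror the book3s/64 layout) of why a lockless walker has to classify a
page-table snapshot from its bits alone:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x8000000000000000ULL	/* assumed value */
#define _PAGE_PTE	0x4000000000000000ULL	/* assumed value */

/* Strict leaf check: only an entry that carries _PAGE_PTE is a pte. */
static bool is_leaf_pte(uint64_t entry)
{
	return (entry & (_PAGE_PRESENT | _PAGE_PTE)) ==
	       (_PAGE_PRESENT | _PAGE_PTE);
}

/*
 * A lockless walker works on a single snapshot of the slot; another CPU
 * may rewrite the slot (THP split, or MADV_DONTNEED racing with a page
 * fault) right after the read, so the snapshot must be classified from
 * its bits alone.
 */
static int lockless_walk(const volatile uint64_t *slot)
{
	uint64_t entry = *slot;	/* one racy read, in the spirit of READ_ONCE() */

	if (!is_leaf_pte(entry))
		return -1;	/* not (or no longer) a leaf pte: bail out */
	/* ... safe to use 'entry' as a stale-but-consistent pte snapshot ... */
	return 0;
}

int main(void)
{
	uint64_t slot = _PAGE_PRESENT | _PAGE_PTE | 0x2000;	/* leaf pte */

	printf("leaf pte:  %d\n", lockless_walk(&slot));	/*  0: walked */
	slot = 0x12345000ULL;	/* slot now holds a pointer to a pte page */
	printf("table ptr: %d\n", lockless_walk(&slot));	/* -1: bailed */
	return 0;
}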
arch/powerpc/include/asm/book3s/64/pgtable.h

@@ -553,6 +553,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+static inline bool pte_hw_valid(pte_t pte)
+{
+	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) ==
+		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
 static inline int pte_present(pte_t pte)
 {
 	/*
@@ -561,12 +567,11 @@ static inline int pte_present(pte_t pte)
 	 * invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID
 	 * if we find _PAGE_PRESENT cleared.
 	 */
-	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
-}
 
-static inline bool pte_hw_valid(pte_t pte)
-{
-	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
+	if (pte_hw_valid(pte))
+		return true;
+	return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) ==
+		cpu_to_be64(_PAGE_INVALID | _PAGE_PTE);
 }
 
 #ifdef CONFIG_PPC_MEM_KEYS
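
As a quick way to compare the two predicates above, here is a hedged standalone
sketch (the _PAGE_* values are assumptions mirroring the book3s/64 headers)
showing that the old check can report a non-pte word as present, while the new
check rejects it and still accepts a pte transiently invalidated by
ptep_set_access_flags():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x8000000000000000ULL	/* assumed value */
#define _PAGE_PTE	0x4000000000000000ULL	/* assumed value */
#define _PAGE_INVALID	0x2000000000000000ULL	/* assumed value */

/* Old predicate: any of _PAGE_PRESENT / _PAGE_INVALID set. */
static bool present_old(uint64_t v)
{
	return !!(v & (_PAGE_PRESENT | _PAGE_INVALID));
}

/* New predicate: _PAGE_PTE must be set as well. */
static bool present_new(uint64_t v)
{
	if ((v & (_PAGE_PRESENT | _PAGE_PTE)) == (_PAGE_PRESENT | _PAGE_PTE))
		return true;
	return (v & (_PAGE_INVALID | _PAGE_PTE)) ==
	       (_PAGE_INVALID | _PAGE_PTE);
}

int main(void)
{
	struct { const char *what; uint64_t v; } cases[] = {
		/* normal leaf pte */
		{ "valid pte",       _PAGE_PRESENT | _PAGE_PTE | 0x2000 },
		/* pte transiently invalidated during ptep_set_access_flags */
		{ "invalidated pte", _PAGE_INVALID | _PAGE_PTE | 0x2000 },
		/* non-pte word whose bits happen to alias _PAGE_PRESENT */
		{ "not a pte",       _PAGE_PRESENT | 0x2000 },
	};
	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("%-16s old=%d new=%d\n", cases[i].what,
		       present_old(cases[i].v), present_new(cases[i].v));
	return 0;
}

Only the last row differs: the old check reports it present, the new one does
not, which is exactly the property a lockless walker relies on.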
arch/powerpc/mm/book3s64/hash_utils.c

@@ -1350,8 +1350,15 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		goto bail;
 	}
 
-	/* Add _PAGE_PRESENT to the required access perm */
-	access |= _PAGE_PRESENT;
+	/*
+	 * Add _PAGE_PRESENT to the required access perm. If there are parallel
+	 * updates to the pte that can possibly clear _PAGE_PTE, catch that too.
+	 *
+	 * We can safely use the return pte address in rest of the function
+	 * because we do set H_PAGE_BUSY which prevents further updates to pte
+	 * from generic code.
+	 */
+	access |= _PAGE_PRESENT | _PAGE_PTE;
 
 	/*
 	 * Pre-check access permissions (will be re-checked atomically
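
The fault path's pre-check is a simple mask test: every bit required in
access must be set in the pte. A small illustrative sketch of that idea
(check_access() here is a stand-in modelled on the kernel's
check_pte_access(); the bit values are assumptions) showing how a racing
clear of _PAGE_PTE now fails the check the same way a missing permission
bit would:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x8000000000000000ULL	/* assumed values */
#define _PAGE_PTE	0x4000000000000000ULL
#define _PAGE_READ	0x0000000000000004ULL

/* Mask test in the spirit of check_pte_access(): fail when any
 * required bit is missing from the pte value. */
static bool check_access(uint64_t access, uint64_t ptev)
{
	return (access & ~ptev) == 0;
}

int main(void)
{
	uint64_t access = _PAGE_PRESENT | _PAGE_PTE | _PAGE_READ;
	uint64_t pte = _PAGE_PRESENT | _PAGE_PTE | _PAGE_READ;

	printf("intact pte:        %d\n", check_access(access, pte));
	/* A parallel update cleared _PAGE_PTE: the same mask test now
	 * fails, so the fault path bails out instead of hashing a
	 * stale entry. */
	printf("_PAGE_PTE cleared: %d\n",
	       check_access(access, pte & ~_PAGE_PTE));
	return 0;
}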