Commit f72a85e3 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/book3s/64: Add proper pte access check helper

pte_access_permitted() gets called in the get_user_pages_fast() path. If we
have marked the pte PROT_NONE, we should not allow read access to the
address. With the current implementation we only check for WRITE access
and never for READ. The READ check is needed on architectures like ppc64,
which implement PROT_NONE by clearing the RWX access bits rather than
_PAGE_PRESENT. Also add a pte_user check to make sure we are not
accessing a kernel mapping.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 5fa5b16b
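
To illustrate the problem the commit message describes, here is a minimal
user-space sketch. The bit values and the two helper functions below are
invented for illustration and are not the real ppc64 Book3S PTE layout: a
PROT_NONE pte stays present but loses its read permission, so a check that
only tests the write bit wrongly permits a read, while a check along the
lines added by this patch refuses it.

/*
 * Illustrative sketch only: the bit values below are invented for the
 * example and do not match the real ppc64 Book3S 64 PTE layout.  It
 * contrasts a write-only check (the behaviour this patch fixes) with a
 * proper check for a PROT_NONE mapping, where the pte stays present
 * but the read permission is cleared.
 */
#include <stdbool.h>
#include <stdio.h>

#define _PAGE_PRESENT		0x1UL	/* assumed value, illustration only */
#define _PAGE_READ		0x2UL
#define _PAGE_WRITE		0x4UL
#define _PAGE_PRIVILEGED	0x8UL

/* Old behaviour: read requests are never filtered, only writes are. */
static bool write_only_check(unsigned long pteval, bool write)
{
	return !write || (pteval & _PAGE_WRITE);
}

/* New behaviour, along the lines of the pte_access_permitted() below. */
static bool proper_check(unsigned long pteval, bool write)
{
	unsigned long need = _PAGE_PRESENT | _PAGE_READ;

	if (write)
		need |= _PAGE_WRITE;
	if ((pteval & need) != need)
		return false;
	if (pteval & _PAGE_PRIVILEGED)	/* kernel-only mapping */
		return false;
	return true;
}

int main(void)
{
	/* PROT_NONE on ppc64: still present, but R/W/X access cleared. */
	unsigned long prot_none_pte = _PAGE_PRESENT;

	printf("write-only check, read access: %d (wrongly allowed)\n",
	       write_only_check(prot_none_pte, false));
	printf("proper check,     read access: %d (correctly refused)\n",
	       proper_check(prot_none_pte, false));
	return 0;
}

Built as an ordinary C program, the first printf reports 1 (access wrongly
granted) and the second reports 0.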
@@ -546,6 +546,30 @@ static inline int pte_present(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
}
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	unsigned long pteval = pte_val(pte);
	/* Also check for pte_user */
	unsigned long clear_pte_bits = _PAGE_PRIVILEGED;
	/*
	 * _PAGE_READ is needed for any access and will be
	 * cleared for PROT_NONE
	 */
	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ;

	if (write)
		need_pte_bits |= _PAGE_WRITE;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return false;

	if ((pteval & clear_pte_bits) == clear_pte_bits)
		return false;
	return true;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
@@ -850,6 +874,11 @@ static inline int pud_bad(pud_t pud)
	return hash__pud_bad(pud);
}
#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return pte_access_permitted(pud_pte(pud), write);
}
#define pgd_write(pgd) pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
@@ -889,6 +918,12 @@ static inline int pgd_bad(pgd_t pgd)
	return hash__pgd_bad(pgd);
}
#define pgd_access_permitted pgd_access_permitted
static inline bool pgd_access_permitted(pgd_t pgd, bool write)
{
	return pte_access_permitted(pgd_pte(pgd), write);
}
extern struct page *pgd_page(pgd_t pgd);
/* Pointers in the page table tree are physical addresses */
@@ -1009,6 +1044,12 @@ static inline int pmd_protnone(pmd_t pmd)
#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return pte_access_permitted(pmd_pte(pmd), write);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
...
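
As a usage note, the rule the new helpers enforce on the lockless
get_user_pages_fast() walk matches what user space already observes through
the normal fault path: even a read from a PROT_NONE mapping has to be
refused. A small stand-alone demo of that user-visible behaviour (it takes
the ordinary page-fault path, not the GUP-fast path this patch touches):

/*
 * Stand-alone demo (Linux user space): a PROT_NONE mapping must refuse
 * even read access.  This exercises the ordinary fault path, not
 * get_user_pages_fast(), but the permission rule being enforced is the
 * same one the helpers above implement for the lockless walk.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>

static sigjmp_buf env;

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(env, 1);
}

int main(void)
{
	/* One anonymous page mapped with no access rights at all. */
	char *p = mmap(NULL, 4096, PROT_NONE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	signal(SIGSEGV, segv_handler);

	if (sigsetjmp(env, 1) == 0) {
		char c = *p;	/* read from PROT_NONE: must fault */
		printf("unexpected: read succeeded (%d)\n", c);
	} else {
		printf("read from PROT_NONE mapping faulted, as expected\n");
	}

	munmap(p, 4096);
	return 0;
}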