Commit 6a119eae authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Add a _PAGE_PTE bit

For a pte entry we will have _PAGE_PTE set. Our pte page
addresses have a minimum alignment requirement of HUGEPD_SHIFT_MASK + 1,
which leaves the lower 7 bits free; we use them to indicate a hugepd. That is:

For pmd and pgd entries we can then distinguish:
1) _PAGE_PTE set -> the entry is a pte
2) bits [2..6] non-zero -> the entry is a hugepd; these bits also encode
   its size. We skip bit 1 (_PAGE_PRESENT).
3) otherwise -> a pointer to the next page table (see the sketch below).
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent e34aa03c
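
To make the decode rules concrete, here is a stand-alone user-space sketch of
the classification (a model only: the low two constants mirror this patch, but
classify_entry and HUGEPD_SIZE_BITS are illustrative names, not kernel symbols):

#include <stdio.h>

#define _PAGE_PTE        0x00001UL  /* bit 0, added by this patch */
#define _PAGE_PRESENT    0x00002UL  /* bit 1, skipped by the hugepd encoding */
#define HUGEPD_SIZE_BITS 0x0007cUL  /* bits [2..6]; illustrative mask for the size field */

/* Apply rules 1)-3) from the commit message to a pmd/pgd entry value */
static const char *classify_entry(unsigned long val)
{
        if (val & _PAGE_PTE)
                return "leaf pte";
        if (val & HUGEPD_SIZE_BITS)
                return "hugepd pointer (size in bits [2..6])";
        return val ? "pointer to next table" : "invalid";
}

int main(void)
{
        printf("%s\n", classify_entry(0xf1001UL)); /* leaf pte */
        printf("%s\n", classify_entry(0xf0010UL)); /* hugepd pointer */
        printf("%s\n", classify_entry(0xf0000UL)); /* next table */
        return 0;
}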
@@ -116,10 +116,13 @@ static inline int pgd_huge(pgd_t pgd)
 static inline int hugepd_ok(hugepd_t hpd)
 {
 	/*
-	 * hugepd pointer, bottom two bits == 00 and next 4 bits
-	 * indicate size of table
+	 * If it is not a pte and has the hugepd shift mask
+	 * set, then it is a hugepd directory pointer
 	 */
-	return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+	if (!(hpd.pd & _PAGE_PTE) &&
+	    ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+		return true;
+	return false;
 }
 #define is_hugepd(hpd)		(hugepd_ok(hpd))
 #endif
...
@@ -130,25 +130,25 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
 
 static inline int pmd_huge(pmd_t pmd)
 {
 	/*
-	 * leaf pte for huge page, bottom two bits != 00
+	 * leaf pte for huge page
 	 */
-	return ((pmd_val(pmd) & 0x3) != 0x0);
+	return !!(pmd_val(pmd) & _PAGE_PTE);
 }
 
 static inline int pud_huge(pud_t pud)
 {
 	/*
-	 * leaf pte for huge page, bottom two bits != 00
+	 * leaf pte for huge page
 	 */
-	return ((pud_val(pud) & 0x3) != 0x0);
+	return !!(pud_val(pud) & _PAGE_PTE);
 }
 
 static inline int pgd_huge(pgd_t pgd)
 {
 	/*
-	 * leaf pte for huge page, bottom two bits != 00
+	 * leaf pte for huge page
 	 */
-	return ((pgd_val(pgd) & 0x3) != 0x0);
+	return !!(pgd_val(pgd) & _PAGE_PTE);
 }
 #define pgd_huge pgd_huge
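
A note on the pattern used in the replacements above: the double negation
normalizes the masked value to 0 or 1 rather than returning the raw bit. With
_PAGE_PTE at bit 0 the mask already yields 0 or 1, but !! keeps the predicates
correct for any bit position. A minimal stand-alone illustration:

#include <assert.h>

int main(void)
{
        unsigned long v = 0xf0010UL; /* illustrative entry with one high flag bit set */
        assert((v & 0x10UL) == 16);  /* the raw mask result is not a clean 0/1 */
        assert(!!(v & 0x10UL) == 1); /* !! collapses any non-zero value to 1 */
        return 0;
}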
@@ -236,10 +236,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
  */
 static inline int pmd_trans_huge(pmd_t pmd)
 {
-	/*
-	 * leaf pte for huge page, bottom two bits != 00
-	 */
-	return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
+	return !!((pmd_val(pmd) & (_PAGE_PTE | _PAGE_THP_HUGE)) ==
+		  (_PAGE_PTE | _PAGE_THP_HUGE));
 }
 
 static inline int pmd_trans_splitting(pmd_t pmd)
@@ -251,10 +249,7 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 
 static inline int pmd_large(pmd_t pmd)
 {
-	/*
-	 * leaf pte for huge page, bottom two bits != 00
-	 */
-	return ((pmd_val(pmd) & 0x3) != 0x0);
+	return !!(pmd_val(pmd) & _PAGE_PTE);
 }
 
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
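
The two predicates above now split cleanly: pmd_large() answers "is this a
leaf?" from _PAGE_PTE alone, while pmd_trans_huge() additionally requires
_PAGE_THP_HUGE, so a hugetlb leaf is large but not trans-huge. A stand-alone
model of the distinction (the _PAGE_THP_HUGE value here is illustrative, not
the real one):

#define _PAGE_PTE      0x1UL
#define _PAGE_THP_HUGE 0x100UL /* illustrative value for the model */

static int model_pmd_large(unsigned long v)
{
        return !!(v & _PAGE_PTE); /* any leaf: hugetlb or THP */
}

static int model_pmd_trans_huge(unsigned long v)
{
        /* THP leaves must carry both bits */
        return (v & (_PAGE_PTE | _PAGE_THP_HUGE)) == (_PAGE_PTE | _PAGE_THP_HUGE);
}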
...
@@ -14,11 +14,12 @@
  * We could create separate kernel read-only if we used the 3 PP bits
  * combinations that newer processors provide but we currently don't.
  */
-#define _PAGE_PRESENT		0x00001 /* software: pte contains a translation */
-#define _PAGE_USER		0x00002 /* matches one of the PP bits */
+#define _PAGE_PTE		0x00001
+#define _PAGE_PRESENT		0x00002 /* software: pte contains a translation */
 #define _PAGE_BIT_SWAP_TYPE	2
-#define _PAGE_EXEC		0x00004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED		0x00008
+#define _PAGE_USER		0x00004 /* matches one of the PP bits */
+#define _PAGE_EXEC		0x00008 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED		0x00010
 /* We can derive Memory coherence from _PAGE_NO_CACHE */
 #define _PAGE_COHERENT		0x0
 #define _PAGE_NO_CACHE		0x00020 /* I: cache inhibit */
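
Every software bit below _PAGE_NO_CACHE shifts up one position to make room
for _PAGE_PTE at bit 0. A compile-time check (illustrative, not part of the
patch; values copied from the hunk above) confirms the renumbered flags still
pack exactly into the space below 0x20:

#define _PAGE_PTE      0x00001
#define _PAGE_PRESENT  0x00002
#define _PAGE_USER     0x00004
#define _PAGE_EXEC     0x00008
#define _PAGE_GUARDED  0x00010
#define _PAGE_NO_CACHE 0x00020

_Static_assert((_PAGE_PTE | _PAGE_PRESENT | _PAGE_USER | _PAGE_EXEC | _PAGE_GUARDED)
               == _PAGE_NO_CACHE - 1,
               "low software bits must pack exactly below _PAGE_NO_CACHE");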
@@ -49,7 +50,7 @@
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |		\
 			 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
-			 _PAGE_THP_HUGE)
+			 _PAGE_THP_HUGE | _PAGE_PTE)
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/book3s/64/hash-64k.h>
@@ -135,7 +136,7 @@
  * pgprot changes
  */
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED | _PAGE_SPECIAL)
+			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE)
 /*
  * Mask of bits returned by pte_pgprot()
  */
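
The reason _PAGE_PTE joins both change masks: protection updates are
implemented as "keep the bits in the mask, replace everything else", so a mask
that omitted _PAGE_PTE would silently strip the bit and the entry would no
longer classify as a pte. A simplified model of such an update (not the kernel
implementation):

/* keep the bits covered by chg_mask, swap in the new protection bits */
static unsigned long model_pte_modify(unsigned long pte, unsigned long newprot,
                                      unsigned long chg_mask)
{
        return (pte & chg_mask) | newprot;
}

With _PAGE_PTE included in chg_mask the bit survives any such update; with the
old mask it would be lost on the first pgprot change.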
...
@@ -213,8 +213,7 @@ static inline int pmd_protnone(pmd_t pmd)
 
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
-	/* Do nothing, mk_pmd() does this part.  */
-	return pmd;
+	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
 }
 
 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
...
@@ -40,6 +40,11 @@
 #else
 #define _PAGE_RW 0
 #endif
+
+#ifndef _PAGE_PTE
+#define _PAGE_PTE 0
+#endif
+
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #endif
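
This fallback follows the existing pattern in this header: platforms that do
not define a real _PAGE_PTE get a value of 0, so shared code can test the bit
unconditionally and the compiler folds the check away. A sketch of the effect
(is_leaf_pte is a hypothetical helper, not kernel code):

#ifndef _PAGE_PTE
#define _PAGE_PTE 0 /* platforms without the bit */
#endif

static int is_leaf_pte(unsigned long val)
{
        /* with _PAGE_PTE == 0 this reduces to "return 0" at compile time */
        return !!(val & _PAGE_PTE);
}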
...
@@ -894,8 +894,8 @@ void flush_dcache_icache_hugepage(struct page *page)
  * We have 4 cases for pgds and pmds:
  * (1) invalid (all zeroes)
  * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page, bottom two bits != 00
- * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ * (3) leaf pte for huge page, _PAGE_PTE set
+ * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
  *
  * So long as we atomically load page table pointers we are safe against teardown,
  * we can follow the address down to the page and take a ref on it.
...
@@ -179,6 +179,10 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 */
 	VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
 		(_PAGE_PRESENT | _PAGE_USER));
+	/*
+	 * Add the pte bit when trying to set a pte
+	 */
+	pte = __pte(pte_val(pte) | _PAGE_PTE);
 
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
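
Setting the bit inside set_pte_at() centralizes the invariant: callers keep
building ptes from pfn and pgprot as before, yet every installed entry is
guaranteed to carry _PAGE_PTE. A stand-alone model of that guarantee (names
hypothetical):

#include <assert.h>

#define MODEL_PAGE_PTE     0x1UL
#define MODEL_PAGE_PRESENT 0x2UL

static unsigned long installed_pte;

/* model of the new set_pte_at(): OR in the pte bit before installing */
static void model_set_pte_at(unsigned long pte)
{
        installed_pte = pte | MODEL_PAGE_PTE;
}

int main(void)
{
        model_set_pte_at(0xf0000UL | MODEL_PAGE_PRESENT); /* caller omits the bit */
        assert(installed_pte & MODEL_PAGE_PTE);           /* it is set regardless */
        return 0;
}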
...
@@ -765,13 +765,8 @@ static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
 
 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
 {
 	unsigned long pmdv;
-	/*
-	 * For a valid pte, we would have _PAGE_PRESENT always
-	 * set. We use this to check THP page at pmd level.
-	 * leaf pte for huge page, bottom two bits != 00
-	 */
+
 	pmdv = pfn << PTE_RPN_SHIFT;
-	pmdv |= _PAGE_THP_HUGE;
 	return pmd_set_protbits(__pmd(pmdv), pgprot);
 }
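
With pfn_pmd() no longer ORing in _PAGE_THP_HUGE, marking a THP pmd is the job
of pmd_mkhuge() alone, which (per the pgtable.h hunk above) now sets both
_PAGE_PTE and _PAGE_THP_HUGE. A stand-alone model of the composed path (bit
values and the shift are illustrative):

#define MODEL_PTE      0x1UL
#define MODEL_THP_HUGE 0x100UL

/* pfn_pmd() only places the pfn; it no longer marks the entry as THP */
static unsigned long model_pfn_pmd(unsigned long pfn, int rpn_shift)
{
        return pfn << rpn_shift;
}

/* pmd_mkhuge() now stamps both bits that pmd_trans_huge() tests */
static unsigned long model_pmd_mkhuge(unsigned long pmd)
{
        return pmd | MODEL_PTE | MODEL_THP_HUGE;
}

A THP mapping built as model_pmd_mkhuge(model_pfn_pmd(pfn, shift)) therefore
carries both bits, matching the new pmd_trans_huge() check.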
...