Commit 6fdc05d4 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

x86: unify pgtable accessors which use supported_pte_mask

Make users of supported_pte_mask common.  This has the side-effect of
introducing the variable for 32-bit non-PAE, but I think it's a pretty
small cost to simplify the code.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c3bcfb57
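
To make the pattern being unified concrete, here is a small standalone sketch (not from the patch; the kernel names are mocked with plain C so it builds in userspace): compose a pte from a pfn and protection bits, then strip any flags the CPU does not support, such as _PAGE_NX on non-NX hardware.

#include <stdint.h>
#include <stdio.h>

/* Standalone mocks of the kernel types/constants, for illustration only. */
typedef uint64_t pteval_t;
#define PAGE_SHIFT	12
#define _PAGE_PRESENT	(1ULL << 0)
#define _PAGE_RW	(1ULL << 1)
#define _PAGE_NX	(1ULL << 63)

/* Default: assume NX is unsupported until CPU detection says otherwise. */
static pteval_t __supported_pte_mask = ~_PAGE_NX;

/* Mirrors the unified pfn_pte(): build the pte, then drop unsupported bits. */
static pteval_t pfn_pte(unsigned long pfn, pteval_t prot)
{
	return (((pteval_t)pfn << PAGE_SHIFT) | prot) & __supported_pte_mask;
}

int main(void)
{
	pteval_t pte = pfn_pte(0x1234, _PAGE_PRESENT | _PAGE_RW | _PAGE_NX);

	/* NX is filtered out because the mask says the CPU can't use it. */
	printf("pte = %#llx (NX %s)\n", (unsigned long long)pte,
	       (pte & _PAGE_NX) ? "kept" : "dropped");
	return 0;
}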
...@@ -478,11 +478,12 @@ void zap_low_mappings (void)
 
 int nx_enabled = 0;
 
+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
 #ifdef CONFIG_X86_PAE
 
 static int disable_nx __initdata = 0;
-u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
-EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 /*
  * noexec = on|off
...
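
The u64 to pteval_t switch is what lets this one definition serve every configuration. As I read the asm-x86 page headers of this era (paraphrased below, not part of this diff, and u64 mocked so the snippet stands alone), pteval_t is sized to match the page-table entry format:

/* Illustrative paraphrase of the era's <asm-x86/page*.h> sizing. */
typedef unsigned long long u64;

#ifdef CONFIG_X86_64
typedef unsigned long	pteval_t;	/* 64-bit kernel: 64-bit ptes */
#elif defined(CONFIG_X86_PAE)
typedef u64		pteval_t;	/* 32-bit kernel, PAE: 64-bit ptes */
#else
typedef unsigned long	pteval_t;	/* 32-bit kernel, non-PAE: 32-bit ptes */
#endif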
...@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_none(x)	(!(x).pte_low)
 #define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
-#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 /*
  * All present pages are kernel-executable:
...
...@@ -158,20 +158,6 @@ static inline unsigned long pte_pfn(pte_t pte)
 	return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
 }
 
-extern unsigned long long __supported_pte_mask;
-
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
-	return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
-}
-
-static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-{
-	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
...
...@@ -141,6 +141,20 @@ static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_R
 static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
 static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_PSE); }
 
+extern pteval_t __supported_pte_mask;
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
+		      pgprot_val(pgprot)) & __supported_pte_mask);
+}
+
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
+		      pgprot_val(pgprot)) & __supported_pte_mask);
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
...
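
The widening cast in the common pfn_pte() is the subtle part: under 32-bit PAE, a pfn for memory above 4GB no longer fits in unsigned long once shifted by PAGE_SHIFT. A standalone illustration (the truncation is forced with an explicit 32-bit cast so it reproduces on any host):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* 64 bits under PAE, as in the kernel */
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long pfn = 0x101000UL;	/* a page just above the 4GB mark */

	/* What a 32-bit unsigned long shift would keep: high bits lost. */
	uint32_t truncated = (uint32_t)((uint64_t)pfn << PAGE_SHIFT);
	/* What the unified pfn_pte() computes after the phys_addr_t cast. */
	phys_addr_t widened = (phys_addr_t)pfn << PAGE_SHIFT;

	printf("truncated: %#x\n", (unsigned)truncated);		/* 0x1000000 */
	printf("widened:   %#llx\n", (unsigned long long)widened);	/* 0x101000000 */
	return 0;
}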
...@@ -17,7 +17,6 @@ extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pgd_t init_level4_pgt[];
-extern unsigned long __supported_pte_mask;
 
 #define swapper_pg_dir init_level4_pgt
...
...@@ -165,14 +164,6 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)	((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
-	pte_t pte;
-	pte_val(pte) = (page_nr << PAGE_SHIFT);
-	pte_val(pte) |= pgprot_val(pgprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
-}
-
 struct vm_area_struct;
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
...
...@@ -239,15 +230,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 /* page, protection -> pte */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-/* Change flags of a PTE */
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-	pte_val(pte) &= _PAGE_CHG_MASK;
-	pte_val(pte) |= pgprot_val(newprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
-}
-
 #define pte_index(address) \
 		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
...
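
The pte_modify() removed here follows the same masking pattern; presumably its common replacement does too, though that part of the diff is not shown. Its semantics, in a standalone sketch with mocked and simplified constants (the real _PAGE_CHG_MASK carries a few more bits): keep the pfn plus the sticky accessed/dirty bits, substitute the new protection, then mask off unsupported flags.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
#define PAGE_SHIFT	12
#define _PAGE_PRESENT	(1ULL << 0)
#define _PAGE_RW	(1ULL << 1)
#define _PAGE_ACCESSED	(1ULL << 5)
#define _PAGE_DIRTY	(1ULL << 6)
#define _PAGE_NX	(1ULL << 63)
/* Simplified: the pfn plus the bits that must survive a protection change. */
#define PHYSICAL_PAGE_MASK	(~0ULL << PAGE_SHIFT)
#define _PAGE_CHG_MASK	(PHYSICAL_PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

static pteval_t __supported_pte_mask = ~_PAGE_NX;

/* Mirrors the removed pte_modify() body, on a raw pteval_t. */
static pteval_t pte_modify(pteval_t pte, pteval_t newprot)
{
	pte &= _PAGE_CHG_MASK;
	pte |= newprot;
	pte &= __supported_pte_mask;
	return pte;
}

int main(void)
{
	pteval_t pte = (0x1234ULL << PAGE_SHIFT) |
		       _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY;
	/* Drop write permission; the pfn and dirty bit survive. */
	pteval_t ro = pte_modify(pte, _PAGE_PRESENT);

	printf("before %#llx, after %#llx\n",
	       (unsigned long long)pte, (unsigned long long)ro);
	return 0;
}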