Commit a501e324 authored by Catalin Marinas

arm64: Clean up the default pgprot setting

The primary aim of this patchset is to remove the pgprot_default and
prot_sect_default global variables and rely strictly on predefined
values. The original goal was to be able to run SMP kernels on UP
hardware by not setting the Shareability bit. However, UP ARMv8
hardware is unlikely to appear and, even if it does, the Shareability
bit is no longer assumed to disable cacheable accesses.

A side effect is that the device mappings now have the Shareability
attribute set. The hardware, however, should ignore it since Device
accesses are always Outer Shareable.

Following the removal of the two global variables, there is some PROT_*
macro reshuffling and cleanup, including the __PAGE_* macros (replaced
by PAGE_*).
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
parent bc07c2c6
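
To see the shape of the change outside the kernel tree, here is a minimal standalone C sketch of the compile-time selection that replaces the old runtime fixup. The bit values mirror the arm64 asm/pgtable-hwdef.h encodings, but the program itself is illustrative only and is not part of this commit:

#include <stdio.h>

/*
 * Stand-ins for the asm/pgtable-hwdef.h attribute bits used by
 * PROT_DEFAULT (the values match the arm64 descriptor encoding).
 */
#define PTE_TYPE_PAGE	(3ULL << 0)	/* valid page descriptor */
#define PTE_SHARED	(3ULL << 8)	/* SH[1:0] = Inner Shareable */
#define PTE_AF		(1ULL << 10)	/* Access Flag */

#define CONFIG_SMP	1		/* pretend this is an SMP build */

/*
 * After this commit the default attributes are compile-time constants;
 * before it, PTE_SHARED was OR-ed in at boot by init_mem_pgprot().
 */
#ifdef CONFIG_SMP
#define PROT_DEFAULT	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#else
#define PROT_DEFAULT	(PTE_TYPE_PAGE | PTE_AF)
#endif

int main(void)
{
	/* prints 0x703: page type + AF + inner-shareable SH bits */
	printf("PROT_DEFAULT = 0x%llx\n", (unsigned long long)PROT_DEFAULT);
	return 0;
}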
arch/arm64/include/asm/io.h
@@ -230,19 +230,11 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define iounmap				__iounmap
 
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
 #define ARCH_HAS_IOREMAP_WC
 #include <asm-generic/iomap.h>
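
As a usage note for the hunk above: ioremap() and ioremap_nocache() hand out Device-nGnRE mappings, while ioremap_wc() gives Normal Non-cacheable. A hypothetical driver fragment using that API (the address and device are invented for illustration, not part of this commit):

#include <linux/io.h>
#include <linux/module.h>
#include <linux/sizes.h>

#define EXAMPLE_PHYS_BASE	0x09000000UL	/* invented example address */

static void __iomem *regs;

static int __init example_init(void)
{
	/* expands to __ioremap(..., __pgprot(PROT_DEVICE_nGnRE)) */
	regs = ioremap(EXAMPLE_PHYS_BASE, SZ_4K);
	if (!regs)
		return -ENOMEM;
	writel(0x1, regs);	/* Device-nGnRE: no gathering, no reordering */
	return 0;
}

static void __exit example_exit(void)
{
	iounmap(regs);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");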
arch/arm64/include/asm/pgtable.h
@@ -52,67 +52,60 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #endif
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
 
-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT		PTE_TYPE_PAGE | PTE_AF
-
-extern pgprot_t pgprot_default;
-
-#define __pgprot_modify(prot,mask,bits) \
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
-
-#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
-
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
+#endif
+
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+
+#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+
+#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+
+#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
-#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
+#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
-#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
 
-#endif /* __ASSEMBLY__ */
-
-#define __P000  __PAGE_NONE
-#define __P001  __PAGE_READONLY
-#define __P010  __PAGE_COPY
-#define __P011  __PAGE_COPY
-#define __P100  __PAGE_EXECONLY
-#define __P101  __PAGE_READONLY_EXEC
-#define __P110  __PAGE_COPY_EXEC
-#define __P111  __PAGE_COPY_EXEC
+#define __P000  PAGE_NONE
+#define __P001  PAGE_READONLY
+#define __P010  PAGE_COPY
+#define __P011  PAGE_COPY
+#define __P100  PAGE_EXECONLY
+#define __P101  PAGE_READONLY_EXEC
+#define __P110  PAGE_COPY_EXEC
+#define __P111  PAGE_COPY_EXEC
 
-#define __S000  __PAGE_NONE
-#define __S001  __PAGE_READONLY
-#define __S010  __PAGE_SHARED
-#define __S011  __PAGE_SHARED
-#define __S100  __PAGE_EXECONLY
-#define __S101  __PAGE_READONLY_EXEC
-#define __S110  __PAGE_SHARED_EXEC
-#define __S111  __PAGE_SHARED_EXEC
+#define __S000  PAGE_NONE
+#define __S001  PAGE_READONLY
+#define __S010  PAGE_SHARED
+#define __S011  PAGE_SHARED
+#define __S100  PAGE_EXECONLY
+#define __S101  PAGE_READONLY_EXEC
+#define __S110  PAGE_SHARED_EXEC
+#define __S111  PAGE_SHARED_EXEC
 
-#ifndef __ASSEMBLY__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -274,6 +267,9 @@ static inline int has_transparent_hugepage(void)
 	return 1;
 }
 
+#define __pgprot_modify(prot,mask,bits) \
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
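
The second hunk moves __pgprot_modify() down next to its remaining in-file user: the comment it now precedes ("Mark the prot value as uncacheable and unbufferable") documents pgprot_noncached(), which at this point in the tree is built from it, roughly as below (modulo the execute-never bits; sketched from the surrounding code, not shown in this diff):

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE))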
arch/arm64/kernel/setup.c
@@ -376,7 +376,6 @@ void __init setup_arch(char **cmdline_p)
 
 	*cmdline_p = boot_command_line;
 
-	init_mem_pgprot();
 	early_ioremap_init();
 
 	parse_early_param();
arch/arm64/mm/dma-mapping.c
@@ -115,7 +115,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		map[i] = page + i;
 	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, pgprot_default, false));
+			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
 	kfree(map);
 
 	if (!coherent_ptr)
 		goto no_map;
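
For context: this is the non-coherent allocator, and __get_dma_pgprot() rewrites the memory attributes for the !coherent case anyway, so the new constant is equivalent to what the runtime default produced here. Its shape at this point in the tree is roughly (a sketch, not part of this diff):

static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	/* non-coherent or explicitly write-combined: Normal Non-cacheable */
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}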
arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
 struct cachepolicy {
 	const char	policy[16];
 	u64		mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
 }
 early_param("cachepolicy", early_cachepolicy);
 
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
-	pteval_t default_pgprot;
-	int i;
-
-	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
-	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
-	/*
-	 * Mark memory with the "shared" attribute for SMP systems
-	 */
-	default_pgprot |= PTE_SHARED;
-	prot_sect_kernel |= PMD_SECT_S;
-#endif
-
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | default_pgprot);
-	}
-
-	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -205,7 +173,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+			set_pmd(pmd, __pmd(phys | PROT_SECT_NORMAL_EXEC));
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -417,7 +385,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 			if (!p)
 				return -ENOMEM;
 
-			set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
 		} else
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
 	} while (addr = next, addr != end);
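
Why the removed fixup loop could go: protection_map[] is a static table in generic mm code, filled from the __P*/__S* macros, and now that those expand to full compile-time constants the table is already correct as initialized. The consuming code (mm/mmap.c, unchanged by this patch) reads roughly:

/* mm/mmap.c */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);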