Commit a7b9f671 authored by Christophe Leroy, committed by Scott Wood

powerpc32: add handling of _PAGE_RO

Some powerpc CPUs, such as the 8xx, have no RW (read/write) bit among their
PTE bits but an RO (read only) bit instead. This patch implements handling
of a _PAGE_RO flag to be used in place of _PAGE_RW.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[scottwood@freescale.com: fix whitespace]
Signed-off-by: Scott Wood <scottwood@freescale.com>
parent d2caa3ce
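To see what the patch has to achieve, consider the write-permission test: on
an RW-style platform "writable" means "RW is set", while on an RO-style
platform such as the 8xx it means "RO is clear". The standalone userspace
sketch below (not part of the patch; the 0x200 bit position is made up for
illustration) shows how a single expression covers both conventions once the
unused macro is defined to 0, which is exactly the trick the patch applies
in pte_write() and ioremap_prot():

#include <stdio.h>

/* Model an 8xx-style platform: no RW bit, an RO bit instead.
 * 0x200 is a made-up position; the real value is platform specific. */
#define _PAGE_RW	0x000
#define _PAGE_RO	0x200

/* Mirrors the patched pte_write() test: a PTE is writable unless its
 * permission bits reduce to exactly _PAGE_RO. On RW-style platforms
 * (_PAGE_RO == 0) this degenerates to the old (bits & _PAGE_RW) test. */
static int is_writable(unsigned long pte_bits)
{
	return (pte_bits & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
}

int main(void)
{
	printf("read-only PTE writable? %d\n", is_writable(_PAGE_RO)); /* 0 */
	printf("default PTE writable?   %d\n", is_writable(0));        /* 1 */
	return 0;
}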
@@ -275,7 +275,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pte_t *ptep)
 {
-	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
 }

 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
@@ -286,9 +286,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
-	unsigned long bits = pte_val(entry) &
+	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

-	pte_update(ptep, 0, bits);
+	pte_update(ptep, clr, set);
 }

 #define __HAVE_ARCH_PTE_SAME
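The clr computation above deserves a note: __ptep_set_access_flags() may now
need to clear a bit as well as set bits, because granting write access on an
RO-style platform means removing _PAGE_RO from the PTE. A minimal standalone
check of the expression (hypothetical bit value, not kernel code):

#include <assert.h>

#define _PAGE_RO	0x200	/* made-up 8xx-style bit position */

int main(void)
{
	/* New entry grants write access: RO absent, so it lands in clr
	 * and pte_update() clears it from the PTE. */
	unsigned long entry = 0;
	assert((~entry & _PAGE_RO) == _PAGE_RO);

	/* New entry is read-only: RO present, clr is 0, bit preserved. */
	entry = _PAGE_RO;
	assert((~entry & _PAGE_RO) == 0);
	return 0;
}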
...
@@ -30,7 +30,8 @@ struct mm_struct;
 #include <asm/tlbflush.h>

 /* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
+static inline int pte_write(pte_t pte)
+{	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
@@ -115,12 +116,14 @@ static inline unsigned long pte_pfn(pte_t pte)	{
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE);
+	pte_val(pte) |= _PAGE_RO; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
 static inline pte_t pte_mkold(pte_t pte) {
 	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte) {
+	pte_val(pte) &= ~_PAGE_RO;
 	pte_val(pte) |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) {
 	pte_val(pte) |= _PAGE_DIRTY; return pte; }
...
@@ -34,6 +34,12 @@
 #ifndef _PAGE_PSIZE
 #define _PAGE_PSIZE		0
 #endif
+/* _PAGE_RO and _PAGE_RW shall not be defined at the same time */
+#ifndef _PAGE_RO
+#define _PAGE_RO	0
+#else
+#define _PAGE_RW	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #endif
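This mutual-exclusion block is what lets all the generic code above name both
macros unconditionally: a platform defines exactly one of the two, and the
fallback collapses the other to 0, so ORing both into a mask is always
harmless. A runnable sketch of the intended usage (hypothetical platform
header with a made-up value, not kernel code):

#include <stdio.h>

/* A hypothetical 8xx-style platform header would provide only RO: */
#define _PAGE_RO	0x200	/* made-up value for illustration */

/* The fallback block from the hunk above then forces the unused
 * macro to 0: */
#ifndef _PAGE_RO
#define _PAGE_RO	0
#else
#define _PAGE_RW	0
#endif

int main(void)
{
	/* ORing both is always safe: the undefined one contributes 0,
	 * so no #ifdef is needed at any use site. */
	printf("mask = %#lx\n", (unsigned long)(_PAGE_RW | _PAGE_RO));
	return 0;
}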
@@ -42,10 +48,10 @@
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 #ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO		0
+#define _PAGE_KERNEL_RO		(_PAGE_RO)
 #endif
 #ifndef _PAGE_KERNEL_ROX
-#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
+#define _PAGE_KERNEL_ROX	(_PAGE_EXEC | _PAGE_RO)
 #endif
 #ifndef _PAGE_KERNEL_RW
 #define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
@@ -95,7 +101,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 /* Mask of bits returned by pte_pgprot() */
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
-			 _PAGE_USER | _PAGE_ACCESSED | \
+			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | \
 			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)

 #ifdef CONFIG_NUMA_BALANCING
@@ -128,11 +134,14 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  */
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+				 _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+				 _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+				 _PAGE_EXEC)

 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
...
@@ -146,7 +146,7 @@ void __iomem *
 ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
 	/* writeable implies dirty for kernel addresses */
-	if (flags & _PAGE_RW)
+	if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
 		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
...