Commit de0f9387 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/8xx: Remove _PAGE_USER and handle user access at PMD level

As Linux kernel separates KERNEL and USER address spaces, there is
therefore no need to flag USER access at page level.

Today, the 8xx TLB handlers already handle user access in the L1 entry
through Access Protection Groups, it is then natural to move the user
access handling at PMD level once _PAGE_NA allows to handle PAGE_NONE
protection without _PAGE_USER.

In the meantime, as we free up one bit in the PTE, we can use it to
include SPS (page size flag) in the PTE and avoid handling it at every
TLB miss, hence removing special handling based on compiled page size.

For _PAGE_EXEC, we rework it to use PP PTE bits, avoiding the copy
of the _PAGE_EXEC bit into the L1 entry. Unfortunately, we are not
able to put it at the correct location as it conflicts with
NA/RO/RW bits for data entries.

Upper bits of APG in L1 entry overlap with PMD base address. In
order to avoid having to filter that out, we set up all groups so that
upper bits can have any value.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 35175033
...@@ -47,8 +47,7 @@ static inline pte_t *hugepd_page(hugepd_t hpd) ...@@ -47,8 +47,7 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
{ {
BUG_ON(!hugepd_ok(hpd)); BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx #ifdef CONFIG_PPC_8xx
return (pte_t *)__va(hpd_val(hpd) & return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else #else
return (pte_t *)((hpd_val(hpd) & return (pte_t *)((hpd_val(hpd) &
~HUGEPD_SHIFT_MASK) | PD_HUGE); ~HUGEPD_SHIFT_MASK) | PD_HUGE);
......
...@@ -29,17 +29,17 @@ ...@@ -29,17 +29,17 @@
#define MI_Kp 0x40000000 /* Should always be set */ #define MI_Kp 0x40000000 /* Should always be set */
/* /*
* All pages' PP exec bits are set to 000, which means Execute for Supervisor * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
* and no Execute for User. * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
* Then we use the APG to say whether accesses are according to Page rules, * respectively NA for All or X for Supervisor and no access for User.
* "all Supervisor" rules (Exec for all) and "all User" rules (Exec for noone) * Then we use the APG to say whether accesses are according to Page rules or
* Therefore, we define 4 APG groups. msb is _PAGE_EXEC, lsb is _PAGE_USER * "all Supervisor" rules (Access to all)
* 0 (00) => Not User, no exec => 11 (all accesses performed as user) * Therefore, we define 2 APG groups. lsb is _PMD_USER
* 1 (01) => User but no exec => 11 (all accesses performed as user) * 0 => No user => 01 (all accesses performed according to page definition)
* 2 (10) => Not User, exec => 01 (rights according to page definition) * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
* 3 (11) => User, exec => 00 (all accesses performed as supervisor) * We define all 16 groups so that all other bits of APG can take any value
*/ */
#define MI_APG_INIT 0xf4ffffff #define MI_APG_INIT 0x44444444
/* The effective page number register. When read, contains the information /* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in * about the last instruction TLB miss. When MI_RPN is written, bits in
...@@ -102,17 +102,17 @@ ...@@ -102,17 +102,17 @@
#define MD_Kp 0x40000000 /* Should always be set */ #define MD_Kp 0x40000000 /* Should always be set */
/* /*
* All pages' PP data bits are set to either 000 or 011, which means * All pages' PP data bits are set to either 000 or 011 or 001, which means
* respectively RW for Supervisor and no access for User, or RO for * respectively RW for Supervisor and no access for User, or RO for
* Supervisor and no access for user. * Supervisor and no access for user and NA for ALL.
* Then we use the APG to say whether accesses are according to Page rules or * Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all) * "all Supervisor" rules (Access to all)
* Therefore, we define 2 APG groups. lsb is _PAGE_USER * Therefore, we define 2 APG groups. lsb is _PMD_USER
* 0 => No user => 01 (all accesses performed according to page definition) * 0 => No user => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
* according to page definition) * We define all 16 groups so that all other bits of APG can take any value
*/ */
#define MD_APG_INIT 0x4fffffff #define MD_APG_INIT 0x44444444
/* The effective page number register. When read, contains the information /* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in * about the last instruction TLB miss. When MD_RPN is written, bits in
......
...@@ -61,7 +61,8 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, ...@@ -61,7 +61,8 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page) pgtable_t pte_page)
{ {
*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT); *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
_PMD_PRESENT);
} }
#define pmd_pgtable(pmd) pmd_page(pmd) #define pmd_pgtable(pmd) pmd_page(pmd)
......
...@@ -32,27 +32,33 @@ ...@@ -32,27 +32,33 @@
#define _PAGE_PRESENT 0x0001 /* Page is valid */ #define _PAGE_PRESENT 0x0001 /* Page is valid */
#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
#define _PAGE_PRIVILEGED 0x0004 /* No ASID (context) compare */ #define _PAGE_PRIVILEGED 0x0004 /* No ASID (context) compare */
#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */ #define _PAGE_HUGE 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/
#define _PAGE_DIRTY 0x0100 /* C: page changed */ #define _PAGE_DIRTY 0x0100 /* C: page changed */
/* These 4 software bits must be masked out when the L2 entry is loaded /* These 4 software bits must be masked out when the L2 entry is loaded
* into the TLB. * into the TLB.
*/ */
#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */ #define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
#define _PAGE_USER 0x0020 /* Copied to L1 APG lsb */ #define _PAGE_SPECIAL 0x0020 /* SW entry */
#define _PAGE_EXEC 0x0040 /* Copied to L1 APG */ #define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */
#define _PAGE_ACCESSED 0x0080 /* software: page referenced */ #define _PAGE_ACCESSED 0x0080 /* software: page referenced */
#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */ #define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
#define _PMD_PRESENT 0x0001 #define _PMD_PRESENT 0x0001
#define _PMD_BAD 0x0ff0 #define _PMD_BAD 0x0fd0
#define _PMD_PAGE_MASK 0x000c #define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c #define _PMD_PAGE_8M 0x000c
#define _PMD_PAGE_512K 0x0004 #define _PMD_PAGE_512K 0x0004
#define _PMD_USER 0x0020 /* APG 1 */
/* Until my rework is finished, 8xx still needs atomic PTE updates */ /* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1 #define PTE_ATOMIC_UPDATES 1
#ifdef CONFIG_PPC_16K_PAGES
#define _PAGE_PSIZE _PAGE_HUGE
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */ #endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
...@@ -126,7 +126,7 @@ static inline pte_t pte_mkspecial(pte_t pte) ...@@ -126,7 +126,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
static inline pte_t pte_mkhuge(pte_t pte) static inline pte_t pte_mkhuge(pte_t pte)
{ {
return pte; return __pte(pte_val(pte) | _PAGE_HUGE);
} }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
......
...@@ -53,6 +53,9 @@ ...@@ -53,6 +53,9 @@
#ifndef _PAGE_NA #ifndef _PAGE_NA
#define _PAGE_NA 0 #define _PAGE_NA 0
#endif #endif
#ifndef _PAGE_HUGE
#define _PAGE_HUGE 0
#endif
#ifndef _PMD_PRESENT_MASK #ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK _PMD_PRESENT #define _PMD_PRESENT_MASK _PMD_PRESENT
...@@ -61,6 +64,9 @@ ...@@ -61,6 +64,9 @@
#define _PMD_SIZE 0 #define _PMD_SIZE 0
#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
#endif #endif
#ifndef _PMD_USER
#define _PMD_USER 0
#endif
#ifndef _PAGE_KERNEL_RO #ifndef _PAGE_KERNEL_RO
#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_RO) #define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_RO)
#endif #endif
......
...@@ -52,11 +52,7 @@ ...@@ -52,11 +52,7 @@
* Value for the bits that have fixed value in RPN entries. * Value for the bits that have fixed value in RPN entries.
* Also used for tagging DAR for DTLBerror. * Also used for tagging DAR for DTLBerror.
*/ */
#ifdef CONFIG_PPC_16K_PAGES
#define RPN_PATTERN (0x00f0 | MD_SPS16K)
#else
#define RPN_PATTERN 0x00f0 #define RPN_PATTERN 0x00f0
#endif
#define PAGE_SHIFT_512K 19 #define PAGE_SHIFT_512K 19
#define PAGE_SHIFT_8M 23 #define PAGE_SHIFT_8M 23
...@@ -358,31 +354,23 @@ _ENTRY(ITLBMiss_cmp) ...@@ -358,31 +354,23 @@ _ENTRY(ITLBMiss_cmp)
#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
mtcr r12 mtcr r12
#endif #endif
/* Insert the APG into the TWC from the Linux PTE. */
rlwimi r11, r10, 0, 25, 26
/* Load the MI_TWC with the attributes for this "segment." */ /* Load the MI_TWC with the attributes for this "segment." */
mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
rlwimi r10, r11, 1, MI_SPS16K
#endif
#ifdef CONFIG_SWAP #ifdef CONFIG_SWAP
rlwinm r11, r10, 32-5, _PAGE_PRESENT rlwinm r11, r10, 32-5, _PAGE_PRESENT
and r11, r11, r10 and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT rlwimi r10, r11, 0, _PAGE_PRESENT
#endif #endif
li r11, RPN_PATTERN li r11, RPN_PATTERN | 0x200
/* The Linux PTE won't go exactly into the MMU TLB. /* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 20-23 and 28 must be clear. * Software indicator bits 20 and 23 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be * Software indicator bits 22, 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior * set. All other Linux PTE bits control the behavior
* of the MMU. * of the MMU.
*/ */
#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) rlwimi r11, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
rlwimi r10, r11, 0, 0x0ff0 /* Set 24-27, clear 20-23 */ rlwimi r10, r11, 0, 0x0ff0 /* Set 22, 24-27, clear 20,23 */
#else
rlwimi r10, r11, 0, 0x0ff8 /* Set 24-27, clear 20-23,28 */
#endif
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
/* Restore registers */ /* Restore registers */
...@@ -419,7 +407,6 @@ _ENTRY(itlb_miss_perf) ...@@ -419,7 +407,6 @@ _ENTRY(itlb_miss_perf)
rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
#endif #endif
lwz r10, 0(r10) /* Get the pte */ lwz r10, 0(r10) /* Get the pte */
rlwinm r11, r11, 0, 0xf
b 4b b 4b
20: /* 512k pages */ 20: /* 512k pages */
...@@ -428,7 +415,6 @@ _ENTRY(itlb_miss_perf) ...@@ -428,7 +415,6 @@ _ENTRY(itlb_miss_perf)
/* Add level 2 base */ /* Add level 2 base */
rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
lwz r10, 0(r10) /* Get the pte */ lwz r10, 0(r10) /* Get the pte */
rlwinm r11, r11, 0, 0xf
b 4b b 4b
#endif #endif
...@@ -479,20 +465,15 @@ _ENTRY(DTLBMiss_jmp) ...@@ -479,20 +465,15 @@ _ENTRY(DTLBMiss_jmp)
4: 4:
mtcr r12 mtcr r12
/* Insert the Guarded flag and APG into the TWC from the Linux PTE. /* Insert the Guarded flag into the TWC from the Linux PTE.
* It is bit 26-27 of both the Linux PTE and the TWC (at least * It is bit 27 of both the Linux PTE and the TWC (at least
* I got that right :-). It will be better when we can put * I got that right :-). It will be better when we can put
* this into the Linux pgd/pmd and load it in the operation * this into the Linux pgd/pmd and load it in the operation
* above. * above.
*/ */
rlwimi r11, r10, 0, 26, 27 rlwimi r11, r10, 0, _PAGE_GUARDED
mtspr SPRN_MD_TWC, r11 mtspr SPRN_MD_TWC, r11
/* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
* In 16k pages mode, SPS is always 1 */
#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
rlwimi r10, r11, 1, MD_SPS16K
#endif
/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
* We also need to know if the insn is a load/store, so: * We also need to know if the insn is a load/store, so:
* Clear _PAGE_PRESENT and load that which will * Clear _PAGE_PRESENT and load that which will
...@@ -508,17 +489,12 @@ _ENTRY(DTLBMiss_jmp) ...@@ -508,17 +489,12 @@ _ENTRY(DTLBMiss_jmp)
rlwimi r10, r11, 0, _PAGE_PRESENT rlwimi r10, r11, 0, _PAGE_PRESENT
#endif #endif
/* The Linux PTE won't go exactly into the MMU TLB. /* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 22 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be * Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior * set. All other Linux PTE bits control the behavior
* of the MMU. * of the MMU.
*/ */
li r11, RPN_PATTERN li r11, RPN_PATTERN
#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */ rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */
#else
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
#endif
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
/* Restore registers */ /* Restore registers */
...@@ -552,7 +528,6 @@ _ENTRY(dtlb_miss_perf) ...@@ -552,7 +528,6 @@ _ENTRY(dtlb_miss_perf)
rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
#endif #endif
lwz r10, 0(r10) /* Get the pte */ lwz r10, 0(r10) /* Get the pte */
rlwinm r11, r11, 0, 0xf
b 4b b 4b
20: /* 512k pages */ 20: /* 512k pages */
...@@ -561,7 +536,6 @@ _ENTRY(dtlb_miss_perf) ...@@ -561,7 +536,6 @@ _ENTRY(dtlb_miss_perf)
/* Add level 2 base */ /* Add level 2 base */
rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
lwz r10, 0(r10) /* Get the pte */ lwz r10, 0(r10) /* Get the pte */
rlwinm r11, r11, 0, 0xf
b 4b b 4b
#endif #endif
...@@ -712,7 +686,7 @@ _ENTRY(dtlb_miss_exit_3) ...@@ -712,7 +686,7 @@ _ENTRY(dtlb_miss_exit_3)
ITLBMissLinear: ITLBMissLinear:
mtcr r12 mtcr r12
/* Set 8M byte page and mark it valid */ /* Set 8M byte page and mark it valid */
li r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC li r11, MI_PS8MEG | MI_SVALID
mtspr SPRN_MI_TWC, r11 mtspr SPRN_MI_TWC, r11
rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
...@@ -994,7 +968,7 @@ initial_mmu: ...@@ -994,7 +968,7 @@ initial_mmu:
lis r8, KERNELBASE@h /* Create vaddr for TLB */ lis r8, KERNELBASE@h /* Create vaddr for TLB */
ori r8, r8, MI_EVALID /* Mark it valid */ ori r8, r8, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r8 mtspr SPRN_MI_EPN, r8
li r8, MI_PS8MEG | (2 << 5) /* Set 8M byte page, APG 2 */ li r8, MI_PS8MEG /* Set 8M byte page */
ori r8, r8, MI_SVALID /* Make it valid */ ori r8, r8, MI_SVALID /* Make it valid */
mtspr SPRN_MI_TWC, r8 mtspr SPRN_MI_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */ li r8, MI_BOOTINIT /* Create RPN for address 0 */
......
...@@ -96,7 +96,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -96,7 +96,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
*hpdp = __hugepd(__pa(new) | *hpdp = __hugepd(__pa(new) |
(shift_to_mmu_psize(pshift) << 2)); (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx) #elif defined(CONFIG_PPC_8xx)
*hpdp = __hugepd(__pa(new) | *hpdp = __hugepd(__pa(new) | _PMD_USER |
(pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
_PMD_PAGE_512K) | _PMD_PRESENT); _PMD_PAGE_512K) | _PMD_PRESENT);
#else #else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment