Commit 76acc2c1 authored by Kumar Gala, committed by Benjamin Herrenschmidt

powerpc/fsl-booke: Use HW PTE format if CONFIG_PTE_64BIT

Switch to using the Power ISA defined PTE format when we have a 64-bit
PTE.  This makes the code handling between fsl-booke and book3e-64
similar for TLB faults.

Additionally this lets us take advantage of the page size encodings and
full permissions that the HW PTE defines.

Also defined _PMD_PRESENT, _PMD_PRESENT_MASK, and _PMD_BAD since the
32-bit ppc arch code expects them.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 1d5d9527
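
Background for the hunks below: under the hardware PTE format the ITLB/DTLB miss handlers build their required-permission mask with li plus oris, because the accessed bit no longer fits in a 16-bit immediate, and then test the mask against the PTE with andc. . A minimal C sketch of that check, assuming illustrative Book3E bit values (the _PAGE_* constants here are assumptions for illustration, not copied from the headers):

/*
 * Minimal sketch of the TLB-miss permission check.  Mirrors the asm
 * "andc. r13,r13,r11": every required bit must already be set in the
 * PTE, otherwise the miss is turned into a fault.  Bit values are
 * assumed for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT   0x000001u
#define _PAGE_BAP_UX    0x000080u   /* user execute (assumed _PAGE_EXEC mapping) */
#define _PAGE_ACCESSED  0x040000u   /* above bit 15, hence the li/oris split     */

static bool pte_allows(uint32_t pte_lo, uint32_t required)
{
	return (required & ~pte_lo) == 0;   /* andc. result must be zero */
}

int main(void)
{
	uint32_t itlb_needs = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_BAP_UX;
	uint32_t pte_lo     = _PAGE_PRESENT | _PAGE_ACCESSED;   /* no exec bit */

	printf("exec allowed: %d\n", pte_allows(pte_lo, itlb_needs));   /* prints 0 */
	return 0;
}
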
@@ -111,6 +111,8 @@ extern int icache_44x_need_flush;
 #include <asm/pte-40x.h>
 #elif defined(CONFIG_44x)
 #include <asm/pte-44x.h>
+#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#include <asm/pte-book3e.h>
 #elif defined(CONFIG_FSL_BOOKE)
 #include <asm/pte-fsl-booke.h>
 #elif defined(CONFIG_8xx)
...
@@ -75,6 +75,9 @@
 /* On 32-bit, we never clear the top part of the PTE */
 #ifdef CONFIG_PPC32
 #define _PTE_NONE_MASK 0xffffffff00000000ULL
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
 #endif
 #endif /* __KERNEL__ */
...
@@ -33,13 +33,6 @@
 #define _PAGE_WRITETHRU 0x00400 /* H: W bit */
 #define _PAGE_SPECIAL 0x00800 /* S: Special page */
-#ifdef CONFIG_PTE_64BIT
-/* ERPN in a PTE never gets cleared, ignore it */
-#define _PTE_NONE_MASK 0xffffffffffff0000ULL
-/* We extend the size of the PTE flags area when using 64-bit PTEs */
-#define PTE_RPN_SHIFT (PAGE_SHIFT + 8)
-#endif
 #define _PMD_PRESENT 0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD (~PAGE_MASK)
...
@@ -575,7 +575,12 @@ interrupt_base:
 	 * place or can we save a couple of instructions here ?
 	 */
 	mfspr	r12,SPRN_ESR
+#ifdef CONFIG_PTE_64BIT
+	li	r13,_PAGE_PRESENT
+	oris	r13,r13,_PAGE_ACCESSED@h
+#else
 	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
+#endif
 	rlwimi	r13,r12,11,29,29
 	FIND_PTE
@@ -643,7 +648,12 @@ interrupt_base:
 4:
 	/* Make up the required permissions */
+#ifdef CONFIG_PTE_64BIT
+	li	r13,_PAGE_PRESENT | _PAGE_EXEC
+	oris	r13,r13,_PAGE_ACCESSED@h
+#else
 	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+#endif
 	FIND_PTE
 	andc.	r13,r13,r11	/* Check permission */
@@ -733,7 +743,7 @@ finish_tlb_load:
 	mfspr	r12, SPRN_MAS2
 #ifdef CONFIG_PTE_64BIT
-	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
+	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
 #else
 	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
 #endif
@@ -742,6 +752,20 @@
 #endif
 	mtspr	SPRN_MAS2, r12
+#ifdef CONFIG_PTE_64BIT
+	rlwinm	r12, r11, 32-2, 26, 31	/* Move in perm bits */
+	andi.	r10, r11, _PAGE_DIRTY
+	bne	1f
+	li	r10, MAS3_SW | MAS3_UW
+	andc	r12, r12, r10
+1:	rlwimi	r12, r13, 20, 0, 11	/* grab RPN[32:43] */
+	rlwimi	r12, r11, 20, 12, 19	/* grab RPN[44:51] */
+	mtspr	SPRN_MAS3, r12
+BEGIN_MMU_FTR_SECTION
+	srwi	r10, r13, 12		/* grab RPN[12:31] */
+	mtspr	SPRN_MAS7, r10
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
+#else
 	li	r10, (_PAGE_EXEC | _PAGE_PRESENT)
 	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
 	and	r12, r11, r10
@@ -749,16 +773,6 @@
 	slwi	r10, r12, 1
 	or	r10, r10, r12
 	iseleq	r12, r12, r10
-#ifdef CONFIG_PTE_64BIT
-	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
-	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
-	mtspr	SPRN_MAS3, r12
-BEGIN_MMU_FTR_SECTION
-	srwi	r10, r13, 8		/* grab RPN[8:31] */
-	mtspr	SPRN_MAS7, r10
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
-#else
 	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
 	mtspr	SPRN_MAS3, r11
 #endif
...
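
For reference, a rough C model of what the new CONFIG_PTE_64BIT path in finish_tlb_load computes for MAS3/MAS7. The bit positions used here (BAP permission bits at PTE bits 2-7, a PTE_RPN_SHIFT of 24, _PAGE_DIRTY at 0x1000) are assumptions made for illustration, not values quoted from the headers:

/*
 * Sketch of the MAS3/MAS7 construction done by the new 64-bit PTE path
 * in finish_tlb_load.  Bit positions are assumed for illustration only.
 */
#include <stdint.h>

#define PAGE_SHIFT     12
#define PTE_RPN_SHIFT  24          /* physical page number assumed to start at PTE bit 24 */
#define _PAGE_DIRTY    0x001000u
#define MAS3_SW        0x04u
#define MAS3_UW        0x08u

static void pte_to_mas3_mas7(uint64_t pte, uint32_t *mas3, uint32_t *mas7)
{
	uint32_t lo = (uint32_t)pte;   /* r11 in the asm; r13 holds the upper half */

	/* rlwinm r12,r11,32-2,26,31: the six BAP bits at PTE bits 2..7
	 * line up with the MAS3 SR/UR/SW/UW/SX/UX bits after a shift by 2. */
	uint32_t perms = (lo >> 2) & 0x3f;

	/* andi./bne/andc: a clean page must fault on its first store so the
	 * kernel can set DIRTY, so drop write permission while DIRTY is clear. */
	if (!(lo & _PAGE_DIRTY))
		perms &= ~(MAS3_SW | MAS3_UW);

	/* The RPN occupies the PTE above PTE_RPN_SHIFT; turn it back into
	 * a physical address. */
	uint64_t phys = (pte >> PTE_RPN_SHIFT) << PAGE_SHIFT;

	*mas3 = ((uint32_t)phys & ~0xfffu) | perms;  /* PA[31:12] plus permissions  */
	*mas7 = (uint32_t)(phys >> 32);              /* upper physical bits, as the
	                                              * asm's srwi r10,r13,12 path  */
}

Clearing MAS3_SW/MAS3_UW while _PAGE_DIRTY is unset is what forces the first store to a clean page to fault, so the generic code can mark the page dirty before write permission is granted in the TLB.
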