Commit 3adfb457 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/64e: Remove MMU_FTR_USE_TLBRSRV and MMU_FTR_USE_PAIRED_MAS

Commit fb5a5157 ("powerpc: Remove platforms/wsp and associated
pieces") removed the last CPU using the MMU_FTRS_A2 feature set, and
commit cd68098b ("powerpc: Clean up MMU_FTRS_A2 and
MMU_FTR_TYPE_3E") then removed MMU_FTRS_A2 itself, which was the last
user of MMU_FTR_USE_TLBRSRV and MMU_FTR_USE_PAIRED_MAS.

Remove all code that relies on MMU_FTR_USE_TLBRSRV and
MMU_FTR_USE_PAIRED_MAS.

With this change done, TLB misses can happen before the MMU feature
fixups are applied.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/cfd5a0ecdb1598da968832e1bddf7431ec267200.1656427701.git.christophe.leroy@csgroup.eu
parent 09317643
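Before the diff itself, a quick illustration of one of the two patterns being simplified: since MMU_FTR_USE_PAIRED_MAS is never set anymore, the feature-gated MAS7||MAS3 write collapses into the plain split write. The C sketch below is illustrative only and not part of the commit; mmu_has_feature(), mtspr(), upper_32_bits()/lower_32_bits(), MMU_FTR_BIG_PHYS and the SPRN_MAS* constants are existing kernel definitions, while the helper name is made up for the example.

#include <linux/kernel.h>	/* upper_32_bits(), lower_32_bits() */
#include <asm/mmu.h>		/* mmu_has_feature(), MMU_FTR_BIG_PHYS */
#include <asm/reg.h>		/* mtspr(); SPRN_MAS3/SPRN_MAS7 come via the Book3E register header */

/* Illustrative sketch only: how the hugetlb preload path writes the
 * 64-bit MAS7||MAS3 value once MMU_FTR_USE_PAIRED_MAS is gone.
 * The helper name is hypothetical; the body mirrors the new code below.
 */
static inline void write_mas7_mas3(unsigned long mas7_3)
{
	/* MAS7 holds the upper physical-address (RPN) bits and only
	 * matters when the CPU supports >32-bit physical addresses.
	 */
	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
		mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
	mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
}

The same split write (srdi plus two mtspr) is what the assembly hunks in the TLB miss handlers keep, now without the MMU feature section around it.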
@@ -96,15 +96,6 @@
  */
 #define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)
-/* Enable use of TLB reservation. Processor should support tlbsrx.
- * instruction and MAS0[WQ].
- */
-#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)
-/* Use paired MAS registers (MAS7||MAS3, etc.)
- */
-#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)
 /* Doesn't support the B bit (1T segment) in SLBIE
  */
 #define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)
@@ -180,9 +171,6 @@ enum {
 #ifdef CONFIG_PPC_83xx
 		MMU_FTR_NEED_DTLB_SW_LRU |
 #endif
-#ifdef CONFIG_PPC_BOOK3E_64
-		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
-#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 		MMU_FTR_KERNEL_RO |
 #ifdef CONFIG_PPC_64S_HASH_MMU
...
@@ -113,7 +113,6 @@ void __init setup_tlb_core_data(void)
 	 * Should we panic instead?
 	 */
 	WARN_ONCE(smt_enabled_at_boot >= 2 &&
-		  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
 		  book3e_htw_mode != PPC_HTW_E6500,
 		  "%s: unsupported MMU configuration\n", __func__);
 }
...
@@ -103,21 +103,11 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 	int found = 0;
 	mtspr(SPRN_MAS6, pid << 16);
-	if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
-		asm volatile(
-			"li	%0,0\n"
-			"tlbsx.	0,%1\n"
-			"bne	1f\n"
-			"li	%0,1\n"
-			"1:\n"
-			: "=&r"(found) : "r"(ea));
-	} else {
-		asm volatile(
-			"tlbsx	0,%1\n"
-			"mfspr	%0,0x271\n"
-			"srwi	%0,%0,31\n"
-			: "=&r"(found) : "r"(ea));
-	}
+	asm volatile(
+		"tlbsx	0,%1\n"
+		"mfspr	%0,0x271\n"
+		"srwi	%0,%0,31\n"
+		: "=&r"(found) : "r"(ea));
 	return found;
 }
@@ -169,13 +159,9 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
 	mtspr(SPRN_MAS1, mas1);
 	mtspr(SPRN_MAS2, mas2);
-	if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
-		mtspr(SPRN_MAS7_MAS3, mas7_3);
-	} else {
-		if (mmu_has_feature(MMU_FTR_BIG_PHYS))
-			mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
-		mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
-	}
+	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+		mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
+	mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
 	asm volatile ("tlbwe");
...
@@ -152,16 +152,7 @@ tlb_miss_common_bolted:
 	clrrdi	r15,r15,3
 	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */
-BEGIN_MMU_FTR_SECTION
-	/* Set the TLB reservation and search for existing entry. Then load
-	 * the entry.
-	 */
-	PPC_TLBSRX_DOT(0,R16)
-	ldx	r14,r14,r15	/* grab pgd entry */
-	beq	tlb_miss_done_bolted	/* tlb exists already, bail */
-MMU_FTR_SECTION_ELSE
 	ldx	r14,r14,r15	/* grab pgd entry */
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
 	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
@@ -674,16 +665,7 @@ normal_tlb_miss:
 	clrrdi	r14,r14,3
 	or	r10,r15,r14
-BEGIN_MMU_FTR_SECTION
-	/* Set the TLB reservation and search for existing entry. Then load
-	 * the entry.
-	 */
-	PPC_TLBSRX_DOT(0,R16)
 	ld	r14,0(r10)
-	beq	normal_tlb_miss_done
-MMU_FTR_SECTION_ELSE
-	ld	r14,0(r10)
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
 finish_normal_tlb_miss:
 	/* Check if required permissions are met */
@@ -727,13 +709,9 @@ finish_normal_tlb_miss:
 	li	r11,MAS3_SW|MAS3_UW
 	andc	r15,r15,r11
 1:
-BEGIN_MMU_FTR_SECTION
 	srdi	r16,r15,32
 	mtspr	SPRN_MAS3,r15
 	mtspr	SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
-	mtspr	SPRN_MAS7_MAS3,r15
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
 	tlbwe
@@ -809,13 +787,6 @@ virt_page_table_tlb_miss:
 #else
 1:
 #endif
-BEGIN_MMU_FTR_SECTION
-	/* Search if we already have a TLB entry for that virtual address, and
-	 * if we do, bail out.
-	 */
-	PPC_TLBSRX_DOT(0,R16)
-	beq	virt_page_table_tlb_miss_done
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
 	/* Now, we need to walk the page tables. First check if we are in
 	 * range.
@@ -866,41 +837,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
 	clrldi	r11,r15,4		/* remove region ID from RPN */
 	ori	r10,r11,1		/* Or-in SR */
-BEGIN_MMU_FTR_SECTION
 	srdi	r16,r10,32
 	mtspr	SPRN_MAS3,r10
 	mtspr	SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
-	mtspr	SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
 	tlbwe
-BEGIN_MMU_FTR_SECTION
-virt_page_table_tlb_miss_done:
-	/* We have overridden MAS2:EPN but currently our primary TLB miss
-	 * handler will always restore it so that should not be an issue,
-	 * if we ever optimize the primary handler to not write MAS2 on
-	 * some cases, we'll have to restore MAS2:EPN here based on the
-	 * original fault's DEAR. If we do that we have to modify the
-	 * ITLB miss handler to also store SRR0 in the exception frame
-	 * as DEAR.
-	 *
-	 * However, one nasty thing we did is we cleared the reservation
-	 * (well, potentially we did). We do a trick here thus if we
-	 * are not a level 0 exception (we interrupted the TLB miss) we
-	 * offset the return address by -4 in order to replay the tlbsrx
-	 * instruction there
-	 */
-	subf	r10,r13,r12
-	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
-	bne-	1f
-	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
-	addi	r10,r11,-4
-	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
-1:
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
 	/* Return to caller, normal case */
 	TLB_MISS_EPILOG_SUCCESS
 	rfi
@@ -1115,13 +1057,9 @@ htw_tlb_miss:
 	 */
 	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-BEGIN_MMU_FTR_SECTION
 	srdi	r16,r10,32
 	mtspr	SPRN_MAS3,r10
 	mtspr	SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
-	mtspr	SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
 	tlbwe
@@ -1202,13 +1140,9 @@ tlb_load_linear:
 	clrldi	r10,r10,4		/* clear region bits */
 	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
-BEGIN_MMU_FTR_SECTION
 	srdi	r16,r10,32
 	mtspr	SPRN_MAS3,r10
 	mtspr	SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
-	mtspr	SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
 	tlbwe
...