Commit 7c03d653 authored by Benjamin Herrenschmidt, committed by Paul Mackerras

powerpc/mm: Introduce MMU features

We're soon running out of CPU features and I need to add some new
ones for various MMU related bits, so this patch separates the MMU
features from the CPU features.  I moved over the 32-bit MMU related
ones, added base features for MMU type families, but didn't move
over any 64-bit only feature yet.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 2ca8cf73
This diff is collapsed.
...@@ -81,6 +81,36 @@ label##5: \ ...@@ -81,6 +81,36 @@ label##5: \
#define ALT_FTR_SECTION_END_IFCLR(msk) \ #define ALT_FTR_SECTION_END_IFCLR(msk) \
ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97) ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
/* MMU feature dependent sections */
/*
 * These mirror the CPU feature section macros above, but emit their
 * fixup entries into the __mmu_ftr_fixup table (see
 * MAKE_FTR_SECTION_ENTRY below), keyed on MMU feature bits rather
 * than CPU feature bits.  Label 97 is the default nesting label, as
 * for the CPU feature variants.
 */
#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97)
/*
 * Close an MMU feature section and record a fixup entry that is applied
 * when (mmu_features & msk) == val.
 */
#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \
FTR_SECTION_ELSE_NESTED(label) \
MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
#define END_MMU_FTR_SECTION(msk, val) \
END_MMU_FTR_SECTION_NESTED(msk, val, 97)
/* Convenience forms: apply the section when all bits in msk are set / clear */
#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label)
#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97)
/*
 * End an if/else style section: the alternative code replaces the primary
 * code when the (msk, val) condition on mmu_features holds.
 */
#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \
MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \
ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
#define ALT_MMU_FTR_SECTION_END(msk, val) \
ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \
ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \
ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
/* Firmware feature dependent sections */ /* Firmware feature dependent sections */
#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) #define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97) #define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97)
......
...@@ -2,6 +2,47 @@ ...@@ -2,6 +2,47 @@
#define _ASM_POWERPC_MMU_H_ #define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
/*
 * MMU features bit definitions
 */
/*
 * First half is MMU families
 */
/* Low bits identify the MMU family; exactly one of these is expected to
 * be set for a given CPU. */
#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002)
#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
/*
 * This is individual features
 */
/* Individual feature bits start at 0x00010000, above the family bits. */
/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
/* Enable >32-bit physical addresses on 32-bit processor, only used
 * by CONFIG_6xx currently as BookE supports that from day 1
 */
#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000)
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
/*
 * Test an MMU feature of the current CPU.  Returns non-zero when every
 * bit of @feature is present in cur_cpu_spec->mmu_features is set
 * (callers treat the result as a boolean).
 */
static inline int mmu_has_feature(unsigned long feature)
{
return (cur_cpu_spec->mmu_features & feature);
}
/* Bounds of the __mmu_ftr_fixup entry table; presumably provided by the
 * linker scripts that collect *(__mmu_ftr_fixup) input sections. */
extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
/* 64-bit classic hash table MMU */ /* 64-bit classic hash table MMU */
# include <asm/mmu-hash64.h> # include <asm/mmu-hash64.h>
......
This diff is collapsed.
...@@ -990,12 +990,12 @@ load_up_mmu: ...@@ -990,12 +990,12 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5) LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5) LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5) LOAD_BAT(3,r3,r4,r5)
BEGIN_FTR_SECTION BEGIN_MMU_FTR_SECTION
LOAD_BAT(4,r3,r4,r5) LOAD_BAT(4,r3,r4,r5)
LOAD_BAT(5,r3,r4,r5) LOAD_BAT(5,r3,r4,r5)
LOAD_BAT(6,r3,r4,r5) LOAD_BAT(6,r3,r4,r5)
LOAD_BAT(7,r3,r4,r5) LOAD_BAT(7,r3,r4,r5)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr blr
/* /*
...@@ -1141,7 +1141,7 @@ clear_bats: ...@@ -1141,7 +1141,7 @@ clear_bats:
mtspr SPRN_IBAT2L,r10 mtspr SPRN_IBAT2L,r10
mtspr SPRN_IBAT3U,r10 mtspr SPRN_IBAT3U,r10
mtspr SPRN_IBAT3L,r10 mtspr SPRN_IBAT3L,r10
BEGIN_FTR_SECTION BEGIN_MMU_FTR_SECTION
/* Here's a tweak: at this point, CPU setup have /* Here's a tweak: at this point, CPU setup have
* not been called yet, so HIGH_BAT_EN may not be * not been called yet, so HIGH_BAT_EN may not be
* set in HID0 for the 745x processors. However, it * set in HID0 for the 745x processors. However, it
...@@ -1164,7 +1164,7 @@ BEGIN_FTR_SECTION ...@@ -1164,7 +1164,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r10 mtspr SPRN_IBAT6L,r10
mtspr SPRN_IBAT7U,r10 mtspr SPRN_IBAT7U,r10
mtspr SPRN_IBAT7L,r10 mtspr SPRN_IBAT7L,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr blr
flush_tlbs: flush_tlbs:
......
...@@ -767,10 +767,10 @@ finish_tlb_load: ...@@ -767,10 +767,10 @@ finish_tlb_load:
rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
mtspr SPRN_MAS3, r12 mtspr SPRN_MAS3, r12
BEGIN_FTR_SECTION BEGIN_MMU_FTR_SECTION
srwi r10, r13, 8 /* grab RPN[8:31] */ srwi r10, r13, 8 /* grab RPN[8:31] */
mtspr SPRN_MAS7, r10 mtspr SPRN_MAS7, r10
END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else #else
rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
mtspr SPRN_MAS3, r11 mtspr SPRN_MAS3, r11
......
...@@ -78,6 +78,12 @@ int module_finalize(const Elf_Ehdr *hdr, ...@@ -78,6 +78,12 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr, (void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size); (void *)sect->sh_addr + sect->sh_size);
sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup");
if (sect != NULL)
do_feature_fixups(cur_cpu_spec->mmu_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
sect = find_section(hdr, sechdrs, "__fw_ftr_fixup"); sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
if (sect != NULL) if (sect != NULL)
......
...@@ -97,6 +97,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) ...@@ -97,6 +97,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
PTRRELOC(&__start___ftr_fixup), PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup)); PTRRELOC(&__stop___ftr_fixup));
do_feature_fixups(spec->mmu_features,
PTRRELOC(&__start___mmu_ftr_fixup),
PTRRELOC(&__stop___mmu_ftr_fixup));
do_lwsync_fixups(spec->cpu_features, do_lwsync_fixups(spec->cpu_features,
PTRRELOC(&__start___lwsync_fixup), PTRRELOC(&__start___lwsync_fixup),
PTRRELOC(&__stop___lwsync_fixup)); PTRRELOC(&__stop___lwsync_fixup));
......
...@@ -361,6 +361,8 @@ void __init setup_system(void) ...@@ -361,6 +361,8 @@ void __init setup_system(void)
*/ */
do_feature_fixups(cur_cpu_spec->cpu_features, do_feature_fixups(cur_cpu_spec->cpu_features,
&__start___ftr_fixup, &__stop___ftr_fixup); &__start___ftr_fixup, &__stop___ftr_fixup);
do_feature_fixups(cur_cpu_spec->mmu_features,
&__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
do_feature_fixups(powerpc_firmware_features, do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
do_lwsync_fixups(cur_cpu_spec->cpu_features, do_lwsync_fixups(cur_cpu_spec->cpu_features,
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/mmu.h>
/* /*
* Structure for storing CPU registers on the save area. * Structure for storing CPU registers on the save area.
...@@ -279,7 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ...@@ -279,7 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtibatl 3,r4 mtibatl 3,r4
#endif #endif
BEGIN_FTR_SECTION BEGIN_MMU_FTR_SECTION
li r4,0 li r4,0
mtspr SPRN_DBAT4U,r4 mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4 mtspr SPRN_DBAT4L,r4
...@@ -297,7 +297,7 @@ BEGIN_FTR_SECTION ...@@ -297,7 +297,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r4 mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4 mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4 mtspr SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Flush all TLBs */ /* Flush all TLBs */
lis r4,0x1000 lis r4,0x1000
......
...@@ -567,6 +567,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, ...@@ -567,6 +567,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
do_feature_fixups(cur_cpu_spec->cpu_features, do_feature_fixups(cur_cpu_spec->cpu_features,
start64, start64 + size64); start64, start64 + size64);
start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64);
if (start64)
do_feature_fixups(cur_cpu_spec->mmu_features,
start64, start64 + size64);
start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64); start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
if (start64) if (start64)
do_feature_fixups(powerpc_firmware_features, do_feature_fixups(powerpc_firmware_features,
...@@ -583,6 +588,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, ...@@ -583,6 +588,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
do_feature_fixups(cur_cpu_spec->cpu_features, do_feature_fixups(cur_cpu_spec->cpu_features,
start32, start32 + size32); start32, start32 + size32);
start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32);
if (start32)
do_feature_fixups(cur_cpu_spec->mmu_features,
start32, start32 + size32);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32); start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
if (start32) if (start32)
......
...@@ -33,6 +33,9 @@ SECTIONS ...@@ -33,6 +33,9 @@ SECTIONS
. = ALIGN(8); . = ALIGN(8);
__ftr_fixup : { *(__ftr_fixup) } __ftr_fixup : { *(__ftr_fixup) }
. = ALIGN(8);
__mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
. = ALIGN(8); . = ALIGN(8);
__lwsync_fixup : { *(__lwsync_fixup) } __lwsync_fixup : { *(__lwsync_fixup) }
......
...@@ -34,6 +34,9 @@ SECTIONS ...@@ -34,6 +34,9 @@ SECTIONS
. = ALIGN(8); . = ALIGN(8);
__ftr_fixup : { *(__ftr_fixup) } __ftr_fixup : { *(__ftr_fixup) }
. = ALIGN(8);
__mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
. = ALIGN(8); . = ALIGN(8);
__lwsync_fixup : { *(__lwsync_fixup) } __lwsync_fixup : { *(__lwsync_fixup) }
......
...@@ -152,6 +152,12 @@ SECTIONS ...@@ -152,6 +152,12 @@ SECTIONS
__stop___ftr_fixup = .; __stop___ftr_fixup = .;
} }
. = ALIGN(8); . = ALIGN(8);
__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
__start___mmu_ftr_fixup = .;
*(__mmu_ftr_fixup)
__stop___mmu_ftr_fixup = .;
}
. = ALIGN(8);
__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
__start___lwsync_fixup = .; __start___lwsync_fixup = .;
*(__lwsync_fixup) *(__lwsync_fixup)
......
...@@ -192,7 +192,7 @@ void __init MMU_init_hw(void) ...@@ -192,7 +192,7 @@ void __init MMU_init_hw(void)
extern unsigned int hash_page[]; extern unsigned int hash_page[];
extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[]; extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) { if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
/* /*
* Put a blr (procedure return) instruction at the * Put a blr (procedure return) instruction at the
* start of hash_page, since we can still get DSI * start of hash_page, since we can still get DSI
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/mmu.h>
#define MAGIC 0x4c617273 /* 'Lars' */ #define MAGIC 0x4c617273 /* 'Lars' */
...@@ -323,7 +324,7 @@ grackle_wake_up: ...@@ -323,7 +324,7 @@ grackle_wake_up:
lwz r4,SL_IBAT3+4(r1) lwz r4,SL_IBAT3+4(r1)
mtibatl 3,r4 mtibatl 3,r4
BEGIN_FTR_SECTION BEGIN_MMU_FTR_SECTION
li r4,0 li r4,0
mtspr SPRN_DBAT4U,r4 mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4 mtspr SPRN_DBAT4L,r4
...@@ -341,7 +342,7 @@ BEGIN_FTR_SECTION ...@@ -341,7 +342,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r4 mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4 mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4 mtspr SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Flush all TLBs */ /* Flush all TLBs */
lis r4,0x1000 lis r4,0x1000
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment