Commit f00ec48f authored by Russell King

ARM: Allow SMP kernels to boot on UP systems

UP systems do not implement all the instructions that SMP systems have,
so in order to boot an SMP kernel on a UP system, we need to rewrite
parts of the kernel.

Do this using an 'alternatives' scheme, where the kernel code and data
are modified prior to initialization to replace the SMP instructions,
thereby rendering the problematic code ineffectual.  We use the linker
to generate a list of 32-bit word locations and their replacement values,
and run through these replacements when we detect a UP system.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 06717352
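
In C terms, the scheme described above boils down to walking a linker-built table of {location, replacement} pairs. A minimal sketch, assuming a hypothetical struct and function name (the kernel actually does this in assembly, in __fixup_smp_on_up below, which must also allow for running at the physical load address before the MMU is enabled):

	#include <linux/types.h>

	/* illustrative only: one record per SMP-only instruction */
	struct smpalt_entry {
		u32	*loc;		/* address of the 32-bit word to patch */
		u32	up_insn;	/* replacement value for UP systems */
	};

	/* section bounds emitted by the linker script below */
	extern struct smpalt_entry __smpalt_begin[], __smpalt_end[];

	static void fixup_smp_on_up(unsigned long offset)
	{
		struct smpalt_entry *e;

		/* write the UP replacement over each recorded location;
		 * 'offset' corrects virtual addresses to physical ones */
		for (e = __smpalt_begin; e < __smpalt_end; e++)
			*(u32 *)((unsigned long)e->loc + offset) = e->up_insn;
	}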
...
@@ -1191,6 +1191,19 @@ config SMP
 
 	  If you don't know what to do here, say N.
 
+config SMP_ON_UP
+	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on SMP && !XIP && !THUMB2_KERNEL
+	default y
+	help
+	  SMP kernels contain instructions which fail on non-SMP processors.
+	  Enabling this option allows the kernel to modify itself to make
+	  these instructions safe.  Disabling it allows about 1K of space
+	  savings.
+
+	  If you don't know what to do here, say Y.
+
 config HAVE_ARM_SCU
 	bool
 	depends on SMP
...
...
@@ -154,16 +154,39 @@
 	.long	9999b,9001f;			\
 	.popsection
 
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...)					\
+9998:	instr
+#define ALT_UP(instr...)					\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+	instr							;\
+	.popsection
+#define ALT_UP_B(label)						\
+	.equ	up_b_offset, label - 9998b			;\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+	b	. + up_b_offset					;\
+	.popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
+
 /*
  * SMP data memory barrier
  */
 	.macro	smp_dmb
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
-	dmb
+	ALT_SMP(dmb)
 #elif __LINUX_ARM_ARCH__ == 6
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
+#else
+#error Incompatible SMP platform
 #endif
+	ALT_UP(nop)
 #endif
 	.endm
...
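
For illustration only (not part of the patch): with CONFIG_SMP=y, the ALT_SMP(dmb)/ALT_UP(nop) pair in the smp_dmb macro above assembles to roughly the following, leaving the SMP instruction in the text section and a patch record in .alt.smp.init:

	9998:	dmb
		.pushsection ".alt.smp.init", "a"
		.long	9998b			@ address of the dmb above
		nop				@ UP replacement for that word
		.popsection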
...
@@ -4,7 +4,12 @@
 #define hard_smp_processor_id()			\
 	({						\
 		unsigned int cpunum;		\
-		__asm__("mrc p15, 0, %0, c0, c0, 5\n"	\
+		__asm__("\n"				\
+			"1:	mrc p15, 0, %0, c0, c0, 5\n"	\
+			"	.pushsection \".alt.smp.init\", \"a\"\n"\
+			"	.long	1b\n"		\
+			"	mov	%0, #0\n"	\
+			"	.popsection"		\
 			: "=r" (cpunum));		\
 		cpunum &= 0x0F;			\
 	})
...
...
@@ -18,4 +18,19 @@ static inline int cache_ops_need_broadcast(void)
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
 }
 
+/*
+ * Return true if we are running on a SMP platform
+ */
+static inline bool is_smp(void)
+{
+#ifndef CONFIG_SMP
+	return false;
+#elif defined(CONFIG_SMP_ON_UP)
+	extern unsigned int smp_on_up;
+	return !!smp_on_up;
+#else
+	return true;
+#endif
+}
+
 #endif
...
@@ -70,6 +70,10 @@
 #undef _TLB
 #undef MULTI_TLB
 
+#ifdef CONFIG_SMP_ON_UP
+#define MULTI_TLB 1
+#endif
+
 #define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)
 
 #ifdef CONFIG_CPU_TLB_V3
...
@@ -185,17 +189,23 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
 
-#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
-			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#else
-#define v7wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
-			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
-#endif
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
+				 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
 
 #ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags	v7wbi_tlb_flags
-# define v7wbi_always_flags	v7wbi_tlb_flags
+# ifdef CONFIG_SMP_ON_UP
+# define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
+# define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
+# elif defined(CONFIG_SMP)
+# define v7wbi_possible_flags	v7wbi_tlb_flags_smp
+# define v7wbi_always_flags	v7wbi_tlb_flags_smp
+# else
+# define v7wbi_possible_flags	v7wbi_tlb_flags_up
+# define v7wbi_always_flags	v7wbi_tlb_flags_up
+# endif
 # ifdef _TLB
 #  define MULTI_TLB 1
 # else
...
...
@@ -46,7 +46,8 @@
  * this macro assumes that irqstat (r6) and base (r5) are
  * preserved from get_irqnr_and_base above
  */
-	test_for_ipi r0, r6, r5, lr
+	ALT_SMP(test_for_ipi r0, r6, r5, lr)
+	ALT_UP_B(9997f)
 	movne	r0, sp
 	adrne	lr, BSYM(1b)
 	bne	do_IPI
...
@@ -57,6 +58,7 @@
 	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
+9997:
 #endif
 	.endm
...
@@ -965,11 +967,8 @@ kuser_cmpxchg_fixup:
 	beq	1b
 	rsbs	r0, r3, #0
 	/* beware -- each __kuser slot must be 8 instructions max */
-#ifdef CONFIG_SMP
-	b	__kuser_memory_barrier
-#else
-	usr_ret	lr
-#endif
+	ALT_SMP(b	__kuser_memory_barrier)
+	ALT_UP(usr_ret	lr)
 #endif
...
...
@@ -86,6 +86,9 @@ ENTRY(stext)
 	movs	r8, r5				@ invalid machine (r5=0)?
 	beq	__error_a			@ yes, error 'a'
 	bl	__vet_atags
+#ifdef CONFIG_SMP_ON_UP
+	bl	__fixup_smp
+#endif
 	bl	__create_page_tables
 
 /*
...
@@ -333,4 +336,51 @@ __create_page_tables:
 ENDPROC(__create_page_tables)
 	.ltorg
 
+#ifdef CONFIG_SMP_ON_UP
+__fixup_smp:
+	mov	r7, #0x00070000
+	orr	r6, r7, #0xff000000	@ mask 0xff070000
+	orr	r7, r7, #0x41000000	@ val 0x41070000
+	and	r0, r9, r6
+	teq	r0, r7			@ ARM CPU and ARMv6/v7?
+	bne	__fixup_smp_on_up	@ no, assume UP
+
+	orr	r6, r6, #0x0000ff00
+	orr	r6, r6, #0x000000f0	@ mask 0xff07fff0
+	orr	r7, r7, #0x0000b000
+	orr	r7, r7, #0x00000020	@ val 0x4107b020
+	and	r0, r9, r6
+	teq	r0, r7			@ ARM 11MPCore?
+	moveq	pc, lr			@ yes, assume SMP
+
+	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
+	tst	r0, #1 << 31
+	movne	pc, lr			@ bit 31 => SMP
+
+__fixup_smp_on_up:
+	adr	r0, 1f			@ runtime address of table below
+	ldmia	r0, {r3, r6, r7}	@ link-time addr, __smpalt_begin/end
+	sub	r3, r0, r3		@ runtime minus link-time offset
+	add	r6, r6, r3		@ adjust table start
+	add	r7, r7, r3		@ adjust table end
+2:	cmp	r6, r7			@ entries left?
+	ldmia	r6!, {r0, r4}		@ load {location, UP instruction}
+	strlo	r4, [r0, r3]		@ patch the UP instruction in
+	blo	2b
+	mov	pc, lr
+ENDPROC(__fixup_smp)
+
+1:	.word	.
+	.word	__smpalt_begin
+	.word	__smpalt_end
+
+	.pushsection .data
+	.globl	smp_on_up
+smp_on_up:
+	ALT_SMP(.long 1)
+	ALT_UP(.long 0)
+	.popsection
+
+#endif
+
 #include "head-common.S"
...
@@ -36,6 +36,7 @@
 #include <asm/procinfo.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/smp_plat.h>
 #include <asm/mach-types.h>
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
...
@@ -825,7 +826,8 @@ void __init setup_arch(char **cmdline_p)
 	request_standard_resources(&meminfo, mdesc);
 
 #ifdef CONFIG_SMP
-	smp_init_cpus();
+	if (is_smp())
+		smp_init_cpus();
 #endif
 	reserve_crashkernel();
...
...
@@ -40,6 +40,11 @@ SECTIONS
 		__tagtable_begin = .;
 		*(.taglist.init)
 		__tagtable_end = .;
+#ifdef CONFIG_SMP_ON_UP
+		__smpalt_begin = .;
+		*(.alt.smp.init)
+		__smpalt_end = .;
+#endif
 
 		INIT_SETUP(16)
...
@@ -237,6 +242,12 @@ SECTIONS
 	/* Default discards */
 	DISCARDS
+
+#ifndef CONFIG_SMP_ON_UP
+	/DISCARD/ : {
+		*(.alt.smp.init)
+	}
+#endif
 }
 
 /*
...
...
@@ -91,11 +91,8 @@ ENTRY(v7_flush_kern_cache_all)
 THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 0		@ invalidate I-cache inner shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
...
@@ -171,11 +168,8 @@ ENTRY(v7_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 6		@ invalidate BTB Inner Shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
 	dsb
 	isb
 	mov	pc, lr
...
...
@@ -310,9 +310,8 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-#ifdef CONFIG_SMP
-	cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+	if (is_smp())
+		cachepolicy = CPOLICY_WRITEALLOC;
 
 	/*
 	 * Strip out features not present on earlier architectures.
...
@@ -406,13 +405,11 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-#ifndef CONFIG_SMP
 	/*
 	 * Only use write-through for non-SMP systems
 	 */
-	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
...
@@ -436,22 +433,23 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-#ifdef CONFIG_SMP
-		/*
-		 * Mark memory with the "shared" attribute for SMP systems
-		 */
-		user_pgprot |= L_PTE_SHARED;
-		kern_pgprot |= L_PTE_SHARED;
-		vecs_pgprot |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-#endif
+		if (is_smp()) {
+			/*
+			 * Mark memory with the "shared" attribute
+			 * for SMP systems
+			 */
+			user_pgprot |= L_PTE_SHARED;
+			kern_pgprot |= L_PTE_SHARED;
+			vecs_pgprot |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+		}
 	}
 
 	/*
...
@@ -829,8 +827,7 @@ static void __init sanity_check_meminfo(void)
 		 * rather difficult.
 		 */
 		reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
-	} else if (tlb_ops_need_broadcast()) {
+	} else if (is_smp() && tlb_ops_need_broadcast()) {
 		/*
 		 * kmap_high needs to occasionally flush TLB entries,
 		 * however, if the TLB entries need to be broadcast
...
@@ -840,7 +837,6 @@ static void __init sanity_check_meminfo(void)
 		 * (must not be called with irqs off)
 		 */
 		reason = "without hardware TLB ops broadcasting";
-#endif
 	}
 	if (reason) {
 		printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
...
...
@@ -30,13 +30,10 @@
 #define TTB_RGN_WT	(2 << 3)
 #define TTB_RGN_WB	(3 << 3)
 
-#ifndef CONFIG_SMP
-#define TTB_FLAGS	TTB_RGN_WBWA
-#define PMD_FLAGS	PMD_SECT_WB
-#else
-#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP	TTB_RGN_WBWA
+#define PMD_FLAGS_UP	PMD_SECT_WB
+#define TTB_FLAGS_SMP	TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
...
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
...
@@ -156,9 +154,11 @@ cpu_pj4_name:
  */
 __v6_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)	@ Enable SMP/nAMP mode
+	ALT_UP(nop)
 	orr	r0, r0, #0x20
-	mcr	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mcr	p15, 0, r0, c1, c0, 1)
+	ALT_UP(nop)
 #endif
 	mov	r0, #0
...
@@ -169,7 +169,8 @@ __v6_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
...
@@ -225,10 +226,16 @@ cpu_elf_name:
 __v6_proc_info:
 	.long	0x0007b000
 	.long	0x0007f000
-	.long	PMD_TYPE_SECT | \
-		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
...
@@ -249,10 +256,16 @@ __v6_proc_info:
 __pj4_v6_proc_info:
 	.long	0x560f5810
 	.long	0xff0ffff0
-	.long	PMD_TYPE_SECT | \
-		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
...
...
@@ -30,15 +30,13 @@
 #define TTB_IRGN_WT	((1 << 0) | (0 << 6))
 #define TTB_IRGN_WB	((1 << 0) | (1 << 6))
 
-#ifndef CONFIG_SMP
 /* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS	PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP	TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP	PMD_SECT_WB
+
 /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
...
@@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
...
@@ -188,7 +187,8 @@ cpu_v7_name:
  */
 __v7_ca9mp_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
+	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
 	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
 	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
 	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
...
@@ -262,7 +262,8 @@ __v7_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
...
@@ -354,10 +355,16 @@ cpu_elf_name:
 __v7_ca9mp_proc_info:
 	.long	0x410fc090		@ Required ID value
 	.long	0xff0ffff0		@ Mask for ID
-	.long	PMD_TYPE_SECT | \
-		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
...
@@ -380,10 +387,16 @@ __v7_ca9mp_proc_info:
 __v7_proc_info:
 	.long	0x000f0000		@ Required ID value
 	.long	0x000f0000		@ Mask for ID
-	.long	PMD_TYPE_SECT | \
-		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
...
...
@@ -13,6 +13,7 @@
  */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
...
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	ip, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, ip, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, ip, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, ip, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
...
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	r2, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r2, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r2, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r2, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	isb
 	mov	pc, lr
...
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
 ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v7wbi_tlb_flags
+	ALT_SMP(.long	v7wbi_tlb_flags_smp)
+	ALT_UP(.long	v7wbi_tlb_flags_up)
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns