Commit faa7bc51 authored by Catalin Marinas

Check whether the TLB operations need broadcasting on SMP systems

ARMv7 SMP hardware can broadcast TLB maintenance operations in hardware,
allowing software to avoid the costly IPIs. This patch adds the necessary
check (of the MMFR3 CPUID register) so that software broadcasting is
skipped when the hardware already handles it.

(this patch is based on the work done by Tony Thompson @ ARM)
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent da055eb5
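
Note (for context before the diff): the new check reads the Maintenance
Broadcast field of ID_MMFR3, bits [15:12]. Per the ARMv7 ARM, 0 means no
maintenance operations are broadcast in hardware, 1 means cache and branch
predictor maintenance is broadcast, and 2 means TLB maintenance is broadcast
as well, so software IPIs are only needed while the field reads below 2.
A minimal C sketch of the decode, mirroring the helper added to
arch/arm/kernel/smp.c below:

    /* ID_MMFR3[15:12], Maintenance Broadcast (ARMv7 ARM):
     *   0 - nothing broadcast: all TLB maintenance needs IPIs
     *   1 - cache/branch-predictor maintenance broadcast in hardware
     *   2 - TLB maintenance broadcast in hardware too
     */
    static inline int tlb_ops_need_broadcast(void)
    {
            return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
    }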
arch/arm/include/asm/cputype.h
@@ -8,6 +8,21 @@
 #define CPUID_TCM 2
 #define CPUID_TLBTYPE 3
 
+#define CPUID_EXT_PFR0 "c1, 0"
+#define CPUID_EXT_PFR1 "c1, 1"
+#define CPUID_EXT_DFR0 "c1, 2"
+#define CPUID_EXT_AFR0 "c1, 3"
+#define CPUID_EXT_MMFR0 "c1, 4"
+#define CPUID_EXT_MMFR1 "c1, 5"
+#define CPUID_EXT_MMFR2 "c1, 6"
+#define CPUID_EXT_MMFR3 "c1, 7"
+#define CPUID_EXT_ISAR0 "c2, 0"
+#define CPUID_EXT_ISAR1 "c2, 1"
+#define CPUID_EXT_ISAR2 "c2, 2"
+#define CPUID_EXT_ISAR3 "c2, 3"
+#define CPUID_EXT_ISAR4 "c2, 4"
+#define CPUID_EXT_ISAR5 "c2, 5"
+
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg) \
 ({ \
@@ -18,9 +33,19 @@
     : "cc"); \
   __val; \
 })
+#define read_cpuid_ext(ext_reg) \
+({ \
+  unsigned int __val; \
+  asm("mrc p15, 0, %0, c0, " ext_reg \
+      : "=r" (__val) \
+      : \
+      : "cc"); \
+  __val; \
+})
 #else
 extern unsigned int processor_id;
 #define read_cpuid(reg) (processor_id)
+#define read_cpuid_ext(reg) 0
 #endif
 
 /*
...
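
Note on the macro mechanics: the CPUID_EXT_* constants are string literals,
so the preprocessor pastes them directly into the mrc template of
read_cpuid_ext(). An illustrative expansion:

    unsigned int mmfr3 = read_cpuid_ext(CPUID_EXT_MMFR3);
    /* the asm template becomes:
     *      mrc p15, 0, %0, c0, c1, 7       @ read ID_MMFR3
     */

On !CONFIG_CPU_CP15 builds read_cpuid_ext() simply returns 0, which decodes
as "no hardware broadcast", so callers conservatively keep using IPIs.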
arch/arm/include/asm/tlbflush.h
@@ -40,6 +40,12 @@
 #define TLB_V6_I_ASID (1 << 18)
 
 #define TLB_BTB (1 << 28)
+
+/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
+#define TLB_V7_UIS_PAGE (1 << 19)
+#define TLB_V7_UIS_FULL (1 << 20)
+#define TLB_V7_UIS_ASID (1 << 21)
+
 #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
 #define TLB_DCLEAN (1 << 30)
 #define TLB_WB (1 << 31)
@@ -176,9 +182,17 @@
 # define v6wbi_always_flags (-1UL)
 #endif
 
+#ifdef CONFIG_SMP
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+                         TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+#else
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+                         TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+#endif
+
 #ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags v6wbi_tlb_flags
-# define v7wbi_always_flags v6wbi_tlb_flags
+# define v7wbi_possible_flags v7wbi_tlb_flags
+# define v7wbi_always_flags v7wbi_tlb_flags
 # ifdef _TLB
 # define MULTI_TLB 1
 # else
@@ -316,6 +330,8 @@ static inline void local_flush_tlb_all(void)
 		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
 	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
 		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_V7_UIS_FULL))
+		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
 
 	if (tlb_flag(TLB_BTB)) {
 		/* flush the branch target cache */
@@ -351,6 +367,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
 	if (tlb_flag(TLB_V6_I_ASID))
 		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+	if (tlb_flag(TLB_V7_UIS_ASID))
+		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
 
 	if (tlb_flag(TLB_BTB)) {
 		/* flush the branch target cache */
@@ -389,6 +407,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+	if (tlb_flag(TLB_V7_UIS_PAGE))
+		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
 
 	if (tlb_flag(TLB_BTB)) {
 		/* flush the branch target cache */
@@ -424,6 +444,8 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
+	if (tlb_flag(TLB_V7_UIS_PAGE))
+		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
 
 	if (tlb_flag(TLB_BTB)) {
 		/* flush the branch target cache */
...
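
Note: v7wbi_possible_flags and v7wbi_always_flags are defined to the same
value, so on a v7-only build every tlb_flag() test in the local_flush_*
helpers above folds to a compile-time constant and the mcr instructions
guarded by absent flags are eliminated. Roughly, simplified from the logic
in this header:

    /* Both masks are build-time constants when a single TLB model is
     * configured, so each tlb_flag() test resolves at compile time and
     * the dead branches disappear. */
    #define tlb_flag(f) ((always_tlb_flags & (f)) || \
                         (__tlb_flag & possible_tlb_flags & (f)))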
arch/arm/kernel/smp.c
@@ -32,6 +32,7 @@
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/cputype.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -545,6 +546,12 @@ struct tlb_args {
 	unsigned long ta_end;
 };
 
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
 static inline void ipi_flush_tlb_all(void *ignored)
 {
 	local_flush_tlb_all();
@@ -587,51 +594,61 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	else
+		local_flush_tlb_mm(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	struct tlb_args ta;
-	ta.ta_vma = vma;
-	ta.ta_start = uaddr;
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_page(vma, uaddr);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
 {
-	struct tlb_args ta;
-	ta.ta_start = kaddr;
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
 		     unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-	ta.ta_vma = vma;
-	ta.ta_start = start;
-	ta.ta_end = end;
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_range(vma, start, end);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-	ta.ta_start = start;
-	ta.ta_end = end;
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
 }
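
Note on the dispatch pattern above: each flush helper now makes the broadcast
decision at runtime, so a single kernel image behaves correctly whether or
not the hardware broadcasts; when it does, the local operation (an Inner
Shareable op on SMP, per the tlbflush.h changes) reaches every CPU in the
domain. The mm- and vma-scoped variants also confine the IPI fallback to
CPUs that have actually run the address space, as in flush_tlb_mm above:

    void flush_tlb_mm(struct mm_struct *mm)
    {
            if (tlb_ops_need_broadcast())
                    /* IPI only the CPUs in mm->cpu_vm_mask */
                    on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
            else
                    /* the hardware broadcasts the local ASID invalidate */
                    local_flush_tlb_mm(mm);
    }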
arch/arm/mm/proc-v7.S
@@ -176,8 +176,8 @@ cpu_v7_name:
  */
 __v7_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
-	orr	r0, r0, #(0x1 << 6)
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode and
+	orr	r0, r0, #(1 << 6) | (1 << 0)	@ TLB ops broadcasting
 	mcr	p15, 0, r0, c1, c0, 1
 #endif
 	adr	r12, __v7_setup_stack		@ the local stack
...
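
Note: c1, c0, 1 is the Auxiliary Control Register, whose bit layout is
IMPLEMENTATION DEFINED. On Cortex-A9, for example, bit 6 is the SMP/nAMP bit
and bit 0 (FW) enables forwarding of cache/TLB maintenance requests to other
CPUs; the sketch below assumes that layout and is an illustration, not an
architectural guarantee:

    /* C rendering of the __v7_setup fragment above (bit meanings per the
     * Cortex-A9 TRM; other v7 implementations may differ). */
    unsigned int actlr;
    asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
    actlr |= (1 << 6)   /* SMP/nAMP: CPU participates in coherency */
           | (1 << 0);  /* FW: forward/broadcast maintenance ops */
    asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));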
arch/arm/mm/tlb-v7.S
@@ -42,9 +42,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	mov	r1, r1, lsl #PAGE_SHIFT
 	vma_vm_flags r2, r2			@ get vma->vm_flags
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
-	tst	r2, #VM_EXEC			@ Executable area ?
-	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -69,8 +71,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
-	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -87,5 +92,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
 ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v6wbi_tlb_flags
+	.long	v7wbi_tlb_flags
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns
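
For reference, the CP15 c8 (TLB operations) encodings this patch switches
between, per the ARMv7 ARM:

    /* mcr p15, 0, Rt, c8, <CRm>, <op2>
     *   c8, c5, 1 - ITLBIMVA   instruction TLB, by MVA, this CPU only
     *   c8, c6, 1 - DTLBIMVA   data TLB, by MVA, this CPU only
     *   c8, c7, 1 - TLBIMVA    unified TLB, by MVA, this CPU only
     *   c8, c3, 0 - TLBIALLIS  unified TLB, all entries, Inner Shareable
     *   c8, c3, 1 - TLBIMVAIS  unified TLB, by MVA, Inner Shareable
     *   c8, c3, 2 - TLBIASIDIS unified TLB, by ASID, Inner Shareable
     */

The Inner Shareable forms are the ones the MMFR3 check advertises: they
apply to every CPU in the Inner Shareable domain without any IPI.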