Commit 3db6d5a5 authored by Nadav Amit, committed by Ingo Molnar

x86/mm/tlb: Remove 'struct flush_tlb_info' from the stack

Move the flush_tlb_info variables off the stack. This allows the structure
to be cache-line aligned, avoiding potentially unnecessary cache-line
movements. It also gives the variables a fixed virtual-to-physical
translation, which reduces TLB misses.

Use a per-CPU struct for flush_tlb_mm_range() and
flush_tlb_kernel_range(), and add debug assertions to ensure there are
no nested TLB flushes that might overwrite the per-CPU data. For
arch_tlbbatch_flush(), use a const struct.
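
For readers who want the shape of the get/put pattern outside the kernel, below
is a minimal userspace sketch, assuming a thread-local slot stands in for the
per-CPU variable; the names (tlb_info, get_info, put_info) are illustrative,
not the kernel's API:

	/*
	 * Userspace analogue of the per-CPU flush_tlb_info pattern. A
	 * thread-local slot mimics DEFINE_PER_CPU_SHARED_ALIGNED, and a
	 * thread-local counter mimics flush_tlb_info_idx.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct tlb_info {
		uint64_t start;
		uint64_t end;
		bool freed_tables;
	};

	static _Thread_local struct tlb_info tlb_info_slot;
	static _Thread_local unsigned int tlb_info_idx;

	static struct tlb_info *get_info(uint64_t start, uint64_t end,
					 bool freed_tables)
	{
		struct tlb_info *info = &tlb_info_slot;

		/* Fail loudly if a nested user would overwrite the live slot. */
		tlb_info_idx++;
		assert(tlb_info_idx == 1);	/* mimics the BUG_ON() check */

		info->start = start;
		info->end = end;
		info->freed_tables = freed_tables;
		return info;
	}

	static void put_info(void)
	{
		tlb_info_idx--;	/* slot may be reused once released */
	}

	int main(void)
	{
		struct tlb_info *info = get_info(0x1000, 0x2000, false);
		/* ... hand info to a flushing routine ... */
		put_info();
		return 0;
	}

The point of the counter is the same as in the patch: the slot is safe to reuse
only because each user finishes with it before the next one starts.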

Results when running a microbenchmark that performs 10^6 MADV_DONTNEED
operations and touches a page, while 3 additional threads run a
busy-wait loop (5 runs, PTI and retpolines turned off):

			base		off-stack
			----		---------
  avg (usec/op)		1.629		1.570	(-3%)
  stddev		0.014		0.009
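
The benchmark source is not included in the commit; a minimal reconstruction
from the description above might look as follows (the MADV_DONTNEED loop and
thread count follow the text; affinity, warm-up, and timing details are
assumptions):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <time.h>
	#include <unistd.h>

	#define OPS 1000000

	static atomic_int stop;

	/* Busy-wait threads keep other CPUs in mm_cpumask, forcing remote flushes. */
	static void *spin(void *arg)
	{
		(void)arg;
		while (!atomic_load(&stop))
			;
		return NULL;
	}

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		pthread_t t[3];
		struct timespec t0, t1;

		if (buf == MAP_FAILED)
			return 1;

		for (int i = 0; i < 3; i++)
			pthread_create(&t[i], NULL, spin, NULL);

		clock_gettime(CLOCK_MONOTONIC, &t0);
		for (int i = 0; i < OPS; i++) {
			madvise(buf, page, MADV_DONTNEED); /* zap -> TLB flush */
			buf[0] = 1;                        /* fault page back in */
		}
		clock_gettime(CLOCK_MONOTONIC, &t1);

		atomic_store(&stop, 1);
		for (int i = 0; i < 3; i++)
			pthread_join(t[i], NULL);

		double us = (t1.tv_sec - t0.tv_sec) * 1e6 +
			    (t1.tv_nsec - t0.tv_nsec) / 1e3;
		printf("%.3f usec/op\n", us / OPS);
		return 0;
	}
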
Signed-off-by: Nadav Amit <namit@vmware.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20190425230143.7008-1-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent da398dbd
@@ -634,7 +634,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
 }
 
-static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
 {
 	const struct flush_tlb_info *f = info;
@@ -722,43 +722,81 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  */
 unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
+#endif
+
+static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+			unsigned long start, unsigned long end,
+			unsigned int stride_shift, bool freed_tables,
+			u64 new_tlb_gen)
+{
+	struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * Ensure that the following code is non-reentrant and flush_tlb_info
+	 * is not overwritten. This means no TLB flushing is initiated by
+	 * interrupt handlers and machine-check exception handlers.
+	 */
+	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
+#endif
+
+	info->start		= start;
+	info->end		= end;
+	info->mm		= mm;
+	info->stride_shift	= stride_shift;
+	info->freed_tables	= freed_tables;
+	info->new_tlb_gen	= new_tlb_gen;
+
+	return info;
+}
+
+static inline void put_flush_tlb_info(void)
+{
+#ifdef CONFIG_DEBUG_VM
+	/* Complete reentrancy prevention checks */
+	barrier();
+	this_cpu_dec(flush_tlb_info_idx);
+#endif
+}
+
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned int stride_shift,
 				bool freed_tables)
 {
+	struct flush_tlb_info *info;
+	u64 new_tlb_gen;
 	int cpu;
 
-	struct flush_tlb_info info = {
-		.mm = mm,
-		.stride_shift = stride_shift,
-		.freed_tables = freed_tables,
-	};
-
 	cpu = get_cpu();
 
-	/* This is also a barrier that synchronizes with switch_mm(). */
-	info.new_tlb_gen = inc_mm_tlb_gen(mm);
-
 	/* Should we flush just the requested range? */
-	if ((end != TLB_FLUSH_ALL) &&
-	    ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
-		info.start = start;
-		info.end = end;
-	} else {
-		info.start = 0UL;
-		info.end = TLB_FLUSH_ALL;
+	if ((end == TLB_FLUSH_ALL) ||
+	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
+		start = 0;
+		end = TLB_FLUSH_ALL;
 	}
 
+	/* This is also a barrier that synchronizes with switch_mm(). */
+	new_tlb_gen = inc_mm_tlb_gen(mm);
+
+	info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
+				  new_tlb_gen);
+
 	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
-		VM_WARN_ON(irqs_disabled());
+		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+		flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
 		local_irq_enable();
 	}
 
 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), &info);
+		flush_tlb_others(mm_cpumask(mm), info);
 
+	put_flush_tlb_info();
 	put_cpu();
 }
@@ -787,38 +825,48 @@ static void do_kernel_range_flush(void *info)
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	/* Balance as user space task's flush, a bit conservative */
 	if (end == TLB_FLUSH_ALL ||
 	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
 		on_each_cpu(do_flush_tlb_all, NULL, 1);
 	} else {
-		struct flush_tlb_info info;
-		info.start = start;
-		info.end = end;
-		on_each_cpu(do_kernel_range_flush, &info, 1);
+		struct flush_tlb_info *info;
+
+		preempt_disable();
+		info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
+
+		on_each_cpu(do_kernel_range_flush, info, 1);
+
+		put_flush_tlb_info();
+		preempt_enable();
 	}
 }
 
+/*
+ * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
+ * This means that the 'struct flush_tlb_info' that describes which mappings to
+ * flush is actually fixed. We therefore set a single fixed struct and use it in
+ * arch_tlbbatch_flush().
+ */
+static const struct flush_tlb_info full_flush_tlb_info = {
+	.mm = NULL,
+	.start = 0,
+	.end = TLB_FLUSH_ALL,
+};
+
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-	struct flush_tlb_info info = {
-		.mm = NULL,
-		.start = 0UL,
-		.end = TLB_FLUSH_ALL,
-	};
-
 	int cpu = get_cpu();
 
 	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
-		VM_WARN_ON(irqs_disabled());
+		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
+		flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
 		local_irq_enable();
 	}
 
 	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-		flush_tlb_others(&batch->cpumask, &info);
+		flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
 
 	cpumask_clear(&batch->cpumask);
...