Commit 611ae8e3 authored by Alex Shi, committed by H. Peter Anvin

x86/tlb: enable tlb flush range support for x86

Not every tlb_flush needs to evacuate all TLB entries; in munmap, for
example, a few 'invlpg' instructions are better for whole-process
performance, since they leave most of the TLB entries in place for
later accesses.
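
To make the trade-off concrete, here is a minimal user-space sketch of
the balance-point check (my illustration, not code from this patch; the
TLB size and shift below are hypothetical, while in the kernel they come
from CPUID and the per-CPU tuning added earlier in this series):

/*
 * Rough sketch of the flush-all vs. flush-range heuristic. All
 * numbers are made up for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define TLB_FLUSH_ALL	(~0UL)

static unsigned tlb_entries = 512;	/* hypothetical last-level TLB size */
static int tlb_flushall_shift = 5;	/* hypothetical balance point */

/* Return 1 for a full TLB flush, 0 for per-page 'invlpg'. */
static int want_full_flush(unsigned long start, unsigned long end,
			   unsigned long total_vm_pages)
{
	unsigned act_entries;

	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1)
		return 1;
	/* This task can occupy at most every TLB entry. */
	act_entries = total_vm_pages > tlb_entries ?
			tlb_entries : total_vm_pages;
	return (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift;
}

int main(void)
{
	/* 8 pages: 8 <= 512 >> 5 == 16, so per-page invlpg wins. */
	printf("%d\n", want_full_flush(0x400000UL, 0x408000UL, 4096));
	/* 64 pages: 64 > 16, so flush the whole TLB instead. */
	printf("%d\n", want_full_flush(0x400000UL, 0x440000UL, 4096));
	return 0;
}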

This patch also rewrites flush_tlb_range for two purposes:
1. split it out to get a flush_tlb_mm_range function;
2. clean up to reduce line breaking, thanks to Borislav for his input.

My micro-benchmark 'munmap' (http://lkml.org/lkml/2012/5/17/59)
shows that random memory access on another CPU gets a 0~50% speedup
on a 2P * 4-core * HT NHM EP machine while 'munmap' is running.

Thanks to Yongjie for his testing of this patch:
-------------
I used Linux 3.4-rc6 with and without his patches as the Xen dom0 and
guest kernel.
After running two benchmarks in a Xen HVM guest, I found his patches
brought about a 1%~3% performance gain in the 'kernel build' and
'netperf' tests, though the gain was not very stable in the 'kernel
build' testing.

Some detailed testing results are below.

Testing Environment:
	Hardware: Romley-EP platform
	Xen version: latest upstream
	Linux kernel: 3.4-RC6
	Guest vCPU number: 8
	NIC: Intel 82599 (10Gb bandwidth)

In 'kernel build' testing in guest:
	Command line   |  performance gain
	make -j 4      |       3.81%
	make -j 8      |       0.37%
	make -j 16     |      -0.52%

In the 'netperf' testing, we tested TCP_STREAM with the default socket
size, using a 16384-byte send as the large packet and a 64-byte send as
the small packet.
I used several clients to apply networking pressure, and the 'netperf'
server automatically spawned several threads to respond to them.
	Packet size  |  Thread number | performance gain
	16384 bytes  |      4       |   0.02%
	16384 bytes  |      8       |   2.21%
	16384 bytes  |      16      |   2.04%
	64 bytes     |      4       |   1.07%
	64 bytes     |      8       |   3.31%
	64 bytes     |      16      |   0.71%
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-8-git-send-email-alex.shi@intel.com
Tested-by: Ren, Yongjie <yongjie.ren@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 597e1c35
arch/x86/include/asm/tlb.h
@@ -4,7 +4,14 @@
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define tlb_flush(tlb)							\
+{									\
+	if (tlb->fullmm == 0)						\
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\
+	else								\
+		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
+}
 
 #include <asm-generic/tlb.h>
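For orientation (not part of this patch): with the mmu_gather range
tracking added by the parent commit 597e1c35, the macro above is reached
roughly as sketched below during a partial unmap. This call chain is my
reading of the surrounding v3.5-era code, not lines from this diff:

/*
 * Sketch of the call path (illustrative):
 *
 *   munmap()
 *     unmap_region()
 *       tlb_gather_mmu(&tlb, mm, 0);         // tlb.fullmm == 0
 *       unmap_vmas(...);                     // accumulates tlb.start/tlb.end
 *       tlb_finish_mmu(&tlb, start, end);    // invokes tlb_flush(&tlb)
 *         flush_tlb_mm_range(mm, tlb.start, tlb.end, 0UL);
 *
 * exit_mmap() passes fullmm == 1 instead, so the macro falls through
 * to flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL).
 */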
arch/x86/include/asm/tlbflush.h
@@ -105,6 +105,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }
 
+static inline void flush_tlb_mm_range(struct vm_area_struct *vma,
+	   unsigned long start, unsigned long end, unsigned long vmflag)
+{
+	if (vma->vm_mm == current->active_mm)
+		__flush_tlb();
+}
+
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long start,
@@ -122,12 +129,16 @@ static inline void reset_lazy_tlbstate(void)
 
 #define local_flush_tlb() __flush_tlb()
 
+#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end)	\
+		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-			    unsigned long start, unsigned long end);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag);
 
 #define flush_tlb()	flush_tlb_current_task()
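With the hunks above, existing call sites keep compiling unchanged; the
old entry points survive as macros over flush_tlb_mm_range. An
illustrative expansion (the call sites shown are generic examples, not
lines from this patch):

/*
 *	flush_tlb_range(vma, start, end);
 *	  => flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
 *
 *	flush_tlb_mm(mm);
 *	  => flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL);
 */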
arch/x86/mm/tlb.c
@@ -301,23 +301,10 @@ void flush_tlb_current_task(void)
 	preempt_enable();
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	preempt_disable();
-
-	if (current->active_mm == mm) {
-		if (current->mm)
-			local_flush_tlb();
-		else
-			leave_mm(smp_processor_id());
-	}
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
-
-	preempt_enable();
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * It can find out the THP large page, or
+ * HUGETLB page in tlb_flush when THP disabled
+ */
 static inline unsigned long has_large_page(struct mm_struct *mm,
 						unsigned long start, unsigned long end)
 {
@@ -339,68 +326,61 @@ static inline unsigned long has_large_page(struct mm_struct *mm,
 	}
 	return 0;
 }
-#else
-static inline unsigned long has_large_page(struct mm_struct *mm,
-						unsigned long start, unsigned long end)
-{
-	return 0;
-}
-#endif
-void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm;
 
-	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
-flush_all:
-		flush_tlb_mm(vma->vm_mm);
-		return;
-	}
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag)
+{
+	unsigned long addr;
+	unsigned act_entries, tlb_entries = 0;
 
 	preempt_disable();
-	mm = vma->vm_mm;
-	if (current->active_mm == mm) {
-		if (current->mm) {
-			unsigned long addr, vmflag = vma->vm_flags;
-			unsigned act_entries, tlb_entries = 0;
+	if (current->active_mm != mm)
+		goto flush_all;
 
-			if (vmflag & VM_EXEC)
-				tlb_entries = tlb_lli_4k[ENTRIES];
-			else
-				tlb_entries = tlb_lld_4k[ENTRIES];
+	if (!current->mm) {
+		leave_mm(smp_processor_id());
+		goto flush_all;
+	}
 
-			act_entries = tlb_entries > mm->total_vm ?
-					mm->total_vm : tlb_entries;
-			if ((end - start) >> PAGE_SHIFT >
-					act_entries >> tlb_flushall_shift)
-				local_flush_tlb();
-			else {
-				if (has_large_page(mm, start, end)) {
-					preempt_enable();
-					goto flush_all;
-				}
-				for (addr = start; addr < end;
-						addr += PAGE_SIZE)
-					__flush_tlb_single(addr);
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
+					|| vmflag == VM_HUGETLB) {
+		local_flush_tlb();
+		goto flush_all;
+	}
 
-				if (cpumask_any_but(mm_cpumask(mm),
-					smp_processor_id()) < nr_cpu_ids)
-					flush_tlb_others(mm_cpumask(mm), mm,
-								start, end);
-				preempt_enable();
-				return;
-			}
-		} else
-			leave_mm(smp_processor_id());
-	}
+	/* In modern CPU, last level tlb used for both data/ins */
+	if (vmflag & VM_EXEC)
+		tlb_entries = tlb_lli_4k[ENTRIES];
+	else
+		tlb_entries = tlb_lld_4k[ENTRIES];
+	/* Assume all of TLB entries was occupied by this task */
+	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
 
+	/* tlb_flushall_shift is on balance point, details in commit log */
+	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+		local_flush_tlb();
+	else {
+		if (has_large_page(mm, start, end)) {
+			local_flush_tlb();
+			goto flush_all;
+		}
+		/* flush range by one by one 'invlpg' */
+		for (addr = start; addr < end; addr += PAGE_SIZE)
+			__flush_tlb_single(addr);
+
+		if (cpumask_any_but(mm_cpumask(mm),
+				smp_processor_id()) < nr_cpu_ids)
+			flush_tlb_others(mm_cpumask(mm), mm, start, end);
+		preempt_enable();
+		return;
+	}
+
+flush_all:
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 {
 	struct mm_struct *mm = vma->vm_mm;
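For anyone wanting to observe the crossover from user space, here is a
crude probe in the spirit of the micro-benchmark cited in the log (my
own sketch, not the lkml one; it times the whole munmap, page-table
teardown included, so the absolute numbers are only indicative):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>

#define PAGE 4096

/* Time one mmap-touch-munmap cycle of 'pages' pages, in nanoseconds. */
static double cycle_ns(size_t pages)
{
	struct timespec t0, t1;
	char *p = mmap(NULL, pages * PAGE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return -1.0;
	memset(p, 1, pages * PAGE);	/* populate PTEs so munmap must flush */
	clock_gettime(CLOCK_MONOTONIC, &t0);
	munmap(p, pages * PAGE);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	return (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
}

int main(void)
{
	size_t n;

	/* Around the balance point the kernel switches from per-page
	 * invlpg to a full flush; the crossover depends on the CPU. */
	for (n = 1; n <= 256; n *= 2)
		printf("%3zu pages: %.0f ns\n", n, cycle_ns(n));
	return 0;
}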