Commit effee4b9 authored by Alex Shi, committed by H. Peter Anvin

x86/tlb: do flush_tlb_kernel_range by 'invlpg'

This patch does flush_tlb_kernel_range by 'invlpg'. The performance cost
and gain were analyzed in the previous patch
(x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range).

In the testing reported at http://lkml.org/lkml/2012/6/21/10

the cost is mostly hidden by the long kernel path, but the gain is still
quite clear: memory access in a user application can increase by 30+% while
the kernel executes this function.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-10-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 52aec330
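For readers skimming the commit, the heart of the change is a size heuristic: flush page by page with 'invlpg' when the range is small, and fall back to a full TLB flush once the range would evict most of the TLB anyway. Below is a minimal user-space sketch of that decision, not the kernel code itself; the constants standing in for tlb_lld_4k[ENTRIES] and tlb_flushall_shift are made up for illustration (the real values are read from CPUID at boot), and the flush helpers just print what the kernel would do.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define TLB_FLUSH_ALL	(~0UL)

/* Made-up stand-ins for tlb_lld_4k[ENTRIES] and tlb_flushall_shift. */
static unsigned act_entries = 512;
static int tlb_flushall_shift = 5;

static void flush_one_page(unsigned long addr)
{
	printf("invlpg %#lx\n", addr);	/* stands in for __flush_tlb_single() */
}

static void flush_everything(void)
{
	printf("full TLB flush\n");	/* stands in for do_flush_tlb_all() */
}

/* Same shape as the new flush_tlb_kernel_range(): per-page flushes for
 * small ranges, one global flush for large ones. */
static void kernel_range_flush(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
	    (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
		flush_everything();
		return;
	}
	for (addr = start; addr < end; addr += PAGE_SIZE)
		flush_one_page(addr);
}

int main(void)
{
	unsigned long base = 0xffffc90000000000UL;	/* 64-bit example address */

	kernel_range_flush(base, base + 4 * PAGE_SIZE);	/* 4 pages <= 512 >> 5 = 16: per-page invlpg */
	kernel_range_flush(base, base + 64 * PAGE_SIZE);	/* 64 pages > 16: full flush */
	return 0;
}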
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -123,6 +123,12 @@ static inline void reset_lazy_tlbstate(void)
 {
 }
 
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();
+}
+
 #else /* SMP */
 
 #include <asm/smp.h>
@@ -139,6 +145,7 @@ extern void flush_tlb_current_task(void);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #define flush_tlb()	flush_tlb_current_task()
@@ -168,10 +175,4 @@ static inline void reset_lazy_tlbstate(void)
 	native_flush_tlb_others(mask, mm, start, end)
 #endif
 
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
 #endif /* _ASM_X86_TLBFLUSH_H */
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -264,6 +264,36 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
+static void do_kernel_range_flush(void *info)
+{
+	struct flush_tlb_info *f = info;
+	unsigned long addr;
+
+	/* flush the range one page at a time with 'invlpg' */
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+		__flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned act_entries;
+	struct flush_tlb_info info;
+
+	/* On modern CPUs the last-level TLB is shared by data and instructions */
+	act_entries = tlb_lld_4k[ENTRIES];
+
+	/* Balance as for a user-space task's flush; a bit conservative */
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+	    (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else {
+		info.flush_start = start;
+		info.flush_end = end;
+		on_each_cpu(do_kernel_range_flush, &info, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
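As a usage note, and not part of this patch: flush_tlb_kernel_range() is what generic code is expected to call after changing kernel page tables, for instance when vmalloc mappings are torn down. A hypothetical caller, with purely illustrative names, would follow this pattern:

/* Hypothetical caller: unmap a few kernel pages, then flush just that
 * range. Small ranges now take the per-page 'invlpg' path instead of a
 * full TLB flush on every CPU. */
static void teardown_kernel_mapping(unsigned long va, int nr_pages)
{
	/* ... clear the PTEs covering [va, va + nr_pages * PAGE_SIZE) ... */
	flush_tlb_kernel_range(va, va + nr_pages * PAGE_SIZE);
}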