Commit 9dfa6dee authored by Dave Hansen, committed by H. Peter Anvin

x86/mm: Fix missed global TLB flush stat

If we take the

	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
		local_flush_tlb();
		goto out;
	}

path out of flush_tlb_mm_range(), we will have flushed the tlb,
but not incremented NR_TLB_LOCAL_FLUSH_ALL.  This unifies the
way out of the function so that we always take a single path when
doing a full tlb flush.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: http://lkml.kernel.org/r/20140731154056.FF763B76@viggo.jf.intel.com
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent e9f4e0a9
@@ -164,8 +164,9 @@ unsigned long tlb_single_page_flush_ceiling = 1;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
-	int need_flush_others_all = 1;
 	unsigned long addr;
+	/* do a global flush by default */
+	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
 	preempt_disable();
 	if (current->active_mm != mm)
@@ -176,16 +177,14 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		goto out;
 	}
 
-	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
-		local_flush_tlb();
-		goto out;
-	}
+	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
-	if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+		base_pages_to_flush = TLB_FLUSH_ALL;
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
-		need_flush_others_all = 0;
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
@@ -193,7 +192,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		}
 	}
 out:
-	if (need_flush_others_all) {
+	if (base_pages_to_flush == TLB_FLUSH_ALL) {
 		start = 0UL;
 		end = TLB_FLUSH_ALL;
 	}
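
For readability, here is a minimal sketch of how the local-flush logic of flush_tlb_mm_range() reads with this patch applied, assembled from the hunks above. The code elided between hunks (and the tail that notifies other CPUs) is only summarized in comments and is not part of this diff.

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;
	/* ... elided context: bail out early when there is nothing to flush ... */

	/* Only a bounded, non-hugetlb range is converted to a page count. */
	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		/* Single path for every full flush: count the event, then flush. */
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush range by one by one 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			/* ... elided context: per-page flush of addr ... */
		}
	}
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	/* ... elided context: flush on other CPUs, then preempt_enable() ... */
}

The early "local_flush_tlb(); goto out;" branch is gone, so every full flush now goes through the one branch that increments NR_TLB_LOCAL_FLUSH_ALL before flushing.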