Commit d950c947 authored by Mel Gorman, committed by Linus Torvalds

mm: defer flush of writable TLB entries

If a PTE is unmapped and it's dirty then it was writable recently.  Due to
deferred TLB flushing, it's best to assume a writable TLB cache entry
exists.  With that assumption, the TLB must be flushed before any IO can
start or the page is freed to avoid lost writes or data corruption.  This
patch defers flushing of potentially writable TLBs as long as possible.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 72b252ae
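
For orientation before the diff, a minimal user-space sketch of the bookkeeping this patch introduces: a per-task batch records whether any TLB flush has been deferred and whether any of the unmapped PTEs were dirty (hence potentially still cached as a writable TLB entry), and the dirty-aware flush is called before IO would start. This is an illustration only, not kernel code; the names mirror the patch, and the printf stands in for the real IPI-based batched flush.

/* Sketch of the deferred-flush bookkeeping added by this patch (illustration only). */
#include <stdbool.h>
#include <stdio.h>

struct tlbflush_unmap_batch {
	bool flush_required;	/* at least one unmap deferred its TLB flush */
	bool writable;		/* a dirty, hence possibly writable, PTE was unmapped */
};

static struct tlbflush_unmap_batch tlb_ubc;

/* Stand-in for the real batched flush across the accumulated cpumask. */
static void try_to_unmap_flush(void)
{
	if (!tlb_ubc.flush_required)
		return;
	printf("flushing batched TLB entries\n");
	tlb_ubc.flush_required = false;
	tlb_ubc.writable = false;
}

/* Flush only if a potentially writable entry could race with IO. */
static void try_to_unmap_flush_dirty(void)
{
	if (tlb_ubc.writable)
		try_to_unmap_flush();
}

/* Called at unmap time instead of flushing the TLB immediately. */
static void set_tlb_ubc_flush_pending(bool writable)
{
	tlb_ubc.flush_required = true;
	if (writable)
		tlb_ubc.writable = true;
}

int main(void)
{
	/* Clean PTE unmapped: flush stays deferred, nothing needed before IO. */
	set_tlb_ubc_flush_pending(false);
	try_to_unmap_flush_dirty();	/* no flush happens */

	/* Dirty PTE unmapped: still deferred, but must flush before pageout. */
	set_tlb_ubc_flush_pending(true);
	try_to_unmap_flush_dirty();	/* flushes here, before IO starts */
	return 0;
}
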
include/linux/sched.h
@@ -1354,6 +1354,13 @@ struct tlbflush_unmap_batch {
 
 	/* True if any bit in cpumask is set */
 	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
 };
 
 struct task_struct {
mm/internal.h
@@ -431,10 +431,14 @@ struct tlbflush_unmap_batch;
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
+void try_to_unmap_flush_dirty(void);
 #else
 static inline void try_to_unmap_flush(void)
 {
 }
+static inline void try_to_unmap_flush_dirty(void)
+{
+}
 
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 #endif /* __MM_INTERNAL_H */
mm/rmap.c
@@ -626,16 +626,34 @@ void try_to_unmap_flush(void)
 	}
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
+	tlb_ubc->writable = false;
 	put_cpu();
 }
 
+/* Flush iff there are potentially writable TLB entries that can race with IO */
+void try_to_unmap_flush_dirty(void)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+
+	if (tlb_ubc->writable)
+		try_to_unmap_flush();
+}
+
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-		struct page *page)
+		struct page *page, bool writable)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 
 	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
 	tlb_ubc->flush_required = true;
+
+	/*
+	 * If the PTE was dirty then it's best to assume it's writable. The
+	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+	 * before the page is queued for IO.
+	 */
+	if (writable)
+		tlb_ubc->writable = true;
 }
 
 /*
@@ -658,7 +676,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 }
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-		struct page *page)
+		struct page *page, bool writable)
 {
 }
@@ -1315,11 +1333,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		 */
 		pteval = ptep_get_and_clear(mm, address, pte);
 
-		/* Potentially writable TLBs must be flushed before IO */
-		if (pte_dirty(pteval))
-			flush_tlb_page(vma, address);
-		else
-			set_tlb_ubc_flush_pending(mm, page);
+		set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
 	} else {
 		pteval = ptep_clear_flush(vma, address, pte);
 	}
mm/vmscan.c
@@ -1098,7 +1098,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (!sc->may_writepage)
 				goto keep_locked;
 
-			/* Page is dirty, try to write it out here */
+			/*
+			 * Page is dirty. Flush the TLB if a writable entry
+			 * potentially exists to avoid CPU writes after IO
+			 * starts and then write it out here.
+			 */
+			try_to_unmap_flush_dirty();
 			switch (pageout(page, mapping, sc)) {
 			case PAGE_KEEP:
 				goto keep_locked;