Commit fa0aafb8 authored by Peter Zijlstra, committed by Ingo Molnar

asm-generic/tlb: Remove tlb_flush_mmu_free()

As the comment notes, tlb_flush_mmu_free() is a potentially dangerous
operation. Just use tlb_flush_mmu(), which will skip the (double) TLB
invalidate if it really isn't needed anyway.

No change in behavior intended.
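
For context, a simplified sketch of why the substitution cannot cause a
double invalidate: tlb_flush_mmu() is tlb_flush_mmu_tlbonly() plus
tlb_flush_mmu_free(), and the invalidate half bails out when nothing is
pending. This is modeled on the mm/mmu_gather.c of this era, not verbatim
code; the exact early-return test and field names vary across kernel
versions.

/*
 * Sketch only: the invalidate step is a no-op when no range has been
 * accumulated, so calling tlb_flush_mmu() where only a free was needed
 * does not re-invalidate the TLB.
 */
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)			/* nothing pending: skip invalidate */
		return;

	tlb_flush(tlb);			/* arch-specific TLB invalidate */
	__tlb_reset_range(tlb);		/* reset range for the next batch */
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);	/* invalidate (possibly a no-op) */
	tlb_flush_mmu_free(tlb);	/* free the queued pages */
}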
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b3fa8ed4
include/asm-generic/tlb.h
@@ -67,16 +67,13 @@
  * call before __tlb_remove_page*() to set the current page-size; implies a
  * possible tlb_flush_mmu() call.
  *
- * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() / tlb_flush_mmu_free()
+ * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
  *
  *   tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
  *                             related state, like the range)
  *
- *   tlb_flush_mmu_free() - frees the queued pages; make absolutely
- *                          sure no additional tlb_remove_page()
- *                          calls happen between _tlbonly() and this.
- *
- *   tlb_flush_mmu() - the above two calls.
+ *   tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *                     whatever pages are still batched.
  *
  * - mmu_gather::fullmm
  *
@@ -281,7 +278,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
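To make the removed warning concrete, here is a sketch of the unsafe
sequence the old comment guarded against. The caller is hypothetical;
only the tlb_* functions are the real API.

/*
 * Hypothetical caller showing the hazard: a page queued between
 * _tlbonly() and _free() is handed back to the allocator while other
 * CPUs may still hold TLB entries pointing at it.
 */
static void unsafe_pattern(struct mmu_gather *tlb, struct page *page)
{
	tlb_flush_mmu_tlbonly(tlb);	/* TLB invalidated up to here */
	tlb_remove_page(tlb, page);	/* queued AFTER the invalidate ... */
	tlb_flush_mmu_free(tlb);	/* ... freed while stale TLB entries
					 * may still exist */
}

The final hunk below makes tlb_flush_mmu_free() static, which takes this
footgun out of the public interface entirely.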
mm/memory.c
@@ -1155,7 +1155,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	 */
 	if (force_flush) {
 		force_flush = 0;
-		tlb_flush_mmu_free(tlb);
+		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
 	}
mm/mmu_gather.c
@@ -91,7 +91,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 #endif /* HAVE_MMU_GATHER_NO_GATHER */
 
-void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
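For reference, a schematic view of how a caller drives the mmu_gather
API after this change. Signatures follow the kernels of this era
(tlb_gather_mmu()/tlb_finish_mmu() still take start/end here);
next_unmapped_page() is a hypothetical stand-in for the real page-table
walk done by e.g. zap_pte_range().

void unmap_range_sketch(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	struct mmu_gather tlb;
	struct page *page;

	tlb_gather_mmu(&tlb, mm, start, end);		/* start a batch */

	while ((page = next_unmapped_page(mm, &start, end))) {
		if (__tlb_remove_page(&tlb, page))	/* batch full */
			tlb_flush_mmu(&tlb);		/* invalidate + free */
	}

	tlb_finish_mmu(&tlb, start, end);		/* final flush */
}

The force_flush path in zap_pte_range() above is exactly this mid-loop
tlb_flush_mmu() call.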