Commit 597e1c35 authored by Alex Shi, committed by H. Peter Anvin

mm/mmu_gather: enable tlb flush range in generic mmu_gather

This patch enables TLB flush range support in the generic mmu_gather layer.

Most architectures have their own TLB flush range support, e.g. ARM and
IA64. x86 has no such hardware support yet, but the 'invlpg' instruction
can provide it to some degree, so enable this feature in the generic
layer for x86 now. It may also prove useful for other architectures in
the future.
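
As a rough sketch (not code from this patch), a ranged flush built on
'invlpg' could look like the following; FLUSH_PAGES_CUTOFF is a made-up
threshold name, while __flush_tlb_single() and local_flush_tlb() are the
existing x86 primitives for a single-page and a full flush:

/*
 * Hedged sketch: below some cut-off it is cheaper to 'invlpg' each
 * page in [start, end) than to reload CR3 and lose every cached
 * translation.
 */
#define FLUSH_PAGES_CUTOFF	33	/* made-up value for illustration */

static void flush_tlb_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (((end - start) >> PAGE_SHIFT) > FLUSH_PAGES_CUTOFF) {
		local_flush_tlb();		/* full flush: reload CR3 */
		return;
	}
	for (addr = start; addr < end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);	/* invlpg one page */
}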

The generic mmu_gather struct is protected by the macro
HAVE_GENERIC_MMU_GATHER. Architectures with their own flush range
support carry their own mmu_gather struct, so this change is safe for
them.

In the future we may unify this struct and the related functions across
architectures.

Thanks to Peter Zijlstra for his time and his repeated reminders about
keeping the code safe across architectures!
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-7-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 3df3212f
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -86,6 +86,8 @@ struct mmu_gather {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch	*batch;
 #endif
+	unsigned long		start;
+	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
 				fast_mode  : 1; /* No batching */
...
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;

 	tlb->fullmm = fullmm;
+	tlb->start = -1UL;
+	tlb->end = 0;
 	tlb->need_flush = 0;
 	tlb->fast_mode = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
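
Note the initial values above: start = -1UL and end = 0 describe an
empty range, so gather sites can widen the pending flush window with
plain min()/max() updates and no special first-touch case. A hedged
sketch of such an update helper (the name is illustrative, not part of
this patch):

static inline void tlb_track_range(struct mmu_gather *tlb,
				   unsigned long addr, unsigned long end)
{
	tlb->start = min(tlb->start, addr);	/* shrinks from -1UL */
	tlb->end = max(tlb->end, end);		/* grows from 0 */
}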
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	struct mmu_gather_batch *batch, *next;

+	tlb->start = start;
+	tlb->end = end;
 	tlb_flush_mmu(tlb);

 	/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	 */
 	if (force_flush) {
 		force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+		tlb->start = addr;
+		tlb->end = end;
+#endif
 		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
...
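
With tlb->start and tlb->end populated, an architecture's flush path
can hand the exact range to its primitive instead of always flushing
the whole mm. A hedged sketch of that consumer side, reusing the
illustrative flush_tlb_range_sketch() from above (flush_tlb_mm() is the
real whole-mm flush):

static void tlb_flush_sketch(struct mmu_gather *tlb)
{
	/* start > end means the range is still empty (start == -1UL) */
	if (tlb->fullmm || tlb->start > tlb->end)
		flush_tlb_mm(tlb->mm);
	else
		flush_tlb_range_sketch(tlb->start, tlb->end);
}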