Commit 0574ecd1 authored by Christoph Hellwig, committed by Linus Torvalds

mm: refactor __purge_vmap_area_lazy()

Move the purge_lock synchronization to the callers, move the call to
purge_fragmented_blocks_allcpus at the beginning of the function to the
callers that need it, move the force_flush behavior to the caller that
needs it, and pass start and end by value instead of by reference.

No change in behavior.
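To make the new calling convention concrete, here is a minimal, hypothetical userspace sketch of the pattern the refactor produces: the worker takes start/end by value, expects its caller to already hold the purge lock, and reports whether it purged anything, while the individual call sites decide between trylock and lock, whether to drain per-CPU blocks first, and whether to force a flush. All names below (purge_range, drain_fragmented_blocks, try_purge, purge, unmap_aliases) and the pthread locking are illustrative stand-ins, not the kernel symbols.

/*
 * Hypothetical userspace model of the refactored calling convention;
 * the names and pthread locking are stand-ins, not kernel code.
 */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for purge_fragmented_blocks_allcpus(). */
static void drain_fragmented_blocks(void)
{
	puts("draining per-CPU blocks");
}

/*
 * Core worker: the caller must already hold purge_lock (the kernel
 * version asserts this with lockdep_assert_held()).  start/end are
 * passed by value; the return value says whether anything was purged.
 */
static bool purge_range(unsigned long start, unsigned long end)
{
	printf("purging [%#lx, %#lx)\n", start, end);
	return true;		/* pretend lazily-freed areas were found */
}

/* Opportunistic caller: skip entirely if a purge is already running. */
static void try_purge(void)
{
	if (pthread_mutex_trylock(&purge_lock) == 0) {
		purge_range(ULONG_MAX, 0);
		pthread_mutex_unlock(&purge_lock);
	}
}

/* Synchronous caller: always purge, draining per-CPU blocks first. */
static void purge(void)
{
	pthread_mutex_lock(&purge_lock);
	drain_fragmented_blocks();
	purge_range(ULONG_MAX, 0);
	pthread_mutex_unlock(&purge_lock);
}

/* Flush-on-demand caller: flush the range even if nothing was purged. */
static void unmap_aliases(unsigned long start, unsigned long end, bool flush)
{
	pthread_mutex_lock(&purge_lock);
	drain_fragmented_blocks();
	if (!purge_range(start, end) && flush)
		printf("flushing [%#lx, %#lx) anyway\n", start, end);
	pthread_mutex_unlock(&purge_lock);
}

int main(void)
{
	try_purge();
	purge();
	unmap_aliases(0x1000, 0x2000, true);
	return 0;
}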

Link: http://lkml.kernel.org/r/1479474236-4139-4-git-send-email-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jisheng Zhang <jszhang@marvell.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9c3acf60
@@ -601,6 +601,13 @@ static unsigned long lazy_max_pages(void)
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/*
+ * Serialize vmap purging. There is no actual criticial section protected
+ * by this look, but we want to avoid concurrent calls for performance
+ * reasons and to make the pcpu_get_vm_areas more deterministic.
+ */
+static DEFINE_SPINLOCK(vmap_purge_lock);
+
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
@@ -615,59 +622,36 @@ void set_iounmap_nonlazy(void)
 
 /*
  * Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- *              *end = max(*end, highest purged address)
  */
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
-					int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
-	static DEFINE_SPINLOCK(purge_lock);
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
 	int nr = 0;
 
-	/*
-	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
-	 * should not expect such behaviour. This just simplifies locking for
-	 * the case that isn't actually used at the moment anyway.
-	 */
-	if (!sync && !force_flush) {
-		if (!spin_trylock(&purge_lock))
-			return;
-	} else
-		spin_lock(&purge_lock);
-
-	if (sync)
-		purge_fragmented_blocks_allcpus();
+	lockdep_assert_held(&vmap_purge_lock);
 
 	valist = llist_del_all(&vmap_purge_list);
 	llist_for_each_entry(va, valist, purge_list) {
-		if (va->va_start < *start)
-			*start = va->va_start;
-		if (va->va_end > *end)
-			*end = va->va_end;
+		if (va->va_start < start)
			start = va->va_start;
+		if (va->va_end > end)
+			end = va->va_end;
 		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
 	}
 
-	if (nr)
-		atomic_sub(nr, &vmap_lazy_nr);
+	if (!nr)
+		return false;
 
-	if (nr || force_flush)
-		flush_tlb_kernel_range(*start, *end);
+	atomic_sub(nr, &vmap_lazy_nr);
+	flush_tlb_kernel_range(start, end);
 
-	if (nr) {
-		spin_lock(&vmap_area_lock);
-		llist_for_each_entry_safe(va, n_va, valist, purge_list)
-			__free_vmap_area(va);
-		spin_unlock(&vmap_area_lock);
-	}
-	spin_unlock(&purge_lock);
+	spin_lock(&vmap_area_lock);
+	llist_for_each_entry_safe(va, n_va, valist, purge_list)
+		__free_vmap_area(va);
+	spin_unlock(&vmap_area_lock);
+	return true;
 }
 
 /*
@@ -676,9 +660,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
  */
 static void try_purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	if (spin_trylock(&vmap_purge_lock)) {
+		__purge_vmap_area_lazy(ULONG_MAX, 0);
+		spin_unlock(&vmap_purge_lock);
+	}
 }
 
 /*
@@ -686,9 +671,10 @@ static void try_purge_vmap_area_lazy(void)
  */
 static void purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 1, 0);
+	spin_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	__purge_vmap_area_lazy(ULONG_MAX, 0);
+	spin_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -1075,7 +1061,11 @@ void vm_unmap_aliases(void)
 		rcu_read_unlock();
 	}
 
-	__purge_vmap_area_lazy(&start, &end, 1, flush);
+	spin_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	if (!__purge_vmap_area_lazy(start, end) && flush)
+		flush_tlb_kernel_range(start, end);
+	spin_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);