Commit bf22e37a authored by Andrey Ryabinin, committed by Linus Torvalds

mm: add vfree_atomic()

We are going to use a sleeping lock for freeing vmap.  However, some
vfree() users want to free memory from atomic (but not interrupt)
context.  For this we add vfree_atomic() - a deferred variant of vfree()
which can be used from any atomic context (except NMIs).
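
A minimal usage sketch (not part of this commit; "struct foo" and
foo_reset_buf() are hypothetical) showing a caller freeing a
vmalloc()'ed buffer while holding a spinlock, i.e. from atomic context:

	#include <linux/spinlock.h>
	#include <linux/vmalloc.h>

	struct foo {
		spinlock_t lock;
		void *buf;		/* allocated with vmalloc() */
	};

	static void foo_reset_buf(struct foo *f)
	{
		spin_lock(&f->lock);	/* atomic context from here on */
		/*
		 * vfree() may sleep once vmap freeing takes a sleeping
		 * lock; vfree_atomic() only queues the address on a
		 * per-cpu llist and schedules a worker, so it is safe
		 * under the spinlock (but still not from NMI context).
		 */
		vfree_atomic(f->buf);
		f->buf = NULL;
		spin_unlock(&f->lock);
	}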

[akpm@linux-foundation.org: tweak comment grammar]
[aryabinin@virtuozzo.com: use raw_cpu_ptr() instead of this_cpu_ptr()]
  Link: http://lkml.kernel.org/r/1481553981-3856-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1479474236-4139-5-git-send-email-hch@lst.de
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Jisheng Zhang <jszhang@marvell.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0574ecd1
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);
 
 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1487,6 +1487,38 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	return;
 }
 
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list. schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -1509,11 +1541,9 @@ void vfree(const void *addr)
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
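
For context, __vfree_deferred() reuses the per-cpu deferred-free
machinery that vfree() already relied on for interrupt context. A
sketch of that pre-existing infrastructure in mm/vmalloc.c (shown here
for illustration, not part of this diff):

	struct vfree_deferred {
		struct llist_head list;
		struct work_struct wq;
	};
	static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

	/* Worker: drains the per-cpu list in sleepable context. */
	static void free_work(struct work_struct *w)
	{
		struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
		struct llist_node *llnode = llist_del_all(&p->list);

		while (llnode) {
			void *addr = llnode;

			llnode = llist_next(llnode);
			__vunmap(addr, 1);
		}
	}

Note that llist_add() returns true only when the list was empty before
the add, so __vfree_deferred() calls schedule_work() once per batch;
the worker later performs the real __vunmap() where sleeping is allowed.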