Commit 32fcfd40 authored by Al Viro

make vfree() safe to call from interrupt contexts

A bunch of RCU callbacks want to be able to do vfree() and end up with
rather kludgy schemes.  Just let vfree() do the right thing - put the
victim on llist and schedule actual __vunmap() via schedule_work(), so
that it runs from non-interrupt context.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 6dbe51c2
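
Before the diff itself, a hedged sketch of the pattern this change enables (not from the patch; struct foo, foo_free_rcu and the call_rcu() caller are hypothetical). An RCU callback runs from softirq context, so until now it could not call vfree() directly and needed its own deferral scheme:

#include <linux/rcupdate.h>
#include <linux/vmalloc.h>

/* Hypothetical object, allocated with vmalloc(); rcu is the first
 * member, so the struct address is also the vmalloc base address. */
struct foo {
	struct rcu_head rcu;
	char payload[];
};

static void foo_free_rcu(struct rcu_head *head)
{
	/* Softirq context: with this commit, vfree() is safe here and
	 * quietly defers the real __vunmap() to a workqueue. */
	vfree(container_of(head, struct foo, rcu));
}

/* Caller, after unlinking the object: call_rcu(&f->rcu, foo_free_rcu); */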
@@ -27,10 +27,30 @@
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
 #include <linux/atomic.h>
+#include <linux/llist.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
+struct vfree_deferred {
+	struct llist_head list;
+	struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
+static void __vunmap(const void *, int);
+
+static void free_work(struct work_struct *w)
+{
+	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+	struct llist_node *llnode = llist_del_all(&p->list);
+	while (llnode) {
+		void *p = llnode;
+		llnode = llist_next(llnode);
+		__vunmap(p, 1);
+	}
+}
+
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
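
The subtle point in the hunk above is that deferral costs no memory: the block being freed is itself pressed into service as the llist node, since its first pointer-sized bytes are dead anyway (a vmalloc'ed block is at least a page, so it is always big enough). A minimal restatement of the trick, as a hypothetical helper rather than code from the patch:

/* Queue 'victim' for deferred freeing; assumes victim was vmalloc'ed. */
static void defer_vfree(struct vfree_deferred *p, void *victim)
{
	/* Reuse the dead block's first word as the list linkage;
	 * llist_add() is a single lock-free cmpxchg, safe in IRQ context. */
	llist_add((struct llist_node *)victim, &p->list);
	schedule_work(&p->wq);	/* no-op if free_work is already pending */
}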
@@ -1184,10 +1204,14 @@ void __init vmalloc_init(void)
 
 	for_each_possible_cpu(i) {
 		struct vmap_block_queue *vbq;
+		struct vfree_deferred *p;
 
 		vbq = &per_cpu(vmap_block_queue, i);
 		spin_lock_init(&vbq->lock);
 		INIT_LIST_HEAD(&vbq->free);
+		p = &per_cpu(vfree_deferred, i);
+		init_llist_head(&p->list);
+		INIT_WORK(&p->wq, free_work);
 	}
 
 	/* Import existing vmlist entries. */
@@ -1511,7 +1535,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 	return;
 }
- 
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -1520,15 +1544,25 @@ static void __vunmap(const void *addr, int deallocate_pages)
  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
  * NULL, no operation is performed.
  *
- * Must not be called in interrupt context.
+ * Must not be called in NMI context (strictly speaking, only if we don't
+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ * conventions for vfree() arch-dependent would be a really bad idea)
+ *
  */
 void vfree(const void *addr)
 {
-	BUG_ON(in_interrupt());
+	BUG_ON(in_nmi());
 	kmemleak_free(addr);
-	__vunmap(addr, 1);
+	if (!addr)
+		return;
+	if (unlikely(in_interrupt())) {
+		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		llist_add((struct llist_node *)addr, &p->list);
+		schedule_work(&p->wq);
+	} else
+		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
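
Note that calling schedule_work() once per deferred block is deliberate and cheap: queueing a work item that is already pending is a no-op. A hedged illustration of a burst of frees from softirq context (a, b and c are hypothetical vmalloc'ed pointers):

	vfree(a);	/* llist_add() + schedule_work(): free_work queued */
	vfree(b);	/* llist_add(); work already pending, schedule_work() is a no-op */
	vfree(c);	/* likewise; free_work later drains c, b, a in one pass */

Since llist_del_all() hands free_work the most-recently-added nodes first, the drain order is LIFO, which is harmless for freeing.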
@@ -1545,7 +1579,8 @@ void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
 	might_sleep();
-	__vunmap(addr, 0);
+	if (addr)
+		__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
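
The vunmap() hunk is independent of the deferral machinery: it only makes the function NULL-tolerant, matching kfree() and the new vfree(), which simplifies error paths. A hedged example (the mapping itself is hypothetical):

	void *ring = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);	/* may be NULL */
	/* ... error path ... */
	vunmap(ring);	/* with this patch, a NULL 'ring' is simply ignored */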