Commit 600fe975 authored by Al Viro, committed by Linus Torvalds

ipc_schedule_free() can do vfree() directly now

Commit 32fcfd40 ("make vfree() safe to call from interrupt
contexts") made it safe to do vfree directly from the RCU callback,
which allows us to simplify ipc/util.c a lot by getting rid of the
differences between vmalloc/kmalloc memory.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4ada8db3
...@@ -466,51 +466,13 @@ void ipc_free(void* ptr, int size) ...@@ -466,51 +466,13 @@ void ipc_free(void* ptr, int size)
kfree(ptr); kfree(ptr);
} }
/*
 * rcu allocations:
 * A single header is prepended to the actual allocation.  It is used
 * both while the object is live (for the reference count) and during
 * the RCU grace period (for the rcu callback head).
 *
 * Since vfree() is now safe to call from interrupt context, vmalloc'ed
 * objects can be freed directly from the RCU callback and no separate
 * work-queue header is needed for them.
 */
struct ipc_rcu {
	struct rcu_head rcu;	/* callback head used while waiting for the grace period */
	atomic_t refcount;	/* reference count; object freed when it drops to zero */
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};
/** /**
* ipc_rcu_alloc - allocate ipc and rcu space * ipc_rcu_alloc - allocate ipc and rcu space
* @size: size desired * @size: size desired
...@@ -520,74 +482,41 @@ static inline int rcu_use_vmalloc(int size) ...@@ -520,74 +482,41 @@ static inline int rcu_use_vmalloc(int size)
*/ */
void *ipc_rcu_alloc(int size) void *ipc_rcu_alloc(int size)
{ {
void *out;
/* /*
* We prepend the allocation with the rcu struct, and * We prepend the allocation with the rcu struct
* workqueue if necessary (for vmalloc).
*/ */
if (rcu_use_vmalloc(size)) { struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size);
out = vmalloc(HDRLEN_VMALLOC + size); if (unlikely(!out))
if (!out) return NULL;
goto done; atomic_set(&out->refcount, 1);
return out->data;
out += HDRLEN_VMALLOC;
container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
if (!out)
goto done;
out += HDRLEN_KMALLOC;
container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
/* set reference counter no matter what kind of allocation was done */
atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
done:
return out;
} }
int ipc_rcu_getref(void *ptr) int ipc_rcu_getref(void *ptr)
{ {
return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount); return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
{
vfree(container_of(work, struct ipc_rcu_sched, work));
} }
/** /**
* ipc_schedule_free - free ipc + rcu space * ipc_schedule_free - free ipc + rcu space
* @head: RCU callback structure for queued work * @head: RCU callback structure for queued work
*
* Since RCU callback function is called in bh,
* we need to defer the vfree to schedule_work().
*/ */
static void ipc_schedule_free(struct rcu_head *head) static void ipc_schedule_free(struct rcu_head *head)
{ {
struct ipc_rcu_grace *grace; vfree(container_of(head, struct ipc_rcu, rcu));
struct ipc_rcu_sched *sched;
grace = container_of(head, struct ipc_rcu_grace, rcu);
sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
data[0]);
INIT_WORK(&sched->work, ipc_do_vfree);
schedule_work(&sched->work);
} }
void ipc_rcu_putref(void *ptr) void ipc_rcu_putref(void *ptr)
{ {
if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount)) struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data);
if (!atomic_dec_and_test(&p->refcount))
return; return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { if (is_vmalloc_addr(ptr)) {
call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu, call_rcu(&p->rcu, ipc_schedule_free);
ipc_schedule_free);
} else { } else {
kfree_rcu(container_of(ptr, struct ipc_rcu_grace, data), rcu); kfree_rcu(p, rcu);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment