Commit 196aa013 authored by Manfred Spraul, committed by Linus Torvalds

ipc/util.c, ipc_rcu_alloc: cacheline align allocation

Enforce that ipc_rcu_alloc returns a cacheline aligned pointer on SMP.

Rationale:

The SysV sem code tries to move the main spinlock into a separate
cacheline (____cacheline_aligned_in_smp).  This works only if
ipc_rcu_alloc returns cacheline aligned pointers.  vmalloc and kmalloc
return cacheline aligned pointers; the current implementation of
ipc_rcu_alloc breaks that.
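
A minimal sketch of the alignment argument, not the patched source
itself; the kmalloc call stands in for whichever allocator backs
ipc_rcu_alloc, under the assumption stated above that it returns
cacheline aligned memory:

	/* The attribute pads sizeof(struct ipc_rcu) up to a cacheline
	 * boundary on SMP builds. */
	struct ipc_rcu {
		struct rcu_head rcu;
		atomic_t refcount;
	} ____cacheline_aligned_in_smp;

	struct ipc_rcu *out = kmalloc(sizeof(*out) + size, GFP_KERNEL);
	/*
	 * out is cacheline aligned (allocator guarantee) and sizeof(*out)
	 * is a whole number of cachelines, so the payload handed back at
	 * out + 1 starts on a cacheline boundary as well.
	 */
	void *payload = out + 1;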

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9ad66ae6
@@ -468,9 +468,7 @@ void ipc_free(void* ptr, int size)
 struct ipc_rcu {
 	struct rcu_head rcu;
 	atomic_t refcount;
-	/* "void *" makes sure alignment of following data is sane. */
-	void *data[0];
-};
+} ____cacheline_aligned_in_smp;
 
 /**
  * ipc_rcu_alloc - allocate ipc and rcu space
@@ -488,12 +486,14 @@ void *ipc_rcu_alloc(int size)
 	if (unlikely(!out))
 		return NULL;
 	atomic_set(&out->refcount, 1);
-	return out->data;
+	return out + 1;
 }
 
 int ipc_rcu_getref(void *ptr)
 {
-	return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount);
+	struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
+
+	return atomic_inc_not_zero(&p->refcount);
 }
 
 /**
@@ -507,7 +507,7 @@ static void ipc_schedule_free(struct rcu_head *head)
 
 void ipc_rcu_putref(void *ptr)
 {
-	struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data);
+	struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
 
 	if (!atomic_dec_and_test(&p->refcount))
 		return;
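
For readers outside the kernel tree, a hypothetical self-contained
userspace sketch of the same header-before-payload pattern the patch
switches to; the names (hdr, hdr_alloc, hdr_put) and the 64-byte
cacheline size are illustrative assumptions, not part of the commit:

	#include <stdatomic.h>
	#include <stdlib.h>

	/* Header padded to an assumed 64-byte cacheline. */
	struct hdr {
		_Atomic int refcount;
	} __attribute__((aligned(64)));

	static void *hdr_alloc(size_t size)
	{
		/* Round up so aligned_alloc's size is a multiple of
		 * the alignment, as C11 requires. */
		size_t total = (sizeof(struct hdr) + size + 63) & ~(size_t)63;
		struct hdr *h = aligned_alloc(64, total);

		if (!h)
			return NULL;
		atomic_init(&h->refcount, 1);
		return h + 1;	/* payload starts cacheline aligned */
	}

	static void hdr_put(void *ptr)
	{
		/* Mirror of ((struct ipc_rcu *)ptr) - 1 in the patch. */
		struct hdr *h = ((struct hdr *)ptr) - 1;

		if (atomic_fetch_sub(&h->refcount, 1) == 1)
			free(h);
	}

Because sizeof(struct hdr) is rounded up to its alignment, h + 1 lands
exactly one cacheline past the header, which is the same property the
kernel change relies on.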