Commit 6062a8dc authored by Rik van Riel, committed by Linus Torvalds

ipc,sem: fine grained locking for semtimedop

Introduce finer-grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.

If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.

If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
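
To make the scheme concrete, here is a minimal userspace sketch of the
lock-selection logic, using pthread spinlocks. The structure layout and
names (sem_entry, sem_array_sketch, sem_lock_sketch) are illustrative
stand-ins rather than the patch's actual ipc/sem.c code, and the retry
and memory-ordering details needed to fully close the race between the
two paths are omitted:

	#include <pthread.h>

	/* Illustrative stand-ins for the kernel structures. */
	struct sem_entry {
		pthread_spinlock_t lock;	/* per-semaphore lock */
	};

	struct sem_array_sketch {
		pthread_spinlock_t array_lock;	/* lock for the whole array */
		int complex_count;		/* multi-semaphore ops pending */
		struct sem_entry *base;
		int nsems;
	};

	/*
	 * Single-semaphore operations take only that semaphore's lock;
	 * everything else takes the array lock plus all per-sem locks.
	 */
	static void sem_lock_sketch(struct sem_array_sketch *sma,
				    int sem_num, int nsops)
	{
		if (nsops == 1 && sma->complex_count == 0) {
			pthread_spin_lock(&sma->base[sem_num].lock);
			if (sma->complex_count == 0)
				return;	/* fast path: only one lock held */
			/* A complex op raced in; fall back to the slow path. */
			pthread_spin_unlock(&sma->base[sem_num].lock);
		}

		pthread_spin_lock(&sma->array_lock);
		for (int i = 0; i < sma->nsems; i++)
			pthread_spin_lock(&sma->base[i].lock);
	}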

On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores look like this:

	vanilla		Davidlohr's	Davidlohr's +	Davidlohr's +
threads			patches		rwlock patches	v3 patches
10	610652		726325		1783589		2142206
20	341570		365699		1520453		1977878
30	288102		307037		1498167		2037995
40	290714		305955		1612665		2256484
50	288620		312890		1733453		2650292
60	289987		306043		1649360		2388008
70	291298		306347		1723167		2717486
80	290948		305662		1729545		2763582
90	290996		306680		1736021		2757524
100	292243		306700		1773700		3059159

[davidlohr.bueso@hp.com: do not call sem_lock when bogus sma]
[davidlohr.bueso@hp.com: make refcounter atomic]
Signed-off-by: Rik van Riel <riel@redhat.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Cc: Jason Low <jason.low2@hp.com>
Reviewed-by: Michel Lespinasse <walken@google.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Stanislav Kinsbursky <skinsbursky@parallels.com>
Tested-by: Emmanuel Benisty <benisty.e@gmail.com>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9f1bc2c9
ipc/msg.c
@@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 			goto out_unlock_free;
 		}
 		ss_add(msq, &s);
-		ipc_rcu_getref(msq);
+
+		if (!ipc_rcu_getref(msq)) {
+			err = -EIDRM;
+			goto out_unlock_free;
+		}
+
 		msg_unlock(msq);
 		schedule();
ipc/sem.c (diff collapsed in this view)
ipc/util.c
@@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
  *	NULL is returned if the allocation fails
  */
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
 {
-	void* out;
+	void *out;
 	if(size > PAGE_SIZE)
 		out = vmalloc(size);
 	else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
  */
 struct ipc_rcu_hdr
 {
-	int refcount;
+	atomic_t refcount;
 	int is_vmalloc;
 	void *data[0];
 };
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
  * @size: size desired
  *
  * Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
  */
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
 {
-	void* out;
+	void *out;
+
 	/*
 	 * We prepend the allocation with the rcu struct, and
 	 * workqueue if necessary (for vmalloc).
 	 */
 	if (rcu_use_vmalloc(size)) {
 		out = vmalloc(HDRLEN_VMALLOC + size);
-		if (out) {
-			out += HDRLEN_VMALLOC;
-			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
-			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
-		}
+		if (!out)
+			goto done;
+
+		out += HDRLEN_VMALLOC;
+		container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
 	} else {
 		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
-		if (out) {
-			out += HDRLEN_KMALLOC;
-			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
-			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
-		}
+		if (!out)
+			goto done;
+
+		out += HDRLEN_KMALLOC;
+		container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
 	}
+
+	/* set reference counter no matter what kind of allocation was done */
+	atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
 	return out;
 }
 
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
 {
-	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+	return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
 }
 
 static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
 
 void ipc_rcu_putref(void *ptr)
 {
-	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+	if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
 		return;
 
 	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
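
As a userspace analogue of the new reference counting (illustrative
only; obj, obj_get and obj_put are hypothetical names), obj_get()
mirrors atomic_inc_not_zero(): once the count has reached zero the
object may already be queued for RCU freeing, so a get must fail, which
callers such as do_msgsnd() above turn into -EIDRM:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int refcount;	/* starts at 1, as in ipc_rcu_alloc() */
	};

	/* Take a reference unless the object is already dying. */
	static bool obj_get(struct obj *o)
	{
		int old = atomic_load(&o->refcount);

		while (old != 0) {
			/* On CAS failure, old is reloaded and we retry. */
			if (atomic_compare_exchange_weak(&o->refcount,
							 &old, old + 1))
				return true;
		}
		return false;	/* count was zero: get refused */
	}

	/* Mirrors atomic_dec_and_test(): true for the final put. */
	static bool obj_put(struct obj *o)
	{
		return atomic_fetch_sub(&o->refcount, 1) == 1;
	}
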
ipc/util.h
@@ -119,7 +119,7 @@ void ipc_free(void* ptr, int size);
  * to 0 schedules the rcu destruction. Caller must guarantee locking.
  */
 void* ipc_rcu_alloc(int size);
-void ipc_rcu_getref(void *ptr);
+int ipc_rcu_getref(void *ptr);
 void ipc_rcu_putref(void *ptr);
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);