Commit 72785ef7 authored by Bob Miller, committed by Linus Torvalds

[PATCH] 2.5.25 remove global semaphore_lock spin lock.

Replace the global semaphore_lock with the spinlock embedded in
the wait_queue_head_t.  None of the data protected by semaphore_lock
is global and there is no need to restrict the system to only allow
one semaphore to be dealt with at a time.

This removes 2 lock round trips from __down() and __down_interruptible().
It also reduces the number of cache lines touched by one (the cache
line holding semaphore_lock).
parent 5150c802
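In concrete terms, the change swaps one global lock for the spinlock that every wait_queue_head_t already carries. A minimal before/after sketch of the sleeper-accounting pattern (old_style/new_style are illustrative names, not functions from the patch; sem->wait.lock is the queue's embedded spinlock):

```c
/* Before: one global spinlock serializes sleeper accounting for every
 * semaphore in the system. */
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

static void old_style(struct semaphore *sem)
{
	spin_lock_irq(&semaphore_lock);		/* all semaphores contend here */
	sem->sleepers++;
	spin_unlock_irq(&semaphore_lock);
}

/* After: reuse the spinlock embedded in this semaphore's own wait queue,
 * so unrelated semaphores never bounce each other's cache lines. */
static void new_style(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);	/* per-semaphore lock */
	sem->sleepers++;
	spin_unlock_irqrestore(&sem->wait.lock, flags);
}
```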
@@ -28,8 +28,8 @@
  * needs to do something only if count was negative before
  * the increment operation.
  *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
  *
  * Note that these functions are only called when there is
  * contention on the lock, and as such all this is the
@@ -53,39 +53,41 @@ void __up(struct semaphore *sem)
 	wake_up(&sem->wait);
 }
 
-static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
 void __down(struct semaphore * sem)
 {
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
 	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
-	spin_lock_irq(&semaphore_lock);
 	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
 
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
+		 * playing, because we own the spinlock in
+		 * the wait_queue_head.
 		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
 }
 
 int __down_interruptible(struct semaphore * sem)
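The sleepers/atomic_add_negative() dance above is easy to misread. A worked userspace model of the accounting (an illustrative sketch with plain ints, not kernel code; it mirrors the fast-path decrement in down() and the folding done in __down()):

```c
/*
 * Model: a mutex-style semaphore starts at count = 1.  Every down()
 * decrements count up front; the slow path then folds the other
 * sleepers back into count so sem->sleepers stays 0 or 1.
 */
#include <assert.h>

int main(void)
{
	int count = 1, sleepers = 0;

	count--;		/* task A: fast-path decrement, count = 0, acquired */
	count--;		/* task B: fast path fails, count = -1, enters __down() */
	sleepers++;		/* B: sem->sleepers = 1 */

	/* B: atomic_add_negative(sleepers - 1, &count) adds 0; count stays
	 * -1, still negative, so B sets sleepers = 1 and sleeps. */
	count += sleepers - 1;
	assert(count == -1);

	count++;		/* A: up() increment, count = 0, waiter gets woken */

	/* B wakes and loops: adds sleepers - 1 = 0 again; count = 0 is not
	 * negative, so B owns the semaphore and zeroes sleepers. */
	count += sleepers - 1;
	assert(count == 0);
	sleepers = 0;
	return 0;
}
```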
@@ -93,11 +95,13 @@ int __down_interruptible(struct semaphore * sem)
 	int retval = 0;
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
 	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
+	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
 
@@ -117,25 +121,27 @@ int __down_interruptible(struct semaphore * sem)
 
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
+		 * playing, because we own the spinlock in
+		 * wait_queue_head. The "-1" is because we're
+		 * still hoping to get the semaphore.
		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
 
 	return retval;
 }
@@ -152,18 +158,20 @@ int __down_trylock(struct semaphore * sem)
 	int sleepers;
 	unsigned long flags;
 
-	spin_lock_irqsave(&semaphore_lock, flags);
+	spin_lock_irqsave(&sem->wait.lock, flags);
 	sleepers = sem->sleepers + 1;
 	sem->sleepers = 0;
 
 	/*
 	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
+	 * playing, because we own the spinlock in the
+	 * wait_queue_head.
 	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
+	if (!atomic_add_negative(sleepers, &sem->count)) {
+		wake_up_locked(&sem->wait);
+	}
 
-	spin_unlock_irqrestore(&semaphore_lock, flags);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return 1;
 }
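__down_trylock() is the contended slow path behind down_trylock(); for context, a hedged usage sketch under the usual convention (down_trylock() returns 0 when the semaphore was acquired, nonzero when it would have had to sleep; try_do_work is an illustrative name):

```c
static int try_do_work(struct semaphore *sem)
{
	if (down_trylock(sem))		/* nonzero: would sleep, so bail out */
		return -EAGAIN;
	/* ... critical section ... */
	up(sem);
	return 0;
}
```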
@@ -487,6 +487,7 @@ extern unsigned long prof_len;
 extern unsigned long prof_shift;
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(sleep_on(wait_queue_head_t *q));
 extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
@@ -504,6 +505,7 @@ extern void FASTCALL(sched_exit(task_t * p));
 #define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
 #define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
+#define wake_up_locked(x)	__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #ifdef CONFIG_SMP
 #define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
 #else
@@ -696,6 +698,25 @@ do { \
 	remove_wait_queue(&wq, &__wait);				\
 } while (0)
 
+/*
+ * Must be called with the spinlock in the wait_queue_head_t held.
+ */
+static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
+						   wait_queue_t * wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_tail(q, wait);
+}
+
+/*
+ * Must be called with the spinlock in the wait_queue_head_t held.
+ */
+static inline void remove_wait_queue_locked(wait_queue_head_t *q,
+					    wait_queue_t * wait)
+{
+	__remove_wait_queue(q, wait);
+}
+
 #define wait_event_interruptible(wq, condition)			\
 ({									\
 	int __ret = 0;							\
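The new _locked helpers assume the caller already holds the waitqueue's spinlock. A minimal sketch of the intended calling convention (wait_under_queue_lock is an illustrative function, not part of the patch; it brackets the queue manipulation with q->lock exactly as the reworked __down() does):

```c
static void wait_under_queue_lock(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	add_wait_queue_exclusive_locked(q, wait);
	/* ... examine shared state protected by q->lock ... */
	remove_wait_queue_locked(q, wait);
	wake_up_locked(q);	/* pass any pending wakeup on, lock still held */
	spin_unlock_irqrestore(&q->lock, flags);
}
```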
@@ -928,7 +928,7 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
 {
 	struct list_head *tmp, *next;
 
@@ -956,6 +956,14 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
+/*
+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+ */
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+{
+	__wake_up_common(q, mode, 1, 0);
+}
+
 #if CONFIG_SMP
 
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
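One subtlety worth noting: the wake_up_locked() macro added in sched.h passes TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE because a semaphore's wait queue can hold both __down() and __down_interruptible() sleepers. Paraphrasing the filter inside __wake_up_common() of this era (a sketch with an illustrative helper name, not the patch's code):

```c
/* A sleeper is a wakeup candidate only if its task state intersects
 * the caller's mode mask. */
static int wakeup_candidate(struct task_struct *p, unsigned int mode)
{
	return (p->state & mode) != 0;
}
```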