Commit dfdacf59 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] use prepare_to_wait in VM/VFS

This uses the new wakeup machinery in some hot parts of the VFS and
block layers.

wait_on_buffer(), wait_on_page(), lock_page(), blk_congestion_wait().
Also in get_request_wait(), although the benefit for exclusive wakeups
will be lower.
parent 3da08d6c
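
For reference, each converted call site follows the same wait-loop shape. Below is a
minimal sketch of the prepare_to_wait()/finish_wait() idiom (not part of the patch),
assuming a hypothetical wait queue head example_wq and a hypothetical predicate
condition_is_met():

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);     /* hypothetical wait queue */

static void example_wait_for_condition(void)
{
        DEFINE_WAIT(wait);

        do {
                /* enqueue the waiter (if needed) and set the task state in one step */
                prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
                if (!condition_is_met())        /* hypothetical condition */
                        schedule();
        } while (!condition_is_met());
        finish_wait(&example_wq, &wait);        /* back to TASK_RUNNING, dequeued */
}

Because the condition is re-tested after prepare_to_wait() has set the task state, a
wake_up() that arrives between the test and schedule() simply leaves the task runnable
rather than being lost; that is what lets the old add_wait_queue()/set_current_state()/
remove_wait_queue() boilerplate collapse into this pattern.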
@@ -1233,24 +1233,23 @@ static struct request *get_request(request_queue_t *q, int rw)
  */
 static struct request *get_request_wait(request_queue_t *q, int rw)
 {
-        DECLARE_WAITQUEUE(wait, current);
+        DEFINE_WAIT(wait);
         struct request_list *rl = &q->rq[rw];
         struct request *rq;

         spin_lock_prefetch(q->queue_lock);
         generic_unplug_device(q);
-        add_wait_queue_exclusive(&rl->wait, &wait);
         do {
-                set_current_state(TASK_UNINTERRUPTIBLE);
+                prepare_to_wait_exclusive(&rl->wait, &wait,
+                                TASK_UNINTERRUPTIBLE);
                 if (!rl->count)
                         schedule();
+                finish_wait(&rl->wait, &wait);
                 spin_lock_irq(q->queue_lock);
                 rq = get_request(q, rw);
                 spin_unlock_irq(q->queue_lock);
         } while (rq == NULL);
-        remove_wait_queue(&rl->wait, &wait);
-        current->state = TASK_RUNNING;
         return rq;
 }
@@ -1460,18 +1459,16 @@ void blk_put_request(struct request *req)
  */
 void blk_congestion_wait(int rw, long timeout)
 {
-        DECLARE_WAITQUEUE(wait, current);
+        DEFINE_WAIT(wait);
         struct congestion_state *cs = &congestion_states[rw];

         if (atomic_read(&cs->nr_congested_queues) == 0)
                 return;
         blk_run_queues();
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        add_wait_queue(&cs->wqh, &wait);
+        prepare_to_wait(&cs->wqh, &wait, TASK_UNINTERRUPTIBLE);
         if (atomic_read(&cs->nr_congested_queues) != 0)
                 schedule_timeout(timeout);
-        set_current_state(TASK_RUNNING);
-        remove_wait_queue(&cs->wqh, &wait);
+        finish_wait(&cs->wqh, &wait);
 }

 /*
@@ -128,22 +128,18 @@ void unlock_buffer(struct buffer_head *bh)
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-        wait_queue_head_t *wq = bh_waitq_head(bh);
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
+        wait_queue_head_t *wqh = bh_waitq_head(bh);
+        DEFINE_WAIT(wait);

         get_bh(bh);
-        add_wait_queue(wq, &wait);
         do {
+                prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
                 blk_run_queues();
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (!buffer_locked(bh))
-                        break;
-                schedule();
+                if (buffer_locked(bh))
+                        schedule();
         } while (buffer_locked(bh));
-        tsk->state = TASK_RUNNING;
-        remove_wait_queue(wq, &wait);
         put_bh(bh);
+        finish_wait(wqh, &wait);
 }

 static inline void
@@ -74,9 +74,15 @@ static inline void ___add_to_page_cache(struct page *page,
         inc_page_state(nr_pagecache);
 }

-extern void FASTCALL(lock_page(struct page *page));
+extern void FASTCALL(__lock_page(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));

+static inline void lock_page(struct page *page)
+{
+        if (TestSetPageLocked(page))
+                __lock_page(page);
+}
+
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  * Never use this directly!
@@ -632,19 +632,15 @@ static inline wait_queue_head_t *page_waitqueue(struct page *page)
 void wait_on_page_bit(struct page *page, int bit_nr)
 {
         wait_queue_head_t *waitqueue = page_waitqueue(page);
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
+        DEFINE_WAIT(wait);

-        add_wait_queue(waitqueue, &wait);
         do {
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (!test_bit(bit_nr, &page->flags))
-                        break;
+                prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE);
                 sync_page(page);
-                schedule();
+                if (test_bit(bit_nr, &page->flags))
+                        schedule();
         } while (test_bit(bit_nr, &page->flags));
-        __set_task_state(tsk, TASK_RUNNING);
-        remove_wait_queue(waitqueue, &wait);
+        finish_wait(waitqueue, &wait);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -690,38 +686,27 @@ void end_page_writeback(struct page *page)
 EXPORT_SYMBOL(end_page_writeback);

 /*
- * Get a lock on the page, assuming we need to sleep
- * to get it..
+ * Get a lock on the page, assuming we need to sleep to get it.
+ *
+ * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
+ * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
+ * chances are that on the second loop, the block layer's plug list is empty,
+ * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-static void __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
-        wait_queue_head_t *waitqueue = page_waitqueue(page);
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
+        wait_queue_head_t *wqh = page_waitqueue(page);
+        DEFINE_WAIT(wait);

-        add_wait_queue_exclusive(waitqueue, &wait);
-        for (;;) {
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (PageLocked(page)) {
-                        sync_page(page);
-                        schedule();
-                }
-                if (!TestSetPageLocked(page))
-                        break;
-        }
-        __set_task_state(tsk, TASK_RUNNING);
-        remove_wait_queue(waitqueue, &wait);
-}
-
-/*
- * Get an exclusive lock on the page, optimistically
- * assuming it's not locked..
- */
-void lock_page(struct page *page)
-{
-        if (TestSetPageLocked(page))
-                __lock_page(page);
+        while (TestSetPageLocked(page)) {
+                prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+                sync_page(page);
+                if (PageLocked(page))
+                        schedule();
+        }
+        finish_wait(wqh, &wait);
 }
+EXPORT_SYMBOL(__lock_page);

 /*
  * a rather lightweight function, finding and getting a reference to a