Commit dfdacf59 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] use prepare_to_wait in VM/VFS

This uses the new wakeup machinery in some hot parts of the VFS and
block layers.

The conversion covers wait_on_buffer(), wait_on_page(), lock_page() and
blk_congestion_wait(), and also get_request_wait(), although the benefit
from exclusive wakeups will be smaller there.
parent 3da08d6c
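
As a reader's aid, a minimal sketch of the conversion pattern this patch applies
at each call site is shown below.  It is illustrative only: the wait queue head
my_wqh, the flag my_cond and the function wait_for_my_cond() are invented for
the example and do not exist in the kernel; the real queues and conditions are
the ones in the hunks that follow.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wqh);         /* hypothetical wait queue */
static int my_cond;                             /* hypothetical condition  */

static void wait_for_my_cond(void)
{
        /*
         * Old idiom, removed by this patch:
         *
         *      DECLARE_WAITQUEUE(wait, current);
         *      add_wait_queue(&my_wqh, &wait);
         *      set_current_state(TASK_UNINTERRUPTIBLE);
         *      if (!my_cond)
         *              schedule();
         *      set_current_state(TASK_RUNNING);
         *      remove_wait_queue(&my_wqh, &wait);
         */
        DEFINE_WAIT(wait);

        do {
                /* Queue the waiter and set the task state in one step. */
                prepare_to_wait(&my_wqh, &wait, TASK_UNINTERRUPTIBLE);
                if (!my_cond)
                        schedule();
        } while (!my_cond);

        /* Restore TASK_RUNNING and dequeue if a wakeup did not already. */
        finish_wait(&my_wqh, &wait);
}

For exclusive waiters the same pattern uses prepare_to_wait_exclusive(), as in
get_request_wait() below.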
@@ -1233,24 +1233,23 @@ static struct request *get_request(request_queue_t *q, int rw)
  */
 static struct request *get_request_wait(request_queue_t *q, int rw)
 {
-        DECLARE_WAITQUEUE(wait, current);
+        DEFINE_WAIT(wait);
         struct request_list *rl = &q->rq[rw];
         struct request *rq;
 
         spin_lock_prefetch(q->queue_lock);
 
         generic_unplug_device(q);
-        add_wait_queue_exclusive(&rl->wait, &wait);
         do {
-                set_current_state(TASK_UNINTERRUPTIBLE);
+                prepare_to_wait_exclusive(&rl->wait, &wait,
+                                TASK_UNINTERRUPTIBLE);
                 if (!rl->count)
                         schedule();
+                finish_wait(&rl->wait, &wait);
                 spin_lock_irq(q->queue_lock);
                 rq = get_request(q, rw);
                 spin_unlock_irq(q->queue_lock);
         } while (rq == NULL);
-        remove_wait_queue(&rl->wait, &wait);
-        current->state = TASK_RUNNING;
         return rq;
 }
@@ -1460,18 +1459,16 @@ void blk_put_request(struct request *req)
  */
 void blk_congestion_wait(int rw, long timeout)
 {
-        DECLARE_WAITQUEUE(wait, current);
+        DEFINE_WAIT(wait);
         struct congestion_state *cs = &congestion_states[rw];
 
         if (atomic_read(&cs->nr_congested_queues) == 0)
                 return;
         blk_run_queues();
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        add_wait_queue(&cs->wqh, &wait);
+        prepare_to_wait(&cs->wqh, &wait, TASK_UNINTERRUPTIBLE);
         if (atomic_read(&cs->nr_congested_queues) != 0)
                 schedule_timeout(timeout);
-        set_current_state(TASK_RUNNING);
-        remove_wait_queue(&cs->wqh, &wait);
+        finish_wait(&cs->wqh, &wait);
 }
 
 /*
......
@@ -128,22 +128,18 @@ void unlock_buffer(struct buffer_head *bh)
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-        wait_queue_head_t *wq = bh_waitq_head(bh);
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
+        wait_queue_head_t *wqh = bh_waitq_head(bh);
+        DEFINE_WAIT(wait);
 
         get_bh(bh);
-        add_wait_queue(wq, &wait);
         do {
+                prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
                 blk_run_queues();
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (!buffer_locked(bh))
-                        break;
-                schedule();
+                if (buffer_locked(bh))
+                        schedule();
         } while (buffer_locked(bh));
-        tsk->state = TASK_RUNNING;
-        remove_wait_queue(wq, &wait);
         put_bh(bh);
+        finish_wait(wqh, &wait);
 }
 
 static inline void
......
@@ -74,9 +74,15 @@ static inline void ___add_to_page_cache(struct page *page,
         inc_page_state(nr_pagecache);
 }
 
-extern void FASTCALL(lock_page(struct page *page));
+extern void FASTCALL(__lock_page(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
+static inline void lock_page(struct page *page)
+{
+        if (TestSetPageLocked(page))
+                __lock_page(page);
+}
+
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  * Never use this directly!
......
@@ -632,19 +632,15 @@ static inline wait_queue_head_t *page_waitqueue(struct page *page)
 void wait_on_page_bit(struct page *page, int bit_nr)
 {
         wait_queue_head_t *waitqueue = page_waitqueue(page);
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
+        DEFINE_WAIT(wait);
 
-        add_wait_queue(waitqueue, &wait);
         do {
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (!test_bit(bit_nr, &page->flags))
-                        break;
+                prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE);
                 sync_page(page);
-                schedule();
+                if (test_bit(bit_nr, &page->flags))
+                        schedule();
         } while (test_bit(bit_nr, &page->flags));
-        __set_task_state(tsk, TASK_RUNNING);
-        remove_wait_queue(waitqueue, &wait);
+        finish_wait(waitqueue, &wait);
 }
 
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -690,38 +686,27 @@ void end_page_writeback(struct page *page)
 EXPORT_SYMBOL(end_page_writeback);
 
 /*
- * Get a lock on the page, assuming we need to sleep
- * to get it..
+ * Get a lock on the page, assuming we need to sleep to get it.
+ *
+ * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
+ * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
+ * chances are that on the second loop, the block layer's plug list is empty,
+ * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-static void __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
-        wait_queue_head_t *waitqueue = page_waitqueue(page);
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
+        wait_queue_head_t *wqh = page_waitqueue(page);
+        DEFINE_WAIT(wait);
 
-        add_wait_queue_exclusive(waitqueue, &wait);
-        for (;;) {
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (PageLocked(page)) {
-                        sync_page(page);
-                        schedule();
-                }
-                if (!TestSetPageLocked(page))
-                        break;
+        while (TestSetPageLocked(page)) {
+                prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+                sync_page(page);
+                if (PageLocked(page))
+                        schedule();
         }
-        __set_task_state(tsk, TASK_RUNNING);
-        remove_wait_queue(waitqueue, &wait);
-}
-
-/*
- * Get an exclusive lock on the page, optimistically
- * assuming it's not locked..
- */
-void lock_page(struct page *page)
-{
-        if (TestSetPageLocked(page))
-                __lock_page(page);
-}
+        finish_wait(wqh, &wait);
+}
+EXPORT_SYMBOL(__lock_page);
 
 /*
  * a rather lightweight function, finding and getting a reference to a
......