Commit 08aaf1cc authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] filtered wakeups: apply to pagecache functions

From: William Lee Irwin III <wli@holomorphy.com>

This patch implements wake-one semantics for page wakeups in a single step.
Discrimination between distinct pages is achieved by passing the page to the
wakeup function, which compares it to a pointer in its own on-stack structure
containing the waitqueue element and the page.  Bit discrimination is achieved
by storing the bit number in that same structure and testing the bit in the
wakeup function.  Wake-one semantics are achieved by using WQ_FLAG_EXCLUSIVE
in the codepaths waiting to acquire the bit for mutual exclusion.
parent 2afafa3b
@@ -291,6 +291,40 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
/*
 * Per-waiter descriptor for filtered page wakeups.  Each sleeper embeds
 * one of these on its stack; the wakeup function matches the (page, bit)
 * pair recorded here against the page passed as the wakeup key, so only
 * waiters interested in that specific page/bit are woken.
 */
struct page_wait_queue {
	struct page *page;	/* page this waiter is blocked on */
	int bit;		/* page->flags bit being waited for */
	wait_queue_t wait;	/* embedded generic waitqueue entry */
};
/*
 * Wakeup filter for page waitqueues.  @key is the page being woken;
 * wake this waiter only if it is waiting on that page and the bit it
 * cares about has actually been cleared.  Returns nonzero when a task
 * was woken (so exclusive wakeups stop after one success).
 */
static int page_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct page *page = key;
	struct page_wait_queue *pwq =
		container_of(wait, struct page_wait_queue, wait);

	if (pwq->page == page && !test_bit(pwq->bit, &page->flags))
		return autoremove_wake_function(wait, mode, sync, NULL);
	return 0;
}
/*
 * Declare an on-stack page_wait_queue named @name, waiting on bit @b of
 * page @p, with waitqueue flags @f (0 or WQ_FLAG_EXCLUSIVE).  The wakeup
 * callback is page_wake_function so wakeups are filtered per (page, bit).
 */
#define __DEFINE_PAGE_WAIT(name, p, b, f)				\
	struct page_wait_queue name = {					\
		.page	= p,						\
		.bit	= b,						\
		.wait	= {						\
			.task	= current,				\
			.func	= page_wake_function,			\
			.flags	= f,					\
			.task_list = LIST_HEAD_INIT(name.wait.task_list),\
		},							\
	}

/* Non-exclusive waiter: all matching waiters are woken. */
#define DEFINE_PAGE_WAIT(name, p, b)	__DEFINE_PAGE_WAIT(name, p, b, 0)
/* Exclusive waiter: wake-one semantics for lock-style bit acquisition. */
#define DEFINE_PAGE_WAIT_EXCLUSIVE(name, p, b)				\
		__DEFINE_PAGE_WAIT(name, p, b, WQ_FLAG_EXCLUSIVE)
static wait_queue_head_t *page_waitqueue(struct page *page) static wait_queue_head_t *page_waitqueue(struct page *page)
{ {
const struct zone *zone = page_zone(page); const struct zone *zone = page_zone(page);
...@@ -298,19 +332,28 @@ static wait_queue_head_t *page_waitqueue(struct page *page) ...@@ -298,19 +332,28 @@ static wait_queue_head_t *page_waitqueue(struct page *page)
return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
} }
/*
 * Wake waiters on @page.  Passes the page as the wakeup key so
 * page_wake_function can filter out waiters for other pages that hash
 * to the same waitqueue; nr_exclusive == 1 gives wake-one semantics for
 * exclusive (lock-bit) waiters while still waking all non-exclusive ones.
 */
static void wake_up_page(struct page *page)
{
	const unsigned int mode = TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE;
	wait_queue_head_t *wqh = page_waitqueue(page);

	if (!waitqueue_active(wqh))
		return;
	__wake_up(wqh, mode, 1, page);
}
/*
 * Sleep until @bit_nr in page->flags is clear.  Uses a filtered,
 * non-exclusive waiter so an unrelated page hashing to the same
 * waitqueue does not wake us spuriously.
 */
void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *waitqueue = page_waitqueue(page);
	DEFINE_PAGE_WAIT(wait, page, bit_nr);

	do {
		prepare_to_wait(waitqueue, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (test_bit(bit_nr, &page->flags)) {
			sync_page(page);
			io_schedule();
		}
	} while (test_bit(bit_nr, &page->flags));
	finish_wait(waitqueue, &wait.wait);
}
EXPORT_SYMBOL(wait_on_page_bit);
@@ -332,13 +375,11 @@ EXPORT_SYMBOL(wait_on_page_bit);
 */
/*
 * Clear PG_locked and wake waiters.  The memory barriers around the
 * test-and-clear order the critical section's stores before the unlock
 * becomes visible and before the wakeup check runs.
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();	/* unlocking a page that was not locked */
	smp_mb__after_clear_bit();
	wake_up_page(page);
}
EXPORT_SYMBOL(unlock_page);
@@ -349,15 +390,12 @@ EXPORT_SYMBOL(lock_page);
 */
/*
 * Clear PG_writeback (unless the page was requeued for reclaim rotation)
 * and wake anyone sleeping in wait_on_page_writeback().
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();	/* writeback was not in progress */
		smp_mb__after_clear_bit();
	}
	wake_up_page(page);
}
EXPORT_SYMBOL(end_page_writeback);
@@ -373,16 +411,16 @@ EXPORT_SYMBOL(end_page_writeback);
/*
 * Slow path of lock_page(): sleep until PG_locked can be acquired.
 * The waiter is exclusive (WQ_FLAG_EXCLUSIVE), so unlock_page()'s
 * wake-one only releases a single contender at a time instead of
 * thundering the whole hash bucket.
 */
void fastcall __lock_page(struct page *page)
{
	wait_queue_head_t *wqh = page_waitqueue(page);
	DEFINE_PAGE_WAIT_EXCLUSIVE(wait, page, PG_locked);

	while (TestSetPageLocked(page)) {
		prepare_to_wait_exclusive(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (PageLocked(page)) {
			sync_page(page);
			io_schedule();
		}
	}
	finish_wait(wqh, &wait.wait);
}
EXPORT_SYMBOL(__lock_page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment