Commit 4ab4cd4b authored by Benjamin LaHaise's avatar Benjamin LaHaise

[PATCH] add wait queue function callback support

This adds support for wait queue function callbacks, which are used by
aio to build async read / write operations on top of existing wait
queues at points that would normally block a process.
parent 905d294d
......@@ -19,13 +19,17 @@
#include <asm/page.h>
#include <asm/processor.h>
/*
 * A wait queue entry.  `func` is invoked at wake-up time; entries set up
 * with init_waitqueue_entry() use default_wake_function(), which wakes
 * `task`.  Entries set up with init_waitqueue_func_entry() carry a
 * caller-supplied callback and a NULL task (used by aio to run async
 * completion work from wake-ups).
 *
 * The wait_queue_t typedef is declared first so wait_queue_func_t can
 * reference it; the stale duplicate typedef that followed the struct
 * (diff residue) is removed — redeclaring a typedef is invalid C89/C99.
 */
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	struct task_struct * task;
	wait_queue_func_t func;
	struct list_head task_list;
};
struct __wait_queue_head {
spinlock_t lock;
......@@ -40,13 +44,14 @@ typedef struct __wait_queue_head wait_queue_head_t;
/*
 * Static initializer for a wait queue entry bound to task `tsk`, using
 * the default wake-up behaviour (default_wake_function wakes the stored
 * task).  task_list is { NULL, NULL } until the entry is queued.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
task: tsk, \
func: default_wake_function, \
task_list: { NULL, NULL } }
/* Declare and initialize a named wait queue entry for task `tsk`. */
#define DECLARE_WAITQUEUE(name, tsk) \
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
/*
 * Static initializer for a wait queue head: unlocked spinlock and an
 * empty (self-referencing) task list.  The duplicated `lock:` line left
 * by the stripped diff is removed — initializing the same field twice
 * is at best redundant and rejected by some compilers.
 */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
lock: SPIN_LOCK_UNLOCKED, \
task_list: { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
......@@ -62,6 +67,15 @@ static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
/* Classic entry: non-exclusive, wakes `p` via the default callback. */
q->flags = 0;
q->task = p;
/* default_wake_function wakes q->task at wake-up time. */
q->func = default_wake_function;
}
/*
 * Initialize a wait queue entry that invokes a caller-supplied callback
 * at wake-up time instead of waking a task; q->task is left NULL.  This
 * is the hook aio uses to build async read/write on top of wait queues.
 */
static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->func = func;
	q->task = NULL;
	q->flags = 0;
}
static inline int waitqueue_active(wait_queue_head_t *q)
......@@ -89,6 +103,22 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
list_del(&old->task_list);
}
/*
 * Atomically add `wait` to queue head `q` unless `cond` has become false.
 *
 * The entry is inserted under q->lock; `cond` is then re-checked (rmb()
 * orders the re-check against the list insertion) and the entry is backed
 * out again if the condition no longer holds.  Evaluates to 1 when the
 * race was lost (entry NOT left on the queue), 0 otherwise.
 *
 * Fix: the lock was "released" with spin_lock_irqrestore — no such API
 * exists, and the irqsave'd lock would never be dropped.  It must be
 * spin_unlock_irqrestore to pair with spin_lock_irqsave.
 */
#define add_wait_queue_cond(q, wait, cond) \
	({ \
		unsigned long flags; \
		int _raced = 0; \
		spin_lock_irqsave(&(q)->lock, flags); \
		(wait)->flags = 0; \
		__add_wait_queue((q), (wait)); \
		rmb(); \
		if (!(cond)) { \
			_raced = 1; \
			__remove_wait_queue((q), (wait)); \
		} \
		spin_unlock_irqrestore(&(q)->lock, flags); \
		_raced; \
	})
#endif /* __KERNEL__ */
#endif
......@@ -905,6 +905,12 @@ asmlinkage void preempt_schedule(void)
}
#endif /* CONFIG_PREEMPT */
/*
 * Default wake-up callback for wait queue entries: wake the entry's task
 * if it is in one of the states selected by `mode`.  Returns nonzero
 * only when the task was actually woken by try_to_wake_up().
 */
int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
{
	task_t *task = curr->task;

	if (!(task->state & mode))
		return 0;

	/* `!= 0` preserves the 0/1 result the original `&&` produced. */
	return try_to_wake_up(task, sync) != 0;
}
/*
* The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
* wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
......@@ -916,18 +922,17 @@ asmlinkage void preempt_schedule(void)
*/
/*
 * Walk the queue, invoking each entry's wake callback.  Reconstructed
 * post-patch body: the stripped diff had interleaved the old and new
 * versions (duplicate loop headers and declarations), which cannot
 * compile.  Only the NEW lines of the hunk are kept.
 */
static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
{
	struct list_head *tmp, *next;

	/*
	 * list_for_each_safe: curr->func() may remove `curr` from the
	 * queue (e.g. an aio callback), so fetch the next node first.
	 */
	list_for_each_safe(tmp, next, &q->task_list) {
		wait_queue_t *curr;
		unsigned flags;

		curr = list_entry(tmp, wait_queue_t, task_list);
		/*
		 * Sample flags before calling func: the callback may free
		 * or requeue the entry, making `curr` unsafe to touch.
		 */
		flags = curr->flags;
		if (curr->func(curr, mode, sync) &&
		    (flags & WQ_FLAG_EXCLUSIVE) &&
		    !--nr_exclusive)
			break;
	}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment