Commit 4ab4cd4b authored by Benjamin LaHaise

[PATCH] add wait queue function callback support

This adds support for wait queue function callbacks, which are used by
aio to build async read / write operations on top of existing wait
queues at points that would normally block a process.
parent 905d294d
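Editor's note: as a rough, hedged sketch (against the patched include/linux/wait.h) of how an async path might use the new hooks. The names my_waitq, my_event_done, my_wake_function and my_subscribe are invented for illustration and do not appear in this patch.

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* hypothetical wait queue */
static int my_event_done;			/* hypothetical readiness flag */
static wait_queue_t my_wait;			/* one entry per pending operation */

/* Called by __wake_up_common() instead of try_to_wake_up(); returning
 * nonzero makes the wakeup count against nr_exclusive. */
static int my_wake_function(wait_queue_t *wait, unsigned mode, int sync)
{
	/* kick off the deferred retry/completion work here */
	return 1;
}

static void my_subscribe(void)
{
	/* the entry carries a callback and no task */
	init_waitqueue_func_entry(&my_wait, my_wake_function);

	/* stay queued only while the event is still pending; a nonzero
	 * return means the event fired first and the entry was removed,
	 * so complete the work synchronously instead */
	if (add_wait_queue_cond(&my_waitq, &my_wait, !my_event_done))
		my_wake_function(&my_wait, 0, 0);
}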
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -19,13 +19,17 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
+typedef struct __wait_queue wait_queue_t;
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
+extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);
+
 struct __wait_queue {
 	unsigned int flags;
 #define WQ_FLAG_EXCLUSIVE	0x01
 	struct task_struct * task;
+	wait_queue_func_t func;
 	struct list_head task_list;
 };
-typedef struct __wait_queue wait_queue_t;
 
 struct __wait_queue_head {
 	spinlock_t lock;
@@ -40,13 +44,14 @@ typedef struct __wait_queue_head wait_queue_head_t;
 
 #define __WAITQUEUE_INITIALIZER(name, tsk) { \
 	task:		tsk, \
+	func:		default_wake_function, \
 	task_list:	{ NULL, NULL } }
 
 #define DECLARE_WAITQUEUE(name, tsk) \
 	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
 	lock:		SPIN_LOCK_UNLOCKED, \
 	task_list:	{ &(name).task_list, &(name).task_list } }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
@@ -62,6 +67,15 @@ static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 {
 	q->flags = 0;
 	q->task = p;
+	q->func = default_wake_function;
+}
+
+static inline void init_waitqueue_func_entry(wait_queue_t *q,
+					     wait_queue_func_t func)
+{
+	q->flags = 0;
+	q->task = NULL;
+	q->func = func;
 }
 
 static inline int waitqueue_active(wait_queue_head_t *q)
@@ -89,6 +103,22 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
 	list_del(&old->task_list);
 }
 
+#define add_wait_queue_cond(q, wait, cond) \
+	({ \
+		unsigned long flags; \
+		int _raced = 0; \
+		spin_lock_irqsave(&(q)->lock, flags); \
+		(wait)->flags = 0; \
+		__add_wait_queue((q), (wait)); \
+		rmb(); \
+		if (!(cond)) { \
+			_raced = 1; \
+			__remove_wait_queue((q), (wait)); \
+		} \
+		spin_unlock_irqrestore(&(q)->lock, flags); \
+		_raced; \
+	})
+
 #endif /* __KERNEL__ */
 #endif
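Editor's note: for context, a minimal sketch of an unchanged "classic" sleeper built against the patched wait.h. Because DECLARE_WAITQUEUE() and init_waitqueue_entry() now install default_wake_function(), code like the following behaves exactly as it did before the patch; my_waitq, my_cond and my_wait_for_cond are invented names.

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* hypothetical wait queue */
static int my_cond;				/* hypothetical condition */

static void my_wait_for_cond(void)
{
	/* func is default_wake_function, so wakeups still go through
	 * try_to_wake_up() on this task, just as before the patch */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&my_waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_cond)
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_waitq, &wait);
}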
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -905,6 +905,12 @@ asmlinkage void preempt_schedule(void)
 }
 #endif /* CONFIG_PREEMPT */
 
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
+{
+	task_t *p = curr->task;
+	return ((p->state & mode) && try_to_wake_up(p, sync));
+}
+
 /*
  * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
@@ -916,18 +922,17 @@ asmlinkage void preempt_schedule(void)
  */
 static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
 {
-	struct list_head *tmp;
-	unsigned int state;
-	wait_queue_t *curr;
-	task_t *p;
+	struct list_head *tmp, *next;
 
-	list_for_each(tmp, &q->task_list) {
+	list_for_each_safe(tmp, next, &q->task_list) {
+		wait_queue_t *curr;
+		unsigned flags;
 		curr = list_entry(tmp, wait_queue_t, task_list);
-		p = curr->task;
-		state = p->state;
-		if ((state & mode) && try_to_wake_up(p, sync) &&
-		    ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive))
+		flags = curr->flags;
+		if (curr->func(curr, mode, sync) &&
+		    (flags & WQ_FLAG_EXCLUSIVE) &&
+		    !--nr_exclusive)
 			break;
 	}
 }
...
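Editor's note: a side effect of the __wake_up_common() change above is that, because the loop now uses list_for_each_safe() and runs under the wait queue lock taken by the __wake_up() callers, a callback may unlink its own entry while the list is still being walked. A hedged sketch of such a one-shot callback; my_oneshot_wake is an invented name, not part of this patch.

#include <linux/list.h>
#include <linux/wait.h>

/* Hypothetical one-shot callback: it removes its own wait queue entry
 * before returning, which is safe because __wake_up_common() now walks
 * the list with list_for_each_safe() while the caller holds q->lock. */
static int my_oneshot_wake(wait_queue_t *wait, unsigned mode, int sync)
{
	list_del_init(&wait->task_list);	/* drop ourselves from the queue */
	/* ... schedule the deferred completion work here ... */
	return 1;				/* count as a successful wakeup */
}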