Commit 4b1191c8 authored by Trond Myklebust's avatar Trond Myklebust

[PATCH] 2.5.25 Clean up RPC receive code [part 2]

  After getting rid of rpc_lock_task() from net/sunrpc/xprt.c (see the
previous patch), we can now remove it from the generic RPC queue
handling code.
parent 1eedbd78
...@@ -75,9 +75,7 @@ struct rpc_task { ...@@ -75,9 +75,7 @@ struct rpc_task {
wait_queue_head_t tk_wait; /* sync: sleep on this q */ wait_queue_head_t tk_wait; /* sync: sleep on this q */
unsigned long tk_timeout; /* timeout for rpc_sleep() */ unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned short tk_flags; /* misc flags */ unsigned short tk_flags; /* misc flags */
unsigned short tk_lock; /* Task lock counter */ unsigned char tk_active : 1;/* Task has been activated */
unsigned char tk_active : 1,/* Task has been activated */
tk_wakeup : 1;/* Task waiting to wake up */
unsigned long tk_runstate; /* Task run status */ unsigned long tk_runstate; /* Task run status */
#ifdef RPC_DEBUG #ifdef RPC_DEBUG
unsigned short tk_pid; /* debugging aid */ unsigned short tk_pid; /* debugging aid */
...@@ -181,15 +179,11 @@ int rpc_add_wait_queue(struct rpc_wait_queue *, struct rpc_task *); ...@@ -181,15 +179,11 @@ int rpc_add_wait_queue(struct rpc_wait_queue *, struct rpc_task *);
void rpc_remove_wait_queue(struct rpc_task *); void rpc_remove_wait_queue(struct rpc_task *);
void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
rpc_action action, rpc_action timer); rpc_action action, rpc_action timer);
void rpc_sleep_locked(struct rpc_wait_queue *, struct rpc_task *,
rpc_action action, rpc_action timer);
void rpc_add_timer(struct rpc_task *, rpc_action); void rpc_add_timer(struct rpc_task *, rpc_action);
void rpc_wake_up_task(struct rpc_task *); void rpc_wake_up_task(struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *); void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int); void rpc_wake_up_status(struct rpc_wait_queue *, int);
int __rpc_lock_task(struct rpc_task *);
void rpc_unlock_task(struct rpc_task *);
void rpc_delay(struct rpc_task *, unsigned long); void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_allocate(unsigned int flags, unsigned int); void * rpc_allocate(unsigned int flags, unsigned int);
void rpc_free(void *); void rpc_free(void *);
......
...@@ -73,7 +73,7 @@ static int rpc_inhibit; ...@@ -73,7 +73,7 @@ static int rpc_inhibit;
* Spinlock for wait queues. Access to the latter also has to be * Spinlock for wait queues. Access to the latter also has to be
* interrupt-safe in order to allow timers to wake up sleeping tasks. * interrupt-safe in order to allow timers to wake up sleeping tasks.
*/ */
spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED; static spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
/* /*
* Spinlock for other critical sections of code. * Spinlock for other critical sections of code.
*/ */
...@@ -157,7 +157,7 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer) ...@@ -157,7 +157,7 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer)
void rpc_add_timer(struct rpc_task *task, rpc_action timer) void rpc_add_timer(struct rpc_task *task, rpc_action timer)
{ {
spin_lock_bh(&rpc_queue_lock); spin_lock_bh(&rpc_queue_lock);
if (!(RPC_IS_RUNNING(task) || task->tk_wakeup)) if (!RPC_IS_RUNNING(task))
__rpc_add_timer(task, timer); __rpc_add_timer(task, timer);
spin_unlock_bh(&rpc_queue_lock); spin_unlock_bh(&rpc_queue_lock);
} }
...@@ -358,27 +358,10 @@ rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, ...@@ -358,27 +358,10 @@ rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
spin_unlock_bh(&rpc_queue_lock); spin_unlock_bh(&rpc_queue_lock);
} }
/*
 * Put @task to sleep on wait queue @q and pin it in place in one
 * atomic step: the queue lock is held across both operations so the
 * task cannot be woken (and freed) between being queued and being
 * locked.  The caller later releases the pin with rpc_unlock_task().
 *
 * @q:      wait queue to sleep on
 * @task:   task being put to sleep
 * @action: callback to run when the task resumes
 * @timer:  timeout callback
 */
void
rpc_sleep_locked(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action, rpc_action timer)
{
/*
 * Protect the queue operations.
 */
spin_lock_bh(&rpc_queue_lock);
__rpc_sleep_on(q, task, action, timer);
/* Must follow __rpc_sleep_on(): the task is only lockable once it
 * is queued and no longer running (see __rpc_lock_task()). */
__rpc_lock_task(task);
spin_unlock_bh(&rpc_queue_lock);
}
/** /**
* __rpc_wake_up_task - wake up a single rpc_task * __rpc_wake_up_task - wake up a single rpc_task
* @task: task to be woken up * @task: task to be woken up
* *
* If the task is locked, it is merely removed from the queue, and
* 'task->tk_wakeup' is set. rpc_unlock_task() will then ensure
* that it is woken up as soon as the lock count goes to zero.
*
* Caller must hold rpc_queue_lock * Caller must hold rpc_queue_lock
*/ */
static void static void
...@@ -407,14 +390,6 @@ __rpc_wake_up_task(struct rpc_task *task) ...@@ -407,14 +390,6 @@ __rpc_wake_up_task(struct rpc_task *task)
if (task->tk_rpcwait != &schedq) if (task->tk_rpcwait != &schedq)
__rpc_remove_wait_queue(task); __rpc_remove_wait_queue(task);
/* If the task has been locked, then set tk_wakeup so that
* rpc_unlock_task() wakes us up... */
if (task->tk_lock) {
task->tk_wakeup = 1;
return;
} else
task->tk_wakeup = 0;
rpc_make_runnable(task); rpc_make_runnable(task);
dprintk("RPC: __rpc_wake_up_task done\n"); dprintk("RPC: __rpc_wake_up_task done\n");
...@@ -501,30 +476,6 @@ rpc_wake_up_status(struct rpc_wait_queue *queue, int status) ...@@ -501,30 +476,6 @@ rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
spin_unlock_bh(&rpc_queue_lock); spin_unlock_bh(&rpc_queue_lock);
} }
/*
* Lock down a sleeping task to prevent it from waking up
* and disappearing from beneath us.
*
* This function should always be called with the
* rpc_queue_lock held.
*/
int
__rpc_lock_task(struct rpc_task *task)
{
	/* A running task cannot be pinned down: report failure. */
	if (RPC_IS_RUNNING(task))
		return 0;
	/* Sleeping task: bump the lock count and return the new
	 * (non-zero) value as the success indication. */
	return ++task->tk_lock;
}
/*
 * Drop one reference on the task's lock count.  If this was the last
 * reference and a wakeup was deferred while the task was locked
 * (tk_wakeup set by __rpc_wake_up_task()), deliver that wakeup now.
 */
void
rpc_unlock_task(struct rpc_task *task)
{
	spin_lock_bh(&rpc_queue_lock);
	if (task->tk_lock != 0) {
		--task->tk_lock;
		if (task->tk_lock == 0 && task->tk_wakeup)
			__rpc_wake_up_task(task);
	}
	spin_unlock_bh(&rpc_queue_lock);
}
/* /*
* Run a task at a later time * Run a task at a later time
*/ */
...@@ -707,15 +658,6 @@ __rpc_schedule(void) ...@@ -707,15 +658,6 @@ __rpc_schedule(void)
spin_lock_bh(&rpc_queue_lock); spin_lock_bh(&rpc_queue_lock);
task_for_first(task, &schedq.tasks) { task_for_first(task, &schedq.tasks) {
if (task->tk_lock) {
spin_unlock_bh(&rpc_queue_lock);
printk(KERN_ERR "RPC: Locked task was scheduled !!!!\n");
#ifdef RPC_DEBUG
rpc_debug = ~0;
rpc_show_tasks();
#endif
break;
}
__rpc_remove_wait_queue(task); __rpc_remove_wait_queue(task);
spin_unlock_bh(&rpc_queue_lock); spin_unlock_bh(&rpc_queue_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment