Commit 7b31f4da authored by NeilBrown, committed by Chuck Lever

SUNRPC: rename and refactor svc_get_next_xprt()

svc_get_next_xprt() does a lot more than just get an xprt.  It also
decides if it needs to sleep, depending not only on the availability of
xprts but also on the need to exit or handle external work.

So rename it to svc_rqst_wait_for_work() and only do the testing and
waiting.  Move all the waiting-related code out of svc_recv() into the
new svc_rqst_wait_for_work().

Move the dequeueing code out of svc_get_next_xprt() into svc_recv().

Previously svc_xprt_dequeue() would be called twice, once before waiting
and possibly once after.  Now instead rqst_should_sleep() is called
twice: once to decide if waiting is needed, and once to check again
after setting the task state to see if we might have missed a wakeup.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent e3274026
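
The double call to rqst_should_sleep() is the usual kernel idiom for avoiding a lost wakeup: test the condition, publish the task state, then test again before calling schedule().  A minimal, self-contained sketch of that idiom follows; work_pending_flag and wait_for_work_sketch() are made-up names used only for illustration, while the real code uses rqst_should_sleep() together with the RQ_BUSY and SP_* bits shown in the diff below.

#include <linux/atomic.h>
#include <linux/sched.h>

static atomic_t work_pending_flag;      /* hypothetical; a waker sets this, then wakes the thread */

static void wait_for_work_sketch(void)
{
        if (atomic_read(&work_pending_flag))
                return;                         /* fast path: work is already queued */

        set_current_state(TASK_IDLE);           /* publish the intent to sleep */

        /* Re-check after setting the task state.  A waker that queued work
         * and called wake_up_process() between the first test and
         * set_current_state() merely leaves this task runnable, so the
         * wakeup cannot be lost.
         */
        if (!atomic_read(&work_pending_flag))
                schedule();                     /* really go to sleep */
        else
                __set_current_state(TASK_RUNNING);      /* a wakeup raced with us: stay awake */
}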
net/sunrpc/svc_xprt.c
@@ -722,51 +722,34 @@ rqst_should_sleep(struct svc_rqst *rqstp)
         return true;
 }
 
-static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
+static void svc_rqst_wait_for_work(struct svc_rqst *rqstp)
 {
-        struct svc_pool         *pool = rqstp->rq_pool;
-
-        /* rq_xprt should be clear on entry */
-        WARN_ON_ONCE(rqstp->rq_xprt);
+        struct svc_pool *pool = rqstp->rq_pool;
 
-        rqstp->rq_xprt = svc_xprt_dequeue(pool);
-        if (rqstp->rq_xprt)
-                goto out_found;
-
-        set_current_state(TASK_IDLE);
-        smp_mb__before_atomic();
-        clear_bit(SP_CONGESTED, &pool->sp_flags);
-        clear_bit(RQ_BUSY, &rqstp->rq_flags);
-        smp_mb__after_atomic();
-
-        if (likely(rqst_should_sleep(rqstp)))
-                schedule();
-        else
-                __set_current_state(TASK_RUNNING);
-
+        if (rqst_should_sleep(rqstp)) {
+                set_current_state(TASK_IDLE);
+                smp_mb__before_atomic();
+                clear_bit(SP_CONGESTED, &pool->sp_flags);
+                clear_bit(RQ_BUSY, &rqstp->rq_flags);
+                smp_mb__after_atomic();
+
+                /* Need to check should_sleep() again after
+                 * setting task state in case a wakeup happened
+                 * between testing and setting.
+                 */
+                if (rqst_should_sleep(rqstp)) {
+                        schedule();
+                } else {
+                        __set_current_state(TASK_RUNNING);
+                        cond_resched();
+                }
+
+                set_bit(RQ_BUSY, &rqstp->rq_flags);
+                smp_mb__after_atomic();
+        } else {
+                cond_resched();
+        }
         try_to_freeze();
-
-        set_bit(RQ_BUSY, &rqstp->rq_flags);
-        smp_mb__after_atomic();
-        clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-        rqstp->rq_xprt = svc_xprt_dequeue(pool);
-        if (rqstp->rq_xprt)
-                goto out_found;
-
-        if (kthread_should_stop())
-                return NULL;
-        return NULL;
-out_found:
-        clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-        /* Normally we will wait up to 5 seconds for any required
-         * cache information to be provided.
-         */
-        if (!test_bit(SP_CONGESTED, &pool->sp_flags))
-                rqstp->rq_chandle.thread_wait = 5*HZ;
-        else
-                rqstp->rq_chandle.thread_wait = 1*HZ;
-        trace_svc_xprt_dequeue(rqstp);
-        return rqstp->rq_xprt;
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -858,20 +841,33 @@ static void svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
  */
 void svc_recv(struct svc_rqst *rqstp)
 {
-        struct svc_xprt *xprt = NULL;
+        struct svc_pool *pool = rqstp->rq_pool;
 
         if (!svc_alloc_arg(rqstp))
                 return;
 
-        try_to_freeze();
-        cond_resched();
+        svc_rqst_wait_for_work(rqstp);
+
+        clear_bit(SP_TASK_PENDING, &pool->sp_flags);
+
         if (kthread_should_stop())
-                goto out;
+                return;
 
-        xprt = svc_get_next_xprt(rqstp);
-        if (xprt)
+        rqstp->rq_xprt = svc_xprt_dequeue(pool);
+        if (rqstp->rq_xprt) {
+                struct svc_xprt *xprt = rqstp->rq_xprt;
+
+                /* Normally we will wait up to 5 seconds for any required
+                 * cache information to be provided.
+                 */
+                if (!test_bit(SP_CONGESTED, &pool->sp_flags))
+                        rqstp->rq_chandle.thread_wait = 5 * HZ;
+                else
+                        rqstp->rq_chandle.thread_wait = 1 * HZ;
+
+                trace_svc_xprt_dequeue(rqstp);
                 svc_handle_xprt(rqstp, xprt);
-out:
+        }
 }
 EXPORT_SYMBOL_GPL(svc_recv);
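
With this split, svc_recv() owns the whole wait/dequeue/handle cycle, so a server thread simply calls it in a loop.  The sketch below shows that calling pattern only loosely, modelled on service threads such as nfsd and lockd; my_svc_thread() is not real kernel code, and module references, signal handling and other shutdown details are omitted.

#include <linux/kthread.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>

static int my_svc_thread(void *data)
{
        struct svc_rqst *rqstp = data;

        while (!kthread_should_stop()) {
                /* svc_recv() waits in svc_rqst_wait_for_work(), then
                 * dequeues and handles at most one transport before
                 * returning, so the loop body stays trivial.
                 */
                svc_recv(rqstp);
        }
        svc_exit_thread(rqstp);
        return 0;
}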