Commit 8c7b0172 authored by Tom Tucker, committed by J. Bruce Fields

svc: Make deferral processing xprt independent

This patch moves the transport-independent sk_deferred list to the svc_xprt
structure and updates svc_deferred_req to point directly at the svc_xprt.
The deferral processing code is also moved out of the transport-dependent
recvfrom functions and into the generic svc_recv path.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
parent def13d74
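
The practical effect of the refactor is easiest to see from a transport implementer's point of view: svc_recv() now drains the xprt-level deferral queue before calling into the transport, so an xpo_recvfrom method needs no deferral handling of its own. A minimal sketch of what that buys a hypothetical transport (example_xprt_recvfrom and example_read_request are illustrative names, not from this patch):

/*
 * Hypothetical transport recvfrom: with this patch, deferred requests
 * never reach this function, because svc_recv() already checked
 * svc_deferred_dequeue() and replayed any hit via svc_deferred_recv()
 * (see the svc_recv hunk below).
 */
static int example_xprt_recvfrom(struct svc_rqst *rqstp)
{
	/* Only fresh data from the wire is handled here; the deferral
	 * boilerplate removed from svc_udp_recvfrom/svc_tcp_recvfrom
	 * below is no longer required per transport. */
	return example_read_request(rqstp);	/* assumed helper */
}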
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -320,7 +320,7 @@ static inline void svc_free_res_pages(struct svc_rqst *rqstp)
 struct svc_deferred_req {
 	u32			prot;	/* protocol (UDP or TCP) */
-	struct svc_sock		*svsk;
+	struct svc_xprt		*xprt;
 	struct sockaddr_storage	addr;	/* where reply must go */
 	size_t			addrlen;
 	union svc_addr_u	daddr;	/* where reply must come from */
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -59,6 +59,8 @@ struct svc_xprt {
 	spinlock_t		xpt_lock;	/* protects sk_deferred
						 * and xpt_auth_cache */
 	void			*xpt_auth_cache;/* auth cache */
+	struct list_head	xpt_deferred;	/* deferred requests that need
+						 * to be revisited */
 };

 int	svc_reg_xprt_class(struct svc_xprt_class *);
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -20,9 +20,6 @@ struct svc_sock {
 	struct socket *		sk_sock;	/* berkeley socket layer */
 	struct sock *		sk_sk;		/* INET layer */

-	struct list_head	sk_deferred;	/* deferred requests that need
-						 * to be revisited */
-
 	/* We keep the old state_change and data_ready CB's here */
 	void			(*sk_ostate)(struct sock *);
 	void			(*sk_odata)(struct sock *, int bytes);
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -102,6 +102,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
 	xprt->xpt_server = serv;
 	INIT_LIST_HEAD(&xprt->xpt_list);
 	INIT_LIST_HEAD(&xprt->xpt_ready);
+	INIT_LIST_HEAD(&xprt->xpt_deferred);
 	mutex_init(&xprt->xpt_mutex);
 	spin_lock_init(&xprt->xpt_lock);
 }
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -89,7 +89,7 @@ static void svc_close_xprt(struct svc_xprt *xprt);
 static void svc_sock_detach(struct svc_xprt *);
 static void svc_sock_free(struct svc_xprt *);

-static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
+static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
 static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
@@ -771,11 +771,6 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
 			    (serv->sv_nrthreads+3) * serv->sv_max_mesg);

-	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
-		svc_xprt_received(&svsk->sk_xprt);
-		return svc_deferred_recv(rqstp);
-	}
-
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	skb = NULL;
 	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
@@ -1138,11 +1133,6 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
 		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));

-	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
-		svc_xprt_received(&svsk->sk_xprt);
-		return svc_deferred_recv(rqstp);
-	}
-
 	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* sndbuf needs to have room for one request
 		 * per thread, otherwise we can stall even when the
@@ -1601,7 +1591,12 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
 			rqstp, pool->sp_id, svsk,
 			atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
-		len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
+		rqstp->rq_deferred = svc_deferred_dequeue(&svsk->sk_xprt);
+		if (rqstp->rq_deferred) {
+			svc_xprt_received(&svsk->sk_xprt);
+			len = svc_deferred_recv(rqstp);
+		} else
+			len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
 		dprintk("svc: got len=%d\n", len);
 	}
@@ -1758,7 +1753,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	svsk->sk_ostate = inet->sk_state_change;
 	svsk->sk_odata = inet->sk_data_ready;
 	svsk->sk_owspace = inet->sk_write_space;
-	INIT_LIST_HEAD(&svsk->sk_deferred);

 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
@@ -1976,22 +1970,21 @@ void svc_close_all(struct list_head *xprt_list)
 static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 {
 	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
-	struct svc_sock *svsk;
+	struct svc_xprt *xprt = dr->xprt;

 	if (too_many) {
-		svc_xprt_put(&dr->svsk->sk_xprt);
+		svc_xprt_put(xprt);
 		kfree(dr);
 		return;
 	}
 	dprintk("revisit queued\n");
-	svsk = dr->svsk;
-	dr->svsk = NULL;
-	spin_lock(&svsk->sk_xprt.xpt_lock);
-	list_add(&dr->handle.recent, &svsk->sk_deferred);
-	spin_unlock(&svsk->sk_xprt.xpt_lock);
-	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
-	svc_xprt_enqueue(&svsk->sk_xprt);
-	svc_xprt_put(&svsk->sk_xprt);
+	dr->xprt = NULL;
+	spin_lock(&xprt->xpt_lock);
+	list_add(&dr->handle.recent, &xprt->xpt_deferred);
+	spin_unlock(&xprt->xpt_lock);
+	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+	svc_xprt_enqueue(xprt);
+	svc_xprt_put(xprt);
 }

 static struct cache_deferred_req *
@@ -2022,7 +2015,7 @@ svc_defer(struct cache_req *req)
 		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
 	}
 	svc_xprt_get(rqstp->rq_xprt);
-	dr->svsk = rqstp->rq_sock;
+	dr->xprt = rqstp->rq_xprt;

 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -2048,21 +2041,21 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
 }

-static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
+static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
 {
 	struct svc_deferred_req *dr = NULL;

-	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
+	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
 		return NULL;
-	spin_lock(&svsk->sk_xprt.xpt_lock);
-	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
-	if (!list_empty(&svsk->sk_deferred)) {
-		dr = list_entry(svsk->sk_deferred.next,
+	spin_lock(&xprt->xpt_lock);
+	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
+	if (!list_empty(&xprt->xpt_deferred)) {
+		dr = list_entry(xprt->xpt_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	}
-	spin_unlock(&svsk->sk_xprt.xpt_lock);
+	spin_unlock(&xprt->xpt_lock);
 	return dr;
 }
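
Taken together, the deferral lifecycle after this patch works as follows (the sequencing notes are a reading of the hunks above, not text from the commit):

/*
 * 1. svc_defer():   svc_xprt_get(rqstp->rq_xprt); dr->xprt = rqstp->rq_xprt;
 *                   The deferred request now pins the generic svc_xprt
 *                   rather than the svc_sock.
 * 2. svc_revisit(): drops the back-pointer (dr->xprt = NULL), list_add()s
 *                   the request onto xprt->xpt_deferred under xpt_lock,
 *                   then set_bit(XPT_DEFERRED) and svc_xprt_enqueue() wake
 *                   a server thread; svc_xprt_put() balances svc_defer()'s
 *                   reference.
 * 3. svc_recv():    svc_deferred_dequeue() clears XPT_DEFERRED, pops one
 *                   entry under xpt_lock, and re-sets the bit whenever it
 *                   dequeued something, so remaining deferrals are picked
 *                   up on a later pass; the popped request is replayed
 *                   through svc_deferred_recv().
 */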