Commit 7ac1bea5 authored by NeilBrown's avatar NeilBrown Committed by Linus Torvalds

knfsd: rename sk_defer_lock to sk_lock

Now that sk_defer_lock protects two different things, make the name more
generic.

Also don't bother with disabling _bh as the lock is only ever taken from
process context.
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f34b9568
...@@ -37,7 +37,8 @@ struct svc_sock { ...@@ -37,7 +37,8 @@ struct svc_sock {
atomic_t sk_reserved; /* space on outq that is reserved */ atomic_t sk_reserved; /* space on outq that is reserved */
spinlock_t sk_defer_lock; /* protects sk_deferred */ spinlock_t sk_lock; /* protects sk_deferred and
* sk_info_authunix */
struct list_head sk_deferred; /* deferred requests that need to struct list_head sk_deferred; /* deferred requests that need to
* be revisted */ * be revisted */
struct mutex sk_mutex; /* to serialize sending data */ struct mutex sk_mutex; /* to serialize sending data */
......
...@@ -385,7 +385,7 @@ ip_map_cached_get(struct svc_rqst *rqstp) ...@@ -385,7 +385,7 @@ ip_map_cached_get(struct svc_rqst *rqstp)
{ {
struct ip_map *ipm; struct ip_map *ipm;
struct svc_sock *svsk = rqstp->rq_sock; struct svc_sock *svsk = rqstp->rq_sock;
spin_lock_bh(&svsk->sk_defer_lock); spin_lock(&svsk->sk_lock);
ipm = svsk->sk_info_authunix; ipm = svsk->sk_info_authunix;
if (ipm != NULL) { if (ipm != NULL) {
if (!cache_valid(&ipm->h)) { if (!cache_valid(&ipm->h)) {
...@@ -395,13 +395,13 @@ ip_map_cached_get(struct svc_rqst *rqstp) ...@@ -395,13 +395,13 @@ ip_map_cached_get(struct svc_rqst *rqstp)
* same IP address. * same IP address.
*/ */
svsk->sk_info_authunix = NULL; svsk->sk_info_authunix = NULL;
spin_unlock_bh(&svsk->sk_defer_lock); spin_unlock(&svsk->sk_lock);
cache_put(&ipm->h, &ip_map_cache); cache_put(&ipm->h, &ip_map_cache);
return NULL; return NULL;
} }
cache_get(&ipm->h); cache_get(&ipm->h);
} }
spin_unlock_bh(&svsk->sk_defer_lock); spin_unlock(&svsk->sk_lock);
return ipm; return ipm;
} }
...@@ -410,14 +410,14 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm) ...@@ -410,14 +410,14 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
{ {
struct svc_sock *svsk = rqstp->rq_sock; struct svc_sock *svsk = rqstp->rq_sock;
spin_lock_bh(&svsk->sk_defer_lock); spin_lock(&svsk->sk_lock);
if (svsk->sk_sock->type == SOCK_STREAM && if (svsk->sk_sock->type == SOCK_STREAM &&
svsk->sk_info_authunix == NULL) { svsk->sk_info_authunix == NULL) {
/* newly cached, keep the reference */ /* newly cached, keep the reference */
svsk->sk_info_authunix = ipm; svsk->sk_info_authunix = ipm;
ipm = NULL; ipm = NULL;
} }
spin_unlock_bh(&svsk->sk_defer_lock); spin_unlock(&svsk->sk_lock);
if (ipm) if (ipm)
cache_put(&ipm->h, &ip_map_cache); cache_put(&ipm->h, &ip_map_cache);
} }
......
...@@ -53,7 +53,8 @@ ...@@ -53,7 +53,8 @@
* svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
* when both need to be taken (rare), svc_serv->sv_lock is first. * when both need to be taken (rare), svc_serv->sv_lock is first.
* BKL protects svc_serv->sv_nrthread. * BKL protects svc_serv->sv_nrthread.
* svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list * svc_sock->sk_lock protects the svc_sock->sk_deferred list
* and the ->sk_info_authunix cache.
* svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply. * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
* *
* Some flags can be set to certain values at any time * Some flags can be set to certain values at any time
...@@ -1633,7 +1634,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, ...@@ -1633,7 +1634,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
svsk->sk_server = serv; svsk->sk_server = serv;
atomic_set(&svsk->sk_inuse, 1); atomic_set(&svsk->sk_inuse, 1);
svsk->sk_lastrecv = get_seconds(); svsk->sk_lastrecv = get_seconds();
spin_lock_init(&svsk->sk_defer_lock); spin_lock_init(&svsk->sk_lock);
INIT_LIST_HEAD(&svsk->sk_deferred); INIT_LIST_HEAD(&svsk->sk_deferred);
INIT_LIST_HEAD(&svsk->sk_ready); INIT_LIST_HEAD(&svsk->sk_ready);
mutex_init(&svsk->sk_mutex); mutex_init(&svsk->sk_mutex);
...@@ -1857,9 +1858,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many) ...@@ -1857,9 +1858,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
dprintk("revisit queued\n"); dprintk("revisit queued\n");
svsk = dr->svsk; svsk = dr->svsk;
dr->svsk = NULL; dr->svsk = NULL;
spin_lock_bh(&svsk->sk_defer_lock); spin_lock(&svsk->sk_lock);
list_add(&dr->handle.recent, &svsk->sk_deferred); list_add(&dr->handle.recent, &svsk->sk_deferred);
spin_unlock_bh(&svsk->sk_defer_lock); spin_unlock(&svsk->sk_lock);
set_bit(SK_DEFERRED, &svsk->sk_flags); set_bit(SK_DEFERRED, &svsk->sk_flags);
svc_sock_enqueue(svsk); svc_sock_enqueue(svsk);
svc_sock_put(svsk); svc_sock_put(svsk);
...@@ -1925,7 +1926,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) ...@@ -1925,7 +1926,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
return NULL; return NULL;
spin_lock_bh(&svsk->sk_defer_lock); spin_lock(&svsk->sk_lock);
clear_bit(SK_DEFERRED, &svsk->sk_flags); clear_bit(SK_DEFERRED, &svsk->sk_flags);
if (!list_empty(&svsk->sk_deferred)) { if (!list_empty(&svsk->sk_deferred)) {
dr = list_entry(svsk->sk_deferred.next, dr = list_entry(svsk->sk_deferred.next,
...@@ -1934,6 +1935,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) ...@@ -1934,6 +1935,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
list_del_init(&dr->handle.recent); list_del_init(&dr->handle.recent);
set_bit(SK_DEFERRED, &svsk->sk_flags); set_bit(SK_DEFERRED, &svsk->sk_flags);
} }
spin_unlock_bh(&svsk->sk_defer_lock); spin_unlock(&svsk->sk_lock);
return dr; return dr;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment