Commit 2c6b691c authored by Jeff Layton's avatar Jeff Layton Committed by J. Bruce Fields

nfsd: when updating an entry with RC_NOCACHE, just free it

There's no need to keep entries around that we're declaring RC_NOCACHE.
Ditto if there's a problem with the entry.

With this change too, there's no need to test for RC_UNUSED in the
search function. If the entry's in the hash table then it's either
INPROG or DONE.
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 13cc8a78
...@@ -98,6 +98,14 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp) ...@@ -98,6 +98,14 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
kmem_cache_free(drc_slab, rp); kmem_cache_free(drc_slab, rp);
} }
/*
 * Unhash and free a reply cache entry.
 *
 * Locking wrapper around nfsd_reply_cache_free_locked() for callers
 * that do not already hold cache_lock (e.g. nfsd_cache_update() when
 * discarding an RC_NOCACHE entry or dropping an entry on error).
 */
static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
spin_lock(&cache_lock);
nfsd_reply_cache_free_locked(rp);
spin_unlock(&cache_lock);
}
int nfsd_reply_cache_init(void) int nfsd_reply_cache_init(void)
{ {
drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
...@@ -182,8 +190,7 @@ nfsd_cache_search(struct svc_rqst *rqstp) ...@@ -182,8 +190,7 @@ nfsd_cache_search(struct svc_rqst *rqstp)
rh = &cache_hash[request_hash(xid)]; rh = &cache_hash[request_hash(xid)];
hlist_for_each_entry(rp, hn, rh, c_hash) { hlist_for_each_entry(rp, hn, rh, c_hash) {
if (rp->c_state != RC_UNUSED && if (xid == rp->c_xid && proc == rp->c_proc &&
xid == rp->c_xid && proc == rp->c_proc &&
proto == rp->c_prot && vers == rp->c_vers && proto == rp->c_prot && vers == rp->c_vers &&
!nfsd_cache_entry_expired(rp) && !nfsd_cache_entry_expired(rp) &&
rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) && rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
...@@ -353,7 +360,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) ...@@ -353,7 +360,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
/* Don't cache excessive amounts of data and XDR failures */ /* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) { if (!statp || len > (256 >> 2)) {
rp->c_state = RC_UNUSED; nfsd_reply_cache_free(rp);
return; return;
} }
...@@ -367,12 +374,15 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) ...@@ -367,12 +374,15 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
cachv = &rp->c_replvec; cachv = &rp->c_replvec;
cachv->iov_base = kmalloc(len << 2, GFP_KERNEL); cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
if (!cachv->iov_base) { if (!cachv->iov_base) {
rp->c_state = RC_UNUSED; nfsd_reply_cache_free(rp);
return; return;
} }
cachv->iov_len = len << 2; cachv->iov_len = len << 2;
memcpy(cachv->iov_base, statp, len << 2); memcpy(cachv->iov_base, statp, len << 2);
break; break;
case RC_NOCACHE:
nfsd_reply_cache_free(rp);
return;
} }
spin_lock(&cache_lock); spin_lock(&cache_lock);
lru_put_end(rp); lru_put_end(rp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment