Commit c135e126 authored by Chuck Lever

NFSD: Refactor the duplicate reply cache shrinker

Avoid holding the bucket lock while freeing cache entries. This
change also caps the number of entries that are freed per shrinker
invocation, to reduce the shrinker's impact on the cache's
effectiveness.
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent a9507f6a
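
The heart of the change is a lock-then-dispose pattern: expired entries are unlinked onto a private list while the bucket lock is held, and the actual frees happen only after the lock is dropped, so free-side work no longer extends the critical section. Below is a minimal userspace sketch of that pattern, not the kernel code; every name in it is hypothetical, standing in for the list_head/spinlock machinery and the nfsd_prune_bucket_locked()/nfsd_cacherep_dispose() helpers used in the diff below.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int expired;			/* stands in for the RC_EXPIRE check */
};

struct bucket {
	pthread_mutex_t lock;
	struct entry *head;
};

/* Unlink expired entries onto a private dispose list while holding the
 * bucket lock. Nothing is freed here, so the lock is held only briefly. */
static void prune_bucket_locked(struct bucket *b, struct entry **dispose)
{
	struct entry **pp = &b->head;

	while (*pp) {
		struct entry *e = *pp;

		if (e->expired) {
			*pp = e->next;		/* unlink from the bucket */
			e->next = *dispose;	/* push onto the dispose list */
			*dispose = e;
		} else {
			pp = &e->next;
		}
	}
}

/* Free the collected entries with no locks held; returns the count freed. */
static unsigned long dispose_list(struct entry *dispose)
{
	unsigned long freed = 0;

	while (dispose) {
		struct entry *e = dispose;

		dispose = e->next;
		free(e);
		freed++;
	}
	return freed;
}

static unsigned long scan_bucket(struct bucket *b)
{
	struct entry *dispose = NULL;

	pthread_mutex_lock(&b->lock);
	prune_bucket_locked(b, &dispose);
	pthread_mutex_unlock(&b->lock);

	return dispose_list(dispose);	/* expensive work happens unlocked */
}

int main(void)
{
	struct bucket b = { PTHREAD_MUTEX_INITIALIZER, NULL };

	for (int i = 0; i < 6; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		if (!e)
			return 1;
		e->expired = (i % 2 == 0);	/* mark half the entries expired */
		e->next = b.head;
		b.head = e;
	}
	printf("freed %lu entries\n", scan_bucket(&b));
	return 0;
}

Because the dispose list is private to the caller, no other CPU can observe the unlinked entries, and the expensive free loop runs with no locks held; this is the same reasoning the commit applies to the DRC bucket lock.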
@@ -310,68 +310,64 @@ nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
 	}
 }
 
-static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
-			 unsigned int max)
+/**
+ * nfsd_reply_cache_count - count_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Returns the total number of entries in the duplicate reply cache. To
+ * keep things simple and quick, this is not the number of expired entries
+ * in the cache (ie, the number that would be removed by a call to
+ * nfsd_reply_cache_scan).
+ */
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct svc_cacherep *rp, *tmp;
-	long freed = 0;
-
-	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
-		/*
-		 * Don't free entries attached to calls that are still
-		 * in-progress, but do keep scanning the list.
-		 */
-		if (rp->c_state == RC_INPROG)
-			continue;
-		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
-		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
-			break;
-		nfsd_reply_cache_free_locked(b, rp, nn);
-		if (max && freed++ > max)
-			break;
-	}
-	return freed;
+	struct nfsd_net *nn = container_of(shrink,
+				struct nfsd_net, nfsd_reply_cache_shrinker);
+
+	return atomic_read(&nn->num_drc_entries);
 }
 
-/*
- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
- * Also prune the oldest ones when the total exceeds the max number of entries.
+/**
+ * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Free expired entries on each bucket's LRU list until we've released
+ * nr_to_scan freed objects. Nothing will be released if the cache
+ * has not exceeded its max_drc_entries limit.
+ *
+ * Returns the number of entries released by this call.
  */
-static long
-prune_cache_entries(struct nfsd_net *nn)
+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
+	struct nfsd_net *nn = container_of(shrink,
+				struct nfsd_net, nfsd_reply_cache_shrinker);
+	unsigned long freed = 0;
+	LIST_HEAD(dispose);
 	unsigned int i;
-	long freed = 0;
 
 	for (i = 0; i < nn->drc_hashsize; i++) {
 		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
 
 		if (list_empty(&b->lru_head))
 			continue;
+
 		spin_lock(&b->cache_lock);
-		freed += prune_bucket(b, nn, 0);
+		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
 		spin_unlock(&b->cache_lock);
-	}
-	return freed;
-}
 
-static unsigned long
-nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
-{
-	struct nfsd_net *nn = container_of(shrink,
-				struct nfsd_net, nfsd_reply_cache_shrinker);
+		freed += nfsd_cacherep_dispose(&dispose);
+		if (freed > sc->nr_to_scan)
+			break;
+	}
 
-	return atomic_read(&nn->num_drc_entries);
-}
-
-static unsigned long
-nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
-	struct nfsd_net *nn = container_of(shrink,
-				struct nfsd_net, nfsd_reply_cache_shrinker);
-
-	return prune_cache_entries(nn);
-}
+	trace_nfsd_drc_gc(nn, freed);
+	return freed;
+}
 
 /*
  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
  */
...
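
For context, count_objects and scan_objects are the two halves of the kernel's struct shrinker contract: the first gives the MM core a cheap population estimate, the second performs bounded reclaim and reports how many objects it released. Below is a hedged sketch of how such a shrinker is wired up, assuming the shrinker API of kernels around this commit (register_shrinker() has taken a name format string since v6.0); the registration name, the seeks value, and the cleanup label are illustrative, not copied from the nfsd source.

	/* The struct shrinker members below are the real field names; the
	 * handlers are the two methods shown in the diff above. */
	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker.seeks = 1;	/* assumed value */

	if (register_shrinker(&nn->nfsd_reply_cache_shrinker, "nfsd-reply"))
		goto out_shrinker;	/* hypothetical error path */

Under memory pressure the MM core calls count_objects to size its reclaim target, then calls scan_objects with sc->nr_to_scan set; the loop above stops once it has freed more than that, which is exactly the cap the commit message describes.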