Commit 7f8275d0 authored by Dave Chinner, committed by Dave Chinner

mm: add context argument to shrinker callback

The current shrinker implementation requires the registered callback
to have global state to work from. This makes it difficult to shrink
caches that are not global (e.g. per-filesystem caches). Pass the shrinker
structure to the callback so that users can embed the shrinker structure
in the context the shrinker needs to operate on and get back to it in the
callback via container_of().
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent d0c6f625
...@@ -2926,7 +2926,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm) ...@@ -2926,7 +2926,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
return kvm_mmu_zap_page(kvm, page) + 1; return kvm_mmu_zap_page(kvm, page) + 1;
} }
static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask) static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{ {
struct kvm *kvm; struct kvm *kvm;
struct kvm *kvm_freed = NULL; struct kvm *kvm_freed = NULL;
......
...@@ -4978,7 +4978,7 @@ i915_gpu_is_active(struct drm_device *dev) ...@@ -4978,7 +4978,7 @@ i915_gpu_is_active(struct drm_device *dev)
} }
static int static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{ {
drm_i915_private_t *dev_priv, *next_dev; drm_i915_private_t *dev_priv, *next_dev;
struct drm_i915_gem_object *obj_priv, *next_obj; struct drm_i915_gem_object *obj_priv, *next_obj;
......
...@@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent); ...@@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent);
* *
* In this case we return -1 to tell the caller that we baled. * In this case we return -1 to tell the caller that we baled.
*/ */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask) static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{ {
if (nr) { if (nr) {
if (!(gfp_mask & __GFP_FS)) if (!(gfp_mask & __GFP_FS))
......
...@@ -1358,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) ...@@ -1358,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
} }
static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{ {
struct gfs2_glock *gl; struct gfs2_glock *gl;
int may_demote; int may_demote;
......
...@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list); ...@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0); static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock); static DEFINE_SPINLOCK(qd_lru_lock);
int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask) int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{ {
struct gfs2_quota_data *qd; struct gfs2_quota_data *qd;
struct gfs2_sbd *sdp; struct gfs2_sbd *sdp;
......
...@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) ...@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
return ret; return ret;
} }
extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
extern const struct quotactl_ops gfs2_quotactl_ops; extern const struct quotactl_ops gfs2_quotactl_ops;
#endif /* __QUOTA_DOT_H__ */ #endif /* __QUOTA_DOT_H__ */
...@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan) ...@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan)
* This function is passed the number of inodes to scan, and it returns the * This function is passed the number of inodes to scan, and it returns the
* total number of remaining possibly-reclaimable inodes. * total number of remaining possibly-reclaimable inodes.
*/ */
static int shrink_icache_memory(int nr, gfp_t gfp_mask) static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{ {
if (nr) { if (nr) {
/* /*
......
...@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache) ...@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache)
* What the mbcache registers as to get shrunk dynamically. * What the mbcache registers as to get shrunk dynamically.
*/ */
static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask); static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
static struct shrinker mb_cache_shrinker = { static struct shrinker mb_cache_shrinker = {
.shrink = mb_cache_shrink_fn, .shrink = mb_cache_shrink_fn,
...@@ -191,13 +191,14 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce) ...@@ -191,13 +191,14 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
* This function is called by the kernel memory management when memory * This function is called by the kernel memory management when memory
* gets low. * gets low.
* *
* @shrink: (ignored)
* @nr_to_scan: Number of objects to scan * @nr_to_scan: Number of objects to scan
* @gfp_mask: (ignored) * @gfp_mask: (ignored)
* *
* Returns the number of objects which are present in the cache. * Returns the number of objects which are present in the cache.
*/ */
static int static int
mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask) mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{ {
LIST_HEAD(free_list); LIST_HEAD(free_list);
struct list_head *l, *ltmp; struct list_head *l, *ltmp;
......
...@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head) ...@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head)
} }
} }
int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{ {
LIST_HEAD(head); LIST_HEAD(head);
struct nfs_inode *nfsi; struct nfs_inode *nfsi;
......
...@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[]; ...@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[];
void nfs_close_context(struct nfs_open_context *ctx, int is_sync); void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
/* dir.c */ /* dir.c */
extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask); extern int nfs_access_cache_shrinker(struct shrinker *shrink,
int nr_to_scan, gfp_t gfp_mask);
/* inode.c */ /* inode.c */
extern struct workqueue_struct *nfsiod_workqueue; extern struct workqueue_struct *nfsiod_workqueue;
......
...@@ -676,7 +676,7 @@ static void prune_dqcache(int count) ...@@ -676,7 +676,7 @@ static void prune_dqcache(int count)
* This is called from kswapd when we think we need some * This is called from kswapd when we think we need some
* more memory * more memory
*/ */
static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{ {
if (nr) { if (nr) {
spin_lock(&dq_list_lock); spin_lock(&dq_list_lock);
......
...@@ -277,7 +277,7 @@ static int kick_a_thread(void) ...@@ -277,7 +277,7 @@ static int kick_a_thread(void)
return 0; return 0;
} }
int ubifs_shrinker(int nr, gfp_t gfp_mask) int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{ {
int freed, contention = 0; int freed, contention = 0;
long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
......
...@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot); ...@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
int ubifs_tnc_end_commit(struct ubifs_info *c); int ubifs_tnc_end_commit(struct ubifs_info *c);
/* shrinker.c */ /* shrinker.c */
int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask); int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
/* commit.c */ /* commit.c */
int ubifs_bg_thread(void *info); int ubifs_bg_thread(void *info);
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
static kmem_zone_t *xfs_buf_zone; static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *); STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t); STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
static struct shrinker xfs_buf_shake = { static struct shrinker xfs_buf_shake = {
.shrink = xfsbufd_wakeup, .shrink = xfsbufd_wakeup,
...@@ -340,7 +340,7 @@ _xfs_buf_lookup_pages( ...@@ -340,7 +340,7 @@ _xfs_buf_lookup_pages(
__func__, gfp_mask); __func__, gfp_mask);
XFS_STATS_INC(xb_page_retries); XFS_STATS_INC(xb_page_retries);
xfsbufd_wakeup(0, gfp_mask); xfsbufd_wakeup(NULL, 0, gfp_mask);
congestion_wait(BLK_RW_ASYNC, HZ/50); congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry; goto retry;
} }
...@@ -1762,6 +1762,7 @@ xfs_buf_runall_queues( ...@@ -1762,6 +1762,7 @@ xfs_buf_runall_queues(
STATIC int STATIC int
xfsbufd_wakeup( xfsbufd_wakeup(
struct shrinker *shrink,
int priority, int priority,
gfp_t mask) gfp_t mask)
{ {
......
...@@ -838,6 +838,7 @@ static struct rw_semaphore xfs_mount_list_lock; ...@@ -838,6 +838,7 @@ static struct rw_semaphore xfs_mount_list_lock;
static int static int
xfs_reclaim_inode_shrink( xfs_reclaim_inode_shrink(
struct shrinker *shrink,
int nr_to_scan, int nr_to_scan,
gfp_t gfp_mask) gfp_t gfp_mask)
{ {
......
...@@ -69,7 +69,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); ...@@ -69,7 +69,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int xfs_qm_shake(int, gfp_t); STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t);
static struct shrinker xfs_qm_shaker = { static struct shrinker xfs_qm_shaker = {
.shrink = xfs_qm_shake, .shrink = xfs_qm_shake,
...@@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist( ...@@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist(
*/ */
/* ARGSUSED */ /* ARGSUSED */
STATIC int STATIC int
xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask) xfs_qm_shake(
struct shrinker *shrink,
int nr_to_scan,
gfp_t gfp_mask)
{ {
int ndqused, nfree, n; int ndqused, nfree, n;
......
...@@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) ...@@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
* querying the cache size, so a fastpath for that case is appropriate. * querying the cache size, so a fastpath for that case is appropriate.
*/ */
struct shrinker { struct shrinker {
int (*shrink)(int nr_to_scan, gfp_t gfp_mask); int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
int seeks; /* seeks to recreate an obj */ int seeks; /* seeks to recreate an obj */
/* These are for internal use */ /* These are for internal use */
......
...@@ -213,8 +213,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, ...@@ -213,8 +213,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
list_for_each_entry(shrinker, &shrinker_list, list) { list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta; unsigned long long delta;
unsigned long total_scan; unsigned long total_scan;
unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask); unsigned long max_pass;
max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
delta = (4 * scanned) / shrinker->seeks; delta = (4 * scanned) / shrinker->seeks;
delta *= max_pass; delta *= max_pass;
do_div(delta, lru_pages + 1); do_div(delta, lru_pages + 1);
...@@ -242,8 +243,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, ...@@ -242,8 +243,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
int shrink_ret; int shrink_ret;
int nr_before; int nr_before;
nr_before = (*shrinker->shrink)(0, gfp_mask); nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask); shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
gfp_mask);
if (shrink_ret == -1) if (shrink_ret == -1)
break; break;
if (shrink_ret < nr_before) if (shrink_ret < nr_before)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment