Commit 70e60ce7 authored by Dave Chinner, committed by Dave Chinner

xfs: convert inode shrinker to per-filesystem contexts

Now that the shrinker passes us a context, wire up a shrinker context per
filesystem. This allows us to remove the global mount list and the locking
problems it introduced. It also means that a shrinker call does not need to
traverse clean filesystems before finding a filesystem with reclaimable
inodes, which significantly reduces scanning overhead when many filesystems
are present.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 7f8275d0
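
For illustration only, here is a minimal, self-contained userspace sketch of the pattern this patch adopts: embed the shrinker structure in the per-filesystem mount structure and let the callback recover its filesystem with container_of(), so no global mount list or lock is needed. The names below (fake_shrinker, fake_mount, fake_reclaim_shrink) are hypothetical stand-ins, not the kernel API.

/*
 * Sketch of a per-context callback: the callback receives only a pointer
 * to the embedded member and recovers the enclosing per-mount structure.
 * All names here are illustrative, not the kernel's.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_shrinker {
	int (*shrink)(struct fake_shrinker *s, int nr_to_scan);
};

struct fake_mount {
	const char		*name;
	int			reclaimable;
	struct fake_shrinker	shrink;		/* embedded per-mount context */
};

static int
fake_reclaim_shrink(struct fake_shrinker *s, int nr_to_scan)
{
	/* Recover the per-mount context from the embedded member pointer. */
	struct fake_mount *mp = container_of(s, struct fake_mount, shrink);

	printf("reclaiming from %s: %d reclaimable, nr_to_scan=%d\n",
	       mp->name, mp->reclaimable, nr_to_scan);
	return mp->reclaimable;
}

int main(void)
{
	struct fake_mount a = { .name = "fs-a", .reclaimable = 42 };
	struct fake_mount b = { .name = "fs-b", .reclaimable = 7 };

	/* Each mount sets up its own embedded shrinker... */
	a.shrink.shrink = fake_reclaim_shrink;
	b.shrink.shrink = fake_reclaim_shrink;

	/* ...and each callback sees only its own filesystem; no global list. */
	a.shrink.shrink(&a.shrink, 16);
	b.shrink.shrink(&b.shrink, 16);
	return 0;
}

In the diff below, xfs_inode_shrinker_register() plays the role of the per-mount setup, and xfs_reclaim_inode_shrink() recovers the mount with container_of(shrink, struct xfs_mount, m_inode_shrink).
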
@@ -1883,7 +1883,6 @@ init_xfs_fs(void)
 		goto out_cleanup_procfs;
 
 	vfs_initquota();
-	xfs_inode_shrinker_init();
 
 	error = register_filesystem(&xfs_fs_type);
 	if (error)
@@ -1911,7 +1910,6 @@ exit_xfs_fs(void)
 {
 	vfs_exitquota();
 	unregister_filesystem(&xfs_fs_type);
-	xfs_inode_shrinker_destroy();
 	xfs_sysctl_unregister();
 	xfs_cleanup_procfs();
 	xfs_buf_terminate();
...
@@ -828,14 +828,7 @@ xfs_reclaim_inodes(
 
 /*
  * Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
  */
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
 static int
 xfs_reclaim_inode_shrink(
 	struct shrinker	*shrink,
@@ -847,65 +840,38 @@ xfs_reclaim_inode_shrink(
 	xfs_agnumber_t	ag;
 	int		reclaimable = 0;
 
+	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
 	if (nr_to_scan) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
 
-		down_read(&xfs_mount_list_lock);
-		list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-			xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+		xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
 					XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
-			if (nr_to_scan <= 0)
-				break;
-		}
-		up_read(&xfs_mount_list_lock);
-	}
+		/* if we don't exhaust the scan, don't bother coming back */
+		if (nr_to_scan > 0)
+			return -1;
+	}
 
-	down_read(&xfs_mount_list_lock);
-	list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-		for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-			pag = xfs_perag_get(mp, ag);
-			reclaimable += pag->pag_ici_reclaimable;
-			xfs_perag_put(pag);
-		}
+	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+		pag = xfs_perag_get(mp, ag);
+		reclaimable += pag->pag_ici_reclaimable;
+		xfs_perag_put(pag);
 	}
-	up_read(&xfs_mount_list_lock);
 	return reclaimable;
 }
 
-static struct shrinker xfs_inode_shrinker = {
-	.shrink = xfs_reclaim_inode_shrink,
-	.seeks = DEFAULT_SEEKS,
-};
-
-void __init
-xfs_inode_shrinker_init(void)
-{
-	init_rwsem(&xfs_mount_list_lock);
-	register_shrinker(&xfs_inode_shrinker);
-}
-
-void
-xfs_inode_shrinker_destroy(void)
-{
-	ASSERT(list_empty(&xfs_mount_list));
-	unregister_shrinker(&xfs_inode_shrinker);
-}
-
 void
 xfs_inode_shrinker_register(
 	struct xfs_mount	*mp)
 {
-	down_write(&xfs_mount_list_lock);
-	list_add_tail(&mp->m_mplist, &xfs_mount_list);
-	up_write(&xfs_mount_list_lock);
+	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
+	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
+	register_shrinker(&mp->m_inode_shrink);
 }
 
 void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
 {
-	down_write(&xfs_mount_list_lock);
-	list_del(&mp->m_mplist);
-	up_write(&xfs_mount_list_lock);
+	unregister_shrinker(&mp->m_inode_shrink);
 }
...
@@ -55,8 +55,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
 	int flags, int tag, int write_lock, int *nr_to_scan);
 
-void xfs_inode_shrinker_init(void);
-void xfs_inode_shrinker_destroy(void);
 void xfs_inode_shrinker_register(struct xfs_mount *mp);
 void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
...
@@ -259,7 +259,7 @@ typedef struct xfs_mount {
 	wait_queue_head_t	m_wait_single_sync_task;
 	__int64_t		m_update_flags;	/* sb flags we need to update
 						   on the next remount,rw */
-	struct list_head	m_mplist;	/* inode shrinker mount list */
+	struct shrinker		m_inode_shrink;	/* inode reclaim shrinker */
 } xfs_mount_t;
 
 /*
...