Commit 15444054 authored by Hugh Dickins, committed by Andrew Morton

mm: shmem: extend shmem_unused_huge_shrink() to all sizes

Although shmem_get_folio_gfp() is correctly putting inodes on the
shrinklist according to the folio size, shmem_unused_huge_shrink() was
still dealing with that shrinklist in terms of HPAGE_PMD_SIZE.

Generalize that; and to handle the mixture of sizes more sensibly, have
shmem_alloc_and_add_folio() give it a number of pages to be freed
(approximate: no need to minimize that with an exact calculation)
instead of a number of inodes to split.
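
To illustrate the accounting this introduces, here is a standalone userspace
sketch (example numbers are assumed, this is not kernel code, and it ignores
the fallocated-end handling that shmem_fallocend() provides): for a large
folio straddling the end of the file, only the pages past the EOF page count
toward the total that splitting could free.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long folio_index = 0;          /* folio starts at file offset 0 */
	unsigned long folio_pages = 16;         /* a 64KiB large folio */
	unsigned long i_size = 10 * PAGE_SIZE;  /* file is 10 pages long */

	/* first index beyond this folio, as folio_next_index() would return */
	unsigned long next = folio_index + folio_pages;
	/* first page index beyond i_size, as DIV_ROUND_UP(i_size, PAGE_SIZE) */
	unsigned long end = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;

	if (end <= folio_index || end >= next)
		printf("nothing to gain from splitting\n");
	else
		printf("splitting could free about %lu pages\n", next - end);
	return 0;
}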

[akpm@linux-foundation.org: comment tweak, per David]
Link: https://lkml.kernel.org/r/d8c40850-6774-7a93-1e2c-8d941683b260@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent de5b8526
@@ -636,15 +636,14 @@ static const char *shmem_format_huge(int huge)
 #endif
 
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
-		struct shrink_control *sc, unsigned long nr_to_split)
+		struct shrink_control *sc, unsigned long nr_to_free)
 {
 	LIST_HEAD(list), *pos, *next;
-	LIST_HEAD(to_remove);
 	struct inode *inode;
 	struct shmem_inode_info *info;
 	struct folio *folio;
 	unsigned long batch = sc ? sc->nr_to_scan : 128;
-	int split = 0;
+	unsigned long split = 0, freed = 0;
 
 	if (list_empty(&sbinfo->shrinklist))
 		return SHRINK_STOP;
@@ -662,13 +661,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 			goto next;
 		}
 
-		/* Check if there's anything to gain */
-		if (round_up(inode->i_size, PAGE_SIZE) ==
-				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-			list_move(&info->shrinklist, &to_remove);
-			goto next;
-		}
-
 		list_move(&info->shrinklist, &list);
 next:
 		sbinfo->shrinklist_len--;
@@ -677,34 +669,36 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 	}
 	spin_unlock(&sbinfo->shrinklist_lock);
 
-	list_for_each_safe(pos, next, &to_remove) {
-		info = list_entry(pos, struct shmem_inode_info, shrinklist);
-		inode = &info->vfs_inode;
-		list_del_init(&info->shrinklist);
-		iput(inode);
-	}
-
 	list_for_each_safe(pos, next, &list) {
+		pgoff_t next, end;
+		loff_t i_size;
 		int ret;
-		pgoff_t index;
 
 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 		inode = &info->vfs_inode;
 
-		if (nr_to_split && split >= nr_to_split)
+		if (nr_to_free && freed >= nr_to_free)
 			goto move_back;
 
-		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
-		folio = filemap_get_folio(inode->i_mapping, index);
-		if (IS_ERR(folio))
+		i_size = i_size_read(inode);
+		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
+		if (!folio || xa_is_value(folio))
 			goto drop;
 
-		/* No huge page at the end of the file: nothing to split */
+		/* No large folio at the end of the file: nothing to split */
 		if (!folio_test_large(folio)) {
 			folio_put(folio);
 			goto drop;
 		}
 
+		/* Check if there is anything to gain from splitting */
+		next = folio_next_index(folio);
+		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
+		if (end <= folio->index || end >= next) {
+			folio_put(folio);
+			goto drop;
+		}
+
 		/*
 		 * Move the inode on the list back to shrinklist if we failed
 		 * to lock the page at this time.
@@ -725,6 +719,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		if (ret)
 			goto move_back;
 
+		freed += next - end;
 		split++;
 drop:
 		list_del_init(&info->shrinklist);
@@ -769,7 +764,7 @@ static long shmem_unused_huge_count(struct super_block *sb,
 #define shmem_huge SHMEM_HUGE_DENY
 
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
-		struct shrink_control *sc, unsigned long nr_to_split)
+		struct shrink_control *sc, unsigned long nr_to_free)
 {
 	return 0;
 }
@@ -1851,7 +1846,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 		 * Try to reclaim some space by splitting a few
 		 * large folios beyond i_size on the filesystem.
 		 */
-		shmem_unused_huge_shrink(sbinfo, NULL, 2);
+		shmem_unused_huge_shrink(sbinfo, NULL, pages);
 		/*
 		 * And do a shmem_recalc_inode() to account for freed pages:
 		 * except our folio is there in cache, so not quite balanced.