Commit 05624571 authored by Matthew Wilcox (Oracle)'s avatar Matthew Wilcox (Oracle) Committed by Andrew Morton

mm/shmem: use a folio in shmem_unused_huge_shrink

When calling split_huge_page() we usually have to find the precise page,
but that's not necessary here because we only need to unlock and put the
folio afterwards.  Saves 231 bytes of text (20% of this function).

Link: https://lkml.kernel.org/r/20220504182857.4013401-17-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c28a0e96
...@@ -554,7 +554,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, ...@@ -554,7 +554,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
LIST_HEAD(to_remove); LIST_HEAD(to_remove);
struct inode *inode; struct inode *inode;
struct shmem_inode_info *info; struct shmem_inode_info *info;
struct page *page; struct folio *folio;
unsigned long batch = sc ? sc->nr_to_scan : 128; unsigned long batch = sc ? sc->nr_to_scan : 128;
int split = 0; int split = 0;
...@@ -598,6 +598,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, ...@@ -598,6 +598,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
list_for_each_safe(pos, next, &list) { list_for_each_safe(pos, next, &list) {
int ret; int ret;
pgoff_t index;
info = list_entry(pos, struct shmem_inode_info, shrinklist); info = list_entry(pos, struct shmem_inode_info, shrinklist);
inode = &info->vfs_inode; inode = &info->vfs_inode;
...@@ -605,14 +606,14 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, ...@@ -605,14 +606,14 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
if (nr_to_split && split >= nr_to_split) if (nr_to_split && split >= nr_to_split)
goto move_back; goto move_back;
page = find_get_page(inode->i_mapping, index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); folio = filemap_get_folio(inode->i_mapping, index);
if (!page) if (!folio)
goto drop; goto drop;
/* No huge page at the end of the file: nothing to split */ /* No huge page at the end of the file: nothing to split */
if (!PageTransHuge(page)) { if (!folio_test_large(folio)) {
put_page(page); folio_put(folio);
goto drop; goto drop;
} }
...@@ -623,14 +624,14 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, ...@@ -623,14 +624,14 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
* Waiting for the lock may lead to deadlock in the * Waiting for the lock may lead to deadlock in the
* reclaim path. * reclaim path.
*/ */
if (!trylock_page(page)) { if (!folio_trylock(folio)) {
put_page(page); folio_put(folio);
goto move_back; goto move_back;
} }
ret = split_huge_page(page); ret = split_huge_page(&folio->page);
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
/* If split failed move the inode on the list back to shrinklist */ /* If split failed move the inode on the list back to shrinklist */
if (ret) if (ret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment