Commit b1d0ec3a authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm/shmem: convert shmem_getpage_gfp to use a folio

Rename shmem_alloc_and_acct_page() to shmem_alloc_and_acct_folio() and
have it return a folio, then use a folio throughout shmem_getpage_gfp().
shmem_getpage_gfp() itself continues to return a struct page.

Link: https://lkml.kernel.org/r/20220504182857.4013401-23-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 72827e5c
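The heart of the conversion is that shmem_getpage_gfp() now tracks the
folio internally and converts back to the precise struct page only at the
caller boundary, replacing the old pointer arithmetic `page + index -
hindex` with folio_page(folio, index - hindex). A minimal sketch of that
boundary pattern follows; the helper name is hypothetical and not part of
this commit, while folio_page(), folio_nr_pages() and round_down() are
real kernel API:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/*
	 * Hypothetical helper illustrating the pattern applied here:
	 * internal state lives in the (possibly multi-page) folio, and
	 * only the caller-facing boundary maps the requested index back
	 * to the exact struct page within it.
	 */
	static struct page *shmem_folio_subpage(struct folio *folio,
						pgoff_t index)
	{
		/* index of the first page covered by this folio */
		pgoff_t hindex = round_down(index, folio_nr_pages(folio));

		return folio_page(folio, index - hindex);
	}

Keeping the struct page return contract confines the churn to this one
function; callers of shmem_getpage_gfp() can be converted to folios in
later patches.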
mm/shmem.c
@@ -1563,8 +1563,7 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	return &shmem_alloc_folio(gfp, info, index)->page;
 }
 
-static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
-		struct inode *inode,
+static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
 		pgoff_t index, bool huge)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1586,7 +1585,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	if (folio) {
 		__folio_set_locked(folio);
 		__folio_set_swapbacked(folio);
-		return &folio->page;
+		return folio;
 	}
 
 	err = -ENOMEM;
@@ -1800,7 +1799,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
 	struct folio *folio;
-	struct page *page;
 	pgoff_t hindex = index;
 	gfp_t huge_gfp;
 	int error;
@@ -1818,19 +1816,18 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	sbinfo = SHMEM_SB(inode->i_sb);
 	charge_mm = vma ? vma->vm_mm : NULL;
 
-	page = pagecache_get_page(mapping, index,
-					FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
-
-	if (page && vma && userfaultfd_minor(vma)) {
-		if (!xa_is_value(page)) {
-			unlock_page(page);
-			put_page(page);
+	folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0);
+	if (folio && vma && userfaultfd_minor(vma)) {
+		if (!xa_is_value(folio)) {
+			folio_unlock(folio);
+			folio_put(folio);
 		}
 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
 		return 0;
 	}
 
-	if (xa_is_value(page)) {
+	if (xa_is_value(folio)) {
+		struct page *page = &folio->page;
 		error = shmem_swapin_page(inode, index, &page,
 					  sgp, gfp, vma, fault_type);
 		if (error == -EEXIST)
@@ -1840,17 +1837,17 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		return error;
 	}
 
-	if (page) {
-		hindex = page->index;
+	if (folio) {
+		hindex = folio->index;
 		if (sgp == SGP_WRITE)
-			mark_page_accessed(page);
-		if (PageUptodate(page))
+			folio_mark_accessed(folio);
+		if (folio_test_uptodate(folio))
 			goto out;
 		/* fallocated page */
 		if (sgp != SGP_READ)
 			goto clear;
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 
 	/*
@@ -1877,17 +1874,16 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 
 	huge_gfp = vma_thp_gfp_mask(vma);
 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
-	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
-	if (IS_ERR(page)) {
+	folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
+	if (IS_ERR(folio)) {
 alloc_nohuge:
-		page = shmem_alloc_and_acct_page(gfp, inode,
-						 index, false);
+		folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
 	}
-	if (IS_ERR(page)) {
+	if (IS_ERR(folio)) {
 		int retry = 5;
 
-		error = PTR_ERR(page);
-		page = NULL;
+		error = PTR_ERR(folio);
+		folio = NULL;
 		if (error != -ENOSPC)
 			goto unlock;
 		/*
@@ -1906,30 +1902,26 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		goto unlock;
 	}
 
-	if (PageTransHuge(page))
-		hindex = round_down(index, HPAGE_PMD_NR);
-	else
-		hindex = index;
+	hindex = round_down(index, folio_nr_pages(folio));
 
 	if (sgp == SGP_WRITE)
-		__SetPageReferenced(page);
+		__folio_set_referenced(folio);
 
-	folio = page_folio(page);
 	error = shmem_add_to_page_cache(folio, mapping, hindex,
 					NULL, gfp & GFP_RECLAIM_MASK,
 					charge_mm);
 	if (error)
 		goto unacct;
-	lru_cache_add(page);
+	folio_add_lru(folio);
 
 	spin_lock_irq(&info->lock);
-	info->alloced += compound_nr(page);
-	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
+	info->alloced += folio_nr_pages(folio);
+	inode->i_blocks += BLOCKS_PER_PAGE << folio_order(folio);
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
 	alloced = true;
 
-	if (PageTransHuge(page) &&
+	if (folio_test_pmd_mappable(folio) &&
 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
 			hindex + HPAGE_PMD_NR - 1) {
 		/*
@@ -1960,22 +1952,21 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	 * but SGP_FALLOC on a page fallocated earlier must initialize
 	 * it now, lest undo on failure cancel our earlier guarantee.
 	 */
-	if (sgp != SGP_WRITE && !PageUptodate(page)) {
-		int i;
+	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
+		long i, n = folio_nr_pages(folio);
 
-		for (i = 0; i < compound_nr(page); i++) {
-			clear_highpage(page + i);
-			flush_dcache_page(page + i);
-		}
-		SetPageUptodate(page);
+		for (i = 0; i < n; i++)
+			clear_highpage(folio_page(folio, i));
+		flush_dcache_folio(folio);
+		folio_mark_uptodate(folio);
 	}
 
 	/* Perhaps the file has been truncated since we checked */
 	if (sgp <= SGP_CACHE &&
 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
 		if (alloced) {
-			ClearPageDirty(page);
-			delete_from_page_cache(page);
+			folio_clear_dirty(folio);
+			filemap_remove_folio(folio);
 			spin_lock_irq(&info->lock);
 			shmem_recalc_inode(inode);
 			spin_unlock_irq(&info->lock);
@@ -1984,24 +1975,24 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		goto unlock;
 	}
 out:
-	*pagep = page + index - hindex;
+	*pagep = folio_page(folio, index - hindex);
 	return 0;
 
 	/*
 	 * Error recovery.
 	 */
 unacct:
-	shmem_inode_unacct_blocks(inode, compound_nr(page));
+	shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
 
-	if (PageTransHuge(page)) {
-		unlock_page(page);
-		put_page(page);
+	if (folio_test_large(folio)) {
+		folio_unlock(folio);
+		folio_put(folio);
 		goto alloc_nohuge;
 	}
 unlock:
-	if (page) {
-		unlock_page(page);
-		put_page(page);
+	if (folio) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 	if (error == -ENOSPC && !once++) {
 		spin_lock_irq(&info->lock);