Commit b7dd44a1 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm/shmem: convert shmem_add_to_page_cache to take a folio

Shrinks shmem_add_to_page_cache() by 16 bytes.  All the callers grow,
but this is temporary as they will all be converted to folios soon.
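
The caller-side pattern is the same in each case: wrap the struct page in
its folio with page_folio() and pass that down.  A minimal sketch of that
shape (illustrative only, not an additional hunk of this patch; the local
variable names follow the existing callers):

	struct folio *folio;

	/* The page has already been allocated and locked at this point. */
	folio = page_folio(page);
	error = shmem_add_to_page_cache(folio, mapping, index,
					NULL, gfp & GFP_RECLAIM_MASK,
					charge_mm);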

Link: https://lkml.kernel.org/r/20220504182857.4013401-19-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 039bc124
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -696,36 +696,35 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
-static int shmem_add_to_page_cache(struct page *page,
+static int shmem_add_to_page_cache(struct folio *folio,
 				   struct address_space *mapping,
 				   pgoff_t index, void *expected, gfp_t gfp,
 				   struct mm_struct *charge_mm)
 {
-	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
-	unsigned long nr = compound_nr(page);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
+	long nr = folio_nr_pages(folio);
 	int error;
 
-	VM_BUG_ON_PAGE(PageTail(page), page);
-	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-	VM_BUG_ON(expected && PageTransHuge(page));
+	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
+	VM_BUG_ON(expected && folio_test_large(folio));
 
-	page_ref_add(page, nr);
-	page->mapping = mapping;
-	page->index = index;
+	folio_ref_add(folio, nr);
+	folio->mapping = mapping;
+	folio->index = index;
 
-	if (!PageSwapCache(page)) {
-		error = mem_cgroup_charge(page_folio(page), charge_mm, gfp);
+	if (!folio_test_swapcache(folio)) {
+		error = mem_cgroup_charge(folio, charge_mm, gfp);
 		if (error) {
-			if (PageTransHuge(page)) {
+			if (folio_test_pmd_mappable(folio)) {
 				count_vm_event(THP_FILE_FALLBACK);
 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
 			}
 			goto error;
 		}
 	}
-	cgroup_throttle_swaprate(page, gfp);
+	folio_throttle_swaprate(folio, gfp);
 
 	do {
 		xas_lock_irq(&xas);
@@ -737,16 +736,16 @@ static int shmem_add_to_page_cache(struct page *page,
 			xas_set_err(&xas, -EEXIST);
 			goto unlock;
 		}
-		xas_store(&xas, page);
+		xas_store(&xas, folio);
 		if (xas_error(&xas))
 			goto unlock;
-		if (PageTransHuge(page)) {
+		if (folio_test_pmd_mappable(folio)) {
 			count_vm_event(THP_FILE_ALLOC);
-			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
+			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
 		}
 		mapping->nrpages += nr;
-		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
-		__mod_lruvec_page_state(page, NR_SHMEM, nr);
+		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -758,8 +757,8 @@ static int shmem_add_to_page_cache(struct page *page,
 
 	return 0;
 error:
-	page->mapping = NULL;
-	page_ref_sub(page, nr);
+	folio->mapping = NULL;
+	folio_ref_sub(folio, nr);
 	return error;
 }
 
@@ -1691,7 +1690,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-	struct page *page;
+	struct page *page = NULL;
+	struct folio *folio;
 	swp_entry_t swap;
 	int error;
 
@@ -1741,7 +1741,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 		goto failed;
 	}
 
-	error = shmem_add_to_page_cache(page, mapping, index,
+	folio = page_folio(page);
+	error = shmem_add_to_page_cache(folio, mapping, index,
 					swp_to_radix_entry(swap), gfp,
 					charge_mm);
 	if (error)
@@ -1792,6 +1793,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
+	struct folio *folio;
 	struct page *page;
 	pgoff_t hindex = index;
 	gfp_t huge_gfp;
@@ -1906,7 +1908,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	if (sgp == SGP_WRITE)
 		__SetPageReferenced(page);
 
-	error = shmem_add_to_page_cache(page, mapping, hindex,
+	folio = page_folio(page);
+	error = shmem_add_to_page_cache(folio, mapping, hindex,
 					NULL, gfp & GFP_RECLAIM_MASK,
 					charge_mm);
 	if (error)
@@ -2328,6 +2331,7 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
 	void *page_kaddr;
+	struct folio *folio;
 	struct page *page;
 	int ret;
 	pgoff_t max_off;
@@ -2386,7 +2390,8 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (unlikely(pgoff >= max_off))
 		goto out_release;
 
-	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+	folio = page_folio(page);
+	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
 				      gfp & GFP_RECLAIM_MASK, dst_mm);
 	if (ret)
 		goto out_release;