Commit da08e9b7 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm/shmem: convert shmem_swapin_page() to shmem_swapin_folio()

shmem_swapin_page() only brings in order-0 pages, which are folios
by definition.

Link: https://lkml.kernel.org/r/20220504182857.4013401-24-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b1d0ec3a
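
The conversion below is mechanical because an order-0 page and its folio describe the same single page of memory: page_folio() on an order-0 page returns that page viewed as a folio, and each folio_*() call replaces a Page*/page_*() call that would otherwise go through a compound_head() lookup. A minimal illustrative sketch of that equivalence (not part of this commit):

static void folio_equivalence_sketch(struct page *page)
{
	/* For an order-0 page, the folio is the page itself. */
	struct folio *folio = page_folio(page);

	VM_BUG_ON(&folio->page != page);

	/*
	 * folio_test_uptodate(folio) reports the same bit as
	 * PageUptodate(page), but without the compound_head()
	 * indirection the page-flag macros must perform.
	 */
}
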
@@ -981,10 +981,10 @@ static inline void arch_swap_invalidate_area(int type)
 }
 
 #define __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
-	if (system_supports_mte() && mte_restore_tags(entry, page))
-		set_bit(PG_mte_tagged, &page->flags);
+	if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
+		set_bit(PG_mte_tagged, &folio->flags);
 }
 
 #endif /* CONFIG_ARM64_MTE */
...
@@ -758,7 +758,7 @@ static inline void arch_swap_invalidate_area(int type)
 #endif
 
 #ifndef __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
 }
 #endif
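
With this change, an architecture that overrides the hook receives the folio directly; arm64 above reaches the sole constituent page via &folio->page to restore MTE tags. A hedged sketch of what another override could look like (the metadata helpers are hypothetical):

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
	/*
	 * Restore per-page metadata saved at swap-out time.  The folio
	 * is order-0 here, so &folio->page is its only page.
	 */
	if (arch_metadata_enabled())			/* hypothetical */
		arch_restore_metadata(entry, &folio->page); /* hypothetical */
}
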
...
@@ -135,8 +135,8 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
-static int shmem_swapin_page(struct inode *inode, pgoff_t index,
-			     struct page **pagep, enum sgp_type sgp,
+static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+			     struct folio **foliop, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type);
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1159,69 +1159,64 @@ static void shmem_evict_inode(struct inode *inode)
 }
 
 static int shmem_find_swap_entries(struct address_space *mapping,
-				   pgoff_t start, unsigned int nr_entries,
-				   struct page **entries, pgoff_t *indices,
-				   unsigned int type)
+				   pgoff_t start, struct folio_batch *fbatch,
+				   pgoff_t *indices, unsigned int type)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
-	struct page *page;
+	struct folio *folio;
 	swp_entry_t entry;
 	unsigned int ret = 0;
 
-	if (!nr_entries)
-		return 0;
-
 	rcu_read_lock();
-	xas_for_each(&xas, page, ULONG_MAX) {
-		if (xas_retry(&xas, page))
+	xas_for_each(&xas, folio, ULONG_MAX) {
+		if (xas_retry(&xas, folio))
 			continue;
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
 
-		entry = radix_to_swp_entry(page);
+		entry = radix_to_swp_entry(folio);
 		if (swp_type(entry) != type)
 			continue;
 
 		indices[ret] = xas.xa_index;
-		entries[ret] = page;
+		if (!folio_batch_add(fbatch, folio))
+			break;
 
 		if (need_resched()) {
 			xas_pause(&xas);
 			cond_resched_rcu();
 		}
-		if (++ret == nr_entries)
-			break;
 	}
 	rcu_read_unlock();
 
-	return ret;
+	return xas.xa_index;
 }
 
 /*
  * Move the swapped pages for an inode to page cache. Returns the count
  * of pages swapped in, or the error in case of failure.
  */
-static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
-				    pgoff_t *indices)
+static int shmem_unuse_swap_entries(struct inode *inode,
		struct folio_batch *fbatch, pgoff_t *indices)
 {
 	int i = 0;
 	int ret = 0;
 	int error = 0;
 	struct address_space *mapping = inode->i_mapping;
 
-	for (i = 0; i < pvec.nr; i++) {
-		struct page *page = pvec.pages[i];
+	for (i = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
-		error = shmem_swapin_page(inode, indices[i],
-					  &page, SGP_CACHE,
+		error = shmem_swapin_folio(inode, indices[i],
+					  &folio, SGP_CACHE,
 					  mapping_gfp_mask(mapping),
 					  NULL, NULL);
 		if (error == 0) {
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			ret++;
 		}
 		if (error == -ENOMEM)
@@ -1238,26 +1233,23 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	pgoff_t start = 0;
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
 	int ret = 0;
 
-	pagevec_init(&pvec);
 	do {
-		unsigned int nr_entries = PAGEVEC_SIZE;
-
-		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
-						  pvec.pages, indices, type);
-		if (pvec.nr == 0) {
+		folio_batch_init(&fbatch);
+		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
+		if (folio_batch_count(&fbatch) == 0) {
 			ret = 0;
 			break;
 		}
 
-		ret = shmem_unuse_swap_entries(inode, pvec, indices);
+		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
 		if (ret < 0)
 			break;
 
-		start = indices[pvec.nr - 1];
+		start = indices[folio_batch_count(&fbatch) - 1];
 	} while (true);
 
 	return ret;
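
The shmem_unuse_inode() loop above shows the folio_batch idiom that replaces struct pagevec: re-initialise the batch on each pass, let the producer fill it until folio_batch_add() returns 0 (no space left), then consume folio_batch_count() entries. A self-contained sketch of the same idiom, with hypothetical gather_one()/consume() helpers:

/* Hypothetical helpers, declared for the sketch only. */
static struct folio *gather_one(pgoff_t index);
static void consume(struct folio *folio);

static void batch_idiom_sketch(pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t index;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (index = start; index < end; index++) {
		struct folio *folio = gather_one(index);

		if (!folio)
			continue;
		/* Returns the slots remaining; 0 means the batch is full. */
		if (!folio_batch_add(&fbatch, folio))
			break;
	}

	for (i = 0; i < folio_batch_count(&fbatch); i++)
		consume(fbatch.folios[i]);

	/*
	 * Drops one reference per folio in the batch; gather_one() is
	 * assumed to return folios with a reference held.
	 */
	folio_batch_release(&fbatch);
}
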
@@ -1687,22 +1679,22 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
  * Returns 0 and the page in pagep if success. On failure, returns the
  * error code and NULL in *pagep.
  */
-static int shmem_swapin_page(struct inode *inode, pgoff_t index,
-			     struct page **pagep, enum sgp_type sgp,
+static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+			     struct folio **foliop, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-	struct page *page = NULL;
-	struct folio *folio;
+	struct page *page;
+	struct folio *folio = NULL;
 	swp_entry_t swap;
 	int error;
 
-	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
-	swap = radix_to_swp_entry(*pagep);
-	*pagep = NULL;
+	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
+	swap = radix_to_swp_entry(*foliop);
+	*foliop = NULL;
 
 	/* Look it up and read it in.. */
 	page = lookup_swap_cache(swap, NULL, 0);
@@ -1720,27 +1712,28 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 			goto failed;
 		}
 	}
+	folio = page_folio(page);
 
 	/* We have to do this with page locked to prevent races */
-	lock_page(page);
-	if (!PageSwapCache(page) || page_private(page) != swap.val ||
+	folio_lock(folio);
+	if (!folio_test_swapcache(folio) ||
+	    folio_swap_entry(folio).val != swap.val ||
 	    !shmem_confirm_swap(mapping, index, swap)) {
 		error = -EEXIST;
 		goto unlock;
 	}
-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		error = -EIO;
 		goto failed;
 	}
-	wait_on_page_writeback(page);
+	folio_wait_writeback(folio);
 
 	/*
 	 * Some architectures may have to restore extra metadata to the
-	 * physical page after reading from swap.
+	 * folio after reading from swap.
 	 */
-	arch_swap_restore(swap, page);
+	arch_swap_restore(swap, folio);
 
-	folio = page_folio(page);
 	if (shmem_should_replace_folio(folio, gfp)) {
 		error = shmem_replace_page(&page, gfp, info, index);
 		if (error)
@@ -1759,21 +1752,21 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 	spin_unlock_irq(&info->lock);
 
 	if (sgp == SGP_WRITE)
-		mark_page_accessed(page);
+		folio_mark_accessed(folio);
 
-	delete_from_swap_cache(page);
-	set_page_dirty(page);
+	delete_from_swap_cache(&folio->page);
+	folio_mark_dirty(folio);
 	swap_free(swap);
 
-	*pagep = page;
+	*foliop = folio;
 	return 0;
 failed:
 	if (!shmem_confirm_swap(mapping, index, swap))
 		error = -EEXIST;
 unlock:
-	if (page) {
-		unlock_page(page);
-		put_page(page);
+	if (folio) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 
 	return error;
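
Worth noting in the hunks above: the swap-cache lookup runs without the folio lock, so after folio_lock() the code revalidates that the folio is still the swap-cache folio for this entry (folio_test_swapcache(), folio_swap_entry(), shmem_confirm_swap()) before trusting it. A minimal sketch of that lock-then-revalidate shape, with the shmem-specific check omitted:

static int lock_and_revalidate(struct folio *folio, swp_entry_t swap)
{
	folio_lock(folio);
	/*
	 * The unlocked lookup may race with swap-in or reclaim; the
	 * folio could have been reused by the time we take the lock.
	 */
	if (!folio_test_swapcache(folio) ||
	    folio_swap_entry(folio).val != swap.val) {
		folio_unlock(folio);
		return -EEXIST;		/* raced: caller retries the lookup */
	}
	return 0;			/* still the right folio, now locked */
}
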
@@ -1827,13 +1820,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	}
 
 	if (xa_is_value(folio)) {
-		struct page *page = &folio->page;
-		error = shmem_swapin_page(inode, index, &page,
+		error = shmem_swapin_folio(inode, index, &folio,
 					  sgp, gfp, vma, fault_type);
 		if (error == -EEXIST)
 			goto repeat;
 
-		*pagep = page;
+		*pagep = &folio->page;
 		return error;
 	}
...