truncate,shmem: Add truncate_inode_folio()

Convert all callers of truncate_inode_page() to call
truncate_inode_folio() instead, and move the declaration to
mm/internal.h.  Move the assertion that the caller is not passing in
a tail page to generic_error_remove_page().  We can't entirely remove
the struct page from the callers yet because the page pointer in the
pvec might be a shadow/dax/swap entry instead of actually being a page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
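
The conversion, condensed from the hunks below (nothing here goes
beyond what the diff shows): the new helper takes a folio and keeps
only the mapping check, while the rest of the old function body,
elided in this sketch, is untouched by the patch.

	int truncate_inode_folio(struct address_space *mapping,
			struct folio *folio)
	{
		/* The folio may already have been removed from this mapping. */
		if (folio->mapping != mapping)
			return -EIO;

		truncate_cleanup_folio(folio);
		/* ... remainder of the old truncate_inode_page() body ... */
	}
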
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1859,7 +1859,6 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -92,6 +92,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 
 /**
  * folio_evictable - Test whether a folio is evictable.
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -950,7 +950,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			index += folio_nr_pages(folio) - 1;
 
 			if (!unfalloc || !folio_test_uptodate(folio))
-				truncate_inode_page(mapping, &folio->page);
+				truncate_inode_folio(mapping, folio);
 			folio_unlock(folio);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1027,7 +1027,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				}
 				VM_BUG_ON_PAGE(PageWriteback(page), page);
 				if (shmem_punch_compound(page, start, end))
-					truncate_inode_page(mapping, page);
+					truncate_inode_folio(mapping,
+							page_folio(page));
 				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 					/* Wipe the page and don't get stuck */
 					clear_highpage(page);
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -218,12 +218,9 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	return ret;
 }
 
-int truncate_inode_page(struct address_space *mapping, struct page *page)
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
-	VM_BUG_ON_PAGE(PageTail(page), page);
-
-	if (page->mapping != mapping)
+	if (folio->mapping != mapping)
 		return -EIO;
 
 	truncate_cleanup_folio(folio);
@@ -236,6 +233,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
  */
 int generic_error_remove_page(struct address_space *mapping, struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
 	if (!mapping)
 		return -EINVAL;
 	/*
@@ -244,7 +243,7 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
  */
 	if (!S_ISREG(mapping->host->i_mode))
 		return -EIO;
-	return truncate_inode_page(mapping, page);
+	return truncate_inode_folio(mapping, page_folio(page));
 }
 EXPORT_SYMBOL(generic_error_remove_page);
@@ -395,18 +394,20 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
+			struct folio *folio;
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
 
 			if (xa_is_value(page))
 				continue;
+			folio = page_folio(page);
 
-			lock_page(page);
-			WARN_ON(page_to_index(page) != index);
-			wait_on_page_writeback(page);
-			truncate_inode_page(mapping, page);
-			unlock_page(page);
+			folio_lock(folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			folio_wait_writeback(folio);
+			truncate_inode_folio(mapping, folio);
+			folio_unlock(folio);
 		}
 		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
 		pagevec_release(&pvec);
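
The caller-side effect, distilled from the hunks above: folio-native
callers like shmem_undo_range() pass their folio straight through,
while page-based call sites bridge with page_folio().  Because
page_folio() always resolves to the folio that owns the page, a tail
page can no longer reach the helper itself, which is why the PageTail
assertion moves to generic_error_remove_page(), the remaining entry
point that still takes a struct page:

	/* Caller already holds a folio: */
	truncate_inode_folio(mapping, folio);

	/* Caller still holds a struct page: */
	VM_BUG_ON_PAGE(PageTail(page), page);
	return truncate_inode_folio(mapping, page_folio(page));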