truncate,shmem: Add truncate_inode_folio()

Convert all callers of truncate_inode_page() to call
truncate_inode_folio() instead, and move the declaration of
truncate_inode_folio() to mm/internal.h.  Move the assertion that the
caller is not passing in a tail page to generic_error_remove_page().
We can't entirely remove the struct page from the callers yet because
the page pointer in the pvec might be a shadow/dax/swap entry rather
than an actual page.
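
[Editorial note] The last point is why the loops in the diff below still
start from a struct page pointer: a pagevec slot may hold an XArray
value entry (shadow, swap or DAX) rather than a real page, and callers
must skip such entries with xa_is_value() before page_folio() can be
used.  The stand-alone sketch below is only a userspace model of that
tagging scheme; struct page_stub and slot_is_value() are invented names
for illustration, not kernel APIs.

/* Illustrative userspace model only -- not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct page_stub { unsigned long index; };	/* stands in for struct page */

/*
 * Like the kernel's XArray, encode non-page entries (shadow/swap/DAX)
 * as pointers with bit 0 set; real page pointers are word-aligned.
 */
static bool slot_is_value(const void *entry)
{
	return (uintptr_t)entry & 1;		/* mirrors xa_is_value() */
}

int main(void)
{
	struct page_stub page = { .index = 42 };
	const void *slots[] = { &page, (void *)((2UL << 1) | 1) };

	for (unsigned i = 0; i < 2; i++) {
		if (slot_is_value(slots[i])) {	/* shadow/swap/DAX entry */
			printf("slot %u: value entry, skipped\n", i);
			continue;
		}
		/* Only now is it safe to treat the slot as a page. */
		printf("slot %u: page at index %lu\n", i,
		       ((const struct page_stub *)slots[i])->index);
	}
	return 0;
}
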
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1859,7 +1859,6 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);

--- a/mm/internal.h
+++ b/mm/internal.h
@@ -92,6 +92,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 
 /**
  * folio_evictable - Test whether a folio is evictable.

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -950,7 +950,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			index += folio_nr_pages(folio) - 1;
 
 			if (!unfalloc || !folio_test_uptodate(folio))
-				truncate_inode_page(mapping, &folio->page);
+				truncate_inode_folio(mapping, folio);
 			folio_unlock(folio);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1027,7 +1027,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				}
 				VM_BUG_ON_PAGE(PageWriteback(page), page);
 				if (shmem_punch_compound(page, start, end))
-					truncate_inode_page(mapping, page);
+					truncate_inode_folio(mapping,
+							page_folio(page));
 				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 					/* Wipe the page and don't get stuck */
 					clear_highpage(page);

--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -218,12 +218,9 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	return ret;
 }
 
-int truncate_inode_page(struct address_space *mapping, struct page *page)
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
-	VM_BUG_ON_PAGE(PageTail(page), page);
-	if (page->mapping != mapping)
+	if (folio->mapping != mapping)
 		return -EIO;
 
 	truncate_cleanup_folio(folio);
@@ -236,6 +233,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
  */
 int generic_error_remove_page(struct address_space *mapping, struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
 	if (!mapping)
 		return -EINVAL;
 	/*
@@ -244,7 +243,7 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
 	 */
 	if (!S_ISREG(mapping->host->i_mode))
 		return -EIO;
-	return truncate_inode_page(mapping, page);
+	return truncate_inode_folio(mapping, page_folio(page));
 }
 EXPORT_SYMBOL(generic_error_remove_page);
 
@@ -395,18 +394,20 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
+			struct folio *folio;
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
 			if (xa_is_value(page))
 				continue;
+			folio = page_folio(page);
 
-			lock_page(page);
-			WARN_ON(page_to_index(page) != index);
-			wait_on_page_writeback(page);
-			truncate_inode_page(mapping, page);
-			unlock_page(page);
+			folio_lock(folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			folio_wait_writeback(folio);
+			truncate_inode_folio(mapping, folio);
+			folio_unlock(folio);
 		}
 		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
 		pagevec_release(&pvec);
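
[Editorial note] In the final mm/truncate.c hunk, the old loop warned
when page_to_index(page) != index, an exact match, while the folio
version checks VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio).
A folio can span several consecutive page-cache indices, so a range
check is the folio-level counterpart of the old test.  The stand-alone
sketch below only models that range semantics; struct folio_model and
folio_model_contains() are invented names for illustration, not kernel
APIs.

/* Illustrative userspace model only -- not kernel code. */
#include <assert.h>
#include <stdbool.h>

struct folio_model {
	unsigned long index;		/* first page-cache index covered */
	unsigned long nr_pages;		/* number of base pages in the folio */
};

/* Models the range check performed by folio_contains(). */
static bool folio_model_contains(const struct folio_model *folio,
				 unsigned long index)
{
	return index >= folio->index &&
	       index - folio->index < folio->nr_pages;
}

int main(void)
{
	/* A 4-page folio starting at index 16 covers indices 16..19. */
	struct folio_model folio = { .index = 16, .nr_pages = 4 };

	assert(folio_model_contains(&folio, 16));
	assert(folio_model_contains(&folio, 19));
	assert(!folio_model_contains(&folio, 20));
	return 0;
}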