truncate,shmem: Handle truncates that split large folios

Handle folio splitting in the parts of the truncation functions which
already handle partial pages.  Factor all that code out into a new
function called truncate_inode_partial_folio().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent f6357c3a
...@@ -98,6 +98,8 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start, ...@@ -98,6 +98,8 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices); pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio); void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio); int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);
/** /**
* folio_evictable - Test whether a folio is evictable. * folio_evictable - Test whether a folio is evictable.
......
...@@ -880,30 +880,26 @@ void shmem_unlock_mapping(struct address_space *mapping) ...@@ -880,30 +880,26 @@ void shmem_unlock_mapping(struct address_space *mapping)
} }
} }
/* static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
* Check whether a hole-punch or truncation needs to split a huge page,
* returning true if no split was required, or the split has been successful.
*
* Eviction (or truncation to 0 size) should never need to split a huge page;
* but in rare cases might do so, if shmem_undo_range() failed to trylock on
* head, and then succeeded to trylock on tail.
*
* A split can only succeed when there are no additional references on the
* huge page: so the split below relies upon find_get_entries() having stopped
* when it found a subpage of the huge page, without getting further references.
*/
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{ {
if (!PageTransCompound(page)) struct folio *folio;
return true; struct page *page;
/* Just proceed to delete a huge page wholly within the range punched */
if (PageHead(page) &&
page->index >= start && page->index + HPAGE_PMD_NR <= end)
return true;
/* Try to split huge page, so we can truly punch the hole or truncate */ /*
return split_huge_page(page) >= 0; * At first avoid shmem_getpage(,,,SGP_READ): that fails
* beyond i_size, and reports fallocated pages as holes.
*/
folio = __filemap_get_folio(inode->i_mapping, index,
FGP_ENTRY | FGP_LOCK, 0);
if (!xa_is_value(folio))
return folio;
/*
* But read a page back from swap if any of it is within i_size
* (although in some cases this is just a waste of time).
*/
page = NULL;
shmem_getpage(inode, index, &page, SGP_READ);
return page ? page_folio(page) : NULL;
} }
/* /*
...@@ -917,10 +913,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -917,10 +913,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
pgoff_t end = (lend + 1) >> PAGE_SHIFT; pgoff_t end = (lend + 1) >> PAGE_SHIFT;
unsigned int partial_start = lstart & (PAGE_SIZE - 1);
unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
struct folio_batch fbatch; struct folio_batch fbatch;
pgoff_t indices[PAGEVEC_SIZE]; pgoff_t indices[PAGEVEC_SIZE];
struct folio *folio;
bool same_folio;
long nr_swaps_freed = 0; long nr_swaps_freed = 0;
pgoff_t index; pgoff_t index;
int i; int i;
...@@ -936,7 +932,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -936,7 +932,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
while (index < end && find_lock_entries(mapping, index, end - 1, while (index < end && find_lock_entries(mapping, index, end - 1,
&fbatch, indices)) { &fbatch, indices)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) { for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i]; folio = fbatch.folios[i];
index = indices[i]; index = indices[i];
...@@ -959,33 +955,30 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -959,33 +955,30 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
index++; index++;
} }
if (partial_start) { same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
struct page *page = NULL; folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
shmem_getpage(inode, start - 1, &page, SGP_READ); if (folio) {
if (page) { same_folio = lend < folio_pos(folio) + folio_size(folio);
unsigned int top = PAGE_SIZE; folio_mark_dirty(folio);
if (start > end) { if (!truncate_inode_partial_folio(folio, lstart, lend)) {
top = partial_end; start = folio->index + folio_nr_pages(folio);
partial_end = 0; if (same_folio)
} end = folio->index;
zero_user_segment(page, partial_start, top);
set_page_dirty(page);
unlock_page(page);
put_page(page);
}
} }
if (partial_end) { folio_unlock(folio);
struct page *page = NULL; folio_put(folio);
shmem_getpage(inode, end, &page, SGP_READ); folio = NULL;
if (page) {
zero_user_segment(page, 0, partial_end);
set_page_dirty(page);
unlock_page(page);
put_page(page);
} }
if (!same_folio)
folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
if (folio) {
folio_mark_dirty(folio);
if (!truncate_inode_partial_folio(folio, lstart, lend))
end = folio->index;
folio_unlock(folio);
folio_put(folio);
} }
if (start >= end)
return;
index = start; index = start;
while (index < end) { while (index < end) {
...@@ -1001,7 +994,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -1001,7 +994,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
continue; continue;
} }
for (i = 0; i < folio_batch_count(&fbatch); i++) { for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i]; folio = fbatch.folios[i];
index = indices[i]; index = indices[i];
if (xa_is_value(folio)) { if (xa_is_value(folio)) {
...@@ -1019,8 +1012,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -1019,8 +1012,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
folio_lock(folio); folio_lock(folio);
if (!unfalloc || !folio_test_uptodate(folio)) { if (!unfalloc || !folio_test_uptodate(folio)) {
struct page *page = folio_file_page(folio,
index);
if (folio_mapping(folio) != mapping) { if (folio_mapping(folio) != mapping) {
/* Page was replaced by swap: retry */ /* Page was replaced by swap: retry */
folio_unlock(folio); folio_unlock(folio);
...@@ -1029,18 +1020,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -1029,18 +1020,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
} }
VM_BUG_ON_FOLIO(folio_test_writeback(folio), VM_BUG_ON_FOLIO(folio_test_writeback(folio),
folio); folio);
if (shmem_punch_compound(page, start, end))
truncate_inode_folio(mapping, folio); truncate_inode_folio(mapping, folio);
else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
/* Wipe the page and don't get stuck */
clear_highpage(page);
flush_dcache_page(page);
folio_mark_dirty(folio);
if (index <
round_up(start, HPAGE_PMD_NR))
start = index + 1;
}
} }
index = folio->index + folio_nr_pages(folio) - 1;
folio_unlock(folio); folio_unlock(folio);
} }
folio_batch_remove_exceptionals(&fbatch); folio_batch_remove_exceptionals(&fbatch);
......
...@@ -228,6 +228,58 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio) ...@@ -228,6 +228,58 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
return 0; return 0;
} }
/*
 * Handle partial folios.  The folio may be entirely within the
 * range if a split has raced with us.  If not, we zero the part of the
 * folio that's within the [start, end] range, and then split the folio if
 * it's large.  split_huge_page() will discard pages which now lie beyond
 * i_size, and we rely on the caller to discard pages which lie within a
 * newly created hole.
 *
 * Returns false if splitting failed so the caller can avoid
 * discarding the entire folio which is stubbornly unsplit.
 */
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
	loff_t pos = folio_pos(folio);
	unsigned int offset, length;

	/*
	 * Compute the byte range of this folio that overlaps [start, end]:
	 * offset is where the overlap begins within the folio, length is
	 * how many bytes of the folio fall inside the truncated range.
	 */
	if (pos < start)
		offset = start - pos;
	else
		offset = 0;
	length = folio_size(folio);
	if (pos + length <= (u64)end)
		length = length - offset;
	else
		length = end + 1 - pos - offset;

	folio_wait_writeback(folio);
	/* The whole folio lies inside the range: truncate it outright. */
	if (length == folio_size(folio)) {
		truncate_inode_folio(folio->mapping, folio);
		return true;
	}

	/*
	 * We may be zeroing pages we're about to discard, but it avoids
	 * doing a complex calculation here, and then doing the zeroing
	 * anyway if the page split fails.
	 */
	folio_zero_range(folio, offset, length);

	cleancache_invalidate_page(folio->mapping, &folio->page);
	/* Let the filesystem release private data covering the zeroed span. */
	if (folio_has_private(folio))
		do_invalidatepage(&folio->page, offset, length);
	/* A single-page folio needs no split; the zeroing above suffices. */
	if (!folio_test_large(folio))
		return true;
	if (split_huge_page(&folio->page) == 0)
		return true;
	/* Split failed on a dirty folio: keep it, caller must not discard. */
	if (folio_test_dirty(folio))
		return false;
	/* Split failed but the folio is clean: safe to drop it whole. */
	truncate_inode_folio(folio->mapping, folio);
	return true;
}
/* /*
* Used to get rid of pages on hardware memory corruption. * Used to get rid of pages on hardware memory corruption.
*/ */
...@@ -294,20 +346,16 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -294,20 +346,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
{ {
pgoff_t start; /* inclusive */ pgoff_t start; /* inclusive */
pgoff_t end; /* exclusive */ pgoff_t end; /* exclusive */
unsigned int partial_start; /* inclusive */
unsigned int partial_end; /* exclusive */
struct folio_batch fbatch; struct folio_batch fbatch;
pgoff_t indices[PAGEVEC_SIZE]; pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index; pgoff_t index;
int i; int i;
struct folio *folio;
bool same_folio;
if (mapping_empty(mapping)) if (mapping_empty(mapping))
goto out; goto out;
/* Offsets within partial pages */
partial_start = lstart & (PAGE_SIZE - 1);
partial_end = (lend + 1) & (PAGE_SIZE - 1);
/* /*
* 'start' and 'end' always covers the range of pages to be fully * 'start' and 'end' always covers the range of pages to be fully
* truncated. Partial pages are covered with 'partial_start' at the * truncated. Partial pages are covered with 'partial_start' at the
...@@ -340,47 +388,32 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -340,47 +388,32 @@ void truncate_inode_pages_range(struct address_space *mapping,
cond_resched(); cond_resched();
} }
if (partial_start) { same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
struct page *page = find_lock_page(mapping, start - 1); folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
if (page) { if (folio) {
unsigned int top = PAGE_SIZE; same_folio = lend < folio_pos(folio) + folio_size(folio);
if (start > end) { if (!truncate_inode_partial_folio(folio, lstart, lend)) {
/* Truncation within a single page */ start = folio->index + folio_nr_pages(folio);
top = partial_end; if (same_folio)
partial_end = 0; end = folio->index;
}
wait_on_page_writeback(page);
zero_user_segment(page, partial_start, top);
cleancache_invalidate_page(mapping, page);
if (page_has_private(page))
do_invalidatepage(page, partial_start,
top - partial_start);
unlock_page(page);
put_page(page);
} }
folio_unlock(folio);
folio_put(folio);
folio = NULL;
} }
if (partial_end) {
struct page *page = find_lock_page(mapping, end); if (!same_folio)
if (page) { folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
wait_on_page_writeback(page); FGP_LOCK, 0);
zero_user_segment(page, 0, partial_end); if (folio) {
cleancache_invalidate_page(mapping, page); if (!truncate_inode_partial_folio(folio, lstart, lend))
if (page_has_private(page)) end = folio->index;
do_invalidatepage(page, 0, folio_unlock(folio);
partial_end); folio_put(folio);
unlock_page(page);
put_page(page);
}
} }
/*
* If the truncation happened within a single page no pages
* will be released, just zeroed, so we can bail out now.
*/
if (start >= end)
goto out;
index = start; index = start;
for ( ; ; ) { while (index < end) {
cond_resched(); cond_resched();
if (!find_get_entries(mapping, index, end - 1, &fbatch, if (!find_get_entries(mapping, index, end - 1, &fbatch,
indices)) { indices)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment