Commit 16e4bce6 authored by Linus Torvalds

Merge tag 'folio-5.19b' of git://git.infradead.org/users/willy/pagecache

Pull pagecache fixes from Matthew Wilcox:
 "Four folio-related fixes for 5.19:

   - Mark a folio accessed at the right time (Yu Kuai)

   - Fix a race for folios being replaced in the middle of a read (Brian
     Foster)

   - Clear folio->private in more places (Xiubo Li)

   - Take the invalidate_lock in page_cache_ra_order() (Alistair Popple)"

* tag 'folio-5.19b' of git://git.infradead.org/users/willy/pagecache:
  filemap: Fix serialization adding transparent huge pages to page cache
  mm: Clear page->private when splitting or migrating a page
  filemap: Handle sibling entries in filemap_get_read_batch()
  filemap: Correct the conditions for marking a folio as accessed
parents 599d1691 00fa15e0
mm/filemap.c
@@ -2385,6 +2385,8 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			continue;
 		if (xas.xa_index > max || xa_is_value(folio))
 			break;
+		if (xa_is_sibling(folio))
+			break;
 		if (!folio_try_get_rcu(folio))
 			goto retry;
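
The two added lines guard against XArray sibling entries. A large folio occupies a range of indices in the page cache's XArray but is stored only once; the other slots in its range hold internal sibling entries that refer back to the canonical slot. A lockless batched lookup racing with a folio being split or replaced can observe such an entry, and dereferencing it as a folio would be a use of garbage, so the batch simply stops there. Below is a minimal userspace model of that rule, not the kernel XArray API; the slots array, the SIBLING marker, and batch_lookup() are all illustrative:

#include <stdio.h>

#define NR_SLOTS 8
#define SIBLING  ((void *)1)    /* stand-in for an internal sibling entry */

static void *slots[NR_SLOTS];

/*
 * Store a "large folio" covering [base, base + count): the entry lives
 * at base, every other slot in the range holds a sibling marker.
 */
static void store_multi_index(void *entry, int base, int count)
{
        slots[base] = entry;
        for (int i = 1; i < count; i++)
                slots[base + i] = SIBLING;
}

/*
 * Batched lookup in the spirit of filemap_get_read_batch(): skip empty
 * slots, but stop dead on a sibling entry instead of treating it as a
 * folio pointer.
 */
static void batch_lookup(int start, int max)
{
        for (int i = start; i <= max; i++) {
                if (!slots[i])
                        continue;
                if (slots[i] == SIBLING) {
                        printf("index %d: sibling entry, stop\n", i);
                        break;
                }
                printf("index %d: folio %s\n", i, (char *)slots[i]);
        }
}

int main(void)
{
        store_multi_index("A", 0, 4);   /* order-2 folio at indices 0-3 */
        batch_lookup(1, 7);             /* lookup starting inside it */
        return 0;
}
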
@@ -2629,6 +2631,13 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
 	return err;
 }
 
+static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
+{
+	unsigned int shift = folio_shift(folio);
+
+	return (pos1 >> shift == pos2 >> shift);
+}
+
 /**
  * filemap_read - Read data from the page cache.
  * @iocb: The iocb to read.
@@ -2700,11 +2709,11 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		writably_mapped = mapping_writably_mapped(mapping);
 
 		/*
-		 * When a sequential read accesses a page several times, only
+		 * When a read accesses the same folio several times, only
 		 * mark it as accessed the first time.
 		 */
-		if (iocb->ki_pos >> PAGE_SHIFT !=
-		    ra->prev_pos >> PAGE_SHIFT)
+		if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
+				    fbatch.folios[0]))
 			folio_mark_accessed(fbatch.folios[0]);
 
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
...
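
The new helper compares two file positions at folio granularity rather than page granularity: folio_shift() is PAGE_SHIFT plus the folio's order, so the comparison only differs once the positions land in different folios. ra->prev_pos points just past the previously read byte, hence the prev_pos - 1. A standalone arithmetic check of the same idea, assuming 4 KiB pages (PAGE_SHIFT == 12) and taking the folio order as a plain parameter instead of a struct folio:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

/* Model of pos_same_folio(): folio_shift() == PAGE_SHIFT + order. */
static bool pos_same_folio(long long pos1, long long pos2, unsigned int order)
{
        unsigned int shift = PAGE_SHIFT + order;

        return (pos1 >> shift) == (pos2 >> shift);
}

int main(void)
{
        long long ki_pos = 3 * 4096;    /* this read starts at page 3 */
        long long prev_pos = 3 * 4096;  /* previous read ended here */

        /*
         * Old behaviour (per-page, order 0): pages 2 and 3 differ, so
         * the folio would be marked accessed on every iteration.
         */
        printf("per-page:  same? %d\n", pos_same_folio(ki_pos, prev_pos - 1, 0));

        /*
         * New behaviour for an order-2 (16 KiB) folio covering pages
         * 0-3: both positions fall inside it, so folio_mark_accessed()
         * is correctly skipped.
         */
        printf("per-folio: same? %d\n", pos_same_folio(ki_pos, prev_pos - 1, 2));
        return 0;
}
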
mm/huge_memory.c
@@ -2377,6 +2377,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
 			page_tail);
 	page_tail->mapping = head->mapping;
 	page_tail->index = head->index + tail;
+	page_tail->private = 0;
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
...
mm/migrate.c
@@ -1106,6 +1106,7 @@ static int unmap_and_move(new_page_t get_new_page,
 	if (!newpage)
 		return -ENOMEM;
 
+	newpage->private = 0;
 	rc = __unmap_and_move(page, newpage, force, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		set_page_owner_migrate_reason(newpage, reason);
...
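
Both of the preceding hunks close the same hole: page->private belongs to the filesystem that owns the page, and neither a tail page carved out of a huge page nor a freshly allocated migration target starts with a meaningful value there, so a stale bit pattern could later be misread as filesystem state. A reduced userspace model of the invariant, with struct page cut down to the three fields involved and all names illustrative:

#include <stdio.h>

struct page {
        void *mapping;
        unsigned long index;
        unsigned long private;  /* owned by the filesystem, when set */
};

/*
 * Model of __split_huge_page_tail(): a tail page inherits mapping and
 * index from the head, but must NOT inherit head->private, which
 * described the head page only. The migration fix is the same rule
 * applied to a newly allocated destination page.
 */
static void split_tail(struct page *head, struct page *tail, int nr)
{
        tail->mapping = head->mapping;
        tail->index = head->index + nr;
        tail->private = 0;      /* the fix: no stale fs-private state */
}

int main(void)
{
        struct page head = { .mapping = (void *)0x1, .index = 64,
                             .private = 0xdead };       /* fs data on head */
        struct page tail = { .private = 0xbeef };       /* stale garbage */

        split_tail(&head, &tail, 1);
        printf("tail: index=%lu private=%#lx\n", tail.index, tail.private);
        return 0;
}
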
mm/readahead.c
@@ -510,6 +510,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 			new_order--;
 	}
 
+	filemap_invalidate_lock_shared(mapping);
 	while (index <= limit) {
 		unsigned int order = new_order;
 
@@ -536,6 +537,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	}
 
 	read_pages(ractl);
+	filemap_invalidate_unlock_shared(mapping);
 
 	/*
 	 * If there were already pages in the page cache, then we may have
...
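
These two hunks make page_cache_ra_order() hold the mapping's invalidate_lock in shared mode across folio allocation and the read_pages() call: paths that add pages to the page cache take the lock shared, while truncation and invalidation take it exclusive, so readahead can no longer insert a transparent huge page into a range that is concurrently being truncated. A rough userspace model of that reader/writer discipline using a pthread rwlock; the thread bodies and messages are illustrative, not kernel code:

#include <pthread.h>
#include <stdio.h>

/*
 * Stand-in for mapping->invalidate_lock: shared for code that adds
 * pages to the cache (readahead), exclusive for truncate/invalidate.
 */
static pthread_rwlock_t invalidate_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *readahead_thread(void *arg)
{
        pthread_rwlock_rdlock(&invalidate_lock);        /* lock_shared */
        puts("readahead: allocating and inserting large folios");
        pthread_rwlock_unlock(&invalidate_lock);        /* unlock_shared */
        return NULL;
}

static void *truncate_thread(void *arg)
{
        pthread_rwlock_wrlock(&invalidate_lock);        /* exclusive */
        puts("truncate: removing pages; no insertion can interleave");
        pthread_rwlock_unlock(&invalidate_lock);
        return NULL;
}

int main(void)
{
        pthread_t ra, tr;

        pthread_create(&ra, NULL, readahead_thread, NULL);
        pthread_create(&tr, NULL, truncate_thread, NULL);
        pthread_join(ra, NULL);
        pthread_join(tr, NULL);
        return 0;       /* build with: cc demo.c -lpthread */
}
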