Commit 139b6a6f authored by Johannes Weiner, committed by Linus Torvalds

mm: filemap: update find_get_pages_tag() to deal with shadow entries

Dave Jones reports the following crash when find_get_pages_tag() runs
into an exceptional entry:

  kernel BUG at mm/filemap.c:1347!
  RIP: find_get_pages_tag+0x1cb/0x220
  Call Trace:
    find_get_pages_tag+0x36/0x220
    pagevec_lookup_tag+0x21/0x30
    filemap_fdatawait_range+0xbe/0x1e0
    filemap_fdatawait+0x27/0x30
    sync_inodes_sb+0x204/0x2a0
    sync_inodes_one_sb+0x19/0x20
    iterate_supers+0xb2/0x110
    sys_sync+0x44/0xb0
    ia32_do_call+0x13/0x13

  1343                         /*
  1344                          * This function is never used on a shmem/tmpfs
  1345                          * mapping, so a swap entry won't be found here.
  1346                          */
  1347                         BUG();

After commit 0cd6144a ("mm + fs: prepare for non-page entries in
page cache radix trees") this comment and BUG() are out of date because
exceptional entries can now appear in all mappings - as shadows of
recently evicted pages.
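
As a refresher on the encoding: the kernel distinguishes real page
pointers from exceptional entries by a low tag bit that no properly
aligned struct page pointer can carry, tested by
radix_tree_exceptional_entry() (visible in the memcontrol hunk below).
Here is a minimal userspace sketch of that test; the constant matches
the radix-tree header of this era, but this is an illustration, not
kernel code:

  #include <stdio.h>

  /* Matches RADIX_TREE_EXCEPTIONAL_ENTRY in include/linux/radix-tree.h
   * of this era: exceptional (shadow/swap) entries have bit 1 set,
   * which a word-aligned struct page pointer never does. */
  #define RADIX_TREE_EXCEPTIONAL_ENTRY 2UL

  static int radix_tree_exceptional_entry(void *entry)
  {
          return ((unsigned long)entry & RADIX_TREE_EXCEPTIONAL_ENTRY) != 0;
  }

  int main(void)
  {
          long backing;                  /* stand-in for a struct page */
          void *page = &backing;         /* aligned: low bits clear */
          void *shadow = (void *)(0x1230UL | RADIX_TREE_EXCEPTIONAL_ENTRY);

          printf("page:   exceptional? %d\n", radix_tree_exceptional_entry(page));
          printf("shadow: exceptional? %d\n", radix_tree_exceptional_entry(shadow));
          return 0;
  }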

However, as Hugh Dickins notes,

  "it is truly surprising for a PAGECACHE_TAG_WRITEBACK (and probably
   any other PAGECACHE_TAG_*) to appear on an exceptional entry.

   I expect it comes down to an occasional race in RCU lookup of the
   radix_tree: lacking absolute synchronization, we might sometimes
   catch an exceptional entry, with the tag which really belongs with
   the unexceptional entry which was there an instant before."

And indeed, not only is the tree walk lockless, the tags are also read
in chunks, one radix tree node at a time.  There is plenty of time for
page reclaim to swoop in and replace a page that was already looked up
as tagged, leaving a shadow entry in its slot.
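
To make the window concrete, here is a deterministic userspace sketch
of that two-step lookup, using toy structures rather than the kernel's
radix tree: tags for a whole node are snapshotted first, and the slots
are only dereferenced afterwards, so an eviction between the two steps
hands the reader a tagged shadow entry:

  #include <stdio.h>

  #define EXCEPTIONAL_BIT 2UL     /* low tag bit marking non-page entries */
  #define NODE_SLOTS 4

  static void *slots[NODE_SLOTS];        /* one toy radix tree node */
  static unsigned long writeback_tags;   /* per-slot PAGECACHE_TAG_WRITEBACK */

  static int exceptional(void *e)
  {
          return ((unsigned long)e & EXCEPTIONAL_BIT) != 0;
  }

  int main(void)
  {
          long page;                     /* stand-in for a struct page */
          void *shadow = (void *)(0x40UL | EXCEPTIONAL_BIT);

          slots[1] = &page;              /* a page under writeback... */
          writeback_tags = 1UL << 1;     /* ...so its slot is tagged */

          /* Step 1: the lockless walker snapshots the node's tags in bulk. */
          unsigned long snapshot = writeback_tags;

          /* Reclaim runs in the window: the page is evicted and its slot
           * now holds a shadow entry, but the snapshot still says "tagged". */
          slots[1] = shadow;

          /* Step 2: the walker dereferences the slots its snapshot flagged. */
          for (int i = 0; i < NODE_SLOTS; i++) {
                  if (!(snapshot & (1UL << i)))
                          continue;
                  void *entry = slots[i];
                  if (exceptional(entry)) {
                          printf("slot %d: tagged shadow entry - skip\n", i);
                          continue;     /* the fix below; old code hit BUG() */
                  }
                  printf("slot %d: real page at %p\n", i, entry);
          }
          return 0;
  }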

Remove the BUG() and update the comment.  While reviewing all other
lookup sites for whether they properly deal with shadow entries of
evicted pages, update all the comments and fix memcg file charge moving
to not miss shmem/tmpfs swapcache pages.

Fixes: 0cd6144a ("mm + fs: prepare for non-page entries in page cache radix trees")
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Dave Jones <davej@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 49e068f0
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -906,8 +906,8 @@ EXPORT_SYMBOL(page_cache_prev_hole);
  * Looks up the page cache slot at @mapping & @offset.  If there is a
  * page cache page, it is returned with an increased refcount.
  *
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
  *
  * Otherwise, %NULL is returned.
  */
@@ -928,9 +928,9 @@ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 		if (radix_tree_deref_retry(page))
 			goto repeat;
 		/*
-		 * Otherwise, shmem/tmpfs must be storing a swap entry
-		 * here as an exceptional entry: so return it without
-		 * attempting to raise page count.
+		 * A shadow entry of a recently evicted page,
+		 * or a swap entry from shmem/tmpfs.  Return
+		 * it without attempting to raise page count.
 		 */
 		goto out;
 	}
@@ -983,8 +983,8 @@ EXPORT_SYMBOL(find_get_page);
  * page cache page, it is returned locked and with an increased
  * refcount.
  *
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
  *
  * Otherwise, %NULL is returned.
  *
@@ -1099,8 +1099,8 @@ EXPORT_SYMBOL(find_or_create_page);
  * with ascending indexes.  There may be holes in the indices due to
  * not-present pages.
  *
- * Any shadow entries of evicted pages are included in the returned
- * array.
+ * Any shadow entries of evicted pages, or swap entries from
+ * shmem/tmpfs, are included in the returned array.
  *
  * find_get_entries() returns the number of pages and shadow entries
  * which were found.
@@ -1128,9 +1128,9 @@ unsigned find_get_entries(struct address_space *mapping,
 			if (radix_tree_deref_retry(page))
 				goto restart;
 			/*
-			 * Otherwise, we must be storing a swap entry
-			 * here as an exceptional entry: so return it
-			 * without attempting to raise page count.
+			 * A shadow entry of a recently evicted page,
+			 * or a swap entry from shmem/tmpfs.  Return
+			 * it without attempting to raise page count.
 			 */
 			goto export;
 		}
@@ -1198,9 +1198,9 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 				goto restart;
 			}
 			/*
-			 * Otherwise, shmem/tmpfs must be storing a swap entry
-			 * here as an exceptional entry: so skip over it -
-			 * we only reach this from invalidate_mapping_pages().
+			 * A shadow entry of a recently evicted page,
+			 * or a swap entry from shmem/tmpfs.  Skip
+			 * over it.
 			 */
 			continue;
 		}
@@ -1265,9 +1265,9 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 				goto restart;
 			}
 			/*
-			 * Otherwise, shmem/tmpfs must be storing a swap entry
-			 * here as an exceptional entry: so stop looking for
-			 * contiguous pages.
+			 * A shadow entry of a recently evicted page,
+			 * or a swap entry from shmem/tmpfs.  Stop
+			 * looking for contiguous pages.
 			 */
 			break;
 		}
@@ -1341,10 +1341,17 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 				goto restart;
 			}
 			/*
-			 * This function is never used on a shmem/tmpfs
-			 * mapping, so a swap entry won't be found here.
+			 * A shadow entry of a recently evicted page.
+			 *
+			 * Those entries should never be tagged, but
+			 * this tree walk is lockless and the tags are
+			 * looked up in bulk, one radix tree node at a
+			 * time, so there is a sizable window for page
+			 * reclaim to evict a page we saw tagged.
+			 *
+			 * Skip over it.
 			 */
-			BUG();
+			continue;
 		}
 
 		if (!page_cache_get_speculative(page))
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6686,16 +6686,20 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
 	pgoff = pte_to_pgoff(ptent);
 
 	/* page is moved even if it's not RSS of this task(page-faulted). */
-	page = find_get_page(mapping, pgoff);
-
 #ifdef CONFIG_SWAP
 	/* shmem/tmpfs may report page out on swap: account for that too. */
-	if (radix_tree_exceptional_entry(page)) {
-		swp_entry_t swap = radix_to_swp_entry(page);
-		if (do_swap_account)
-			*entry = swap;
-		page = find_get_page(swap_address_space(swap), swap.val);
-	}
+	if (shmem_mapping(mapping)) {
+		page = find_get_entry(mapping, pgoff);
+		if (radix_tree_exceptional_entry(page)) {
+			swp_entry_t swp = radix_to_swp_entry(page);
+			if (do_swap_account)
+				*entry = swp;
+			page = find_get_page(swap_address_space(swp), swp.val);
+		}
+	} else
+		page = find_get_page(mapping, pgoff);
+#else
+	page = find_get_page(mapping, pgoff);
 #endif
 	return page;
 }
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -484,14 +484,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 	unsigned long count = 0;
 	int i;
 
-	/*
-	 * Note: this function may get called on a shmem/tmpfs mapping:
-	 * pagevec_lookup() might then return 0 prematurely (because it
-	 * got a gangful of swap entries); but it's hardly worth worrying
-	 * about - it can rarely have anything to free from such a mapping
-	 * (most pages are dirty), and already skips over any difficulties.
-	 */
-
 	pagevec_init(&pvec, 0);
 	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,