Commit 0142ef6c authored by Hugh Dickins, committed by Linus Torvalds

shmem: replace_page must flush_dcache and others

Commit bde05d1c ("shmem: replace page if mapping excludes its zone")
is not at all likely to break for anyone, but it was an earlier version
from before review feedback was incorporated.  Fix that up now.

* shmem_replace_page must flush_dcache_page after copy_highpage [akpm]
* Expand comment on why shmem_unuse_inode needs page_swapcount [akpm]
* Remove excess of VM_BUG_ONs from shmem_replace_page [wangcong]
* Check page_private matches swap before calling shmem_replace_page [hughd]
* shmem_replace_page allow for unexpected race in radix_tree lookup [hughd]
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Stephane Marchesin <marcheu@chromium.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Dave Airlie <airlied@gmail.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Rob Clark <rob.clark@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 71fae7e7
...@@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, ...@@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
mutex_lock(&shmem_swaplist_mutex); mutex_lock(&shmem_swaplist_mutex);
/* /*
* We needed to drop mutex to make that restrictive page * We needed to drop mutex to make that restrictive page
* allocation; but the inode might already be freed by now, * allocation, but the inode might have been freed while we
* and we cannot refer to inode or mapping or info to check. * dropped it: although a racing shmem_evict_inode() cannot
* However, we do hold page lock on the PageSwapCache page, * complete without emptying the radix_tree, our page lock
* so can check if that still has our reference remaining. * on this swapcache page is not enough to prevent that -
* free_swap_and_cache() of our swap entry will only
* trylock_page(), removing swap from radix_tree whatever.
*
* We must not proceed to shmem_add_to_page_cache() if the
* inode has been freed, but of course we cannot rely on
* inode or mapping or info to check that. However, we can
* safely check if our swap entry is still in use (and here
* it can't have got reused for another page): if it's still
* in use, then the inode cannot have been freed yet, and we
* can safely proceed (if it's no longer in use, that tells
* nothing about the inode, but we don't need to unuse swap).
*/ */
if (!page_swapcount(*pagep)) if (!page_swapcount(*pagep))
error = -ENOENT; error = -ENOENT;
...@@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page) ...@@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
/* /*
* There's a faint possibility that swap page was replaced before * There's a faint possibility that swap page was replaced before
* caller locked it: it will come back later with the right page. * caller locked it: caller will come back later with the right page.
*/ */
if (unlikely(!PageSwapCache(page))) if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
goto out; goto out;
/* /*
...@@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, ...@@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
newpage = shmem_alloc_page(gfp, info, index); newpage = shmem_alloc_page(gfp, info, index);
if (!newpage) if (!newpage)
return -ENOMEM; return -ENOMEM;
VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
*pagep = newpage;
page_cache_get(newpage); page_cache_get(newpage);
copy_highpage(newpage, oldpage); copy_highpage(newpage, oldpage);
flush_dcache_page(newpage);
VM_BUG_ON(!PageLocked(oldpage));
__set_page_locked(newpage); __set_page_locked(newpage);
VM_BUG_ON(!PageUptodate(oldpage));
SetPageUptodate(newpage); SetPageUptodate(newpage);
VM_BUG_ON(!PageSwapBacked(oldpage));
SetPageSwapBacked(newpage); SetPageSwapBacked(newpage);
VM_BUG_ON(!swap_index);
set_page_private(newpage, swap_index); set_page_private(newpage, swap_index);
VM_BUG_ON(!PageSwapCache(oldpage));
SetPageSwapCache(newpage); SetPageSwapCache(newpage);
/* /*
...@@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, ...@@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
spin_lock_irq(&swap_mapping->tree_lock); spin_lock_irq(&swap_mapping->tree_lock);
error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
newpage); newpage);
__inc_zone_page_state(newpage, NR_FILE_PAGES); if (!error) {
__dec_zone_page_state(oldpage, NR_FILE_PAGES); __inc_zone_page_state(newpage, NR_FILE_PAGES);
__dec_zone_page_state(oldpage, NR_FILE_PAGES);
}
spin_unlock_irq(&swap_mapping->tree_lock); spin_unlock_irq(&swap_mapping->tree_lock);
BUG_ON(error);
mem_cgroup_replace_page_cache(oldpage, newpage); if (unlikely(error)) {
lru_cache_add_anon(newpage); /*
* Is this possible? I think not, now that our callers check
* both PageSwapCache and page_private after getting page lock;
* but be defensive. Reverse old to newpage for clear and free.
*/
oldpage = newpage;
} else {
mem_cgroup_replace_page_cache(oldpage, newpage);
lru_cache_add_anon(newpage);
*pagep = newpage;
}
ClearPageSwapCache(oldpage); ClearPageSwapCache(oldpage);
set_page_private(oldpage, 0); set_page_private(oldpage, 0);
...@@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, ...@@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
unlock_page(oldpage); unlock_page(oldpage);
page_cache_release(oldpage); page_cache_release(oldpage);
page_cache_release(oldpage); page_cache_release(oldpage);
return 0; return error;
} }
/* /*
...@@ -1107,7 +1123,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, ...@@ -1107,7 +1123,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
/* We have to do this with page locked to prevent races */ /* We have to do this with page locked to prevent races */
lock_page(page); lock_page(page);
if (!PageSwapCache(page) || page->mapping) { if (!PageSwapCache(page) || page_private(page) != swap.val ||
page->mapping) {
error = -EEXIST; /* try again */ error = -EEXIST; /* try again */
goto failed; goto failed;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment