Commit fa9949da authored by Hugh Dickins, committed by Linus Torvalds

mm: use __SetPageSwapBacked and don't ClearPageSwapBacked

v3.16 commit 07a42788 ("mm: shmem: avoid atomic operation during
shmem_getpage_gfp") rightly replaced one instance of SetPageSwapBacked
by __SetPageSwapBacked, pointing out that the newly allocated page is
not yet visible to other users (except speculative get_page_unless_zero-
ers, who may not update page flags before their further checks).
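
For reference, a minimal sketch of the distinction this optimization relies on, assuming the usual PAGEFLAG()/__SETPAGEFLAG() pattern from include/linux/page-flags.h (simplified; not the exact kernel macros):

	/* atomic read-modify-write of page->flags: safe under concurrent
	 * flag updates, but implies a locked cycle on x86 */
	static inline void SetPageSwapBacked(struct page *page)
	{
		set_bit(PG_swapbacked, &page->flags);
	}

	/* non-atomic variant: only safe while nobody else can touch
	 * page->flags, i.e. a freshly allocated, not yet visible page */
	static inline void __SetPageSwapBacked(struct page *page)
	{
		__set_bit(PG_swapbacked, &page->flags);
	}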

That was part of a series in which Mel was focused on tmpfs profiles:
but almost all SetPageSwapBacked uses can be so optimized, with the same
justification.

Remove ClearPageSwapBacked from __read_swap_cache_async() error path:
it's not an error to free a page with PG_swapbacked set.
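
A rough sketch of why, paraphrasing the free path of that era (mm/page_alloc.c; not the exact code): PG_swapbacked is deliberately absent from the PAGE_FLAGS_CHECK_AT_FREE mask, so freeing a page with it set does not trigger bad_page(), and the flag is wiped along with the rest of PAGE_FLAGS_CHECK_AT_PREP anyway:

	/* paraphrase of the free-time check (free_pages_check()): */
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		/* PG_swapbacked is not part of this mask */
		bad_page(page, "PAGE_FLAGS_CHECK_AT_FREE flag(s) set",
			 page->flags & PAGE_FLAGS_CHECK_AT_FREE);
		return 1;
	}
	/* the flag is cleared here regardless, before reallocation: */
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;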

Follow a convention of __SetPageLocked, __SetPageSwapBacked instead of
doing it differently in different places; but that's for tidiness - if
the ordering actually mattered, we should not be using the __variants.
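
For example, the shmem_getpage_gfp() hunk below now reads:

	__SetPageLocked(page);
	__SetPageSwapBacked(page);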

There's probably scope for further __SetPageFlags in other places, but
SwapBacked is the one I'm interested in at the moment.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Reviewed-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9d5e6a9f
mm/migrate.c
@@ -332,7 +332,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		newpage->index = page->index;
 		newpage->mapping = page->mapping;
 		if (PageSwapBacked(page))
-			SetPageSwapBacked(newpage);
+			__SetPageSwapBacked(newpage);
 
 		return MIGRATEPAGE_SUCCESS;
 	}
@@ -378,7 +378,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
 	if (PageSwapBacked(page))
-		SetPageSwapBacked(newpage);
+		__SetPageSwapBacked(newpage);
 
 	get_page(newpage);	/* add cache reference */
 	if (PageSwapCache(page)) {
@@ -1791,7 +1791,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	/* Prepare a page as a migration target */
 	__SetPageLocked(new_page);
-	SetPageSwapBacked(new_page);
+	__SetPageSwapBacked(new_page);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
 	new_page->mapping = page->mapping;
mm/rmap.c
@@ -1249,7 +1249,7 @@ void page_add_new_anon_rmap(struct page *page,
 	int nr = compound ? hpage_nr_pages(page) : 1;
 
 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
-	SetPageSwapBacked(page);
+	__SetPageSwapBacked(page);
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		/* increment count (starts at -1) */
mm/shmem.c
@@ -1085,8 +1085,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	flush_dcache_page(newpage);
 
 	__SetPageLocked(newpage);
+	__SetPageSwapBacked(newpage);
 	SetPageUptodate(newpage);
-	SetPageSwapBacked(newpage);
 	set_page_private(newpage, swap_index);
 	SetPageSwapCache(newpage);
@@ -1276,8 +1276,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 			goto decused;
 		}
 
-		__SetPageSwapBacked(page);
 		__SetPageLocked(page);
+		__SetPageSwapBacked(page);
 		if (sgp == SGP_WRITE)
 			__SetPageReferenced(page);
mm/swap_state.c
@@ -358,7 +358,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
 		__SetPageLocked(new_page);
-		SetPageSwapBacked(new_page);
+		__SetPageSwapBacked(new_page);
 		err = __add_to_swap_cache(new_page, entry);
 		if (likely(!err)) {
 			radix_tree_preload_end();
@@ -370,7 +370,6 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			return new_page;
 		}
 		radix_tree_preload_end();
-		ClearPageSwapBacked(new_page);
 		__ClearPageLocked(new_page);
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely