Commit b065b432 authored by Hugh Dickins, committed by Linus Torvalds

shmem: cleanup shmem_add_to_page_cache

shmem_add_to_page_cache() has three callsites, but only one of them wants
the radix_tree_preload() (an exceptional entry guarantees that the radix
tree node is present in the other cases), and only that site needs the
mem_cgroup_uncharge_cache_page() on failure (PageSwapCache makes it a no-op
in the other cases).  We did it this way originally to reflect
add_to_page_cache_locked(); but it's confusing now, so move the radix_tree
preloading and mem_cgroup uncharging to that one caller.
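
The resulting division of labour, roughly (a condensed sketch of the
caller-side pattern, assuming the same page/mapping/index/gfp/error
variables as in the hunks below, not a verbatim copy of them): the getpage
path alone preloads the radix tree around the insertion and uncharges the
memcg if the insertion fails, while shmem_add_to_page_cache() itself only
takes tree_lock and inserts or replaces the entry.

	/*
	 * Sketch: radix_tree_preload() preallocates tree nodes with the
	 * given gfp mask, so the later radix_tree_insert() cannot fail
	 * for lack of memory while tree_lock is held.  Replacing an
	 * existing exceptional entry needs no preload, because the
	 * radix tree node is already present.
	 */
	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);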
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d1899228
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -288,40 +288,31 @@ static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
 				   pgoff_t index, gfp_t gfp, void *expected)
 {
-	int error = 0;
+	int error;
 
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapBacked(page));
 
+	page_cache_get(page);
+	page->mapping = mapping;
+	page->index = index;
+
+	spin_lock_irq(&mapping->tree_lock);
 	if (!expected)
-		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		error = radix_tree_insert(&mapping->page_tree, index, page);
+	else
+		error = shmem_radix_tree_replace(mapping, index, expected,
+								 page);
 	if (!error) {
-		page_cache_get(page);
-		page->mapping = mapping;
-		page->index = index;
-
-		spin_lock_irq(&mapping->tree_lock);
-		if (!expected)
-			error = radix_tree_insert(&mapping->page_tree,
-							index, page);
-		else
-			error = shmem_radix_tree_replace(mapping, index,
-							 expected, page);
-		if (!error) {
-			mapping->nrpages++;
-			__inc_zone_page_state(page, NR_FILE_PAGES);
-			__inc_zone_page_state(page, NR_SHMEM);
-			spin_unlock_irq(&mapping->tree_lock);
-		} else {
-			page->mapping = NULL;
-			spin_unlock_irq(&mapping->tree_lock);
-			page_cache_release(page);
-		}
-		if (!expected)
-			radix_tree_preload_end();
+		mapping->nrpages++;
+		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_zone_page_state(page, NR_SHMEM);
+		spin_unlock_irq(&mapping->tree_lock);
+	} else {
+		page->mapping = NULL;
+		spin_unlock_irq(&mapping->tree_lock);
+		page_cache_release(page);
 	}
-	if (error)
-		mem_cgroup_uncharge_cache_page(page);
 	return error;
 }
@@ -1202,11 +1193,18 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		__set_page_locked(page);
 		error = mem_cgroup_cache_charge(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
-		if (!error)
-			error = shmem_add_to_page_cache(page, mapping, index,
-							gfp, NULL);
 		if (error)
 			goto decused;
+		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		if (!error) {
+			error = shmem_add_to_page_cache(page, mapping, index,
+							gfp, NULL);
+			radix_tree_preload_end();
+		}
+		if (error) {
+			mem_cgroup_uncharge_cache_page(page);
+			goto decused;
+		}
 		lru_cache_add_anon(page);
 
 		spin_lock(&info->lock);