Commit 27ab7006 authored by Hugh Dickins, committed by Linus Torvalds

tmpfs: simplify filepage/swappage

We can now simplify shmem_getpage_gfp(): there is no longer a dilemma of
filepage passed in via shmem_readpage(), then swappage found, which must
then be copied over to it.
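
For reference, the branch this removes (it appears verbatim in the third hunk
below) had to copy the swappage's contents into the already-present filepage
and then discard the swappage:

	/* removed: swappage found while a filepage already existed */
	if (filepage) {
		shmem_swp_set(info, entry, 0);
		shmem_swp_unmap(entry);
		delete_from_swap_cache(swappage);
		spin_unlock(&info->lock);
		copy_highpage(filepage, swappage);
		unlock_page(swappage);
		page_cache_release(swappage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		set_page_dirty(filepage);
		swap_free(swap);
	}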

Although at first it's tempting to replace the **pagep arg by returning
struct page *, that makes a mess of IS_ERR_OR_NULL(page)s in all the
callers, so leave as is.
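
A rough sketch of that rejected alternative, with a hypothetical caller
(the calling convention here is illustrative, not from this patch): since
SGP_READ can legitimately complete with no page at all (a hole reads as
zeros), every caller would have to untangle NULL from ERR:

	/* hypothetical: shmem_getpage_gfp() returning the page directly */
	page = shmem_getpage_gfp(inode, idx, SGP_READ, gfp, &fault_type);
	if (IS_ERR(page))
		return PTR_ERR(page);
	if (!page) {
		/* SGP_READ over a hole: success, but no page, treat as zeros */
	}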

Insert BUG_ON(!PageUptodate) when we find and lock page: some of the
complication came from uninitialized pages inserted into filecache prior
to readpage; but now we're in control, and only release pagelock on
filecache once it's uptodate (if an error occurs in reading back from
swap, the page remains in swapcache, never moved to filecache).
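
With that invariant, the lookup fast path at the top of shmem_getpage_gfp()
reduces to the following (excerpted from the first hunk below):

	repeat:
		page = find_lock_page(mapping, idx);
		if (page) {
			/* lock acquired, so the page was inserted uptodate */
			BUG_ON(!PageUptodate(page));
			goto done;
		}
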
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e83c32e8
@@ -1246,41 +1246,47 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo;
-	struct page *filepage;
-	struct page *swappage;
+	struct page *page;
 	struct page *prealloc_page = NULL;
 	swp_entry_t *entry;
 	swp_entry_t swap;
 	int error;
+	int ret;
 
 	if (idx >= SHMEM_MAX_INDEX)
 		return -EFBIG;
 repeat:
-	filepage = find_lock_page(mapping, idx);
-	if (filepage && PageUptodate(filepage))
-		goto done;
-	if (!filepage) {
+	page = find_lock_page(mapping, idx);
+	if (page) {
 		/*
-		 * Try to preload while we can wait, to not make a habit of
-		 * draining atomic reserves; but don't latch on to this cpu.
+		 * Once we can get the page lock, it must be uptodate:
+		 * if there were an error in reading back from swap,
+		 * the page would not be inserted into the filecache.
 		 */
-		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
-		if (error)
-			goto failed;
-		radix_tree_preload_end();
-		if (sgp != SGP_READ && !prealloc_page) {
-			prealloc_page = shmem_alloc_page(gfp, info, idx);
-			if (prealloc_page) {
-				SetPageSwapBacked(prealloc_page);
-				if (mem_cgroup_cache_charge(prealloc_page,
-						current->mm, GFP_KERNEL)) {
-					page_cache_release(prealloc_page);
-					prealloc_page = NULL;
-				}
+		BUG_ON(!PageUptodate(page));
+		goto done;
+	}
+
+	/*
+	 * Try to preload while we can wait, to not make a habit of
+	 * draining atomic reserves; but don't latch on to this cpu.
+	 */
+	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+	if (error)
+		goto out;
+	radix_tree_preload_end();
+
+	if (sgp != SGP_READ && !prealloc_page) {
+		prealloc_page = shmem_alloc_page(gfp, info, idx);
+		if (prealloc_page) {
+			SetPageSwapBacked(prealloc_page);
+			if (mem_cgroup_cache_charge(prealloc_page,
+					current->mm, GFP_KERNEL)) {
+				page_cache_release(prealloc_page);
+				prealloc_page = NULL;
 			}
 		}
 	}
+	error = 0;
 
 	spin_lock(&info->lock);
 	shmem_recalc_inode(inode);
@@ -1288,21 +1294,21 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 	if (IS_ERR(entry)) {
 		spin_unlock(&info->lock);
 		error = PTR_ERR(entry);
-		goto failed;
+		goto out;
 	}
 	swap = *entry;
 
 	if (swap.val) {
 		/* Look it up and read it in.. */
-		swappage = lookup_swap_cache(swap);
-		if (!swappage) {
+		page = lookup_swap_cache(swap);
+		if (!page) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			/* here we actually do the io */
 			if (fault_type)
 				*fault_type |= VM_FAULT_MAJOR;
-			swappage = shmem_swapin(swap, gfp, info, idx);
-			if (!swappage) {
+			page = shmem_swapin(swap, gfp, info, idx);
+			if (!page) {
 				spin_lock(&info->lock);
 				entry = shmem_swp_alloc(info, idx, sgp, gfp);
 				if (IS_ERR(entry))
@@ -1314,62 +1320,42 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 				}
 				spin_unlock(&info->lock);
 				if (error)
-					goto failed;
+					goto out;
 				goto repeat;
 			}
-			wait_on_page_locked(swappage);
-			page_cache_release(swappage);
+			wait_on_page_locked(page);
+			page_cache_release(page);
 			goto repeat;
 		}
 
 		/* We have to do this with page locked to prevent races */
-		if (!trylock_page(swappage)) {
+		if (!trylock_page(page)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
-			wait_on_page_locked(swappage);
-			page_cache_release(swappage);
+			wait_on_page_locked(page);
+			page_cache_release(page);
 			goto repeat;
 		}
-		if (PageWriteback(swappage)) {
+		if (PageWriteback(page)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
-			wait_on_page_writeback(swappage);
-			unlock_page(swappage);
-			page_cache_release(swappage);
+			wait_on_page_writeback(page);
+			unlock_page(page);
+			page_cache_release(page);
 			goto repeat;
 		}
-		if (!PageUptodate(swappage)) {
+		if (!PageUptodate(page)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
-			unlock_page(swappage);
-			page_cache_release(swappage);
+			unlock_page(page);
+			page_cache_release(page);
 			error = -EIO;
-			goto failed;
+			goto out;
 		}
 
-		if (filepage) {
-			shmem_swp_set(info, entry, 0);
-			shmem_swp_unmap(entry);
-			delete_from_swap_cache(swappage);
-			spin_unlock(&info->lock);
-			copy_highpage(filepage, swappage);
-			unlock_page(swappage);
-			page_cache_release(swappage);
-			flush_dcache_page(filepage);
-			SetPageUptodate(filepage);
-			set_page_dirty(filepage);
-			swap_free(swap);
-		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
-					idx, GFP_NOWAIT))) {
-			info->flags |= SHMEM_PAGEIN;
-			shmem_swp_set(info, entry, 0);
-			shmem_swp_unmap(entry);
-			delete_from_swap_cache(swappage);
-			spin_unlock(&info->lock);
-			filepage = swappage;
-			set_page_dirty(filepage);
-			swap_free(swap);
-		} else {
+		error = add_to_page_cache_locked(page, mapping,
+						 idx, GFP_NOWAIT);
+		if (error) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			if (error == -ENOMEM) {
@@ -1378,28 +1364,33 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 				 * call memcg's OOM if needed.
 				 */
 				error = mem_cgroup_shmem_charge_fallback(
-								swappage,
-								current->mm,
-								gfp);
+						page, current->mm, gfp);
 				if (error) {
-					unlock_page(swappage);
-					page_cache_release(swappage);
-					goto failed;
+					unlock_page(page);
+					page_cache_release(page);
+					goto out;
 				}
 			}
-			unlock_page(swappage);
-			page_cache_release(swappage);
+			unlock_page(page);
+			page_cache_release(page);
 			goto repeat;
 		}
-	} else if (sgp == SGP_READ && !filepage) {
+
+		info->flags |= SHMEM_PAGEIN;
+		shmem_swp_set(info, entry, 0);
 		shmem_swp_unmap(entry);
-		filepage = find_get_page(mapping, idx);
-		if (filepage &&
-		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
+		delete_from_swap_cache(page);
+		spin_unlock(&info->lock);
+		set_page_dirty(page);
+		swap_free(swap);
+
+	} else if (sgp == SGP_READ) {
+		shmem_swp_unmap(entry);
+		page = find_get_page(mapping, idx);
+		if (page && !trylock_page(page)) {
 			spin_unlock(&info->lock);
-			wait_on_page_locked(filepage);
-			page_cache_release(filepage);
-			filepage = NULL;
+			wait_on_page_locked(page);
+			page_cache_release(page);
 			goto repeat;
 		}
 		spin_unlock(&info->lock);
@@ -1417,56 +1408,52 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 		} else if (shmem_acct_block(info->flags))
 			goto nospace;
 
-		if (!filepage) {
-			int ret;
-			filepage = prealloc_page;
-			prealloc_page = NULL;
-
-			entry = shmem_swp_alloc(info, idx, sgp, gfp);
-			if (IS_ERR(entry))
-				error = PTR_ERR(entry);
-			else {
-				swap = *entry;
-				shmem_swp_unmap(entry);
-			}
-			ret = error || swap.val;
-			if (ret)
-				mem_cgroup_uncharge_cache_page(filepage);
-			else
-				ret = add_to_page_cache_lru(filepage, mapping,
+		page = prealloc_page;
+		prealloc_page = NULL;
+
+		entry = shmem_swp_alloc(info, idx, sgp, gfp);
+		if (IS_ERR(entry))
+			error = PTR_ERR(entry);
+		else {
+			swap = *entry;
+			shmem_swp_unmap(entry);
+		}
+		ret = error || swap.val;
+		if (ret)
+			mem_cgroup_uncharge_cache_page(page);
+		else
+			ret = add_to_page_cache_lru(page, mapping,
 						idx, GFP_NOWAIT);
-			/*
-			 * At add_to_page_cache_lru() failure, uncharge will
-			 * be done automatically.
-			 */
-			if (ret) {
-				shmem_unacct_blocks(info->flags, 1);
-				shmem_free_blocks(inode, 1);
-				spin_unlock(&info->lock);
-				page_cache_release(filepage);
-				filepage = NULL;
-				if (error)
-					goto failed;
-				goto repeat;
-			}
-			info->flags |= SHMEM_PAGEIN;
-		}
-
+		/*
+		 * At add_to_page_cache_lru() failure,
+		 * uncharge will be done automatically.
+		 */
+		if (ret) {
+			shmem_unacct_blocks(info->flags, 1);
+			shmem_free_blocks(inode, 1);
+			spin_unlock(&info->lock);
+			page_cache_release(page);
+			if (error)
+				goto out;
+			goto repeat;
+		}
+
+		info->flags |= SHMEM_PAGEIN;
 		info->alloced++;
 		spin_unlock(&info->lock);
-		clear_highpage(filepage);
-		flush_dcache_page(filepage);
-		SetPageUptodate(filepage);
+		clear_highpage(page);
+		flush_dcache_page(page);
+		SetPageUptodate(page);
 		if (sgp == SGP_DIRTY)
-			set_page_dirty(filepage);
+			set_page_dirty(page);
 
 	} else {
 		spin_unlock(&info->lock);
 		error = -ENOMEM;
 		goto out;
 	}
 done:
-	*pagep = filepage;
+	*pagep = page;
 	error = 0;
 out:
 	if (prealloc_page) {
@@ -1482,21 +1469,13 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 	 * but must also avoid reporting a spurious ENOSPC while working on a
 	 * full tmpfs.
 	 */
-	if (!filepage) {
-		struct page *page = find_get_page(mapping, idx);
-		if (page) {
-			spin_unlock(&info->lock);
-			page_cache_release(page);
-			goto repeat;
-		}
-	}
+	page = find_get_page(mapping, idx);
 	spin_unlock(&info->lock);
-	error = -ENOSPC;
-failed:
-	if (filepage) {
-		unlock_page(filepage);
-		page_cache_release(filepage);
+	if (page) {
+		page_cache_release(page);
+		goto repeat;
 	}
+	error = -ENOSPC;
 	goto out;
 }