Commit 7aa8800b authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] consolidate shmem_getpage and shmem_getpage_locked

The distinction between shmem_getpage and shmem_getpage_locked is not
helpful, particularly now that info->sem is gone; and shmem_getpage is
confusingly tailored to shmem_nopage's expectations.  Put the code of
shmem_getpage_locked into the frame of shmem_getpage, leaving its
callers to unlock_page afterwards.
parent cd7fef3d
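The change that matters to callers is the new calling convention: shmem_getpage() now fills *pagep and returns 0 on success, with the page locked and a reference held, or a negative errno on failure; each caller then does its own unlock_page(). A minimal sketch of that pattern, using a hypothetical caller read_one_page() that is not part of this patch:

/*
 * Hypothetical caller, for illustration only.  shmem_getpage() as
 * reworked by this patch returns 0 with *pagep locked and referenced,
 * or a negative errno such as -EFBIG, -ENOSPC or -ENOMEM.
 */
static int read_one_page(struct inode *inode, unsigned long idx)
{
        struct page *page;
        int error;

        error = shmem_getpage(inode, idx, &page);
        if (error)
                return error;

        /* ... use the page while it is still locked, if needed ... */

        unlock_page(page);              /* caller unlocks, per this patch */
        page_cache_release(page);       /* drop the reference when finished */
        return 0;
}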
@@ -495,10 +495,6 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 /*
  * Move the page from the page cache to the swap cache.
- *
- * The page lock prevents multiple occurences of shmem_writepage at
- * once. We still need to guard against racing with
- * shmem_getpage_locked().
  */
 static int shmem_writepage(struct page * page)
 {
@@ -560,19 +556,16 @@ static int shmem_vm_writeback(struct page *page, struct writeback_control *wbc)
 }
 /*
- * shmem_getpage_locked - either get the page from swap or allocate a new one
+ * shmem_getpage - either get the page from swap or allocate a new one
  *
  * If we allocate a new one we do not mark it dirty. That's up to the
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache
- *
- * Called with the inode locked, so it cannot race with itself, but we
- * still need to guard against racing with shm_writepage(), which might
- * be trying to move the page to the swap cache as we run.
  */
-static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct inode * inode, unsigned long idx)
+static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep)
 {
        struct address_space *mapping = inode->i_mapping;
+       struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *page;
        swp_entry_t *entry;
@@ -580,7 +573,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
        int error = 0;
        if (idx >= SHMEM_MAX_INDEX)
-               return ERR_PTR(-EFBIG);
+               return -EFBIG;
        /*
         * When writing, i_sem is held against truncation and other
@@ -595,15 +588,17 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
 repeat:
        page = find_lock_page(mapping, idx);
-       if (page)
-               return page;
+       if (page) {
+               *pagep = page;
+               return 0;
+       }
        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        entry = shmem_swp_alloc(info, idx);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
-               return (void *)entry;
+               return PTR_ERR(entry);
        }
        swap = *entry;
@@ -623,7 +618,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
                        error = -ENOMEM;
                spin_unlock(&info->lock);
                if (error)
-                       return ERR_PTR(error);
+                       return error;
                goto repeat;
        }
        wait_on_page_locked(page);
@@ -652,7 +647,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
                spin_unlock(&info->lock);
                unlock_page(page);
                page_cache_release(page);
-               return ERR_PTR(error);
+               return error;
        }
        *entry = (swp_entry_t) {0};
@@ -665,7 +660,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
        spin_lock(&sbinfo->stat_lock);
        if (sbinfo->free_blocks == 0) {
                spin_unlock(&sbinfo->stat_lock);
-               return ERR_PTR(-ENOSPC);
+               return -ENOSPC;
        }
        sbinfo->free_blocks--;
        spin_unlock(&sbinfo->stat_lock);
@@ -675,7 +670,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks++;
                spin_unlock(&sbinfo->stat_lock);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
        spin_lock(&info->lock);
@@ -690,7 +685,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
                sbinfo->free_blocks++;
                spin_unlock(&sbinfo->stat_lock);
                if (error)
-                       return ERR_PTR(error);
+                       return error;
                goto repeat;
        }
        spin_unlock(&info->lock);
@@ -700,30 +695,8 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
        /* We have the page */
        SetPageUptodate(page);
-       return page;
-}
-
-static int shmem_getpage(struct inode * inode, unsigned long idx, struct page **ptr)
-{
-       struct shmem_inode_info *info = SHMEM_I(inode);
-       int error;
-
-       *ptr = ERR_PTR(-EFAULT);
-       if (inode->i_size <= (loff_t) idx * PAGE_CACHE_SIZE)
-               goto failed;
-
-       *ptr = shmem_getpage_locked(info, inode, idx);
-       if (IS_ERR (*ptr))
-               goto failed;
-       unlock_page(*ptr);
-       return 0;
-failed:
-       error = PTR_ERR(*ptr);
-       *ptr = NOPAGE_SIGBUS;
-       if (error == -ENOMEM)
-               *ptr = NOPAGE_OOM;
-       return error;
+       *pagep = page;
+       return 0;
 }
 static struct page *shmem_holdpage(struct inode *inode, unsigned long idx)
@@ -747,26 +720,32 @@ static struct page *shmem_holdpage(struct inode *inode, unsigned long idx)
        }
        spin_unlock(&info->lock);
        if (swap.val) {
-               if (shmem_getpage(inode, idx, &page) < 0)
-                       page = NULL;
+               if (shmem_getpage(inode, idx, &page) == 0)
+                       unlock_page(page);
        }
        return page;
 }
-struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused)
+struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
 {
-       struct page * page;
-       unsigned int idx;
-       struct inode * inode = vma->vm_file->f_dentry->d_inode;
+       struct inode *inode = vma->vm_file->f_dentry->d_inode;
+       struct page *page;
+       unsigned long idx;
+       int error;
        idx = (address - vma->vm_start) >> PAGE_CACHE_SHIFT;
        idx += vma->vm_pgoff;
-       if (shmem_getpage(inode, idx, &page))
-               return page;
+       if (((loff_t) idx << PAGE_CACHE_SHIFT) >= inode->i_size)
+               return NOPAGE_SIGBUS;
+       error = shmem_getpage(inode, idx, &page);
+       if (error)
+               return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
+       unlock_page(page);
        flush_page_to_ram(page);
-       return(page);
+       return page;
 }
 void shmem_lock(struct file * file, int lock)
@@ -882,7 +861,6 @@ static ssize_t
 shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 {
        struct inode *inode = file->f_dentry->d_inode;
-       struct shmem_inode_info *info;
        unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
        loff_t pos;
        struct page *page;
@@ -973,11 +951,8 @@ shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
                        __get_user(dummy, buf+bytes-1);
                }
-               info = SHMEM_I(inode);
-               page = shmem_getpage_locked(info, inode, index);
-               status = PTR_ERR(page);
-               if (IS_ERR(page))
+               status = shmem_getpage(inode, index, &page);
+               if (status)
                        break;
                /* We have exclusive IO access to the page.. */
@@ -1064,10 +1039,12 @@ static void do_shmem_file_read(struct file * filp, loff_t *ppos, read_descriptor
                if (index == end_index) {
                        nr = inode->i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset) {
+                               unlock_page(page);
                                page_cache_release(page);
                                break;
                        }
                }
+               unlock_page(page);
                nr -= offset;
                if (!list_empty(&mapping->i_mmap_shared))
@@ -1271,6 +1248,7 @@ static int shmem_rename(struct inode * old_dir, struct dentry *old_dentry, struc
 static int shmem_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
 {
+       int error;
        int len;
        struct inode *inode;
        struct page *page;
@@ -1296,11 +1274,11 @@ static int shmem_symlink(struct inode * dir, struct dentry *dentry, const char *
                iput(inode);
                return -ENOMEM;
        }
-       page = shmem_getpage_locked(info, inode, 0);
-       if (IS_ERR(page)) {
+       error = shmem_getpage(inode, 0, &page);
+       if (error) {
                vm_unacct_memory(VM_ACCT(1));
                iput(inode);
-               return PTR_ERR(page);
+               return error;
        }
        inode->i_op = &shmem_symlink_inode_operations;
        spin_lock (&shmem_ilock);
@@ -1332,27 +1310,26 @@ static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
 static int shmem_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
-       struct page * page;
+       struct page *page;
        int res = shmem_getpage(dentry->d_inode, 0, &page);
        if (res)
                return res;
-       res = vfs_readlink(dentry,buffer,buflen, kmap(page));
+       res = vfs_readlink(dentry, buffer, buflen, kmap(page));
        kunmap(page);
+       unlock_page(page);
        page_cache_release(page);
        return res;
 }
 static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
-       struct page * page;
+       struct page *page;
        int res = shmem_getpage(dentry->d_inode, 0, &page);
        if (res)
                return res;
        res = vfs_follow_link(nd, kmap(page));
        kunmap(page);
+       unlock_page(page);
        page_cache_release(page);
        return res;
 }