Commit e7e27221 authored by Andrew Morton, committed by Christoph Hellwig

[PATCH] tmpfs: shmem_getpage unlock_page

Patch from Hugh Dickins

shmem_getpage does need to lock its page (to secure it against
shmem_writepage), but it's easier for its callers if it unlocks
before returning.  The only caller that appeared to be using the
page lock was shmem_file_write, but it wasn't actually protecting
against anything - i_sem prevents concurrent writes and truncates,
and do_shmem_file_read was dropping the lock before copying anyway.
parent 51924607
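
The diff below drops the page-lock handling from every shmem_getpage()
caller. A minimal sketch of the convention change, with simplified
surrounding code (the error handling and usage shown here are
illustrative, not the actual kernel callers):

        /* Before this patch: shmem_getpage() returned with the page
         * still locked, so each caller had to drop the lock itself:
         */
        if (shmem_getpage(inode, idx, &page) == 0) {
                /* ... use the locked page ... */
                unlock_page(page);              /* caller's job */
                page_cache_release(page);
        }

        /* After this patch: shmem_getpage() still takes the page lock
         * internally (needed only to exclude shmem_writepage) but drops
         * it before returning, so a caller just holds a reference:
         */
        if (shmem_getpage(inode, idx, &page) == 0) {
                /* ... use the page: referenced, not locked ... */
                page_cache_release(page);
        }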
@@ -737,6 +737,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
 repeat:
         page = find_lock_page(mapping, idx);
         if (page) {
+                unlock_page(page);
                 *pagep = page;
                 return 0;
         }
@@ -852,6 +853,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
         }
         /* We have the page */
+        unlock_page(page);
         *pagep = page;
         return 0;
 }
@@ -879,8 +881,7 @@ static struct page *shmem_holdpage(struct inode *inode, unsigned long idx)
         }
         spin_unlock(&info->lock);
         if (swap.val) {
-                if (shmem_getpage(inode, idx, &page) == 0)
-                        unlock_page(page);
+                (void) shmem_getpage(inode, idx, &page);
         }
         return page;
 }
@@ -903,7 +904,6 @@ struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int
         if (error)
                 return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
-        unlock_page(page);
         flush_page_to_ram(page);
         return page;
 }
@@ -1101,15 +1101,9 @@ shmem_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
                 }
                 /*
-                 * Bring in the user page that we will copy from _first_.
-                 * Otherwise there's a nasty deadlock on copying from the
-                 * same page as we're writing to, without it being marked
-                 * up-to-date.
+                 * We don't hold page lock across copy from user -
+                 * what would it guard against? - so no deadlock here.
                  */
-                { volatile unsigned char dummy;
-                        __get_user(dummy, buf);
-                        __get_user(dummy, buf+bytes-1);
-                }
                 status = shmem_getpage(inode, index, &page);
                 if (status)
@@ -1131,9 +1125,7 @@ shmem_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
                         if (pos > inode->i_size)
                                 inode->i_size = pos;
                 }
-unlock:
-                /* Mark it unlocked again and drop the page.. */
-                unlock_page(page);
+release:
                 page_cache_release(page);
                 if (status < 0)
@@ -1152,7 +1144,7 @@ shmem_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
 fail_write:
         status = -EFAULT;
         ClearPageUptodate(page);
-        goto unlock;
+        goto release;
 }
 
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc)
@@ -1194,12 +1186,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                 if (index == end_index) {
                         nr = inode->i_size & ~PAGE_CACHE_MASK;
                         if (nr <= offset) {
-                                unlock_page(page);
                                 page_cache_release(page);
                                 break;
                         }
                 }
-                unlock_page(page);
                 nr -= offset;
                 if (!list_empty(&mapping->i_mmap_shared))
@@ -1443,7 +1433,6 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                 memcpy(kaddr, symname, len);
                 kunmap(page);
                 set_page_dirty(page);
-                unlock_page(page);
                 page_cache_release(page);
         }
         dir->i_size += BOGO_DIRENT_SIZE;
@@ -1471,7 +1460,6 @@ static int shmem_readlink(struct dentry *dentry, char *buffer, int buflen)
                 return res;
         res = vfs_readlink(dentry, buffer, buflen, kmap(page));
         kunmap(page);
-        unlock_page(page);
         page_cache_release(page);
         return res;
 }
@@ -1484,7 +1472,6 @@ static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
                 return res;
         res = vfs_follow_link(nd, kmap(page));
         kunmap(page);
-        unlock_page(page);
         page_cache_release(page);
         return res;
 }