Commit 3f99969f authored by Yan, Zheng, committed by Sage Weil

ceph: acquire i_mutex in __ceph_do_pending_vmtruncate

make __ceph_do_pending_vmtruncate() acquire the i_mutex if the caller
does not already hold it, so that ceph_aio_read() can call it safely.
Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Reviewed-by: Greg Farnum <greg@inktank.com>
parent 6070e0c1
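The core of the change is a conditional-locking helper: __ceph_do_pending_vmtruncate() gains a needlock flag and acquires i_mutex itself only when the caller does not already hold it. Below is a minimal, compilable userspace sketch of the same pattern, using pthreads in place of the kernel mutex API; all names in it are illustrative, not taken from the kernel source.

    /* Sketch of the "needlock" pattern: the helper locks for itself
     * unless the caller indicates it already holds the mutex. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;
    static long pending_truncate = 4096;    /* hypothetical pending size */

    static void do_pending_vmtruncate(bool needlock)
    {
            if (needlock)
                    pthread_mutex_lock(&i_mutex);
            /* work that must run under the lock */
            printf("truncating to %ld\n", pending_truncate);
            pending_truncate = 0;
            if (needlock)
                    pthread_mutex_unlock(&i_mutex);
    }

    int main(void)
    {
            /* caller that does not hold the lock
             * (cf. ceph_aio_read/ceph_aio_write below) */
            do_pending_vmtruncate(true);

            /* caller that already holds it
             * (cf. ceph_llseek/ceph_setattr below) */
            pthread_mutex_lock(&i_mutex);
            do_pending_vmtruncate(false);
            pthread_mutex_unlock(&i_mutex);
            return 0;
    }

In the diff that follows, call sites already holding i_mutex (ceph_llseek, ceph_setattr) pass false, while lock-free call sites (ceph_aio_read, ceph_aio_write, and the vmtruncate workqueue) pass true.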
@@ -653,7 +653,7 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
 	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
 again:
-	__ceph_do_pending_vmtruncate(inode);
+	__ceph_do_pending_vmtruncate(inode, true);
 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
 	else
@@ -730,7 +730,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
 		ret = -ENOSPC;
 		goto out;
 	}
-	__ceph_do_pending_vmtruncate(inode);
+	__ceph_do_pending_vmtruncate(inode, true);
 	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
 	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
 	     inode->i_size);
@@ -801,7 +801,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
 	int ret;
 
 	mutex_lock(&inode->i_mutex);
-	__ceph_do_pending_vmtruncate(inode);
+	__ceph_do_pending_vmtruncate(inode, false);
 
 	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
...
@@ -1453,7 +1453,7 @@ static void ceph_invalidate_work(struct work_struct *work)
 /*
- * called by trunc_wq; take i_mutex ourselves
+ * called by trunc_wq;
  *
  * We also truncate in a separate thread as well.
  */
@@ -1464,9 +1464,7 @@ static void ceph_vmtruncate_work(struct work_struct *work)
 	struct inode *inode = &ci->vfs_inode;
 
 	dout("vmtruncate_work %p\n", inode);
-	mutex_lock(&inode->i_mutex);
-	__ceph_do_pending_vmtruncate(inode);
-	mutex_unlock(&inode->i_mutex);
+	__ceph_do_pending_vmtruncate(inode, true);
 	iput(inode);
 }
@@ -1490,12 +1488,10 @@ void ceph_queue_vmtruncate(struct inode *inode)
 }
 
 /*
- * called with i_mutex held.
- *
  * Make sure any pending truncation is applied before doing anything
  * that may depend on it.
  */
-void __ceph_do_pending_vmtruncate(struct inode *inode)
+void __ceph_do_pending_vmtruncate(struct inode *inode, bool needlock)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	u64 to;
@@ -1528,7 +1524,11 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
 	     ci->i_truncate_pending, to);
 	spin_unlock(&ci->i_ceph_lock);
 
+	if (needlock)
+		mutex_lock(&inode->i_mutex);
 	truncate_inode_pages(inode->i_mapping, to);
+	if (needlock)
+		mutex_unlock(&inode->i_mutex);
 
 	spin_lock(&ci->i_ceph_lock);
 	if (to == ci->i_truncate_size) {
@@ -1581,7 +1581,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
 	if (ceph_snap(inode) != CEPH_NOSNAP)
 		return -EROFS;
 
-	__ceph_do_pending_vmtruncate(inode);
+	__ceph_do_pending_vmtruncate(inode, false);
 
 	err = inode_change_ok(inode, attr);
 	if (err != 0)
@@ -1763,7 +1763,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
 	     ceph_cap_string(dirtied), mask);
 
 	ceph_mdsc_put_request(req);
-	__ceph_do_pending_vmtruncate(inode);
+	__ceph_do_pending_vmtruncate(inode, false);
 	return err;
 out:
 	spin_unlock(&ci->i_ceph_lock);
...
@@ -694,7 +694,7 @@ extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 extern int ceph_inode_holds_cap(struct inode *inode, int mask);
 extern int ceph_inode_set_size(struct inode *inode, loff_t size);
-extern void __ceph_do_pending_vmtruncate(struct inode *inode);
+extern void __ceph_do_pending_vmtruncate(struct inode *inode, bool needlock);
 extern void ceph_queue_vmtruncate(struct inode *inode);
 extern void ceph_queue_invalidate(struct inode *inode);
...